* capacity of the blocks to let global variables live through
* them.
*/
-#ifdef HAVE_CONFIG_H
#include "config.h"
-#endif
#include <math.h>
#include <limits.h>
#include "irprintf.h"
#include "execfreq.h"
#include "dfs_t.h"
-#include "xmalloc.h"
#include "beutil.h"
#include "bearch_t.h"
return (p->time > q->time) - (p->time < q->time);
}
-static INLINE void workset_print(const workset_t *w)
+static inline void workset_print(const workset_t *w)
{
int i;
/**
* Alloc a new workset on obstack @p ob with maximum size @p max
*/
-static INLINE workset_t *new_workset(belady_env_t *env, struct obstack *ob) {
+static inline workset_t *new_workset(belady_env_t *env, struct obstack *ob) {
workset_t *res;
size_t size = sizeof(*res) + (env->n_regs)*sizeof(res->vals[0]);
res = obstack_alloc(ob, size);
/**
* Alloc a new instance on obstack and make it equal to @param ws
*/
-static INLINE workset_t *workset_clone(belady_env_t *env, struct obstack *ob, workset_t *ws) {
+static inline workset_t *workset_clone(belady_env_t *env, struct obstack *ob, workset_t *ws) {
workset_t *res;
size_t size = sizeof(*res) + (env->n_regs)*sizeof(res->vals[0]);
res = obstack_alloc(ob, size);
* Do NOT alloc anything. Make @param tgt equal to @param src.
* returns @param tgt for convenience
*/
-static INLINE workset_t *workset_copy(belady_env_t *env, workset_t *tgt, workset_t *src) {
+static inline workset_t *workset_copy(belady_env_t *env, workset_t *tgt, workset_t *src) {
size_t size = sizeof(*src) + (env->n_regs)*sizeof(src->vals[0]);
memcpy(tgt, src, size);
return tgt;
* @param count locations given at memory @param locs.
* Set the length of @param ws to count.
*/
-static INLINE void workset_bulk_fill(workset_t *workset, int count, const loc_t *locs) {
+static inline void workset_bulk_fill(workset_t *workset, int count, const loc_t *locs) {
workset->len = count;
memcpy(&(workset->vals[0]), locs, count * sizeof(locs[0]));
}
* Inserts the value @p val into the workset, iff it is not
* already contained. The workset must not be full.
*/
-static INLINE void workset_insert(belady_env_t *env, workset_t *ws, ir_node *val) {
+static inline void workset_insert(belady_env_t *env, workset_t *ws, ir_node *val) {
int i;
/* check for current regclass */
- if (!arch_irn_consider_in_reg_alloc(env->arch, env->cls, val)) {
+ if (!arch_irn_consider_in_reg_alloc(env->cls, val)) {
// DBG((dbg, DBG_WORKSET, "Skipped %+F\n", val));
return;
}
/**
* Removes all entries from this workset
*/
-static INLINE void workset_clear(workset_t *ws) {
+static inline void workset_clear(workset_t *ws) {
ws->len = 0;
}
/**
* Removes the value @p val from the workset if present.
*/
-static INLINE void workset_remove(workset_t *ws, ir_node *val) {
+static inline void workset_remove(workset_t *ws, ir_node *val) {
int i;
for(i=0; i<ws->len; ++i) {
if (ws->vals[i].irn == val) {
}
}
-static INLINE int workset_get_index(const workset_t *ws, const ir_node *val) {
+static inline int workset_get_index(const workset_t *ws, const ir_node *val) {
int i;
for(i=0; i<ws->len; ++i) {
if (ws->vals[i].irn == val)
} block_info_t;
-static INLINE void *new_block_info(belady_env_t *bel, int id)
+static inline void *new_block_info(belady_env_t *bel, int id)
{
ir_node *bl = bel->blocks[id];
block_info_t *res = obstack_alloc(&bel->ob, sizeof(*res));
#define get_block_info(block) ((block_info_t *)get_irn_link(block))
#define set_block_info(block, info) set_irn_link(block, info)
-static INLINE ir_node *block_info_get_last_ins(block_info_t *bi)
+static inline ir_node *block_info_get_last_ins(block_info_t *bi)
{
if (!bi->last_ins)
bi->last_ins = be_get_end_of_block_insertion_point(bi->bl);
#define get_current_use(bi, irn) phase_get_irn_data(&(bi)->next_uses, (irn))
-static INLINE void advance_current_use(block_info_t *bi, const ir_node *irn)
+static inline void advance_current_use(block_info_t *bi, const ir_node *irn)
{
next_use_t *use = get_current_use(bi, irn);
block_info_t *bi; /**< The block to which bring in should happen. */
int pressure_so_far; /**< The maximal pressure till the first use of irn in bl. */
ir_node *first_use; /**< The first user of irn in bl. */
- sched_timestep_t use_step; /**< Schedule sttep of the first use. */
+ sched_timestep_t use_step; /**< Schedule step of the first use. */
int is_remat : 1; /**< Is rematerializable. */
int sect_pressure; /**< Offset to maximum pressure in block. */
bring_in_t *sect_head;
};
-static INLINE bring_in_t *new_bring_in(block_info_t *bi, ir_node *irn, const next_use_t *use)
+static inline bring_in_t *new_bring_in(block_info_t *bi, ir_node *irn, const next_use_t *use)
{
bring_in_t *br = obstack_alloc(&bi->bel->ob, sizeof(br[0]));
return (fq > fp) - (fq < fp);
}
-static INLINE unsigned get_curr_distance(block_info_t *bi, const ir_node *irn, int is_usage)
+static inline unsigned get_curr_distance(block_info_t *bi, const ir_node *irn, int is_usage)
{
belady_env_t *env = bi->bel;
sched_timestep_t curr_step = sched_get_time_step(env->instr);
next_use_t *use = get_current_use(bi, irn);
- int flags = arch_irn_get_flags(env->arch, irn);
+ int flags = arch_irn_get_flags(irn);
- assert(!(flags & arch_irn_flags_ignore));
+ assert(!arch_irn_is_ignore(irn));
- /* We have to keep nonspillable nodes in the workingset */
+ /* We have to keep non-spillable nodes in the working set */
if(flags & arch_irn_flags_dont_spill)
return 0;
return be_is_live_end(env->lv, bi->bl, irn) ? LIVE_END : DEAD;
}
-static INLINE int is_local_phi(const ir_node *bl, const ir_node *irn)
+static inline int is_local_phi(const ir_node *bl, const ir_node *irn)
{
return is_Phi(irn) && get_nodes_block(irn) == bl;
}
* @param irn The node in question.
* @return 1, if node is something transported into @p bl, 0 if not.
* @note The function will only give correct answers in the case
- * where @p irn is unsed in the block @p bl which is always
+ * where @p irn is unused in the block @p bl which is always
* the case in our usage scenario.
*/
-static INLINE int is_transport_in(const ir_node *bl, const ir_node *irn)
+static inline int is_transport_in(const ir_node *bl, const ir_node *irn)
{
return get_nodes_block(irn) != bl || is_Phi(irn);
}
static void displace(block_info_t *bi, workset_t *new_vals, int is_usage) {
belady_env_t *env = bi->bel;
workset_t *ws = env->ws;
- ir_node **to_insert = alloca(env->n_regs * sizeof(to_insert[0]));
+ ir_node **to_insert = ALLOCAN(ir_node*, env->n_regs);
int i, len, max_allowed, demand, iter;
ir_node *val;
int i, arity;
assert(workset_get_length(env->ws) <= env->n_regs && "Too much values in workset!");
- /* projs are handled with the tuple value.
+ /* Projs are handled with the tuple value.
* Phis are no real instr (see insert_starters())
* instr_nr does not increase */
if (is_Proj(irn) || is_Phi(irn))
/* allocate all values _defined_ by this instruction */
workset_clear(new_vals);
- if (get_irn_mode(irn) == mode_T) { /* special handling for tuples and projs */
+ if (get_irn_mode(irn) == mode_T) { /* special handling for Tuples and Projs */
const ir_edge_t *edge;
foreach_out_edge(irn, edge) {
if (is_op_forking(get_irn_op(env->instr))) {
for (i = get_irn_arity(env->instr) - 1; i >= 0; --i) {
ir_node *op = get_irn_n(env->instr, i);
- block_info->free_at_jump -= arch_irn_consider_in_reg_alloc(env->arch, env->cls, op);
+ block_info->free_at_jump -= arch_irn_consider_in_reg_alloc(env->cls, op);
}
}
irn_action_t *ia_top;
} rollback_info_t;
-static INLINE block_state_t *get_block_state(global_end_state_t *ges, const block_info_t *bi)
+static inline block_state_t *get_block_state(global_end_state_t *ges, const block_info_t *bi)
{
int id = bi->id;
assert(!ver_is_younger(ges->bs_tops_vers[id], ges->version));
return ver_is_older(ges->bs_tops_vers[id], ges->version) ? NULL : ges->bs_tops[bi->id];
}
-static INLINE const workset_t *get_end_state(global_end_state_t *ges, block_info_t *bi)
+static inline const workset_t *get_end_state(global_end_state_t *ges, block_info_t *bi)
{
block_state_t *bs = get_block_state(ges, bi);
return bs ? bs->end_state : bi->ws_end;
return ia;
}
-static INLINE rollback_info_t trans_begin(global_end_state_t *ges)
+static inline rollback_info_t trans_begin(global_end_state_t *ges)
{
rollback_info_t rb;
rb.obst_level = obstack_base(&ges->obst);
return rb;
}
-static INLINE void trans_rollback(global_end_state_t *ges, rollback_info_t *rb)
+static inline void trans_rollback(global_end_state_t *ges, rollback_info_t *rb)
{
block_state_t *bs;
/*
* finally there is some room. we can at least reload the value.
- * but we will try to let ot live through anyhow.
+ * but we will try to let it live through anyhow.
*/
if (slot >= 0) {
irn_action_t *vs = new_irn_action(ges, irn, bi->bl);
if (is_transport_in(bl, irn)) {
int i, n = get_irn_arity(bl);
- ir_node **nodes = alloca(get_irn_arity(bl) * sizeof(nodes[0]));
rollback_info_t rb = trans_begin(ges);
glob_costs = 0.0;
double c;
/*
- * there might by unknwons as operands of phis in that case
+ * there might be Unknowns as operands of Phis in that case
* we set the costs to zero, since they won't get spilled.
*/
- if (arch_irn_consider_in_reg_alloc(env->arch, env->cls, op))
+ if (arch_irn_consider_in_reg_alloc(env->cls, op))
c = can_make_available_at_end(ges, pr, op, limit - glob_costs, level + 1);
else
c = 0.0;
// assert(!is_local_phi(bl, irn) || !bitset_contains_irn(ges->succ_phis, irn));
/*
- * if we cannot bring the value to the use, let's see ifit would be worthwhile
+ * if we cannot bring the value to the use, let's see if it would be worthwhile
* to bring the value to the beginning of the block to have a better spill
* location.
*
*
* If the second is larger than the first,
* we have to increment the total block pressure and hence
- * save the old pressure to restire it in case of failing to
+ * save the old pressure to restore it in case of failing to
* bring the variable into the block in a register.
*/
trans = trans_begin(ges);
*
* following actions can be taken:
* a) commit changes
- * b) mark phi as succeded if node was phi
+ * b) mark phi as succeeded if node was phi
* c) insert reload at use location
* d) give a spill location hint
*
}
/*
- * go from the last bring in use to the first and add all the variabled
+ * go from the last bring in use to the first and add all the variables
* which additionally live through the block to their pressure.
* at the point were the actually treated use is, we have to increase
- * the pressure by one more as the nrought in value starts to count.
+ * the pressure by one more as the brought in value starts to count.
* Finally, adjust the front pressure as well.
*/
pressure_inc = 0;
workset_set_version(bi->ws_end, j, ver_youngest);
}
- /* determine ordeer and optimize them */
+ /* determine order and optimize them */
for (br = determine_global_order(env); *br; ++br)
optimize_variable(&ges, *br);
if (!is_Phi(irn))
break;
- if (arch_irn_consider_in_reg_alloc(env->arch, env->cls, irn)
+ if (arch_irn_consider_in_reg_alloc(env->cls, irn)
&& !bitset_contains_irn(ges.succ_phis, irn))
be_spill_phi(env->senv, irn);
}