row_col_t *rows;
};
-static INLINE void alloc_cols(row_col_t *row, int c_cols) {
+static inline void alloc_cols(row_col_t *row, int c_cols) {
assert(c_cols > row->c_cols);
row->c_cols = c_cols;
row->cols = XREALLOC(row->cols, col_val_t, c_cols);
}
-static INLINE void alloc_rows(gs_matrix_t *m, int c_rows, int c_cols, int begin_init) {
+static inline void alloc_rows(gs_matrix_t *m, int c_rows, int c_cols, int begin_init) {
int i;
assert(c_rows > m->c_rows);
* @note also see comments for hashset_insert()
* @internal
*/
-static INLINE
+static inline
InsertReturnValue insert_nogrow(HashSet *self, KeyType key)
{
size_t num_probes = 0;
* calculate shrink and enlarge limits
* @internal
*/
-static INLINE
+static inline
void reset_thresholds(HashSet *self)
{
self->enlarge_threshold = (size_t) HT_OCCUPANCY_FLT(self->num_buckets);
* Resize the hashset
* @internal
*/
-static INLINE
+static inline
void resize(HashSet *self, size_t new_size)
{
size_t num_buckets = self->num_buckets;
#else
/* resize must be defined outside */
-static INLINE void resize(HashSet *self, size_t new_size);
+static inline void resize(HashSet *self, size_t new_size);
#endif
 * grow the hashset if adding one more element would make it too crowded
* @internal
*/
-static INLINE
+static inline
void maybe_grow(HashSet *self)
{
size_t resize_to;
* shrink the hashset if it is only sparsely filled
* @internal
*/
-static INLINE
+static inline
void maybe_shrink(HashSet *self)
{
size_t size;
* Initializes hashset with a specific size
* @internal
*/
-static INLINE
+static inline
void init_size(HashSet *self, size_t initial_size)
{
if(initial_size < 4)
DEBUG_ONLY(firm_dbg_module_t *dbg);
};
-static INLINE void *get_init_mem(struct obstack *obst, long sz) {
+static inline void *get_init_mem(struct obstack *obst, long sz) {
void *p = obstack_alloc(obst, sz);
memset(p, 0, sz);
return p;
*
* @param p The pdeq chunk.
*/
-static INLINE void free_pdeq_block (pdeq *p)
+static inline void free_pdeq_block (pdeq *p)
{
#ifndef NDEBUG
p->magic = 0xbadf00d1;
*
* @return A new pdeq chunk.
*/
-static INLINE pdeq *alloc_pdeq_block (void)
+static inline pdeq *alloc_pdeq_block (void)
{
pdeq *p;
if (TUNE_NSAVED_PDEQS && pdeqs_cached) {
table->naccess, table->ncollision, table->nkey, table->ndups, table->max_chain_len, nfree);
}
-static INLINE void
+static inline void
stat_chain_len (SET *table, int chain_len)
{
table->ncollision += chain_len;
 * do one iteration step; return 1 if there is
 * still data in the set, 0 otherwise
*/
-static INLINE int
+static inline int
iter_step (SET *table)
{
if (++table->iter_j >= SEGMENT_SIZE) {
/*
* limit the hash value
*/
-static INLINE unsigned
+static inline unsigned
Hash (SET *table, unsigned h)
{
unsigned address;
* returns non-zero if the number of elements in
 * the set is greater than the number of segments * MAX_LOAD_FACTOR
*/
-static INLINE int
+static inline int
loaded (SET *table)
{
return ( ++table->nkey
#include "irgwalk.h"
static ir_visited_t master_cg_visited = 0;
-static INLINE int cg_irg_visited (ir_graph *n);
-static INLINE void mark_cg_irg_visited(ir_graph *n);
-static INLINE void set_cg_irg_visited (ir_graph *n, ir_visited_t i);
+static inline int cg_irg_visited (ir_graph *n);
+static inline void mark_cg_irg_visited(ir_graph *n);
+static inline void set_cg_irg_visited (ir_graph *n, ir_visited_t i);
/** Returns the callgraph state of the program representation. */
irp_callgraph_state get_irp_callgraph_state(void) {
/**
* allocates a new scc_info on the obstack
*/
-static INLINE scc_info *new_scc_info(struct obstack *obst) {
+static inline scc_info *new_scc_info(struct obstack *obst) {
scc_info *info = obstack_alloc(obst, sizeof(*info));
memset(info, 0, sizeof(*info));
return info;
/**
* Returns non-zero if a graph was already visited.
*/
-static INLINE int cg_irg_visited(ir_graph *irg) {
+static inline int cg_irg_visited(ir_graph *irg) {
return irg->self_visited >= master_cg_visited;
}
/**
* Marks a graph as visited.
*/
-static INLINE void mark_cg_irg_visited(ir_graph *irg) {
+static inline void mark_cg_irg_visited(ir_graph *irg) {
irg->self_visited = master_cg_visited;
}
/**
* Set a graphs visited flag to i.
*/
-static INLINE void set_cg_irg_visited(ir_graph *irg, ir_visited_t i) {
+static inline void set_cg_irg_visited(ir_graph *irg, ir_visited_t i) {
irg->self_visited = i;
}
/**
* Returns the visited flag of a graph.
*/
-static INLINE ir_visited_t get_cg_irg_visited(ir_graph *irg) {
+static inline ir_visited_t get_cg_irg_visited(ir_graph *irg) {
return irg->self_visited;
}
-static INLINE void mark_irg_in_stack(ir_graph *irg) {
+static inline void mark_irg_in_stack(ir_graph *irg) {
scc_info *info = get_irg_link(irg);
assert(info && "missing call to init_scc()");
info->in_stack = 1;
}
-static INLINE void mark_irg_not_in_stack(ir_graph *irg) {
+static inline void mark_irg_not_in_stack(ir_graph *irg) {
scc_info *info = get_irg_link(irg);
assert(info && "missing call to init_scc()");
info->in_stack = 0;
}
-static INLINE int irg_is_in_stack(ir_graph *irg) {
+static inline int irg_is_in_stack(ir_graph *irg) {
scc_info *info = get_irg_link(irg);
assert(info && "missing call to init_scc()");
return info->in_stack;
}
-static INLINE void set_irg_uplink(ir_graph *irg, int uplink) {
+static inline void set_irg_uplink(ir_graph *irg, int uplink) {
scc_info *info = get_irg_link(irg);
assert(info && "missing call to init_scc()");
info->uplink = uplink;
}
-static INLINE int get_irg_uplink(ir_graph *irg) {
+static inline int get_irg_uplink(ir_graph *irg) {
scc_info *info = get_irg_link(irg);
assert(info && "missing call to init_scc()");
return info->uplink;
}
-static INLINE void set_irg_dfn(ir_graph *irg, int dfn) {
+static inline void set_irg_dfn(ir_graph *irg, int dfn) {
scc_info *info = get_irg_link(irg);
assert(info && "missing call to init_scc()");
info->dfn = dfn;
}
-static INLINE int get_irg_dfn(ir_graph *irg) {
+static inline int get_irg_dfn(ir_graph *irg) {
scc_info *info = get_irg_link(irg);
assert(info && "missing call to init_scc()");
return info->dfn;
/**
* Initialize the irg stack.
*/
-static INLINE void init_stack(void) {
+static inline void init_stack(void) {
if (stack) {
ARR_RESIZE(ir_graph *, stack, 1000);
} else {
* push a graph on the irg stack
* @param n the graph to be pushed
*/
-static INLINE void push(ir_graph *irg) {
+static inline void push(ir_graph *irg) {
if (tos == ARR_LEN(stack)) {
int nlen = ARR_LEN(stack) * 2;
ARR_RESIZE(ir_node *, stack, nlen);
/**
* return the topmost graph on the stack and pop it
*/
-static INLINE ir_graph *pop(void) {
+static inline ir_graph *pop(void) {
ir_graph *irg = stack[--tos];
mark_irg_not_in_stack(irg);
return irg;
* The nodes up to irg belong to the current loop.
* Removes them from the stack and adds them to the current loop.
*/
-static INLINE void pop_scc_to_loop(ir_graph *irg) {
+static inline void pop_scc_to_loop(ir_graph *irg) {
ir_graph *m;
do {
* Removes and unmarks all nodes up to n from the stack.
* The nodes must be visited once more to assign them to a scc.
*/
-static INLINE void pop_scc_unmark_visit(ir_graph *n) {
+static inline void pop_scc_unmark_visit(ir_graph *n) {
ir_graph *m = NULL;
while (m != n) {
#define _dfs_int_is_ancestor(n, m) ((m)->pre_num >= (n)->pre_num && (m)->pre_num <= (n)->max_pre_num)
-static INLINE int _dfs_is_ancestor(const struct _dfs_t *dfs, const void *a, const void *b)
+static inline int _dfs_is_ancestor(const struct _dfs_t *dfs, const void *a, const void *b)
{
struct _dfs_node_t *n = _dfs_get_node(dfs, a);
struct _dfs_node_t *m = _dfs_get_node(dfs, b);
return (ef1->reg != ef2->reg);
}
-static INLINE unsigned int exec_freq_hash(reg_exec_freq *e) {
+static inline unsigned int exec_freq_hash(reg_exec_freq *e) {
return HASH_PTR(e->reg);
}
-static INLINE void set_region_exec_freq(void *reg, double freq) {
+static inline void set_region_exec_freq(void *reg, double freq) {
reg_exec_freq ef;
ef.reg = reg;
ef.freq = freq;
static double exception_prob = 0.001;
-static INLINE int is_loop_head(ir_node *cond)
+static inline int is_loop_head(ir_node *cond)
{
(void) cond;
return 0;
*
* Given all outs of the predecessor region, we can compute the weight of
* this single edge. */
-static INLINE double get_weighted_region_exec_freq(void *reg, int pos) {
+static inline double get_weighted_region_exec_freq(void *reg, int pos) {
void *pred_reg = get_region_in(reg, pos);
double res, full_freq = get_region_exec_freq (pred_reg);
int n_outs = get_region_n_outs (pred_reg);
return res;
}
-static INLINE void compute_region_freqency(void *reg, double head_weight) {
+static inline void compute_region_freqency(void *reg, double head_weight) {
int i, n_ins = get_region_n_ins(reg);
double my_freq = 0;
return (ra1->reg != ra2->reg);
}
-static INLINE int attr_set_hash(region_attr *a) {
+static inline int attr_set_hash(region_attr *a) {
return HASH_PTR(a->reg);
}
-static INLINE region_attr *get_region_attr(void *region) {
+static inline region_attr *get_region_attr(void *region) {
region_attr r_attr, *res;
r_attr.reg = region;
ARR_APP1(void *, get_region_attr(region)->op_array, cfop);
}
-static INLINE void exc_outs(void *reg, ir_node *cfop) {
+static inline void exc_outs(void *reg, ir_node *cfop) {
if (is_fragile_op(cfop) || (is_fragile_Proj(cfop)))
inc_region_n_exc_outs(reg);
}
} scc_info;
/** Allocate a new scc_info on the given obstack */
-static INLINE scc_info *new_scc_info(struct obstack *obst) {
+static inline scc_info *new_scc_info(struct obstack *obst) {
scc_info *info = obstack_alloc(obst, sizeof(*info));
memset(info, 0, sizeof(*info));
return info;
/**
* Marks the node n to be on the stack.
*/
-static INLINE void mark_irn_in_stack(ir_node *n) {
+static inline void mark_irn_in_stack(ir_node *n) {
scc_info *info = get_irn_link(n);
info->in_stack = 1;
}
/**
* Marks the node n to be not on the stack.
*/
-static INLINE void mark_irn_not_in_stack(ir_node *n) {
+static inline void mark_irn_not_in_stack(ir_node *n) {
scc_info *info = get_irn_link(n);
info->in_stack = 0;
}
/**
* Returns whether node n is on the stack.
*/
-static INLINE int irn_is_in_stack(ir_node *n) {
+static inline int irn_is_in_stack(ir_node *n) {
scc_info *info = get_irn_link(n);
return info->in_stack;
}
/**
* Sets node n uplink value.
*/
-static INLINE void set_irn_uplink(ir_node *n, int uplink) {
+static inline void set_irn_uplink(ir_node *n, int uplink) {
scc_info *info = get_irn_link(n);
info->uplink = uplink;
}
/**
* Return node n uplink value.
*/
-static INLINE int get_irn_uplink(ir_node *n) {
+static inline int get_irn_uplink(ir_node *n) {
scc_info *info = get_irn_link(n);
return info->uplink;
}
/**
* Sets node n dfn value.
*/
-static INLINE void set_irn_dfn(ir_node *n, int dfn) {
+static inline void set_irn_dfn(ir_node *n, int dfn) {
scc_info *info = get_irn_link(n);
info->dfn = dfn;
}
/**
* Returns node n dfn value.
*/
-static INLINE int get_irn_dfn(ir_node *n) {
+static inline int get_irn_dfn(ir_node *n) {
scc_info *info = get_irn_link(n);
return info->dfn;
}
/**
* Initializes the IR-node stack
*/
-static INLINE void init_stack(void) {
+static inline void init_stack(void) {
if (stack) {
ARR_RESIZE(ir_node *, stack, 1000);
} else {
/**
* Push a node n onto the IR-node stack.
*/
-static INLINE void push(ir_node *n) {
+static inline void push(ir_node *n) {
if (tos == ARR_LEN(stack)) {
int nlen = ARR_LEN(stack) * 2;
ARR_RESIZE(ir_node *, stack, nlen);
/**
* Pop a node from the IR-node stack and return it.
*/
-static INLINE ir_node *pop(void) {
+static inline ir_node *pop(void) {
ir_node *n = stack[--tos];
mark_irn_not_in_stack(n);
return n;
* The nodes from tos up to n belong to the current loop.
* Removes them from the stack and adds them to the current loop.
*/
-static INLINE void pop_scc_to_loop(ir_node *n) {
+static inline void pop_scc_to_loop(ir_node *n) {
ir_node *m;
do {
* Removes and unmarks all nodes up to n from the stack.
* The nodes must be visited once more to assign them to a scc.
*/
-static INLINE void pop_scc_unmark_visit(ir_node *n) {
+static inline void pop_scc_unmark_visit(ir_node *n) {
ir_node *m;
do {
* Clear the backedges for all nodes.
* Called from a walker.
*/
-static INLINE void init_node(ir_node *n, void *env) {
+static inline void init_node(ir_node *n, void *env) {
struct obstack *obst = env;
if (is_Block(n))
set_irn_link(n, new_scc_info(obst));
/**
* Initializes the common global settings for the scc algorithm
*/
-static INLINE void init_scc_common(void) {
+static inline void init_scc_common(void) {
current_dfn = 1;
loop_node_cnt = 0;
init_stack();
* Initializes the scc algorithm for the intraprocedural case.
* Add scc info to every block node.
*/
-static INLINE void init_scc(ir_graph *irg, struct obstack *obst) {
+static inline void init_scc(ir_graph *irg, struct obstack *obst) {
init_scc_common();
irg_walk_graph(irg, init_node, NULL, obst);
}
-static INLINE void finish_scc(void)
+static inline void finish_scc(void)
{
finish_stack();
}
/**
* Initializes the scc algorithm for the interprocedural case.
*/
-static INLINE void init_ip_scc(struct obstack *obst) {
+static inline void init_ip_scc(struct obstack *obst) {
init_scc_common();
cg_walk(init_node, NULL, obst);
/**
 * returns non-zero if l is the outermost loop.
*/
-INLINE static int is_outermost_loop(ir_loop *l) {
+inline static int is_outermost_loop(ir_loop *l) {
return l == get_loop_outer_loop(l);
}
* if V is a root, return v, else return the vertex u, not being the
* root, with minimum u->semi on the path from v to its root.
*/
-INLINE static tmp_dom_info *dom_eval(tmp_dom_info *v) {
+inline static tmp_dom_info *dom_eval(tmp_dom_info *v) {
if (!v->ancestor) return v;
dom_compress (v);
return v->label;
}
/** make V W's ancestor */
-INLINE static void dom_link(tmp_dom_info *v, tmp_dom_info *w) {
+inline static void dom_link(tmp_dom_info *v, tmp_dom_info *w) {
w->ancestor = v;
}
* Checks whether a pointer points to a extended basic block.
* Intern version for libFirm.
*/
-static INLINE int
+static inline int
_is_ir_extbb (const void *thing) {
return (get_kind(thing) == k_ir_extblk);
}
* Gets the visited counter of an extended block.
* Internal version for libFirm.
*/
-static INLINE ir_visited_t
+static inline ir_visited_t
_get_extbb_visited(const ir_extblk *blk) {
assert(blk);
return blk->visited;
* Sets the visited counter of an extended block.
* Internal version for libFirm.
*/
-static INLINE void
+static inline void
_set_extbb_visited(ir_extblk *blk, ir_visited_t visited) {
assert(blk);
blk->visited = visited;
* Mark an extended block as visited in a graph.
* Internal version for libFirm.
*/
-static INLINE void
+static inline void
_mark_extbb_visited(ir_extblk *blk) {
assert(blk);
blk->visited = current_ir_graph->block_visited;
* Returns non-zero if an extended was visited.
* Internal version for libFirm.
*/
-static INLINE int
+static inline int
_extbb_visited(const ir_extblk *blk) {
assert(blk);
return blk->visited >= current_ir_graph->block_visited;
* Returns non-zero if an extended block was NOT visited.
* Internal version for libFirm.
*/
-static INLINE int
+static inline int
_extbb_not_visited(const ir_extblk *blk) {
assert(blk);
return blk->visited < current_ir_graph->block_visited;
* Returns the link field of an extended block.
* Internal version for libFirm.
*/
-static INLINE void *
+static inline void *
_get_extbb_link(const ir_extblk *blk) {
assert(blk);
return blk->link;
* Sets the link field of an extended block.
* Internal version for libFirm.
*/
-static INLINE void
+static inline void
_set_extbb_link(ir_extblk *blk, void *link) {
assert(blk);
blk->link = link;
/**
* Return the number of basis blocks of an extended block
*/
-static INLINE int
+static inline int
_get_extbb_n_blocks(const ir_extblk *blk) {
assert(blk);
return ARR_LEN(blk->blks);
/**
* Return the i'th basis block of an extended block
*/
-static INLINE ir_node *
+static inline ir_node *
_get_extbb_block(ir_extblk *blk, int pos)
{
assert(blk && 0 <= pos && pos < _get_extbb_n_blocks(blk));
/**
* Return the leader basis block of an extended block
*/
-static INLINE ir_node *
+static inline ir_node *
_get_extbb_leader(ir_extblk *blk)
{
return blk->blks[0];
* @param irn A node.
* @return 1 if the node shall be considered in liveness, 0 if not.
*/
-static INLINE int is_liveness_node(const ir_node *irn)
+static inline int is_liveness_node(const ir_node *irn)
{
switch (get_irn_opcode(irn)) {
case iro_Block:
}
-static INLINE void compute_back_edge_chains(lv_chk_t *lv)
+static inline void compute_back_edge_chains(lv_chk_t *lv)
{
bitset_pos_t elm;
int i, n;
*/
void mature_loops(ir_loop *loop, struct obstack *obst);
-/* -------- INLINE functions -------- */
+/* -------- inline functions -------- */
-static INLINE int
+static inline int
_is_ir_loop(const void *thing) {
return get_kind(thing) == k_ir_loop;
}
-static INLINE void
+static inline void
_set_irg_loop(ir_graph *irg, ir_loop *loop) {
assert(irg);
irg->loop = loop;
}
-static INLINE ir_loop *
+static inline ir_loop *
_get_irg_loop(ir_graph *irg) {
assert(irg);
return irg->loop;
}
-static INLINE ir_loop *
+static inline ir_loop *
_get_loop_outer_loop(const ir_loop *loop) {
assert(_is_ir_loop(loop));
return loop->outer_loop;
}
-static INLINE int
+static inline int
_get_loop_depth(const ir_loop *loop) {
assert(_is_ir_loop(loop));
return loop->depth;
}
-static INLINE int
+static inline int
_get_loop_n_sons(const ir_loop *loop) {
assert(_is_ir_loop(loop));
return loop->n_sons;
}
/* Uses temporary information to get the loop */
-static INLINE ir_loop *
+static inline ir_loop *
_get_irn_loop(const ir_node *n) {
return n->loop;
}
* position 0, the Start block at position 1. This is necessary for
* the out block walker.
*/
-static INLINE void fix_start_proj(ir_graph *irg) {
+static inline void fix_start_proj(ir_graph *irg) {
ir_node *startbl = get_irg_start_block(irg);
if (get_Block_n_cfg_outs(startbl)) {
/**
* Allocates a new SCC info on the given obstack.
*/
-static INLINE scc_info *new_scc_info(struct obstack *obst) {
+static inline scc_info *new_scc_info(struct obstack *obst) {
scc_info *info = obstack_alloc(obst, sizeof(*info));
memset(info, 0, sizeof(*info));
return info;
/**
* Mark node n being on the SCC stack.
*/
-static INLINE void mark_irn_in_stack(ir_node *n) {
+static inline void mark_irn_in_stack(ir_node *n) {
scc_info *scc = get_irn_link(n);
assert(scc);
scc->in_stack = 1;
/**
* Mark node n NOT being on the SCC stack.
*/
-static INLINE void mark_irn_not_in_stack(ir_node *n) {
+static inline void mark_irn_not_in_stack(ir_node *n) {
scc_info *scc = get_irn_link(n);
assert(scc);
scc->in_stack = 0;
/**
* Checks if a node is on the SCC stack.
*/
-static INLINE int irn_is_in_stack(ir_node *n) {
+static inline int irn_is_in_stack(ir_node *n) {
scc_info *scc = get_irn_link(n);
assert(scc);
return scc->in_stack;
/**
* Sets the uplink number for a node.
*/
-static INLINE void set_irn_uplink(ir_node *n, int uplink) {
+static inline void set_irn_uplink(ir_node *n, int uplink) {
scc_info *scc = get_irn_link(n);
assert(scc);
scc->uplink = uplink;
/**
* Sets the depth-first-search number for a node.
*/
-static INLINE void set_irn_dfn(ir_node *n, int dfn) {
+static inline void set_irn_dfn(ir_node *n, int dfn) {
scc_info *scc = get_irn_link(n);
assert(scc);
scc->dfn = dfn;
/**
* initializes the stack
*/
-static INLINE void init_stack(void) {
+static inline void init_stack(void) {
if (stack) {
ARR_RESIZE(ir_node *, stack, 1000);
} else {
*
* @param n The node to push
*/
-static INLINE void push(ir_node *n) {
+static inline void push(ir_node *n) {
if (tos == ARR_LEN(stack)) {
int nlen = ARR_LEN(stack) * 2;
ARR_RESIZE(ir_node *, stack, nlen);
*
* @return The topmost node
*/
-static INLINE ir_node *pop(void) {
+static inline ir_node *pop(void) {
ir_node *n = stack[--tos];
mark_irn_not_in_stack(n);
return n;
* The nodes up to n belong to the current loop.
* Removes them from the stack and adds them to the current loop.
*/
-static INLINE void pop_scc_to_loop(ir_node *n) {
+static inline void pop_scc_to_loop(ir_node *n) {
ir_node *m;
int i = 0;
/* Removes and unmarks all nodes up to n from the stack.
The nodes must be visited once more to assign them to a scc. */
-static INLINE void pop_scc_unmark_visit(ir_node *n) {
+static inline void pop_scc_unmark_visit(ir_node *n) {
ir_node *m = NULL;
while (m != n) {
/* Initialization steps. **********************************************/
-static INLINE void init_node(ir_node *n, void *env) {
+static inline void init_node(ir_node *n, void *env) {
struct obstack *obst = env;
set_irn_link(n, new_scc_info(obst));
clear_backedges(n);
}
-static INLINE void init_scc_common(void) {
+static inline void init_scc_common(void) {
current_dfn = 1;
loop_node_cnt = 0;
init_stack();
}
-static INLINE void init_scc(ir_graph *irg, struct obstack *obst) {
+static inline void init_scc(ir_graph *irg, struct obstack *obst) {
init_scc_common();
irg_walk_graph(irg, init_node, NULL, obst);
/*
*/
}
-static INLINE void finish_scc(void)
+static inline void finish_scc(void)
{
finish_stack();
}
#ifdef INTERPROCEDURAL_VIEW
-static INLINE void init_ip_scc(struct obstack *obst) {
+static inline void init_ip_scc(struct obstack *obst) {
init_scc_common();
cg_walk(init_node, NULL, obst);
}
/* When to walk from nodes to blocks. Only for Control flow operations? */
-static INLINE int get_start_index(ir_node *n) {
+static inline int get_start_index(ir_node *n) {
#undef BLOCK_BEFORE_NODE
#define BLOCK_BEFORE_NODE 1
*
* @param n the node to check
*/
-static INLINE int is_possible_loop_head(ir_node *n) {
+static inline int is_possible_loop_head(ir_node *n) {
ir_op *op = get_irn_op(n);
return ((op == op_Block) ||
(op == op_Phi) ||
#endif
-static INLINE int is_outermost_loop(ir_loop *l) {
+static inline int is_outermost_loop(ir_loop *l) {
return l == get_loop_outer_loop(l);
}
ir_node ***phi_cls; /* the array of node pointers representing the class */
} irn_phi_class_t;
-static INLINE ir_node ***_get_phi_class(ir_phase *ph, ir_node *irn) {
+static inline ir_node ***_get_phi_class(ir_phase *ph, ir_node *irn) {
irn_phi_class_t *ipc = phase_get_or_set_irn_data(ph, irn);
return ipc->phi_cls;
}
-static INLINE void _set_phi_class(ir_phase *ph, ir_node *irn, ir_node ***cls) {
+static inline void _set_phi_class(ir_phase *ph, ir_node *irn, ir_node ***cls) {
irn_phi_class_t *ipc = phase_get_or_set_irn_data(ph, irn);
ipc->phi_cls = cls;
}
/**
* Set a node emitter. Make it a bit more type safe.
*/
-static INLINE void set_emitter(ir_op *op, emit_func arm_emit_node) {
+static inline void set_emitter(ir_op *op, emit_func arm_emit_node) {
op->ops.generic = (op_func)arm_emit_node;
}
*
****************************************************************************************************/
-static INLINE int mode_needs_gp_reg(ir_mode *mode) {
+static inline int mode_needs_gp_reg(ir_mode *mode) {
return mode_is_int(mode) || mode_is_reference(mode);
}
typedef ir_node *(*create_const_node_func)(dbg_info *db, ir_graph *irg, ir_node *block);
-static INLINE ir_node *create_const(ir_node **place,
+static inline ir_node *create_const(ir_node **place,
create_const_node_func func,
const arch_register_t* reg)
{
/**
* Set a node emitter. Make it a bit more type safe.
*/
-static INLINE void set_transformer(ir_op *op, be_transform_func arm_transform_func) {
+static inline void set_transformer(ir_op *op, be_transform_func arm_transform_func) {
op->ops.generic = (op_func)arm_transform_func;
}
* @param is_res true for call results, false for call arguments
* @param pos position of the argument
*/
-static INLINE be_abi_call_arg_t *get_call_arg(be_abi_call_t *call, int is_res, int pos)
+static inline be_abi_call_arg_t *get_call_arg(be_abi_call_t *call, int is_res, int pos)
{
return get_or_set_call_arg(call, is_res, pos, 0);
}
* Returns non-zero if the call argument at given position
* is transfered on the stack.
*/
-static INLINE int is_on_stack(be_abi_call_t *call, int pos)
+static inline int is_on_stack(be_abi_call_t *call, int pos)
{
be_abi_call_arg_t *arg = get_call_arg(call, 0, pos);
return arg && !arg->in_reg;
* @param irn The node to get the responsible isa for.
* @return The irn operations given by the responsible isa.
*/
-static INLINE const arch_irn_ops_t *get_irn_ops(const ir_node *irn)
+static inline const arch_irn_ops_t *get_irn_ops(const ir_node *irn)
{
const ir_op *ops;
const arch_irn_ops_t *be_ops;
arch_register_type_t type; /**< The type of the register. */
};
-static INLINE const arch_register_class_t *
+static inline const arch_register_class_t *
_arch_register_get_class(const arch_register_t *reg)
{
return reg->reg_class;
}
-static INLINE
+static inline
unsigned _arch_register_get_index(const arch_register_t *reg)
{
return reg->index;
}
-static INLINE
+static inline
const char *_arch_register_get_name(const arch_register_t *reg)
{
return reg->name;
/** return the register class flags */
#define arch_register_class_flags(cls) ((cls)->flags)
-static INLINE const arch_register_t *
+static inline const arch_register_t *
_arch_register_for_index(const arch_register_class_t *cls, unsigned idx)
{
assert(idx < cls->n_regs);
(must_be_different) */
};
-static INLINE int reg_reqs_equal(const arch_register_req_t *req1,
+static inline int reg_reqs_equal(const arch_register_req_t *req1,
const arch_register_req_t *req2)
{
if (req1 == req2)
* @param is_def Is the border a use or a def.
* @return The created border.
*/
-static INLINE border_t *border_add(be_chordal_env_t *env, struct list_head *head,
+static inline border_t *border_add(be_chordal_env_t *env, struct list_head *head,
ir_node *irn, unsigned step, unsigned pressure,
unsigned is_def, unsigned is_real)
{
* @param irn The node.
* @return 1, if the node is of that register class, 0 if not.
*/
-static INLINE int has_reg_class(const be_chordal_env_t *env, const ir_node *irn)
+static inline int has_reg_class(const be_chordal_env_t *env, const ir_node *irn)
{
return arch_irn_consider_in_reg_alloc(env->cls, irn);
}
/**
* Starts a walk for memory operands if supported by the backend.
*/
-static INLINE void check_for_memory_operands(ir_graph *irg)
+static inline void check_for_memory_operands(ir_graph *irg)
{
irg_walk_graph(irg, NULL, memory_operand_walker, NULL);
}
bitset_t *ignore_colors;/**< A set of colors which shall be ignored in register allocation. */
};
-static INLINE struct list_head *_get_block_border_head(const be_chordal_env_t *inf, ir_node *bl) {
+static inline struct list_head *_get_block_border_head(const be_chordal_env_t *inf, ir_node *bl) {
return pmap_get(inf->border_heads, bl);
}
static pset *pinned_global; /**< optimized nodes should not be altered any more */
-static INLINE int nodes_interfere(const be_chordal_env_t *env, const ir_node *a, const ir_node *b)
+static inline int nodes_interfere(const be_chordal_env_t *env, const ir_node *a, const ir_node *b)
{
if (env->ifg)
return be_ifg_connected(env->ifg, a, b);
* If a local pinned conflict occurs, a new edge in the conflict graph is added.
* The next maximum independent set build, will regard it.
*/
-static INLINE void qnode_add_conflict(const qnode_t *qn, const ir_node *n1, const ir_node *n2) {
+static inline void qnode_add_conflict(const qnode_t *qn, const ir_node *n1, const ir_node *n2) {
conflict_t c;
DBG((dbg, LEVEL_4, "\t %+F -- %+F\n", n1, n2));
/**
* Checks if two nodes are in a conflict.
*/
-static INLINE int qnode_are_conflicting(const qnode_t *qn, const ir_node *n1, const ir_node *n2) {
+static inline int qnode_are_conflicting(const qnode_t *qn, const ir_node *n1, const ir_node *n2) {
conflict_t c;
/* search for live range interference */
if (n1!=n2 && nodes_interfere(qn->ou->co->cenv, n1, n2))
/**
* Finds a node status entry of a node if existent. Otherwise return NULL
*/
-static INLINE const node_stat_t *qnode_find_node(const qnode_t *qn, ir_node *irn) {
+static inline const node_stat_t *qnode_find_node(const qnode_t *qn, ir_node *irn) {
node_stat_t find;
find.irn = irn;
return set_find(qn->changed_nodes, &find, sizeof(find), hash_irn(irn));
* Finds a node status entry of a node if existent. Otherwise it will return
* an initialized new entry for this node.
*/
-static INLINE node_stat_t *qnode_find_or_insert_node(const qnode_t *qn, ir_node *irn) {
+static inline node_stat_t *qnode_find_or_insert_node(const qnode_t *qn, ir_node *irn) {
node_stat_t find;
find.irn = irn;
find.new_color = NO_COLOR;
/**
* Returns the virtual color of a node if set before, else returns the real color.
*/
-static INLINE int qnode_get_new_color(const qnode_t *qn, ir_node *irn) {
+static inline int qnode_get_new_color(const qnode_t *qn, ir_node *irn) {
const node_stat_t *found = qnode_find_node(qn, irn);
if (found)
return found->new_color;
/**
* Sets the virtual color of a node.
*/
-static INLINE void qnode_set_new_color(const qnode_t *qn, ir_node *irn, int color) {
+static inline void qnode_set_new_color(const qnode_t *qn, ir_node *irn, int color) {
node_stat_t *found = qnode_find_or_insert_node(qn, irn);
found->new_color = color;
DBG((dbg, LEVEL_3, "\t col(%+F) := %d\n", irn, color));
* to the same optimization unit and has been optimized before the current
* processed node.
*/
-static INLINE int qnode_is_pinned_local(const qnode_t *qn, ir_node *irn) {
+static inline int qnode_is_pinned_local(const qnode_t *qn, ir_node *irn) {
const node_stat_t *found = qnode_find_node(qn, irn);
if (found)
return found->pinned_local;
* Local-pins a node, so optimizations of further nodes of the same opt unit
* can handle situations in which a color change would undo prior optimizations.
*/
-static INLINE void qnode_pin_local(const qnode_t *qn, ir_node *irn) {
+static inline void qnode_pin_local(const qnode_t *qn, ir_node *irn) {
node_stat_t *found = qnode_find_or_insert_node(qn, irn);
found->pinned_local = 1;
if (found->new_color == NO_COLOR)
* Determines a maximum weighted independent set with respect to
* the interference and conflict edges of all nodes in a qnode.
*/
-static INLINE void qnode_max_ind_set(qnode_t *qn, const unit_t *ou) {
+static inline void qnode_max_ind_set(qnode_t *qn, const unit_t *ou) {
ir_node **safe, **unsafe;
int i, o, safe_count, safe_costs, unsafe_count, *unsafe_costs;
bitset_t *curr, *best;
/**
* Creates a new qnode
*/
-static INLINE qnode_t *new_qnode(const unit_t *ou, int color) {
+static inline qnode_t *new_qnode(const unit_t *ou, int color) {
qnode_t *qn = XMALLOC(qnode_t);
qn->ou = ou;
qn->color = color;
/**
* Frees space used by a queue node
*/
-static INLINE void free_qnode(qnode_t *qn) {
+static inline void free_qnode(qnode_t *qn) {
del_set(qn->conflicts);
del_set(qn->changed_nodes);
xfree(qn->mis);
* Inserts a qnode in the sorted queue of the optimization unit. Queue is
* ordered by field 'size' (the size of the mis) in decreasing order.
*/
-static INLINE void ou_insert_qnode(unit_t *ou, qnode_t *qn) {
+static inline void ou_insert_qnode(unit_t *ou, qnode_t *qn) {
struct list_head *lh;
if (qnode_are_conflicting(qn, ou->nodes[0], ou->nodes[0])) {
return ci->tmp_fixed ? ci->tmp_col : ci->orig_col;
}
-static INLINE int color_is_fix(co2_t *env, const ir_node *irn)
+static inline int color_is_fix(co2_t *env, const ir_node *irn)
{
co2_irn_t *ci = get_co2_irn(env, irn);
return ci->fixed || ci->tmp_fixed;
}
-static INLINE bitset_t *get_adm(co2_t *env, co2_irn_t *ci)
+static inline bitset_t *get_adm(co2_t *env, co2_irn_t *ci)
{
if(ci->adm_cache == NULL) {
const arch_register_req_t *req;
return ci->adm_cache;
}
-static INLINE bitset_t *admissible_colors(co2_t *env, co2_irn_t *ci, bitset_t *bs)
+static inline bitset_t *admissible_colors(co2_t *env, co2_irn_t *ci, bitset_t *bs)
{
bitset_copy(bs, get_adm(env, ci));
return bs;
}
-static INLINE int is_color_admissible(co2_t *env, co2_irn_t *ci, col_t col)
+static inline int is_color_admissible(co2_t *env, co2_irn_t *ci, col_t col)
{
bitset_t *bs = get_adm(env, ci);
return bitset_is_set(bs, col);
}
-static INLINE int is_constrained(co2_t *env, co2_irn_t *ci)
+static inline int is_constrained(co2_t *env, co2_irn_t *ci)
{
if(!ci->adm_cache)
get_adm(env, ci);
#endif /* DEBUG_libfirm */
-static INLINE int get_mst_irn_col(const co_mst_irn_t *node) {
+static inline int get_mst_irn_col(const co_mst_irn_t *node) {
return node->tmp_col >= 0 ? node->tmp_col : node->col;
}
/**
* Creates a new affinity chunk
*/
-static INLINE aff_chunk_t *new_aff_chunk(co_mst_env_t *env) {
+static inline aff_chunk_t *new_aff_chunk(co_mst_env_t *env) {
aff_chunk_t *c = XMALLOCF(aff_chunk_t, color_affinity, env->n_regs);
c->n = NEW_ARR_F(const ir_node *, 0);
c->interfere = NEW_ARR_F(const ir_node *, 0);
/**
* Frees all memory allocated by an affinity chunk.
*/
-static INLINE void delete_aff_chunk(co_mst_env_t *env, aff_chunk_t *c) {
+static inline void delete_aff_chunk(co_mst_env_t *env, aff_chunk_t *c) {
pset_remove(env->chunkset, c, c->id);
DEL_ARR_F(c->interfere);
DEL_ARR_F(c->n);
* @return the position where n is found in the array arr or ~pos
* if the nodes is not here.
*/
-static INLINE int nodes_bsearch(const ir_node **arr, const ir_node *n) {
+static inline int nodes_bsearch(const ir_node **arr, const ir_node *n) {
int hi = ARR_LEN(arr);
int lo = 0;
/**
* Adds a node to an affinity chunk
*/
-static INLINE void aff_chunk_add_node(aff_chunk_t *c, co_mst_irn_t *node) {
+static inline void aff_chunk_add_node(aff_chunk_t *c, co_mst_irn_t *node) {
int i;
if (! nodes_insert(&c->n, node->irn))
/**
* Check if affinity chunk @p chunk interferes with node @p irn.
*/
-static INLINE int aff_chunk_interferes(const aff_chunk_t *chunk, const ir_node *irn) {
+static inline int aff_chunk_interferes(const aff_chunk_t *chunk, const ir_node *irn) {
return node_contains(chunk->interfere, irn);
}
* @param c2 Another chunk
* @return 1 if there are interferences between nodes of c1 and c2, 0 otherwise.
*/
-static INLINE int aff_chunks_interfere(const aff_chunk_t *c1, const aff_chunk_t *c2) {
+static inline int aff_chunks_interfere(const aff_chunk_t *c1, const aff_chunk_t *c2) {
int i;
if (c1 == c2)
* Returns the affinity chunk of @p irn or creates a new
* one with @p irn as element if there is none assigned.
*/
-static INLINE aff_chunk_t *get_aff_chunk(co_mst_env_t *env, const ir_node *irn) {
+static inline aff_chunk_t *get_aff_chunk(co_mst_env_t *env, const ir_node *irn) {
co_mst_irn_t *node = get_co_mst_irn(env, irn);
return node->chunk;
}
* Resets the temporary fixed color of all nodes within wait queue @p nodes.
* ATTENTION: the queue is empty after calling this function!
*/
-static INLINE void reject_coloring(struct list_head *nodes) {
+static inline void reject_coloring(struct list_head *nodes) {
co_mst_irn_t *n, *temp;
DB((dbg, LEVEL_4, "\treject coloring for"));
list_for_each_entry_safe(co_mst_irn_t, n, temp, nodes, list) {
DB((dbg, LEVEL_4, "\n"));
}
-static INLINE void materialize_coloring(struct list_head *nodes) {
+static inline void materialize_coloring(struct list_head *nodes) {
co_mst_irn_t *n, *temp;
list_for_each_entry_safe(co_mst_irn_t, n, temp, nodes, list) {
assert(n->tmp_col >= 0);
}
}
-static INLINE void set_temp_color(co_mst_irn_t *node, int col, struct list_head *changed)
+static inline void set_temp_color(co_mst_irn_t *node, int col, struct list_head *changed)
{
assert(col >= 0);
assert(!node->fixed);
node->tmp_col = col;
}
-static INLINE int is_loose(co_mst_irn_t *node)
+static inline int is_loose(co_mst_irn_t *node)
{
return !node->fixed && node->tmp_col < 0;
}
/**
* Checks if a node is simplicial in the graph heeding the already removed nodes.
*/
-static INLINE int sr_is_simplicial(size_red_t *sr, const ir_node *ifn) {
+static inline int sr_is_simplicial(size_red_t *sr, const ir_node *ifn) {
int i, o, size = 0;
ir_node **all, *curr;
be_ifg_t *ifg = sr->co->cenv->ifg;
#else /* WITH_ILP */
-static INLINE void only_that_you_can_compile_without_WITH_ILP_defined(void) {
+static inline void only_that_you_can_compile_without_WITH_ILP_defined(void) {
}
#endif /* WITH_ILP */
#else /* WITH_ILP */
-static INLINE void only_that_you_can_compile_without_WITH_ILP_defined(void) {
+static inline void only_that_you_can_compile_without_WITH_ILP_defined(void) {
}
#endif /* WITH_ILP */
#define HASH_EDGE(e) (hash_irn((e)->n1) ^ hash_irn((e)->n2))
-static INLINE edge_t *add_edge(set *edges, ir_node *n1, ir_node *n2, int *counter) {
+static inline edge_t *add_edge(set *edges, ir_node *n1, ir_node *n2, int *counter) {
edge_t new_edge;
if (PTR_TO_INT(n1) < PTR_TO_INT(n2)) {
return set_insert(edges, &new_edge, sizeof(new_edge), HASH_EDGE(&new_edge));
}
-static INLINE edge_t *find_edge(set *edges, ir_node *n1, ir_node *n2) {
+static inline edge_t *find_edge(set *edges, ir_node *n1, ir_node *n2) {
edge_t new_edge;
if (PTR_TO_INT(n1) < PTR_TO_INT(n2)) {
return set_find(edges, &new_edge, sizeof(new_edge), HASH_EDGE(&new_edge));
}
-static INLINE void remove_edge(set *edges, ir_node *n1, ir_node *n2, int *counter) {
+static inline void remove_edge(set *edges, ir_node *n1, ir_node *n2, int *counter) {
edge_t new_edge, *e;
if (PTR_TO_INT(n1) < PTR_TO_INT(n2)) {
#else /* WITH_ILP */
-static INLINE void only_that_you_can_compile_without_WITH_ILP_defined(void) {
+static inline void only_that_you_can_compile_without_WITH_ILP_defined(void) {
}
#endif /* WITH_ILP */
* units (ou's) args could be merged and weights are accumulated.
* Is this necessary?
*/
-static INLINE int co_ilp_get_costs(copy_opt_t *co, ir_node *root, ir_node *arg) {
+static inline int co_ilp_get_costs(copy_opt_t *co, ir_node *root, ir_node *arg) {
int i;
unit_t *curr;
nbr->costs += costs;
}
-static INLINE void add_edges(copy_opt_t *co, ir_node *n1, ir_node *n2, int costs) {
+static inline void add_edges(copy_opt_t *co, ir_node *n1, ir_node *n2, int costs) {
if (! be_ifg_connected(co->cenv->ifg, n1, n2)) {
add_edge(co, n1, n2, costs);
add_edge(co, n2, n1, costs);
#define get_Perm_src(irn) (get_irn_n(get_Proj_pred(irn), get_Proj_proj(irn)))
#define is_Perm_Proj(irn) (is_Proj(irn) && be_is_Perm(get_Proj_pred(irn)))
-static INLINE int is_2addr_code(const arch_register_req_t *req)
+static inline int is_2addr_code(const arch_register_req_t *req)
{
return (req->type & arch_register_req_type_should_be_same) != 0;
}
};
-static INLINE affinity_node_t *get_affinity_info(const copy_opt_t *co, const ir_node *irn) {
+static inline affinity_node_t *get_affinity_info(const copy_opt_t *co, const ir_node *irn) {
affinity_node_t find;
ASSERT_GS_AVAIL(co);
* @return 1 if the block at pos @p pos removed a critical edge
* 0 else
*/
-static INLINE int was_edge_critical(const ir_node *bl, int pos) {
+static inline int was_edge_critical(const ir_node *bl, int pos) {
const ir_edge_t *edge;
const ir_node *bl_at_pos, *bl_before;
assert(is_Block(bl));
* @param bl The block.
* @return The immediate dominator of the block.
*/
-static INLINE
+static inline
ir_node *get_idom(ir_node *bl)
{
ir_node *idom = get_Block_idom(bl);
*
* @param env the emitter environment
*/
-static INLINE void be_emit_char(char c)
+static inline void be_emit_char(char c)
{
obstack_1grow(&emit_obst, c);
emit_linelength++;
* @param str the string
* @param l the length of the given string
*/
-static INLINE void be_emit_string_len(const char *str, size_t l)
+static inline void be_emit_string_len(const char *str, size_t l)
{
obstack_grow(&emit_obst, str, l);
emit_linelength += l;
* @param env the emitter environment
* @param str the null-terminated string
*/
-static INLINE void be_emit_string(const char *str)
+static inline void be_emit_string(const char *str)
{
size_t len = strlen(str);
be_emit_string_len(str, len);
it->nodes = obstack_finish(&it->obst);
}
-static INLINE void node_break(nodes_iter_t *it, int force)
+static inline void node_break(nodes_iter_t *it, int force)
{
if((it->curr >= it->n || force) && it->nodes) {
obstack_free(&it->obst, NULL);
ir_nodeset_iterator_init(&it->iter, &it->neighbours);
}
-static INLINE void neighbours_break(adj_iter_t *it, int force)
+static inline void neighbours_break(adj_iter_t *it, int force)
{
(void) force;
assert(it->valid == 1);
pset *living;
} cliques_iter_t;
-static INLINE void free_clique_iter(cliques_iter_t *it) {
+static inline void free_clique_iter(cliques_iter_t *it) {
it->n_blocks = -1;
obstack_free(&it->ob, NULL);
del_pset(it->living);
* NOTE: Be careful when changing this function!
* First understand the control flow of consecutive calls.
*/
-static INLINE int get_next_clique(cliques_iter_t *it) {
+static inline int get_next_clique(cliques_iter_t *it) {
/* continue in the block we left the last time */
for (; it->blk < it->n_blocks; it->blk++) {
* Check if irn is a Proj, which has no execution units assigned.
* @return 1 if irn is a Proj having no execution units assigned, 0 otherwise
*/
-static INLINE int is_normal_Proj(const arch_env_t *env, const ir_node *irn) {
+static inline int is_normal_Proj(const arch_env_t *env, const ir_node *irn) {
return is_Proj(irn) && (arch_env_get_allowed_execution_units(env, irn) == NULL);
}
* Skips normal Projs.
* @return predecessor if irn is a normal Proj, otherwise irn.
*/
-static INLINE ir_node *skip_normal_Proj(const arch_env_t *env, ir_node *irn) {
+static inline ir_node *skip_normal_Proj(const arch_env_t *env, ir_node *irn) {
if (is_normal_Proj(env, irn))
return get_Proj_pred(irn);
return irn;
}
-static INLINE int fixed_latency(const ilp_sched_selector_t *sel, ir_node *irn, void *env) {
+static inline int fixed_latency(const ilp_sched_selector_t *sel, ir_node *irn, void *env) {
unsigned lat = be_ilp_sched_latency(sel, irn, env);
if (lat == 0 && ! is_Proj(irn) && ! be_is_Keep(irn))
lat = 1;
*
*******************************************/
-static INLINE void check_for_keeps(waitq *keeps, const ir_node *block, const ir_node *irn) {
+static inline void check_for_keeps(waitq *keeps, const ir_node *block, const ir_node *irn) {
const ir_edge_t *edge;
(void) block;
/**
* Inserts @p irn before @p before into schedule and notifies backend.
*/
-static INLINE void notified_sched_add_before(be_ilpsched_env_t *env,
+static inline void notified_sched_add_before(be_ilpsched_env_t *env,
const ir_node *before, const ir_node *irn, unsigned cycle)
{
be_ilp_sched_node_scheduled(env->sel, irn, cycle, env->block_env);
/**
* Check if node can be executed on given unit type.
*/
-static INLINE int is_valid_unit_type_for_node(const be_execution_unit_type_t *tp, be_ilpsched_irn_t *node) {
+static inline int is_valid_unit_type_for_node(const be_execution_unit_type_t *tp, be_ilpsched_irn_t *node) {
int i;
ilpsched_node_attr_t *na = get_ilpsched_node_attr(node);
#else /* WITH_ILP */
-static INLINE void some_picky_compiler_do_not_allow_empty_files(void)
+static inline void some_picky_compiler_do_not_allow_empty_files(void)
{}
#endif /* WITH_ILP */
* @param b The second node.
* @return 1 if a comes before b in the same block or if a == b, 0 else.
*/
-static INLINE int _value_dominates_intrablock(const ir_node *a, const ir_node *b)
+static inline int _value_dominates_intrablock(const ir_node *a, const ir_node *b)
{
/* TODO: ? : can be removed?! */
sched_timestep_t as = sched_is_scheduled(a) ? sched_get_time_step(a) : 0;
* @param b The second node.
* @return 1 if a comes before b in the same block, 0 else.
*/
-static INLINE int _value_strictly_dominates_intrablock(const ir_node *a, const ir_node *b)
+static inline int _value_strictly_dominates_intrablock(const ir_node *a, const ir_node *b)
{
/* TODO: ? : can be removed?! */
sched_timestep_t as = sched_is_scheduled(a) ? sched_get_time_step(a) : 0;
* @param b The second node.
* @return 1 if a dominates b or if a == b, 0 else.
*/
-static INLINE int _value_dominates(const ir_node *a, const ir_node *b)
+static inline int _value_dominates(const ir_node *a, const ir_node *b)
{
const ir_node *block_a = get_block_const(a);
const ir_node *block_b = get_block_const(b);
* @param b The second node.
* @return 1 if a dominates b, 0 else.
*/
-static INLINE int _value_strictly_dominates(const ir_node *a, const ir_node *b)
+static inline int _value_strictly_dominates(const ir_node *a, const ir_node *b)
{
const ir_node *block_a = get_block_const(a);
const ir_node *block_b = get_block_const(b);
* @param b The second value.
* @return 1, if a and b interfere, 0 if not.
*/
-static INLINE int _lv_values_interfere(const be_lv_t *lv, const ir_node *a, const ir_node *b)
+static inline int _lv_values_interfere(const be_lv_t *lv, const ir_node *a, const ir_node *b)
{
int a2b = _value_dominates(a, b);
int b2a = _value_dominates(b, a);
* @param edge The use.
* @return 1, if @p irn dominates the use @p edge.
*/
-static INLINE int _dominates_use(const ir_node *irn, const ir_edge_t *edge)
+static inline int _dominates_use(const ir_node *irn, const ir_edge_t *edge)
{
ir_node *use = get_edge_src_irn(edge);
* @param edge The use.
* @return 1, if @p irn strictly dominates the use @p edge.
*/
-static INLINE int _strictly_dominates_use(const ir_node *irn, const ir_edge_t *edge)
+static inline int _strictly_dominates_use(const ir_node *irn, const ir_edge_t *edge)
{
return get_edge_src_irn(edge) != irn && _dominates_use(irn, edge);
}
* @param where The location to check for.
* @return 1, if @p irn is live in front of @p where.
*/
-static INLINE int _be_lv_chk_before_irn(const be_irg_t *birg, const ir_node *irn, const ir_node *where)
+static inline int _be_lv_chk_before_irn(const be_irg_t *birg, const ir_node *irn, const ir_node *where)
{
const be_lv_t *lv = be_get_birg_liveness(birg);
const ir_edge_t *edge;
* @param where The location to check for.
* @return 1, if @p irn is live after @p where.
*/
-static INLINE int _be_lv_chk_after_irn(const be_irg_t *birg, const ir_node *irn, const ir_node *where)
+static inline int _be_lv_chk_after_irn(const be_irg_t *birg, const ir_node *irn, const ir_node *where)
{
const be_lv_t *lv = be_get_birg_liveness(birg);
const ir_edge_t *edge;
be_lv_t *lv;
};
-static INLINE be_lv_t *
+static inline be_lv_t *
_be_get_birg_liveness(const be_irg_t *birg) {
return birg->lv;
}
-static INLINE ir_exec_freq *
+static inline ir_exec_freq *
_be_get_birg_exec_freq(const be_irg_t *birg) {
return birg->exec_freq;
}
-static INLINE be_dom_front_info_t *
+static inline be_dom_front_info_t *
_be_get_birg_dom_front(const be_irg_t *birg) {
return birg->dom_front;
}
-static INLINE ir_graph *
+static inline ir_graph *
_be_get_birg_irg(const be_irg_t *birg) {
return birg->irg;
}
-static INLINE const arch_env_t *
+static inline const arch_env_t *
_be_get_birg_arch_env(const be_irg_t *birg) {
return birg->main_env->arch_env;
}
/**
* Returns non-zero if a node must be placed in the schedule.
*/
-static INLINE int must_appear_in_schedule(const list_sched_selector_t *sel, void *block_env, const ir_node *irn)
+static inline int must_appear_in_schedule(const list_sched_selector_t *sel, void *block_env, const ir_node *irn)
{
int res = -1;
/**
* Returns non-zero if the node is already scheduled
*/
-static INLINE int is_already_scheduled(block_sched_env_t *env, ir_node *n)
+static inline int is_already_scheduled(block_sched_env_t *env, ir_node *n)
{
int idx = get_irn_idx(n);
/**
* Mark a node as already scheduled
*/
-static INLINE void set_already_scheduled(block_sched_env_t *env, ir_node *n)
+static inline void set_already_scheduled(block_sched_env_t *env, ir_node *n)
{
int idx = get_irn_idx(n);
* @param irn The node to make ready.
* @return 1, if the node could be made ready, 0 else.
*/
-static INLINE int make_ready(block_sched_env_t *env, ir_node *pred, ir_node *irn)
+static inline int make_ready(block_sched_env_t *env, ir_node *pred, ir_node *irn)
{
int i, n;
/**
* Returns the number of not yet schedules users.
*/
-static INLINE int get_irn_not_sched_user(block_sched_env_t *env, ir_node *n) {
+static inline int get_irn_not_sched_user(block_sched_env_t *env, ir_node *n) {
int idx = get_irn_idx(n);
assert(idx < ARR_LEN(env->sched_info));
/**
* Sets the number of not yet schedules users.
*/
-static INLINE void set_irn_not_sched_user(block_sched_env_t *env, ir_node *n, int num) {
+static inline void set_irn_not_sched_user(block_sched_env_t *env, ir_node *n, int num) {
int idx = get_irn_idx(n);
assert(idx < ARR_LEN(env->sched_info));
/**
* Add @p num to the number of not yet schedules users and returns the result.
*/
-static INLINE int add_irn_not_sched_user(block_sched_env_t *env, ir_node *n, int num) {
+static inline int add_irn_not_sched_user(block_sched_env_t *env, ir_node *n, int num) {
int idx = get_irn_idx(n);
assert(idx < ARR_LEN(env->sched_info));
* @param irn the node t check
* @return 0 if no liveness info is needed, 1 else
*/
-static INLINE int is_liveness_node(const ir_node *irn)
+static inline int is_liveness_node(const ir_node *irn)
{
switch (get_irn_opcode(irn)) {
case iro_Block:
#ifdef LV_USE_BINARY_SEARCH
-static INLINE unsigned _be_liveness_bsearch(struct _be_lv_info_t *arr, unsigned idx)
+static inline unsigned _be_liveness_bsearch(struct _be_lv_info_t *arr, unsigned idx)
{
struct _be_lv_info_t *payload = arr + 1;
/**
* This function searches linearly for the node in the array.
*/
-static INLINE unsigned _be_liveness_bsearch(struct _be_lv_info_t *arr, unsigned idx) {
+static inline unsigned _be_liveness_bsearch(struct _be_lv_info_t *arr, unsigned idx) {
unsigned n = arr[0].u.head.n_members;
unsigned i;
/**
* Mark a node as live-in in a block.
*/
-static INLINE void mark_live_in(be_lv_t *lv, ir_node *block, ir_node *irn)
+static inline void mark_live_in(be_lv_t *lv, ir_node *block, ir_node *irn)
{
struct _be_lv_info_node_t *n = be_lv_get_or_set(lv, block, irn);
DBG((dbg, LEVEL_2, "marking %+F live in at %+F\n", irn, block));
/**
* Mark a node as live-out in a block.
*/
-static INLINE void mark_live_out(be_lv_t *lv, ir_node *block, ir_node *irn)
+static inline void mark_live_out(be_lv_t *lv, ir_node *block, ir_node *irn)
{
struct _be_lv_info_node_t *n = be_lv_get_or_set(lv, block, irn);
DBG((dbg, LEVEL_2, "marking %+F live out at %+F\n", irn, block));
/**
* Mark a node as live-end in a block.
*/
-static INLINE void mark_live_end(be_lv_t *lv, ir_node *block, ir_node *irn)
+static inline void mark_live_end(be_lv_t *lv, ir_node *block, ir_node *irn)
{
struct _be_lv_info_node_t *n = be_lv_get_or_set(lv, block, irn);
DBG((dbg, LEVEL_2, "marking %+F live end at %+F\n", irn, block));
} u;
};
-static INLINE int _be_lv_next_irn(const struct _be_lv_t *lv, const ir_node *bl, unsigned flags, int i)
+static inline int _be_lv_next_irn(const struct _be_lv_t *lv, const ir_node *bl, unsigned flags, int i)
{
struct _be_lv_info_t *arr = phase_get_irn_data(&lv->ph, bl);
return -1;
}
-static INLINE ir_node *_be_lv_get_irn(const struct _be_lv_t *lv, const ir_node *bl, int i)
+static inline ir_node *_be_lv_get_irn(const struct _be_lv_t *lv, const ir_node *bl, int i)
{
struct _be_lv_info_t *arr = phase_get_irn_data(&lv->ph, bl);
return get_idx_irn(lv->irg, arr[i + 1].u.node.idx);
struct _be_lv_info_node_t *be_lv_get(const struct _be_lv_t *li, const ir_node *bl, const ir_node *irn);
-static INLINE int _be_is_live_xxx(const struct _be_lv_t *li, const ir_node *block, const ir_node *irn, unsigned flags)
+static inline int _be_is_live_xxx(const struct _be_lv_t *li, const ir_node *block, const ir_node *irn, unsigned flags)
{
int res;
for (i = _be_lv_next_irn(lv, bl, flags, 0); i >= 0; i = _be_lv_next_irn(lv, bl, flags, i + 1))
-static INLINE pset *_be_lv_pset_put(const struct _be_lv_t *lv, const ir_node *block, int state, pset *s)
+static inline pset *_be_lv_pset_put(const struct _be_lv_t *lv, const ir_node *block, int state, pset *s)
{
int i;
be_lv_foreach(lv, block, state, i)
return _node_cmp_attr(&a_attr->node_attr, &b_attr->node_attr);
}
-static INLINE be_req_t *get_be_req(const ir_node *node, int pos)
+static inline be_req_t *get_be_req(const ir_node *node, int pos)
{
int idx;
const be_node_attr_t *attr;
return pos < 0 ? &rd->req : &rd->in_req;
}
-static INLINE arch_register_req_t *get_req(const ir_node *node, int pos)
+static inline arch_register_req_t *get_req(const ir_node *node, int pos)
{
be_req_t *bereq = get_be_req(node, pos);
return &bereq->req;
#define get_phi_handler_from_ops(h) container_of(h, phi_handler_t, irn_ops)
-static INLINE
+static inline
phi_attr_t *get_Phi_attr(const ir_node *phi)
{
phi_attr_t *attr = pmap_get(phi_handler.phi_attrs, (void*) phi);
*/
const void *be_node_get_irn_ops(const ir_node *irn);
-static INLINE int be_is_Spill (const ir_node *irn) { return get_irn_opcode(irn) == beo_Spill ; }
-static INLINE int be_is_Reload (const ir_node *irn) { return get_irn_opcode(irn) == beo_Reload ; }
-static INLINE int be_is_Copy (const ir_node *irn) { return get_irn_opcode(irn) == beo_Copy ; }
-static INLINE int be_is_CopyKeep (const ir_node *irn) { return get_irn_opcode(irn) == beo_CopyKeep ; }
-static INLINE int be_is_Perm (const ir_node *irn) { return get_irn_opcode(irn) == beo_Perm ; }
-static INLINE int be_is_MemPerm (const ir_node *irn) { return get_irn_opcode(irn) == beo_MemPerm ; }
-static INLINE int be_is_Keep (const ir_node *irn) { return get_irn_opcode(irn) == beo_Keep ; }
-static INLINE int be_is_Call (const ir_node *irn) { return get_irn_opcode(irn) == beo_Call ; }
-static INLINE int be_is_Return (const ir_node *irn) { return get_irn_opcode(irn) == beo_Return ; }
-static INLINE int be_is_IncSP (const ir_node *irn) { return get_irn_opcode(irn) == beo_IncSP ; }
-static INLINE int be_is_AddSP (const ir_node *irn) { return get_irn_opcode(irn) == beo_AddSP ; }
-static INLINE int be_is_SubSP (const ir_node *irn) { return get_irn_opcode(irn) == beo_SubSP ; }
-static INLINE int be_is_RegParams(const ir_node *irn) { return get_irn_opcode(irn) == beo_RegParams; }
-static INLINE int be_is_FrameAddr(const ir_node *irn) { return get_irn_opcode(irn) == beo_FrameAddr; }
-static INLINE int be_is_Barrier (const ir_node *irn) { return get_irn_opcode(irn) == beo_Barrier ; }
-static INLINE int be_is_Unwind (const ir_node *irn) { return get_irn_opcode(irn) == beo_Unwind ; }
+static inline int be_is_Spill (const ir_node *irn) { return get_irn_opcode(irn) == beo_Spill ; }
+static inline int be_is_Reload (const ir_node *irn) { return get_irn_opcode(irn) == beo_Reload ; }
+static inline int be_is_Copy (const ir_node *irn) { return get_irn_opcode(irn) == beo_Copy ; }
+static inline int be_is_CopyKeep (const ir_node *irn) { return get_irn_opcode(irn) == beo_CopyKeep ; }
+static inline int be_is_Perm (const ir_node *irn) { return get_irn_opcode(irn) == beo_Perm ; }
+static inline int be_is_MemPerm (const ir_node *irn) { return get_irn_opcode(irn) == beo_MemPerm ; }
+static inline int be_is_Keep (const ir_node *irn) { return get_irn_opcode(irn) == beo_Keep ; }
+static inline int be_is_Call (const ir_node *irn) { return get_irn_opcode(irn) == beo_Call ; }
+static inline int be_is_Return (const ir_node *irn) { return get_irn_opcode(irn) == beo_Return ; }
+static inline int be_is_IncSP (const ir_node *irn) { return get_irn_opcode(irn) == beo_IncSP ; }
+static inline int be_is_AddSP (const ir_node *irn) { return get_irn_opcode(irn) == beo_AddSP ; }
+static inline int be_is_SubSP (const ir_node *irn) { return get_irn_opcode(irn) == beo_SubSP ; }
+static inline int be_is_RegParams(const ir_node *irn) { return get_irn_opcode(irn) == beo_RegParams; }
+static inline int be_is_FrameAddr(const ir_node *irn) { return get_irn_opcode(irn) == beo_FrameAddr; }
+static inline int be_is_Barrier (const ir_node *irn) { return get_irn_opcode(irn) == beo_Barrier ; }
+static inline int be_is_Unwind (const ir_node *irn) { return get_irn_opcode(irn) == beo_Unwind ; }
#endif /* FIRM_BE_BENODE_T_H */
extern ir_node ***register_values;
-static INLINE ir_node *be_peephole_get_value(unsigned regclass_idx,
+static inline ir_node *be_peephole_get_value(unsigned regclass_idx,
unsigned register_idx)
{
return register_values[regclass_idx][register_idx];
}
-static INLINE ir_node *be_peephole_get_reg_value(const arch_register_t *reg)
+static inline ir_node *be_peephole_get_reg_value(const arch_register_t *reg)
{
unsigned regclass_idx = arch_register_class_index(arch_register_get_class(reg));
unsigned register_idx = arch_register_get_index(reg);
DEBUG_ONLY(firm_dbg_module_t *dbg);
} regpressure_ana_t;
-static INLINE int has_reg_class(const regpressure_ana_t *ra, const ir_node *irn)
+static inline int has_reg_class(const regpressure_ana_t *ra, const ir_node *irn)
{
return arch_irn_consider_in_reg_alloc(ra->cls, irn);
}
-static INLINE int regpressure(pset *live) {
+static inline int regpressure(pset *live) {
int pressure = pset_count(live);
return MIN(pressure, MAXPRESSURE);
}
* for a given graph.
* @param irg The graph.
*/
-static INLINE int _have_sched_info(const ir_graph *irg)
+static inline int _have_sched_info(const ir_graph *irg)
{
return get_irg_phase(irg, PHASE_BE_SCHED) != NULL;
}
* @param irn The node.
* @return 1, if the node is scheduled, 0 if not.
*/
-static INLINE int _sched_is_scheduled(const ir_node *irn)
+static inline int _sched_is_scheduled(const ir_node *irn)
{
return get_irn_sched_info(irn)->scheduled;
}
* @param irn The node.
* @return The time step in the schedule.
*/
-static INLINE int _sched_get_time_step(const ir_node *irn)
+static inline int _sched_get_time_step(const ir_node *irn)
{
assert(_sched_is_scheduled(irn));
return get_irn_sched_info(irn)->time_step;
* @param irn The node to check for.
* @return 1, if the node consumes/produces data, false if not.
*/
-static INLINE int to_appear_in_schedule(const ir_node *irn)
+static inline int to_appear_in_schedule(const ir_node *irn)
{
switch(get_irn_opcode(irn)) {
case iro_Start:
* @param irn The ir node.
* @return 1, if the node has a scheduling successor, 0 if not.
*/
-static INLINE int _sched_has_next(const ir_node *irn)
+static inline int _sched_has_next(const ir_node *irn)
{
const ir_node *block = is_Block(irn) ? irn : get_nodes_block(irn);
const sched_info_t *info = get_irn_sched_info(irn);
* @param irn The ir node.
* @return 1, if the node has a scheduling predecessor, 0 if not.
*/
-static INLINE int _sched_has_prev(const ir_node *irn)
+static inline int _sched_has_prev(const ir_node *irn)
{
const ir_node *block = is_Block(irn) ? irn : get_nodes_block(irn);
const sched_info_t *info = get_irn_sched_info(irn);
* @param irn The node.
* @return The next ir node in the schedule or the block, if the node has no next node.
*/
-static INLINE ir_node *_sched_next(const ir_node *irn)
+static inline ir_node *_sched_next(const ir_node *irn)
{
const sched_info_t *info = get_irn_sched_info(irn);
return get_sched_info_irn(get_irn_irg(irn), _sched_entry(info->list.next));
* @return The next ir node in the schedule or the block, if the node has no predecessor.
* predecessor.
*/
-static INLINE ir_node *_sched_prev(const ir_node *irn)
+static inline ir_node *_sched_prev(const ir_node *irn)
{
const sched_info_t *info = get_irn_sched_info(irn);
return get_sched_info_irn(get_irn_irg(irn), _sched_entry(info->list.prev));
* @return The first node in the schedule or the block itself
* if there is no node in the schedule.
*/
-static INLINE ir_node *_sched_first(const ir_node *block)
+static inline ir_node *_sched_first(const ir_node *block)
{
assert(is_Block(block) && "Need a block here");
return _sched_next(block);
* @return The last ir node in a schedule, or the block itself
* if there is no node in the schedule.
*/
-static INLINE ir_node *_sched_last(const ir_node *block)
+static inline ir_node *_sched_last(const ir_node *block)
{
assert(is_Block(block) && "Need a block here");
return _sched_prev(block);
*/
void sched_renumber(const ir_node *block);
-static INLINE void _sched_set_time_stamp(const ir_node *irn)
+static inline void _sched_set_time_stamp(const ir_node *irn)
{
sched_info_t *inf = get_irn_sched_info(irn);
sched_timestep_t before_ts = _sched_entry(inf->list.prev)->time_step;
* @param irn The node to add.
* @return The given node.
*/
-static INLINE void _sched_add_before(const ir_node *before, const ir_node *irn)
+static inline void _sched_add_before(const ir_node *before, const ir_node *irn)
{
sched_info_t *info = get_irn_sched_info(irn);
assert(_sched_is_scheduled(before));
* @param irn The node to add.
* @return The given node.
*/
-static INLINE void _sched_add_after(const ir_node *after, const ir_node *irn)
+static inline void _sched_add_after(const ir_node *after, const ir_node *irn)
{
sched_info_t *info = get_irn_sched_info(irn);
assert(_sched_is_scheduled(after));
info->scheduled = 1;
}
-static INLINE void _sched_init_block(const ir_node *block)
+static inline void _sched_init_block(const ir_node *block)
{
sched_info_t *info = get_irn_sched_info(block);
assert(info->scheduled == 0 && info->time_step == 0);
info->scheduled = 1;
}
-static INLINE void _sched_reset(const ir_node *node)
+static inline void _sched_reset(const ir_node *node)
{
sched_info_t *info = get_irn_sched_info(node);
info->scheduled = 0;
* Remove a node from the scheduled.
* @param irn The node.
*/
-static INLINE void _sched_remove(const ir_node *irn)
+static inline void _sched_remove(const ir_node *irn)
{
sched_info_t *info = get_irn_sched_info(irn);
list_del(&info->list);
* @return A number smaller, equals to or larger than 0, if a is
* before, the same, or after b in the schedule.
*/
-static INLINE int _sched_cmp(const ir_node *a, const ir_node *b)
+static inline int _sched_cmp(const ir_node *a, const ir_node *b)
{
assert(_sched_is_scheduled(a) && _sched_is_scheduled(b));
assert(get_nodes_block(a) == get_nodes_block(b));
* @return 1, if n1 is in front of n2 in the schedule, 0 else.
* @note Both nodes must be in the same block.
*/
-static INLINE int _sched_comes_after(const ir_node *n1, const ir_node *n2)
+static inline int _sched_comes_after(const ir_node *n1, const ir_node *n2)
{
assert(_sched_is_scheduled(n1));
assert(_sched_is_scheduled(n2));
}
#endif
-static INLINE ir_node *skip_Projs(ir_node *irn)
+static inline ir_node *skip_Projs(ir_node *irn)
{
return is_Proj(irn) ? skip_Projs(get_Proj_pred(irn)) : irn;
}
}
#endif
-static INLINE usage_stats_t *get_or_set_usage_stats(reg_pressure_selector_env_t *env, ir_node *irn)
+static inline usage_stats_t *get_or_set_usage_stats(reg_pressure_selector_env_t *env, ir_node *irn)
{
usage_stats_t *us = get_irn_link(irn);
return us;
}
-static INLINE usage_stats_t *get_usage_stats(ir_node *irn)
+static inline usage_stats_t *get_usage_stats(ir_node *irn)
{
usage_stats_t *us = get_irn_link(irn);
assert(us && "This node must have usage stats");
return main_env;
}
-static INLINE int must_appear_in_schedule(const list_sched_selector_t *sel, void *block_env, const ir_node *irn)
+static inline int must_appear_in_schedule(const list_sched_selector_t *sel, void *block_env, const ir_node *irn)
{
int res = -1;
return res;
}
-static INLINE int reg_pr_costs(reg_pressure_selector_env_t *env, ir_node *irn)
+static inline int reg_pr_costs(reg_pressure_selector_env_t *env, ir_node *irn)
{
int i, n;
int sum = 0;
/**
* Adds the edge src -> tgt to the dvg. Checks if reverse edge is already there (asserts).
*/
-static INLINE void add_dvg_edge(rss_t *rss, dvg_t *dvg, const ir_node *src, const ir_node *tgt, int have_source) {
+static inline void add_dvg_edge(rss_t *rss, dvg_t *dvg, const ir_node *src, const ir_node *tgt, int have_source) {
rss_edge_t *dvg_edge;
rss_edge_t key;
/**
* Returns non-zero if the node is a root node
*/
-static INLINE unsigned is_root_node(trace_env_t *env, ir_node *n)
+static inline unsigned is_root_node(trace_env_t *env, ir_node *n)
{
int idx = get_irn_idx(n);
/**
* Mark a node as root node
*/
-static INLINE void mark_root_node(trace_env_t *env, ir_node *n)
+static inline void mark_root_node(trace_env_t *env, ir_node *n)
{
int idx = get_irn_idx(n);
/**
* Get the current delay.
*/
-static INLINE sched_timestep_t get_irn_delay(trace_env_t *env, ir_node *n) {
+static inline sched_timestep_t get_irn_delay(trace_env_t *env, ir_node *n) {
int idx = get_irn_idx(n);
assert(idx < ARR_LEN(env->sched_info));
/**
* Set the current delay.
*/
-static INLINE void set_irn_delay(trace_env_t *env, ir_node *n, sched_timestep_t delay) {
+static inline void set_irn_delay(trace_env_t *env, ir_node *n, sched_timestep_t delay) {
int idx = get_irn_idx(n);
assert(idx < ARR_LEN(env->sched_info));
/**
* Get the current etime.
*/
-static INLINE sched_timestep_t get_irn_etime(trace_env_t *env, ir_node *n) {
+static inline sched_timestep_t get_irn_etime(trace_env_t *env, ir_node *n) {
int idx = get_irn_idx(n);
assert(idx < ARR_LEN(env->sched_info));
/**
* Set the current etime.
*/
-static INLINE void set_irn_etime(trace_env_t *env, ir_node *n, sched_timestep_t etime) {
+static inline void set_irn_etime(trace_env_t *env, ir_node *n, sched_timestep_t etime) {
int idx = get_irn_idx(n);
assert(idx < ARR_LEN(env->sched_info));
/**
* Get the number of users.
*/
-static INLINE unsigned get_irn_num_user(trace_env_t *env, ir_node *n) {
+static inline unsigned get_irn_num_user(trace_env_t *env, ir_node *n) {
int idx = get_irn_idx(n);
assert(idx < ARR_LEN(env->sched_info));
/**
* Set the number of users.
*/
-static INLINE void set_irn_num_user(trace_env_t *env, ir_node *n, unsigned num_user) {
+static inline void set_irn_num_user(trace_env_t *env, ir_node *n, unsigned num_user) {
int idx = get_irn_idx(n);
assert(idx < ARR_LEN(env->sched_info));
/**
* Get the register difference.
*/
-static INLINE int get_irn_reg_diff(trace_env_t *env, ir_node *n) {
+static inline int get_irn_reg_diff(trace_env_t *env, ir_node *n) {
int idx = get_irn_idx(n);
assert(idx < ARR_LEN(env->sched_info));
/**
* Set the register difference.
*/
-static INLINE void set_irn_reg_diff(trace_env_t *env, ir_node *n, int reg_diff) {
+static inline void set_irn_reg_diff(trace_env_t *env, ir_node *n, int reg_diff) {
int idx = get_irn_idx(n);
assert(idx < ARR_LEN(env->sched_info));
/**
* Get the pre-order position.
*/
-static INLINE int get_irn_preorder(trace_env_t *env, ir_node *n) {
+static inline int get_irn_preorder(trace_env_t *env, ir_node *n) {
int idx = get_irn_idx(n);
assert(idx < ARR_LEN(env->sched_info));
/**
* Set the pre-order position.
*/
-static INLINE void set_irn_preorder(trace_env_t *env, ir_node *n, int pos) {
+static inline void set_irn_preorder(trace_env_t *env, ir_node *n, int pos) {
int idx = get_irn_idx(n);
assert(idx < ARR_LEN(env->sched_info));
/**
* Get the pre-order position.
*/
-static INLINE unsigned get_irn_critical_path_len(trace_env_t *env, ir_node *n) {
+static inline unsigned get_irn_critical_path_len(trace_env_t *env, ir_node *n) {
int idx = get_irn_idx(n);
assert(idx < ARR_LEN(env->sched_info));
/**
* Set the pre-order position.
*/
-static INLINE void set_irn_critical_path_len(trace_env_t *env, ir_node *n, unsigned len) {
+static inline void set_irn_critical_path_len(trace_env_t *env, ir_node *n, unsigned len) {
int idx = get_irn_idx(n);
assert(idx < ARR_LEN(env->sched_info));
/**
* Removes the value @p val from the workset if present.
*/
-static INLINE void workset_remove(workset_t *workset, ir_node *val)
+static inline void workset_remove(workset_t *workset, ir_node *val)
{
int i;
for(i = 0; i < workset->len; ++i) {
}
}
-static INLINE const loc_t *workset_contains(const workset_t *ws,
+static inline const loc_t *workset_contains(const workset_t *ws,
const ir_node *val)
{
int i;
/**
* @return The distance to the next use or 0 if irn has dont_spill flag set
*/
-static INLINE unsigned get_distance(ir_node *from, unsigned from_step,
+static inline unsigned get_distance(ir_node *from, unsigned from_step,
const ir_node *def, int skip_from_uses)
{
be_next_use_t use;
return (p->time > q->time) - (p->time < q->time);
}
-static INLINE void workset_print(const workset_t *w)
+static inline void workset_print(const workset_t *w)
{
int i;
/**
* Alloc a new workset on obstack @p ob with maximum size @p max
*/
-static INLINE workset_t *new_workset(belady_env_t *env, struct obstack *ob) {
+static inline workset_t *new_workset(belady_env_t *env, struct obstack *ob) {
workset_t *res;
size_t size = sizeof(*res) + (env->n_regs)*sizeof(res->vals[0]);
res = obstack_alloc(ob, size);
/**
* Alloc a new instance on obstack and make it equal to @param ws
*/
-static INLINE workset_t *workset_clone(belady_env_t *env, struct obstack *ob, workset_t *ws) {
+static inline workset_t *workset_clone(belady_env_t *env, struct obstack *ob, workset_t *ws) {
workset_t *res;
size_t size = sizeof(*res) + (env->n_regs)*sizeof(res->vals[0]);
res = obstack_alloc(ob, size);
* Do NOT alloc anything. Make @param tgt equal to @param src.
* returns @param tgt for convenience
*/
-static INLINE workset_t *workset_copy(belady_env_t *env, workset_t *tgt, workset_t *src) {
+static inline workset_t *workset_copy(belady_env_t *env, workset_t *tgt, workset_t *src) {
size_t size = sizeof(*src) + (env->n_regs)*sizeof(src->vals[0]);
memcpy(tgt, src, size);
return tgt;
* @param count locations given at memory @param locs.
* Set the length of @param ws to count.
*/
-static INLINE void workset_bulk_fill(workset_t *workset, int count, const loc_t *locs) {
+static inline void workset_bulk_fill(workset_t *workset, int count, const loc_t *locs) {
workset->len = count;
memcpy(&(workset->vals[0]), locs, count * sizeof(locs[0]));
}
* Inserts the value @p val into the workset, iff it is not
* already contained. The workset must not be full.
*/
-static INLINE void workset_insert(belady_env_t *env, workset_t *ws, ir_node *val) {
+static inline void workset_insert(belady_env_t *env, workset_t *ws, ir_node *val) {
int i;
/* check for current regclass */
if (!arch_irn_consider_in_reg_alloc(env->cls, val)) {
/**
* Removes all entries from this workset
*/
-static INLINE void workset_clear(workset_t *ws) {
+static inline void workset_clear(workset_t *ws) {
ws->len = 0;
}
/**
* Removes the value @p val from the workset if present.
*/
-static INLINE void workset_remove(workset_t *ws, ir_node *val) {
+static inline void workset_remove(workset_t *ws, ir_node *val) {
int i;
for(i=0; i<ws->len; ++i) {
if (ws->vals[i].irn == val) {
}
}
-static INLINE int workset_get_index(const workset_t *ws, const ir_node *val) {
+static inline int workset_get_index(const workset_t *ws, const ir_node *val) {
int i;
for(i=0; i<ws->len; ++i) {
if (ws->vals[i].irn == val)
} block_info_t;
-static INLINE void *new_block_info(belady_env_t *bel, int id)
+static inline void *new_block_info(belady_env_t *bel, int id)
{
ir_node *bl = bel->blocks[id];
block_info_t *res = obstack_alloc(&bel->ob, sizeof(*res));
#define get_block_info(block) ((block_info_t *)get_irn_link(block))
#define set_block_info(block, info) set_irn_link(block, info)
-static INLINE ir_node *block_info_get_last_ins(block_info_t *bi)
+static inline ir_node *block_info_get_last_ins(block_info_t *bi)
{
if (!bi->last_ins)
bi->last_ins = be_get_end_of_block_insertion_point(bi->bl);
#define get_current_use(bi, irn) phase_get_irn_data(&(bi)->next_uses, (irn))
-static INLINE void advance_current_use(block_info_t *bi, const ir_node *irn)
+static inline void advance_current_use(block_info_t *bi, const ir_node *irn)
{
next_use_t *use = get_current_use(bi, irn);
bring_in_t *sect_head;
};
-static INLINE bring_in_t *new_bring_in(block_info_t *bi, ir_node *irn, const next_use_t *use)
+static inline bring_in_t *new_bring_in(block_info_t *bi, ir_node *irn, const next_use_t *use)
{
bring_in_t *br = obstack_alloc(&bi->bel->ob, sizeof(br[0]));
return (fq > fp) - (fq < fp);
}
-static INLINE unsigned get_curr_distance(block_info_t *bi, const ir_node *irn, int is_usage)
+static inline unsigned get_curr_distance(block_info_t *bi, const ir_node *irn, int is_usage)
{
belady_env_t *env = bi->bel;
sched_timestep_t curr_step = sched_get_time_step(env->instr);
return be_is_live_end(env->lv, bi->bl, irn) ? LIVE_END : DEAD;
}
-static INLINE int is_local_phi(const ir_node *bl, const ir_node *irn)
+static inline int is_local_phi(const ir_node *bl, const ir_node *irn)
{
return is_Phi(irn) && get_nodes_block(irn) == bl;
}
* where @p irn is unused in the block @p bl which is always
* the case in our usage scenario.
*/
-static INLINE int is_transport_in(const ir_node *bl, const ir_node *irn)
+static inline int is_transport_in(const ir_node *bl, const ir_node *irn)
{
return get_nodes_block(irn) != bl || is_Phi(irn);
}
irn_action_t *ia_top;
} rollback_info_t;
-static INLINE block_state_t *get_block_state(global_end_state_t *ges, const block_info_t *bi)
+static inline block_state_t *get_block_state(global_end_state_t *ges, const block_info_t *bi)
{
int id = bi->id;
assert(!ver_is_younger(ges->bs_tops_vers[id], ges->version));
return ver_is_older(ges->bs_tops_vers[id], ges->version) ? NULL : ges->bs_tops[bi->id];
}
-static INLINE const workset_t *get_end_state(global_end_state_t *ges, block_info_t *bi)
+static inline const workset_t *get_end_state(global_end_state_t *ges, block_info_t *bi)
{
block_state_t *bs = get_block_state(ges, bi);
return bs ? bs->end_state : bi->ws_end;
return ia;
}
-static INLINE rollback_info_t trans_begin(global_end_state_t *ges)
+static inline rollback_info_t trans_begin(global_end_state_t *ges)
{
rollback_info_t rb;
rb.obst_level = obstack_base(&ges->obst);
return rb;
}
-static INLINE void trans_rollback(global_end_state_t *ges, rollback_info_t *rb)
+static inline void trans_rollback(global_end_state_t *ges, rollback_info_t *rb)
{
block_state_t *bs;
ilp_var_t ilp; /**< the ilp var for this memory operand */
} memoperand_t;
-static INLINE int
+static inline int
has_reg_class(const spill_ilp_t * si, const ir_node * irn)
{
return arch_irn_consider_in_reg_alloc(si->cls, irn);
/**
* Checks, whether node and its operands have suitable reg classes
*/
-static INLINE int
+static inline int
is_rematerializable(const spill_ilp_t * si, const ir_node * irn)
{
int n;
/**
* Try to create a remat from @p op with destination value @p dest_value
*/
-static INLINE remat_t *
+static inline remat_t *
get_remat_from_op(spill_ilp_t * si, const ir_node * dest_value, const ir_node * op)
{
remat_t *remat = NULL;
}
-static INLINE void
+static inline void
add_remat(const spill_ilp_t * si, const remat_t * remat)
{
remat_info_t *remat_info,
return ret;
}
-static INLINE void
+static inline void
get_remats_from_op(spill_ilp_t * si, const ir_node * op)
{
int n;
}
}
-static INLINE int
+static inline int
value_is_defined_before(const spill_ilp_t * si, const ir_node * pos, const ir_node * val)
{
ir_node *block;
return ret;
}
-static INLINE ir_node *sched_block_last_noncf(const ir_node * bb)
+static inline ir_node *sched_block_last_noncf(const ir_node * bb)
{
return sched_skip((ir_node*)bb, 0, sched_skip_cf_predicator, NULL);
}
/**
* Returns first non-Phi node of block @p bb
*/
-static INLINE ir_node *
+static inline ir_node *
sched_block_first_nonphi(const ir_node * bb)
{
return sched_skip((ir_node*)bb, 1, sched_skip_phi_predicator, NULL);
return (is_Proj(irn));
}
-static INLINE ir_node *
+static inline ir_node *
sched_next_nonproj(const ir_node * irn, int forward)
{
return sched_skip((ir_node*)irn, forward, sched_skip_proj_predicator, NULL);
* Returns next operation node (non-Proj) after @p irn
* or the basic block of this node
*/
-static INLINE ir_node *
+static inline ir_node *
sched_next_op(const ir_node * irn)
{
ir_node *next = sched_next(irn);
* Returns previous operation node (non-Proj) before @p irn
* or the basic block of this node
*/
-static INLINE ir_node *
+static inline ir_node *
sched_prev_op(const ir_node * irn)
{
ir_node *prev = sched_prev(irn);
/**
* Tells you whether a @p remat can be placed before the irn @p pos
*/
-static INLINE int
+static inline int
can_remat_before(const spill_ilp_t * si, const remat_t * remat, const ir_node * pos, const pset * live)
{
const ir_node *op = remat->op;
/**
* Tells you whether a @p remat can be placed after the irn @p pos
*/
-static INLINE int
+static inline int
can_remat_after(const spill_ilp_t * si, const remat_t * remat, const ir_node * pos, const pset * live)
{
if(is_Block(pos)) {
}
-static INLINE int
+static inline int
is_zero(double x)
{
return fabs(x) < 0.00001;
}
-static INLINE ir_node *get_memory_edge(const ir_node *node)
+static inline ir_node *get_memory_edge(const ir_node *node)
{
int i, arity;
return vi->values;
}
-static INLINE ir_node *get_first_phi(ir_node **s) {
+static inline ir_node *get_first_phi(ir_node **s) {
int i;
for (i = ARR_LEN(s) - 1; i >= 0; --i) {
ir_node *end_state;
} block_info_t;
-static INLINE
+static inline
block_info_t *new_block_info(struct obstack *obst, ir_node *block)
{
block_info_t *res = obstack_alloc(obst, sizeof(*res));
return res;
}
-static INLINE
+static inline
block_info_t *get_block_info(ir_node *block)
{
assert(irn_visited(block));
return (block_info_t*) get_irn_link(block);
}
-static INLINE
+static inline
spill_info_t *create_spill_info(minibelady_env_t *env, ir_node *state)
{
spill_info_t *spill_info = obstack_alloc(&env->obst, sizeof(spill_info[0]));
return spill_info;
}
-static INLINE
+static inline
spill_info_t *get_spill_info(minibelady_env_t *env, const ir_node *node)
{
spill_info_t *spill_info
return irn_visited(node);
}
-static INLINE ir_node *be_get_transformed_node(ir_node *old_node) {
+static inline ir_node *be_get_transformed_node(ir_node *old_node) {
if (irn_visited(old_node)) {
ir_node *new_node = get_irn_link(old_node);
assert(new_node != NULL);
return 0;
}
-static INLINE
+static inline
unsigned get_step(const ir_node *node)
{
return PTR_TO_INT(get_irn_link(node));
#define USES_INFINITY 10000000
#define USES_PENDING 9999999
-static INLINE int USES_IS_INFINITE(unsigned time)
+static inline int USES_IS_INFINITE(unsigned time)
{
return time >= USES_INFINITY;
}
-static INLINE int USES_IS_PENDING(unsigned time)
+static inline int USES_IS_PENDING(unsigned time)
{
return time == USES_PENDING;
}
* @return The block of the node, or the node itself, if the node is a
* block.
*/
-static INLINE ir_node *get_block(ir_node *irn)
+static inline ir_node *get_block(ir_node *irn)
{
return is_Block(irn) ? irn : get_nodes_block(irn);
}
-static INLINE const ir_node *get_block_const(const ir_node *irn)
+static inline const ir_node *get_block_const(const ir_node *irn)
{
return is_Block(irn) ? irn : get_nodes_block(irn);
}
-static INLINE int is_firm_be_mode(const ir_mode *mode)
+static inline int is_firm_be_mode(const ir_mode *mode)
{
return mode_is_data(mode);
}
* @param irn The node to check for.
* @return 1, if the node is a data node, 0 if not.
*/
-static INLINE int is_data_node(const ir_node *irn)
+static inline int is_data_node(const ir_node *irn)
{
int i, n;
typedef ir_node *(*create_const_node_func) (dbg_info *dbg, ir_graph *irg, ir_node *block);
-static INLINE ir_node *create_const(ia32_code_gen_t *cg, ir_node **place,
+static inline ir_node *create_const(ia32_code_gen_t *cg, ir_node **place,
create_const_node_func func,
const arch_register_t* reg)
{
DBG_OPT_SUB2NEGADD(irn, res);
}
-static INLINE int need_constraint_copy(ir_node *irn)
+static inline int need_constraint_copy(ir_node *irn)
{
/* TODO this should be determined from the node specification */
switch (get_ia32_irn_opcode(irn)) {
const arch_register_t *ia32_get_RegParam_reg(unsigned cc, size_t nr,
const ir_mode *mode);
-static INLINE int is_unknown_reg(const arch_register_t *reg)
+static inline int is_unknown_reg(const arch_register_t *reg)
{
if(reg == &ia32_gp_regs[REG_GP_UKNWN]
|| reg == &ia32_xmm_regs[REG_XMM_UKNWN]
/**
* Return true if a mode can be stored in the GP register set
*/
-static INLINE int mode_needs_gp_reg(ir_mode *mode) {
+static inline int mode_needs_gp_reg(ir_mode *mode) {
if (mode == mode_fpcw)
return 0;
if (get_mode_size_bits(mode) > 32)
be_peephole_exchange(node, xor);
}
-static INLINE int is_noreg(ia32_code_gen_t *cg, const ir_node *node)
+static inline int is_noreg(ia32_code_gen_t *cg, const ir_node *node)
{
return node == cg->noreg_gp;
}
* all it's Projs are removed as well.
* @param irn The irn to be removed from schedule
*/
-static INLINE void try_kill(ir_node *node)
+static inline void try_kill(ir_node *node)
{
if(get_irn_mode(node) == mode_T) {
const ir_edge_t *edge, *next;
/**
* Wrap the arch_* function here so we can check for errors.
*/
-static INLINE const arch_register_t *x87_get_irn_register(const ir_node *irn)
+static inline const arch_register_t *x87_get_irn_register(const ir_node *irn)
{
const arch_register_t *res = arch_get_irn_register(irn);
typedef ir_node *construct_binop_func(dbg_info *db, ir_graph *irg,
ir_node *block, ir_node *left, ir_node *right);
-static INLINE int mode_needs_gp_reg(ir_mode *mode) {
+static inline int mode_needs_gp_reg(ir_mode *mode) {
return mode_is_int(mode) || mode_is_reference(mode);
}
/**
* Set a node emitter. Make it a bit more type safe.
*/
-static INLINE void set_emitter(ir_op *op, emit_func ppc32_emit_node) {
+static inline void set_emitter(ir_op *op, emit_func ppc32_emit_node) {
op->ops.generic = (op_func)ppc32_emit_node;
}
/**
* Returns the proj of a given node with the given proj number
*/
-static INLINE ir_node *get_succ_Proj(ir_node *node, long proj)
+static inline ir_node *get_succ_Proj(ir_node *node, long proj)
{
const ir_edge_t *edge;
foreach_out_edge(node, edge)
#include <stddef.h>
-static INLINE void _time_get(ir_timer_val_t *val);
-static INLINE void _time_reset(ir_timer_val_t *val);
-static INLINE unsigned long _time_to_msec(const ir_timer_val_t *val);
-static INLINE ir_timer_val_t *_time_add(ir_timer_val_t *res,
+static inline void _time_get(ir_timer_val_t *val);
+static inline void _time_reset(ir_timer_val_t *val);
+static inline unsigned long _time_to_msec(const ir_timer_val_t *val);
+static inline ir_timer_val_t *_time_add(ir_timer_val_t *res,
const ir_timer_val_t *lhs, const ir_timer_val_t *rhs);
-static INLINE ir_timer_val_t *_time_sub(ir_timer_val_t *res,
+static inline ir_timer_val_t *_time_sub(ir_timer_val_t *res,
const ir_timer_val_t *lhs, const ir_timer_val_t *rhs);
/**
#ifdef HAVE_GETTIMEOFDAY
-static INLINE void _time_get(ir_timer_val_t *val)
+static inline void _time_get(ir_timer_val_t *val)
{
gettimeofday(val, NULL);
}
-static INLINE void _time_reset(ir_timer_val_t *val)
+static inline void _time_reset(ir_timer_val_t *val)
{
timerclear(val);
}
-static INLINE unsigned long _time_to_msec(const ir_timer_val_t *elapsed)
+static inline unsigned long _time_to_msec(const ir_timer_val_t *elapsed)
{
return (unsigned long) elapsed->tv_sec * 1000UL
+ (unsigned long) elapsed->tv_usec / 1000UL;
}
-static INLINE unsigned long _time_to_usec(const ir_timer_val_t *elapsed)
+static inline unsigned long _time_to_usec(const ir_timer_val_t *elapsed)
{
return (unsigned long) elapsed->tv_sec * 1000000UL
+ (unsigned long) elapsed->tv_usec;
}
-static INLINE ir_timer_val_t *_time_add(ir_timer_val_t *res,
+static inline ir_timer_val_t *_time_add(ir_timer_val_t *res,
const ir_timer_val_t *lhs, const ir_timer_val_t *rhs)
{
timeradd(lhs, rhs, res);
return res;
}
-static INLINE ir_timer_val_t *_time_sub(ir_timer_val_t *res,
+static inline ir_timer_val_t *_time_sub(ir_timer_val_t *res,
const ir_timer_val_t *lhs, const ir_timer_val_t *rhs)
{
timersub(lhs, rhs, res);
#elif defined(_WIN32)
-static INLINE void _time_get(ir_timer_val_t *val)
+static inline void _time_get(ir_timer_val_t *val)
{
if(!QueryPerformanceCounter(&val->hi_prec))
val->lo_prec = timeGetTime();
}
-static INLINE void _time_reset(ir_timer_val_t *val)
+static inline void _time_reset(ir_timer_val_t *val)
{
memset(val, 0, sizeof(val[0]));
}
-static INLINE unsigned long _time_to_msec(const ir_timer_val_t *elapsed)
+static inline unsigned long _time_to_msec(const ir_timer_val_t *elapsed)
{
LARGE_INTEGER freq;
return (unsigned long) ((elapsed->hi_prec.QuadPart * 1000) / freq.QuadPart);
}
-static INLINE unsigned long _time_to_usec(const ir_timer_val_t *elapsed)
+static inline unsigned long _time_to_usec(const ir_timer_val_t *elapsed)
{
LARGE_INTEGER freq;
return (unsigned long) ((elapsed->hi_prec.QuadPart * 1000000) / freq.QuadPart);
}
-static INLINE ir_timer_val_t *_time_add(ir_timer_val_t *res, const ir_timer_val_t *lhs, const ir_timer_val_t *rhs)
+static inline ir_timer_val_t *_time_add(ir_timer_val_t *res, const ir_timer_val_t *lhs, const ir_timer_val_t *rhs)
{
LARGE_INTEGER dummy;
if(QueryPerformanceFrequency(&dummy))
return res;
}
-static INLINE ir_timer_val_t *_time_sub(ir_timer_val_t *res, const ir_timer_val_t *lhs, const ir_timer_val_t *rhs)
+static inline ir_timer_val_t *_time_sub(ir_timer_val_t *res, const ir_timer_val_t *lhs, const ir_timer_val_t *rhs)
{
LARGE_INTEGER dummy;
if(QueryPerformanceFrequency(&dummy))
return n1->irn != n2->irn;
}
-static INLINE void send_cmd(firm_ycomp_dbg_t *dbg, const char *buf) {
+static inline void send_cmd(firm_ycomp_dbg_t *dbg, const char *buf) {
ssize_t res, len;
len = strlen(buf);
/**
* Retrieve the appropriate realizer for given node.
*/
-static INLINE unsigned get_node_realizer(ir_node *node) {
+static inline unsigned get_node_realizer(ir_node *node) {
unsigned realizer;
ir_opcode opc = get_irn_opcode(node);
/**
* Retrieve the appropriate realizer for given edge.
*/
-static INLINE unsigned get_edge_realizer(ir_node *src, ir_node *tgt) {
+static inline unsigned get_edge_realizer(ir_node *src, ir_node *tgt) {
unsigned realizer;
ir_mode *tgt_mode, *src_mode;
static struct obstack mangle_obst;
/** returned a mangled type name, currently no mangling */
-static INLINE ident *mangle_type(ir_type *tp) {
+static inline ident *mangle_type(ir_type *tp) {
assert(tp->kind == k_type);
return tp->name;
}
}
/*
-static INLINE ir_node * tail(ir_node * node) {
+static inline ir_node * tail(ir_node * node) {
ir_node * link;
for (; (link = get_irn_link(node)); node = link) ;
return node;
* cause cycles we don't want to see, as Unknwon is in the Start Block
* of the procedure. Use unknown of outermost irg where the start
* block has no predecessors. */
-static INLINE ir_node *get_cg_Unknown(ir_mode *m) {
+static inline ir_node *get_cg_Unknown(ir_mode *m) {
assert((get_Block_n_cfgpreds(get_irg_start_block(get_irp_main_irg())) == 1) &&
(get_nodes_block(get_Block_cfgpred(get_irg_start_block(get_irp_main_irg()), 0)) ==
get_irg_start_block(get_irp_main_irg())));
* *************************************************************************** */
/** Creates a Phi node with 0 predecessors. */
-static INLINE ir_node *
+static inline ir_node *
new_rd_Phi0(ir_graph *irg, ir_node *block, ir_mode *mode) {
ir_node *res;
* @param phi0 in non-NULL: the Phi0 node in the same block that represents
* the value for which the new Phi is constructed
*/
-static INLINE ir_node *
+static inline ir_node *
new_rd_Phi_in(ir_graph *irg, ir_node *block, ir_mode *mode,
ir_node **in, int ins, ir_node *phi0) {
int i;
* Further the last entry in frag_arr of current block points to n. This
* constructs a chain block->last_frag_op-> ... first_frag_op of all frag ops in the block.
*/
-static INLINE ir_node **new_frag_arr(ir_node *n) {
+static inline ir_node **new_frag_arr(ir_node *n) {
ir_node **arr;
int opt;
/**
* Returns the frag_arr from a node.
*/
-static INLINE ir_node **get_frag_arr(ir_node *n) {
+static inline ir_node **get_frag_arr(ir_node *n) {
switch (get_irn_opcode(n)) {
case iro_Call:
return n->attr.call.exc.frag_arr;
/* inline functions */
-static INLINE ir_node *
+static inline ir_node *
_new_d_Bad(void) {
return get_irg_bad(current_ir_graph);
}
-static INLINE ir_node *
+static inline ir_node *
_new_d_NoMem(void) {
return get_irg_no_mem(current_ir_graph);
}
/*-----------------------------------------------------------------*/
static void dump_whole_node(ir_node *n, void *env);
-static INLINE void dump_loop_nodes_into_graph(FILE *F, ir_graph *irg);
+static inline void dump_loop_nodes_into_graph(FILE *F, ir_graph *irg);
/*-----------------------------------------------------------------*/
/* Helper functions. */
/**
* Dump the node information of a node n to a file F.
*/
-static INLINE int dump_node_info(FILE *F, ir_node *n)
+static inline int dump_node_info(FILE *F, ir_node *n)
{
int bad = 0;
const ir_op_ops *ops = get_op_ops(get_irn_op(n));
return bad;
}
-static INLINE int is_constlike_node(const ir_node *node)
+static inline int is_constlike_node(const ir_node *node)
{
const ir_op *op = get_irn_op(node);
return is_op_constlike(op);
}
}
-static INLINE void
+static inline void
dump_loop_label(FILE *F, ir_loop *loop) {
fprintf(F, "loop %d, %d sons, %d nodes",
get_loop_depth(loop), get_loop_n_sons(loop), get_loop_n_nodes(loop));
}
-static INLINE void dump_loop_info(FILE *F, ir_loop *loop) {
+static inline void dump_loop_info(FILE *F, ir_loop *loop) {
fprintf(F, " info1: \"");
fprintf(F, " loop nr: %d", get_loop_loop_nr(loop));
#ifdef DEBUG_libfirm /* GL @@@ debug analyses */
fprintf(F, "\"");
}
-static INLINE void
+static inline void
dump_loop_node(FILE *F, ir_loop *loop) {
fprintf(F, "node: {title: \"");
PRINT_LOOPID(loop);
fprintf(F, "}\n");
}
-static INLINE void
+static inline void
dump_loop_node_edge(FILE *F, ir_loop *loop, int i) {
assert(loop);
fprintf(F, "edge: {sourcename: \"");
fprintf(F, "}\n");
}
-static INLINE void
+static inline void
dump_loop_son_edge(FILE *F, ir_loop *loop, int i) {
assert(loop);
fprintf(F, "edge: {sourcename: \"");
}
}
-static INLINE
+static inline
void dump_loop_nodes_into_graph(FILE *F, ir_graph *irg) {
ir_graph *rem = current_ir_graph;
current_ir_graph = irg;
static long last_edge_num = -1;
#endif
-static INLINE long edge_get_id(const ir_edge_t *e) {
+static inline long edge_get_id(const ir_edge_t *e) {
#ifdef DEBUG_libfirm
return e->edge_nr;
#else /* DEBUG_libfirm */
* @param tgt the edge target
* @param kind the kind of the edge
*/
-static INLINE void edge_change_cnt(ir_node *tgt, ir_edge_kind_t kind, int ofs) {
+static inline void edge_change_cnt(ir_node *tgt, ir_edge_kind_t kind, int ofs) {
irn_edge_info_t *info = _get_irn_edge_info(tgt, kind);
info->out_count += ofs;
* Verify the edge list of a node, ie. ensure it's a loop:
* head -> e_1 -> ... -> e_n -> head
*/
-static INLINE void vrfy_list_head(ir_node *irn, ir_edge_kind_t kind) {
+static inline void vrfy_list_head(ir_node *irn, ir_edge_kind_t kind) {
int err = 0;
int num = 0;
pset *lh_set = pset_new_ptr(16);
* @param irn The node.
* @return The first out edge that points to this node.
*/
-static INLINE const ir_edge_t *_get_irn_out_edge_first_kind(const ir_node *irn, ir_edge_kind_t kind)
+static inline const ir_edge_t *_get_irn_out_edge_first_kind(const ir_node *irn, ir_edge_kind_t kind)
{
const struct list_head *head = _get_irn_outs_head(irn, kind);
return list_empty(head) ? NULL : list_entry(head->next, ir_edge_t, list);
* @param last The last out edge you have seen.
* @return The next out edge in @p irn 's out list after @p last.
*/
-static INLINE const ir_edge_t *_get_irn_out_edge_next(const ir_node *irn, const ir_edge_t *last)
+static inline const ir_edge_t *_get_irn_out_edge_next(const ir_node *irn, const ir_edge_t *last)
{
struct list_head *next = last->list.next;
return next == _get_irn_outs_head(irn, last->kind) ? NULL : list_entry(next, ir_edge_t, list);
* @param irn The node.
* @return The number of edges pointing to this node.
*/
-static INLINE int _get_irn_n_edges_kind(const ir_node *irn, int kind)
+static inline int _get_irn_n_edges_kind(const ir_node *irn, int kind)
{
/* Perhaps out_count was buggy. This code does it more safely. */
#if 0
#endif
}
-static INLINE int _edges_activated_kind(const ir_graph *irg, ir_edge_kind_t kind)
+static inline int _edges_activated_kind(const ir_graph *irg, ir_edge_kind_t kind)
{
return _get_irg_edge_info(irg, kind)->activated;
}
* Assure, that the edges information is present for a certain graph.
* @param irg The graph.
*/
-static INLINE void _edges_assure_kind(ir_graph *irg, int kind)
+static inline void _edges_assure_kind(ir_graph *irg, int kind)
{
if(!_edges_activated_kind(irg, kind))
edges_activate_kind(irg, kind);
* edges_register_private_data().
* @return A pointer to the private data.
*/
-static INLINE void *_get_edge_private_data(const ir_edge_t *edge, int ofs)
+static inline void *_get_edge_private_data(const ir_edge_t *edge, int ofs)
{
return (void *) ((char *) edge + sizeof(edge[0]) + ofs);
}
-static INLINE ir_node *_get_edge_src_irn(const ir_edge_t *edge)
+static inline ir_node *_get_edge_src_irn(const ir_edge_t *edge)
{
return edge->src;
}
-static INLINE int _get_edge_src_pos(const ir_edge_t *edge)
+static inline int _get_edge_src_pos(const ir_edge_t *edge)
{
return edge->pos;
}
/* generate the getter functions for external access */
#define E_FLAG(name, value, def) \
-static INLINE int _get_opt_##name(void) { \
+static inline int _get_opt_##name(void) { \
return libFIRM_opt & irf_##name; \
}
/* generate the getter functions for internal access */
#define I_FLAG(name, value, def) \
-static INLINE int get_opt_##name(void) { \
+static inline int get_opt_##name(void) { \
return libFIRM_opt & irf_##name; \
}
/* generate getter and setter functions for running flags */
#define R_FLAG(name, value) \
-static INLINE int is_##name##_running(void) { \
+static inline int is_##name##_running(void) { \
return libFIRM_running & ir_rf_##name; \
} \
-static INLINE void set_##name##_running(int flag) {\
+static inline void set_##name##_running(int flag) {\
if (flag) libFIRM_running |= ir_rf_##name; \
else libFIRM_running &= ~ir_rf_##name; \
}
#undef E_FLAG
#undef R_FLAG
-static INLINE int _get_optimize(void) {
+static inline int _get_optimize(void) {
return get_opt_optimize();
}
-static INLINE firm_verification_t
+static inline firm_verification_t
get_node_verification_mode(void) {
return opt_do_node_verification;
}
*
* @note current_ir_graph must be set
*/
-static INLINE void do_local_optimize(ir_node *n) {
+static inline void do_local_optimize(ir_node *n) {
/* Handle graph state */
assert(get_irg_phase_state(current_ir_graph) != phase_building);
/* inline functions for graphs */
/*-------------------------------------------------------------------*/
-static INLINE int
+static inline int
_is_ir_graph(const void *thing) {
return (get_kind(thing) == k_ir_graph);
}
/** Returns the start block of a graph. */
-static INLINE ir_node *
+static inline ir_node *
_get_irg_start_block(const ir_graph *irg) {
return get_irn_intra_n(irg->anchor, anchor_start_block);
}
-static INLINE void
+static inline void
_set_irg_start_block(ir_graph *irg, ir_node *node) {
set_irn_n(irg->anchor, anchor_start_block, node);
}
-static INLINE ir_node *
+static inline ir_node *
_get_irg_start(const ir_graph *irg) {
return get_irn_intra_n(irg->anchor, anchor_start);
}
-static INLINE void
+static inline void
_set_irg_start(ir_graph *irg, ir_node *node) {
set_irn_n(irg->anchor, anchor_start, node);
}
-static INLINE ir_node *
+static inline ir_node *
_get_irg_end_block(const ir_graph *irg) {
return get_irn_intra_n(irg->anchor, anchor_end_block);
}
-static INLINE void
+static inline void
_set_irg_end_block(ir_graph *irg, ir_node *node) {
set_irn_n(irg->anchor, -1, node);
set_irn_n(irg->anchor, anchor_end_block, node);
}
-static INLINE ir_node *
+static inline ir_node *
_get_irg_end(const ir_graph *irg) {
return get_irn_intra_n(irg->anchor, anchor_end);
}
-static INLINE void
+static inline void
_set_irg_end(ir_graph *irg, ir_node *node) {
set_irn_n(irg->anchor, anchor_end, node);
}
-static INLINE ir_node *
+static inline ir_node *
_get_irg_end_reg(const ir_graph *irg) {
return get_irn_intra_n(irg->anchor, anchor_end_reg);
}
-static INLINE void
+static inline void
_set_irg_end_reg(ir_graph *irg, ir_node *node) {
set_irn_n(irg->anchor, anchor_end_reg, node);
}
-static INLINE ir_node *
+static inline ir_node *
_get_irg_end_except(const ir_graph *irg) {
return get_irn_intra_n(irg->anchor, anchor_end_except);
}
-static INLINE void
+static inline void
_set_irg_end_except(ir_graph *irg, ir_node *node) {
set_irn_n(irg->anchor, anchor_end_except, node);
}
-static INLINE ir_node *
+static inline ir_node *
_get_irg_initial_exec(const ir_graph *irg) {
return get_irn_intra_n(irg->anchor, anchor_initial_exec);
}
-static INLINE void
+static inline void
_set_irg_initial_exec(ir_graph *irg, ir_node *node) {
set_irn_n(irg->anchor, anchor_initial_exec, node);
}
-static INLINE ir_node *
+static inline ir_node *
_get_irg_frame(const ir_graph *irg) {
return get_irn_intra_n(irg->anchor, anchor_frame);
}
-static INLINE void
+static inline void
_set_irg_frame(ir_graph *irg, ir_node *node) {
set_irn_n(irg->anchor, anchor_frame, node);
}
-static INLINE ir_node *
+static inline ir_node *
_get_irg_tls(const ir_graph *irg) {
return get_irn_intra_n(irg->anchor, anchor_tls);
}
-static INLINE void
+static inline void
_set_irg_tls(ir_graph *irg, ir_node *node) {
set_irn_n(irg->anchor, anchor_tls, node);
}
-static INLINE ir_node *
+static inline ir_node *
_get_irg_initial_mem(const ir_graph *irg) {
return get_irn_intra_n(irg->anchor, anchor_initial_mem);
}
-static INLINE void
+static inline void
_set_irg_initial_mem(ir_graph *irg, ir_node *node) {
set_irn_n(irg->anchor, anchor_initial_mem, node);
}
-static INLINE ir_node *
+static inline ir_node *
_get_irg_args(const ir_graph *irg) {
return get_irn_intra_n(irg->anchor, anchor_args);
}
-static INLINE void
+static inline void
_set_irg_args(ir_graph *irg, ir_node *node) {
set_irn_n(irg->anchor, anchor_args, node);
}
-static INLINE ir_node *
+static inline ir_node *
_get_irg_value_param_base(const ir_graph *irg) {
return get_irn_intra_n(irg->anchor, anchor_value_param_base);
}
-static INLINE void
+static inline void
_set_irg_value_param_base(ir_graph *irg, ir_node *node) {
set_irn_n(irg->anchor, anchor_value_param_base, node);
}
-static INLINE ir_node *
+static inline ir_node *
_get_irg_bad(const ir_graph *irg) {
return get_irn_intra_n(irg->anchor, anchor_bad);
}
-static INLINE void
+static inline void
_set_irg_bad(ir_graph *irg, ir_node *node) {
set_irn_n(irg->anchor, anchor_bad, node);
}
-static INLINE ir_node *
+static inline ir_node *
_get_irg_no_mem(const ir_graph *irg) {
return get_irn_intra_n(irg->anchor, anchor_no_mem);
}
-static INLINE void
+static inline void
_set_irg_no_mem(ir_graph *irg, ir_node *node) {
set_irn_n(irg->anchor, anchor_no_mem, node);
}
-static INLINE ir_node *
+static inline ir_node *
_get_irg_current_block(const ir_graph *irg) {
return irg->current_block;
}
-static INLINE void
+static inline void
_set_irg_current_block(ir_graph *irg, ir_node *node) {
irg->current_block = node;
}
-static INLINE ir_entity *
+static inline ir_entity *
_get_irg_entity(const ir_graph *irg) {
assert(irg && irg->ent);
return irg->ent;
}
-static INLINE void
+static inline void
_set_irg_entity(ir_graph *irg, ir_entity *ent) {
irg->ent = ent;
}
-static INLINE ir_type *
+static inline ir_type *
_get_irg_frame_type(ir_graph *irg) {
assert(irg && irg->frame_type);
return irg->frame_type = skip_tid(irg->frame_type);
}
-static INLINE void
+static inline void
_set_irg_frame_type(ir_graph *irg, ir_type *ftp) {
assert(is_frame_type(ftp));
irg->frame_type = ftp;
}
-static INLINE struct obstack *
+static inline struct obstack *
_get_irg_obstack(const ir_graph *irg) {
return irg->obst;
}
-static INLINE irg_phase_state
+static inline irg_phase_state
_get_irg_phase_state(const ir_graph *irg) {
return irg->phase_state;
}
-static INLINE void
+static inline void
_set_irg_phase_state(ir_graph *irg, irg_phase_state state) {
irg->phase_state = state;
}
-static INLINE op_pin_state
+static inline op_pin_state
_get_irg_pinned(const ir_graph *irg) {
return irg->irg_pinned_state;
}
-static INLINE irg_outs_state
+static inline irg_outs_state
_get_irg_outs_state(const ir_graph *irg) {
return irg->outs_state;
}
-static INLINE void
+static inline void
_set_irg_outs_inconsistent(ir_graph *irg) {
if (irg->outs_state == outs_consistent)
irg->outs_state = outs_inconsistent;
}
-static INLINE irg_extblk_state
+static inline irg_extblk_state
_get_irg_extblk_state(const ir_graph *irg) {
return irg->extblk_state;
}
-static INLINE void
+static inline void
_set_irg_extblk_inconsistent(ir_graph *irg) {
if (irg->extblk_state == extblk_valid)
irg->extblk_state = extblk_invalid;
}
-static INLINE irg_dom_state
+static inline irg_dom_state
_get_irg_dom_state(const ir_graph *irg) {
return irg->dom_state;
}
-static INLINE irg_dom_state
+static inline irg_dom_state
_get_irg_postdom_state(const ir_graph *irg) {
return irg->pdom_state;
}
-static INLINE void
+static inline void
_set_irg_doms_inconsistent(ir_graph *irg) {
if (irg->dom_state != dom_none)
irg->dom_state = dom_inconsistent;
irg->pdom_state = dom_inconsistent;
}
-static INLINE irg_loopinfo_state
+static inline irg_loopinfo_state
_get_irg_loopinfo_state(const ir_graph *irg) {
return irg->loopinfo_state;
}
-static INLINE void
+static inline void
_set_irg_loopinfo_state(ir_graph *irg, irg_loopinfo_state s) {
irg->loopinfo_state = s;
}
-static INLINE void
+static inline void
_set_irg_loopinfo_inconsistent(ir_graph *irg) {
irg->loopinfo_state &= ~loopinfo_valid;
}
-static INLINE void
+static inline void
_set_irg_pinned(ir_graph *irg, op_pin_state p) {
irg->irg_pinned_state = p;
}
-static INLINE irg_callee_info_state
+static inline irg_callee_info_state
_get_irg_callee_info_state(const ir_graph *irg) {
return irg->callee_info_state;
}
-static INLINE void
+static inline void
_set_irg_callee_info_state(ir_graph *irg, irg_callee_info_state s) {
irg_callee_info_state irp_state = get_irp_callee_info_state();
set_irp_callee_info_state(s);
}
-static INLINE irg_inline_property
+static inline irg_inline_property
_get_irg_inline_property(const ir_graph *irg) {
return irg->inline_property;
}
-static INLINE void
+static inline void
_set_irg_inline_property(ir_graph *irg, irg_inline_property s) {
irg->inline_property = s;
}
-static INLINE unsigned
+static inline unsigned
_get_irg_additional_properties(const ir_graph *irg) {
if (irg->additional_properties & mtp_property_inherited)
return get_method_additional_properties(get_entity_type(irg->ent));
return irg->additional_properties;
}
-static INLINE void
+static inline void
_set_irg_additional_properties(ir_graph *irg, unsigned mask) {
irg->additional_properties = mask & ~mtp_property_inherited;
}
-static INLINE void
+static inline void
_set_irg_additional_property(ir_graph *irg, mtp_additional_property flag) {
unsigned prop = irg->additional_properties;
irg->additional_properties = prop | flag;
}
-static INLINE void
+static inline void
_set_irg_link(ir_graph *irg, void *thing) {
irg->link = thing;
}
-static INLINE void *
+static inline void *
_get_irg_link(const ir_graph *irg) {
return irg->link;
}
-static INLINE ir_visited_t
+static inline ir_visited_t
_get_irg_visited(const ir_graph *irg) {
return irg->visited;
}
-static INLINE ir_visited_t
+static inline ir_visited_t
_get_irg_block_visited(const ir_graph *irg) {
return irg->block_visited;
}
-static INLINE void
+static inline void
_set_irg_block_visited(ir_graph *irg, ir_visited_t visited) {
irg->block_visited = visited;
}
-static INLINE void
+static inline void
_inc_irg_block_visited(ir_graph *irg) {
++irg->block_visited;
}
-static INLINE void
+static inline void
_dec_irg_block_visited(ir_graph *irg) {
--irg->block_visited;
}
-static INLINE unsigned
+static inline unsigned
_get_irg_estimated_node_cnt(const ir_graph *irg) {
return irg->estimated_node_count;
}
/* Return the floating point model of this graph. */
-static INLINE unsigned
+static inline unsigned
_get_irg_fp_model(const ir_graph *irg) {
return irg->fp_model;
}
* @param irn The node.
* @return The index allocated for the node.
*/
-static INLINE unsigned irg_register_node_idx(ir_graph *irg, ir_node *irn) {
+static inline unsigned irg_register_node_idx(ir_graph *irg, ir_node *irn) {
unsigned idx = irg->last_node_idx++;
if (idx >= (unsigned)ARR_LEN(irg->idx_irn_map))
ARR_RESIZE(ir_node *, irg->idx_irn_map, idx + 1);
* Kill a node from the irg. BEWARE: this kills
* all later created nodes.
*/
-static INLINE void
+static inline void
irg_kill_node(ir_graph *irg, ir_node *n) {
unsigned idx = get_irn_idx(n);
assert(idx + 1 == irg->last_node_idx);
* @return The node with that index or NULL, if there is no node with that index.
* @note The node you got might be dead.
*/
-static INLINE ir_node *
+static inline ir_node *
get_idx_irn(ir_graph *irg, unsigned idx) {
assert(idx < (unsigned) ARR_LEN(irg->idx_irn_map));
return irg->idx_irn_map[idx];
/**
* Return the number of anchors in this graph.
*/
-static INLINE int
+static inline int
get_irg_n_anchors(const ir_graph *irg) {
return get_irn_arity(irg->anchor);
}
/**
* Return anchor for given index
*/
-static INLINE ir_node *
+static inline ir_node *
get_irg_anchor(const ir_graph *irg, int idx) {
return get_irn_intra_n(irg->anchor, idx);
}
/**
* Set anchor for given index
*/
-static INLINE void
+static inline void
set_irg_anchor(ir_graph *irg, int idx, ir_node *irn) {
set_irn_n(irg->anchor, idx, irn);
}
#ifdef INTERPROCEDURAL_VIEW
extern int firm_interprocedural_view;
-static INLINE int
+static inline int
_get_interprocedural_view(void) {
return firm_interprocedural_view;
}
* Returns current_ir_graph and sets it to the irg of predecessor index
* of node n.
*/
-static INLINE ir_graph *
+static inline ir_graph *
switch_irg(ir_node *n, int index) {
ir_graph *old_current = current_ir_graph;
* @param expected_elements Number of elements expected in the nodemap (roughly)
* @return The initialized nodemap
*/
-static INLINE ir_lnk_nodemap_t *ir_lnk_nodemap_new(size_t expected_elements) {
+static inline ir_lnk_nodemap_t *ir_lnk_nodemap_new(size_t expected_elements) {
ir_lnk_nodemap_t *res = XMALLOC(ir_lnk_nodemap_t);
ir_lnk_nodemap_init_size(res, expected_elements);
return res;
/**
* Destroys a linked nodemap and frees the memory of the nodemap itself.
*/
-static INLINE void ir_lnk_nodemap_del(ir_lnk_nodemap_t *nodemap) {
+static inline void ir_lnk_nodemap_del(ir_lnk_nodemap_t *nodemap) {
ir_lnk_nodemap_destroy(nodemap);
xfree(nodemap);
}
* @param expected_elements Number of elements expected in the nodeset (roughly)
* @return The initialized nodeset
*/
-static INLINE ir_lnk_nodeset_t *ir_lnk_nodeset_new(size_t expected_elements) {
+static inline ir_lnk_nodeset_t *ir_lnk_nodeset_new(size_t expected_elements) {
ir_lnk_nodeset_t *res = XMALLOC(ir_lnk_nodeset_t);
ir_lnk_nodeset_init_size(res, expected_elements);
return res;
/**
* Destroys a linked nodeset and frees the memory of the nodeset itself.
*/
-static INLINE void ir_lnk_nodeset_del(ir_lnk_nodeset_t *nodeset) {
+static inline void ir_lnk_nodeset_del(ir_lnk_nodeset_t *nodeset) {
ir_lnk_nodeset_destroy(nodeset);
xfree(nodeset);
}
*
* TODO: Add other fields
**/
-static INLINE int modes_are_equal(const ir_mode *m, const ir_mode *n) {
+static inline int modes_are_equal(const ir_mode *m, const ir_mode *n) {
if (m == n) return 1;
if (m->sort == n->sort &&
m->arithmetic == n->arithmetic &&
* ------------------------------- */
extern ir_mode *mode_P_code, *mode_P_data;
-static INLINE ir_mode *
+static inline ir_mode *
_get_modeP_code(void) { return mode_P_code; }
-static INLINE ir_mode *
+static inline ir_mode *
_get_modeP_data(void) { return mode_P_data; }
-static INLINE ir_modecode
+static inline ir_modecode
_get_mode_modecode(const ir_mode *mode) { return mode->code; }
-static INLINE ident *
+static inline ident *
_get_mode_ident(const ir_mode *mode) { return mode->name; }
-static INLINE ir_mode_sort
+static inline ir_mode_sort
_get_mode_sort(const ir_mode *mode) { return mode->sort; }
-static INLINE unsigned
+static inline unsigned
_get_mode_size_bits(const ir_mode *mode) { return mode->size; }
-static INLINE unsigned
+static inline unsigned
_get_mode_size_bytes(const ir_mode *mode) {
unsigned size = _get_mode_size_bits(mode);
if ((size & 7) != 0) return (unsigned) -1;
return size >> 3;
}
-static INLINE int
+static inline int
_get_mode_sign(const ir_mode *mode) { return mode->sign; }
-static INLINE ir_mode_arithmetic
+static inline ir_mode_arithmetic
_get_mode_arithmetic(const ir_mode *mode) { return mode->arithmetic; }
-static INLINE unsigned int
+static inline unsigned int
_get_mode_modulo_shift(const ir_mode *mode) { return mode->modulo_shift; }
-static INLINE unsigned int
+static inline unsigned int
_get_mode_vector_elems(const ir_mode *mode) { return mode->vector_elem; }
-static INLINE void *
+static inline void *
_get_mode_link(const ir_mode *mode) { return mode->link; }
-static INLINE void
+static inline void
_set_mode_link(ir_mode *mode, void *l) { mode->link = l; }
/* Functions to check whether an ir_modecode is signed, float, int, num, data,
= {data || irm_M}
*/
-static INLINE int
+static inline int
_mode_is_signed(const ir_mode *mode) {
assert(mode);
return mode->sign;
}
-static INLINE int
+static inline int
_mode_is_float(const ir_mode *mode) {
assert(mode);
return (_get_mode_sort(mode) == irms_float_number);
}
-static INLINE int
+static inline int
_mode_is_int(const ir_mode *mode) {
assert(mode);
return (_get_mode_sort(mode) == irms_int_number);
}
-static INLINE int
+static inline int
_mode_is_reference(const ir_mode *mode) {
assert(mode);
return (_get_mode_sort(mode) == irms_reference);
}
-static INLINE int
+static inline int
_mode_is_num(const ir_mode *mode) {
assert(mode);
return (_mode_is_int(mode) || _mode_is_float(mode));
}
-static INLINE int
+static inline int
_mode_is_data(const ir_mode *mode) {
return (_mode_is_int(mode) || _mode_is_float(mode) || _mode_is_reference(mode));
}
-static INLINE int
+static inline int
_mode_is_datab(const ir_mode *mode) {
assert(mode);
return (_mode_is_data(mode) || _get_mode_sort(mode) == irms_internal_boolean);
}
-static INLINE int
+static inline int
_mode_is_dataM(const ir_mode *mode) {
assert(mode);
return (_mode_is_data(mode) || _get_mode_modecode(mode) == irm_M);
}
-static INLINE int
+static inline int
_mode_is_float_vector(const ir_mode *mode) {
assert(mode);
return (_get_mode_sort(mode) == irms_float_number) && (_get_mode_vector_elems(mode) > 1);
}
-static INLINE int
+static inline int
_mode_is_int_vector(const ir_mode *mode) {
assert(mode);
return (_get_mode_sort(mode) == irms_int_number) && (_get_mode_vector_elems(mode) > 1);
* Checks whether a pointer points to a ir node.
* Intern version for libFirm.
*/
-static INLINE int
+static inline int
_is_ir_node(const void *thing) {
return (get_kind(thing) == k_ir_node);
}
* Gets the op of a node.
* Intern version for libFirm.
*/
-static INLINE ir_op *
+static inline ir_op *
_get_irn_op(const ir_node *node) {
assert(node);
return node->op;
}
-static INLINE void
+static inline void
_set_irn_op(ir_node *node, ir_op *op) {
assert(node);
node->op = op;
/** Copies all attributes stored in the old node to the new node.
Assumes both have the same opcode and sufficient size. */
-static INLINE void
+static inline void
copy_node_attr(const ir_node *old_node, ir_node *new_node) {
ir_op *op = _get_irn_op(old_node);
* Gets the opcode of a node.
* Intern version for libFirm.
*/
-static INLINE unsigned
+static inline unsigned
_get_irn_opcode(const ir_node *node) {
assert(k_ir_node == get_kind(node));
assert(node->op);
* Returns the number of predecessors without the block predecessor.
* Intern version for libFirm.
*/
-static INLINE int
+static inline int
_get_irn_intra_arity(const ir_node *node) {
assert(node);
return ARR_LEN(node->in) - 1;
* Returns the number of predecessors without the block predecessor.
* Intern version for libFirm.
*/
-static INLINE int
+static inline int
_get_irn_inter_arity(const ir_node *node) {
assert(node);
if (_get_irn_op(node) == op_Filter) {
/**
* Intern version for libFirm.
*/
-static INLINE ir_node *
+static inline ir_node *
_get_irn_intra_n(const ir_node *node, int n) {
ir_node *nn;
/**
* Intern version for libFirm.
*/
-static INLINE ir_node*
+static inline ir_node*
_get_irn_inter_n(const ir_node *node, int n) {
assert(node); assert(-1 <= n && n < _get_irn_inter_arity(node));
/**
* returns a hash value for a node
*/
-static INLINE unsigned hash_irn(const ir_node *node)
+static inline unsigned hash_irn(const ir_node *node)
{
return (unsigned) get_irn_idx(node);
}
#define _get_irn_n(n,i) _get_irn_intra_n(n,i)
#endif
-static INLINE int _get_irn_deps(const ir_node *node) {
+static inline int _get_irn_deps(const ir_node *node) {
return node->deps ? ARR_LEN(node->deps) : 0;
}
-static INLINE ir_node *_get_irn_dep(const ir_node *node, int pos) {
+static inline ir_node *_get_irn_dep(const ir_node *node, int pos) {
assert(node->deps && "dependency array node yet allocated. use add_irn_dep()");
assert(pos >= 0 && pos < ARR_LEN(node->deps) && "dependency index out of range");
return node->deps[pos];
}
-static INLINE void
+static inline void
_set_irn_dep(ir_node *node, int pos, ir_node *dep) {
ir_node *old;
}
-static INLINE int
+static inline int
_get_irn_ins_or_deps(const ir_node *irn) {
return _get_irn_deps(irn) + _get_irn_arity(irn);
}
-static INLINE ir_node *
+static inline ir_node *
_get_irn_in_or_dep(const ir_node *irn, int pos) {
int n_in = get_irn_arity(irn);
return pos < n_in ? get_irn_n(irn, pos) : get_irn_dep(irn, pos - n_in);
* Gets the mode of a node.
* Intern version for libFirm.
*/
-static INLINE ir_mode *
+static inline ir_mode *
_get_irn_mode(const ir_node *node) {
assert(node);
return node->mode;
* Sets the mode of a node.
* Intern version of libFirm.
*/
-static INLINE void
+static inline void
_set_irn_mode(ir_node *node, ir_mode *mode) {
assert(node);
node->mode = mode;
* Gets the visited counter of a node.
* Intern version for libFirm.
*/
-static INLINE ir_visited_t
+static inline ir_visited_t
_get_irn_visited(const ir_node *node) {
assert(node);
return node->visited;
* Sets the visited counter of a node.
* Intern version for libFirm.
*/
-static INLINE void
+static inline void
_set_irn_visited(ir_node *node, ir_visited_t visited) {
assert(node);
node->visited = visited;
* Mark a node as visited in a graph.
* Intern version for libFirm.
*/
-static INLINE void
+static inline void
_mark_irn_visited(ir_node *node) {
assert(node);
node->visited = current_ir_graph->visited;
* Returns non-zero if a node of was visited.
* Intern version for libFirm.
*/
-static INLINE int
+static inline int
_irn_visited(const ir_node *node) {
assert(node);
return (node->visited >= current_ir_graph->visited);
}
-static INLINE int
+static inline int
_irn_visited_else_mark(ir_node *node) {
if (_irn_visited(node))
return 1;
* Sets the link of a node.
* Intern version of libFirm.
*/
-static INLINE void
+static inline void
_set_irn_link(ir_node *node, void *link) {
assert(node);
node->link = link;
* Returns the link of a node.
* Intern version of libFirm.
*/
-static INLINE void *
+static inline void *
_get_irn_link(const ir_node *node) {
assert(node && _is_ir_node(node));
return node->link;
*
* Intern version of libFirm.
*/
-static INLINE op_pin_state
+static inline op_pin_state
_get_irn_pinned(const ir_node *node) {
op_pin_state state;
assert(node && _is_ir_node(node));
return state;
}
-static INLINE op_pin_state
+static inline op_pin_state
_is_irn_pinned_in_irg(const ir_node *node) {
if (get_irg_pinned(get_irn_irg(node)) == op_pin_state_floats)
return get_irn_pinned(node);
return op_pin_state_pinned;
}
-static INLINE int
+static inline int
_is_unop(const ir_node *node) {
assert(node && _is_ir_node(node));
return (node->op->opar == oparity_unary);
}
-static INLINE int
+static inline int
_is_binop(const ir_node *node) {
assert(node && _is_ir_node(node));
return (node->op->opar == oparity_binary);
}
-static INLINE int
+static inline int
_is_Phi(const ir_node *node) {
ir_op *op;
assert(node);
return (op == op_Phi);
}
-static INLINE int
+static inline int
_is_Proj(const ir_node *node) {
ir_op *op;
assert(node);
return (op == op_Proj);
}
-static INLINE int
+static inline int
_is_Filter(const ir_node *node) {
assert(node);
return (_get_irn_op(node) == op_Filter);
}
-static INLINE int
+static inline int
_is_Bad(const ir_node *node) {
assert(node);
return (_get_irn_op(node) == op_Bad);
}
-static INLINE int
+static inline int
_is_NoMem(const ir_node *node) {
assert(node);
return (_get_irn_op(node) == op_NoMem);
}
-static INLINE int
+static inline int
_is_Minus(const ir_node *node) {
assert(node);
return (_get_irn_op(node) == op_Minus);
}
-static INLINE int
+static inline int
_is_Abs(const ir_node *node) {
assert(node);
return (_get_irn_op(node) == op_Abs);
}
-static INLINE int
+static inline int
_is_Mod(const ir_node *node) {
assert(node);
return (_get_irn_op(node) == op_Mod);
}
-static INLINE int
+static inline int
_is_Div(const ir_node *node) {
assert(node);
return (_get_irn_op(node) == op_Div);
}
-static INLINE int
+static inline int
_is_DivMod(const ir_node *node) {
assert(node);
return (_get_irn_op(node) == op_DivMod);
}
-static INLINE int
+static inline int
_is_Quot(const ir_node *node) {
assert(node);
return (_get_irn_op(node) == op_Quot);
}
-static INLINE int
+static inline int
_is_Add(const ir_node *node) {
assert(node);
return (_get_irn_op(node) == op_Add);
}
-static INLINE int
+static inline int
_is_Carry(const ir_node *node) {
assert(node);
return (_get_irn_op(node) == op_Carry);
}
-static INLINE int
+static inline int
_is_And(const ir_node *node) {
assert(node);
return (_get_irn_op(node) == op_And);
}
-static INLINE int
+static inline int
_is_Or(const ir_node *node) {
assert(node);
return (_get_irn_op(node) == op_Or);
}
-static INLINE int
+static inline int
_is_Eor(const ir_node *node) {
assert(node);
return (_get_irn_op(node) == op_Eor);
}
-static INLINE int
+static inline int
_is_Sub(const ir_node *node) {
assert(node);
return (_get_irn_op(node) == op_Sub);
}
-static INLINE int
+static inline int
_is_Not(const ir_node *node) {
assert(node);
return (_get_irn_op(node) == op_Not);
}
-static INLINE int
+static inline int
_is_Shl(const ir_node *node) {
assert(node);
return (_get_irn_op(node) == op_Shl);
}
-static INLINE int
+static inline int
_is_Shr(const ir_node *node) {
assert(node);
return (_get_irn_op(node) == op_Shr);
}
-static INLINE int
+static inline int
_is_Shrs(const ir_node *node) {
assert(node);
return (_get_irn_op(node) == op_Shrs);
}
-static INLINE int
+static inline int
_is_Rotl(const ir_node *node) {
assert(node);
return (_get_irn_op(node) == op_Rotl);
}
-static INLINE int
+static inline int
_is_Id(const ir_node *node) {
assert(node);
return (_get_irn_op(node) == op_Id);
}
-static INLINE int
+static inline int
_is_Tuple(const ir_node *node) {
assert(node);
return (_get_irn_op(node) == op_Tuple);
}
-static INLINE int
+static inline int
_is_Bound(const ir_node *node) {
assert(node);
return (_get_irn_op(node) == op_Bound);
}
-static INLINE int
+static inline int
_is_Start(const ir_node *node) {
assert(node);
return (_get_irn_op(node) == op_Start);
}
-static INLINE int
+static inline int
_is_End(const ir_node *node) {
assert(node);
return (_get_irn_op(node) == op_End);
}
-static INLINE int
+static inline int
_is_Const(const ir_node *node) {
assert(node);
return (_get_irn_op(node) == op_Const);
}
-static INLINE int
+static inline int
_is_Conv(const ir_node *node) {
assert(node);
return (_get_irn_op(node) == op_Conv);
}
-static INLINE int
+static inline int
_is_strictConv(const ir_node *node) {
return _is_Conv(node) && get_Conv_strict(node);
}
-static INLINE int
+static inline int
_is_Cast(const ir_node *node) {
assert(node);
return (_get_irn_op(node) == op_Cast);
}
-static INLINE int
+static inline int
_is_CopyB(const ir_node *node) {
assert(node);
return (_get_irn_op(node) == op_CopyB);
}
-static INLINE int
+static inline int
_is_Unknown(const ir_node *node) {
assert(node);
return (_get_irn_op(node) == op_Unknown);
}
-static INLINE int
+static inline int
_is_Return(const ir_node *node) {
assert(node);
return (_get_irn_op(node) == op_Return);
}
-static INLINE int
+static inline int
_is_Call(const ir_node *node) {
assert(node);
return (_get_irn_op(node) == op_Call);
}
-static INLINE int
+static inline int
_is_CallBegin(const ir_node *node) {
assert(node);
return (_get_irn_op(node) == op_CallBegin);
}
-static INLINE int
+static inline int
_is_Sel(const ir_node *node) {
assert(node);
return (_get_irn_op(node) == op_Sel);
}
-static INLINE int
+static inline int
_is_Mul(const ir_node *node) {
assert(node);
return (_get_irn_op(node) == op_Mul);
}
-static INLINE int
+static inline int
_is_Mulh(const ir_node *node) {
assert(node);
return (_get_irn_op(node) == op_Mulh);
}
-static INLINE int
+static inline int
_is_Mux(const ir_node *node) {
assert(node);
return (_get_irn_op(node) == op_Mux);
}
-static INLINE int
+static inline int
_is_Load(const ir_node *node) {
assert(node);
return (_get_irn_op(node) == op_Load);
}
-static INLINE int
+static inline int
_is_Store(const ir_node *node) {
assert(node);
return (_get_irn_op(node) == op_Store);
}
-static INLINE int
+static inline int
_is_Sync(const ir_node *node) {
assert(node);
return (_get_irn_op(node) == op_Sync);
}
-static INLINE int
+static inline int
_is_Confirm(const ir_node *node) {
assert(node);
return (_get_irn_op(node) == op_Confirm);
}
-static INLINE int
+static inline int
_is_Pin(const ir_node *node) {
assert(node);
return (_get_irn_op(node) == op_Pin);
}
-static INLINE int
+static inline int
_is_SymConst(const ir_node *node) {
assert(node);
return (_get_irn_op(node) == op_SymConst);
}
-static INLINE int
+static inline int
_is_SymConst_addr_ent(const ir_node *node) {
return is_SymConst(node) && get_SymConst_kind(node) == symconst_addr_ent;
}
-static INLINE int
+static inline int
_is_Cond(const ir_node *node) {
assert(node);
return (_get_irn_op(node) == op_Cond);
}
-static INLINE int
+static inline int
_is_Cmp(const ir_node *node) {
assert(node);
return (_get_irn_op(node) == op_Cmp);
}
-static INLINE int
+static inline int
_is_Alloc(const ir_node *node) {
assert(node);
return (_get_irn_op(node) == op_Alloc);
}
-static INLINE int
+static inline int
_is_Free(const ir_node *node) {
assert(node);
return (_get_irn_op(node) == op_Free);
}
-static INLINE int
+static inline int
_is_Jmp(const ir_node *node) {
assert(node);
return (_get_irn_op(node) == op_Jmp);
}
-static INLINE int
+static inline int
_is_IJmp(const ir_node *node) {
assert(node);
return (_get_irn_op(node) == op_IJmp);
}
-static INLINE int
+static inline int
_is_Raise(const ir_node *node) {
assert(node);
return (_get_irn_op(node) == op_Raise);
}
-static INLINE int
+static inline int
_is_ASM(const ir_node *node) {
assert(node);
return (_get_irn_op(node) == op_ASM);
}
-static INLINE int
+static inline int
_is_Anchor(const ir_node *node) {
return (_get_irn_op(node) == op_Anchor);
}
-static INLINE int
+static inline int
_is_no_Block(const ir_node *node) {
assert(node && _is_ir_node(node));
return (_get_irn_op(node) != op_Block);
}
-static INLINE int
+static inline int
_is_Block(const ir_node *node) {
assert(node && _is_ir_node(node));
return (_get_irn_op(node) == op_Block);
}
-static INLINE int
+static inline int
_get_Block_n_cfgpreds(const ir_node *node) {
assert(_is_Block(node));
return _get_irn_arity(node);
}
-static INLINE ir_node *
+static inline ir_node *
_get_Block_cfgpred(const ir_node *node, int pos) {
assert(0 <= pos && pos < get_irn_arity(node));
assert(_is_Block(node));
* - If we encounter the Bad node, this function does not return
* Start, but the Bad node.
*/
-static INLINE ir_node *
+static inline ir_node *
_get_Block_cfgpred_block(const ir_node *node, int pos) {
ir_node *res = skip_Proj(get_Block_cfgpred(node, pos));
if (!is_Bad(res))
return res;
}
-static INLINE ir_visited_t
+static inline ir_visited_t
_get_Block_block_visited(const ir_node *node) {
assert(node->op == op_Block);
return node->attr.block.block_visited;
}
-static INLINE void
+static inline void
_set_Block_block_visited(ir_node *node, ir_visited_t visit) {
assert(node->op == op_Block);
node->attr.block.block_visited = visit;
}
/* For this current_ir_graph must be set. */
-static INLINE void
+static inline void
_mark_Block_block_visited(ir_node *node) {
assert(node->op == op_Block);
node->attr.block.block_visited = get_irg_block_visited(current_ir_graph);
}
-static INLINE int
+static inline int
_Block_block_visited(const ir_node *node) {
assert(node->op == op_Block);
return (node->attr.block.block_visited >= get_irg_block_visited(current_ir_graph));
}
-static INLINE ir_node *
+static inline ir_node *
_set_Block_dead(ir_node *block) {
assert(_get_irn_op(block) == op_Block);
block->attr.block.dom.dom_depth = -1;
return block;
}
-static INLINE int
+static inline int
_is_Block_dead(const ir_node *block) {
ir_op *op = _get_irn_op(block);
}
}
-static INLINE tarval *_get_Const_tarval(const ir_node *node) {
+static inline tarval *_get_Const_tarval(const ir_node *node) {
assert(_get_irn_op(node) == op_Const);
return node->attr.con.tv;
}
-static INLINE int _is_Const_null(const ir_node *node) {
+static inline int _is_Const_null(const ir_node *node) {
return tarval_is_null(_get_Const_tarval(node));
}
-static INLINE int _is_Const_one(const ir_node *node) {
+static inline int _is_Const_one(const ir_node *node) {
return tarval_is_one(_get_Const_tarval(node));
}
-static INLINE int _is_Const_all_one(const ir_node *node) {
+static inline int _is_Const_all_one(const ir_node *node) {
return tarval_is_all_one(_get_Const_tarval(node));
}
-static INLINE int _is_irn_forking(const ir_node *node) {
+static inline int _is_irn_forking(const ir_node *node) {
return is_op_forking(_get_irn_op(node));
}
-static INLINE ir_type *_get_irn_type(ir_node *node) {
+static inline ir_type *_get_irn_type(ir_node *node) {
return _get_irn_op(node)->ops.get_type(node);
}
-static INLINE ir_type *_get_irn_type_attr(ir_node *node) {
+static inline ir_type *_get_irn_type_attr(ir_node *node) {
return _get_irn_op(node)->ops.get_type_attr(node);
}
-static INLINE ir_entity *_get_irn_entity_attr(ir_node *node) {
+static inline ir_entity *_get_irn_entity_attr(ir_node *node) {
return _get_irn_op(node)->ops.get_entity_attr(node);
}
-static INLINE int _is_irn_constlike(const ir_node *node) {
+static inline int _is_irn_constlike(const ir_node *node) {
return is_op_constlike(_get_irn_op(node));
}
-static INLINE int _is_irn_always_opt(const ir_node *node) {
+static inline int _is_irn_always_opt(const ir_node *node) {
return is_op_always_opt(_get_irn_op(node));
}
-static INLINE int _is_irn_keep(const ir_node *node) {
+static inline int _is_irn_keep(const ir_node *node) {
return is_op_keep(_get_irn_op(node));
}
-static INLINE int _is_irn_start_block_placed(const ir_node *node) {
+static inline int _is_irn_start_block_placed(const ir_node *node) {
return is_op_start_block_placed(_get_irn_op(node));
}
-static INLINE int _is_irn_machine_op(const ir_node *node) {
+static inline int _is_irn_machine_op(const ir_node *node) {
return is_op_machine(_get_irn_op(node));
}
-static INLINE int _is_irn_machine_operand(const ir_node *node) {
+static inline int _is_irn_machine_operand(const ir_node *node) {
return is_op_machine_operand(_get_irn_op(node));
}
-static INLINE int _is_irn_machine_user(const ir_node *node, unsigned n) {
+static inline int _is_irn_machine_user(const ir_node *node, unsigned n) {
return is_op_machine_user(_get_irn_op(node), n);
}
-static INLINE cond_jmp_predicate _get_Cond_jmp_pred(const ir_node *node) {
+static inline cond_jmp_predicate _get_Cond_jmp_pred(const ir_node *node) {
assert(_get_irn_op(node) == op_Cond);
return node->attr.cond.pred;
}
-static INLINE void _set_Cond_jmp_pred(ir_node *node, cond_jmp_predicate pred) {
+static inline void _set_Cond_jmp_pred(ir_node *node, cond_jmp_predicate pred) {
assert(_get_irn_op(node) == op_Cond);
node->attr.cond.pred = pred;
}
-static INLINE void *_get_irn_generic_attr(ir_node *node) {
+static inline void *_get_irn_generic_attr(ir_node *node) {
return &node->attr;
}
-static INLINE const void *_get_irn_generic_attr_const(const ir_node *node) {
+static inline const void *_get_irn_generic_attr_const(const ir_node *node) {
return &node->attr;
}
-static INLINE unsigned _get_irn_idx(const ir_node *node) {
+static inline unsigned _get_irn_idx(const ir_node *node) {
return node->node_idx;
}
-static INLINE dbg_info *_get_irn_dbg_info(const ir_node *n) {
+static inline dbg_info *_get_irn_dbg_info(const ir_node *n) {
return n->dbi;
} /* get_irn_dbg_info */
-static INLINE void _set_irn_dbg_info(ir_node *n, dbg_info *db) {
+static inline void _set_irn_dbg_info(ir_node *n, dbg_info *db) {
n->dbi = db;
}
/**
* Sets the Phi list of a block.
*/
-static INLINE void
+static inline void
_set_Block_phis(ir_node *block, ir_node *phi) {
assert(_is_Block(block));
assert(phi == NULL || _is_Phi(phi));
* Returns the link of a node.
* Intern version of libFirm.
*/
-static INLINE ir_node *
+static inline ir_node *
_get_Block_phis(const ir_node *block) {
assert(_is_Block(block));
return block->attr.block.phis;
/**
* Sets the next link of a Phi.
*/
-static INLINE void
+static inline void
_set_Phi_next(ir_node *phi, ir_node *next) {
assert(_is_Phi(phi));
phi->attr.phi.next = next;
* Returns the link of a node.
* Intern version of libFirm.
*/
-static INLINE ir_node *
+static inline ir_node *
_get_Phi_next(const ir_node *phi) {
assert(_is_Phi(phi));
return phi->attr.phi.next;
}
/** Add a Phi node to the list of Block Phi's. */
-static INLINE void
+static inline void
_add_Block_phi(ir_node *block, ir_node *phi) {
_set_Phi_next(phi, _get_Block_phis(block));
_set_Block_phis(block, phi);
}
/** Get the Block mark (single bit). */
-static INLINE unsigned
+static inline unsigned
_get_Block_mark(const ir_node *block) {
assert(_is_Block(block));
return block->attr.block.marked;
}
/** Set the Block mark (single bit). */
-static INLINE void
+static inline void
_set_Block_mark(ir_node *block, unsigned mark) {
assert(_is_Block(block));
block->attr.block.marked = mark;
}
/** Returns non-zero if a node is a routine parameter. */
-static INLINE int
+static inline int
_is_arg_Proj(const ir_node *node) {
if (! is_Proj(node))
return 0;
* @param expected_elements Number of elements expected in the nodeset (roughly)
* @return The initialized nodeset
*/
-static INLINE ir_nodeset_t *ir_nodeset_new(size_t expected_elements) {
+static inline ir_nodeset_t *ir_nodeset_new(size_t expected_elements) {
ir_nodeset_t *res = XMALLOC(ir_nodeset_t);
ir_nodeset_init_size(res, expected_elements);
return res;
/**
* Destroys a nodeset and frees the memory of the nodeset itself.
*/
-static INLINE void ir_nodeset_del(ir_nodeset_t *nodeset) {
+static inline void ir_nodeset_del(ir_nodeset_t *nodeset) {
ir_nodeset_destroy(nodeset);
xfree(nodeset);
}
* Returns the attribute size of nodes of this opcode.
* @note Use not encouraged, internal feature.
*/
-static INLINE size_t get_op_attr_size (const ir_op *op) {
+static inline size_t get_op_attr_size (const ir_op *op) {
return op->attr_size;
}
* Returns non-zero if op is a control flow opcode,
* like Start, End, Jmp, Cond, Return, Raise or Bad.
*/
-static INLINE int is_op_cfopcode(const ir_op *op) {
+static inline int is_op_cfopcode(const ir_op *op) {
return op->flags & irop_flag_cfopcode;
}
* Returns non-zero if the operation manipulates interprocedural control flow:
* CallBegin, EndReg, EndExcept
*/
-static INLINE int is_ip_cfopcode(const ir_op *op) {
+static inline int is_ip_cfopcode(const ir_op *op) {
return op->flags & irop_flag_ip_cfopcode;
}
/** Returns non-zero if operation is commutative */
-static INLINE int is_op_commutative(const ir_op *op) {
+static inline int is_op_commutative(const ir_op *op) {
return op->flags & irop_flag_commutative;
}
/** Returns non-zero if operation is fragile */
-static INLINE int is_op_fragile(const ir_op *op) {
+static inline int is_op_fragile(const ir_op *op) {
return op->flags & irop_flag_fragile;
}
/** Returns non-zero if operation is forking control flow */
-static INLINE int is_op_forking(const ir_op *op) {
+static inline int is_op_forking(const ir_op *op) {
return op->flags & irop_flag_forking;
}
/** Returns non-zero if operation is a high-level op */
-static INLINE int is_op_highlevel(const ir_op *op) {
+static inline int is_op_highlevel(const ir_op *op) {
return op->flags & irop_flag_highlevel;
}
/** Returns non-zero if operation is a const-like op */
-static INLINE int is_op_constlike(const ir_op *op) {
+static inline int is_op_constlike(const ir_op *op) {
return op->flags & irop_flag_constlike;
}
-static INLINE int is_op_uses_memory(const ir_op *op) {
+static inline int is_op_uses_memory(const ir_op *op) {
return op->flags & irop_flag_uses_memory;
}
/** Returns non-zero if operation must always be optimized */
-static INLINE int is_op_always_opt(const ir_op *op) {
+static inline int is_op_always_opt(const ir_op *op) {
return op->flags & irop_flag_always_opt;
}
/** Returns non-zero if operation is a keep-like op */
-static INLINE int is_op_keep(const ir_op *op) {
+static inline int is_op_keep(const ir_op *op) {
return op->flags & irop_flag_keep;
}
/** Returns non-zero if operation must always be placed in the start block. */
-static INLINE int is_op_start_block_placed(const ir_op *op) {
+static inline int is_op_start_block_placed(const ir_op *op) {
return op->flags & irop_flag_start_block;
}
/** Returns non-zero if operation is a machine operation */
-static INLINE int is_op_machine(const ir_op *op) {
+static inline int is_op_machine(const ir_op *op) {
return op->flags & irop_flag_machine;
}
/** Returns non-zero if operation is a machine operand */
-static INLINE int is_op_machine_operand(const ir_op *op) {
+static inline int is_op_machine_operand(const ir_op *op) {
return op->flags & irop_flag_machine_op;
}
/** Returns non-zero if operation is a machine user op number n */
-static INLINE int is_op_machine_user(const ir_op *op, unsigned n) {
+static inline int is_op_machine_user(const ir_op *op, unsigned n) {
return op->flags & (irop_flag_user << n);
}
-static INLINE unsigned _get_op_code(const ir_op *op) {
+static inline unsigned _get_op_code(const ir_op *op) {
return op->code;
}
-static INLINE ident *_get_op_ident(const ir_op *op){
+static inline ident *_get_op_ident(const ir_op *op){
return op->name;
}
-static INLINE op_pin_state _get_op_pinned(const ir_op *op) {
+static inline op_pin_state _get_op_pinned(const ir_op *op) {
return op->pin_state;
}
-static INLINE void _set_generic_function_ptr(ir_op *op, op_func func) {
+static inline void _set_generic_function_ptr(ir_op *op, op_func func) {
op->ops.generic = func;
}
-static INLINE op_func _get_generic_function_ptr(const ir_op *op) {
+static inline op_func _get_generic_function_ptr(const ir_op *op) {
return op->ops.generic;
}
-static INLINE const ir_op_ops *_get_op_ops(const ir_op *op) {
+static inline const ir_op_ops *_get_op_ops(const ir_op *op) {
return &op->ops;
}
-static INLINE void _set_op_tag(ir_op *op, unsigned tag) {
+static inline void _set_op_tag(ir_op *op, unsigned tag) {
op->tag = tag;
}
-static INLINE unsigned _get_op_tag(const ir_op *op) {
+static inline unsigned _get_op_tag(const ir_op *op) {
return op->tag;
}
-static INLINE void _set_op_attr(ir_op *op, void *attr) {
+static inline void _set_op_attr(ir_op *op, void *attr) {
op->attr = attr;
}
-static INLINE void *_get_op_attr(const ir_op *op) {
+static inline void *_get_op_attr(const ir_op *op) {
return op->attr;
}
* @param value_table The value table
* @param n The node to lookup
*/
-static INLINE ir_node *identify_cons(pset *value_table, ir_node *n) {
+static inline ir_node *identify_cons(pset *value_table, ir_node *n) {
ir_node *old = n;
n = identify_remember(value_table, n);
/**
* Returns the associated tarval of a node.
*/
-static INLINE tarval *
+static inline tarval *
value_of(const ir_node *n) {
return value_of_ptr(n);
}
/**
* This is private and only here for performance reasons.
*/
-static INLINE void _phase_reinit_single_irn_data(ir_phase *phase, ir_node *irn)
+static inline void _phase_reinit_single_irn_data(ir_phase *phase, ir_node *irn)
{
int idx;
/**
* This is private and just here for performance reasons.
*/
-static INLINE void _private_phase_enlarge(ir_phase *phase, unsigned max_idx)
+static inline void _private_phase_enlarge(ir_phase *phase, unsigned max_idx)
{
unsigned last_irg_idx = get_irg_last_idx(phase->irg);
size_t old_cap = phase->n_data_ptr;
*/
#define _private_phase_assure_capacity(ph, max_idx) ((max_idx) >= (ph)->n_data_ptr ? (_private_phase_enlarge((ph), (max_idx)), 1) : 1)
-static INLINE void *_phase_get_irn_data(const ir_phase *ph, const ir_node *irn)
+static inline void *_phase_get_irn_data(const ir_phase *ph, const ir_node *irn)
{
unsigned idx = get_irn_idx(irn);
return idx < ph->n_data_ptr ? ph->data_ptr[idx] : NULL;
}
-static INLINE void *_phase_set_irn_data(ir_phase *ph, const ir_node *irn, void *data)
+static inline void *_phase_set_irn_data(ir_phase *ph, const ir_node *irn, void *data)
{
unsigned idx = get_irn_idx(irn);
void *res;
}
-static INLINE void *_phase_get_or_set_irn_data(ir_phase *ph, const ir_node *irn)
+static inline void *_phase_get_or_set_irn_data(ir_phase *ph, const ir_node *irn)
{
unsigned idx = get_irn_idx(irn);
void *res;
return res;
}
-static INLINE ir_phase *_get_irg_phase(const ir_graph *irg, ir_phase_id id)
+static inline ir_phase *_get_irg_phase(const ir_graph *irg, ir_phase_id id)
{
return irg->phases[id];
}
-static INLINE void *_get_irn_phase_info(const ir_node *irn, ir_phase_id id)
+static inline void *_get_irn_phase_info(const ir_node *irn, ir_phase_id id)
{
const ir_graph *irg = get_irn_irg(irn);
const ir_phase *ph = get_irg_phase(irg, id);
return _phase_get_irn_data(ph, irn);
}
-static INLINE void *_get_or_set_irn_phase_info(const ir_node *irn, ir_phase_id id)
+static inline void *_get_or_set_irn_phase_info(const ir_node *irn, ir_phase_id id)
{
const ir_graph *irg = get_irn_irg(irn);
ir_phase *ph = get_irg_phase(irg, id);
return _phase_get_or_set_irn_data(ph, irn);
}
-static INLINE void *_set_irn_phase_info(const ir_node *irn, ir_phase_id id, void *data)
+static inline void *_set_irn_phase_info(const ir_node *irn, ir_phase_id id, void *data)
{
const ir_graph *irg = get_irn_irg(irn);
ir_phase *ph = get_irg_phase(irg, id);
#else
-static INLINE void ir_debugf(const char *fmt, ...)
+static inline void ir_debugf(const char *fmt, ...)
{
(void) fmt;
}
-static INLINE void ir_fdebugf(FILE *f, const char *fmt, ...)
+static inline void ir_fdebugf(FILE *f, const char *fmt, ...)
{
(void) f;
(void) fmt;
/** Adds mode to the list of modes in irp. */
void add_irp_mode(ir_mode *mode);
-/* INLINE functions */
-static INLINE ir_type *
+/* inline functions */
+static inline ir_type *
_get_segment_type(ir_segment_t segment)
{
ir_type *type;
return type;
}
-static INLINE ir_type *
+static inline ir_type *
_get_glob_type(void) {
return _get_segment_type(IR_SEGMENT_GLOBAL);
}
-static INLINE ir_type *
+static inline ir_type *
_get_tls_type(void) {
return _get_segment_type(IR_SEGMENT_THREAD_LOCAL);
}
-static INLINE int
+static inline int
_get_irp_n_irgs(void) {
assert (irp && irp->graphs);
if (get_visit_pseudo_irgs()) return get_irp_n_allirgs();
return ARR_LEN(irp->graphs);
}
-static INLINE ir_graph *
+static inline ir_graph *
_get_irp_irg(int pos){
if (get_visit_pseudo_irgs()) return get_irp_allirg(pos);
assert(0 <= pos && pos <= _get_irp_n_irgs());
}
-static INLINE int
+static inline int
_get_irp_n_types(void) {
assert (irp && irp->types);
return ARR_LEN(irp->types);
}
-static INLINE ir_type *
+static inline ir_type *
_get_irp_type(int pos) {
assert (irp && irp->types);
/* Don't set the skip_tid result so that no double entries are generated. */
return skip_tid(irp->types[pos]);
}
-static INLINE int
+static inline int
_get_irp_n_modes(void) {
assert (irp && irp->modes);
return ARR_LEN(irp->modes);
}
-static INLINE ir_mode *
+static inline ir_mode *
_get_irp_mode(int pos) {
assert (irp && irp->modes);
return irp->modes[pos];
}
-static INLINE int
+static inline int
_get_irp_n_opcodes(void) {
assert (irp && irp->opcodes);
return ARR_LEN(irp->opcodes);
}
-static INLINE ir_op *
+static inline ir_op *
_get_irp_opcode(int pos) {
assert (irp && irp->opcodes);
return irp->opcodes[pos];
#ifdef DEBUG_libfirm
/** Returns a new, unique number to number nodes or the like. */
-static INLINE long
+static inline long
get_irp_new_node_nr(void) {
assert(irp);
return irp->max_node_nr++;
}
#endif /* DEBUG_libfirm */
-static INLINE int
+static inline int
get_irp_new_irg_idx(void) {
assert(irp);
return irp->max_irg_idx++;
}
-static INLINE ir_graph *
+static inline ir_graph *
_get_const_code_irg(void) {
return irp->const_code_irg;
}
/** Returns a new, unique exception region number. */
-static INLINE ir_exc_region_t
+static inline ir_exc_region_t
_get_irp_next_region_nr(void) {
assert(irp);
return ++irp->last_region_nr;
}
/** Returns a new, unique label number. */
-static INLINE ir_label_t
+static inline ir_label_t
_get_irp_next_label_nr(void) {
assert(irp);
return ++irp->last_label_nr;
* @param expected_elements Number of elements expected in the value set (roughly)
* @return The initialized value set
*/
-static INLINE ir_valueset_t *ir_valueset_new(size_t expected_elements) {
+static inline ir_valueset_t *ir_valueset_new(size_t expected_elements) {
ir_valueset_t *res = XMALLOC(ir_valueset_t);
ir_valueset_init_size(res, expected_elements);
return res;
/**
* Destroys a value set and frees the memory of the set itself.
*/
-static INLINE void ir_valueset_del(ir_valueset_t *valueset) {
+static inline void ir_valueset_del(ir_valueset_t *valueset) {
ir_valueset_destroy(valueset);
xfree(valueset);
}
/* ISO C99 Standard stuff */
#ifdef LC_HAVE_C99
-#define LC_INLINE inline
#define LC_FUNCNAME __func__
#define LC_UNUSED(x) x
#define LC_LONGLONG long long
/* definitions using GCC */
#elif defined(__GNUC__)
-#define LC_INLINE __inline__
+#define inline __inline__
#define LC_FUNCNAME __FUNCTION__
#define LC_UNUSED(x) x __attribute__((__unused__))
#elif defined(_MSC_VER)
-#define LC_INLINE __inline
+#define inline __inline
#define LC_FUNCNAME "<unknown>"
#define LC_UNUSED(x) x
#define LC_LONGLONG __int64
/* default definitions */
#else /* defined(_MSC_VER) */
-#define LC_INLINE
+#define inline
#define LC_FUNCNAME "<unknown>"
#define LC_UNUSED(x)
#define LC_LONGLONG long
/** The default argument environment. */
static lc_arg_env_t *default_env = NULL;
-static INLINE lc_arg_env_t *_lc_arg_get_default_env(void)
+static inline lc_arg_env_t *_lc_arg_get_default_env(void)
{
if(!default_env)
default_env = lc_arg_add_std(lc_arg_new_env());
/* FIXME: the name clashes here with the function from ana/field_temperature.c
* please rename. */
-static INLINE int get_irn_loop_depth(ir_node *n) {
+static inline int get_irn_loop_depth(ir_node *n) {
return get_loop_depth(get_irn_loop(n));
}
#endif /* CHECK_PARTITIONS */
#ifdef DEBUG_libfirm
-static INLINE lattice_elem_t get_partition_type(const partition_t *X);
+static inline lattice_elem_t get_partition_type(const partition_t *X);
/**
* Dump partition to output.
*
* @return the associated type of this node
*/
-static INLINE lattice_elem_t get_node_type(const ir_node *irn) {
+static inline lattice_elem_t get_node_type(const ir_node *irn) {
return get_irn_node(irn)->type;
} /* get_node_type */
*
* @return the associated type of this node
*/
-static INLINE tarval *get_node_tarval(const ir_node *irn) {
+static inline tarval *get_node_tarval(const ir_node *irn) {
lattice_elem_t type = get_node_type(irn);
if (is_tarval(type.tv))
/**
* Add a partition to the worklist.
*/
-static INLINE void add_to_worklist(partition_t *X, environment_t *env) {
+static inline void add_to_worklist(partition_t *X, environment_t *env) {
assert(X->on_worklist == 0);
DB((dbg, LEVEL_2, "Adding part%d to worklist\n", X->nr));
X->wl_next = env->worklist;
*
* @return a newly allocated partition
*/
-static INLINE partition_t *new_partition(environment_t *env) {
+static inline partition_t *new_partition(environment_t *env) {
partition_t *part = obstack_alloc(&env->obst, sizeof(*part));
INIT_LIST_HEAD(&part->Leader);
/**
* Get the first node from a partition.
*/
-static INLINE node_t *get_first_node(const partition_t *X) {
+static inline node_t *get_first_node(const partition_t *X) {
return list_entry(X->Leader.next, node_t, node_list);
} /* get_first_node */
*
* @return the type of the first element of the partition
*/
-static INLINE lattice_elem_t get_partition_type(const partition_t *X) {
+static inline lattice_elem_t get_partition_type(const partition_t *X) {
const node_t *first = get_first_node(X);
return first->type;
} /* get_partition_type */
* @param y a node
* @param env the environment
*/
-static INLINE void add_to_touched(node_t *y, environment_t *env) {
+static inline void add_to_touched(node_t *y, environment_t *env) {
if (y->on_touched == 0) {
partition_t *part = y->part;
DEBUG_ONLY(static firm_dbg_module_t *dbg);
-static INLINE int imin(int a, int b) { return a < b ? a : b; }
+static inline int imin(int a, int b) { return a < b ? a : b; }
static
int is_optimizable_node(const ir_node *node)
* accesses. This function is called for all Phi and Block nodes
* in a Block.
*/
-static INLINE int
+static inline int
compute_new_arity(ir_node *b) {
int i, res, irn_arity;
int irg_v, block_v;
* Returns TRUE if the number of callers is 0 in the irg's environment,
* hence this irg is a leave.
*/
-INLINE static int is_leave(ir_graph *irg) {
+inline static int is_leave(ir_graph *irg) {
inline_irg_env *env = get_irg_link(irg);
return env->n_call_nodes == 0;
}
* Returns TRUE if the number of nodes in the callee is
* smaller then size in the irg's environment.
*/
-INLINE static int is_smaller(ir_graph *callee, unsigned size) {
+inline static int is_smaller(ir_graph *callee, unsigned size) {
inline_irg_env *env = get_irg_link(callee);
return env->n_nodes < size;
}
/**
* increase a counter
*/
-static INLINE void cnt_inc(counter_t *cnt)
+static inline void cnt_inc(counter_t *cnt)
{
int i;
/**
* decrease a counter
*/
-static INLINE void cnt_dec(counter_t *cnt)
+static inline void cnt_dec(counter_t *cnt)
{
int i;
/**
* set a counter to zero
*/
-static INLINE void cnt_clr(counter_t *cnt)
+static inline void cnt_clr(counter_t *cnt)
{
memset(cnt->cnt, 0, sizeof(cnt->cnt));
}
/**
* add a counter to another
*/
-static INLINE void cnt_add(counter_t *dst, const counter_t *src)
+static inline void cnt_add(counter_t *dst, const counter_t *src)
{
int i, carry = 0;
/**
* add an (positive) integer to an counter
*/
-static INLINE void cnt_add_i(counter_t *dst, int src)
+static inline void cnt_add_i(counter_t *dst, int src)
{
int i;
unsigned carry = src;
/**
* compare two counter
*/
-static INLINE int cnt_cmp(const counter_t *a, const counter_t *b)
+static inline int cnt_cmp(const counter_t *a, const counter_t *b)
{
int i;
unsigned va, vb;
/**
* convert a counter into a double
*/
-static INLINE double cnt_to_dbl(const counter_t *a)
+static inline double cnt_to_dbl(const counter_t *a)
{
int i;
double res = 0.0, scale = 1.0, tmp;
/**
* convert a counter into an unsigned
*/
-static INLINE unsigned cnt_to_uint(const counter_t *a)
+static inline unsigned cnt_to_uint(const counter_t *a)
{
int i;
/**
* check, if a counter is equal to an unsigned
*/
-static INLINE int cnt_eq(const counter_t *a, unsigned value)
+static inline int cnt_eq(const counter_t *a, unsigned value)
{
int i;
/**
* check, if a counter as greater than an unsigned
*/
-static INLINE int cnt_gt(const counter_t *a, unsigned value)
+static inline int cnt_gt(const counter_t *a, unsigned value)
{
int i;
*
* The hash value for the buffer content is updated.
*/
-static INLINE void put_byte(CODE_BUFFER *buf, BYTE byte) {
+static inline void put_byte(CODE_BUFFER *buf, BYTE byte) {
if (buf->next < buf->end) {
*buf->next++ = byte;
buf->hash = (buf->hash * 9) ^ byte;
*
* @return the next byte from the code buffer
*/
-static INLINE BYTE look_byte(CODE_BUFFER *buf) {
+static inline BYTE look_byte(CODE_BUFFER *buf) {
if (buf->next < buf->end)
return *buf->next;
return VLC_TAG_END;
*
* @return the next byte from the code buffer
*/
-static INLINE BYTE get_byte(CODE_BUFFER *buf) {
+static inline BYTE get_byte(CODE_BUFFER *buf) {
if (buf->next < buf->end)
return *buf->next++;
return VLC_TAG_END;
#include <regex.h>
static regex_t regex;
static regex_t *filter = NULL;
-static INLINE int key_matches(const char *key)
+static inline int key_matches(const char *key)
{
if (!filter)
return 1;
#else
static char filter[128] = { '\0' };
-static INLINE int key_matches(const char *key)
+static inline int key_matches(const char *key)
{
int i = 0;
extern timing_sched_env_t stat_ev_sched_rt;
extern timing_sched_env_t stat_ev_sched_normal;
-static INLINE __attribute__((unused)) void stat_ev_tim_push(void) {
+static inline __attribute__((unused)) void stat_ev_tim_push(void) {
timing_ticks_t temp;
int sp = stat_ev_timer_sp++;
timing_ticks(temp);
timing_ticks(stat_ev_timer_start[sp]);
}
-static INLINE __attribute__((unused)) void stat_ev_tim_pop(const char *name) {
+static inline __attribute__((unused)) void stat_ev_tim_pop(const char *name) {
int sp;
timing_ticks_t temp;
timing_ticks(temp);
/**
* Add an entity to it's already set owner type.
*/
-static INLINE void insert_entity_in_owner(ir_entity *ent) {
+static inline void insert_entity_in_owner(ir_entity *ent) {
ir_type *owner = ent->owner;
switch (get_type_tpop_code(owner)) {
case tpo_class:
*
* @return the new created entity
*/
-static INLINE ir_entity *
+static inline ir_entity *
new_rd_entity(dbg_info *db, ir_type *owner, ident *name, ir_type *type)
{
ir_entity *res;
/* ----------------------- inline functions ------------------------ */
-static INLINE int
+static inline int
_is_entity(const void *thing) {
return get_kind(thing) == k_entity;
}
-static INLINE const char *
+static inline const char *
_get_entity_name(const ir_entity *ent) {
assert(ent && ent->kind == k_entity);
return get_id_str(get_entity_ident(ent));
}
-static INLINE ident *
+static inline ident *
_get_entity_ident(const ir_entity *ent) {
assert(ent && ent->kind == k_entity);
return ent->name;
}
-static INLINE void
+static inline void
_set_entity_ident(ir_entity *ent, ident *id) {
assert(ent && ent->kind == k_entity);
ent->name = id;
}
-static INLINE ir_type *
+static inline ir_type *
_get_entity_owner(ir_entity *ent) {
assert(ent && ent->kind == k_entity);
return ent->owner = skip_tid(ent->owner);
}
-static INLINE ident *
+static inline ident *
_get_entity_ld_ident(ir_entity *ent)
{
assert(ent && ent->kind == k_entity);
return ent->ld_name;
}
-static INLINE void
+static inline void
_set_entity_ld_ident(ir_entity *ent, ident *ld_ident) {
assert(ent && ent->kind == k_entity);
ent->ld_name = ld_ident;
}
-static INLINE const char *
+static inline const char *
_get_entity_ld_name(ir_entity *ent) {
assert(ent && ent->kind == k_entity);
return get_id_str(get_entity_ld_ident(ent));
}
-static INLINE ir_type *
+static inline ir_type *
_get_entity_type(ir_entity *ent) {
assert(ent && ent->kind == k_entity);
return ent->type = skip_tid(ent->type);
}
-static INLINE void
+static inline void
_set_entity_type(ir_entity *ent, ir_type *type) {
assert(ent && ent->kind == k_entity);
ent->type = type;
}
-static INLINE ir_allocation
+static inline ir_allocation
_get_entity_allocation(const ir_entity *ent) {
assert(ent && ent->kind == k_entity);
return ent->allocation;
}
-static INLINE void
+static inline void
_set_entity_allocation(ir_entity *ent, ir_allocation al) {
assert(ent && ent->kind == k_entity);
ent->allocation = al;
}
-static INLINE ir_visibility
+static inline ir_visibility
_get_entity_visibility(const ir_entity *ent) {
assert(ent && ent->kind == k_entity);
return ent->visibility;
}
-static INLINE ir_variability
+static inline ir_variability
_get_entity_variability(const ir_entity *ent) {
assert(ent && ent->kind == k_entity);
return ent->variability;
}
-static INLINE ir_volatility
+static inline ir_volatility
_get_entity_volatility(const ir_entity *ent) {
assert(ent && ent->kind == k_entity);
return ent->volatility;
}
-static INLINE void
+static inline void
_set_entity_volatility(ir_entity *ent, ir_volatility vol) {
assert(ent && ent->kind == k_entity);
ent->volatility = vol;
}
-static INLINE ir_align
+static inline ir_align
_get_entity_align(const ir_entity *ent) {
assert(ent && ent->kind == k_entity);
return ent->align;
}
-static INLINE void
+static inline void
_set_entity_align(ir_entity *ent, ir_align a) {
assert(ent && ent->kind == k_entity);
ent->align = a;
}
-static INLINE ir_peculiarity
+static inline ir_peculiarity
_get_entity_peculiarity(const ir_entity *ent) {
assert(ent && ent->kind == k_entity);
return ent->peculiarity;
* describe a field? With inherited the situation changed. So
* I removed the assertion. GL, 28.2.05
*/
-static INLINE void
+static inline void
_set_entity_peculiarity(ir_entity *ent, ir_peculiarity pec) {
assert(ent && ent->kind == k_entity);
/* @@@ why peculiarity only for methods? */
ent->peculiarity = pec;
}
-static INLINE ir_stickyness
+static inline ir_stickyness
_get_entity_stickyness(const ir_entity *ent) {
assert(ent && ent->kind == k_entity);
return ent->stickyness;
}
-static INLINE void
+static inline void
_set_entity_stickyness(ir_entity *ent, ir_stickyness stickyness) {
assert(ent && ent->kind == k_entity);
ent->stickyness = stickyness;
}
-static INLINE int
+static inline int
_is_entity_final(const ir_entity *ent) {
assert(ent && ent->kind == k_entity);
return (int)ent->final;
}
-static INLINE void
+static inline void
_set_entity_final(ir_entity *ent, int final) {
assert(ent && ent->kind == k_entity);
ent->final = final ? 1 : 0;
}
-static INLINE int
+static inline int
_is_entity_compiler_generated(const ir_entity *ent) {
assert(ent && ent->kind == k_entity);
return ent->compiler_gen;
}
-static INLINE void
+static inline void
_set_entity_compiler_generated(ir_entity *ent, int flag) {
assert(ent && ent->kind == k_entity);
ent->compiler_gen = flag ? 1 : 0;
}
-static INLINE int
+static inline int
_is_entity_backend_marked(const ir_entity *ent) {
assert(ent && ent->kind == k_entity);
return ent->backend_marked;
}
-static INLINE void
+static inline void
_set_entity_backend_marked(ir_entity *ent, int flag) {
assert(ent && ent->kind == k_entity);
ent->backend_marked = flag ? 1 : 0;
}
-static INLINE ir_entity_usage
+static inline ir_entity_usage
_get_entity_usage(const ir_entity *ent) {
assert(ent && ent->kind == k_entity);
return ent->usage;
}
-static INLINE void
+static inline void
_set_entity_usage(ir_entity *ent, ir_entity_usage state) {
assert(ent && ent->kind == k_entity);
ent->usage = state;
}
-static INLINE int
+static inline int
_get_entity_offset(const ir_entity *ent) {
assert(ent && ent->kind == k_entity);
return ent->offset;
}
-static INLINE void
+static inline void
_set_entity_offset(ir_entity *ent, int offset) {
assert(ent && ent->kind == k_entity);
ent->offset = offset;
}
-static INLINE unsigned char
+static inline unsigned char
_get_entity_offset_bits_remainder(const ir_entity *ent) {
assert(ent && ent->kind == k_entity);
return ent->offset_bit_remainder;
}
-static INLINE void
+static inline void
_set_entity_offset_bits_remainder(ir_entity *ent, unsigned char offset) {
assert(ent && ent->kind == k_entity);
ent->offset_bit_remainder = offset;
}
-static INLINE void *
+static inline void *
_get_entity_link(const ir_entity *ent) {
assert(ent && ent->kind == k_entity);
return ent->link;
}
-static INLINE void
+static inline void
_set_entity_link(ir_entity *ent, void *l) {
assert(ent && ent->kind == k_entity);
ent->link = l;
}
-static INLINE ir_graph *
+static inline ir_graph *
_get_entity_irg(const ir_entity *ent) {
ir_graph *irg;
assert(ent && ent->kind == k_entity);
return irg;
}
-static INLINE ir_visited_t
+static inline ir_visited_t
_get_entity_visited(ir_entity *ent) {
assert(ent && ent->kind == k_entity);
return ent->visit;
}
-static INLINE void
+static inline void
_set_entity_visited(ir_entity *ent, ir_visited_t num) {
assert(ent && ent->kind == k_entity);
ent->visit = num;
}
-static INLINE void
+static inline void
_mark_entity_visited(ir_entity *ent) {
assert(ent && ent->kind == k_entity);
ent->visit = firm_type_visited;
}
-static INLINE int
+static inline int
_entity_visited(ir_entity *ent) {
return _get_entity_visited(ent) >= firm_type_visited;
}
-static INLINE int
+static inline int
_entity_not_visited(ir_entity *ent) {
return _get_entity_visited(ent) < firm_type_visited;
}
-static INLINE ir_type *
+static inline ir_type *
_get_entity_repr_class(const ir_entity *ent) {
assert(ent && ent->kind == k_entity);
return ent->repr_class;
}
-static INLINE dbg_info *
+static inline dbg_info *
_get_entity_dbg_info(const ir_entity *ent) {
return ent->dbi;
}
-static INLINE void
+static inline void
_set_entity_dbg_info(ir_entity *ent, dbg_info *db) {
ent->dbi = db;
}
* inline functions *
* -----------------*/
-static INLINE tp_opcode
+static inline tp_opcode
_get_tpop_code(const tp_op *op) {
return op->code;
}
-static INLINE ident *
+static inline ident *
_get_tpop_ident(const tp_op *op){
return op->name;
}
-static INLINE size_t
+static inline size_t
_get_tpop_attr_size(const tp_op *op) {
return op->attr_size;
}
/**
* calculate the hash value of an tr_inh_trans_tp
*/
-static INLINE unsigned int tr_inh_trans_hash(const tr_inh_trans_tp *v) {
+static inline unsigned int tr_inh_trans_hash(const tr_inh_trans_tp *v) {
return HASH_PTR(v->kind);
}
extern ir_visited_t firm_type_visited;
-static INLINE void _set_master_type_visited(ir_visited_t val) { firm_type_visited = val; }
-static INLINE ir_visited_t _get_master_type_visited(void) { return firm_type_visited; }
-static INLINE void _inc_master_type_visited(void) { ++firm_type_visited; }
+static inline void _set_master_type_visited(ir_visited_t val) { firm_type_visited = val; }
+static inline ir_visited_t _get_master_type_visited(void) { return firm_type_visited; }
+static inline void _inc_master_type_visited(void) { ++firm_type_visited; }
-static INLINE void *
+static inline void *
_get_type_link(const ir_type *tp) {
assert(tp && tp->kind == k_type);
return(tp -> link);
}
-static INLINE void
+static inline void
_set_type_link(ir_type *tp, void *l) {
assert(tp && tp->kind == k_type);
tp -> link = l;
}
-static INLINE const tp_op*
+static inline const tp_op*
_get_type_tpop(const ir_type *tp) {
assert(tp && tp->kind == k_type);
return tp->type_op;
}
-static INLINE ident*
+static inline ident*
_get_type_tpop_nameid(const ir_type *tp) {
assert(tp && tp->kind == k_type);
return get_tpop_ident(tp->type_op);
}
-static INLINE tp_opcode
+static inline tp_opcode
_get_type_tpop_code(const ir_type *tp) {
assert(tp && tp->kind == k_type);
return get_tpop_code(tp->type_op);
}
-static INLINE ir_mode *
+static inline ir_mode *
_get_type_mode(const ir_type *tp) {
assert(tp && tp->kind == k_type);
return tp->mode;
}
-static INLINE ident *
+static inline ident *
_get_type_ident(const ir_type *tp) {
assert(tp && tp->kind == k_type);
return tp->name;
}
-static INLINE void
+static inline void
_set_type_ident(ir_type *tp, ident* id) {
assert(tp && tp->kind == k_type);
tp->name = id;
}
-static INLINE unsigned
+static inline unsigned
_get_type_size_bytes(const ir_type *tp) {
assert(tp && tp->kind == k_type);
return tp->size;
}
-static INLINE ir_type_state
+static inline ir_type_state
_get_type_state(const ir_type *tp) {
assert(tp && tp->kind == k_type);
return tp->flags & tf_layout_fixed ? layout_fixed : layout_undefined;
}
-static INLINE ir_visited_t
+static inline ir_visited_t
_get_type_visited(const ir_type *tp) {
assert(tp && tp->kind == k_type);
return tp->visit;
}
-static INLINE void
+static inline void
_set_type_visited(ir_type *tp, ir_visited_t num) {
assert(tp && tp->kind == k_type);
tp->visit = num;
}
-static INLINE void
+static inline void
_mark_type_visited(ir_type *tp) {
assert(tp && tp->kind == k_type);
assert(tp->visit < firm_type_visited);
tp->visit = firm_type_visited;
}
-static INLINE int
+static inline int
_type_visited(const ir_type *tp) {
assert(tp && tp->kind == k_type);
return tp->visit >= firm_type_visited;
}
-static INLINE int
+static inline int
_type_not_visited(const ir_type *tp) {
assert(tp && tp->kind == k_type);
return tp->visit < firm_type_visited;
}
-static INLINE dbg_info *
+static inline dbg_info *
_get_type_dbg_info(const ir_type *tp) {
return tp->dbi;
}
-static INLINE void
+static inline void
_set_type_dbg_info(ir_type *tp, dbg_info *db) {
tp->dbi = db;
}
-static INLINE int
+static inline int
_is_type(const void *thing) {
return (get_kind(thing) == k_type);
}
-static INLINE int
+static inline int
_is_class_type(const ir_type *clss) {
assert(clss);
return (clss->type_op == type_class);
}
-static INLINE int
+static inline int
_get_class_n_members (const ir_type *clss) {
assert(clss && (clss->type_op == type_class));
return (ARR_LEN (clss->attr.ca.members));
}
-static INLINE ir_entity *
+static inline ir_entity *
_get_class_member (const ir_type *clss, int pos) {
assert(clss && (clss->type_op == type_class));
assert(pos >= 0 && pos < _get_class_n_members(clss));
return clss->attr.ca.members[pos];
}
-static INLINE unsigned
+static inline unsigned
_get_class_vtable_size(const ir_type *clss) {
assert(clss && (clss->type_op == type_class));
return clss->attr.ca.vtable_size;
}
-static INLINE void
+static inline void
_set_class_vtable_size(ir_type *clss, unsigned vtable_size) {
assert(clss && (clss->type_op == type_class));
clss->attr.ca.vtable_size = vtable_size;
}
-static INLINE int
+static inline int
_is_class_final(const ir_type *clss) {
assert(clss && (clss->type_op == type_class));
return clss->attr.ca.clss_flags & cf_final_class;
}
-static INLINE void
+static inline void
_set_class_final(ir_type *clss, int final) {
assert(clss && (clss->type_op == type_class));
+/* BUGFIX: setter must SET the flag when 'final' is true and clear it
+ * otherwise; the old code cleared it when 'final' was true and was a
+ * no-op when false (mirror of _is_class_final()). */
if (final)
-clss->attr.ca.clss_flags &= ~cf_final_class;
+clss->attr.ca.clss_flags |= cf_final_class;
+else
+clss->attr.ca.clss_flags &= ~cf_final_class;
}
-static INLINE int
+static inline int
_is_class_interface(const ir_type *clss) {
assert(clss && (clss->type_op == type_class));
return clss->attr.ca.clss_flags & cf_interface_class;
}
-static INLINE void
+static inline void
_set_class_interface(ir_type *clss, int final) {
assert(clss && (clss->type_op == type_class));
+/* BUGFIX: set cf_interface_class when the argument is true, clear it
+ * otherwise; the old code inverted this (cleared on true, no-op on
+ * false), so _is_class_interface() could never observe a set flag. */
if (final)
-clss->attr.ca.clss_flags &= ~cf_interface_class;
+clss->attr.ca.clss_flags |= cf_interface_class;
+else
+clss->attr.ca.clss_flags &= ~cf_interface_class;
}
-static INLINE int
+static inline int
_is_class_abstract(const ir_type *clss) {
assert(clss && (clss->type_op == type_class));
return clss->attr.ca.clss_flags & cf_absctract_class;
}
-static INLINE void
+static inline void
_set_class_abstract(ir_type *clss, int final) {
assert(clss && (clss->type_op == type_class));
+/* BUGFIX: set cf_absctract_class when the argument is true, clear it
+ * otherwise; the old code cleared on true and ignored false.
+ * NOTE(review): 'cf_absctract_class' is misspelled but is the
+ * project-declared enum name — renaming it would break other users. */
if (final)
-clss->attr.ca.clss_flags &= ~cf_absctract_class;
+clss->attr.ca.clss_flags |= cf_absctract_class;
+else
+clss->attr.ca.clss_flags &= ~cf_absctract_class;
}
-static INLINE int
+static inline int
_is_struct_type(const ir_type *strct) {
assert(strct);
return (strct->type_op == type_struct);
}
-static INLINE int
+static inline int
_is_method_type(const ir_type *method) {
assert(method);
return (method->type_op == type_method);
}
-static INLINE int
+static inline int
_is_union_type(const ir_type *uni) {
assert(uni);
return (uni->type_op == type_union);
}
-static INLINE int
+static inline int
_is_array_type(const ir_type *array) {
assert(array);
return (array->type_op == type_array);
}
-static INLINE int
+static inline int
_is_enumeration_type(const ir_type *enumeration) {
assert(enumeration);
return (enumeration->type_op == type_enumeration);
}
-static INLINE int
+static inline int
_is_pointer_type(const ir_type *pointer) {
assert(pointer);
return (pointer->type_op == type_pointer);
}
/** Returns true if a type is a primitive type. */
-static INLINE int
+static inline int
_is_primitive_type(const ir_type *primitive) {
assert(primitive && primitive->kind == k_type);
return (primitive->type_op == type_primitive);
}
-static INLINE int
+static inline int
_is_atomic_type(const ir_type *tp) {
assert(tp && tp->kind == k_type);
return (_is_primitive_type(tp) || _is_pointer_type(tp) ||
_is_enumeration_type(tp));
}
-static INLINE int
+static inline int
_get_method_n_params(const ir_type *method) {
assert(method && (method->type_op == type_method));
return method->attr.ma.n_params;
}
-static INLINE int
+static inline int
_get_method_n_ress(const ir_type *method) {
assert(method && (method->type_op == type_method));
return method->attr.ma.n_res;
}
-static INLINE unsigned
+static inline unsigned
_get_method_additional_properties(const ir_type *method) {
assert(method && (method->type_op == type_method));
return method->attr.ma.additional_properties;
}
-static INLINE void
+static inline void
_set_method_additional_properties(ir_type *method, unsigned mask) {
assert(method && (method->type_op == type_method));
method->attr.ma.additional_properties = mask & ~mtp_property_inherited;
}
-static INLINE void
+static inline void
_set_method_additional_property(ir_type *method, mtp_additional_property flag) {
assert(method && (method->type_op == type_method));
method->attr.ma.additional_properties |= flag & ~mtp_property_inherited;
}
-static INLINE unsigned
+static inline unsigned
_get_method_calling_convention(const ir_type *method) {
assert(method && (method->type_op == type_method));
return method->attr.ma.irg_calling_conv;
}
-static INLINE void
+static inline void
_set_method_calling_convention(ir_type *method, unsigned cc_mask) {
assert(method && (method->type_op == type_method));
method->attr.ma.irg_calling_conv = cc_mask;
panic("%s:%d: Invalid tarval (null)", file, line);
}
#ifdef __GNUC__
-INLINE static void tarval_verify(tarval *tv) __attribute__ ((unused));
+inline static void tarval_verify(tarval *tv) __attribute__ ((unused));
#endif
-INLINE static void tarval_verify(tarval *tv)
+inline static void tarval_verify(tarval *tv)
{
assert(tv);
assert(tv->mode);
/*
* Access routines for tarval fields ========================================
*/
-static INLINE ir_mode *
+static inline ir_mode *
_get_tarval_mode(const tarval *tv) {
assert(tv);
return tv->mode;
}
-static INLINE tarval *
+static inline tarval *
_get_tarval_bad(void) {
return tarval_bad;
}
-static INLINE tarval *
+static inline tarval *
_get_tarval_undefined(void) {
return tarval_undefined;
}
-static INLINE tarval *
+static inline tarval *
_get_tarval_b_false(void) {
return tarval_b_false;
}
-static INLINE tarval *
+static inline tarval *
_get_tarval_b_true(void) {
return tarval_b_true;
}
-static INLINE tarval *
+static inline tarval *
_get_tarval_reachable(void) {
return tarval_reachable;
}
-static INLINE tarval *
+static inline tarval *
_get_tarval_unreachable(void) {
return tarval_unreachable;
}
-static INLINE int
+static inline int
_is_tarval(const void *thing) {
return get_kind(thing) == k_tarval;
}
/* Define the right const token */
/* #undef const */
-#define INLINE __inline
+#define inline __inline
/* map some non-POSIX names for Win32 */
#define snprintf _snprintf