#endif
}
-/**
- * Analyze how pointer arguments of a given
- * ir graph are accessed.
- *
- * @param irg The ir graph to analyze.
- */
/**
 * Analyze how pointer arguments of a given ir graph are accessed.
 *
 * @param irg  The ir graph to analyze.
 */
void analyze_irg_args(ir_graph *irg)
{
	/* BUG FIX: 'ent' was used uninitialized; fetch the entity that
	   belongs to the graph before analyzing its arguments. */
	ir_entity *ent = get_irg_entity(irg);
	analyze_ent_args(ent);
}
-/*
- * Compute for a method with pointer parameter(s)
- * if they will be read or written.
- */
ptr_access_kind get_method_param_access(ir_entity *ent, size_t pos)
{
#ifndef NDEBUG
}
}
-/*
- * Returns for a method the 'weight' that every parameter
- * has on optimization possibility. Higher values allows
- * higher optimization with procedure cloning.
- *
- * The values are calculation on demand only.
- *
- * @param ent the entity to analyze
- * @param pos the argument number
- *
- * @return the parameter weight or null_weight if pos is greater
- * than the number of arguments.
- */
unsigned get_method_param_weight(ir_entity *ent, size_t pos)
{
if (ent->attr.mtd_attr.param_weight) {
return null_weight;
}
-/**
- * Analyze argument's weight of a given
- * ir graph.
- *
- * @param irg The ir graph to analyze.
- */
void analyze_irg_args_weight(ir_graph *irg)
{
ir_entity *entity = get_irg_entity(irg);
static inline int cg_irg_visited (ir_graph *n);
static inline void mark_cg_irg_visited(ir_graph *n);
-/** Returns the callgraph state of the program representation. */
/* Returns the callgraph state of the whole program representation. */
irp_callgraph_state get_irp_callgraph_state(void)
{
return irp->callgraph_state;
}
-/* Sets the callgraph state of the program representation. */
/* Sets the callgraph state of the whole program representation. */
void set_irp_callgraph_state(irp_callgraph_state s)
{
irp->callgraph_state = s;
}
-/* Returns the number of procedures that call the given irg. */
size_t get_irg_n_callers(const ir_graph *irg)
{
assert(irg->callers);
return irg->callers ? ARR_LEN(irg->callers) : 0;
}
-/* Returns the caller at position pos. */
/* Returns the caller at position pos, or NULL if no caller array exists. */
ir_graph *get_irg_caller(const ir_graph *irg, size_t pos)
{
	assert(pos < get_irg_n_callers(irg));
	if (irg->callers == NULL)
		return NULL;
	return irg->callers[pos];
}
-/* Returns non-zero if the caller at position pos is "a backedge", i.e. a recursion. */
int is_irg_caller_backedge(const ir_graph *irg, size_t pos)
{
assert(pos < get_irg_n_callers(irg));
}
}
-/* Returns non-zero if the irg has a backedge caller. */
int has_irg_caller_backedge(const ir_graph *irg)
{
size_t i, n_callers = get_irg_n_callers(irg);
return 0;
}
-/* Returns the maximal loop depth of call nodes that call along this edge. */
size_t get_irg_caller_loop_depth(const ir_graph *irg, size_t pos)
{
ir_graph *caller = get_irg_caller(irg, pos);
return get_irg_callee_loop_depth(caller, pos_callee);
}
-
-/* Returns the number of procedures that are called by the given irg. */
size_t get_irg_n_callees(const ir_graph *irg)
{
assert(irg->callees);
return irg->callees ? ARR_LEN(irg->callees) : 0;
}
-/* Returns the callee at position pos. */
/* Returns the callee at position pos, or NULL if no callee array exists. */
ir_graph *get_irg_callee(const ir_graph *irg, size_t pos)
{
	assert(pos < get_irg_n_callees(irg));
	if (irg->callees == NULL)
		return NULL;
	return irg->callees[pos]->irg;
}
-/* Returns non-zero if the callee at position pos is "a backedge", i.e. a recursion. */
/* Returns non-zero if the callee at position pos is a backedge,
   i.e. a recursion.  Without a backedge bitset nothing is a backedge. */
int is_irg_callee_backedge(const ir_graph *irg, size_t pos)
{
	assert(pos < get_irg_n_callees(irg));
	if (irg->callee_isbe == NULL)
		return 0;
	return rbitset_is_set(irg->callee_isbe, pos);
}
-/* Returns non-zero if the irg has a backedge callee. */
int has_irg_callee_backedge(const ir_graph *irg)
{
size_t i, n_callees = get_irg_n_callees(irg);
rbitset_set(irg->callee_isbe, pos);
}
-/* Returns the maximal loop depth of call nodes that call along this edge. */
size_t get_irg_callee_loop_depth(const ir_graph *irg, size_t pos)
{
assert(pos < get_irg_n_callees(irg));
}
-/* --------------------- Compute the callgraph ------------------------ */
-
/**
* Pre-Walker called by compute_callgraph(), analyses all Call nodes.
*/
return e1 != e2;
}
-
-/* Construct and destruct the callgraph. */
void compute_callgraph(void)
{
size_t i, n_irgs;
set_irp_callgraph_state(irp_callgraph_consistent);
}
-/* Destruct the callgraph. */
void free_callgraph(void)
{
size_t i, n_irgs = get_irp_n_irgs();
set_irp_callgraph_state(irp_callgraph_none);
}
-/* ----------------------------------------------------------------------------------- */
-/* A walker for the callgraph */
-/* ----------------------------------------------------------------------------------- */
-
static void do_walk(ir_graph *irg, callgraph_walk_func *pre, callgraph_walk_func *post, void *env)
{
}
}
-/* ----------------------------------------------------------------------------------- */
-/* loop construction algorithm */
-/* ----------------------------------------------------------------------------------- */
-
static ir_graph *outermost_ir_graph; /**< The outermost graph the scc is computed
for */
static ir_loop *current_loop; /**< Current cfloop construction is working
static size_t current_dfn = 1; /**< Counter to generate depth first numbering
of visited nodes. */
-/*-----------------*/
-/* Node attributes */
-/*-----------------*/
-
typedef struct scc_info {
size_t dfn; /**< Depth first search number. */
size_t uplink; /**< dfn number of ancestor. */
return info->dfn;
}
-/**********************************************************************/
-/* A stack. **/
-/**********************************************************************/
-
static ir_graph **stack = NULL;
static size_t tos = 0; /**< top of stack */
}
}
-/**********************************************************************/
-/* The loop data structure. **/
-/**********************************************************************/
-
/**
* Allocates a new loop as son of current_loop. Sets current_loop
* to the new loop and returns the father.
}
-/**********************************************************************/
-/* Constructing and destructing the loop/backedge information. **/
-/**********************************************************************/
-
-/* Initialization steps. **********************************************/
-
static void init_scc(struct obstack *obst)
{
size_t i, n_irgs;
return get_irg_callee(m, res_index);
}
-/*-----------------------------------------------------------*
- * The core algorithm. *
- *-----------------------------------------------------------*/
-
-
static void cgscc(ir_graph *n)
{
size_t i, n_callees;
}
}
-/* ----------------------------------------------------------------------------------- */
-/* The recursion stuff driver. */
-/* ----------------------------------------------------------------------------------- */
-
-/* Compute the backedges that represent recursions. */
void find_callgraph_recursions(void)
{
size_t i, n_irgs;
irp->callgraph_state = irp_callgraph_and_calltree_consistent;
}
-/* Returns the maximal loop depth of all paths from an external visible method to
- this irg. */
size_t get_irg_loop_depth(const ir_graph *irg)
{
assert(irp->callgraph_state == irp_callgraph_consistent ||
return irg->callgraph_loop_depth;
}
-/* Returns the maximal recursion depth of all paths from an external visible method to
- this irg. */
/* Returns the maximal recursion depth of all paths from an externally
   visible method to this irg.  Requires a consistent calltree. */
size_t get_irg_recursion_depth(const ir_graph *irg)
{
assert(irp->callgraph_state == irp_callgraph_and_calltree_consistent);
return irg->callgraph_recursion_depth;
}
-/* Computes the interprocedural loop nesting information. */
void analyse_loop_nesting_depth(void)
{
/* establish preconditions. */
return _get_cdep_next(cdep);
}
-/* Return a list of all control dependences of a block. */
ir_cdep *find_cdep(const ir_node *block)
{
assert(is_Block(block));
entities = NULL;
}
-/*--------------------------------------------------------------------------*/
-/* Freeing the callee arrays. */
-/*--------------------------------------------------------------------------*/
-
static void destruct_walker(ir_node * node, void * env)
{
(void) env;
}
}
-/*--------------------------------------------------------------------------*/
-/* Main drivers. */
-/*--------------------------------------------------------------------------*/
-
size_t cgana(ir_entity ***free_methods)
{
size_t length;
}
}
-/* Optimize the address expressions passed to call nodes.
- *
- * This optimization performs the following transformations for
- * all ir graphs:
- * - All SymConst operations that refer to intern methods are replaced
- * by Const operations referring to the corresponding entity.
- * - Sel nodes, that select entities that are not overwritten are
- * replaced by Const nodes referring to the selected entity.
- * - Sel nodes, for which no method exists at all are replaced by Bad
- * nodes.
- * - Sel nodes with a pointer input that is an Alloc node are replaced
- * by Const nodes referring to the entity that implements the method in
- * the type given by the Alloc node.
- */
void opt_call_addrs(void)
{
+ /* Optimize the address expressions passed to call nodes.
+ *
+ * This optimization performs the following transformations for
+ * all ir graphs:
+ * - All SymConst operations that refer to intern methods are replaced
+ * by Const operations referring to the corresponding entity.
+ * - Sel nodes, that select entities that are not overwritten are
+ * replaced by Const nodes referring to the selected entity.
+ * - Sel nodes, for which no method exists at all are replaced by Bad
+ * nodes.
+ * - Sel nodes with a pointer input that is an Alloc node are replaced
+ * by Const nodes referring to the entity that implements the method in
+ * the type given by the Alloc node.
+ */
sel_methods_init();
sel_methods_dispose();
}
return false;
}
-/**
- * Check, if one node can be reached from another one, according to data
- * dependence.
- */
int heights_reachable_in_block(ir_heights_t *h, const ir_node *n,
const ir_node *m)
{
assert(legal_backarray(n));
}
-/* Returns non-zero if the predecessor pos is a backedge. */
int is_backedge(const ir_node *n, int pos)
{
bitset_t *ba = get_backarray(n);
return 0;
}
-/* Remarks that edge pos is a backedge. */
/* Remarks that predecessor edge pos of n is a backedge. */
void set_backedge(ir_node *n, int pos)
{
bitset_t *ba = get_backarray(n);
bitset_set(ba, pos);
}
-/* Remarks that edge pos is a backedge. */
/* Remarks that predecessor edge pos of n is NOT a backedge. */
void set_not_backedge(ir_node *n, int pos)
{
bitset_t *ba = get_backarray(n);
bitset_clear(ba, pos);
}
-/* Returns non-zero if n has backedges. */
int has_backedges(const ir_node *n)
{
bitset_t *ba = get_backarray(n);
return 0;
}
-/** Sets all backedge information to zero. */
void clear_backedges(ir_node *n)
{
bitset_t *ba = get_backarray(n);
}
}
-/* Allocate a new backedge array on the obstack for given size. */
/* Allocates a new (zero-initialized) backedge bitset of the given size
   on the obstack. */
bitset_t *new_backedge_arr(struct obstack *obst, size_t size)
{
return bitset_obstack_alloc(obst, size);
}
}
-/* Constructs control flow backedge information for irg. */
int construct_cf_backedges(ir_graph *irg)
{
ir_graph *rem = current_ir_graph;
perform_irg_optimization(irg, &opt_confirms);
}
-/* Construct a pass. */
ir_graph_pass_t *construct_confirms_pass(const char *name)
{
return def_graph_pass(name ? name : "confirm", construct_confirms);
exchange(n, value);
}
-/*
- * Remove all Confirm nodes from a graph.
- */
/* Removes all Confirm nodes from a graph by walking it with the
   remove_confirm post-visitor. */
void remove_confirms(ir_graph *irg)
{
irg_walk_graph(irg, NULL, remove_confirm, NULL);
}
-/* Construct a pass. */
ir_graph_pass_t *remove_confirms_pass(const char *name)
{
return def_graph_pass(name ? name : "rem_confirm", remove_confirms);
#define get_dom_info(bl) (&(bl)->attr.block.dom)
#define get_pdom_info(bl) (&(bl)->attr.block.pdom)
-/*--------------------------------------------------------------------*/
-/** Accessing the dominator and post dominator data structures **/
-/*--------------------------------------------------------------------*/
-
ir_node *get_Block_idom(const ir_node *bl)
{
assert(is_Block(bl));
return get_pdom_info(bl)->max_subtree_pre_num;
}
-/* Check, if a block dominates another block. */
int block_dominates(const ir_node *a, const ir_node *b)
{
const ir_dom_info *ai, *bi;
return 0;
}
-/* Check, if a block strictly dominates another block. */
/* Checks whether block a strictly dominates block b, i.e. a dominates b
   and a != b. */
int block_strictly_dominates(const ir_node *a, const ir_node *b)
{
	if (a == b)
		return 0;
	return block_dominates(a, b) != 0;
}
-/* Returns the smallest common dominator block of two nodes. */
ir_node *node_smallest_common_dominator(ir_node *a, ir_node *b)
{
ir_node *bl_a = is_Block(a) ? a : get_nodes_block(a);
return dom_bl;
}
-/* Returns the smallest common dominator block of all users of a node. */
ir_node *node_users_smallest_common_dominator(ir_node *irn, int handle_phi)
{
int n, j, i = 0, success;
return dom_bl;
}
-
/* Get the first node in the list of nodes dominated by a given block. */
/* Get the first node in the list of nodes dominated by a given block. */
ir_node *get_Block_dominated_first(const ir_node *bl)
{
return get_dom_info(bl)->first;
}
-/* Get the next node in a list of nodes which are dominated by some
- * other node. */
/* Get the next node in a list of nodes which are dominated by some
   other node. */
ir_node *get_Block_dominated_next(const ir_node *bl)
{
assert(is_Block(bl));
return get_dom_info(bl)->next;
}
-/* Check, if a block post dominates another block. */
int block_postdominates(const ir_node *a, const ir_node *b)
{
const ir_dom_info *ai, *bi;
return 0;
}
-/* Check, if a block strictly dominates another block. */
/* Checks whether block a strictly post-dominates block b, i.e. a
   post-dominates b and a != b. */
int block_strictly_postdominates(const ir_node *a, const ir_node *b)
{
	if (a == b)
		return 0;
	return block_postdominates(a, b) != 0;
}
-
-/* Get the first node in the list of nodes post dominated by a given block. */
/* Get the first node in the list of nodes post-dominated by a given block. */
ir_node *get_Block_postdominated_first(const ir_node *bl)
{
assert(is_Block(bl));
return get_pdom_info(bl)->first;
}
-/* Get the next node in a list of nodes which are post dominated by some
- * other node. */
/* Get the next node in a list of nodes which are post-dominated by some
   other node. */
ir_node *get_Block_postdominated_next(const ir_node *bl)
{
assert(is_Block(bl));
return get_pdom_info(bl)->next;
}
-/* Visit all nodes in the dominator subtree of a given node. */
void dom_tree_walk(ir_node *bl, irg_walk_func *pre,
irg_walk_func *post, void *env)
{
post(bl, env);
}
-/* Visit all nodes in the post dominator subtree of a given node. */
void postdom_tree_walk(ir_node *bl, irg_walk_func *pre,
irg_walk_func *post, void *env)
{
post(bl, env);
}
-/* Walk over the dominator tree of an irg starting at the root. */
void dom_tree_walk_irg(ir_graph *irg, irg_walk_func *pre,
irg_walk_func *post, void *env)
{
dom_tree_walk(root, pre, post, env);
}
-/* Walk over the post dominator tree of an irg starting at the root. */
void postdom_tree_walk_irg(ir_graph *irg, irg_walk_func *pre,
irg_walk_func *post, void *env)
{
assert(bi->max_subtree_pre_num >= bi->tree_pre_num);
}
-/*--------------------------------------------------------------------*/
-/* Building and Removing the dominator data structure */
-/*--------------------------------------------------------------------*/
-
/**
* count the number of blocks and clears the post dominance info
*/
set_Block_dom_depth(bl, -1);
}
-/* Computes the dominator trees. */
void compute_doms(ir_graph *irg)
{
ir_graph *rem = current_ir_graph;
but better call it anyways... */
}
-/* Computes the post dominator trees. */
void compute_postdoms(ir_graph *irg)
{
ir_graph *rem = current_ir_graph;
}
}
-/*
- * Compute the extended basic blocks for a graph
- */
void compute_extbb(ir_graph *irg)
{
env_t env;
set_irg_state(irg, IR_GRAPH_STATE_VALID_EXTENDED_BLOCKS);
}
-/* free all extended block info. */
void free_extbb(ir_graph *irg)
{
if (irg->extbb_obst) {
clear_irg_state(irg, IR_GRAPH_STATE_VALID_EXTENDED_BLOCKS);
}
-/* Return the extended block of a node. */
/* Returns the extended block a node belongs to; for a Block node that
   is its own extended block. */
ir_extblk *get_nodes_extbb(const ir_node *node)
{
	const ir_node *block;

	if (is_Block(node))
		block = node;
	else
		block = get_nodes_block(node);
	return get_Block_extbb(block);
}
-/* Gets the visited counter of an extended block. */
/* Gets the visited counter of an extended block.  Out-of-line definition;
   the parentheses around the name suppress a possible macro expansion. */
ir_visited_t (get_extbb_visited)(const ir_extblk *blk)
{
return _get_extbb_visited(blk);
}
-/* Sets the visited counter of an extended block. */
/* Sets the visited counter of an extended block. */
void (set_extbb_visited)(ir_extblk *blk, ir_visited_t visited)
{
_set_extbb_visited(blk, visited);
}
-/* Mark an extended block as visited in a graph. */
/* Marks an extended block as visited in the current graph traversal. */
void (mark_extbb_visited)(ir_extblk *blk)
{
_mark_extbb_visited(blk);
}
-/* Returns non-zero if an extended was visited. */
/* Returns non-zero if the extended block was visited. */
int (extbb_visited)(const ir_extblk *blk)
{
return _extbb_visited(blk);
}
-/* Returns non-zero if an extended block was NOT visited. */
/* Returns non-zero if the extended block was NOT visited. */
int (extbb_not_visited)(const ir_extblk *blk)
{
return _extbb_not_visited(blk);
}
-/* Returns the link field of an extended block. */
/* Returns the link field of an extended block. */
void *(get_extbb_link)(const ir_extblk *blk)
{
return _get_extbb_link(blk);
}
-/* Sets the link field of an extended block. */
/* Sets the link field of an extended block. */
void (set_extbb_link)(ir_extblk *blk, void *link)
{
_set_extbb_link(blk, link);
}
-/* Return the number of basic blocks of an extended block */
/* Returns the number of basic blocks of an extended block. */
int (get_extbb_n_blocks)(const ir_extblk *blk)
{
return _get_extbb_n_blocks(blk);
}
-/* Return the i'th basic block of an extended block */
/* Returns the pos'th basic block of an extended block. */
ir_node *(get_extbb_block)(const ir_extblk *blk, int pos)
{
return _get_extbb_block(blk, pos);
}
-/* Return the leader basis block of an extended block. */
/* Returns the leader basic block of an extended block. */
ir_node *(get_extbb_leader)(const ir_extblk *blk)
{
return _get_extbb_leader(blk);
}
-/* Return the node number of an extended block. */
long get_extbb_node_nr(const ir_extblk *blk)
{
return get_irn_node_nr(get_extbb_leader(blk));
}
}
-/* walks only over extended Block nodes in the graph. Has its own visited
- flag, so that it can be interleaved with the other walker. */
void irg_extblock_walk(ir_extblk *blk, extbb_walk_func *pre, extbb_walk_func *post, void *env)
{
ir_node *pred, *start_bl = get_irg_start_block(current_ir_graph);
pre(start_blk, env);
}
-/* Walks only over reachable Extended Basic Block nodes in the graph. */
void irg_extblock_walk_graph(ir_graph *irg, extbb_walk_func *pre, extbb_walk_func *post, void *env)
{
ir_node *endbl = get_irg_end_block(irg);
}
}
-/*
- * Compute the extended basic blocks for a graph
- */
void compute_extbb_execfreqs(ir_graph *irg, ir_exec_freq *execfreqs)
{
env_t env;
xfree(lv);
}
-/**
- * Check a nodes liveness situation of a block.
- * This routine considers both cases, the live in and end/out case.
- *
- * @param lv The liveness check environment.
- * @param bl The block under investigation.
- * @param var The node to check for.
- * @return A bitmask of lv_chk_state_XXX fields.
- */
unsigned lv_chk_bl_xxx(lv_chk_t *lv, const ir_node *bl, const ir_node *var)
{
int res = 0;
ARR_APP1(loop_element, loop->children, ln);
}
-/**
- * Mature all loops by removing the flexible arrays of a loop.
- *
- * @param loop the loop to mature
- * @param obst an obstack, where the new arrays are allocated on
- */
void mature_loops(ir_loop *loop, struct obstack *obst)
{
size_t i;
}
}
-/* Returns outer loop, itself if outermost. */
/* Returns the outer loop, or the loop itself if it is outermost. */
ir_loop *(get_loop_outer_loop)(const ir_loop *loop)
{
return _get_loop_outer_loop(loop);
}
-/* Returns nesting depth of this loop */
/* Returns the nesting depth of this loop. */
unsigned (get_loop_depth)(const ir_loop *loop)
{
return _get_loop_depth(loop);
}
-/* Returns the number of elements contained in loop. */
size_t get_loop_n_elements(const ir_loop *loop)
{
assert(loop && loop->kind == k_ir_loop);
return(loop -> children[pos]);
}
-/**
- * Sets the loop for a node.
- */
void set_irn_loop(ir_node *n, ir_loop *loop)
{
n->loop = loop;
return _is_ir_loop(thing);
}
-/* The outermost loop is remarked in the surrounding graph. */
/* Remarks the outermost loop in the surrounding graph. */
void (set_irg_loop)(ir_graph *irg, ir_loop *loop)
{
_set_irg_loop(irg, loop);
}
-/* Returns the root loop info (if exists) for an irg. */
/* Returns the root loop info (if it exists) for an irg. */
ir_loop *(get_irg_loop)(const ir_graph *irg)
{
return _get_irg_loop(irg);
}
-/*
- * Allocates a new loop as son of father on the given obstack.
- * If father is equal NULL, a new root loop is created.
- */
ir_loop *alloc_loop(ir_loop *father, struct obstack *obst)
{
ir_loop *son;
/** The global memory disambiguator options. */
static unsigned global_mem_disamgig_opt = aa_opt_no_opt;
-/* Returns a human readable name for an alias relation. */
const char *get_ir_alias_relation_name(ir_alias_relation rel)
{
#define X(a) case a: return #a
#undef X
}
-/* Get the memory disambiguator options for a graph. */
unsigned get_irg_memory_disambiguator_options(const ir_graph *irg)
{
unsigned opt = irg->mem_disambig_opt;
return opt;
}
-/* Set the memory disambiguator options for a graph. */
/* Sets the memory disambiguator options for a graph.  The inherited
   flag is never stored on the graph itself. */
void set_irg_memory_disambiguator_options(ir_graph *irg, unsigned options)
{
	unsigned local_opts = options & ~aa_opt_inherited;
	irg->mem_disambig_opt = local_opts;
}
-/* Set the global disambiguator options for all graphs not having local options. */
/* Sets the global disambiguator options used by all graphs that have
   no local options. */
void set_irp_memory_disambiguator_options(unsigned options)
{
global_mem_disamgig_opt = options;
}
-/* Get the base storage class (ignore modifier) */
ir_storage_class_class_t get_base_sc(ir_storage_class_class_t x)
{
return x & ~ir_sc_modifiers;
return ir_may_alias;
}
-/*
- * Determine the alias relation between two addresses.
- */
ir_alias_relation get_alias_relation(
const ir_node *adr1, const ir_mode *mode1,
const ir_node *adr2, const ir_mode *mode2)
return rel;
}
-/* Set a source language specific memory disambiguator function. */
void set_language_memory_disambiguator(DISAMBIGUATOR_FUNC func)
{
language_disambuigator = func;
p1->mode1 == p2->mode1 && p1->mode2 == p2->mode2;
}
-/**
- * Initialize the relation cache.
- */
/* Initializes the alias-relation cache; 8 is only the initial
   capacity hint for the set. */
void mem_disambig_init(void)
{
result_cache = new_set(cmp_mem_disambig_entry, 8);
}
-/*
- * Determine the alias relation between two addresses.
- */
ir_alias_relation get_alias_relation_ex(
const ir_node *adr1, const ir_mode *mode1,
const ir_node *adr2, const ir_mode *mode2)
return key.result;
}
-/* Free the relation cache. */
void mem_disambig_term(void)
{
if (result_cache != NULL) {
irp->globals_entity_usage_state = ir_entity_usage_computed;
}
-/* Returns the current address taken state of the globals. */
/* Returns the current entity usage (address taken) state of the globals. */
ir_entity_usage_computed_state get_irp_globals_entity_usage_state(void)
{
return irp->globals_entity_usage_state;
}
-/* Sets the current address taken state of the graph. */
/* Sets the current entity usage (address taken) state of the globals. */
void set_irp_globals_entity_usage_state(ir_entity_usage_computed_state state)
{
irp->globals_entity_usage_state = state;
}
-/* Assure that the address taken flag is computed for the globals. */
void assure_irp_globals_entity_usage_computed(void)
{
if (irp->globals_entity_usage_state != ir_entity_usage_not_computed)
}
}
-/* Mark all private methods, i.e. those of which all call sites are known. */
void mark_private_methods(void)
{
size_t i, n;
pmap_destroy(mtp_map);
}
-/* create a pass for mark_private_methods() */
ir_prog_pass_t *mark_private_methods_pass(const char *name)
{
return def_prog_pass(name ? name : "mark_private_methods", mark_private_methods);
#include "irprintf.h"
#include "error.h"
-#ifdef DEBUG_libfirm
-/* Note: ir_node.out_valid and ir_graph.n_outs are only present when DEBUG_libfirm is defined */
-/* Accesses to out_valid and n_outs are fenced out to avoid breakage
- when compiling with neither DEBUG_libfirm or NDEBUG defined */
-#endif /* defined DEBUG_libfirm */
-
/*--------------------------------------------------------------------*/
/** Accessing the out datastructures **/
/*--------------------------------------------------------------------*/
return node->out != NULL;
}
-/* returns the number of successors of the node: */
/* Returns the number of successors of the node.  NOTE(review): the
   count appears to be stored in the pos field of out[0]; successor
   entries start at out[1] — confirm against the out-array layout. */
int get_irn_n_outs(const ir_node *node)
{
assert(node && node->kind == k_ir_node);
return node->out[0].pos;
}
-/* Access successor n */
/* Accesses successor pos of def.  Slot 0 of the out array holds the
   count, so successor pos lives at index pos + 1. */
ir_node *get_irn_out(const ir_node *def, int pos)
{
	assert(pos >= 0);
	assert(pos < get_irn_n_outs(def));
	return def->out[pos + 1].use;
}
-/* Access successor n */
/* Accesses successor pos of def and additionally reports through
   *in_pos at which input of that successor the edge enters. */
ir_node *get_irn_out_ex(const ir_node *def, int pos, int *in_pos)
{
	assert(pos >= 0 && pos < get_irn_n_outs(def));
	/* BUG FIX: the old code assigned the out-parameter POINTER into the
	   edge record and fell off the end of a non-void function.  Report
	   the input position through *in_pos and return the successor. */
	*in_pos = def->out[pos+1].pos;
	return def->out[pos+1].use;
}
-/* Return the number of control flow successors, ignore keep-alives. */
int get_Block_n_cfg_outs(const ir_node *bl)
{
int i, n_cfg_outs = 0;
return n_cfg_outs;
}
-/* Return the number of control flow successors, honor keep-alives. */
int get_Block_n_cfg_outs_ka(const ir_node *bl)
{
int i, n_cfg_outs = 0;
return n_cfg_outs;
}
-/* Access predecessor n, ignore keep-alives. */
ir_node *get_Block_cfg_out(const ir_node *bl, int pos)
{
int i;
return NULL;
}
-/* Access predecessor n, honor keep-alives. */
ir_node *get_Block_cfg_out_ka(const ir_node *bl, int pos)
{
int i, n_outs;
}
}
-/* Walks only over Block nodes in the graph. Has its own visited
- flag, so that it can be interleaved with the other walker. */
void irg_out_block_walk(ir_node *node, irg_walk_func *pre, irg_walk_func *post,
void *env)
{
return free;
}
-/* compute the outs for a given graph */
void compute_irg_outs(ir_graph *irg)
{
ir_graph *rem = current_ir_graph;
}
}
-/* Constructs backedge information for irg. In interprocedural view constructs
- backedges for all methods called by irg, too. */
int construct_backedges(ir_graph *irg)
{
ir_graph *rem = current_ir_graph;
reset_backedges(n);
}
-/** Removes all loop information.
- Resets all backedges */
void free_loop_information(ir_graph *irg)
{
/* We can not use this recursion, as the loop might contain
return 0;
}
-/* Test whether a value is loop invariant.
- *
- * @param n The node to be tested.
- * @param block A block node. We pass the block, not the loop as we must
- * start off with a block loop to find all proper uses.
- *
- * Returns non-zero, if the node n is not changed in the loop block
- * belongs to or in inner loops of this blocks loop. */
int is_loop_invariant(const ir_node *n, const ir_node *block)
{
ir_loop *l = get_irn_loop(block);
#include "irnode_t.h"
#include "pmap.h"
-/* ------------ The map. ---------------------------------------------- */
-
-
static pmap *type_node_map = NULL;
-/* ------------ Auxiliary type. --------------------------------------- */
-
-/* This auxiliary type expresses that a field is uninitialized. The
- * variable is set by init_irtypeinfo. The type is freed by
- * free_irtypeinfo.
- */
ir_type *initial_type = NULL;
-/* ------------ Initializing this module. ----------------------------- */
-
-/* Initializes the type information module.
- * Generates a type "initial_type" and sets the type of all nodes to this type.
- * Calling set/get_irn_type is invalid before calling init. Requires memory
- * in the order of MIN(<calls to set_irn_type>, #irnodes).
- */
void init_irtypeinfo(void)
{
size_t i, n;
}
-/* ------------ Irgraph state handling. ------------------------------- */
-
void set_irg_typeinfo_state(ir_graph *irg, ir_typeinfo_state s)
{
assert(is_ir_graph(irg));
}
-/* Returns accumulated type information state information.
- *
- * Returns ir_typeinfo_consistent if the type information of all irgs is
- * consistent. Returns ir_typeinfo_inconsistent if at least one irg has inconsistent
- * or no type information. Returns ir_typeinfo_none if no irg contains type information.
- */
ir_typeinfo_state get_irp_typeinfo_state(void)
{
return irp->typeinfo_state;
{
irp->typeinfo_state = s;
}
-/* If typeinfo is consistent, sets it to inconsistent. */
void set_irp_typeinfo_inconsistent(void)
{
if (irp->typeinfo_state == ir_typeinfo_consistent)
}
-/* ------------ Irnode type information. ------------------------------ */
-
-/* These routines only work properly if the ir_graph is in state
- * ir_typeinfo_consistent or ir_typeinfo_inconsistent. They
- * assume current_ir_graph set properly.
- */
ir_type *get_irn_typeinfo_type(const ir_node *n)
{
ir_type *res = initial_type;
return n_instances;
}
-/* Cast node that creates an instance of this type */
ir_node *get_type_cast(const ir_type *tp, size_t pos)
{
ir_node **casts;
}
}
-/* compute the trouts data structures. */
void compute_trouts(void)
{
size_t i;
#include "ircons.h"
#include "instrument.h"
-/**
- * Adds a Call at the beginning of the given irg.
- */
void instrument_initcall(ir_graph *irg, ir_entity *ent)
{
const ir_edge_t *edge;
return res;
}
-/* ************************************************************************** */
-
-/*
- * Finalize a Block node, when all control flows are known.
- * Acceptable parameters are only Block nodes.
- */
void mature_immBlock(ir_node *block)
{
size_t n_preds;
return verbosity;
}
-/* Write the irnode and all its attributes to the file passed. */
void dump_irnode_to_file(FILE *F, const ir_node *n)
{
char comma;
return (long)e;
}
-/**
- * Announce to reserve extra space for each edge to be allocated.
- *
- * @param n: Size of the space to reserve
- *
- * @return Offset at which the private data will begin
- *
- * Several users can reserve extra space for private usage.
- * Each user has to remember his given offset and the size of his private data.
- * To be called before FIRM is initialized.
- */
/**
 * Announce to reserve extra space for each edge to be allocated.
 *
 * Several users can reserve extra space for private usage.  Each user
 * has to remember his returned offset and the size of his private data.
 * To be called before FIRM is initialized.
 *
 * @param n  Size of the space to reserve.
 *
 * @return Offset at which the private data will begin.
 */
size_t edges_register_private_data(size_t n)
{
	size_t res = edges_private_size;
	/* BUG FIX: actually reserve the space; without advancing the size
	   every caller would be handed the same offset. */
	edges_private_size += n;
	return res;
}
-/*
- * Reset the user's private data at offset 'offset'
- * The user has to remember his offset and the size of his data!
- * Caution: Using wrong values here can destroy other users private data!
- */
void edges_reset_private_data(ir_graph *irg, int offset, unsigned size)
{
irg_edge_info_t *info = get_irg_edge_info(irg, EDGE_KIND_NORMAL);
#define edge_hash(edge) (TIMES37((edge)->pos) + HASH_PTR((edge)->src))
-/**
- * Initialize the out information for a graph.
- * @note Dead node elimination can call this on an already initialized graph.
- */
void edges_init_graph_kind(ir_graph *irg, ir_edge_kind_t kind)
{
if (edges_activated_kind(irg, kind)) {
}
}
-/**
- * Get the edge object of an outgoing edge at a node.
- * @param irg The graph, the node is in.
- * @param src The node at which the edge originates.
- * @param pos The position of the edge.
- * @param kind The kind of the edge.
- * @return The corresponding edge object or NULL,
- * if no such edge exists.
- */
const ir_edge_t *get_irn_edge_kind(const ir_node *src, int pos, ir_edge_kind_t kind)
{
ir_graph *irg = get_irn_irg(src);
}
}
-/* The edge from (src, pos) -> old_tgt is redirected to tgt */
void edges_notify_edge_kind(ir_node *src, int pos, ir_node *tgt,
ir_node *old_tgt, ir_edge_kind_t kind,
ir_graph *irg)
}
}
-/*
- * Build the initial edge set.
- * Beware, this is not a simple task because it suffers from two
- * difficulties:
- * - the anchor set allows access to Nodes that may not be reachable from
- * the End node
- * - the identities add nodes to the "root set" that are not yet reachable
- * from End. However, after some transformations, the CSE may revival these
- * nodes
- *
- * These problems can be fixed using different strategies:
- * - Add an age flag to every node. Whenever the edge of a node is older
- * then the current edge, invalidate the edges of this node.
- * While this would help for revivaled nodes, it increases memory and runtime.
- * - Delete the identities set.
- * Solves the revival problem, but may increase the memory consumption, as
- * nodes cannot be revivaled at all.
- * - Manually iterate over the identities root set. This did not consume more memory
- * but increase the computation time because the |identities| >= |V|
- *
- * Currently, we use the last option.
- */
void edges_activate_kind(ir_graph *irg, ir_edge_kind_t kind)
{
+ /*
+ * Build the initial edge set.
+ * Beware, this is not a simple task because it suffers from two
+ * difficulties:
+ * - the anchor set allows access to Nodes that may not be reachable from
+ * the End node
+ * - the identities add nodes to the "root set" that are not yet reachable
+ * from End. However, after some transformations, the CSE may revival these
+ * nodes
+ *
+ * These problems can be fixed using different strategies:
+ * - Add an age flag to every node. Whenever the edge of a node is older
+ * then the current edge, invalidate the edges of this node.
+ * While this would help for revivaled nodes, it increases memory and runtime.
+ * - Delete the identities set.
+ * Solves the revival problem, but may increase the memory consumption, as
+ * nodes cannot be revivaled at all.
+ * - Manually iterate over the identities root set. This did not consume more memory
+ * but increase the computation time because the |identities| >= |V|
+ *
+ * Currently, we use the last option.
+ */
struct build_walker w;
irg_edge_info_t *info = get_irg_edge_info(irg, kind);
visitor_info_t visit;
return edges_activated_kind_(irg, kind);
}
-
-/**
- * Reroute all use-edges from a node to another.
- * @param from The node whose use-edges shall be withdrawn.
- * @param to The node to which all the use-edges of @p from shall be
- * sent to.
- * @param irg The graph.
- */
void edges_reroute_kind(ir_node *from, ir_node *to, ir_edge_kind_t kind)
{
ir_graph *irg = get_irn_irg(from);
bitset_free(bs);
}
-/**
- * Verifies the out edges of an irg.
- */
int edges_verify(ir_graph *irg)
{
struct build_walker w;
return 0;
}
-/* Creates an ir_graph pass for edges_verify(). */
ir_graph_pass_t *irg_verify_edges_pass(const char *name, unsigned assert_on_problem)
{
pass_t *pass = XMALLOCZ(pass_t);
#undef I_FLAG
#undef R_FLAG
-/** The bitset of currently running phases. */
optimization_state_t libFIRM_running = 0;
-/* verbose is always off on default */
optimization_state_t libFIRM_verb = 0;
-/* silence warnings */
void set_opt_optimize(int value);
/* an external flag can be set and get from outside */
#undef E_FLAG
#undef R_FLAG
-/* for compatibility reasons */
void set_optimize(int value)
{
set_opt_optimize(value);
return get_opt_optimize();
}
-/* Save the current optimization state. */
void save_optimization_state(optimization_state_t *state)
{
*state = libFIRM_opt;
}
-/* Restore the current optimization state. */
void restore_optimization_state(const optimization_state_t *state)
{
libFIRM_opt = *state;
}
-/* Switches ALL optimizations off */
void all_optimizations_off(void)
{
libFIRM_opt = 0;
}
#ifdef _DEBUG
-/* only for debugging */
void firm_show_flags(FILE *f)
{
if (! f)
#include "irtools.h"
#include "error.h"
-/**
- * Turns a node into a "useless" Tuple. The Tuple just forms a tuple
- * from several inputs.
- * This is useful if a node returning a tuple is removed, but the Projs
- * extracting values from the tuple are not available.
- */
void turn_into_tuple(ir_node *node, int arity)
{
ir_graph *irg = get_irn_irg(node);
set_irn_op(node, op_Tuple);
}
-/**
- * Insert irnode `new' in place of irnode `old'
- * Since `new' may be bigger than `old' replace `old'
- * by an op_Id which is smaller than everything.
- */
void exchange(ir_node *old, ir_node *nw)
{
ir_graph *irg;
| IR_GRAPH_STATE_CONSISTENT_LOOPINFO);
}
-/*--------------------------------------------------------------------*/
-/* Functionality for collect_phis */
-/*--------------------------------------------------------------------*/
-
/**
* Walker: links all Phi nodes to their Blocks lists,
* all Proj nodes to there predecessors.
irg_walk_graph(irg, firm_clear_node_and_phi_links, collect_phiprojs_walker, NULL);
}
-/*--------------------------------------------------------------------*/
-/* Functionality for part_block */
-/*--------------------------------------------------------------------*/
-
/**
* Moves node and all predecessors of node from from_bl to to_bl.
* Does not move predecessors of Phi nodes (or block nodes).
#include "iredges_t.h"
#include "irtools.h"
-/*------------------------------------------------------------------*/
-/* apply optimizations of iropt to all nodes. */
-/*------------------------------------------------------------------*/
-
/**
* A wrapper around optimize_inplace_2() to be called from a walker.
*/
irg_walk(n, firm_clear_link, optimize_in_place_wrapper, NULL);
}
-/* Applies local optimizations (see iropt.h) to all nodes reachable from node n */
void local_optimize_node(ir_node *n)
{
ir_graph *rem = current_ir_graph;
}
}
-/* Applies local optimizations (see iropt.h) to all nodes reachable from node n. */
void local_optimize_graph(ir_graph *irg)
{
ir_graph *rem = current_ir_graph;
/** contains the suffix for frame type names */
static ident *frame_type_suffix = NULL;
-/* initialize the IR graph module */
void firm_init_irgraph(void)
{
frame_type_suffix = new_id_from_str(FRAME_TP_SUFFIX);
free(ptr - additional_graph_data_size);
}
-/**
- * Set the number of locals for a given graph.
- *
- * @param irg the graph
- * @param n_loc number of locals
- */
void irg_set_nloc(ir_graph *res, int n_loc)
{
assert(res->phase_state == phase_building);
}
}
-/* Allocates a list of nodes:
- - The start block containing a start node and Proj nodes for its four
- results (X, M, P, Tuple).
- - The end block containing an end node. This block is not matured after
- new_ir_graph as predecessors need to be added to it.
- - The current block, which is empty and also not matured.
- Further it allocates several datastructures needed for graph construction
- and optimization.
-*/
ir_graph *new_r_ir_graph(ir_entity *ent, int n_loc)
{
ir_graph *res;
return res;
}
-/* Make a rudimentary IR graph for the constant code.
- Must look like a correct irg, spare everything else. */
ir_graph *new_const_code_irg(void)
{
ir_graph *res = alloc_graph();
return (ir_node*) get_irn_link(old_node);
}
-/*
- * Create a new graph that is a copy of a given one.
- */
ir_graph *create_irg_copy(ir_graph *irg)
{
ir_graph *res;
return res;
}
-/* Frees the passed irgraph.
- Deallocates all nodes in this graph and the ir_graph structure.
- Sets the field irgraph in the corresponding entity to NULL.
- Does not remove the irgraph from the list in irprog (requires
- inefficient search, call remove_irp_irg by hand).
- Does not free types, entities or modes that are used only by this
- graph, nor the entity standing for this graph. */
void free_ir_graph(ir_graph *irg)
{
assert(is_ir_graph(irg));
free_graph(irg);
}
-/* access routines for all ir_graph attributes:
- templates:
- {attr type} get_irg_{attribute name} (ir_graph *irg);
- void set_irg_{attr name} (ir_graph *irg, {attr type} {attr}); */
-
/* Returns non-zero if `thing` points to an ir_graph.
 * The parenthesized function name suppresses expansion of a like-named
 * macro (if any), emitting a real linkable function that forwards to the
 * internal is_ir_graph_(). */
int (is_ir_graph)(const void *thing)
{
return is_ir_graph_(thing);
}
#ifdef DEBUG_libfirm
-/* Outputs a unique number for this node */
long get_irg_graph_nr(const ir_graph *irg)
{
return irg->graph_nr;
return irg->n_loc - 1;
}
-/* Returns the obstack associated with the graph. */
/* Returns the obstack associated with the graph.
 * Out-of-line wrapper around the internal get_irg_obstack_(). */
struct obstack *(get_irg_obstack)(const ir_graph *irg)
{
return get_irg_obstack_(irg);
}
-/*
- * Returns true if the node n is allocated on the storage of graph irg.
- *
- * Implementation is GLIBC specific as is uses the internal _obstack_chunk implementation.
- */
int node_is_in_irgs_storage(const ir_graph *irg, const ir_node *n)
{
struct _obstack_chunk *p;
inc_irg_block_visited_(irg);
}
-/* Return the floating point model of this graph. */
/* Returns the floating point model of this graph. */
unsigned (get_irg_fp_model)(const ir_graph *irg)
{
return get_irg_fp_model_(irg);
}
-/* Sets the floating point model for this graph. */
/* Sets the floating point model for this graph. */
void set_irg_fp_model(ir_graph *irg, unsigned model)
{
irg->fp_model = model;
}
-/* set a description for local value n */
/* Sets a description for local value n of the graph.
 * n must lie in [0, irg->n_loc).
 * NOTE(review): assumes irg->loc_descriptions is already allocated —
 * confirm against the call sites. */
void set_irg_loc_description(ir_graph *irg, int n, void *description)
{
assert(0 <= n && n < irg->n_loc);
irg->loc_descriptions[n] = description;
}
-/* get the description for local value n */
void *get_irg_loc_description(ir_graph *irg, int n)
{
assert(0 <= n && n < irg->n_loc);
{
return irg->reserved_resources;
}
-#endif /* NDEBUG */
+#endif
-/* Returns a estimated node count of the irg. */
/* Returns an estimated node count of the irg. */
unsigned (get_irg_estimated_node_cnt)(const ir_graph *irg)
{
return get_irg_estimated_node_cnt_(irg);
}
-/* Returns the last irn index for this graph. */
/* Returns the last node index assigned in this graph. */
unsigned get_irg_last_idx(const ir_graph *irg)
{
return irg->last_node_idx;
}
-/* register additional space in an IR graph */
size_t register_additional_graph_data(size_t size)
{
assert(!forbid_new_data && "Too late to register additional node data");
return cnt;
}
-/**
- * Intraprozedural graph walker.
- *
- * @return number of visited nodes
- */
unsigned irg_walk_2(ir_node *node, irg_walk_func *pre, irg_walk_func *post,
void *env)
{
current_ir_graph = rem;
}
-/*
- * walk over a graph
- */
void irg_walk_graph(ir_graph *irg, irg_walk_func *pre, irg_walk_func *post, void *env)
{
ir_graph * rem = current_ir_graph;
current_ir_graph = rem;
}
-/* Executes irg_walk(end, pre, post, env) for all irgraphs in irprog.
- Sets current_ir_graph properly for each walk. Conserves current
- current_ir_graph. */
void all_irg_walk(irg_walk_func *pre, irg_walk_func *post, void *env)
{
size_t i, n;
}
}
-/***************************************************************************/
-
/**
* specialized version of irg_walk_in_or_dep_2, called if only pre callback exists
*
else return irg_walk_in_or_dep_2_both(node, pre, post, env);
}
-/*
- * Generic graph walker. Follows dependency edges as well.
- */
void irg_walk_in_or_dep(ir_node *node, irg_walk_func *pre, irg_walk_func *post, void *env)
{
assert(is_ir_node(node));
ir_free_resources(current_ir_graph, IR_RESOURCE_IRN_VISITED);
}
-/*
- * Walk over a graph. Follow all edges (including dependencies)
- */
void irg_walk_in_or_dep_graph(ir_graph *irg, irg_walk_func *pre, irg_walk_func *post, void *env)
{
ir_graph * rem = current_ir_graph;
current_ir_graph = rem;
}
-/***************************************************************************/
-
/* Walks back from n until it finds a real cf op. */
static ir_node *get_cf_op(ir_node *n)
{
post(node, env);
}
-
-/* walks only over Block nodes in the graph. Has its own visited
- flag, so that it can be interleaved with the other walker. */
void irg_block_walk(ir_node *node, irg_walk_func *pre, irg_walk_func *post,
void *env)
{
ir_free_resources(irg, IR_RESOURCE_BLOCK_VISITED);
}
-/*
- * walk over a graph block wise
- */
void irg_block_walk_graph(ir_graph *irg, irg_walk_func *pre,
irg_walk_func *post, void *env)
{
current_ir_graph = rem;
}
-/*
- * Additionally walk over all anchors. Do NOT increase the visit flag.
- */
void irg_walk_anchors(ir_graph *irg, irg_walk_func *pre, irg_walk_func *post, void *env)
{
ir_graph * rem = current_ir_graph;
current_ir_graph = rem;
}
-/********************************************************************/
-
typedef struct walk_env {
irg_walk_func *pre;
irg_walk_func *post;
}
}
-/* Walks over all code in const_code_irg. */
void walk_const_code(irg_walk_func *pre, irg_walk_func *post, void *env)
{
walk_env my_env;
#include "irhooks.h"
-/* the hooks */
hook_entry_t *hooks[hook_last];
-/* register a hook */
void register_hook(hook_type_t hook, hook_entry_t *entry)
{
/* check if a hook function is specified. It's a union, so no matter which one */
hooks[hook] = entry;
}
-/* unregister a hook */
void unregister_hook(hook_type_t hook, hook_entry_t *entry)
{
hook_entry_t *p;
return entry->data;
}
-/**
- * Initializes a nodemap iterator. Sets the iterator before the first element in
- * the linked nodemap.
- *
- * @param iterator Pointer to already allocated iterator memory
- * @param nodemap Pointer to the nodemap
- */
void ir_lnk_nodemap_iterator_init(ir_lnk_nodemap_iterator_t *iterator,
const ir_lnk_nodemap_t *nodemap)
{
iterator->nodemap = nodemap;
}
-/**
- * Advances the iterator and returns the current element or NULL if all elements
- * in the linked nodemap have been processed.
- * @attention It is not allowed to use ir_lnk_nodemap_insert or ir_lnk_nodemap_remove while
- * iterating over a nodemap.
- *
- * @param iterator Pointer to the nodemap iterator.
- * @returns Next element in the nodemap or NULL
- */
ir_node *ir_lnk_nodemap_iterator_next(ir_lnk_nodemap_iterator_t *iterator)
{
ir_node *res;
return res;
}
-/**
- * Removes the element the iterator currently points to.
- *
- * @param nodemap Pointer to the linked nodemap
- * @param iterator Pointer to the nodemap iterator.
- */
void ir_lnk_nodemap_remove_iterator(ir_lnk_nodemap_t *nodemap,
ir_lnk_nodemap_iterator_t *iterator)
{
Free(old_entries);
}
-
-/* Inserts a node into a linked nodeset. */
int ir_lnk_nodeset_insert(ir_lnk_nodeset_t *nodeset, ir_node *node)
{
ir_lnk_nodeset_entry_t *entry = ir_lnk_nodeset_insert_(nodeset, node);
return ir_lnk_nodeset_find_(nodeset, node) != NULL;
}
-/**
- * Initializes a nodeset iterator. Sets the iterator before the first element in
- * the linked nodeset.
- *
- * @param iterator Pointer to already allocated iterator memory
- * @param nodeset Pointer to the nodeset
- */
void ir_lnk_nodeset_iterator_init(ir_lnk_nodeset_iterator_t *iterator,
const ir_lnk_nodeset_t *nodeset)
{
iterator->nodeset = nodeset;
}
-/**
- * Advances the iterator and returns the current element or NULL if all elements
- * in the linked nodeset have been processed.
- * @attention It is not allowed to use ir_lnk_nodeset_insert or ir_lnk_nodeset_remove while
- * iterating over a nodeset.
- *
- * @param iterator Pointer to the nodeset iterator.
- * @returns Next element in the nodeset or NULL
- */
ir_node *ir_lnk_nodeset_iterator_next(ir_lnk_nodeset_iterator_t *iterator)
{
ir_node *res;
return res;
}
-/**
- * Removes the element the iterator currently points to.
- *
- * @param nodeset Pointer to the linked nodeset
- * @param iterator Pointer to the nodeset iterator.
- */
void ir_lnk_nodeset_remove_iterator(ir_lnk_nodeset_t *nodeset,
ir_lnk_nodeset_iterator_t *iterator)
{
}
}
-/* * *
- * globals defined in irmode.h
- * * */
-
-/* --- Predefined modes --- */
-
-/* FIRM internal modes: */
ir_mode *mode_T;
ir_mode *mode_X;
ir_mode *mode_M;
ir_mode *mode_ANY;
ir_mode *mode_BAD;
-/* predefined numerical modes: */
ir_mode *mode_F;
ir_mode *mode_D;
ir_mode *mode_Q;
-ir_mode *mode_Bs; /* integral values, signed and unsigned */
-ir_mode *mode_Bu; /* 8 bit */
-ir_mode *mode_Hs; /* 16 bit */
+ir_mode *mode_Bs;
+ir_mode *mode_Bu;
+ir_mode *mode_Hs;
ir_mode *mode_Hu;
-ir_mode *mode_Is; /* 32 bit */
+ir_mode *mode_Is;
ir_mode *mode_Iu;
-ir_mode *mode_Ls; /* 64 bit */
+ir_mode *mode_Ls;
ir_mode *mode_Lu;
-ir_mode *mode_LLs; /* 128 bit */
+ir_mode *mode_LLs;
ir_mode *mode_LLu;
ir_mode *mode_b;
ir_mode *mode_P;
-/* machine specific modes */
-ir_mode *mode_P_code; /**< machine specific pointer mode for code addresses */
-ir_mode *mode_P_data; /**< machine specific pointer mode for data addresses */
-
-/* * *
- * functions defined in irmode.h
- * * */
+ir_mode *mode_P_code;
+ir_mode *mode_P_data;
/* Trivial accessors for the predefined FIRM modes defined above. */
ir_mode *get_modeT(void) { return mode_T; }
ir_mode *get_modeF(void) { return mode_F; }
return register_mode(result);
}
-/* Functions for the direct access to all attributes of an ir_mode */
/* Returns the name ident of the mode (direct attribute access). */
ident *(get_mode_ident)(const ir_mode *mode)
{
return get_mode_ident_(mode);
}
-/* Attribute modulo shift specifies for modes of kind irms_int_number
- * whether shift applies modulo to value of bits to shift. Asserts
- * if mode is not irms_int_number.
- */
unsigned int (get_mode_modulo_shift)(const ir_mode *mode)
{
return get_mode_modulo_shift_(mode);
return get_mode_exponent_size_(mode);
}
-/* Returns true if sm can be converted to lm without loss. */
int smaller_mode(const ir_mode *sm, const ir_mode *lm)
{
int sm_bits, lm_bits;
return 0;
}
-/* Returns true if a value of mode sm can be converted into mode lm
- and backwards without loss. */
int values_in_mode(const ir_mode *sm, const ir_mode *lm)
{
ir_mode_arithmetic arith;
}
}
-/* Return the signed integer equivalent mode for an reference mode. */
/* Returns the signed integer equivalent mode of a reference mode.
 * Asserts that mode is a reference mode. */
ir_mode *get_reference_mode_signed_eq(ir_mode *mode)
{
assert(mode_is_reference(mode));
return mode->eq_signed;
}
-/* Sets the signed integer equivalent mode for an reference mode. */
/* Sets the signed integer equivalent mode of a reference mode.
 * Asserts that ref_mode is a reference mode. */
void set_reference_mode_signed_eq(ir_mode *ref_mode, ir_mode *int_mode)
{
assert(mode_is_reference(ref_mode));
ref_mode->eq_signed = int_mode;
}
-/* Return the unsigned integer equivalent mode for an reference mode. */
/* Returns the unsigned integer equivalent mode of a reference mode.
 * Asserts that mode is a reference mode. */
ir_mode *get_reference_mode_unsigned_eq(ir_mode *mode)
{
assert(mode_is_reference(mode));
return mode->eq_unsigned;
}
-/* Sets the unsigned integer equivalent mode for an reference mode. */
void set_reference_mode_unsigned_eq(ir_mode *ref_mode, ir_mode *int_mode)
{
assert(mode_is_reference(ref_mode));
return register_mode(mode);
}
-/* initialization, build the default modes */
void init_mode(void)
{
obstack_init(&modes);
mode_P_data = mode_P;
}
-/* find a signed mode for an unsigned integer mode */
ir_mode *find_unsigned_mode(const ir_mode *mode)
{
ir_mode n = *mode;
return find_mode(&n);
}
-/* find an unsigned mode for a signed integer mode */
ir_mode *find_signed_mode(const ir_mode *mode)
{
ir_mode n = *mode;
return find_mode(&n);
}
-/* finds a integer mode with 2*n bits for an integer mode with n bits. */
ir_mode *find_double_bits_int_mode(const ir_mode *mode)
{
ir_mode n = *mode;
return find_mode(&n);
}
-/*
- * Returns non-zero if the given mode honors signed zero's, i.e.,
- * a +0 and a -0 exists and handled differently.
- */
int mode_honor_signed_zeros(const ir_mode *mode)
{
/* for floating point, we know that IEEE 754 has +0 and -0,
mode->arithmetic != irma_ieee754;
}
-/*
- * Returns non-zero if the given mode might overflow on unary Minus.
- *
- * This does NOT happen on IEEE 754.
- */
int mode_overflow_on_unary_Minus(const ir_mode *mode)
{
if (mode->sort == irms_float_number)
return 1;
}
-/*
- * Returns non-zero if the mode has a reversed wrap-around
- * logic, especially (a + x) - x == a.
- *
- * This is normally true for integer modes, not for floating
- * point modes.
- */
/* Returns non-zero if the mode has wrap-around semantics,
 * i.e. (a + x) - x == a holds.  Currently true exactly for
 * integer modes, not for floating point modes. */
int mode_wrap_around(const ir_mode *mode)
{
/* FIXME: better would be an extra mode property */
return mode_is_int(mode);
}
-/*
- * Returns non-zero if the cast from mode src to mode dst is a
- * reinterpret cast (ie. only the bit pattern is reinterpreted,
- * no conversion is done)
- */
int is_reinterpret_cast(const ir_mode *src, const ir_mode *dst)
{
ir_mode_arithmetic ma;
*/
static int forbid_new_data = 0;
-/**
- * The amount of additional space for custom data to be allocated upon
- * creating a new node.
- */
unsigned firm_add_node_size = 0;
-/* register new space for every node */
unsigned firm_register_additional_node_data(unsigned size)
{
assert(!forbid_new_data && "Too late to register additional node data");
} s;
};
-/*
- * irnode constructor.
- * Create a new irnode in irg, with an op, mode, arity and
- * some incoming irnodes.
- * If arity is negative, a node with a dynamic array is created.
- */
ir_node *new_ir_node(dbg_info *db, ir_graph *irg, ir_node *block, ir_op *op,
ir_mode *mode, int arity, ir_node *const *in)
{
return res;
}
-/*-- getting some parameters from ir_nodes --*/
-
int (is_ir_node)(const void *thing)
{
return is_ir_node_(thing);
return get_irn_arity_(node);
}
-/* Returns the array with ins. This array is shifted with respect to the
- array accessed by get_irn_n: The block operand is at position 0 not -1.
- (@@@ This should be changed.)
- The order of the predecessors in this array is not guaranteed, except that
- lists of operands as predecessors of Block or arguments of a Call are
- consecutive. */
ir_node **get_irn_in(const ir_node *node)
{
return node->in;
return get_irn_op_(node);
}
-/* should be private to the library: */
void (set_irn_op)(ir_node *node, ir_op *op)
{
set_irn_op_(node, op);
node->attr.except.pin_state = state;
}
-/* Outputs a unique number for this node */
long get_irn_node_nr(const ir_node *node)
{
assert(node);
return -1;
}
-/** manipulate fields of individual nodes **/
-
ir_node *(get_nodes_block)(const ir_node *node)
{
return get_nodes_block_(node);
set_irn_n(node, -1, block);
}
-/* Test whether arbitrary node is frame pointer, i.e. Proj(pn_Start_P_frame_base)
- * from Start. If so returns frame type, else Null. */
ir_type *is_frame_pointer(const ir_node *n)
{
if (is_Proj(n) && (get_Proj_proj(n) == pn_Start_P_frame_base)) {
block->attr.block.extblk = extblk;
}
-/* returns the graph of a Block. */
ir_graph *(get_Block_irg)(const ir_node *block)
{
return get_Block_irg_(block);
add_Block_phi_(block, phi);
}
-/* Get the Block mark (single bit). */
/* Returns the Block mark (a single bit). */
unsigned (get_Block_mark)(const ir_node *block)
{
return get_Block_mark_(block);
}
-/* Set the Block mark (single bit). */
void (set_Block_mark)(ir_node *block, unsigned mark)
{
set_Block_mark_(block, mark);
set_irn_n(end, pos + END_KEEPALIVE_OFFSET, ka);
}
-/* Set new keep-alives */
void set_End_keepalives(ir_node *end, int n, ir_node *in[])
{
size_t e;
clear_irg_state(irg, IR_GRAPH_STATE_CONSISTENT_OUTS);
}
-/* Set new keep-alives from old keep-alives, skipping irn */
void remove_End_keepalive(ir_node *end, ir_node *irn)
{
int n = get_End_n_keepalives(end);
clear_irg_state(irg, IR_GRAPH_STATE_CONSISTENT_OUTS);
}
-/* remove Bads, NoMems and doublets from the keep-alive set */
void remove_End_Bads_and_doublets(ir_node *end)
{
pset_new_t keeps;
node->attr.symc.sym.type_p = tp;
}
-
-/* Only to access SymConst of kind symconst_addr_ent. Else assertion: */
ir_entity *get_SymConst_entity(const ir_node *node)
{
assert(is_SymConst(node) && SYMCONST_HAS_ENT(get_SymConst_kind(node)));
set_irn_n(node, pos + BUILTIN_PARAM_OFFSET, param);
}
-/* Returns a human readable string for the ir_builtin_kind. */
const char *get_builtin_kind_name(ir_builtin_kind kind)
{
#define X(a) case a: return #a
node->attr.call.callee_arr = NULL;
}
-/* Checks for upcast.
- *
- * Returns true if the Cast node casts a class type to a super type.
- */
int is_Cast_upcast(ir_node *node)
{
ir_type *totype = get_Cast_type(node);
return is_SubClass_of(fromtype, totype);
}
-/* Checks for downcast.
- *
- * Returns true if the Cast node casts a class type to a sub type.
- */
int is_Cast_downcast(ir_node *node)
{
ir_type *totype = get_Cast_type(node);
return (get_irn_arity(node));
}
-/*
-void set_Sync_n_preds(ir_node *node, int n_preds)
-{
- assert(is_Sync(node));
-}
-*/
-
ir_node *get_Sync_pred(const ir_node *node, int pos)
{
assert(is_Sync(node));
set_irn_n(node, pos, pred);
}
-/* Add a new Sync predecessor */
void add_Sync_pred(ir_node *node, ir_node *pred)
{
assert(is_Sync(node));
return ARR_LEN(node->attr.assem.clobbers);
}
-/* returns the graph of a node */
/* Returns the graph a node belongs to. */
ir_graph *(get_irn_irg)(const ir_node *node)
{
return get_irn_irg_(node);
}
-
-/*----------------------------------------------------------------*/
-/* Auxiliary routines */
-/*----------------------------------------------------------------*/
-
ir_node *skip_Proj(ir_node *node)
{
/* don't assert node !!! */
return node;
}
-/* returns operand of node if node is a Cast */
ir_node *skip_Cast(ir_node *node)
{
if (is_Cast(node))
return node;
}
-/* returns operand of node if node is a Cast */
const ir_node *skip_Cast_const(const ir_node *node)
{
if (is_Cast(node))
return node;
}
-/* returns operand of node if node is a Pin */
ir_node *skip_Pin(ir_node *node)
{
if (is_Pin(node))
return node;
}
-/* returns operand of node if node is a Confirm */
ir_node *skip_Confirm(ir_node *node)
{
if (is_Confirm(node))
return node;
}
-/* skip all high-level ops */
ir_node *skip_HighLevel_ops(ir_node *node)
{
while (is_op_highlevel(get_irn_op(node))) {
}
-/* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
- * than any other approach, as Id chains are resolved and all point to the real node, or
- * all id's are self loops.
- *
- * Note: This function takes 10% of mostly ANY the compiler run, so it's
- * a little bit "hand optimized".
- */
ir_node *skip_Id(ir_node *node)
{
+ /* This should compact Id-cycles to self-cycles. It has the same (or lower) complexity
+ * as any other approach, as Id chains are resolved and all point to the real node, or
+ * all Ids are self loops.
+ *
+ * Note: This function accounts for roughly 10% of almost any compiler run, so it is
+ * a little bit "hand optimized".
+ */
ir_node *pred;
/* don't assert node !!! */
return is_strictConv_(node);
}
-/* Returns true if node is a SymConst node with kind symconst_addr_ent. */
/* Returns true if node is a SymConst node of kind symconst_addr_ent. */
int (is_SymConst_addr_ent)(const ir_node *node)
{
return is_SymConst_addr_ent_(node);
}
-/* Returns true if the operation manipulates control flow. */
int is_cfop(const ir_node *node)
{
if (is_fragile_op(node) && ir_throws_exception(node))
return is_op_unknown_jump(get_irn_op(node));
}
-/* Returns true if the operation can change the control flow because
- of an exception. */
/* Returns true if the operation can change the control flow because
 * of an exception. */
int is_fragile_op(const ir_node *node)
{
return is_op_fragile(get_irn_op(node));
}
-/* Returns true if the operation is a forking control flow operation. */
int (is_irn_forking)(const ir_node *node)
{
return is_irn_forking_(node);
copy_node_attr_(irg, old_node, new_node);
}
-/* Return the type attribute of a node n (SymConst, Call, Alloc, Free,
- Cast) or NULL.*/
/* Returns the type attribute of a node (SymConst, Call, Alloc, Free,
 * Cast) or NULL. */
ir_type *(get_irn_type_attr)(ir_node *node)
{
return get_irn_type_attr_(node);
}
-/* Return the entity attribute of a node n (SymConst, Sel) or NULL. */
/* Returns the entity attribute of a node (SymConst, Sel) or NULL. */
ir_entity *(get_irn_entity_attr)(ir_node *node)
{
return get_irn_entity_attr_(node);
}
-/* Returns non-zero for constant-like nodes. */
/* Returns non-zero for constant-like nodes. */
int (is_irn_constlike)(const ir_node *node)
{
return is_irn_constlike_(node);
}
-/*
- * Returns non-zero for nodes that are allowed to have keep-alives and
- * are neither Block nor PhiM.
- */
/* Returns non-zero for nodes that are allowed to have keep-alives and
 * are neither Block nor PhiM. */
int (is_irn_keep)(const ir_node *node)
{
return is_irn_keep_(node);
}
-/*
- * Returns non-zero for nodes that are always placed in the start block.
- */
/* Returns non-zero for nodes that are always placed in the start block. */
int (is_irn_start_block_placed)(const ir_node *node)
{
return is_irn_start_block_placed_(node);
}
-/* Returns non-zero for nodes that are CSE neutral to its users. */
/* Returns non-zero for nodes that are CSE-neutral to their users. */
int (is_irn_cse_neutral)(const ir_node *node)
{
return is_irn_cse_neutral_(node);
}
-/* Gets the string representation of the jump prediction .*/
const char *get_cond_jmp_predicate_name(cond_jmp_predicate pred)
{
#define X(a) case a: return #a
return firm_unknown_type;
}
-/* Sets the get_type operation for an ir_op_ops. */
void firm_set_default_get_type_attr(unsigned code, ir_op_ops *ops)
{
switch (code) {
return NULL;
}
-/* Sets the get_type operation for an ir_op_ops. */
void firm_set_default_get_entity_attr(unsigned code, ir_op_ops *ops)
{
switch (code) {
}
}
-/* Sets the debug information of a node. */
/* Sets the debug information of a node. */
void (set_irn_dbg_info)(ir_node *n, dbg_info *db)
{
set_irn_dbg_info_(n, db);
}
-/**
- * Returns the debug information of an node.
- *
- * @param n The node.
- */
dbg_info *(get_irn_dbg_info)(const ir_node *n)
{
return get_irn_dbg_info_(n);
return res;
}
-/*
- * Calculate a hash value of a node.
- */
unsigned firm_default_hash(const ir_node *node)
{
unsigned h;
/** the available next opcode */
static unsigned next_iro = iro_MaxOpcode;
-/*
- * Copies all attributes stored in the old node to the new node.
- * Assumes both have the same opcode and sufficient size.
- */
void default_copy_attr(ir_graph *irg, const ir_node *old_node,
ir_node *new_node)
{
firm_set_default_reassoc(code, ops);
}
-/* Creates a new ir operation. */
ir_op *new_ir_op(unsigned code, const char *name, op_pin_state p,
unsigned flags, op_arity opar, int op_index, size_t attr_size,
const ir_op_ops *ops)
op->pn_x_except = pn_x_except;
}
-/* Returns the string for the opcode. */
const char *get_op_name (const ir_op *op)
{
return get_id_str(op->name);
return get_op_pinned_(op);
}
-/* Sets op_pin_state_pinned in the opcode. Setting it to floating has no effect
- for Phi, Block and control flow nodes. */
/* Sets op_pin_state_pinned in the opcode.  Setting it has no effect for
 * Phi, Block and control flow opcodes, which keep their pin state. */
void set_op_pinned(ir_op *op, op_pin_state pinned)
{
	/* Block, Phi and control-flow opcodes must never change pinnedness. */
	int const keep_state =
		op == op_Block || op == op_Phi || is_op_cfopcode(op);
	if (!keep_state)
		op->pin_state = pinned;
}
-/* retrieve the next free opcode */
/* Retrieves the next free opcode number and advances the counter. */
unsigned get_next_ir_opcode(void)
{
	unsigned const code = next_iro;
	next_iro = code + 1;
	return code;
}
-/* Returns the next free n IR opcode number, allows to register a bunch of user ops */
unsigned get_next_ir_opcodes(unsigned num)
{
unsigned base = next_iro;
return base;
}
-/* Returns the generic function pointer from an ir operation. */
/* Returns the generic function pointer stored in an ir operation. */
op_func (get_generic_function_ptr)(const ir_op *op)
{
return get_generic_function_ptr_(op);
}
-/* Store a generic function pointer into an ir operation. */
/* Stores a generic function pointer into an ir operation. */
void (set_generic_function_ptr)(ir_op *op, op_func func)
{
set_generic_function_ptr_(op, func);
}
-/* Returns the ir_op_ops of an ir_op. */
const ir_op_ops *(get_op_ops)(const ir_op *op)
{
return get_op_ops_(op);
#include "bitfiddle.h"
#include "be.h"
-/* Make types visible to allow most efficient access */
#include "entity_t.h"
static bool is_Or_Eor_Add(const ir_node *node)
value_of_func value_of_ptr = default_value_of;
-/* * Set a new value_of function. */
void set_value_of_func(value_of_func func)
{
if (func != NULL)
#undef CASE
}
-/*
- * Compare function for two nodes in the value table. Gets two
- * nodes as parameters. Returns 0 if the nodes are a Common Sub Expression.
- */
int identities_cmp(const void *elt, const void *key)
{
ir_node *a = (ir_node *)elt;
return 0;
}
-/*
- * Calculate a hash value of a node.
- *
- * @param node The IR-node
- */
/* Calculates a hash value for a node by dispatching to the hash
 * callback registered in the node's opcode ops. */
unsigned ir_node_hash(const ir_node *node)
{
return node->op->ops.hash(node);
}
-
void new_identities(ir_graph *irg)
{
if (irg->value_table != NULL)
del_pset(irg->value_table);
}
-/* Normalize a node by putting constants (and operands with larger
- * node index) on the right (operator side). */
void ir_normalize_node(ir_node *n)
{
if (is_op_commutative(get_irn_op(n))) {
}
}
-/*
- * Return the canonical node computing the same value as n.
- * Looks up the node in a hash table, enters it in the table
- * if it isn't there yet.
- *
- * @param n the node to look up
- *
- * @return a node that computes the same value as n or n if no such
- * node could be found
- */
ir_node *identify_remember(ir_node *n)
{
ir_graph *irg = get_irn_irg(n);
return n;
}
-/* Add a node to the identities value table. */
void add_identities(ir_node *node)
{
if (!get_opt_cse())
identify_remember(node);
}
-/* Visit each node in the value table of a graph. */
void visit_all_identities(ir_graph *irg, irg_walk_func visit, void *env)
{
ir_node *node;
current_ir_graph = rem;
}
-/**
- * These optimizations deallocate nodes from the obstack.
- * It can only be called if it is guaranteed that no other nodes
- * reference this one, i.e., right after construction of a node.
- *
- * @param n The node to optimize
- */
ir_node *optimize_node(ir_node *n)
{
ir_node *oldn = n;
return n;
}
-
-/**
- * These optimizations never deallocate nodes (in place). This can cause dead
- * nodes lying on the obstack. Remove these by a dead node elimination,
- * i.e., a copying garbage collection.
- */
ir_node *optimize_in_place_2(ir_node *n)
{
if (!get_opt_optimize() && !is_Phi(n)) return n;
return n;
}
-/**
- * Wrapper for external use, set proper status bits after optimization.
- */
ir_node *optimize_in_place(ir_node *n)
{
ir_graph *irg = get_irn_irg(n);
typedef int (*int_pass_func_irg)(ir_graph *irg);
typedef void (*void_pass_func)(void);
-/*Add a graph pass to a graph pass manager. */
void ir_graph_pass_mgr_add(ir_graph_pass_manager_t *mgr, ir_graph_pass_t *pass)
{
list_add_tail(&pass->list, &mgr->passes);
pass->add_to_mgr(pass->context);
}
-/* Add an irprog pass to an irprog pass manager. */
void ir_prog_pass_mgr_add(ir_prog_pass_manager_t *mgr, ir_prog_pass_t *pass)
{
list_add_tail(&pass->list, &mgr->passes);
return ir_graph_pass_mgr_run(mgr);
}
-/* Ensure that no verifier is run an ir_prog pass. */
int ir_prog_no_verify(ir_prog *prog, void *ctx)
{
(void)prog;
return 0;
}
-/* Ensure that no dumper is run from an ir_prog pass. */
void ir_prog_no_dump(ir_prog *prog, void *ctx, unsigned idx)
{
(void)prog;
return pass;
}
-/* Add an ir_graph_pass as a pass to an ir_prog pass manager. */
void ir_prog_pass_mgr_add_graph_pass(
ir_prog_pass_manager_t *mgr, ir_graph_pass_t *pass)
{
ir_prog_pass_mgr_add(mgr, wrapper);
}
-/* Add an ir_graph_pass_manager as a pass to an ir_prog pass manager. */
void ir_prog_pass_mgr_add_graph_mgr(
ir_prog_pass_manager_t *mgr, ir_graph_pass_manager_t *graph_mgr)
{
snprintf(suffix, n, "%s.svg", pass_name);
}
-/* Run all passes of an ir_graph pass manager. */
int ir_graph_pass_mgr_run(ir_graph_pass_manager_t *mgr)
{
ir_graph_pass_t *pass;
return res;
}
-/* Run all passes of an ir_prog pass manager. */
int ir_prog_pass_mgr_run(ir_prog_pass_manager_t *mgr)
{
ir_prog_pass_t *pass;
return res;
}
-/* Creates a new ir_graph pass manager. */
ir_graph_pass_manager_t *new_graph_pass_mgr(
const char *name, int verify_all, int dump_all)
{
return res;
}
-/* Creates a new ir_prog pass manager. */
ir_prog_pass_manager_t *new_prog_pass_mgr(
const char *name, int verify_all, int dump_all)
{
return res;
}
-/* Terminate an ir_graph pass manager and all owned passes. */
void term_graph_pass_mgr(ir_graph_pass_manager_t *mgr)
{
ir_graph_pass_t *pass, *next;
xfree(mgr);
}
-/* Terminate an ir_prog pass manager and all owned passes. */
void term_prog_pass_mgr(ir_prog_pass_manager_t *mgr)
{
ir_prog_pass_t *pass, *next;
xfree(mgr);
}
-/**
- * Set the run index for an irgraph pass manager.
- *
- * @param mgr the manager
- * @param run_idx the index for the first pass of this manager
- */
/* Sets the run index for an irgraph pass manager.
 *
 * @param mgr      the manager
 * @param run_idx  the index for the first pass of this manager
 */
void ir_graph_pass_mgr_set_run_idx(
ir_graph_pass_manager_t *mgr, unsigned run_idx)
{
mgr->run_idx = run_idx;
}
-/**
- * Set the run index for an irprog pass manager.
- *
- * @param mgr the manager
- * @param run_idx the index for the first pass of this manager
- */
void ir_prog_pass_mgr_set_run_idx(
ir_prog_pass_manager_t *mgr, unsigned run_idx)
{
return 0;
}
-/* Creates an ir_graph pass for running void function(ir_graph *irg). */
ir_graph_pass_t *def_graph_pass(
const char *name, void (*function)(ir_graph *irg))
{
return function(irg);
}
-/* Creates an ir_graph pass for running void function(ir_graph *irg). */
ir_graph_pass_t *def_graph_pass_ret(
const char *name, int (*function)(ir_graph *irg))
{
return pass;
}
-/* constructor for a default graph pass */
ir_graph_pass_t *def_graph_pass_constructor(
ir_graph_pass_t *pass,
const char *name, int (*function)(ir_graph *irg, void *context)) {
return pass;
}
-/* set the run parallel property */
void ir_graph_pass_set_parallel(ir_graph_pass_t *pass, int flag)
{
pass->run_parallel = flag != 0;
return 0;
}
-/* Creates an ir_prog pass for running void function(void). */
ir_prog_pass_t *def_prog_pass(
const char *name,
void (*function)(void))
return pass;
}
-/* Creates an ir_prog pass for running void function(void). */
ir_prog_pass_t *def_prog_pass_constructor(
ir_prog_pass_t *pass,
const char *name,
return result;
}
-/**
- * Instrument all ir_graphs in the current ir_program. Currently this only
- * works for graphs in the backend. Additionally, the resulting program
- * has to be linked with libfirmprof.
- *
- * @param filename the name of the profile file (usually module_name.prof)
- * @returns the module initializer, may be NULL
- */
ir_graph *ir_profile_instrument(const char *filename)
{
int n, n_blocks = 0;
/**
* Reads the corresponding profile info file if it exists.
*/
-
static void block_associate_walker(ir_node *bb, void *env)
{
block_assoc_t *b = (block_assoc_t*) env;
return true;
}
-/**
- * Frees the profile info
- */
void ir_profile_free(void)
{
if (profile) {
}
}
-/**
- * Tells whether profile module has acquired data
- */
/**
 * Tells whether the profile module has acquired data, i.e. whether the
 * global 'profile' store has been populated.
 *
 * @return true if profile data is available
 */
bool ir_profile_has_data(void)
{
return profile != NULL;
}
-/**
- * Get block execution count as determined by profiling
- */
unsigned int ir_profile_get_block_execcount(const ir_node *block)
{
execcount_t *ec, query;
/** The initial name of the irp program. */
#define INITAL_PROG_NAME "no_name_set"
-/* A variable from where everything in the ir can be accessed. */
ir_prog *irp;
/** Returns the global ir_prog object from which everything in the ir
 *  can be accessed. */
ir_prog *get_irp(void)
{
	return irp;
}
void set_irp(ir_prog *new_irp)
#undef IDENT
}
-/* initializes ir_prog. Constructs only the basic lists. */
/**
 * First phase of ir_prog initialization: allocates the global irp as an
 * incomplete program (only the basic lists are constructed; completion
 * happens in init_irprog_2()).
 */
void init_irprog_1(void)
{
irp = new_incomplete_ir_prog();
}
-/* Completes ir_prog. */
/**
 * Second phase of ir_prog initialization: completes the global irp
 * created by init_irprog_1() under the default program name.
 * The returned ir_prog is the global irp itself, so the result is
 * deliberately discarded here.
 */
void init_irprog_2(void)
{
(void)complete_ir_prog(irp, INITAL_PROG_NAME);
}
-/* Create a new ir prog. Automatically called by init_firm through
- init_irprog. */
/**
 * Creates a new ir_prog with the given name. Automatically called by
 * init_firm through init_irprog.
 *
 * @param name  the program name
 *
 * @return the newly created, completed ir_prog
 */
ir_prog *new_ir_prog(const char *name)
{
	ir_prog *prog = new_incomplete_ir_prog();
	return complete_ir_prog(prog, name);
}
-/* frees all memory used by irp. Types in type list, irgs in irg
- list and entities in global type must be freed by hand before. */
void free_ir_prog(void)
{
size_t i;
irp->kind = k_BAD;
}
-/*- Functions to access the fields of ir_prog -*/
-
-
-/* Access the main routine of the compiled program. */
ir_graph *get_irp_main_irg(void)
{
assert(irp);
return get_tls_type_();
}
-/* Adds irg to the list of ir graphs in irp. */
/**
 * Appends irg to the global list of ir graphs in irp.
 *
 * @param irg  the graph to add; must not be NULL
 */
void add_irp_irg(ir_graph *irg)
{
assert(irg != NULL);
ARR_APP1(ir_graph *, irp->graphs, irg);
}
-/* Removes irg from the list or irgs, shrinks the list by one. */
void remove_irp_irg_from_list(ir_graph *irg)
{
size_t i, l;
}
}
-/* Removes irg from the list or irgs, shrinks the list by one. */
void remove_irp_irg(ir_graph *irg)
{
free_ir_graph(irg);
irp->graphs[pos] = irg;
}
-/* Adds type to the list of types in irp. */
/**
 * Appends typ to the global list of types in irp.
 *
 * @param typ  the type to add; must not be NULL
 */
void add_irp_type(ir_type *typ)
{
assert(typ != NULL);
ARR_APP1(ir_type *, irp->types, typ);
}
-/* Remove type from the list of types in irp. */
void remove_irp_type(ir_type *typ)
{
size_t i, l;
irp->types[pos] = typ;
}
-/* Returns the number of all modes in the irp. */
/**
 * Returns the number of all modes in the irp.
 * (Parenthesized name: out-of-line version of the inline _-helper.)
 */
size_t (get_irp_n_modes)(void)
{
return get_irp_n_modes_();
}
-/* Returns the mode at position pos in the irp. */
/**
 * Returns the mode at position pos in the irp mode list.
 * (Parenthesized name: out-of-line version of the inline _-helper.)
 */
ir_mode *(get_irp_mode)(size_t pos)
{
return get_irp_mode_(pos);
}
-/* Adds mode to the list of modes in irp. */
/**
 * Appends mode to the global list of modes in irp.
 *
 * @param mode  the mode to add; must not be NULL
 */
void add_irp_mode(ir_mode *mode)
{
assert(mode != NULL);
ARR_APP1(ir_mode *, irp->modes, mode);
}
-/* Adds opcode to the list of opcodes in irp. */
void add_irp_opcode(ir_op *opcode)
{
size_t len;
irp->opcodes[code] = opcode;
}
-/* Removes opcode from the list of opcodes and shrinks the list by one. */
/**
 * Removes opcode from the list of opcodes in irp.
 * NOTE(review): only the slot is cleared to NULL; the opcode array is
 * not actually shrunk here, despite what older comments claimed.
 */
void remove_irp_opcode(ir_op *opcode)
{
assert(opcode->code < ARR_LEN(irp->opcodes));
irp->opcodes[opcode->code] = NULL;
}
-/* Returns the number of all opcodes in the irp. */
/**
 * Returns the number of all opcodes in the irp.
 * (Parenthesized name: out-of-line version of the inline _-helper.)
 */
size_t (get_irp_n_opcodes)(void)
{
return get_irp_n_opcodes_();
}
-/* Returns the opcode at position pos in the irp. */
/**
 * Returns the opcode at position pos in the irp opcode list.
 * (Parenthesized name: out-of-line version of the inline _-helper.)
 */
ir_op *(get_irp_opcode)(size_t pos)
{
return get_irp_opcode_(pos);
}
-/* Sets the generic function pointer of all opcodes to NULL */
void clear_irp_opcodes_generic_func(void)
{
size_t i, n;
}
}
-/*- File name / executable name or the like -*/
void set_irp_prog_name(ident *name)
{
irp->name = name;
irp->callee_info_state = s;
}
-/* Returns a new, unique exception region number. */
-ir_exc_region_t (get_irp_next_region_nr)(void)
-{
- return get_irp_next_region_nr_();
-}
-
-/* Returns a new, unique label number. */
/**
 * Returns a new, unique label number.
 * (Parenthesized name: out-of-line version of the inline _-helper.)
 */
ir_label_t (get_irp_next_label_nr)(void)
{
return get_irp_next_label_nr_();
}
-/* Add a new global asm include */
/**
 * Adds a new global asm include to the irp.
 *
 * @param asm_string  the asm text to append
 */
void add_irp_asm(ident *asm_string)
{
ARR_APP1(ident *, irp->global_asms, asm_string);
}
-/* Return the number of global asm includes. */
/** Returns the number of global asm includes in the irp. */
size_t get_irp_n_asms(void)
{
return ARR_LEN(irp->global_asms);
}
-/* Return the global asm include at position pos. */
/**
 * Returns the global asm include at position pos.
 *
 * @param pos  index into the asm list; must be < get_irp_n_asms()
 */
ident *get_irp_asm(size_t pos)
{
assert(pos < get_irp_n_asms());
return irp->global_asms[pos];
}
-/** Return whether optimization dump vcg graphs */
/**
 * Returns whether optimizations should dump vcg graphs.
 * (Parenthesized name: out-of-line version of the inline _-helper.)
 */
int (get_irp_optimization_dumps)(void)
{
return get_irp_optimization_dumps_();
}
-/** Enable vcg dumping of optimization */
void (enable_irp_optimization_dumps)(void)
{
enable_irp_optimization_dumps_();
set_Block_phis(block, NULL);
}
-/*
- * Restarts SSA construction on the given graph with n_loc
- * new values.
- *
- * @param irg the graph on which the SSA construction is restarted
- * @param n_loc number of new variables
- *
- * After this function is complete, the graph is in phase_building
- * again and set_value()/get_value() and mature_block() can be used
- * to construct new values.
- */
void ssa_cons_start(ir_graph *irg, int n_loc)
{
/* for now we support only phase_high graphs */
mature_immBlock(block);
}
-/*
- * Finalize the (restarted) SSA construction. Matures all blocks that are
- * not matured yet and reset the graph state to phase_high.
- */
void ssa_cons_finish(ir_graph *irg)
{
ssa_cons_walker(irg, NULL, finish_block, NULL);
const char *firm_verify_failure_msg;
-/* enable verification of Load/Store entities */
/**
 * Enables or disables verification of Load/Store entities.
 *
 * @param enable  non-zero to enable the entity tests
 */
void verify_enable_entity_tests(int enable)
{
	verify_entities = enable;
	/* fix: removed 'return 1;' -- a void function must not return a value */
}
-/* Tests the modes of n and its predecessors. */
int irn_verify_irg(const ir_node *n, ir_graph *irg)
{
ir_op *op;
return env.res;
}
-/*
- * Calls irn_verify for each node in irg.
- * Graph must be in state "op_pin_state_pinned".
- * If dominance info is available, check the SSA property.
- */
int irg_verify(ir_graph *irg, unsigned flags)
{
int res = 1;
return 0;
}
-/* Creates an ir_graph pass for irg_verify(). */
ir_graph_pass_t *irg_verify_pass(const char *name, unsigned flags)
{
pass_t *pass = XMALLOCZ(pass_t);
return &pass->pass;
}
-/* create a verify pass */
int irn_verify_irg_dump(const ir_node *n, ir_graph *irg,
const char **bad_string)
{
}
}
-/*
- * verify occurrence of bad nodes
- */
int irg_verify_bads(ir_graph *irg, int flags)
{
verify_bad_env_t env;
return env.res;
}
-/*
- * set the default verify operation
- */
void firm_set_default_verifier(unsigned code, ir_op_ops *ops)
{
#define CASE(a) \
}
}
-/* Remove Bad nodes from Phi and Block inputs.
- *
- * This does NOT remove unreachable code.
- *
- * Postcondition: No Bad nodes.
- */
int remove_bads(ir_graph *irg)
{
size_t i;
*changed = true;
}
-/* Remove Tuple nodes from an ir graph.
- *
- * Postcondition: No Tuple nodes.
- */
int remove_tuples(ir_graph *irg)
{
bool changed = 0;
#include "iropt_dbg.h"
#include "error.h"
#include "be.h"
+#include "util.h"
/** Walker environment. */
typedef struct walker_env {
}
}
-/* Go through all graphs and map calls to intrinsic functions. */
size_t lower_intrinsics(i_record *list, size_t length, int part_block_used)
{
size_t i, n;
set_Tuple_pred(call, pn_Call_T_result, rest);
}
-/* A mapper for the integer abs. */
int i_mapper_abs(ir_node *call, void *ctx)
{
ir_node *mem = get_Call_mem(call);
return 1;
}
-/* A mapper for the integer bswap. */
int i_mapper_bswap(ir_node *call, void *ctx)
{
ir_node *mem = get_Call_mem(call);
return 1;
}
-/* A mapper for the alloca() function. */
int i_mapper_alloca(ir_node *call, void *ctx)
{
ir_node *mem = get_Call_mem(call);
return 1;
}
-/* A mapper for the floating point sqrt. */
int i_mapper_sqrt(ir_node *call, void *ctx)
{
ir_node *mem;
return 1;
}
-/* A mapper for the floating point cbrt. */
int i_mapper_cbrt(ir_node *call, void *ctx)
{
ir_node *mem;
return 1;
}
-/* A mapper for the floating point pow. */
int i_mapper_pow(ir_node *call, void *ctx)
{
ir_node *left = get_Call_param(call, 0);
return 1;
}
-/* A mapper for the floating point exp. */
int i_mapper_exp(ir_node *call, void *ctx)
{
ir_node *val = get_Call_param(call, 0);
return changed;
}
-/* A mapper for the floating point log. */
/**
 * A mapper for the floating point log(): folds log(1.0) to 0.0.
 *
 * @return non-zero if the Call was replaced
 */
int i_mapper_log(ir_node *call, void *ctx)
{
/* log(1.0) = 0.0 */
return i_mapper_one_to_zero(call, ctx, FS_OPT_RTS_LOG);
}
-/* A mapper for the floating point sin. */
/**
 * A mapper for the floating point sin(): folds sin(0.0) to 0.0.
 *
 * @return non-zero if the Call was replaced
 */
int i_mapper_sin(ir_node *call, void *ctx)
{
/* sin(0.0) = 0.0 */
return i_mapper_zero_to_zero(call, ctx, FS_OPT_RTS_SIN);
}
-/* A mapper for the floating point cos. */
/**
 * A mapper for the floating point cos(): folds cos(0.0) to 1.0 and
 * exploits the symmetry cos(-x) == cos(x).
 *
 * @return non-zero if the Call was replaced
 */
int i_mapper_cos(ir_node *call, void *ctx)
{
/* cos(0.0) = 1.0, cos(-x) = x */
return i_mapper_symmetric_zero_to_one(call, ctx, FS_OPT_RTS_COS);
}
-/* A mapper for the floating point tan. */
/**
 * A mapper for the floating point tan(): folds tan(0.0) to 0.0.
 *
 * @return non-zero if the Call was replaced
 */
int i_mapper_tan(ir_node *call, void *ctx)
{
/* tan(0.0) = 0.0 */
return i_mapper_zero_to_zero(call, ctx, FS_OPT_RTS_TAN);
}
-/* A mapper for the floating point asin. */
/**
 * A mapper for the floating point asin(): folds asin(0.0) to 0.0.
 *
 * @return non-zero if the Call was replaced
 */
int i_mapper_asin(ir_node *call, void *ctx)
{
/* asin(0.0) = 0.0 */
return i_mapper_zero_to_zero(call, ctx, FS_OPT_RTS_ASIN);
}
-/* A mapper for the floating point acos. */
/**
 * A mapper for the floating point acos(): folds acos(1.0) to 0.0.
 *
 * @return non-zero if the Call was replaced
 */
int i_mapper_acos(ir_node *call, void *ctx)
{
/* acos(1.0) = 0.0 */
return i_mapper_one_to_zero(call, ctx, FS_OPT_RTS_ACOS);
}
-/* A mapper for the floating point atan. */
/**
 * A mapper for the floating point atan(): folds atan(0.0) to 0.0.
 *
 * @return non-zero if the Call was replaced
 */
int i_mapper_atan(ir_node *call, void *ctx)
{
/* atan(0.0) = 0.0 */
return i_mapper_zero_to_zero(call, ctx, FS_OPT_RTS_ATAN);
}
-/* A mapper for the floating point sinh. */
/**
 * A mapper for the floating point sinh(): folds sinh(0.0) to 0.0.
 *
 * @return non-zero if the Call was replaced
 */
int i_mapper_sinh(ir_node *call, void *ctx)
{
/* sinh(0.0) = 0.0 */
return i_mapper_zero_to_zero(call, ctx, FS_OPT_RTS_SINH);
}
-/* A mapper for the floating point cosh. */
/**
 * A mapper for the floating point cosh(): folds cosh(0.0) to 1.0 and
 * exploits the symmetry cosh(-x) == cosh(x).
 *
 * @return non-zero if the Call was replaced
 */
int i_mapper_cosh(ir_node *call, void *ctx)
{
/* cosh(0.0) = 1.0, cosh(-x) = x */
return i_mapper_symmetric_zero_to_one(call, ctx, FS_OPT_RTS_COSH);
}
-/* A mapper for the floating point tanh. */
/**
 * A mapper for the floating point tanh(): folds tanh(0.0) to 0.0.
 *
 * @return non-zero if the Call was replaced
 */
int i_mapper_tanh(ir_node *call, void *ctx)
{
	/* tanh(0.0) = 0.0 */
	/* fix: body returned NULL from an int function and ignored its
	 * arguments; delegate to the zero-to-zero folder like the sibling
	 * mappers (sin, tan, asin, atan, sinh). */
	return i_mapper_zero_to_zero(call, ctx, FS_OPT_RTS_TANH);
}
-/* A mapper for strlen */
int i_mapper_strlen(ir_node *call, void *ctx)
{
ir_node *s = get_Call_param(call, 0);
return initializer_val_is_null(init0);
}
-/* A mapper for strcmp */
int i_mapper_strcmp(ir_node *call, void *ctx)
{
ir_node *left = get_Call_param(call, 0);
return 0;
}
-/* A mapper for strncmp */
int i_mapper_strncmp(ir_node *call, void *ctx)
{
ir_node *left = get_Call_param(call, 0);
return 0;
}
-/* A mapper for strcpy */
int i_mapper_strcpy(ir_node *call, void *ctx)
{
ir_node *dst = get_Call_param(call, 0);
return 0;
}
-/* A mapper for memcpy */
int i_mapper_memcpy(ir_node *call, void *ctx)
{
ir_node *dst = get_Call_param(call, 0);
return 0;
}
-/* A mapper for mempcpy */
int i_mapper_mempcpy(ir_node *call, void *ctx)
{
ir_node *dst = get_Call_param(call, 0);
return 0;
}
-/* A mapper for memmove */
int i_mapper_memmove(ir_node *call, void *ctx)
{
ir_node *dst = get_Call_param(call, 0);
return 0;
}
-/* A mapper for memset */
int i_mapper_memset(ir_node *call, void *ctx)
{
ir_node *len = get_Call_param(call, 2);
return 0;
}
-/* A mapper for memcmp */
int i_mapper_memcmp(ir_node *call, void *ctx)
{
ir_node *left = get_Call_param(call, 0);
}
}
-#define LMAX(a, b) ((a) > (b) ? (a) : (b))
-
-/* A mapper for mapping unsupported instructions to runtime calls. */
int i_mapper_RuntimeCall(ir_node *node, runtime_rt *rt)
{
int i, j, arity, first, n_param, n_res;
/* step 0: calculate the number of needed Proj's */
n_proj = 0;
- n_proj = LMAX(n_proj, rt->mem_proj_nr + 1);
- n_proj = LMAX(n_proj, rt->res_proj_nr + 1);
+ n_proj = MAX(n_proj, rt->mem_proj_nr + 1);
+ n_proj = MAX(n_proj, rt->res_proj_nr + 1);
if (throws_exception) {
- n_proj = LMAX(n_proj, rt->regular_proj_nr + 1);
- n_proj = LMAX(n_proj, rt->exc_proj_nr + 1);
+ n_proj = MAX(n_proj, rt->regular_proj_nr + 1);
+ n_proj = MAX(n_proj, rt->exc_proj_nr + 1);
}
if (n_proj > 0) {
return newe;
}
-/*
- * Copies the entity if the new_owner is different from the
- * owner of the old entity, else returns the old entity.
- */
ir_entity *copy_entity_own(ir_entity *old, ir_type *new_owner)
{
ir_entity *newe;
xfree(ent);
}
-/* Outputs a unique number for this node */
long get_entity_nr(const ir_entity *ent)
{
assert(ent && ent->kind == k_entity);
_set_entity_volatility(ent, vol);
}
-/* Return the name of the volatility. */
const char *get_volatility_name(ir_volatility var)
{
#define X(a) case a: return #a
_set_entity_alignment(ent, alignment);
}
-/* Return the name of the alignment. */
const char *get_align_name(ir_align a)
{
#define X(a) case a: return #a
entity->linkage &= ~linkage;
}
-/* Checks if an entity is compiler generated */
/**
 * Checks if an entity is compiler generated.
 * (Parenthesized name: out-of-line version of the inline _-helper.)
 */
int (is_entity_compiler_generated)(const ir_entity *ent)
{
return _is_entity_compiler_generated(ent);
}
-/* Sets/resets the compiler generated flag */
void (set_entity_compiler_generated)(ir_entity *ent, int flag)
{
_set_entity_compiler_generated(ent, flag);
_set_entity_usage(ent, flags);
}
-/* Set has no effect for existent entities of type method. */
ir_node *get_atomic_ent_value(ir_entity *entity)
{
ir_initializer_t *initializer = get_entity_initializer(entity);
entity->initializer = initializer;
}
-/* Returns true if the the node is representable as code on
- * const_code_irg. */
int is_irn_const_expression(ir_node *n)
{
/* we are in danger iff an exception will arise. TODO: be more precisely,
return 0;
}
-/*
- * Copies a firm subgraph that complies to the restrictions for
- * constant expressions to block.
- */
ir_node *copy_const_value(dbg_info *dbg, ir_node *n, ir_node *block)
{
ir_graph *irg = get_irn_irg(block);
return nn;
}
-/** Return the name of the initializer kind. */
const char *get_initializer_kind_name(ir_initializer_kind_t ini)
{
#define X(a) case a: return #a
}
}
-/* Returns the class type that this type info entity represents or NULL
- if ent is no type info entity. */
ir_type *(get_entity_repr_class)(const ir_entity *ent)
{
return _get_entity_repr_class(ent);
#undef ID
}
-/* Finalize the tpop module.
- * Frees all type opcodes. */
void finish_tpop(void)
{
free_tpop(type_class ); type_class = NULL;
free_tpop(tpop_unknown ); tpop_unknown = NULL;
}
-/* Returns the string for the tp_opcode. */
-const char *get_tpop_name(const tp_op *op)
+const char *get_tpop_name(const tp_op *op)
{
return get_id_str(op->name);
}
return _get_tpop_code(op);
}
-/* returns the attribute size of the operator. */
/**
 * Returns the attribute size of the type operator.
 * (Parenthesized name: out-of-line version of the inline _-helper.)
 */
size_t (get_tpop_attr_size)(const tp_op *op)
{
return _get_tpop_attr_size(op);
}
}
-/* Resolve implicit inheritance.
- *
- * Resolves the implicit inheritance supplied by firm.
- */
void resolve_inheritance(mangle_inherited_name_func *mfunc)
{
if (!mfunc)
}
}
-/** Compute the transitive closure of the subclass/superclass and
- * overwrites/overwrittenby relation.
- *
- * This function walks over the ir (O(#types+#entities)) to compute the
- * transitive closure. */
void compute_inh_transitive_closure(void)
{
size_t i, n_types = get_irp_n_types();
irp_free_resources(irp, IRP_RESOURCE_TYPE_VISITED);
}
-/** Free memory occupied by the transitive closure information. */
void free_inh_transitive_closure(void)
{
if (tr_inh_trans_set) {
/* - overwrites ---------------------------------------------------------- */
-/** Iterate over all transitive overwritten entities. */
ir_entity *get_entity_trans_overwrites_first(const ir_entity *ent)
{
assert_valid_state();
return 0;
}
-/* Returns true if low is subclass of high. */
/**
 * Returns true if low is a subclass of high.
 * Both arguments must be class types.
 */
int is_SubClass_of(ir_type *low, ir_type *high)
{
assert(is_Class_type(low) && is_Class_type(high));
return check_is_SubClass_of(low, high);
}
-
-/* Subclass check for pointers to classes.
- *
- * Dereferences at both types the same amount of pointer types (as
- * many as possible). If the remaining types are both class types
- * and subclasses, returns true, else false. Can also be called with
- * two class types. */
int is_SubClass_ptr_of(ir_type *low, ir_type *high)
{
while (is_Pointer_type(low) && is_Pointer_type(high)) {
return static_ent;
}
-/* Resolve polymorphy in the inheritance relation.
- *
- * Returns the dynamically referenced entity if the static entity and the
- * dynamic type are given.
- * Search downwards in overwritten tree.
- */
ir_entity *resolve_ent_polymorphy(ir_type *dynamic_class, ir_entity *static_ent)
{
ir_entity *res;
ccs->worst_situation = this_state;
}
-/** Verify that the graph meets requirements of state set. */
void verify_irg_class_cast_state(ir_graph *irg)
{
ccs_env env;
return 0;
}
-
-/*
- * Checks a type.
- *
- * return
- * 0 if no error encountered
- */
int check_type(ir_type *tp)
{
switch (get_type_tpop_code(tp)) {
return 0;
}
-/*
- * Check an entity. Currently, we check only if initialized constants
- * are build on the const irg graph.
- *
- * @return
- * 0 if no error encountered
- * != 0 a trverify_error_codes code
- */
int check_entity(ir_entity *ent)
{
ir_type *tp = get_entity_type(ent);
}
}
-/*
- * Verify types and entities.
- */
int tr_verify(void)
{
static ident *empty = NULL;
}
}
-/** the global type visited flag */
ir_visited_t firm_type_visited;
void (set_master_type_visited)(ir_visited_t val)
assert(0 && "setting a mode is NOT allowed for this type");
}
-/* Outputs a unique number for this node */
long get_type_nr(const ir_type *tp)
{
assert(tp);
return _is_type(thing);
}
-/* Checks whether two types are structural equal.*/
int equal_type(ir_type *typ1, ir_type *typ2)
{
ir_entity **m;
free(array->attr.aa.order);
}
-/* manipulate private fields of array ir_type */
size_t get_array_n_dimensions(const ir_type *array)
{
assert(array->type_op == type_array);
}
}
-/**
- * If we have the closed world assumption, we can calculate the
- * finalization of classes and entities by inspecting the class hierarchy.
- * After this is done, all classes and entities that are not overridden
- * anymore have the final property set.
- */
void types_calc_finalization(void)
{
if (! get_opt_closed_world())
irn_type_walker(node, pre, post, envi);
}
-/* walker: walks over all types */
void type_walk(type_walk_func *pre, type_walk_func *post, void *env)
{
size_t i, n_types = get_irp_n_types();
}
-/* Walks over all entities in the type */
void walk_types_entities(ir_type *tp,
entity_walk_func *doit,
void *env)