res->out = NULL;
res->node_nr = get_irp_new_node_nr();
- for (i = 0; i < EDGE_KIND_LAST; ++i)
+ for (i = 0; i < EDGE_KIND_LAST; ++i) {
INIT_LIST_HEAD(&res->edge_info[i].outs_head);
+ /* edges will be built immediately */
+ res->edge_info[i].edges_built = 1;
+ res->edge_info[i].out_count = 0;
+ }
/* don't put this into the for loop, arity is -1 for some nodes! */
edges_notify_edge(res, -1, res->in[0], NULL, irg);
void set_irn_in(ir_node *node, int arity, ir_node **in) {
int i;
ir_node *** pOld_in;
- ir_graph *irg = current_ir_graph;
+ ir_graph *irg = get_irn_irg(node);
assert(node);
#ifdef INTERPROCEDURAL_VIEW
/* Returns the Call attributes of a Call node.
 * Resolved leftover diff hunk: skip_tid() normalization was removed, so the
 * stored type is returned as-is. */
call_attr *get_irn_call_attr(ir_node *node) {
	assert(is_Call(node));
	return &node->attr.call;
}
glob = get_glob_type();
entity = new_entity(glob, id_unique("block_%u"), get_code_type());
+ set_entity_visibility(entity, ir_visibility_local);
+ set_entity_linkage(entity, IR_LINKAGE_CONSTANT);
nr = get_irp_next_label_nr();
set_entity_label(entity, nr);
set_entity_compiler_generated(entity, 1);
- set_entity_allocation(entity, allocation_static);
block->attr.block.entity = entity;
}
set_irn_n(ijmp, 0, tgt);
}
-/*
-> Implementing the case construct (which is where the constant Proj node is
-> important) involves far more than simply determining the constant values.
-> We could argue that this is more properly a function of the translator from
-> Firm to the target machine. That could be done if there was some way of
-> projecting "default" out of the Cond node.
-I know it's complicated.
-Basically there are two problems:
- - determining the gaps between the Projs
- - determining the biggest case constant to know the proj number for
- the default node.
-I see several solutions:
-1. Introduce a ProjDefault node. Solves both problems.
- This means to extend all optimizations executed during construction.
-2. Give the Cond node for switch two flavors:
- a) there are no gaps in the Projs (existing flavor)
- b) gaps may exist, default proj is still the Proj with the largest
- projection number. This covers also the gaps.
-3. Fix the semantic of the Cond to that of 2b)
-
-Solution 2 seems to be the best:
-Computing the gaps in the Firm representation is not too hard, i.e.,
-libFIRM can implement a routine that transforms between the two
-flavours. This is also possible for 1) but 2) does not require to
-change any existing optimization.
-Further it should be far simpler to determine the biggest constant than
-to compute all gaps.
-I don't want to choose 3) as 2a) seems to have advantages for
-dataflow analysis and 3) does not allow to convert the representation to
-2a).
-*/
-
-const char *get_cond_kind_name(cond_kind kind)
-{
-#define X(a) case a: return #a;
- switch (kind) {
- X(dense);
- X(fragmentary);
- }
- return "<unknown>";
-#undef X
-}
-
ir_node *
get_Cond_selector(const ir_node *node) {
assert(is_Cond(node));
set_irn_n(node, 0, selector);
}
-cond_kind
-get_Cond_kind(const ir_node *node) {
- assert(is_Cond(node));
- return node->attr.cond.kind;
-}
-
-void
-set_Cond_kind(ir_node *node, cond_kind kind) {
- assert(is_Cond(node));
- node->attr.cond.kind = kind;
-}
-
long
get_Cond_default_proj(const ir_node *node) {
assert(is_Cond(node));
/* Returns the source-language type of a Const node.
 * Resolved leftover diff hunk: the skip_tid() write-back was removed. */
ir_type *
get_Const_type(ir_node *node) {
	assert(is_Const(node));
	return node->attr.con.tp;
}
ir_node *irn = (ir_node *)node;
assert(is_SymConst(node) &&
(SYMCONST_HAS_TYPE(get_SymConst_kind(node))));
- return irn->attr.symc.sym.type_p = skip_tid(irn->attr.symc.sym.type_p);
+ return irn->attr.symc.sym.type_p;
}
void
/* Returns the attached value type of a SymConst node (may be NULL).
 * Resolved leftover diff hunk: the conditional skip_tid() update was removed. */
ir_type *
get_SymConst_value_type(ir_node *node) {
	assert(is_SymConst(node));
	return node->attr.symc.tp;
}
/* Returns the method type of a Call node.
 * Resolved leftover diff hunk: returns the stored type without skip_tid(). */
ir_type *
get_Call_type(ir_node *node) {
	assert(is_Call(node));
	return node->attr.call.type;
}
void
/* Returns the method type of a Builtin node.
 * Resolved leftover diff hunk: returns the stored type without skip_tid(). */
ir_type *
get_Builtin_type(ir_node *node) {
	assert(is_Builtin(node));
	return node->attr.builtin.type;
}
void
/* Returns the target type of a Cast node.
 * Resolved leftover diff hunk: the skip_tid() write-back was removed. */
ir_type *
get_Cast_type(ir_node *node) {
	assert(is_Cast(node));
	return node->attr.cast.type;
}
/* Returns the type allocated by an Alloc node.
 * Resolved leftover diff hunk: returns the stored type without skip_tid(). */
ir_type *
get_Alloc_type(ir_node *node) {
	assert(is_Alloc(node));
	return node->attr.alloc.type;
}
void
/* Returns the type freed by a Free node.
 * Resolved leftover diff hunk: returns the stored type without skip_tid(). */
ir_type *
get_Free_type(ir_node *node) {
	assert(is_Free(node));
	return node->attr.free.type;
}
void
/* Returns the struct/array type copied by a CopyB node.
 * Resolved leftover diff hunk: returns the stored type without skip_tid(). */
ir_type *get_CopyB_type(ir_node *node) {
	assert(is_CopyB(node));
	return node->attr.copyb.type;
}
void set_CopyB_type(ir_node *node, ir_type *data_type) {
/* Returns the type checked by an InstOf node.
 * Resolved leftover diff hunk: returns the stored type without skip_tid(). */
ir_type *
get_InstOf_type(ir_node *node) {
	assert(node->op == op_InstOf);
	return node->attr.instof.type;
}
void
case iro_Alloc :
case iro_Bound :
case iro_CopyB :
- return get_irn_n(node, pn_Generic_M_regular);
+ return get_irn_n(node, pn_Generic_M);
case iro_Bad :
case iro_Unknown:
return node;
return _is_irn_machine_user(node, n);
}
+/* Returns non-zero for nodes that are CSE neutral to its users. */
+int (is_irn_cse_neutral)(const ir_node *node) {
+ return _is_irn_cse_neutral(node);
+}
/* Gets the string representation of the jump prediction. */
const char *get_cond_jmp_predicate_name(cond_jmp_predicate pred) {
/* consider all in nodes... except the block if not a control flow. */
for (i = is_cfop(node) ? -1 : 0; i < irn_arity; ++i) {
- h = 9*h + HASH_PTR(get_irn_intra_n(node, i));
+ ir_node *pred = get_irn_intra_n(node, i);
+ if (is_irn_cse_neutral(pred))
+ h *= 9;
+ else
+ h = 9*h + HASH_PTR(pred);
}
/* ...mode,... */