X-Git-Url: http://nsz.repo.hu/git/?a=blobdiff_plain;f=ir%2Fir%2Firnode.c;h=83bf3edbd5034d2f36b7e575719e01bf93120e35;hb=2af4a97900b435e35d7c20350604e3863aff2b4c;hp=31dbcf491e4aedd1c60f0abe805b8a248f47a4aa;hpb=3d344e21b632786c6c5ccc6c2bdaee07484a9fb6;p=libfirm

diff --git a/ir/ir/irnode.c b/ir/ir/irnode.c
index 31dbcf491..83bf3edbd 100644
--- a/ir/ir/irnode.c
+++ b/ir/ir/irnode.c
@@ -168,8 +168,12 @@ new_ir_node(dbg_info *db, ir_graph *irg, ir_node *block, ir_op *op, ir_mode *mod
 	res->out = NULL;
 	res->node_nr = get_irp_new_node_nr();
 
-	for (i = 0; i < EDGE_KIND_LAST; ++i)
+	for (i = 0; i < EDGE_KIND_LAST; ++i) {
 		INIT_LIST_HEAD(&res->edge_info[i].outs_head);
+		/* edges will be built immediately */
+		res->edge_info[i].edges_built = 1;
+		res->edge_info[i].out_count = 0;
+	}
 
 	/* don't put this into the for loop, arity is -1 for some nodes! */
 	edges_notify_edge(res, -1, res->in[0], NULL, irg);
@@ -180,6 +184,25 @@ new_ir_node(dbg_info *db, ir_graph *irg, ir_node *block, ir_op *op, ir_mode *mod
 	if (get_irg_phase_state(irg) == phase_backend) {
 		be_info_new_node(res);
 	}
+	/* Init the VRP structures */
+	res->vrp.range_type = VRP_UNDEFINED;
+	res->vrp.valid = 0;
+	if (mode_is_int(mode)) {
+		/* We are assuming that 0 is always represented as 0x0000 */
+		res->vrp.bits_set = new_tarval_from_long(0, mode);
+		res->vrp.bits_not_set = new_tarval_from_long(0, mode);
+		res->vrp.range_bottom = get_tarval_top();
+		res->vrp.range_top = get_tarval_top();
+	} else {
+		res->vrp.bits_set = get_tarval_bad();
+		res->vrp.bits_not_set = get_tarval_bad();
+		res->vrp.range_bottom = get_tarval_bad();
+		res->vrp.range_top = get_tarval_bad();
+	}
+	res->vrp.bits_node = NULL;
+	res->vrp.range_node = NULL;
+	res->vrp.range_op = VRP_NONE;
+
 	return res;
 }
 
@@ -229,7 +252,7 @@ ir_node **get_irn_in(const ir_node *node) {
 void set_irn_in(ir_node *node, int arity, ir_node **in) {
 	int i;
 	ir_node *** pOld_in;
-	ir_graph *irg = current_ir_graph;
+	ir_graph *irg = get_irn_irg(node);
 
 	assert(node);
 #ifdef INTERPROCEDURAL_VIEW
@@ -269,11 +292,11 @@ void set_irn_in(ir_node *node, int arity, ir_node **in) {
 }
 
 ir_node *(get_irn_intra_n)(const ir_node *node, int n) {
-	return _get_irn_intra_n (node, n);
+	return _get_irn_intra_n(node, n);
 }
 
 ir_node *(get_irn_inter_n)(const ir_node *node, int n) {
-	return _get_irn_inter_n (node, n);
+	return _get_irn_inter_n(node, n);
 }
 
 ir_node *(*_get_irn_n)(const ir_node *node, int n) = _get_irn_intra_n;
@@ -288,6 +311,7 @@ void set_irn_n(ir_node *node, int n, ir_node *in) {
 	assert(n < get_irn_arity(node));
 	assert(in && in->kind == k_ir_node);
 
+#ifdef INTERPROCEDURAL_VIEW
 	if ((n == -1) && (get_irn_opcode(node) == iro_Filter)) {
 		/* Change block pred in both views! */
 		node->in[n + 1] = in;
@@ -295,7 +319,6 @@ void set_irn_n(ir_node *node, int n, ir_node *in) {
 		node->attr.filter.in_cg[n + 1] = in;
 		return;
 	}
-#ifdef INTERPROCEDURAL_VIEW
 	if (get_interprocedural_view()) { /* handle Filter and Block specially */
 		if (get_irn_opcode(node) == iro_Filter) {
 			assert(node->attr.filter.in_cg);
@@ -518,9 +541,9 @@ symconst_attr *get_irn_symconst_attr(ir_node *node) {
 	return &node->attr.symc;
 }
 
-ir_type *get_irn_call_attr(ir_node *node) {
+call_attr *get_irn_call_attr(ir_node *node) {
 	assert(is_Call(node));
-	return node->attr.call.type = skip_tid(node->attr.call.type);
+	return &node->attr.call;
 }
 
 sel_attr *get_irn_sel_attr(ir_node *node) {
@@ -782,6 +805,7 @@ ir_node *get_Block_MacroBlock(const ir_node *block) {
 /* Sets the macro block header of a block. */
 void set_Block_MacroBlock(ir_node *block, ir_node *mbh) {
 	assert(is_Block(block));
+	mbh = skip_Id(mbh);
 	assert(is_Block(mbh));
 	set_irn_n(block, -1, mbh);
 }
@@ -813,10 +837,11 @@ ir_entity *create_Block_entity(ir_node *block) {
 
 		glob = get_glob_type();
 		entity = new_entity(glob, id_unique("block_%u"), get_code_type());
+		set_entity_visibility(entity, ir_visibility_local);
+		set_entity_linkage(entity, IR_LINKAGE_CONSTANT);
 		nr = get_irp_next_label_nr();
 		set_entity_label(entity, nr);
 		set_entity_compiler_generated(entity, 1);
-		set_entity_allocation(entity, allocation_static);
 
 		block->attr.block.entity = entity;
 	}
@@ -990,49 +1015,6 @@ void set_IJmp_target(ir_node *ijmp, ir_node *tgt) {
 	set_irn_n(ijmp, 0, tgt);
 }
 
-/*
-> Implementing the case construct (which is where the constant Proj node is
-> important) involves far more than simply determining the constant values.
-> We could argue that this is more properly a function of the translator from
-> Firm to the target machine. That could be done if there was some way of
-> projecting "default" out of the Cond node.
-I know it's complicated.
-Basically there are two problems:
- - determining the gaps between the Projs
- - determining the biggest case constant to know the proj number for
-   the default node.
-I see several solutions:
-1. Introduce a ProjDefault node. Solves both problems.
-   This means to extend all optimizations executed during construction.
-2. Give the Cond node for switch two flavors:
-   a) there are no gaps in the Projs (existing flavor)
-   b) gaps may exist, default proj is still the Proj with the largest
-      projection number. This covers also the gaps.
-3. Fix the semantic of the Cond to that of 2b)
-
-Solution 2 seems to be the best:
-Computing the gaps in the Firm representation is not too hard, i.e.,
-libFIRM can implement a routine that transforms between the two
-flavours. This is also possible for 1) but 2) does not require to
-change any existing optimization.
-Further it should be far simpler to determine the biggest constant than
-to compute all gaps.
-I don't want to choose 3) as 2a) seems to have advantages for
-dataflow analysis and 3) does not allow to convert the representation to
-2a). */
-
-const char *get_cond_kind_name(cond_kind kind)
-{
-#define X(a) case a: return #a;
-	switch (kind) {
-	X(dense);
-	X(fragmentary);
-	}
-	return "";
-#undef X
-}
-
 ir_node *
 get_Cond_selector(const ir_node *node) {
 	assert(is_Cond(node));
 	return get_irn_n(node, 0);
 }
@@ -1045,18 +1027,6 @@ set_Cond_selector(ir_node *node, ir_node *selector) {
 	set_irn_n(node, 0, selector);
 }
 
-cond_kind
-get_Cond_kind(const ir_node *node) {
-	assert(is_Cond(node));
-	return node->attr.cond.kind;
-}
-
-void
-set_Cond_kind(ir_node *node, cond_kind kind) {
-	assert(is_Cond(node));
-	node->attr.cond.kind = kind;
-}
-
 long
 get_Cond_default_proj(const ir_node *node) {
 	assert(is_Cond(node));
@@ -1144,7 +1114,6 @@ int (is_Const_all_one)(const ir_node *node) {
 ir_type *
 get_Const_type(ir_node *node) {
 	assert(is_Const(node));
-	node->attr.con.tp = skip_tid(node->attr.con.tp);
 	return node->attr.con.tp;
 }
 
@@ -1178,7 +1147,7 @@ get_SymConst_type(const ir_node *node) {
 	ir_node *irn = (ir_node *)node;
 
 	assert(is_SymConst(node) && (SYMCONST_HAS_TYPE(get_SymConst_kind(node))));
-	return irn->attr.symc.sym.type_p = skip_tid(irn->attr.symc.sym.type_p);
+	return irn->attr.symc.sym.type_p;
 }
 
 void
@@ -1237,7 +1206,6 @@ set_SymConst_symbol(ir_node *node, union symconst_symbol sym) {
 ir_type *
 get_SymConst_value_type(ir_node *node) {
 	assert(is_SymConst(node));
-	if (node->attr.symc.tp) node->attr.symc.tp = skip_tid(node->attr.symc.tp);
 	return node->attr.symc.tp;
 }
 
@@ -1375,7 +1343,7 @@ set_Call_param(ir_node *node, int pos, ir_node *param) {
 ir_type *
 get_Call_type(ir_node *node) {
 	assert(is_Call(node));
-	return node->attr.call.type = skip_tid(node->attr.call.type);
+	return node->attr.call.type;
 }
 
 void
@@ -1385,6 +1353,18 @@ set_Call_type(ir_node *node, ir_type *tp) {
 	node->attr.call.type = tp;
 }
 
+unsigned
+get_Call_tail_call(const ir_node *node) {
+	assert(is_Call(node));
+	return node->attr.call.tail_call;
+}
+
+void
+set_Call_tail_call(ir_node *node, unsigned tail_call) {
+	assert(is_Call(node));
+	node->attr.call.tail_call = tail_call != 0;
+}
+
 ir_node *
 get_Builtin_mem(const ir_node *node) {
 	assert(is_Builtin(node));
@@ -1436,7 +1416,7 @@ set_Builtin_param(ir_node *node, int pos, ir_node *param) {
 ir_type *
 get_Builtin_type(ir_node *node) {
 	assert(is_Builtin(node));
-	return node->attr.builtin.type = skip_tid(node->attr.builtin.type);
+	return node->attr.builtin.type;
 }
 
 void
@@ -1639,7 +1619,6 @@ void set_Conv_strict(ir_node *node, int strict_flag) {
 ir_type *
 get_Cast_type(ir_node *node) {
 	assert(is_Cast(node));
-	node->attr.cast.type = skip_tid(node->attr.cast.type);
 	return node->attr.cast.type;
 }
 
@@ -1965,7 +1944,7 @@ set_Alloc_size(ir_node *node, ir_node *size) {
 ir_type *
 get_Alloc_type(ir_node *node) {
 	assert(is_Alloc(node));
-	return node->attr.alloc.type = skip_tid(node->attr.alloc.type);
+	return node->attr.alloc.type;
 }
 
 void
@@ -2026,7 +2005,7 @@ set_Free_size(ir_node *node, ir_node *size) {
 ir_type *
 get_Free_type(ir_node *node) {
 	assert(is_Free(node));
-	return node->attr.free.type = skip_tid(node->attr.free.type);
+	return node->attr.free.type;
 }
 
 void
@@ -2359,7 +2338,7 @@ void set_CopyB_src(ir_node *node, ir_node *src) {
 
 ir_type *get_CopyB_type(ir_node *node) {
 	assert(is_CopyB(node));
-	return node->attr.copyb.type = skip_tid(node->attr.copyb.type);
+	return node->attr.copyb.type;
 }
 
 void set_CopyB_type(ir_node *node, ir_type *data_type) {
@@ -2371,7 +2350,7 @@ void set_CopyB_type(ir_node *node, ir_type *data_type) {
 ir_type *
 get_InstOf_type(ir_node *node) {
 	assert(node->op == op_InstOf);
-	return node->attr.instof.type = skip_tid(node->attr.instof.type);
+	return node->attr.instof.type;
 }
 
 void
@@ -2738,7 +2717,7 @@ ir_node *get_fragile_op_mem(ir_node *node) {
 	case iro_Alloc :
 	case iro_Bound :
 	case iro_CopyB :
-		return get_irn_n(node, pn_Generic_M_regular);
+		return get_irn_n(node, pn_Generic_M);
 	case iro_Bad :
 	case iro_Unknown:
 		return node;
@@ -2823,6 +2802,10 @@ int (is_irn_machine_user)(const ir_node *node, unsigned n) {
 	return _is_irn_machine_user(node, n);
 }
 
+/* Returns non-zero for nodes that are CSE neutral to its users. */
+int (is_irn_cse_neutral)(const ir_node *node) {
+	return _is_irn_cse_neutral(node);
+}
 
 /* Gets the string representation of the jump prediction .*/
 const char *get_cond_jmp_predicate_name(cond_jmp_predicate pred) {
@@ -2963,7 +2946,11 @@ unsigned firm_default_hash(const ir_node *node) {
 
 	/* consider all in nodes... except the block if not a control flow. */
 	for (i = is_cfop(node) ? -1 : 0; i < irn_arity; ++i) {
-		h = 9*h + HASH_PTR(get_irn_intra_n(node, i));
+		ir_node *pred = get_irn_intra_n(node, i);
+		if (is_irn_cse_neutral(pred))
+			h *= 9;
+		else
+			h = 9*h + HASH_PTR(pred);
 	}
 
 	/* ...mode,... */
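
The last hunk above changes firm_default_hash() so that a CSE-neutral predecessor no longer contributes its pointer identity to the hash, only the multiplication step. Below is a minimal standalone sketch of that idea in plain C; it is not libFIRM code, and node_t, N_PREDS, hash_node and the cse_neutral field are hypothetical stand-ins used only for illustration. The point it demonstrates: two nodes that differ solely in a CSE-neutral predecessor receive the same hash value, so they can land in the same bucket of a common-subexpression table and be considered for CSE.

#include <stdint.h>
#include <stdio.h>

#define N_PREDS 3

typedef struct node_t {
	int                  cse_neutral;      /* non-zero: users may treat this node as CSE neutral */
	const struct node_t *preds[N_PREDS];   /* predecessor nodes */
} node_t;

static unsigned hash_node(const node_t *n)
{
	unsigned h = 0;
	for (int i = 0; i < N_PREDS; ++i) {
		const node_t *pred = n->preds[i];
		if (pred->cse_neutral)
			h *= 9;                                 /* keep the position, drop the identity */
		else
			h = 9 * h + (unsigned)(uintptr_t)pred;  /* mix in the pointer, like HASH_PTR */
	}
	return h;
}

int main(void)
{
	node_t a        = { 0, { NULL, NULL, NULL } };
	node_t b        = { 0, { NULL, NULL, NULL } };
	node_t neutral1 = { 1, { NULL, NULL, NULL } };
	node_t neutral2 = { 1, { NULL, NULL, NULL } };

	/* x and y differ only in a CSE-neutral predecessor... */
	node_t x = { 0, { &a, &neutral1, &b } };
	node_t y = { 0, { &a, &neutral2, &b } };

	/* ...so both end up with the same hash value. */
	printf("hash(x) = %u\nhash(y) = %u\n", hash_node(&x), hash_node(&y));
	return 0;
}

Built with any C99 compiler, the sketch prints two identical hash values, which is exactly the property the modified hash loop relies on.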