X-Git-Url: http://nsz.repo.hu/git/?a=blobdiff_plain;f=ir%2Fir%2Firnode.c;h=2cd7b29d0e5cbd345152e6936db89a10a19c8975;hb=f9d25133f86594ca2b1f33fb0b41a591ecc9b914;hp=ce3965653cbed882d78d425100378f5c1fcb6ee0;hpb=8afc9e4660cd0eba5e07e99796ae91a92e6e49c3;p=libfirm diff --git a/ir/ir/irnode.c b/ir/ir/irnode.c index ce3965653..2cd7b29d0 100644 --- a/ir/ir/irnode.c +++ b/ir/ir/irnode.c @@ -11,9 +11,12 @@ */ #ifdef HAVE_CONFIG_H -# include +# include "config.h" +#endif + +#ifdef HAVE_STRING_H +# include #endif -#include #include "ident.h" #include "irnode_t.h" @@ -24,8 +27,9 @@ #include "irdump.h" #include "irop_t.h" #include "irprog_t.h" +#include "iredges_t.h" -#include "firmstat.h" +#include "irhooks.h" /* some constants fixing the positions of nodes predecessors in the in array */ @@ -49,30 +53,29 @@ const char *get_pnc_string(int pnc) { return pnc_name_arr[pnc]; } -/** - * Calculates the negated pnc condition. +/* + * Calculates the negated (Complement(R)) pnc condition. */ +int get_negated_pnc(int pnc, ir_mode *mode) { + pnc ^= pn_Cmp_True; + + /* do NOT add the Uo bit for non-floating point values */ + if (! mode_is_float(mode)) + pnc &= ~pn_Cmp_Uo; + + return pnc; +} + +/* Calculates the inversed (R^-1) pnc condition, i.e., "<" --> ">" */ int -get_negated_pnc(int pnc) { - switch (pnc) { - case False: return True; break; - case Eq: return Ne; break; - case Lt: return Uge; break; - case Le: return Ug; break; - case Gt: return Ule; break; - case Ge: return Ul; break; - case Lg: return Ue; break; - case Leg: return Uo; break; - case Uo: return Leg; break; - case Ue: return Lg; break; - case Ul: return Ge; break; - case Ule: return Gt; break; - case Ug: return Le; break; - case Uge: return Lt; break; - case Ne: return Eq; break; - case True: return False; break; - } - return 99; /* to shut up gcc */ +get_inversed_pnc(int pnc) { + int code = pnc & ~(pn_Cmp_Lt|pn_Cmp_Gt); + int lesser = pnc & pn_Cmp_Lt; + int greater = pnc & pn_Cmp_Gt; + + code |= (lesser ? pn_Cmp_Gt : 0) | (greater ? pn_Cmp_Lt : 0); + + return code; } const char *pns_name_arr [] = { @@ -84,9 +87,34 @@ const char *symconst_name_arr [] = { "type_tag", "size", "addr_name", "addr_ent" }; +/** + * Indicates, whether additional data can be registered to ir nodes. + * If set to 1, this is not possible anymore. + */ +static int forbid_new_data = 0; + +/** + * The amount of additional space for custom data to be allocated upon + * creating a new node. + */ +unsigned firm_add_node_size = 0; + + +/* register new space for every node */ +unsigned register_additional_node_data(unsigned size) { + assert(!forbid_new_data && "Too late to register additional node data"); + + if (forbid_new_data) + return 0; + + return firm_add_node_size += size; +} + + void -init_irnode (void) -{ +init_irnode(void) { + /* Forbid the addition of new data to an ir node. 
*/ + forbid_new_data = 1; } /* @@ -100,23 +128,26 @@ new_ir_node (dbg_info *db, ir_graph *irg, ir_node *block, ir_op *op, ir_mode *mo int arity, ir_node **in) { ir_node *res; - int node_size = offsetof (ir_node, attr) + op->attr_size; + size_t node_size = offsetof(ir_node, attr) + op->attr_size + firm_add_node_size; + char *p; assert(irg && op && mode); - res = (ir_node *) obstack_alloc (irg->obst, node_size); - memset((void *)res, 0, node_size); + p = obstack_alloc (irg->obst, node_size); + memset(p, 0, node_size); + res = (ir_node *) (p + firm_add_node_size); - res->kind = k_ir_node; - res->op = op; - res->mode = mode; + res->kind = k_ir_node; + res->op = op; + res->mode = mode; res->visited = 0; - res->link = NULL; + res->link = NULL; if (arity < 0) { res->in = NEW_ARR_F (ir_node *, 1); /* 1: space for block */ } else { res->in = NEW_ARR_D (ir_node *, irg->obst, (arity+1)); memcpy (&res->in[1], in, sizeof (ir_node *) * arity); } + res->in[0] = block; set_irn_dbg_info(res, db); res->out = NULL; @@ -125,39 +156,48 @@ new_ir_node (dbg_info *db, ir_graph *irg, ir_node *block, ir_op *op, ir_mode *mo res->node_nr = get_irp_new_node_nr(); #endif - stat_new_node(res); +#if FIRM_EDGES_INPLACE + { + int i, n; + int not_a_block = is_no_Block(res); - return res; -} + INIT_LIST_HEAD(&res->edge_info.outs_head); + if(!not_a_block) + INIT_LIST_HEAD(&res->attr.block.succ_head); -/* Copies all attributes stored in the old node to the new node. - Assumes both have the same opcode and sufficient size. */ -void -copy_attrs (const ir_node *old_node, ir_node *new_node) { - assert(get_irn_op(old_node) == get_irn_op(new_node)); - memcpy(&new_node->attr, &old_node->attr, get_op_attr_size(get_irn_op(old_node))); + + for (i = 0, n = arity + not_a_block; i < n; ++i) + edges_notify_edge(res, i - not_a_block, res->in[i], NULL, irg); + } +#endif + + hook_new_node(irg, res); + + return res; } /*-- getting some parameters from ir_nodes --*/ int (is_ir_node)(const void *thing) { - return __is_ir_node(thing); + return _is_ir_node(thing); } int (get_irn_intra_arity)(const ir_node *node) { - return __get_irn_intra_arity(node); + return _get_irn_intra_arity(node); } int (get_irn_inter_arity)(const ir_node *node) { - return __get_irn_inter_arity(node); + return _get_irn_inter_arity(node); } +int (*_get_irn_arity)(const ir_node *node) = _get_irn_intra_arity; + int (get_irn_arity)(const ir_node *node) { - return __get_irn_arity(node); + return _get_irn_arity(node); } /* Returns the array with ins. 
This array is shifted with respect to the @@ -169,7 +209,7 @@ int ir_node ** get_irn_in (const ir_node *node) { assert(node); - if (interprocedural_view) { /* handle Filter and Block specially */ + if (get_interprocedural_view()) { /* handle Filter and Block specially */ if (get_irn_opcode(node) == iro_Filter) { assert(node->attr.filter.in_cg); return node->attr.filter.in_cg; @@ -183,9 +223,10 @@ get_irn_in (const ir_node *node) { void set_irn_in (ir_node *node, int arity, ir_node **in) { + int i; ir_node *** arr; assert(node); - if (interprocedural_view) { /* handle Filter and Block specially */ + if (get_interprocedural_view()) { /* handle Filter and Block specially */ if (get_irn_opcode(node) == iro_Filter) { assert(node->attr.filter.in_cg); arr = &node->attr.filter.in_cg; @@ -203,28 +244,38 @@ set_irn_in (ir_node *node, int arity, ir_node **in) { (*arr)[0] = block; } fix_backedges(current_ir_graph->obst, node); + + for (i = 0; i < arity; i++) { + edges_notify_edge(node, i, in[i], (*arr)[i+1], current_ir_graph); + } + memcpy((*arr) + 1, in, sizeof(ir_node *) * arity); } ir_node * -(get_irn_intra_n)(ir_node *node, int n) { - return __get_irn_intra_n (node, n); +(get_irn_intra_n)(const ir_node *node, int n) { + return _get_irn_intra_n (node, n); } ir_node * -(get_irn_inter_n)(ir_node *node, int n) { - return __get_irn_inter_n (node, n); +(get_irn_inter_n)(const ir_node *node, int n) { + return _get_irn_inter_n (node, n); } +ir_node *(*_get_irn_n)(const ir_node *node, int n) = _get_irn_intra_n; + ir_node * -(get_irn_n)(ir_node *node, int n) { - return __get_irn_n (node, n); +(get_irn_n)(const ir_node *node, int n) { + return _get_irn_n(node, n); } void set_irn_n (ir_node *node, int n, ir_node *in) { - assert(node && node->kind == k_ir_node && -1 <= n && n < get_irn_arity(node)); + assert(node && node->kind == k_ir_node); + assert(-1 <= n); + assert(n < get_irn_arity(node)); assert(in && in->kind == k_ir_node); + if ((n == -1) && (get_irn_opcode(node) == iro_Filter)) { /* Change block pred in both views! 
*/ node->in[n + 1] = in; @@ -232,7 +283,7 @@ set_irn_n (ir_node *node, int n, ir_node *in) { node->attr.filter.in_cg[n + 1] = in; return; } - if (interprocedural_view) { /* handle Filter and Block specially */ + if (get_interprocedural_view()) { /* handle Filter and Block specially */ if (get_irn_opcode(node) == iro_Filter) { assert(node->attr.filter.in_cg); node->attr.filter.in_cg[n + 1] = in; @@ -243,18 +294,25 @@ set_irn_n (ir_node *node, int n, ir_node *in) { } /* else fall through */ } + + /* Call the hook */ + hook_set_irn_n(node, n, in, node->in[n + 1]); + + /* Here, we rely on src and tgt being in the current ir graph */ + edges_notify_edge(node, n, in, node->in[n + 1], current_ir_graph); + node->in[n + 1] = in; } ir_mode * (get_irn_mode)(const ir_node *node) { - return __get_irn_mode(node); + return _get_irn_mode(node); } void (set_irn_mode)(ir_node *node, ir_mode *mode) { - __set_irn_mode(node, mode); + _set_irn_mode(node, mode); } modecode @@ -282,7 +340,7 @@ get_irn_modeident (const ir_node *node) ir_op * (get_irn_op)(const ir_node *node) { - return __get_irn_op(node); + return _get_irn_op(node); } /* should be private to the library: */ @@ -296,7 +354,7 @@ set_irn_op (ir_node *node, ir_op *op) opcode (get_irn_opcode)(const ir_node *node) { - return __get_irn_opcode(node); + return _get_irn_opcode(node); } const char * @@ -319,43 +377,48 @@ get_irn_opident (const ir_node *node) unsigned long (get_irn_visited)(const ir_node *node) { - return __get_irn_visited(node); + return _get_irn_visited(node); } void (set_irn_visited)(ir_node *node, unsigned long visited) { - __set_irn_visited(node, visited); + _set_irn_visited(node, visited); } void (mark_irn_visited)(ir_node *node) { - __mark_irn_visited(node); + _mark_irn_visited(node); } int (irn_not_visited)(const ir_node *node) { - return __irn_not_visited(node); + return _irn_not_visited(node); } int (irn_visited)(const ir_node *node) { - return __irn_visited(node); + return _irn_visited(node); } void (set_irn_link)(ir_node *node, void *link) { - __set_irn_link(node, link); + _set_irn_link(node, link); } void * (get_irn_link)(const ir_node *node) { - return __get_irn_link(node); + return _get_irn_link(node); } op_pin_state (get_irn_pinned)(const ir_node *node) { - return __get_irn_pinned(node); + return _get_irn_pinned(node); +} + +op_pin_state +(is_irn_pinned_in_irg) (const ir_node *node) { + return _is_irn_pinned_in_irg(node); } void set_irn_pinned(ir_node *node, op_pin_state state) { @@ -363,7 +426,7 @@ void set_irn_pinned(ir_node *node, op_pin_state state) { if (get_irn_op(node) == op_Tuple) return; - assert(node && get_op_pinned(get_irn_op(node)) == op_pin_state_exc_pinned); + assert(node && get_op_pinned(get_irn_op(node)) >= op_pin_state_exc_pinned); assert(state == op_pin_state_pinned || state == op_pin_state_floats); node->attr.except.pin_state = state; @@ -385,6 +448,12 @@ struct section *firm_get_irn_section(ir_node *n) { void firm_set_irn_section(ir_node *n, struct section *s) { n->sec = s; } +#else +/* Dummies needed for firmjni. 
*/ +struct abstval *get_irn_abst_value(ir_node *n) { return NULL; } +void set_irn_abst_value(ir_node *n, struct abstval *os) {} +struct section *firm_get_irn_section(ir_node *n) { return NULL; } +void firm_set_irn_section(ir_node *n, struct section *s) {} #endif /* DO_HEAPANALYSIS */ @@ -420,11 +489,11 @@ get_irn_alloc_attr (ir_node *node) return node->attr.a; } -type * +free_attr get_irn_free_attr (ir_node *node) { assert (node->op == op_Free); - return node->attr.f = skip_tid(node->attr.f); + return node->attr.f; } symconst_attr @@ -441,13 +510,6 @@ get_irn_call_attr (ir_node *node) return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp); } -type * -get_irn_funccall_attr (ir_node *node) -{ - assert (node->op == op_FuncCall); - return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp); -} - sel_attr get_irn_sel_attr (ir_node *node) { @@ -487,16 +549,22 @@ except_attr get_irn_except_attr (ir_node *node) { assert (node->op == op_Div || node->op == op_Quot || - node->op == op_DivMod || node->op == op_Mod); + node->op == op_DivMod || node->op == op_Mod || node->op == op_Call || node->op == op_Alloc); return node->attr.except; } +void * +get_irn_generic_attr (ir_node *node) { + return &node->attr; +} + /** manipulate fields of individual nodes **/ /* this works for all except Block */ ir_node * -get_nodes_block (ir_node *node) { +get_nodes_block (const ir_node *node) { assert (!(node->op == op_Block)); + assert (is_irn_pinned_in_irg(node) && "block info may be incorrect"); return get_irn_n(node, -1); } @@ -553,19 +621,14 @@ get_Block_cfgpred_arr (ir_node *node) return (ir_node **)&(get_irn_in(node)[1]); } - int -get_Block_n_cfgpreds (ir_node *node) { - assert ((node->op == op_Block)); - return get_irn_arity(node); +(get_Block_n_cfgpreds)(ir_node *node) { + return get_Block_n_cfgpreds(node); } ir_node * -get_Block_cfgpred (ir_node *node, int pos) { - assert(node); - assert (node->op == op_Block); - assert(-1 <= pos && pos < get_irn_arity(node)); - return get_irn_n(node, pos); +(get_Block_cfgpred)(ir_node *node, int pos) { + return get_Block_cfgpred(node, pos); } void @@ -574,6 +637,11 @@ set_Block_cfgpred (ir_node *node, int pos, ir_node *pred) { set_irn_n(node, pos, pred); } +ir_node * +(get_Block_cfgpred_block)(ir_node *node, int pos) { + return _get_Block_cfgpred_block(node, pos); +} + bool get_Block_matured (ir_node *node) { assert (node->op == op_Block); @@ -585,29 +653,26 @@ set_Block_matured (ir_node *node, bool matured) { assert (node->op == op_Block); node->attr.block.matured = matured; } + unsigned long -get_Block_block_visited (ir_node *node) { - assert (node->op == op_Block); - return node->attr.block.block_visited; +(get_Block_block_visited)(ir_node *node) { + return _get_Block_block_visited(node); } void -set_Block_block_visited (ir_node *node, unsigned long visit) { - assert (node->op == op_Block); - node->attr.block.block_visited = visit; +(set_Block_block_visited)(ir_node *node, unsigned long visit) { + _set_Block_block_visited(node, visit); } /* For this current_ir_graph must be set. 
*/ void -mark_Block_block_visited (ir_node *node) { - assert (node->op == op_Block); - node->attr.block.block_visited = get_irg_block_visited(current_ir_graph); +(mark_Block_block_visited)(ir_node *node) { + _mark_Block_block_visited(node); } int -Block_not_block_visited(ir_node *node) { - assert (node->op == op_Block); - return (node->attr.block.block_visited < get_irg_block_visited(current_ir_graph)); +(Block_not_block_visited)(ir_node *node) { + return _Block_not_block_visited(node); } ir_node * @@ -629,12 +694,12 @@ void set_Block_cg_cfgpred_arr(ir_node * node, int arity, ir_node ** in) { node->attr.block.in_cg[0] = NULL; node->attr.block.cg_backedge = new_backedge_arr(current_ir_graph->obst, arity); { - /* Fix backedge array. fix_backedges operates depending on + /* Fix backedge array. fix_backedges() operates depending on interprocedural_view. */ - bool ipv = interprocedural_view; - interprocedural_view = true; + int ipv = get_interprocedural_view(); + set_interprocedural_view(true); fix_backedges(current_ir_graph->obst, node); - interprocedural_view = ipv; + set_interprocedural_view(ipv); } } memcpy(node->attr.block.in_cg + 1, in, sizeof(ir_node *) * arity); @@ -667,6 +732,24 @@ void remove_Block_cg_cfgpred_arr(ir_node * node) { node->attr.block.in_cg = NULL; } +ir_node *(set_Block_dead)(ir_node *block) { + return _set_Block_dead(block); +} + +int (is_Block_dead)(const ir_node *block) { + return _is_Block_dead(block); +} + +ir_extblk *get_Block_extbb(const ir_node *block) { + assert(is_Block(block)); + return block->attr.block.extblk; +} + +void set_Block_extbb(ir_node *block, ir_extblk *extblk) { + assert(is_Block(block)); + block->attr.block.extblk = extblk; +} + void set_Start_irg(ir_node *node, ir_graph *irg) { assert(node->op == op_Start); @@ -707,6 +790,17 @@ free_End (ir_node *end) { in array afterwards ... */ } +/* Return the target address of an IJmp */ +ir_node *get_IJmp_target(ir_node *ijmp) { + assert(ijmp->op == op_IJmp); + return get_irn_n(ijmp, 0); +} + +/** Sets the target address of an IJmp */ +void set_IJmp_target(ir_node *ijmp, ir_node *tgt) { + assert(ijmp->op == op_IJmp); + set_irn_n(ijmp, 0, tgt); +} /* > Implementing the case construct (which is where the constant Proj node is @@ -841,9 +935,8 @@ set_Raise_exo_ptr (ir_node *node, ir_node *exo_ptr) { set_irn_n(node, 1, exo_ptr); } -tarval *get_Const_tarval (ir_node *node) { - assert (node->op == op_Const); - return node->attr.con.tv; +tarval *(get_Const_tarval)(ir_node *node) { + return _get_Const_tarval(node); } void @@ -852,6 +945,11 @@ set_Const_tarval (ir_node *node, tarval *con) { node->attr.con.tv = con; } +cnst_classify_t (classify_Const)(ir_node *node) +{ + return _classify_Const(node); +} + /* The source language type. Must be an atomic type. Mode of type must be mode of node. 
For tarvals from entities type must be pointer to @@ -865,11 +963,10 @@ get_Const_type (ir_node *node) { void set_Const_type (ir_node *node, type *tp) { assert (node->op == op_Const); - if (tp != unknown_type) { + if (tp != firm_unknown_type) { assert (is_atomic_type(tp)); assert (get_type_mode(tp) == get_irn_mode(node)); } - node->attr.con.tp = tp; } @@ -930,7 +1027,6 @@ void set_SymConst_entity (ir_node *node, entity *ent) { node->attr.i.sym.entity_p = ent; } - union symconst_symbol get_SymConst_symbol (ir_node *node) { assert (node->op == op_SymConst); @@ -944,6 +1040,19 @@ set_SymConst_symbol (ir_node *node, union symconst_symbol sym) { node->attr.i.sym = sym; } +type * +get_SymConst_value_type (ir_node *node) { + assert (node->op == op_SymConst); + if (node->attr.i.tp) node->attr.i.tp = skip_tid(node->attr.i.tp); + return node->attr.i.tp; +} + +void +set_SymConst_value_type (ir_node *node, type *tp) { + assert (node->op == op_SymConst); + node->attr.i.tp = tp; +} + ir_node * get_Sel_mem (ir_node *node) { assert (node->op == op_Sel); @@ -1122,7 +1231,7 @@ get_Call_type (ir_node *node) { void set_Call_type (ir_node *node, type *tp) { assert (node->op == op_Call); - assert (is_method_type(tp)); + assert ((get_unknown_type() == tp) || is_Method_type(tp)); node->attr.call.cld_tp = tp; } @@ -1172,95 +1281,6 @@ void set_CallBegin_call (ir_node *node, ir_node *call) { node->attr.callbegin.call = call; } -ir_node * -get_FuncCall_ptr (ir_node *node) { - assert (node->op == op_FuncCall); - return get_irn_n(node, 0); -} - -void -set_FuncCall_ptr (ir_node *node, ir_node *ptr) { - assert (node->op == op_FuncCall); - set_irn_n(node, 0, ptr); -} - -ir_node ** -get_FuncCall_param_arr (ir_node *node) { - assert (node->op == op_FuncCall); - return (ir_node **)&get_irn_in(node)[FUNCCALL_PARAM_OFFSET]; -} - -int -get_FuncCall_n_params (ir_node *node) { - assert (node->op == op_FuncCall); - return (get_irn_arity(node) - FUNCCALL_PARAM_OFFSET); -} - -int -get_FuncCall_arity (ir_node *node) { - assert (node->op == op_FuncCall); - return get_FuncCall_n_params(node); -} - -/* void -set_FuncCall_arity (ir_node *node, ir_node *arity) { - assert (node->op == op_FuncCall); -} -*/ - -ir_node * -get_FuncCall_param (ir_node *node, int pos) { - assert (node->op == op_FuncCall); - return get_irn_n(node, pos + FUNCCALL_PARAM_OFFSET); -} - -void -set_FuncCall_param (ir_node *node, int pos, ir_node *param) { - assert (node->op == op_FuncCall); - set_irn_n(node, pos + FUNCCALL_PARAM_OFFSET, param); -} - -type * -get_FuncCall_type (ir_node *node) { - assert (node->op == op_FuncCall); - return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp); -} - -void -set_FuncCall_type (ir_node *node, type *tp) { - assert (node->op == op_FuncCall); - assert (is_method_type(tp)); - node->attr.call.cld_tp = tp; -} - -int FuncCall_has_callees(ir_node *node) { - return ((get_irg_callee_info_state(get_irn_irg(node)) != irg_callee_info_none) && - (node->attr.call.callee_arr != NULL)); -} - -int get_FuncCall_n_callees(ir_node * node) { - assert(node->op == op_FuncCall && node->attr.call.callee_arr); - return ARR_LEN(node->attr.call.callee_arr); -} - -entity * get_FuncCall_callee(ir_node * node, int pos) { - assert(node->op == op_FuncCall && node->attr.call.callee_arr); - return node->attr.call.callee_arr[pos]; -} - -void set_FuncCall_callee_arr(ir_node * node, int n, entity ** arr) { - assert(node->op == op_FuncCall); - if (node->attr.call.callee_arr == NULL || get_Call_n_callees(node) != n) { - node->attr.call.callee_arr = 
NEW_ARR_D(entity *, current_ir_graph->obst, n); - } - memcpy(node->attr.call.callee_arr, arr, n * sizeof(entity *)); -} - -void remove_FuncCall_callee_arr(ir_node * node) { - assert(node->op == op_FuncCall); - node->attr.call.callee_arr = NULL; -} - #define BINOP(OP) \ ir_node * get_##OP##_left(ir_node *node) { \ @@ -1375,9 +1395,55 @@ set_Cast_type (ir_node *node, type *to_tp) { node->attr.cast.totype = to_tp; } + +/* Checks for upcast. + * + * Returns true if the Cast node casts a class type to a super type. + */ +int is_Cast_upcast(ir_node *node) { + type *totype = get_Cast_type(node); + type *fromtype = get_irn_typeinfo_type(get_Cast_op(node)); + ir_graph *myirg = get_irn_irg(node); + + assert(get_irg_typeinfo_state(myirg) == ir_typeinfo_consistent); + assert(fromtype); + + while (is_Pointer_type(totype) && is_Pointer_type(fromtype)) { + totype = get_pointer_points_to_type(totype); + fromtype = get_pointer_points_to_type(fromtype); + } + + assert(fromtype); + + if (!is_Class_type(totype)) return false; + return is_subclass_of(fromtype, totype); +} + +/* Checks for downcast. + * + * Returns true if the Cast node casts a class type to a sub type. + */ +int is_Cast_downcast(ir_node *node) { + type *totype = get_Cast_type(node); + type *fromtype = get_irn_typeinfo_type(get_Cast_op(node)); + + assert(get_irg_typeinfo_state(get_irn_irg(node)) == ir_typeinfo_consistent); + assert(fromtype); + + while (is_Pointer_type(totype) && is_Pointer_type(fromtype)) { + totype = get_pointer_points_to_type(totype); + fromtype = get_pointer_points_to_type(fromtype); + } + + assert(fromtype); + + if (!is_Class_type(totype)) return false; + return is_subclass_of(totype, fromtype); +} + int -is_unop (ir_node *node) { - return (node->op->opar == oparity_unary); +(is_unop)(const ir_node *node) { + return _is_unop(node); } ir_node * @@ -1398,8 +1464,8 @@ set_unop_op (ir_node *node, ir_node *op) { } int -is_binop (ir_node *node) { - return (node->op->opar == oparity_binary); +(is_binop)(const ir_node *node) { + return _is_binop(node); } ir_node * @@ -1436,13 +1502,13 @@ set_binop_right (ir_node *node, ir_node *right) { assert (node->op->opar == oparity_binary); } -int is_Phi (ir_node *n) { +int is_Phi (const ir_node *n) { ir_op *op; assert(n); op = get_irn_op(n); - if (op == op_Filter) return interprocedural_view; + if (op == op_Filter) return get_interprocedural_view(); if (op == op_Phi) return ((get_irg_phase_state(get_irn_irg(n)) != phase_building) || @@ -1451,7 +1517,7 @@ int is_Phi (ir_node *n) { return 0; } -int is_Phi0 (ir_node *n) { +int is_Phi0 (const ir_node *n) { assert(n); return ((get_irn_op(n) == op_Phi) && @@ -1700,13 +1766,25 @@ set_Free_size (ir_node *node, ir_node *size) { type * get_Free_type (ir_node *node) { assert (node->op == op_Free); - return node->attr.f = skip_tid(node->attr.f); + return node->attr.f.type = skip_tid(node->attr.f.type); } void set_Free_type (ir_node *node, type *tp) { assert (node->op == op_Free); - node->attr.f = tp; + node->attr.f.type = tp; +} + +where_alloc +get_Free_where (ir_node *node) { + assert (node->op == op_Free); + return node->attr.f.where; +} + +void +set_Free_where (ir_node *node, where_alloc where) { + assert (node->op == op_Free); + node->attr.f.where = where; } ir_node ** @@ -1740,8 +1818,40 @@ set_Sync_pred (ir_node *node, int pos, ir_node *pred) { set_irn_n(node, pos, pred); } +type *get_Proj_type(ir_node *n) +{ + type *tp = NULL; + ir_node *pred = get_Proj_pred(n); + + switch (get_irn_opcode(pred)) { + case iro_Proj: { + ir_node *pred_pred; + /* 
Deal with Start / Call here: we need to know the Proj Nr. */ + assert(get_irn_mode(pred) == mode_T); + pred_pred = get_Proj_pred(pred); + if (get_irn_op(pred_pred) == op_Start) { + type *mtp = get_entity_type(get_irg_entity(get_irn_irg(pred_pred))); + tp = get_method_param_type(mtp, get_Proj_proj(n)); + } else if (get_irn_op(pred_pred) == op_Call) { + type *mtp = get_Call_type(pred_pred); + tp = get_method_res_type(mtp, get_Proj_proj(n)); + } + } break; + case iro_Start: break; + case iro_Call: break; + case iro_Load: { + ir_node *a = get_Load_ptr(pred); + if (get_irn_op(a) == op_Sel) + tp = get_entity_type(get_Sel_entity(a)); + } break; + default: + break; + } + return tp; +} + ir_node * -get_Proj_pred (ir_node *node) { +get_Proj_pred (const ir_node *node) { assert (is_Proj(node)); return get_irn_n(node, 0); } @@ -1753,7 +1863,7 @@ set_Proj_pred (ir_node *node, ir_node *pred) { } long -get_Proj_proj (ir_node *node) { +get_Proj_proj (const ir_node *node) { assert (is_Proj(node)); if (get_irn_opcode(node) == iro_Proj) { return node->attr.proj; @@ -1890,13 +2000,88 @@ ir_node *get_Filter_cg_pred(ir_node *node, int pos) { return node->attr.filter.in_cg[pos + 1]; } +/* Mux support */ +ir_node *get_Mux_sel (ir_node *node) { + assert(node->op == op_Mux); + return node->in[1]; +} +void set_Mux_sel (ir_node *node, ir_node *sel) { + assert(node->op == op_Mux); + node->in[1] = sel; +} + +ir_node *get_Mux_false (ir_node *node) { + assert(node->op == op_Mux); + return node->in[2]; +} +void set_Mux_false (ir_node *node, ir_node *ir_false) { + assert(node->op == op_Mux); + node->in[2] = ir_false; +} + +ir_node *get_Mux_true (ir_node *node) { + assert(node->op == op_Mux); + return node->in[3]; +} +void set_Mux_true (ir_node *node, ir_node *ir_true) { + assert(node->op == op_Mux); + node->in[3] = ir_true; +} + +/* CopyB support */ +ir_node *get_CopyB_mem (ir_node *node) { + assert (node->op == op_CopyB); + return get_irn_n(node, 0); +} + +void set_CopyB_mem (ir_node *node, ir_node *mem) { + assert (node->op == op_CopyB); + set_irn_n(node, 0, mem); +} + +ir_node *get_CopyB_dst (ir_node *node) { + assert (node->op == op_CopyB); + return get_irn_n(node, 1); +} + +void set_CopyB_dst (ir_node *node, ir_node *dst) { + assert (node->op == op_CopyB); + set_irn_n(node, 1, dst); +} + +ir_node *get_CopyB_src (ir_node *node) { + assert (node->op == op_CopyB); + return get_irn_n(node, 2); +} + +void set_CopyB_src (ir_node *node, ir_node *src) { + assert (node->op == op_CopyB); + set_irn_n(node, 2, src); +} + +type *get_CopyB_type(ir_node *node) { + assert (node->op == op_CopyB); + return node->attr.copyb.data_type; +} + +void set_CopyB_type(ir_node *node, type *data_type) { + assert (node->op == op_CopyB && data_type); + node->attr.copyb.data_type = data_type; +} + ir_graph * -get_irn_irg(ir_node *node) { - if (get_irn_op(node) != op_Block) - node = get_nodes_block(node); +get_irn_irg(const ir_node *node) { + /* + * Do not use get_nodes_Block() here, because this + * will check the pinned state. + * However even a 'wrong' block is always in the proper + * irg. + */ + if (! 
is_Block(node)) + node = get_irn_n(node, -1); if (is_Bad(node)) /* sometimes bad is predecessor of nodes instead of block: in case of optimization */ - node = get_nodes_block(node); + node = get_irn_n(node, -1); assert(get_irn_op(node) == op_Block); return node->attr.block.irg; } @@ -1919,27 +2104,56 @@ skip_Proj (ir_node *node) { ir_node * skip_Tuple (ir_node *node) { ir_node *pred; + ir_op *op; if (!get_opt_normalize()) return node; +restart: node = skip_Id(node); if (get_irn_op(node) == op_Proj) { pred = skip_Id(get_Proj_pred(node)); - if (get_irn_op(pred) == op_Proj) /* nested Tuple ? */ + op = get_irn_op(pred); + + /* + * Looks strange but calls get_irn_op() only once + * in most often cases. + */ + if (op == op_Proj) { /* nested Tuple ? */ pred = skip_Id(skip_Tuple(pred)); - if (get_irn_op(pred) == op_Tuple) - return get_Tuple_pred(pred, get_Proj_proj(node)); + op = get_irn_op(pred); + + if (op == op_Tuple) { + node = get_Tuple_pred(pred, get_Proj_proj(node)); + goto restart; + } + } + else if (op == op_Tuple) { + node = get_Tuple_pred(pred, get_Proj_proj(node)); + goto restart; + } } return node; } -/** returns operand of node if node is a Cast */ +/* returns operand of node if node is a Cast */ ir_node *skip_Cast (ir_node *node) { - if (node && get_irn_op(node) == op_Cast) { - return skip_Id(get_irn_n(node, 0)); - } else { - return node; - } + if (node && get_irn_op(node) == op_Cast) + return get_Cast_op(node); + return node; +} + +/* returns operand of node if node is a Confirm */ +ir_node *skip_Confirm (ir_node *node) { + if (node && get_irn_op(node) == op_Confirm) + return get_Confirm_value(node); + return node; +} + +/* skip all high-level ops */ +ir_node *skip_HighLevel(ir_node *node) { + if (node && is_op_highlevel(get_irn_op(node))) + return get_irn_n(node, 0); + return node; } #if 0 @@ -2010,55 +2224,54 @@ skip_Id (ir_node *node) { #endif int -is_Bad (ir_node *node) { - assert(node); - if ((node) && get_irn_opcode(node) == iro_Bad) - return 1; - return 0; +(is_Bad)(const ir_node *node) { + return _is_Bad(node); } int -is_no_Block (ir_node *node) { - assert(node); - return (get_irn_opcode(node) != iro_Block); +(is_Const)(const ir_node *node) { + return _is_Const(node); } int -is_Block (ir_node *node) { - assert(node); - return (get_irn_opcode(node) == iro_Block); +(is_no_Block)(const ir_node *node) { + return _is_no_Block(node); +} + +int +(is_Block)(const ir_node *node) { + return _is_Block(node); } /* returns true if node is a Unknown node. */ int -is_Unknown (ir_node *node) { - assert(node); - return (get_irn_opcode(node) == iro_Unknown); +(is_Unknown)(const ir_node *node) { + return _is_Unknown(node); } int is_Proj (const ir_node *node) { assert(node); return node->op == op_Proj - || (!interprocedural_view && node->op == op_Filter); + || (!get_interprocedural_view() && node->op == op_Filter); } /* Returns true if the operation manipulates control flow. */ int -is_cfop(ir_node *node) { +is_cfop(const ir_node *node) { return is_cfopcode(get_irn_op(node)); } /* Returns true if the operation manipulates interprocedural control flow: CallBegin, EndReg, EndExcept */ -int is_ip_cfop(ir_node *node) { +int is_ip_cfop(const ir_node *node) { return is_ip_cfopcode(get_irn_op(node)); } /* Returns true if the operation can change the control flow because of an exception. 
*/ int -is_fragile_op(ir_node *node) { +is_fragile_op(const ir_node *node) { return is_op_fragile(get_irn_op(node)); } @@ -2086,9 +2299,56 @@ ir_node *get_fragile_op_mem(ir_node *node) { } /* Returns true if the operation is a forking control flow operation. */ -int -is_forking_op(ir_node *node) { - return is_op_forking(get_irn_op(node)); +int (is_irn_forking)(const ir_node *node) { + return _is_irn_forking(node); +} + +type *(get_irn_type)(ir_node *node) { + return _get_irn_type(node); +} + +/* Returns non-zero for constant-like nodes. */ +int (is_irn_constlike)(const ir_node *node) { + return _is_irn_constlike(node); +} + +/* Gets the string representation of the jump prediction .*/ +const char *get_cond_jmp_predicate_name(cond_jmp_predicate pred) +{ + switch (pred) { + default: + case COND_JMP_PRED_NONE: return "no prediction"; + case COND_JMP_PRED_TRUE: return "true taken"; + case COND_JMP_PRED_FALSE: return "false taken"; + } +} + +/* Returns the conditional jump prediction of a Cond node. */ +cond_jmp_predicate (get_Cond_jmp_pred)(ir_node *cond) { + return _get_Cond_jmp_pred(cond); +} + +/* Sets a new conditional jump prediction. */ +void (set_Cond_jmp_pred)(ir_node *cond, cond_jmp_predicate pred) { + _set_Cond_jmp_pred(cond, pred); +} + +/** the get_type operation must be always implemented */ +static type *get_Null_type(ir_node *n) { + return NULL; +} + +/* set the get_type operation */ +ir_op *firm_set_default_get_type(ir_op *op) +{ + switch (op->code) { + case iro_Const: op->get_type = get_Const_type; break; + case iro_SymConst: op->get_type = get_SymConst_value_type; break; + case iro_Cast: op->get_type = get_Cast_type; break; + case iro_Proj: op->get_type = get_Proj_type; break; + default: op->get_type = get_Null_type; break; + } + return op; } #ifdef DEBUG_libfirm
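
The bit-coded comparison relations introduced above (get_negated_pnc() XOR-ing against pn_Cmp_True, and get_inversed_pnc() swapping the Lt/Gt bits) can be exercised in isolation. The following standalone sketch is not libfirm code; the CMP_* bit values merely mirror what the diff implies for the pn_Cmp_* encoding and are an assumption of this example.

    /* Standalone sketch of the bit-coded relation trick shown in the diff.
     * The bit assignments below are assumptions mirroring pn_Cmp_*. */
    #include <stdio.h>

    enum {
        CMP_FALSE = 0,        /* no relation holds          */
        CMP_EQ    = 1 << 0,   /* equal                      */
        CMP_LT    = 1 << 1,   /* less than                  */
        CMP_GT    = 1 << 2,   /* greater than               */
        CMP_UO    = 1 << 3,   /* unordered (float NaN case) */
        CMP_TRUE  = CMP_EQ | CMP_LT | CMP_GT | CMP_UO
    };

    /* Negation: complement of the relation set.  For non-float modes the
     * "unordered" outcome cannot occur, so its bit is cleared again. */
    static int negated(int pnc, int is_float) {
        pnc ^= CMP_TRUE;
        if (!is_float)
            pnc &= ~CMP_UO;
        return pnc;
    }

    /* Inversion (operands swapped): "<" becomes ">" and vice versa;
     * Eq and Uo are symmetric under swapping and stay untouched. */
    static int inversed(int pnc) {
        int code    = pnc & ~(CMP_LT | CMP_GT);
        int lesser  = pnc & CMP_LT;
        int greater = pnc & CMP_GT;
        return code | (lesser ? CMP_GT : 0) | (greater ? CMP_LT : 0);
    }

    int main(void) {
        int le = CMP_LT | CMP_EQ;                                   /* "<=" */
        printf("negated(<=), int   = %d (expect >  = %d)\n", negated(le, 0), CMP_GT);
        printf("negated(<=), float = %d (expect Ug = %d)\n", negated(le, 1), CMP_GT | CMP_UO);
        printf("inversed(<=)       = %d (expect >= = %d)\n", inversed(le), CMP_GT | CMP_EQ);
        return 0;
    }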
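
The new allocation code in new_ir_node() places firm_add_node_size extra bytes in front of the returned node (res = (ir_node *)(p + firm_add_node_size)), so a client that called register_additional_node_data() reaches its slot by subtracting the offset that the call returned. Below is a minimal usage sketch under that assumption; it presumes the libfirm headers that declare ir_node and register_additional_node_data() are available, and my_node_info, my_data_offset and my_get_info() are hypothetical names of this sketch, not libfirm API.

    /* Hypothetical client of register_additional_node_data(); only the
     * layout implied by new_ir_node() in the diff is relied upon. */
    typedef struct my_node_info {
        unsigned visit_counter;   /* example payload */
    } my_node_info;

    /* Cumulative size returned at registration time; doubles as the
     * negative offset of this client's slot relative to the node. */
    static unsigned my_data_offset;

    /* Must run before the first node is created: init_irnode() sets
     * forbid_new_data and thereby ends the registration phase. */
    static void my_register(void) {
        my_data_offset = register_additional_node_data(sizeof(my_node_info));
    }

    /* The registered bytes sit immediately below the ir_node, so they are
     * reached by subtracting the offset obtained from registration. */
    static my_node_info *my_get_info(ir_node *node) {
        return (my_node_info *) ((char *) node - my_data_offset);
    }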