X-Git-Url: http://nsz.repo.hu/git/?a=blobdiff_plain;f=ir%2Fir%2Firnode.c;h=228b14830a6ee0712c2d5ef69c184ee5a03770fb;hb=69d11aa49aa17358468bbb2f5506ef5f009514e9;hp=20376d643ae7a9be1f38d777ad687f167e90fc43;hpb=96027c6b59a098ca8bcc10f6c3df82b0a725485a;p=libfirm

diff --git a/ir/ir/irnode.c b/ir/ir/irnode.c
index 20376d643..228b14830 100644
--- a/ir/ir/irnode.c
+++ b/ir/ir/irnode.c
@@ -48,7 +48,7 @@
 /* some constants fixing the positions of nodes predecessors
    in the in array */
 #define CALL_PARAM_OFFSET 2
-#define FUNCCALL_PARAM_OFFSET 1
+#define BUILDIN_PARAM_OFFSET 1
 #define SEL_INDEX_OFFSET 2
 #define RETURN_RESULT_OFFSET 1 /* mem is not a result */
 #define END_KEEPALIVE_OFFSET 0
@@ -163,13 +163,10 @@ new_ir_node(dbg_info *db, ir_graph *irg, ir_node *block, ir_op *op, ir_mode *mod
         memcpy(&res->in[1], in, sizeof(ir_node *) * arity);
     }

-    res->in[0] = block;
+    res->in[0]   = block;
     set_irn_dbg_info(res, db);
-    res->out = NULL;
-
-#ifdef DEBUG_libfirm
+    res->out     = NULL;
     res->node_nr = get_irp_new_node_nr();
-#endif

     for (i = 0; i < EDGE_KIND_LAST; ++i)
         INIT_LIST_HEAD(&res->edge_info[i].outs_head);
@@ -497,11 +494,7 @@ void set_irn_pinned(ir_node *node, op_pin_state state) {
 /* Outputs a unique number for this node */
 long get_irn_node_nr(const ir_node *node) {
     assert(node);
-#ifdef DEBUG_libfirm
     return node->node_nr;
-#else
-    return (long)PTR_TO_INT(node);
-#endif
 }

 const_attr *get_irn_const_attr(ir_node *node) {
@@ -532,7 +525,7 @@ symconst_attr *get_irn_symconst_attr(ir_node *node) {

 ir_type *get_irn_call_attr(ir_node *node) {
     assert(is_Call(node));
-    return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
+    return node->attr.call.type = skip_tid(node->attr.call.type);
 }

 sel_attr *get_irn_sel_attr(ir_node *node) {
@@ -571,6 +564,11 @@ divmod_attr *get_irn_divmod_attr(ir_node *node) {
     return &node->attr.divmod;
 }

+builtin_attr *get_irn_builtin_attr(ir_node *node) {
+    assert(is_Builtin(node));
+    return &node->attr.builtin;
+}
+
 void *(get_irn_generic_attr)(ir_node *node) {
     assert(is_ir_node(node));
     return _get_irn_generic_attr(node);
 }
@@ -638,20 +636,6 @@ ir_type *is_tls_pointer(const ir_node *n) {
     return NULL;
 }

-/* Test whether arbitrary node is value arg base, i.e. Proj(pn_Start_P_value_arg_base)
- * from Start. If so returns 1, else 0. */
-int is_value_arg_pointer(const ir_node *n) {
-    if (is_Proj(n) &&
-        (get_Proj_proj(n) == pn_Start_P_value_arg_base) &&
-        is_Start(get_Proj_pred(n)))
-        return 1;
-    return 0;
-}
-
-/* Returns an array with the predecessors of the Block. Depending on
-   the implementation of the graph data structure this can be a copy of
-   the internal representation of predecessors as well as the internal
-   array itself. Therefore writing to this array might obstruct the ir. */
 ir_node **get_Block_cfgpred_arr(ir_node *node) {
     assert(is_Block(node));
     return (ir_node **)&(get_irn_in(node)[1]);
 }
@@ -670,6 +654,16 @@ void set_Block_cfgpred(ir_node *node, int pos, ir_node *pred) {
     set_irn_n(node, pos, pred);
 }

+int get_Block_cfgpred_pos(const ir_node *block, const ir_node *pred) {
+    int i;
+
+    for (i = get_Block_n_cfgpreds(block) - 1; i >= 0; --i) {
+        if (get_Block_cfgpred_block(block, i) == pred)
+            return i;
+    }
+    return -1;
+}
+
 ir_node *(get_Block_cfgpred_block)(const ir_node *node, int pos) {
     return _get_Block_cfgpred_block(node, pos);
 }
@@ -923,7 +917,7 @@ found:
     ARR_RESIZE(ir_node *, end->in, (n - 1) + 1 + END_KEEPALIVE_OFFSET);
 }

-/* remove Bads and doublets from the keep-alive set */
+/* remove Bads, NoMems and doublets from the keep-alive set */
 void remove_End_Bads_and_doublets(ir_node *end) {
     pset_new_t keeps;
     int idx, n = get_End_n_keepalives(end);
@@ -938,7 +932,7 @@ void remove_End_Bads_and_doublets(ir_node *end) {
     for (idx = n - 1; idx >= 0; --idx) {
         ir_node *ka = get_End_keepalive(end, idx);

-        if (is_Bad(ka) || pset_new_contains(&keeps, ka)) {
+        if (is_Bad(ka) || is_NoMem(ka) || pset_new_contains(&keeps, ka)) {
             /* remove the edge */
             edges_notify_edge(end, idx, NULL, ka, irg);
@@ -1011,6 +1005,18 @@ I don't want to choose 3) as 2a) seems to have advantages for dataflow
 analysis and 3) does not allow to convert the representation to 2a).
 */
+
+const char *get_cond_kind_name(cond_kind kind)
+{
+#define X(a) case a: return #a;
+    switch (kind) {
+    X(dense);
+    X(fragmentary);
+    }
+    return "";
+#undef X
+}
+
 ir_node *
 get_Cond_selector(const ir_node *node) {
     assert(is_Cond(node));
     return get_irn_n(node, 0);
@@ -1036,11 +1042,16 @@ set_Cond_kind(ir_node *node, cond_kind kind) {
 }

 long
-get_Cond_defaultProj(const ir_node *node) {
+get_Cond_default_proj(const ir_node *node) {
     assert(is_Cond(node));
     return node->attr.cond.default_proj;
 }

+void set_Cond_default_proj(ir_node *node, long defproj) {
+    assert(is_Cond(node));
+    node->attr.cond.default_proj = defproj;
+}
+
 ir_node *
 get_Return_mem(const ir_node *node) {
     assert(is_Return(node));
     return get_irn_n(node, 0);
@@ -1284,7 +1295,7 @@ set_Sel_index(ir_node *node, int pos, ir_node *index) {
 ir_entity *
 get_Sel_entity(const ir_node *node) {
     assert(is_Sel(node));
-    return node->attr.sel.ent;
+    return node->attr.sel.entity;
 }

 /* need a version without const to prevent warning */
@@ -1295,7 +1306,7 @@ static ir_entity *_get_Sel_entity(ir_node *node) {
 void
 set_Sel_entity(ir_node *node, ir_entity *ent) {
     assert(is_Sel(node));
-    node->attr.sel.ent = ent;
+    node->attr.sel.entity = ent;
 }

@@ -1343,18 +1354,6 @@ get_Call_n_params(const ir_node *node) {
     return (get_irn_arity(node) - CALL_PARAM_OFFSET);
 }

-int
-get_Call_arity(const ir_node *node) {
-    assert(is_Call(node));
-    return get_Call_n_params(node);
-}
-
-/* void
-set_Call_arity(ir_node *node, ir_node *arity) {
-    assert(is_Call(node));
-}
-*/
-
 ir_node *
 get_Call_param(const ir_node *node, int pos) {
     assert(is_Call(node));
@@ -1370,16 +1369,101 @@ set_Call_param(ir_node *node, int pos, ir_node *param) {
 ir_type *
 get_Call_type(ir_node *node) {
     assert(is_Call(node));
-    return node->attr.call.cld_tp = skip_tid(node->attr.call.cld_tp);
+    return node->attr.call.type = skip_tid(node->attr.call.type);
 }

 void
 set_Call_type(ir_node *node, ir_type *tp) {
     assert(is_Call(node));
     assert((get_unknown_type() == tp) || is_Method_type(tp));
-    node->attr.call.cld_tp = tp;
+    node->attr.call.type = tp;
 }

+ir_node *
+get_Builtin_mem(const ir_node *node) {
+    assert(is_Builtin(node));
+    return get_irn_n(node, 0);
+}
+
+void
+set_Builin_mem(ir_node *node, ir_node *mem) {
+    assert(is_Builtin(node));
+    set_irn_n(node, 0, mem);
+}
+
+ir_builtin_kind
+get_Builtin_kind(const ir_node *node) {
+    assert(is_Builtin(node));
+    return node->attr.builtin.kind;
+}
+
+void
+set_Builtin_kind(ir_node *node, ir_builtin_kind kind) {
+    assert(is_Builtin(node));
+    node->attr.builtin.kind = kind;
+}
+
+ir_node **
+get_Builtin_param_arr(ir_node *node) {
+    assert(is_Builtin(node));
+    return &get_irn_in(node)[BUILDIN_PARAM_OFFSET + 1];
+}
+
+int
+get_Builtin_n_params(const ir_node *node) {
+    assert(is_Builtin(node));
+    return (get_irn_arity(node) - BUILDIN_PARAM_OFFSET);
+}
+
+ir_node *
+get_Builtin_param(const ir_node *node, int pos) {
+    assert(is_Builtin(node));
+    return get_irn_n(node, pos + BUILDIN_PARAM_OFFSET);
+}
+
+void
+set_Builtin_param(ir_node *node, int pos, ir_node *param) {
+    assert(is_Builtin(node));
+    set_irn_n(node, pos + BUILDIN_PARAM_OFFSET, param);
+}
+
+ir_type *
+get_Builtin_type(ir_node *node) {
+    assert(is_Builtin(node));
+    return node->attr.builtin.type = skip_tid(node->attr.builtin.type);
+}
+
+void
+set_Builtin_type(ir_node *node, ir_type *tp) {
+    assert(is_Builtin(node));
+    assert((get_unknown_type() == tp) || is_Method_type(tp));
+    node->attr.builtin.type = tp;
+}
+
+/* Returns a human readable string for the ir_builtin_kind. */
+const char *get_builtin_kind_name(ir_builtin_kind kind) {
+#define X(a) case a: return #a;
+    switch (kind) {
+    X(ir_bk_trap);
+    X(ir_bk_debugbreak);
+    X(ir_bk_return_address);
+    X(ir_bk_frame_addess);
+    X(ir_bk_prefetch);
+    X(ir_bk_ffs);
+    X(ir_bk_clz);
+    X(ir_bk_ctz);
+    X(ir_bk_popcount);
+    X(ir_bk_parity);
+    X(ir_bk_bswap);
+    X(ir_bk_inport);
+    X(ir_bk_outport);
+    X(ir_bk_inner_trampoline);
+    }
+    return "";
+#undef X
+}
+
+
 int Call_has_callees(const ir_node *node) {
     assert(is_Call(node));
     return ((get_irg_callee_info_state(get_irn_irg(node)) != irg_callee_info_none) &&
@@ -1493,12 +1577,12 @@ BINOP_MEM(OP) \
 \
 ir_mode *get_##OP##_resmode(const ir_node *node) { \
     assert(is_##OP(node)); \
-    return node->attr.divmod.res_mode; \
+    return node->attr.divmod.resmode; \
 } \
 \
 void set_##OP##_resmode(ir_node *node, ir_mode *mode) { \
     assert(is_##OP(node)); \
-    node->attr.divmod.res_mode = mode; \
+    node->attr.divmod.resmode = mode; \
 }
@@ -1525,7 +1609,7 @@ BINOP(Cmp)
 UNOP(Conv)
 UNOP(Cast)

-int is_Div_remainderless(const ir_node *node) {
+int get_Div_no_remainder(const ir_node *node) {
     assert(is_Div(node));
     return node->attr.divmod.no_remainder;
 }
@@ -1543,14 +1627,14 @@ void set_Conv_strict(ir_node *node, int strict_flag) {
 ir_type *
 get_Cast_type(ir_node *node) {
     assert(is_Cast(node));
-    node->attr.cast.totype = skip_tid(node->attr.cast.totype);
-    return node->attr.cast.totype;
+    node->attr.cast.type = skip_tid(node->attr.cast.type);
+    return node->attr.cast.type;
 }

 void
 set_Cast_type(ir_node *node, ir_type *to_tp) {
     assert(is_Cast(node));
-    node->attr.cast.totype = to_tp;
+    node->attr.cast.type = to_tp;
 }
@@ -1752,13 +1836,13 @@ set_Load_ptr(ir_node *node, ir_node *ptr) {
 ir_mode *
 get_Load_mode(const ir_node *node) {
     assert(is_Load(node));
-    return node->attr.load.load_mode;
+    return node->attr.load.mode;
 }

 void
 set_Load_mode(ir_node *node, ir_mode *mode) {
     assert(is_Load(node));
-    node->attr.load.load_mode = mode;
+    node->attr.load.mode = mode;
 }

 ir_volatility
@@ -2268,12 +2352,12 @@ void set_CopyB_src(ir_node *node, ir_node *src) {

 ir_type *get_CopyB_type(ir_node *node) {
     assert(is_CopyB(node));
-    return node->attr.copyb.data_type = skip_tid(node->attr.copyb.data_type);
+    return node->attr.copyb.type = skip_tid(node->attr.copyb.type);
 }

 void set_CopyB_type(ir_node *node, ir_type *data_type) {
     assert(is_CopyB(node) && data_type);
-    node->attr.copyb.data_type = data_type;
+    node->attr.copyb.type = data_type;
 }

@@ -2488,10 +2572,8 @@ skip_Tuple(ir_node *node) {
     ir_node *pred;
     ir_op *op;

-    if (!get_opt_normalize()) return node;
-
 restart:
-    if (get_irn_op(node) == op_Proj) {
+    if (is_Proj(node)) {
         pred = get_Proj_pred(node);
         op = get_irn_op(pred);

@@ -2501,9 +2583,8 @@ restart:
         */
         if (op == op_Proj) { /* nested Tuple ? */
             pred = skip_Tuple(pred);
-            op = get_irn_op(pred);

-            if (op == op_Tuple) {
+            if (is_Tuple(pred)) {
                 node = get_Tuple_pred(pred, get_Proj_proj(node));
                 goto restart;
             }
@@ -2771,6 +2852,12 @@ int
     return _is_Call(node);
 }

+/* returns true if node is a Builtin node. */
+int
+(is_Builtin)(const ir_node *node) {
+    return _is_Builtin(node);
+}
+
 /* returns true if node is a CallBegin node. */
 int
 (is_CallBegin)(const ir_node *node) {
@@ -2884,6 +2971,12 @@ int
     return _is_ASM(node);
 }

+/* returns true if a node is an Dummy node. */
+int
+(is_Dummy)(const ir_node *node) {
+    return _is_Dummy(node);
+}
+
 int
 (is_Proj)(const ir_node *node) {
     return _is_Proj(node);
@@ -3012,12 +3105,14 @@ int (is_irn_machine_user)(const ir_node *node, unsigned n) {

 /* Gets the string representation of the jump prediction .*/
 const char *get_cond_jmp_predicate_name(cond_jmp_predicate pred) {
+#define X(a) case a: return #a;
     switch (pred) {
-    default:
-    case COND_JMP_PRED_NONE: return "no prediction";
-    case COND_JMP_PRED_TRUE: return "true taken";
-    case COND_JMP_PRED_FALSE: return "false taken";
+    X(COND_JMP_PRED_NONE);
+    X(COND_JMP_PRED_TRUE);
+    X(COND_JMP_PRED_FALSE);
     }
+    return "";
+#undef X
 }

 /* Returns the conditional jump prediction of a Cond node. */
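
For orientation, the sketch below shows how the Builtin accessors added by this diff might be used together. It is a minimal example and not part of the patch: dump_builtin_info() is a hypothetical helper, and it assumes the accessors as declared above (is_Builtin(), get_Builtin_kind(), get_builtin_kind_name(), get_Builtin_n_params(), get_Builtin_param(), get_irn_node_nr()) are exposed through irnode.h.

#include <stdio.h>
#include "irnode.h"  /* libfirm node accessors used below (assumed header) */

/* dump_builtin_info: hypothetical debugging helper, not part of libfirm.
 * Prints the kind of a Builtin node and the node numbers of its parameters. */
static void dump_builtin_info(const ir_node *node)
{
    int i, n;

    if (!is_Builtin(node))
        return;

    /* get_builtin_kind_name() maps the enum value to its identifier,
     * e.g. ir_bk_prefetch -> "ir_bk_prefetch" */
    printf("Builtin kind: %s\n",
           get_builtin_kind_name(get_Builtin_kind(node)));

    /* the parameter accessors hide the BUILDIN_PARAM_OFFSET used internally */
    n = get_Builtin_n_params(node);
    for (i = 0; i < n; ++i) {
        const ir_node *param = get_Builtin_param(node, i);
        printf("  param %d: node %ld\n", i, get_irn_node_nr(param));
    }
}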