X-Git-Url: http://nsz.repo.hu/git/?a=blobdiff_plain;f=ir%2Fir%2Fircons.c;h=38c6dd9958ae4c7330a8cfdf0f848cc0dc121580;hb=76a6c3b40390427bc516a943316642bb57daa465;hp=398cfe22f117e66909d4f2bdbad0152048975356;hpb=58ed1dbc91297e868c47ffb3b622c66f28065105;p=libfirm

diff --git a/ir/ir/ircons.c b/ir/ir/ircons.c
index 398cfe22f..38c6dd995 100644
--- a/ir/ir/ircons.c
+++ b/ir/ir/ircons.c
@@ -12,9 +12,20 @@
 */
 #ifdef HAVE_CONFIG_H
-# include <config.h>
+# include "config.h"
 #endif
+#ifdef HAVE_ALLOCA_H
+#include <alloca.h>
+#endif
+#ifdef HAVE_MALLOC_H
+#include <malloc.h>
+#endif
+#ifdef HAVE_STRING_H
+#include <string.h>
+#endif
+
+# include "irprog_t.h"
 # include "irgraph_t.h"
 # include "irnode_t.h"
 # include "irmode_t.h"
@@ -25,10 +36,9 @@
 # include "iropt_t.h"
 # include "irgmod.h"
 # include "array.h"
-/* memset belongs to string.h */
-# include "string.h"
 # include "irbackedge_t.h"
 # include "irflag_t.h"
+# include "iredges_t.h"
 #if USE_EXPLICIT_PHI_IN_STACK
 /* A stack needed for the automatic Phi node construction in constructor
@@ -50,15 +60,16 @@ typedef struct Phi_in_stack Phi_in_stack;
 /*
  * language dependant initialization variable
  */
-static default_initialize_local_variable_func_t *default_initialize_local_variable = NULL;
+static uninitialized_local_variable_func_t *default_initialize_local_variable = NULL;
-/*** ******************************************** */
-/** privat interfaces, for professional use only */
+/* -------------------------------------------- */
+/* privat interfaces, for professional use only */
+/* -------------------------------------------- */
 /* Constructs a Block with a fixed number of predecessors. Does not
    set current_block. Can not be used with automatic Phi node construction. */
-INLINE ir_node *
+ir_node *
 new_rd_Block (dbg_info* db, ir_graph *irg, int arity, ir_node **in)
 {
   ir_node *res;
@@ -69,16 +80,17 @@ new_rd_Block (dbg_info* db, ir_graph *irg, int arity, ir_node **in)
   /* res->attr.block.exc = exc_normal; */
   /* res->attr.block.handler_entry = 0; */
-  res->attr.block.irg = irg;
-  res->attr.block.backedge = new_backedge_arr(irg->obst, arity);
-  res->attr.block.in_cg = NULL;
+  res->attr.block.dead = 0;
+  res->attr.block.irg = irg;
+  res->attr.block.backedge = new_backedge_arr(irg->obst, arity);
+  res->attr.block.in_cg = NULL;
   res->attr.block.cg_backedge = NULL;
   IRN_VRFY_IRG(res, irg);
   return res;
 }
-INLINE ir_node *
+ir_node *
 new_rd_Start (dbg_info* db, ir_graph *irg, ir_node *block)
 {
   ir_node *res;
@@ -90,7 +102,7 @@ new_rd_Start (dbg_info* db, ir_graph *irg, ir_node *block)
   return res;
 }
-INLINE ir_node *
+ir_node *
 new_rd_End (dbg_info* db, ir_graph *irg, ir_node *block)
 {
   ir_node *res;
@@ -103,7 +115,7 @@ new_rd_End (dbg_info* db, ir_graph *irg, ir_node *block)
 /* Creates a Phi node with all predecessors. Calling this constructor
    is only allowed if the corresponding block is mature. */
-INLINE ir_node *
+ir_node *
 new_rd_Phi (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in, ir_mode *mode)
 {
   ir_node *res;
@@ -135,7 +147,7 @@ new_rd_Phi (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in
   return res;
 }
-INLINE ir_node *
+ir_node *
 new_rd_Const_type (dbg_info* db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con, type *tp)
 {
   ir_node *res;
@@ -150,18 +162,19 @@ new_rd_Const_type (dbg_info* db, ir_graph *irg, ir_node *block, ir_mode *mode, t
   return res;
 }
-INLINE ir_node *
+ir_node *
 new_rd_Const (dbg_info* db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con)
 {
-  type *tp = unknown_type;
-  /* removing this somehow causes errors in jack.
*/ - if (tarval_is_entity(con)) - tp = find_pointer_type_to_type(get_entity_type(get_tarval_entity(con))); + return new_rd_Const_type (db, irg, block, mode, con, firm_unknown_type); +} - return new_rd_Const_type (db, irg, block, mode, con, tp); +ir_node * +new_rd_Const_long (dbg_info* db, ir_graph *irg, ir_node *block, ir_mode *mode, long value) +{ + return new_rd_Const(db, irg, block, mode, new_tarval_from_long(value, mode)); } -INLINE ir_node * +ir_node * new_rd_Id (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *val, ir_mode *mode) { ir_node *res; @@ -172,7 +185,7 @@ new_rd_Id (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *val, ir_mode *m return res; } -INLINE ir_node * +ir_node * new_rd_Proj (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode, long proj) { @@ -183,7 +196,7 @@ new_rd_Proj (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode assert(res); assert(get_Proj_pred(res)); - assert(get_nodes_Block(get_Proj_pred(res))); + assert(get_nodes_block(get_Proj_pred(res))); res = optimize_node(res); @@ -192,7 +205,7 @@ new_rd_Proj (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode } -INLINE ir_node * +ir_node * new_rd_defaultProj (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *arg, long max_proj) { @@ -204,7 +217,7 @@ new_rd_defaultProj (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *arg, return res; } -INLINE ir_node * +ir_node * new_rd_Conv (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *op, ir_mode *mode) { ir_node *res; @@ -215,11 +228,13 @@ new_rd_Conv (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *op, ir_mode * return res; } -INLINE ir_node * +ir_node * new_rd_Cast (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *op, type *to_tp) { ir_node *res; + assert(is_atomic_type(to_tp)); + res = new_ir_node(db, irg, block, op_Cast, get_irn_mode(op), 1, &op); res->attr.cast.totype = to_tp; res = optimize_node(res); @@ -227,7 +242,7 @@ new_rd_Cast (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *op, type *to_ return res; } -INLINE ir_node * +ir_node * new_rd_Tuple (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in) { ir_node *res; @@ -238,7 +253,7 @@ new_rd_Tuple (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node ** return res; } -INLINE ir_node * +ir_node * new_rd_Add (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *op1, ir_node *op2, ir_mode *mode) { @@ -253,7 +268,7 @@ new_rd_Add (dbg_info* db, ir_graph *irg, ir_node *block, return res; } -INLINE ir_node * +ir_node * new_rd_Sub (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *op1, ir_node *op2, ir_mode *mode) { @@ -268,9 +283,9 @@ new_rd_Sub (dbg_info* db, ir_graph *irg, ir_node *block, return res; } -INLINE ir_node * +ir_node * new_rd_Minus (dbg_info* db, ir_graph *irg, ir_node *block, - ir_node *op, ir_mode *mode) + ir_node *op, ir_mode *mode) { ir_node *res; @@ -280,7 +295,7 @@ new_rd_Minus (dbg_info* db, ir_graph *irg, ir_node *block, return res; } -INLINE ir_node * +ir_node * new_rd_Mul (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *op1, ir_node *op2, ir_mode *mode) { @@ -295,7 +310,7 @@ new_rd_Mul (dbg_info* db, ir_graph *irg, ir_node *block, return res; } -INLINE ir_node * +ir_node * new_rd_Quot (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *memop, ir_node *op1, ir_node *op2) { @@ -311,7 +326,7 @@ new_rd_Quot (dbg_info* db, ir_graph *irg, ir_node *block, return res; } -INLINE ir_node * +ir_node * new_rd_DivMod (dbg_info* db, ir_graph *irg, ir_node *block, ir_node 
*memop, ir_node *op1, ir_node *op2) { @@ -327,7 +342,7 @@ new_rd_DivMod (dbg_info* db, ir_graph *irg, ir_node *block, return res; } -INLINE ir_node * +ir_node * new_rd_Div (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *memop, ir_node *op1, ir_node *op2) { @@ -343,7 +358,7 @@ new_rd_Div (dbg_info* db, ir_graph *irg, ir_node *block, return res; } -INLINE ir_node * +ir_node * new_rd_Mod (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *memop, ir_node *op1, ir_node *op2) { @@ -359,7 +374,7 @@ new_rd_Mod (dbg_info* db, ir_graph *irg, ir_node *block, return res; } -INLINE ir_node * +ir_node * new_rd_And (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *op1, ir_node *op2, ir_mode *mode) { @@ -374,7 +389,7 @@ new_rd_And (dbg_info* db, ir_graph *irg, ir_node *block, return res; } -INLINE ir_node * +ir_node * new_rd_Or (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *op1, ir_node *op2, ir_mode *mode) { @@ -389,7 +404,7 @@ new_rd_Or (dbg_info* db, ir_graph *irg, ir_node *block, return res; } -INLINE ir_node * +ir_node * new_rd_Eor (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *op1, ir_node *op2, ir_mode *mode) { @@ -404,7 +419,7 @@ new_rd_Eor (dbg_info* db, ir_graph *irg, ir_node *block, return res; } -INLINE ir_node * +ir_node * new_rd_Not (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *op, ir_mode *mode) { @@ -416,7 +431,7 @@ new_rd_Not (dbg_info* db, ir_graph *irg, ir_node *block, return res; } -INLINE ir_node * +ir_node * new_rd_Shl (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *op, ir_node *k, ir_mode *mode) { @@ -431,7 +446,7 @@ new_rd_Shl (dbg_info* db, ir_graph *irg, ir_node *block, return res; } -INLINE ir_node * +ir_node * new_rd_Shr (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *op, ir_node *k, ir_mode *mode) { @@ -446,7 +461,7 @@ new_rd_Shr (dbg_info* db, ir_graph *irg, ir_node *block, return res; } -INLINE ir_node * +ir_node * new_rd_Shrs (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *op, ir_node *k, ir_mode *mode) { @@ -461,7 +476,7 @@ new_rd_Shrs (dbg_info* db, ir_graph *irg, ir_node *block, return res; } -INLINE ir_node * +ir_node * new_rd_Rot (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *op, ir_node *k, ir_mode *mode) { @@ -476,7 +491,7 @@ new_rd_Rot (dbg_info* db, ir_graph *irg, ir_node *block, return res; } -INLINE ir_node * +ir_node * new_rd_Abs (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *op, ir_mode *mode) { @@ -488,7 +503,7 @@ new_rd_Abs (dbg_info* db, ir_graph *irg, ir_node *block, return res; } -INLINE ir_node * +ir_node * new_rd_Cmp (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *op1, ir_node *op2) { @@ -503,7 +518,7 @@ new_rd_Cmp (dbg_info* db, ir_graph *irg, ir_node *block, return res; } -INLINE ir_node * +ir_node * new_rd_Jmp (dbg_info* db, ir_graph *irg, ir_node *block) { ir_node *res; @@ -514,7 +529,7 @@ new_rd_Jmp (dbg_info* db, ir_graph *irg, ir_node *block) return res; } -INLINE ir_node * +ir_node * new_rd_Cond (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *c) { ir_node *res; @@ -543,9 +558,10 @@ new_rd_Call (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store, res = new_ir_node(db, irg, block, op_Call, mode_T, r_arity, r_in); - assert(is_method_type(tp)); + assert((get_unknown_type() == tp) || is_Method_type(tp)); set_Call_type(res, tp); - res->attr.call.callee_arr = NULL; + res->attr.call.exc.pin_state = op_pin_state_pinned; + res->attr.call.callee_arr = NULL; res = optimize_node(res); IRN_VRFY_IRG(res, irg); return res; @@ -569,7 +585,7 @@ new_rd_Return 
(dbg_info* db, ir_graph *irg, ir_node *block, return res; } -INLINE ir_node * +ir_node * new_rd_Raise (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *obj) { ir_node *in[2]; @@ -583,9 +599,9 @@ new_rd_Raise (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store, ir_no return res; } -INLINE ir_node * +ir_node * new_rd_Load (dbg_info* db, ir_graph *irg, ir_node *block, - ir_node *store, ir_node *adr) + ir_node *store, ir_node *adr, ir_mode *mode) { ir_node *in[2]; ir_node *res; @@ -593,12 +609,15 @@ new_rd_Load (dbg_info* db, ir_graph *irg, ir_node *block, in[0] = store; in[1] = adr; res = new_ir_node(db, irg, block, op_Load, mode_T, 2, in); + res->attr.load.exc.pin_state = op_pin_state_pinned; + res->attr.load.load_mode = mode; + res->attr.load.volatility = volatility_non_volatile; res = optimize_node(res); IRN_VRFY_IRG(res, irg); return res; } -INLINE ir_node * +ir_node * new_rd_Store (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *adr, ir_node *val) { @@ -609,12 +628,14 @@ new_rd_Store (dbg_info* db, ir_graph *irg, ir_node *block, in[1] = adr; in[2] = val; res = new_ir_node(db, irg, block, op_Store, mode_T, 3, in); + res->attr.store.exc.pin_state = op_pin_state_pinned; + res->attr.store.volatility = volatility_non_volatile; res = optimize_node(res); IRN_VRFY_IRG(res, irg); return res; } -INLINE ir_node * +ir_node * new_rd_Alloc (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *size, type *alloc_type, where_alloc where) { @@ -624,16 +645,17 @@ new_rd_Alloc (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store, in[0] = store; in[1] = size; res = new_ir_node(db, irg, block, op_Alloc, mode_T, 2, in); - res->attr.a.where = where; - res->attr.a.type = alloc_type; + res->attr.a.exc.pin_state = op_pin_state_pinned; + res->attr.a.where = where; + res->attr.a.type = alloc_type; res = optimize_node(res); IRN_VRFY_IRG(res, irg); return res; } -INLINE ir_node * +ir_node * new_rd_Free (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store, - ir_node *ptr, ir_node *size, type *free_type) + ir_node *ptr, ir_node *size, type *free_type, where_alloc where) { ir_node *in[3]; ir_node *res; @@ -641,8 +663,9 @@ new_rd_Free (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store, in[0] = store; in[1] = ptr; in[2] = size; - res = new_ir_node (db, irg, block, op_Free, mode_T, 3, in); - res->attr.f = free_type; + res = new_ir_node (db, irg, block, op_Free, mode_M, 3, in); + res->attr.f.where = where; + res->attr.f.type = free_type; res = optimize_node(res); IRN_VRFY_IRG(res, irg); return res; @@ -691,10 +714,9 @@ new_rd_InstOf (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store, return res; } -INLINE ir_node * +ir_node * new_rd_SymConst_type (dbg_info* db, ir_graph *irg, ir_node *block, symconst_symbol value, - symconst_kind symkind, type *tp) -{ + symconst_kind symkind, type *tp) { ir_node *res; ir_mode *mode; @@ -702,6 +724,7 @@ new_rd_SymConst_type (dbg_info* db, ir_graph *irg, ir_node *block, symconst_symb mode = mode_P_mach; else mode = mode_Iu; + res = new_ir_node(db, irg, block, op_SymConst, mode, 0, NULL); res->attr.i.num = symkind; @@ -713,11 +736,11 @@ new_rd_SymConst_type (dbg_info* db, ir_graph *irg, ir_node *block, symconst_symb return res; } -INLINE ir_node * +ir_node * new_rd_SymConst (dbg_info* db, ir_graph *irg, ir_node *block, symconst_symbol value, - symconst_kind symkind) + symconst_kind symkind) { - ir_node *res = new_rd_SymConst_type(db, irg, block, value, symkind, unknown_type); + ir_node *res 
= new_rd_SymConst_type(db, irg, block, value, symkind, firm_unknown_type); return res; } @@ -741,7 +764,7 @@ ir_node *new_rd_SymConst_size (dbg_info *db, ir_graph *irg, type *symbol, type * return new_rd_SymConst_type(db, irg, irg->start_block, sym, symconst_size, tp); } -INLINE ir_node * +ir_node * new_rd_Sync (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in) { ir_node *res; @@ -752,13 +775,13 @@ new_rd_Sync (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **i return res; } -INLINE ir_node * +ir_node * new_rd_Bad (ir_graph *irg) { return irg->bad; } -INLINE ir_node * +ir_node * new_rd_Confirm (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) { ir_node *in[2], *res; @@ -772,13 +795,13 @@ new_rd_Confirm (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_no return res; } -INLINE ir_node * +ir_node * new_rd_Unknown (ir_graph *irg, ir_mode *m) { return new_ir_node(NULL, irg, irg->start_block, op_Unknown, m, 0, NULL); } -INLINE ir_node * +ir_node * new_rd_CallBegin (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *call) { ir_node *in[1]; @@ -793,7 +816,7 @@ new_rd_CallBegin (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *call) return res; } -INLINE ir_node * +ir_node * new_rd_EndReg (dbg_info *db, ir_graph *irg, ir_node *block) { ir_node *res; @@ -804,7 +827,7 @@ new_rd_EndReg (dbg_info *db, ir_graph *irg, ir_node *block) return res; } -INLINE ir_node * +ir_node * new_rd_EndExcept (dbg_info *db, ir_graph *irg, ir_node *block) { ir_node *res; @@ -815,7 +838,7 @@ new_rd_EndExcept (dbg_info *db, ir_graph *irg, ir_node *block) return res; } -INLINE ir_node * +ir_node * new_rd_Break (dbg_info *db, ir_graph *irg, ir_node *block) { ir_node *res; @@ -826,7 +849,7 @@ new_rd_Break (dbg_info *db, ir_graph *irg, ir_node *block) return res; } -INLINE ir_node * +ir_node * new_rd_Filter (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode, long proj) { @@ -839,230 +862,239 @@ new_rd_Filter (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mod assert(res); assert(get_Proj_pred(res)); - assert(get_nodes_Block(get_Proj_pred(res))); + assert(get_nodes_block(get_Proj_pred(res))); res = optimize_node(res); IRN_VRFY_IRG(res, irg); return res; +} +ir_node * +new_rd_NoMem (ir_graph *irg) { + return irg->no_mem; } ir_node * -new_rd_FuncCall (dbg_info* db, ir_graph *irg, ir_node *block, - ir_node *callee, int arity, ir_node **in, type *tp) +new_rd_Mux (dbg_info *db, ir_graph *irg, ir_node *block, + ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) { - ir_node **r_in; + ir_node *in[3]; ir_node *res; - int r_arity; - r_arity = arity+1; - NEW_ARR_A(ir_node *, r_in, r_arity); - r_in[0] = callee; - memcpy(&r_in[1], in, sizeof (ir_node *) * arity); + in[0] = sel; + in[1] = ir_false; + in[2] = ir_true; - res = new_ir_node(db, irg, block, op_FuncCall, mode_T, r_arity, r_in); + res = new_ir_node(db, irg, block, op_Mux, mode, 3, in); + assert(res); - assert(is_method_type(tp)); - set_FuncCall_type(res, tp); - res->attr.call.callee_arr = NULL; res = optimize_node(res); IRN_VRFY_IRG(res, irg); return res; } -INLINE ir_node *new_r_Block (ir_graph *irg, int arity, ir_node **in) { +ir_node *new_r_Block (ir_graph *irg, int arity, ir_node **in) { return new_rd_Block(NULL, irg, arity, in); } -INLINE ir_node *new_r_Start (ir_graph *irg, ir_node *block) { +ir_node *new_r_Start (ir_graph *irg, ir_node *block) { return new_rd_Start(NULL, irg, block); } -INLINE ir_node *new_r_End (ir_graph *irg, 
ir_node *block) { +ir_node *new_r_End (ir_graph *irg, ir_node *block) { return new_rd_End(NULL, irg, block); } -INLINE ir_node *new_r_Jmp (ir_graph *irg, ir_node *block) { +ir_node *new_r_Jmp (ir_graph *irg, ir_node *block) { return new_rd_Jmp(NULL, irg, block); } -INLINE ir_node *new_r_Cond (ir_graph *irg, ir_node *block, ir_node *c) { +ir_node *new_r_Cond (ir_graph *irg, ir_node *block, ir_node *c) { return new_rd_Cond(NULL, irg, block, c); } -INLINE ir_node *new_r_Return (ir_graph *irg, ir_node *block, +ir_node *new_r_Return (ir_graph *irg, ir_node *block, ir_node *store, int arity, ir_node **in) { return new_rd_Return(NULL, irg, block, store, arity, in); } -INLINE ir_node *new_r_Raise (ir_graph *irg, ir_node *block, +ir_node *new_r_Raise (ir_graph *irg, ir_node *block, ir_node *store, ir_node *obj) { return new_rd_Raise(NULL, irg, block, store, obj); } -INLINE ir_node *new_r_Const (ir_graph *irg, ir_node *block, +ir_node *new_r_Const (ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con) { return new_rd_Const(NULL, irg, block, mode, con); } -INLINE ir_node *new_r_SymConst (ir_graph *irg, ir_node *block, + +ir_node *new_r_Const_long(ir_graph *irg, ir_node *block, + ir_mode *mode, long value) { + return new_rd_Const_long(NULL, irg, block, mode, value); +} + + +ir_node *new_r_SymConst (ir_graph *irg, ir_node *block, symconst_symbol value, symconst_kind symkind) { return new_rd_SymConst(NULL, irg, block, value, symkind); } -INLINE ir_node *new_r_Sel (ir_graph *irg, ir_node *block, ir_node *store, +ir_node *new_r_Sel (ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr, int n_index, ir_node **index, entity *ent) { return new_rd_Sel(NULL, irg, block, store, objptr, n_index, index, ent); } -INLINE ir_node *new_r_InstOf (ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr, +ir_node *new_r_InstOf (ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr, type *ent) { return (new_rd_InstOf (NULL, irg, block, store, objptr, ent)); } -INLINE ir_node *new_r_Call (ir_graph *irg, ir_node *block, ir_node *store, +ir_node *new_r_Call (ir_graph *irg, ir_node *block, ir_node *store, ir_node *callee, int arity, ir_node **in, type *tp) { return new_rd_Call(NULL, irg, block, store, callee, arity, in, tp); } -INLINE ir_node *new_r_Add (ir_graph *irg, ir_node *block, +ir_node *new_r_Add (ir_graph *irg, ir_node *block, ir_node *op1, ir_node *op2, ir_mode *mode) { return new_rd_Add(NULL, irg, block, op1, op2, mode); } -INLINE ir_node *new_r_Sub (ir_graph *irg, ir_node *block, +ir_node *new_r_Sub (ir_graph *irg, ir_node *block, ir_node *op1, ir_node *op2, ir_mode *mode) { return new_rd_Sub(NULL, irg, block, op1, op2, mode); } -INLINE ir_node *new_r_Minus (ir_graph *irg, ir_node *block, +ir_node *new_r_Minus (ir_graph *irg, ir_node *block, ir_node *op, ir_mode *mode) { return new_rd_Minus(NULL, irg, block, op, mode); } -INLINE ir_node *new_r_Mul (ir_graph *irg, ir_node *block, +ir_node *new_r_Mul (ir_graph *irg, ir_node *block, ir_node *op1, ir_node *op2, ir_mode *mode) { return new_rd_Mul(NULL, irg, block, op1, op2, mode); } -INLINE ir_node *new_r_Quot (ir_graph *irg, ir_node *block, +ir_node *new_r_Quot (ir_graph *irg, ir_node *block, ir_node *memop, ir_node *op1, ir_node *op2) { return new_rd_Quot(NULL, irg, block, memop, op1, op2); } -INLINE ir_node *new_r_DivMod (ir_graph *irg, ir_node *block, +ir_node *new_r_DivMod (ir_graph *irg, ir_node *block, ir_node *memop, ir_node *op1, ir_node *op2) { return new_rd_DivMod(NULL, irg, block, memop, op1, op2); } -INLINE ir_node 
*new_r_Div (ir_graph *irg, ir_node *block, +ir_node *new_r_Div (ir_graph *irg, ir_node *block, ir_node *memop, ir_node *op1, ir_node *op2) { return new_rd_Div(NULL, irg, block, memop, op1, op2); } -INLINE ir_node *new_r_Mod (ir_graph *irg, ir_node *block, +ir_node *new_r_Mod (ir_graph *irg, ir_node *block, ir_node *memop, ir_node *op1, ir_node *op2) { return new_rd_Mod(NULL, irg, block, memop, op1, op2); } -INLINE ir_node *new_r_Abs (ir_graph *irg, ir_node *block, +ir_node *new_r_Abs (ir_graph *irg, ir_node *block, ir_node *op, ir_mode *mode) { return new_rd_Abs(NULL, irg, block, op, mode); } -INLINE ir_node *new_r_And (ir_graph *irg, ir_node *block, +ir_node *new_r_And (ir_graph *irg, ir_node *block, ir_node *op1, ir_node *op2, ir_mode *mode) { return new_rd_And(NULL, irg, block, op1, op2, mode); } -INLINE ir_node *new_r_Or (ir_graph *irg, ir_node *block, +ir_node *new_r_Or (ir_graph *irg, ir_node *block, ir_node *op1, ir_node *op2, ir_mode *mode) { return new_rd_Or(NULL, irg, block, op1, op2, mode); } -INLINE ir_node *new_r_Eor (ir_graph *irg, ir_node *block, +ir_node *new_r_Eor (ir_graph *irg, ir_node *block, ir_node *op1, ir_node *op2, ir_mode *mode) { return new_rd_Eor(NULL, irg, block, op1, op2, mode); } -INLINE ir_node *new_r_Not (ir_graph *irg, ir_node *block, +ir_node *new_r_Not (ir_graph *irg, ir_node *block, ir_node *op, ir_mode *mode) { return new_rd_Not(NULL, irg, block, op, mode); } -INLINE ir_node *new_r_Cmp (ir_graph *irg, ir_node *block, +ir_node *new_r_Cmp (ir_graph *irg, ir_node *block, ir_node *op1, ir_node *op2) { return new_rd_Cmp(NULL, irg, block, op1, op2); } -INLINE ir_node *new_r_Shl (ir_graph *irg, ir_node *block, +ir_node *new_r_Shl (ir_graph *irg, ir_node *block, ir_node *op, ir_node *k, ir_mode *mode) { return new_rd_Shl(NULL, irg, block, op, k, mode); } -INLINE ir_node *new_r_Shr (ir_graph *irg, ir_node *block, +ir_node *new_r_Shr (ir_graph *irg, ir_node *block, ir_node *op, ir_node *k, ir_mode *mode) { return new_rd_Shr(NULL, irg, block, op, k, mode); } -INLINE ir_node *new_r_Shrs (ir_graph *irg, ir_node *block, +ir_node *new_r_Shrs (ir_graph *irg, ir_node *block, ir_node *op, ir_node *k, ir_mode *mode) { return new_rd_Shrs(NULL, irg, block, op, k, mode); } -INLINE ir_node *new_r_Rot (ir_graph *irg, ir_node *block, +ir_node *new_r_Rot (ir_graph *irg, ir_node *block, ir_node *op, ir_node *k, ir_mode *mode) { return new_rd_Rot(NULL, irg, block, op, k, mode); } -INLINE ir_node *new_r_Conv (ir_graph *irg, ir_node *block, +ir_node *new_r_Conv (ir_graph *irg, ir_node *block, ir_node *op, ir_mode *mode) { return new_rd_Conv(NULL, irg, block, op, mode); } -INLINE ir_node *new_r_Cast (ir_graph *irg, ir_node *block, ir_node *op, type *to_tp) { +ir_node *new_r_Cast (ir_graph *irg, ir_node *block, ir_node *op, type *to_tp) { return new_rd_Cast(NULL, irg, block, op, to_tp); } -INLINE ir_node *new_r_Phi (ir_graph *irg, ir_node *block, int arity, +ir_node *new_r_Phi (ir_graph *irg, ir_node *block, int arity, ir_node **in, ir_mode *mode) { return new_rd_Phi(NULL, irg, block, arity, in, mode); } -INLINE ir_node *new_r_Load (ir_graph *irg, ir_node *block, - ir_node *store, ir_node *adr) { - return new_rd_Load(NULL, irg, block, store, adr); +ir_node *new_r_Load (ir_graph *irg, ir_node *block, + ir_node *store, ir_node *adr, ir_mode *mode) { + return new_rd_Load(NULL, irg, block, store, adr, mode); } -INLINE ir_node *new_r_Store (ir_graph *irg, ir_node *block, +ir_node *new_r_Store (ir_graph *irg, ir_node *block, ir_node *store, ir_node *adr, ir_node *val) { return 
new_rd_Store(NULL, irg, block, store, adr, val); } -INLINE ir_node *new_r_Alloc (ir_graph *irg, ir_node *block, ir_node *store, +ir_node *new_r_Alloc (ir_graph *irg, ir_node *block, ir_node *store, ir_node *size, type *alloc_type, where_alloc where) { return new_rd_Alloc(NULL, irg, block, store, size, alloc_type, where); } -INLINE ir_node *new_r_Free (ir_graph *irg, ir_node *block, ir_node *store, - ir_node *ptr, ir_node *size, type *free_type) { - return new_rd_Free(NULL, irg, block, store, ptr, size, free_type); +ir_node *new_r_Free (ir_graph *irg, ir_node *block, ir_node *store, + ir_node *ptr, ir_node *size, type *free_type, where_alloc where) { + return new_rd_Free(NULL, irg, block, store, ptr, size, free_type, where); } -INLINE ir_node *new_r_Sync (ir_graph *irg, ir_node *block, int arity, ir_node **in) { +ir_node *new_r_Sync (ir_graph *irg, ir_node *block, int arity, ir_node **in) { return new_rd_Sync(NULL, irg, block, arity, in); } -INLINE ir_node *new_r_Proj (ir_graph *irg, ir_node *block, ir_node *arg, +ir_node *new_r_Proj (ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode, long proj) { return new_rd_Proj(NULL, irg, block, arg, mode, proj); } -INLINE ir_node *new_r_defaultProj (ir_graph *irg, ir_node *block, ir_node *arg, +ir_node *new_r_defaultProj (ir_graph *irg, ir_node *block, ir_node *arg, long max_proj) { return new_rd_defaultProj(NULL, irg, block, arg, max_proj); } -INLINE ir_node *new_r_Tuple (ir_graph *irg, ir_node *block, +ir_node *new_r_Tuple (ir_graph *irg, ir_node *block, int arity, ir_node **in) { return new_rd_Tuple(NULL, irg, block, arity, in ); } -INLINE ir_node *new_r_Id (ir_graph *irg, ir_node *block, +ir_node *new_r_Id (ir_graph *irg, ir_node *block, ir_node *val, ir_mode *mode) { return new_rd_Id(NULL, irg, block, val, mode); } -INLINE ir_node *new_r_Bad (ir_graph *irg) { +ir_node *new_r_Bad (ir_graph *irg) { return new_rd_Bad(irg); } -INLINE ir_node *new_r_Confirm (ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) { +ir_node *new_r_Confirm (ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) { return new_rd_Confirm (NULL, irg, block, val, bound, cmp); } -INLINE ir_node *new_r_Unknown (ir_graph *irg, ir_mode *m) { +ir_node *new_r_Unknown (ir_graph *irg, ir_mode *m) { return new_rd_Unknown(irg, m); } -INLINE ir_node *new_r_CallBegin (ir_graph *irg, ir_node *block, ir_node *callee) { +ir_node *new_r_CallBegin (ir_graph *irg, ir_node *block, ir_node *callee) { return new_rd_CallBegin(NULL, irg, block, callee); } -INLINE ir_node *new_r_EndReg (ir_graph *irg, ir_node *block) { +ir_node *new_r_EndReg (ir_graph *irg, ir_node *block) { return new_rd_EndReg(NULL, irg, block); } -INLINE ir_node *new_r_EndExcept (ir_graph *irg, ir_node *block) { +ir_node *new_r_EndExcept (ir_graph *irg, ir_node *block) { return new_rd_EndExcept(NULL, irg, block); } -INLINE ir_node *new_r_Break (ir_graph *irg, ir_node *block) { +ir_node *new_r_Break (ir_graph *irg, ir_node *block) { return new_rd_Break(NULL, irg, block); } -INLINE ir_node *new_r_Filter (ir_graph *irg, ir_node *block, ir_node *arg, +ir_node *new_r_Filter (ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode, long proj) { return new_rd_Filter(NULL, irg, block, arg, mode, proj); } -INLINE ir_node *new_r_FuncCall (ir_graph *irg, ir_node *block, - ir_node *callee, int arity, ir_node **in, - type *tp) { - return new_rd_FuncCall(NULL, irg, block, callee, arity, in, tp); +ir_node *new_r_NoMem (ir_graph *irg) { + return new_rd_NoMem(irg); +} +ir_node *new_r_Mux (ir_graph 
*irg, ir_node *block, + ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) { + return new_rd_Mux(NULL, irg, block, sel, ir_false, ir_true, mode); } @@ -1117,9 +1149,11 @@ new_d_Block (dbg_info* db, int arity, ir_node **in) res = new_rd_Block(db, current_ir_graph, arity, in); /* Create and initialize array for Phi-node construction. */ - res->attr.block.graph_arr = NEW_ARR_D(ir_node *, current_ir_graph->obst, - current_ir_graph->n_loc); - memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc); + if (get_irg_phase_state(current_ir_graph) == phase_building) { + res->attr.block.graph_arr = NEW_ARR_D(ir_node *, current_ir_graph->obst, + current_ir_graph->n_loc); + memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc); + } for (i = arity-1; i >= 0; i--) if (get_irn_op(in[i]) == op_Unknown) { @@ -1145,7 +1179,7 @@ new_d_Block (dbg_info* db, int arity, ir_node **in) Call Graph: ( A ---> B == A "calls" B) - get_value mature_block + get_value mature_immBlock | | | | | | @@ -1185,7 +1219,7 @@ new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode) to model this stack, now it is explicit. This reduces side effects. */ #if USE_EXPLICIT_PHI_IN_STACK -INLINE Phi_in_stack * +Phi_in_stack * new_Phi_in_stack(void) { Phi_in_stack *res; @@ -1197,7 +1231,7 @@ new_Phi_in_stack(void) { return res; } -INLINE void +void free_Phi_in_stack(Phi_in_stack *s) { DEL_ARR_F(s->stack); free(s); @@ -1297,9 +1331,9 @@ new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int i If so, it is a real Phi node and we break the loop. Else the Phi node merges the same definition on several paths and therefore is not needed. */ - for (i = 0; i < ins; ++i) - { - if (in[i] == res || in[i] == known) continue; + for (i = 0; i < ins; ++i) { + if (in[i] == res || in[i] == known) + continue; if (known == res) known = in[i]; @@ -1312,7 +1346,8 @@ new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int i #if USE_EXPLICIT_PHI_IN_STACK free_to_Phi_in_stack(res); #else - obstack_free (current_ir_graph->obst, res); + edges_node_deleted(res, current_ir_graph); + obstack_free(current_ir_graph->obst, res); #endif res = known; } else { @@ -1367,7 +1402,7 @@ phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins) The call order get_value (makes Phi0, put's it into graph_arr) set_value (overwrites Phi0 in graph_arr) - mature_block (upgrades Phi0, puts it again into graph_arr, overwriting + mature_immBlock (upgrades Phi0, puts it again into graph_arr, overwriting the proper value.) fails. */ if (!block->attr.block.graph_arr[pos]) { @@ -1458,7 +1493,7 @@ get_r_value_internal (ir_node *block, int pos, ir_mode *mode) /* The block is not mature, we don't know how many in's are needed. A Phi with zero predecessors is created. Such a Phi node is called Phi0 node. (There is also an obsolete Phi0 opcode.) The Phi0 is then added - to the list of Phi0 nodes in this block to be matured by mature_block + to the list of Phi0 nodes in this block to be matured by mature_immBlock later. The Phi0 has to remember the pos of it's internal value. 
If the real Phi is computed, pos is used to update the array with the local @@ -1493,13 +1528,13 @@ get_r_value_internal (ir_node *block, int pos, ir_mode *mode) #if USE_EXPLICIT_PHI_IN_STACK /* Just dummies */ -INLINE Phi_in_stack * new_Phi_in_stack() { return NULL; } -INLINE void free_Phi_in_stack(Phi_in_stack *s) { } +Phi_in_stack * new_Phi_in_stack() { return NULL; } +void free_Phi_in_stack(Phi_in_stack *s) { } #endif static INLINE ir_node * new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, - ir_node **in, int ins, ir_node *phi0) + ir_node **in, int ins, ir_node *phi0) { int i; ir_node *res, *known; @@ -1535,15 +1570,18 @@ new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, /* i==ins: there is at most one predecessor, we don't need a phi node. */ if (i == ins) { if (res != known) { + edges_node_deleted(res, current_ir_graph); obstack_free (current_ir_graph->obst, res); if (is_Phi(known)) { - /* If pred is a phi node we want to optmize it: If loops are matured in a bad - order, an enclosing Phi know may get superfluous. */ - res = optimize_in_place_2(known); - if (res != known) { exchange(known, res); } - } else { - res = known; + /* If pred is a phi node we want to optimize it: If loops are matured in a bad + order, an enclosing Phi know may get superfluous. */ + res = optimize_in_place_2(known); + if (res != known) + exchange(known, res); + } + else + res = known; } else { /* A undefined value, e.g., in unreachable code. */ res = new_Bad(); @@ -1591,11 +1629,11 @@ static INLINE ir_node ** new_frag_arr (ir_node *n) arr[0] = new_Proj(n, mode_M, pn_Call_M_except); else { assert((pn_Quot_M == pn_DivMod_M) && - (pn_Quot_M == pn_Div_M) && - (pn_Quot_M == pn_Mod_M) && - (pn_Quot_M == pn_Load_M) && - (pn_Quot_M == pn_Store_M) && - (pn_Quot_M == pn_Alloc_M) ); + (pn_Quot_M == pn_Div_M) && + (pn_Quot_M == pn_Mod_M) && + (pn_Quot_M == pn_Load_M) && + (pn_Quot_M == pn_Store_M) && + (pn_Quot_M == pn_Alloc_M) ); arr[0] = new_Proj(n, mode_M, pn_Alloc_M); } set_optimize(opt); @@ -1604,14 +1642,22 @@ static INLINE ir_node ** new_frag_arr (ir_node *n) return arr; } +/** + * returns the frag_arr from a node + */ static INLINE ir_node ** get_frag_arr (ir_node *n) { - if (get_irn_op(n) == op_Call) { - return n->attr.call.frag_arr; - } else if (get_irn_op(n) == op_Alloc) { - return n->attr.a.frag_arr; - } else { - return n->attr.frag_arr; + switch (get_irn_opcode(n)) { + case iro_Call: + return n->attr.call.exc.frag_arr; + case iro_Alloc: + return n->attr.a.exc.frag_arr; + case iro_Load: + return n->attr.load.exc.frag_arr; + case iro_Store: + return n->attr.store.exc.frag_arr; + default: + return n->attr.except.frag_arr; } } @@ -1710,21 +1756,21 @@ phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins) before recuring. */ if (default_initialize_local_variable) - block->attr.block.graph_arr[pos] = default_initialize_local_variable(mode, pos - 1); + block->attr.block.graph_arr[pos] = default_initialize_local_variable(current_ir_graph, mode, pos - 1); else block->attr.block.graph_arr[pos] = new_Const(mode, tarval_bad); /* We don't need to care about exception ops in the start block. - There are none by definition. */ + There are none by definition. */ return block->attr.block.graph_arr[pos]; } else { phi0 = new_rd_Phi0(current_ir_graph, block, mode); block->attr.block.graph_arr[pos] = phi0; #if PRECISE_EXC_CONTEXT if (get_opt_precise_exc_context()) { - /* Set graph_arr for fragile ops. Also here we should break recursion. - We could choose a cyclic path through an cfop. 
But the recursion would - break at some point. */ - set_frag_value(block->attr.block.graph_arr, pos, phi0); + /* Set graph_arr for fragile ops. Also here we should break recursion. + We could choose a cyclic path through an cfop. But the recursion would + break at some point. */ + set_frag_value(block->attr.block.graph_arr, pos, phi0); } #endif } @@ -1747,9 +1793,9 @@ phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins) if (!is_Bad(prevBlock)) { #if PRECISE_EXC_CONTEXT if (get_opt_precise_exc_context() && - is_fragile_op(prevCfOp) && (get_irn_op (prevCfOp) != op_Bad)) { - assert(get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode)); - nin[i-1] = get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode); + is_fragile_op(prevCfOp) && (get_irn_op (prevCfOp) != op_Bad)) { + assert(get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode)); + nin[i-1] = get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode); } else #endif nin[i-1] = get_r_value_internal (prevBlock, pos, mode); @@ -1762,12 +1808,12 @@ phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins) optimization possibilities. The Phi0 node either is allocated in this function, or it comes from a former call to get_r_value_internal. In this case we may not yet - exchange phi0, as this is done in mature_block. */ + exchange phi0, as this is done in mature_immBlock. */ if (!phi0) { phi0_all = block->attr.block.graph_arr[pos]; if (!((get_irn_op(phi0_all) == op_Phi) && - (get_irn_arity(phi0_all) == 0) && - (get_nodes_block(phi0_all) == block))) + (get_irn_arity(phi0_all) == 0) && + (get_nodes_block(phi0_all) == block))) phi0_all = NULL; } else { phi0_all = phi0; @@ -1859,7 +1905,7 @@ get_r_value_internal (ir_node *block, int pos, ir_mode *mode) /* The block is not mature, we don't know how many in's are needed. A Phi with zero predecessors is created. Such a Phi node is called Phi0 node. The Phi0 is then added to the list of Phi0 nodes in this block - to be matured by mature_block later. + to be matured by mature_immBlock later. The Phi0 has to remember the pos of it's internal value. If the real Phi is computed, pos is used to update the array with the local values. */ @@ -1875,7 +1921,7 @@ get_r_value_internal (ir_node *block, int pos, ir_mode *mode) printf("Error: no value set. Use of undefined variable. Initializing to zero.\n"); assert (mode->code >= irm_F && mode->code <= irm_P); res = new_rd_Const (NULL, current_ir_graph, block, mode, - get_mode_null(mode)); + get_mode_null(mode)); } /* The local valid value is available now. */ @@ -1891,7 +1937,7 @@ get_r_value_internal (ir_node *block, int pos, ir_mode *mode) /** Finalize a Block node, when all control flows are known. */ /** Acceptable parameters are only Block nodes. 
*/ void -mature_block (ir_node *block) +mature_immBlock (ir_node *block) { int ins; @@ -1947,6 +1993,12 @@ new_d_Const (dbg_info* db, ir_mode *mode, tarval *con) } ir_node * +new_d_Const_long(dbg_info* db, ir_mode *mode, long value) +{ + return new_rd_Const_long(db, current_ir_graph, current_ir_graph->start_block, mode, value); +} + + ir_node * new_d_Const_type (dbg_info* db, ir_mode *mode, tarval *con, type *tp) { return new_rd_Const_type(db, current_ir_graph, current_ir_graph->start_block, @@ -2034,8 +2086,8 @@ new_d_Mul (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode) static void allocate_frag_arr(ir_node *res, ir_op *op, ir_node ***frag_store) { if (get_opt_precise_exc_context()) { if ((current_ir_graph->phase_state == phase_building) && - (get_irn_op(res) == op) && /* Could be optimized away. */ - !*frag_store) /* Could be a cse where the arr is already set. */ { + (get_irn_op(res) == op) && /* Could be optimized away. */ + !*frag_store) /* Could be a cse where the arr is already set. */ { *frag_store = new_frag_arr(res); } } @@ -2048,8 +2100,9 @@ new_d_Quot (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2) ir_node *res; res = new_rd_Quot (db, current_ir_graph, current_ir_graph->current_block, memop, op1, op2); + res->attr.except.pin_state = op_pin_state_pinned; #if PRECISE_EXC_CONTEXT - allocate_frag_arr(res, op_Quot, &res->attr.frag_arr); /* Could be optimized away. */ + allocate_frag_arr(res, op_Quot, &res->attr.except.frag_arr); /* Could be optimized away. */ #endif return res; @@ -2061,8 +2114,9 @@ new_d_DivMod (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2) ir_node *res; res = new_rd_DivMod (db, current_ir_graph, current_ir_graph->current_block, memop, op1, op2); + res->attr.except.pin_state = op_pin_state_pinned; #if PRECISE_EXC_CONTEXT - allocate_frag_arr(res, op_DivMod, &res->attr.frag_arr); /* Could be optimized away. */ + allocate_frag_arr(res, op_DivMod, &res->attr.except.frag_arr); /* Could be optimized away. */ #endif return res; @@ -2074,8 +2128,9 @@ new_d_Div (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2) ir_node *res; res = new_rd_Div (db, current_ir_graph, current_ir_graph->current_block, memop, op1, op2); + res->attr.except.pin_state = op_pin_state_pinned; #if PRECISE_EXC_CONTEXT - allocate_frag_arr(res, op_Div, &res->attr.frag_arr); /* Could be optimized away. */ + allocate_frag_arr(res, op_Div, &res->attr.except.frag_arr); /* Could be optimized away. */ #endif return res; @@ -2087,8 +2142,9 @@ new_d_Mod (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2) ir_node *res; res = new_rd_Mod (db, current_ir_graph, current_ir_graph->current_block, memop, op1, op2); + res->attr.except.pin_state = op_pin_state_pinned; #if PRECISE_EXC_CONTEXT - allocate_frag_arr(res, op_Mod, &res->attr.frag_arr); /* Could be optimized away. */ + allocate_frag_arr(res, op_Mod, &res->attr.except.frag_arr); /* Could be optimized away. */ #endif return res; @@ -2184,7 +2240,7 @@ new_d_Call (dbg_info* db, ir_node *store, ir_node *callee, int arity, ir_node ** res = new_rd_Call (db, current_ir_graph, current_ir_graph->current_block, store, callee, arity, in, tp); #if PRECISE_EXC_CONTEXT - allocate_frag_arr(res, op_Call, &res->attr.call.frag_arr); /* Could be optimized away. */ + allocate_frag_arr(res, op_Call, &res->attr.call.exc.frag_arr); /* Could be optimized away. 
*/ #endif return res; @@ -2205,13 +2261,13 @@ new_d_Raise (dbg_info* db, ir_node *store, ir_node *obj) } ir_node * -new_d_Load (dbg_info* db, ir_node *store, ir_node *addr) +new_d_Load (dbg_info* db, ir_node *store, ir_node *addr, ir_mode *mode) { ir_node *res; res = new_rd_Load (db, current_ir_graph, current_ir_graph->current_block, - store, addr); + store, addr, mode); #if PRECISE_EXC_CONTEXT - allocate_frag_arr(res, op_Load, &res->attr.frag_arr); /* Could be optimized away. */ + allocate_frag_arr(res, op_Load, &res->attr.load.exc.frag_arr); /* Could be optimized away. */ #endif return res; @@ -2224,7 +2280,7 @@ new_d_Store (dbg_info* db, ir_node *store, ir_node *addr, ir_node *val) res = new_rd_Store (db, current_ir_graph, current_ir_graph->current_block, store, addr, val); #if PRECISE_EXC_CONTEXT - allocate_frag_arr(res, op_Store, &res->attr.frag_arr); /* Could be optimized away. */ + allocate_frag_arr(res, op_Store, &res->attr.store.exc.frag_arr); /* Could be optimized away. */ #endif return res; @@ -2238,17 +2294,18 @@ new_d_Alloc (dbg_info* db, ir_node *store, ir_node *size, type *alloc_type, res = new_rd_Alloc (db, current_ir_graph, current_ir_graph->current_block, store, size, alloc_type, where); #if PRECISE_EXC_CONTEXT - allocate_frag_arr(res, op_Alloc, &res->attr.a.frag_arr); /* Could be optimized away. */ + allocate_frag_arr(res, op_Alloc, &res->attr.a.exc.frag_arr); /* Could be optimized away. */ #endif return res; } ir_node * -new_d_Free (dbg_info* db, ir_node *store, ir_node *ptr, ir_node *size, type *free_type) +new_d_Free (dbg_info* db, ir_node *store, ir_node *ptr, + ir_node *size, type *free_type, where_alloc where) { return new_rd_Free (db, current_ir_graph, current_ir_graph->current_block, - store, ptr, size, free_type); + store, ptr, size, free_type, where); } ir_node * @@ -2299,7 +2356,7 @@ new_d_Sync (dbg_info* db, int arity, ir_node** in) ir_node * (new_d_Bad)(void) { - return __new_d_Bad(); + return _new_d_Bad(); } ir_node * @@ -2353,14 +2410,16 @@ new_d_Filter (dbg_info *db, ir_node *arg, ir_mode *mode, long proj) } ir_node * -new_d_FuncCall (dbg_info* db, ir_node *callee, int arity, ir_node **in, - type *tp) +(new_d_NoMem)(void) { - ir_node *res; - res = new_rd_FuncCall (db, current_ir_graph, current_ir_graph->current_block, - callee, arity, in, tp); + return _new_d_NoMem(); +} - return res; +ir_node * +new_d_Mux (dbg_info *db, ir_node *sel, ir_node *ir_false, + ir_node *ir_true, ir_mode *mode) { + return new_rd_Mux (db, current_ir_graph, current_ir_graph->current_block, + sel, ir_false, ir_true, mode); } /* ********************************************************************* */ @@ -2377,12 +2436,13 @@ ir_node *new_d_immBlock (dbg_info* db) { /* creates a new dynamic in-array as length of in is -1 */ res = new_ir_node (db, current_ir_graph, NULL, op_Block, mode_BB, -1, NULL); current_ir_graph->current_block = res; - res->attr.block.matured = 0; + res->attr.block.matured = 0; + res->attr.block.dead = 0; /* res->attr.block.exc = exc_normal; */ /* res->attr.block.handler_entry = 0; */ - res->attr.block.irg = current_ir_graph; - res->attr.block.backedge = NULL; - res->attr.block.in_cg = NULL; + res->attr.block.irg = current_ir_graph; + res->attr.block.backedge = NULL; + res->attr.block.in_cg = NULL; res->attr.block.cg_backedge = NULL; set_Block_block_visited(res, 0); @@ -2397,14 +2457,14 @@ ir_node *new_d_immBlock (dbg_info* db) { return res; } -INLINE ir_node * +ir_node * new_immBlock (void) { return new_d_immBlock(NULL); } /* add an adge to a jmp/control flow 
node */ void -add_in_edge (ir_node *block, ir_node *jmp) +add_immBlock_pred (ir_node *block, ir_node *jmp) { if (block->attr.block.matured) { assert(0 && "Error: Block already matured!\n"); @@ -2417,7 +2477,7 @@ add_in_edge (ir_node *block, ir_node *jmp) /* changing the current block */ void -switch_block (ir_node *target) +set_cur_block (ir_node *target) { current_ir_graph->current_block = target; } @@ -2435,14 +2495,14 @@ get_d_value (dbg_info* db, int pos, ir_mode *mode) return get_r_value_internal (current_ir_graph->current_block, pos + 1, mode); } /* get a value from the parameter array from the current block by its index */ -INLINE ir_node * +ir_node * get_value (int pos, ir_mode *mode) { return get_d_value(NULL, pos, mode); } /* set a value at position pos in the parameter array from the current block */ -INLINE void +void set_value (int pos, ir_node *value) { assert(get_irg_phase_state (current_ir_graph) == phase_building); @@ -2451,7 +2511,7 @@ set_value (int pos, ir_node *value) } /* get the current store */ -INLINE ir_node * +ir_node * get_store (void) { assert(get_irg_phase_state (current_ir_graph) == phase_building); @@ -2461,7 +2521,7 @@ get_store (void) } /* set the current store */ -INLINE void +void set_store (ir_node *store) { /* GL: one could call set_value instead */ @@ -2477,7 +2537,7 @@ keep_alive (ir_node *ka) /** Useful access routines **/ /* Returns the current block of the current graph. To set the current - block use switch_block(). */ + block use set_cur_block. */ ir_node *get_cur_block() { return get_irg_current_block(current_ir_graph); } @@ -2493,17 +2553,28 @@ type *get_cur_frame_type() { /* call once for each run of the library */ void -init_cons (default_initialize_local_variable_func_t *func) +init_cons(uninitialized_local_variable_func_t *func) { default_initialize_local_variable = func; } /* call for each graph */ void -finalize_cons (ir_graph *irg) { +irg_finalize_cons (ir_graph *irg) { irg->phase_state = phase_high; } +void +irp_finalize_cons (void) { + int i, n_irgs = get_irp_n_irgs(); + for (i = 0; i < n_irgs; i++) { + irg_finalize_cons(get_irp_irg(i)); + } + irp->phase_state = phase_high;\ +} + + + ir_node *new_Block(int arity, ir_node **in) { return new_d_Block(NULL, arity, in); @@ -2529,6 +2600,16 @@ ir_node *new_Raise (ir_node *store, ir_node *obj) { ir_node *new_Const (ir_mode *mode, tarval *con) { return new_d_Const(NULL, mode, con); } + +ir_node *new_Const_long(ir_mode *mode, long value) +{ + return new_d_Const_long(NULL, mode, value); +} + +ir_node *new_Const_type(tarval *con, type *tp) { + return new_d_Const_type(NULL, get_type_mode(tp), con, tp); +} + ir_node *new_SymConst (symconst_symbol value, symconst_kind kind) { return new_d_SymConst(NULL, value, kind); } @@ -2610,8 +2691,8 @@ ir_node *new_Cast (ir_node *op, type *to_tp) { ir_node *new_Phi (int arity, ir_node **in, ir_mode *mode) { return new_d_Phi(NULL, arity, in, mode); } -ir_node *new_Load (ir_node *store, ir_node *addr) { - return new_d_Load(NULL, store, addr); +ir_node *new_Load (ir_node *store, ir_node *addr, ir_mode *mode) { + return new_d_Load(NULL, store, addr, mode); } ir_node *new_Store (ir_node *store, ir_node *addr, ir_node *val) { return new_d_Store(NULL, store, addr, val); @@ -2621,8 +2702,8 @@ ir_node *new_Alloc (ir_node *store, ir_node *size, type *alloc_type, return new_d_Alloc(NULL, store, size, alloc_type, where); } ir_node *new_Free (ir_node *store, ir_node *ptr, ir_node *size, - type *free_type) { - return new_d_Free(NULL, store, ptr, size, free_type); + type 
*free_type, where_alloc where) { + return new_d_Free(NULL, store, ptr, size, free_type, where); } ir_node *new_Sync (int arity, ir_node **in) { return new_d_Sync(NULL, arity, in); @@ -2663,6 +2744,9 @@ ir_node *new_Break (void) { ir_node *new_Filter (ir_node *arg, ir_mode *mode, long proj) { return new_d_Filter(NULL, arg, mode, proj); } -ir_node *new_FuncCall (ir_node *callee, int arity, ir_node **in, type *tp) { - return new_d_FuncCall(NULL, callee, arity, in, tp); +ir_node *new_NoMem (void) { + return new_d_NoMem(); +} +ir_node *new_Mux (ir_node *sel, ir_node *ir_false, ir_node *ir_true, ir_mode *mode) { + return new_d_Mux(NULL, sel, ir_false, ir_true, mode); }
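
The patch above renames several parts of the public construction interface (mature_block becomes mature_immBlock, add_in_edge becomes add_immBlock_pred, switch_block becomes set_cur_block, finalize_cons becomes irg_finalize_cons) and extends new_Load() with the mode of the loaded value. The following sketch shows how a front end would drive the renamed interface to build an if/else with the automatic Phi-node construction. It is illustrative only and not part of the patch; new_Return(), get_irg_end_block(), the modes mode_Is, mode_b and mode_X, and the projection numbers pn_Cmp_Gt, pn_Cond_true and pn_Cond_false are assumed from the public libfirm headers of the same era and do not appear in this diff.

#include "firm.h"   /* aggregate public header; exact header name assumed */

/* Fill the body of an already created ir_graph with res = max(a, b),
   where value slots 0 and 1 are assumed to hold the operands already
   (e.g. stored with set_value() from Projs of the Start node). */
void build_max_body(ir_graph *irg)
{
  ir_node *a, *b, *cond, *proj_t, *proj_f;
  ir_node *b_then, *b_else, *b_join, *jmp_t, *jmp_f, *res, *ret;

  a = get_value(0, mode_Is);
  b = get_value(1, mode_Is);

  /* branch on a > b in the current block, then close it */
  cond   = new_Cond(new_Proj(new_Cmp(a, b), mode_b, pn_Cmp_Gt));
  proj_t = new_Proj(cond, mode_X, pn_Cond_true);
  proj_f = new_Proj(cond, mode_X, pn_Cond_false);
  mature_immBlock(get_cur_block());

  /* then part: result (slot 0) = a; new_immBlock() also sets the
     current block, set_cur_block() could switch back to it later */
  b_then = new_immBlock();
  add_immBlock_pred(b_then, proj_t);      /* formerly add_in_edge() */
  set_value(0, a);
  jmp_t = new_Jmp();
  mature_immBlock(b_then);                /* formerly mature_block() */

  /* else part: result = b */
  b_else = new_immBlock();
  add_immBlock_pred(b_else, proj_f);
  set_value(0, b);
  jmp_f = new_Jmp();
  mature_immBlock(b_else);

  /* join block: get_value() inserts the required Phi automatically */
  b_join = new_immBlock();
  add_immBlock_pred(b_join, jmp_t);
  add_immBlock_pred(b_join, jmp_f);
  mature_immBlock(b_join);

  res = get_value(0, mode_Is);
  ret = new_Return(get_store(), 1, &res);
  add_immBlock_pred(get_irg_end_block(irg), ret);

  irg_finalize_cons(irg);                 /* formerly finalize_cons() */
}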
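
A second, equally hypothetical fragment illustrates the changed memory constructors: new_Load() now carries the mode of the loaded value, new_Free() takes a where_alloc argument and is itself of mode_M, and the new Mux and NoMem nodes take over the role of the removed FuncCall constructor for side-effect-free selection and memory-less operations. pn_Load_res, pn_Store_M, pn_Cmp_Ge, heap_alloc and mode_Iu are assumed from the public headers rather than shown in this diff.

/* Sketch only: load an int through ptr, free the object, and clamp the
   loaded value to a minimum of zero without control flow. */
ir_node *memory_example(ir_node *ptr, type *elem_tp)
{
  ir_node *ld, *val, *zero, *ge0;

  /* Load now names the mode of the value being loaded. */
  ld  = new_Load(get_store(), ptr, mode_Is);
  val = new_Proj(ld, mode_Is, pn_Load_res);
  set_store(new_Proj(ld, mode_M, pn_Load_M));

  /* Free now records where the object was allocated; the node is of
     mode_M, so its result is the new store directly. */
  set_store(new_Free(get_store(), ptr, new_Const_long(mode_Iu, 1),
                     elem_tp, heap_alloc));

  /* Mux selects one of two values under a mode_b condition without
     control flow; new_NoMem() would likewise serve as the memory
     operand of an operation known not to touch memory. */
  zero = new_Const_long(mode_Is, 0);
  ge0  = new_Proj(new_Cmp(val, zero), mode_b, pn_Cmp_Ge);
  return new_Mux(ge0, zero, val, mode_Is);
}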