From 6b45fde8adadc605dfe4c3a181c7fd8f78ac227c Mon Sep 17 00:00:00 2001 From: Matthias Braun Date: Tue, 31 May 2011 16:27:06 +0200 Subject: [PATCH 1/1] rework fragile ops to have a throws_exception attribute --- include/libfirm/firm_types.h | 11 +- include/libfirm/irnode.h | 11 ++ ir/be/beabi.c | 7 +- ir/be/benode.c | 1 + ir/be/benode.h | 7 +- ir/be/ia32/bearch_ia32.c | 13 +- ir/be/ia32/ia32_common_transform.c | 2 + ir/be/ia32/ia32_fpu.c | 4 +- ir/be/ia32/ia32_optimize.c | 21 +- ir/be/ia32/ia32_spec.pl | 34 ++-- ir/be/ia32/ia32_transform.c | 296 ++++++++++++++++++++--------- ir/ir/irnode.c | 17 ++ ir/ir/irnode_t.h | 2 +- ir/ir/iropt.c | 4 +- ir/ir/irtypes.h | 9 +- ir/ir/irverify.c | 17 ++ ir/lower/lower_intrinsics.c | 77 +++++--- scripts/gen_ir.py | 5 + scripts/ir_spec.py | 22 ++- scripts/spec_util.py | 29 ++- 20 files changed, 423 insertions(+), 166 deletions(-) diff --git a/include/libfirm/firm_types.h b/include/libfirm/firm_types.h index bccb220b7..188443231 100644 --- a/include/libfirm/firm_types.h +++ b/include/libfirm/firm_types.h @@ -143,10 +143,13 @@ ENUM_BITSET(ir_relation) * constrained flags for memory operations. */ typedef enum ir_cons_flags { - cons_none = 0, /**< No constrains. */ - cons_volatile = 1U << 0, /**< Memory operation is volatile. */ - cons_unaligned = 1U << 1, /**< Memory operation is unaligned. */ - cons_floats = 1U << 2 /**< Memory operation can float. */ + cons_none = 0, /**< No constrains. */ + cons_volatile = 1U << 0, /**< Memory operation is volatile. */ + cons_unaligned = 1U << 1, /**< Memory operation is unaligned. */ + cons_floats = 1U << 2, /**< Memory operation can float. */ + cons_throws_exception = 1U << 3, /**< fragile op throws exception (and + produces X_regular and X_except + values) */ } ir_cons_flags; ENUM_BITSET(ir_cons_flags) diff --git a/include/libfirm/irnode.h b/include/libfirm/irnode.h index 8713c58eb..a3f20b18f 100644 --- a/include/libfirm/irnode.h +++ b/include/libfirm/irnode.h @@ -488,6 +488,17 @@ FIRM_API int is_x_except_Proj(const ir_node *node); */ FIRM_API int is_x_regular_Proj(const ir_node *node); +/** + * Set throws exception attribute of a fragile node + * @p throws_exception must be 0 or 1 + */ +FIRM_API void ir_set_throws_exception(ir_node *node, int throws_exception); + +/** + * Returns throws_exception attribute of a fragile node + */ +FIRM_API int ir_throws_exception(const ir_node *node); + /** returns the name of an ir_relation */ FIRM_API const char *get_relation_string(ir_relation relation); diff --git a/ir/be/beabi.c b/ir/be/beabi.c index 55da37b17..5ec10b4f7 100644 --- a/ir/be/beabi.c +++ b/ir/be/beabi.c @@ -362,6 +362,7 @@ static ir_node *adjust_call(be_abi_irg_t *env, ir_node *irn, ir_node *curr_sp) int *reg_param_idxs; int *stack_param_idx; int i, n, destroy_all_regs; + int throws_exception; size_t s; size_t p; dbg_info *dbgi; @@ -591,6 +592,7 @@ static ir_node *adjust_call(be_abi_irg_t *env, ir_node *irn, ir_node *curr_sp) assert(n_ins == (int) (n_reg_params + ARR_LEN(states))); /* ins collected, build the call */ + throws_exception = ir_throws_exception(irn); if (env->call->flags.bits.call_has_imm && is_SymConst(call_ptr)) { /* direct call */ low_call = be_new_Call(dbgi, irg, bl, curr_mem, curr_sp, curr_sp, @@ -603,6 +605,7 @@ static ir_node *adjust_call(be_abi_irg_t *env, ir_node *irn, ir_node *curr_sp) n_reg_results + pn_be_Call_first_res + ARR_LEN(destroyed_regs), n_ins, in, get_Call_type(irn)); } + ir_set_throws_exception(low_call, throws_exception); be_Call_set_pop(low_call, call->pop); /* put the call into 
the list of all calls for later processing */ @@ -616,9 +619,9 @@ static ir_node *adjust_call(be_abi_irg_t *env, ir_node *irn, ir_node *curr_sp) /* now handle results */ for (i = 0; i < n_res; ++i) { - int pn; ir_node *proj = res_projs[i]; be_abi_call_arg_t *arg = get_call_arg(call, 1, i, 0); + long pn = i + pn_be_Call_first_res; /* returns values on stack not supported yet */ assert(arg->in_reg); @@ -753,7 +756,7 @@ static ir_node *adjust_call(be_abi_irg_t *env, ir_node *irn, ir_node *curr_sp) } if (! mem_proj) { - mem_proj = new_r_Proj(low_call, mode_M, pn_be_Call_M_regular); + mem_proj = new_r_Proj(low_call, mode_M, pn_be_Call_M); keep_alive(mem_proj); } } diff --git a/ir/be/benode.c b/ir/be/benode.c index bcca214cd..3d7c28557 100644 --- a/ir/be/benode.c +++ b/ir/be/benode.c @@ -1385,6 +1385,7 @@ void be_init_op(void) op_be_Keep = new_ir_op(beo_Keep, "be_Keep", op_pin_state_exc_pinned, irop_flag_keep, oparity_dynamic, 0, sizeof(be_node_attr_t), &be_node_op_ops); op_be_CopyKeep = new_ir_op(beo_CopyKeep, "be_CopyKeep", op_pin_state_exc_pinned, irop_flag_keep, oparity_variable, 0, sizeof(be_node_attr_t), &be_node_op_ops); op_be_Call = new_ir_op(beo_Call, "be_Call", op_pin_state_exc_pinned, irop_flag_fragile|irop_flag_uses_memory, oparity_variable, 0, sizeof(be_call_attr_t), &be_node_op_ops); + ir_op_set_fragile_indices(op_be_Call, n_be_Call_mem, pn_be_Call_X_regular, pn_be_Call_X_except); op_be_Return = new_ir_op(beo_Return, "be_Return", op_pin_state_exc_pinned, irop_flag_cfopcode, oparity_dynamic, 0, sizeof(be_return_attr_t), &be_node_op_ops); op_be_AddSP = new_ir_op(beo_AddSP, "be_AddSP", op_pin_state_exc_pinned, irop_flag_none, oparity_unary, 0, sizeof(be_node_attr_t), &be_node_op_ops); op_be_SubSP = new_ir_op(beo_SubSP, "be_SubSP", op_pin_state_exc_pinned, irop_flag_none, oparity_unary, 0, sizeof(be_node_attr_t), &be_node_op_ops); diff --git a/ir/be/benode.h b/ir/be/benode.h index f6b00ffc4..fc01db6ec 100644 --- a/ir/be/benode.h +++ b/ir/be/benode.h @@ -285,10 +285,11 @@ enum { * Projection numbers for result of be_Call node: use for Proj nodes! */ typedef enum { - pn_be_Call_M_regular = pn_Call_M, /**< The memory result of a be_Call. */ + pn_be_Call_M = pn_Call_M, /**< The memory result of a be_Call. */ + pn_be_Call_X_regular = pn_Call_X_regular, + pn_be_Call_X_except = pn_Call_X_except, pn_be_Call_sp = pn_Call_max+1, - pn_be_Call_first_res /**< The first result proj number of a - be_Call. */ + pn_be_Call_first_res /**< The first result proj number of a be_Call. 
*/ } pn_be_Call; /** diff --git a/ir/be/ia32/bearch_ia32.c b/ir/be/ia32/bearch_ia32.c index 090e243fc..0b361186a 100644 --- a/ir/be/ia32/bearch_ia32.c +++ b/ir/be/ia32/bearch_ia32.c @@ -889,6 +889,7 @@ static void transform_to_Store(ir_node *node) ir_node *nomem = get_irg_no_mem(irg); ir_node *ptr = get_irg_frame(irg); ir_node *val = get_irn_n(node, n_be_Spill_val); + ir_node *res; ir_node *store; ir_node *sched_point = NULL; @@ -897,17 +898,23 @@ static void transform_to_Store(ir_node *node) } if (mode_is_float(mode)) { - if (ia32_cg_config.use_sse2) + if (ia32_cg_config.use_sse2) { store = new_bd_ia32_xStore(dbg, block, ptr, noreg, nomem, val); - else + res = new_r_Proj(store, mode_M, pn_ia32_xStore_M); + } else { store = new_bd_ia32_vfst(dbg, block, ptr, noreg, nomem, val, mode); + res = new_r_Proj(store, mode_M, pn_ia32_vfst_M); + } } else if (get_mode_size_bits(mode) == 128) { /* Spill 128 bit SSE registers */ store = new_bd_ia32_xxStore(dbg, block, ptr, noreg, nomem, val); + res = new_r_Proj(store, mode_M, pn_ia32_xxStore_M); } else if (get_mode_size_bits(mode) == 8) { store = new_bd_ia32_Store8Bit(dbg, block, ptr, noreg, nomem, val); + res = new_r_Proj(store, mode_M, pn_ia32_Store8Bit_M); } else { store = new_bd_ia32_Store(dbg, block, ptr, noreg, nomem, val); + res = new_r_Proj(store, mode_M, pn_ia32_Store_M); } set_ia32_op_type(store, ia32_AddrModeD); @@ -923,7 +930,7 @@ static void transform_to_Store(ir_node *node) sched_remove(node); } - exchange(node, store); + exchange(node, res); } static ir_node *create_push(ir_node *node, ir_node *schedpoint, ir_node *sp, ir_node *mem, ir_entity *ent) diff --git a/ir/be/ia32/ia32_common_transform.c b/ir/be/ia32/ia32_common_transform.c index 9e9ae9b03..06d2f43d2 100644 --- a/ir/be/ia32/ia32_common_transform.c +++ b/ir/be/ia32/ia32_common_transform.c @@ -751,6 +751,7 @@ ir_node *ia32_gen_CopyB(ir_node *node) ir_node *res = NULL; dbg_info *dbgi = get_irn_dbg_info(node); int size = get_type_size_bytes(get_CopyB_type(node)); + int throws_exception = ir_throws_exception(node); int rem; /* If we have to copy more than 32 bytes, we use REP MOVSx and */ @@ -769,6 +770,7 @@ ir_node *ia32_gen_CopyB(ir_node *node) } res = new_bd_ia32_CopyB_i(dbgi, block, new_dst, new_src, new_mem, size); } + ir_set_throws_exception(res, throws_exception); SET_IA32_ORIG_NODE(res, node); diff --git a/ir/be/ia32/ia32_fpu.c b/ir/be/ia32/ia32_fpu.c index bb3ab792a..5e3b54707 100644 --- a/ir/be/ia32/ia32_fpu.c +++ b/ir/be/ia32/ia32_fpu.c @@ -173,6 +173,7 @@ static ir_node *create_fpu_mode_reload(void *env, ir_node *state, ir_mode *lsmode = ia32_reg_classes[CLASS_ia32_fp_cw].mode; ir_node *nomem = get_irg_no_mem(irg); ir_node *cwstore, *load, *load_res, *orn, *store, *fldcw; + ir_node *store_proj; ir_node *or_const; assert(last_state != NULL); @@ -204,9 +205,10 @@ static ir_node *create_fpu_mode_reload(void *env, ir_node *state, /* use mode_Iu, as movl has a shorter opcode than movw */ set_ia32_ls_mode(store, mode_Iu); set_ia32_use_frame(store); + store_proj = new_r_Proj(store, mode_M, pn_ia32_Store_M); sched_add_before(before, store); - fldcw = new_bd_ia32_FldCW(NULL, block, frame, noreg, store); + fldcw = new_bd_ia32_FldCW(NULL, block, frame, noreg, store_proj); set_ia32_op_type(fldcw, ia32_AddrModeS); set_ia32_ls_mode(fldcw, lsmode); set_ia32_use_frame(fldcw); diff --git a/ir/be/ia32/ia32_optimize.c b/ir/be/ia32/ia32_optimize.c index 2b4b36135..a09ead131 100644 --- a/ir/be/ia32/ia32_optimize.c +++ b/ir/be/ia32/ia32_optimize.c @@ -483,12 +483,15 @@ static void 
peephole_IncSP_Store_to_push(ir_node *irn) ir_node *val, *mem, *mem_proj; ir_node *store = stores[i]; ir_node *noreg = ia32_new_NoReg_gp(irg); + const ir_edge_t *edge; + const ir_edge_t *next; val = get_irn_n(store, n_ia32_unary_op); mem = get_irn_n(store, n_ia32_mem); spreg = arch_get_irn_register(curr_sp); - push = new_bd_ia32_Push(get_irn_dbg_info(store), block, noreg, noreg, mem, val, curr_sp); + push = new_bd_ia32_Push(get_irn_dbg_info(store), block, noreg, noreg, + mem, val, curr_sp); copy_mark(store, push); if (first_push == NULL) @@ -503,8 +506,22 @@ static void peephole_IncSP_Store_to_push(ir_node *irn) /* create memory Proj */ mem_proj = new_r_Proj(push, mode_M, pn_ia32_Push_M); + /* rewire Store Projs */ + foreach_out_edge_safe(store, edge, next) { + ir_node *proj = get_edge_src_irn(edge); + if (!is_Proj(proj)) + continue; + switch (get_Proj_proj(proj)) { + case pn_ia32_Store_M: + exchange(proj, mem_proj); + break; + default: + panic("unexpected Proj on Store->IncSp"); + } + } + /* use the memproj now */ - be_peephole_exchange(store, mem_proj); + be_peephole_exchange(store, push); inc_ofs -= 4; } diff --git a/ir/be/ia32/ia32_spec.pl b/ir/be/ia32/ia32_spec.pl index 199a7d4c5..667cf9e55 100644 --- a/ir/be/ia32/ia32_spec.pl +++ b/ir/be/ia32/ia32_spec.pl @@ -1221,7 +1221,6 @@ Store => { emit => '. mov%M %SI3, %AM', latency => 2, units => [ "GP" ], - mode => "mode_M", }, Store8Bit => { @@ -1234,7 +1233,6 @@ Store8Bit => { emit => '. mov%M %SB3, %AM', latency => 2, units => [ "GP" ], - mode => "mode_M", }, Lea => { @@ -1436,13 +1434,14 @@ Popcnt => { }, Call => { + op_flags => [ "fragile" ], state => "exc_pinned", reg_req => { in => [ "gp", "gp", "none", "gp", "esp", "fpcw", "eax", "ecx", "edx" ], - out => [ "esp:I|S", "fpcw:I", "none", "eax", "ecx", "edx", "vf0", "vf1", "vf2", "vf3", "vf4", "vf5", "vf6", "vf7", "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7" ] + out => [ "esp:I|S", "fpcw:I", "none", "eax", "ecx", "edx", "vf0", "vf1", "vf2", "vf3", "vf4", "vf5", "vf6", "vf7", "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7", "none", "none" ] }, ins => [ "base", "index", "mem", "addr", "stack", "fpcw", "eax", "ecx", "edx" ], - outs => [ "stack", "fpcw", "M", "eax", "ecx", "edx", "vf0", "vf1", "vf2", "vf3", "vf4", "vf5", "vf6", "vf7", "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7" ], + outs => [ "stack", "fpcw", "M", "eax", "ecx", "edx", "vf0", "vf1", "vf2", "vf3", "vf4", "vf5", "vf6", "vf7", "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7", "X_regular", "X_except" ], attr_type => "ia32_call_attr_t", attr => "unsigned pop, ir_type *call_tp", am => "source,unary", @@ -1882,7 +1881,6 @@ xStore => { emit => '. mov%XXM %S3, %AM', latency => 0, units => [ "SSE" ], - mode => "mode_M", }, xStoreSimple => { @@ -1895,7 +1893,6 @@ xStoreSimple => { emit => '. 
mov%XXM %S3, %AM', latency => 0, units => [ "SSE" ], - mode => "mode_M", }, CvtSI2SS => { @@ -1980,11 +1977,12 @@ Cwtl => { }, Conv_I2I => { + op_flags => [ "fragile" ], state => "exc_pinned", reg_req => { in => [ "gp", "gp", "none", "gp" ], - out => [ "gp", "none", "none" ] }, + out => [ "gp", "none", "none", "none", "none" ] }, ins => [ "base", "index", "mem", "val" ], - outs => [ "res", "flags", "M" ], + outs => [ "res", "flags", "M", "X_regular", "X_except" ], am => "source,unary", units => [ "GP" ], latency => 1, @@ -1994,11 +1992,12 @@ Conv_I2I => { }, Conv_I2I8Bit => { + op_flags => [ "fragile" ], state => "exc_pinned", reg_req => { in => [ "gp", "gp", "none", "eax ebx ecx edx" ], - out => [ "gp", "none", "none" ] }, + out => [ "gp", "none", "none", "none", "none" ] }, ins => [ "base", "index", "mem", "val" ], - outs => [ "res", "flags", "M" ], + outs => [ "res", "flags", "M", "X_regular", "X_except" ], am => "source,unary", units => [ "GP" ], latency => 1, @@ -2150,7 +2149,6 @@ vfst => { init_attr => "attr->attr.ls_mode = store_mode;", latency => 2, units => [ "VFP" ], - mode => "mode_M", attr_type => "ia32_x87_attr_t", }, @@ -2166,22 +2164,25 @@ vfild => { }, vfist => { + op_flags => [ "fragile" ], state => "exc_pinned", - reg_req => { in => [ "gp", "gp", "none", "vfp", "fpcw" ], out => [ "none" ] }, + reg_req => { in => [ "gp", "gp", "none", "vfp", "fpcw" ], + out => [ "none", "none", "none", "none" ] }, ins => [ "base", "index", "mem", "val", "fpcw" ], - outs => [ "M" ], + outs => [ "dummy", "M", "X_regular", "X_except" ], latency => 4, units => [ "VFP" ], - mode => "mode_M", attr_type => "ia32_x87_attr_t", }, # SSE3 fisttp instruction vfisttp => { + op_flags => [ "fragile" ], state => "exc_pinned", - reg_req => { in => [ "gp", "gp", "none", "vfp" ], out => [ "in_r4", "none" ]}, + reg_req => { in => [ "gp", "gp", "none", "vfp" ], + out => [ "in_r4", "none", "none", "none" ]}, ins => [ "base", "index", "mem", "val" ], - outs => [ "res", "M" ], + outs => [ "res", "M", "X_regular", "X_except" ], latency => 4, units => [ "VFP" ], attr_type => "ia32_x87_attr_t", @@ -2720,7 +2721,6 @@ xxStore => { emit => '. 
movdqu %binop', units => [ "SSE" ], latency => 1, - mode => "mode_M", }, ); # end of %nodes diff --git a/ir/be/ia32/ia32_transform.c b/ir/be/ia32/ia32_transform.c index b32cd8db6..ca8b7ea07 100644 --- a/ir/be/ia32/ia32_transform.c +++ b/ir/be/ia32/ia32_transform.c @@ -1677,9 +1677,10 @@ static ir_node *create_sex_32_64(dbg_info *dbgi, ir_node *block, */ static ir_node *create_Div(ir_node *node) { - dbg_info *dbgi = get_irn_dbg_info(node); - ir_node *block = get_nodes_block(node); - ir_node *new_block = be_transform_node(block); + dbg_info *dbgi = get_irn_dbg_info(node); + ir_node *block = get_nodes_block(node); + ir_node *new_block = be_transform_node(block); + int throws_exception = ir_throws_exception(node); ir_node *mem; ir_node *new_mem; ir_node *op1; @@ -1726,6 +1727,7 @@ static ir_node *create_Div(ir_node *node) addr->index, new_mem, am.new_op2, am.new_op1, sign_extension); } + ir_set_throws_exception(new_node, throws_exception); set_irn_pinned(new_node, get_irn_pinned(node)); @@ -2185,6 +2187,7 @@ static ir_node *gen_Load(ir_node *node) ir_node *new_mem = be_transform_node(mem); dbg_info *dbgi = get_irn_dbg_info(node); ir_mode *mode = get_Load_mode(node); + int throws_exception = ir_throws_exception(node); ir_node *base; ir_node *index; ir_node *new_node; @@ -2227,6 +2230,7 @@ static ir_node *gen_Load(ir_node *node) new_node = new_bd_ia32_Load(dbgi, block, base, index, new_mem); } } + ir_set_throws_exception(new_node, throws_exception); set_irn_pinned(new_node, get_irn_pinned(node)); set_ia32_op_type(new_node, ia32_AddrModeS); @@ -2619,6 +2623,7 @@ static ir_node *gen_float_const_Store(ir_node *node, ir_node *cns) dbg_info *dbgi = get_irn_dbg_info(node); int ofs = 0; int i = 0; + int throws_exception = ir_throws_exception(node); ir_node *ins[4]; ia32_address_t addr; @@ -2637,7 +2642,9 @@ static ir_node *gen_float_const_Store(ir_node *node, ir_node *cns) ir_node *new_node = new_bd_ia32_Store(dbgi, new_block, addr.base, addr.index, addr.mem, imm); + ir_node *mem = new_r_Proj(new_node, mode_M, pn_ia32_Store_M); + ir_set_throws_exception(new_node, throws_exception); set_irn_pinned(new_node, get_irn_pinned(node)); set_ia32_op_type(new_node, ia32_AddrModeD); set_ia32_ls_mode(new_node, mode_Iu); @@ -2645,7 +2652,7 @@ static ir_node *gen_float_const_Store(ir_node *node, ir_node *cns) SET_IA32_ORIG_NODE(new_node, node); assert(i < 4); - ins[i++] = new_node; + ins[i++] = mem; size -= 4; ofs += 4; @@ -2655,18 +2662,16 @@ static ir_node *gen_float_const_Store(ir_node *node, ir_node *cns) if (i > 1) { return new_rd_Sync(dbgi, new_block, i, ins); } else { - return ins[0]; + return get_Proj_pred(ins[0]); } } /** * Generate a vfist or vfisttp instruction. */ -static ir_node *gen_vfist(dbg_info *dbgi, ir_node *block, ir_node *base, ir_node *index, - ir_node *mem, ir_node *val, ir_node **fist) +static ir_node *gen_vfist(dbg_info *dbgi, ir_node *block, ir_node *base, + ir_node *index, ir_node *mem, ir_node *val) { - ir_node *new_node; - if (ia32_cg_config.use_fisttp) { /* Note: fisttp ALWAYS pop the tos. 
We have to ensure here that the value is copied if other users exists */ @@ -2674,17 +2679,16 @@ static ir_node *gen_vfist(dbg_info *dbgi, ir_node *block, ir_node *base, ir_node ir_node *value = new_r_Proj(vfisttp, mode_E, pn_ia32_vfisttp_res); be_new_Keep(block, 1, &value); - new_node = new_r_Proj(vfisttp, mode_M, pn_ia32_vfisttp_M); - *fist = vfisttp; + return vfisttp; } else { ir_node *trunc_mode = ia32_new_Fpu_truncate(current_ir_graph); /* do a fist */ - new_node = new_bd_ia32_vfist(dbgi, block, base, index, mem, val, trunc_mode); - *fist = new_node; + ir_node *vfist = new_bd_ia32_vfist(dbgi, block, base, index, mem, val, trunc_mode); + return vfist; } - return new_node; } + /** * Transforms a general (no special case) Store. * @@ -2699,7 +2703,9 @@ static ir_node *gen_general_Store(ir_node *node) ir_node *ptr = get_Store_ptr(node); ir_node *mem = get_Store_mem(node); dbg_info *dbgi = get_irn_dbg_info(node); - ir_node *new_val, *new_node, *store; + int throws_exception = ir_throws_exception(node); + ir_node *new_val; + ir_node *new_node; ia32_address_t addr; /* check for destination address mode */ @@ -2741,12 +2747,12 @@ static ir_node *gen_general_Store(ir_node *node) new_node = new_bd_ia32_vfst(dbgi, new_block, addr.base, addr.index, addr.mem, new_val, mode); } - store = new_node; } else if (!ia32_cg_config.use_sse2 && is_float_to_int_conv(val)) { val = get_Conv_op(val); /* TODO: is this optimisation still necessary at all (middleend)? */ - /* We can skip ALL float->float up-Convs (and strict-up-Convs) before stores. */ + /* We can skip ALL float->float up-Convs (and strict-up-Convs) before + * stores. */ while (is_Conv(val)) { ir_node *op = get_Conv_op(val); if (!mode_is_float(get_irn_mode(op))) @@ -2756,7 +2762,7 @@ static ir_node *gen_general_Store(ir_node *node) val = op; } new_val = be_transform_node(val); - new_node = gen_vfist(dbgi, new_block, addr.base, addr.index, addr.mem, new_val, &store); + new_node = gen_vfist(dbgi, new_block, addr.base, addr.index, addr.mem, new_val); } else { new_val = create_immediate_or_transform(val, 0); assert(mode != mode_b); @@ -2768,15 +2774,15 @@ static ir_node *gen_general_Store(ir_node *node) new_node = new_bd_ia32_Store(dbgi, new_block, addr.base, addr.index, addr.mem, new_val); } - store = new_node; } + ir_set_throws_exception(new_node, throws_exception); - set_irn_pinned(store, get_irn_pinned(node)); - set_ia32_op_type(store, ia32_AddrModeD); - set_ia32_ls_mode(store, mode); + set_irn_pinned(new_node, get_irn_pinned(node)); + set_ia32_op_type(new_node, ia32_AddrModeD); + set_ia32_ls_mode(new_node, mode); - set_address(store, &addr); - SET_IA32_ORIG_NODE(store, node); + set_address(new_node, &addr); + SET_IA32_ORIG_NODE(new_node, node); return new_node; } @@ -3672,13 +3678,17 @@ static ir_node *gen_x87_fp_to_gp(ir_node *node) ir_graph *irg = current_ir_graph; dbg_info *dbgi = get_irn_dbg_info(node); ir_mode *mode = get_irn_mode(node); + ir_node *frame = get_irg_frame(irg); ir_node *fist, *load, *mem; - mem = gen_vfist(dbgi, block, get_irg_frame(irg), noreg_GP, nomem, new_op, &fist); + fist = gen_vfist(dbgi, block, frame, noreg_GP, nomem, new_op); set_irn_pinned(fist, op_pin_state_floats); set_ia32_use_frame(fist); set_ia32_op_type(fist, ia32_AddrModeD); + assert((long)pn_ia32_vfist_M == (long) pn_ia32_vfisttp_M); + mem = new_r_Proj(fist, mode_M, pn_ia32_vfist_M); + assert(get_mode_size_bits(mode) <= 32); /* exception we can only store signed 32 bit integers, so for unsigned we store a 64bit (signed) integer and load the lower bits */ @@ 
-3717,6 +3727,7 @@ static ir_node *gen_x87_strict_conv(ir_mode *tgt_mode, ir_node *node) ir_graph *irg = get_Block_irg(block); dbg_info *dbgi = get_irn_dbg_info(node); ir_node *frame = get_irg_frame(irg); + ir_node *store_mem; ir_node *store, *load; ir_node *new_node; @@ -3725,7 +3736,9 @@ static ir_node *gen_x87_strict_conv(ir_mode *tgt_mode, ir_node *node) set_ia32_op_type(store, ia32_AddrModeD); SET_IA32_ORIG_NODE(store, node); - load = new_bd_ia32_vfld(dbgi, block, frame, noreg_GP, store, tgt_mode); + store_mem = new_r_Proj(store, mode_M, pn_ia32_vfst_M); + + load = new_bd_ia32_vfld(dbgi, block, frame, noreg_GP, store_mem, tgt_mode); set_ia32_use_frame(load); set_ia32_op_type(load, ia32_AddrModeS); SET_IA32_ORIG_NODE(load, node); @@ -3759,6 +3772,7 @@ static ir_node *gen_x87_gp_to_fp(ir_node *node, ir_mode *src_mode) ir_mode *store_mode; ir_node *fild; ir_node *store; + ir_node *store_mem; ir_node *new_node; /* fild can use source AM if the operand is a signed 16bit or 32bit integer */ @@ -3804,6 +3818,8 @@ static ir_node *gen_x87_gp_to_fp(ir_node *node, ir_mode *src_mode) set_ia32_op_type(store, ia32_AddrModeD); set_ia32_ls_mode(store, mode_Iu); + store_mem = new_r_Proj(store, mode_M, pn_ia32_Store_M); + /* exception for 32bit unsigned, do a 64bit spill+load */ if (!mode_is_signed(mode)) { ir_node *in[2]; @@ -3812,23 +3828,24 @@ static ir_node *gen_x87_gp_to_fp(ir_node *node, ir_mode *src_mode) ir_node *zero_store = new_bd_ia32_Store(dbgi, block, get_irg_frame(irg), noreg_GP, nomem, zero_const); + ir_node *zero_store_mem = new_r_Proj(zero_store, mode_M, pn_ia32_Store_M); set_ia32_use_frame(zero_store); set_ia32_op_type(zero_store, ia32_AddrModeD); add_ia32_am_offs_int(zero_store, 4); set_ia32_ls_mode(zero_store, mode_Iu); - in[0] = zero_store; - in[1] = store; + in[0] = zero_store_mem; + in[1] = store_mem; - store = new_rd_Sync(dbgi, block, 2, in); + store_mem = new_rd_Sync(dbgi, block, 2, in); store_mode = mode_Ls; } else { store_mode = mode_Is; } /* do a fild */ - fild = new_bd_ia32_vfild(dbgi, block, get_irg_frame(irg), noreg_GP, store); + fild = new_bd_ia32_vfild(dbgi, block, get_irg_frame(irg), noreg_GP, store_mem); set_ia32_use_frame(fild); set_ia32_op_type(fild, ia32_AddrModeS); @@ -4058,7 +4075,11 @@ static ir_node *gen_be_Return(ir_node *node) ir_node *block = be_transform_node(get_nodes_block(node)); ir_type *res_type; ir_mode *mode; - ir_node *frame, *sse_store, *fld, *mproj; + ir_node *frame; + ir_node *sse_store; + ir_node *store_mem; + ir_node *fld; + ir_node *mproj; int i; int arity; unsigned pop; @@ -4091,9 +4112,10 @@ static ir_node *gen_be_Return(ir_node *node) set_ia32_ls_mode(sse_store, mode); set_ia32_op_type(sse_store, ia32_AddrModeD); set_ia32_use_frame(sse_store); + store_mem = new_r_Proj(sse_store, mode_M, pn_ia32_xStoreSimple_M); /* load into x87 register */ - fld = new_bd_ia32_vfld(dbgi, block, frame, noreg_GP, sse_store, mode); + fld = new_bd_ia32_vfld(dbgi, block, frame, noreg_GP, store_mem, mode); set_ia32_op_type(fld, ia32_AddrModeS); set_ia32_use_frame(fld); @@ -4325,7 +4347,10 @@ static ir_node *gen_ia32_l_LLtoFloat(ir_node *node) ir_node *new_val_high = be_transform_node(val_high); ir_node *in[2]; ir_node *sync, *fild, *res; - ir_node *store_low, *store_high; + ir_node *store_low; + ir_node *store_high; + ir_node *mem_low; + ir_node *mem_high; if (ia32_cg_config.use_sse2) { panic("ia32_l_LLtoFloat not implemented for SSE2"); @@ -4339,6 +4364,9 @@ static ir_node *gen_ia32_l_LLtoFloat(ir_node *node) SET_IA32_ORIG_NODE(store_low, node); 
SET_IA32_ORIG_NODE(store_high, node); + mem_low = new_r_Proj(store_low, mode_M, pn_ia32_Store_M); + mem_high = new_r_Proj(store_high, mode_M, pn_ia32_Store_M); + set_ia32_use_frame(store_low); set_ia32_use_frame(store_high); set_ia32_op_type(store_low, ia32_AddrModeD); @@ -4347,8 +4375,8 @@ static ir_node *gen_ia32_l_LLtoFloat(ir_node *node) set_ia32_ls_mode(store_high, mode_Is); add_ia32_am_offs_int(store_high, 4); - in[0] = store_low; - in[1] = store_high; + in[0] = mem_low; + in[1] = mem_high; sync = new_rd_Sync(dbgi, block, 2, in); /* do a fild */ @@ -4406,15 +4434,16 @@ static ir_node *gen_ia32_l_FloattoLL(ir_node *node) ir_node *frame = get_irg_frame(irg); ir_node *val = get_irn_n(node, n_ia32_l_FloattoLL_val); ir_node *new_val = be_transform_node(val); - ir_node *fist, *mem; + ir_node *fist; - mem = gen_vfist(dbgi, block, frame, noreg_GP, nomem, new_val, &fist); + fist = gen_vfist(dbgi, block, frame, noreg_GP, nomem, new_val); SET_IA32_ORIG_NODE(fist, node); set_ia32_use_frame(fist); set_ia32_op_type(fist, ia32_AddrModeD); set_ia32_ls_mode(fist, mode_Ls); - return mem; + assert((long)pn_ia32_vfist_M == (long) pn_ia32_vfisttp_M); + return new_r_Proj(fist, mode_M, pn_ia32_vfist_M); } static ir_node *gen_Proj_l_FloattoLL(ir_node *node) @@ -4542,10 +4571,17 @@ static ir_node *gen_Proj_Load(ir_node *node) } else if (is_ia32_Conv_I2I(new_pred) || is_ia32_Conv_I2I8Bit(new_pred)) { set_irn_mode(new_pred, mode_T); - if (proj == pn_Load_res) { + switch ((pn_Load)proj) { + case pn_Load_res: return new_rd_Proj(dbgi, new_pred, mode_Iu, pn_ia32_res); - } else if (proj == pn_Load_M) { + case pn_Load_M: return new_rd_Proj(dbgi, new_pred, mode_M, pn_ia32_mem); + case pn_Load_X_except: + /* This Load might raise an exception. Mark it. */ + set_ia32_exc_label(new_pred, 1); + return new_rd_Proj(dbgi, new_pred, mode_X, pn_ia32_Conv_I2I_X_except); + case pn_Load_X_regular: + return new_rd_Proj(dbgi, new_pred, mode_X, pn_ia32_Conv_I2I_X_regular); } } else if (is_ia32_xLoad(new_pred)) { switch ((pn_Load)proj) { @@ -4586,7 +4622,76 @@ static ir_node *gen_Proj_Load(ir_node *node) return new_rd_Proj(dbgi, new_pred, mode_M, 1); } - panic("No idea how to transform proj"); + panic("No idea how to transform Proj(Load) %+F", node); +} + +static ir_node *gen_Proj_Store(ir_node *node) +{ + ir_node *pred = get_Proj_pred(node); + ir_node *new_pred = be_transform_node(pred); + dbg_info *dbgi = get_irn_dbg_info(node); + long pn = get_Proj_proj(node); + + if (is_ia32_Store(new_pred) || is_ia32_Store8Bit(new_pred)) { + switch ((pn_Store)pn) { + case pn_Store_M: + return new_rd_Proj(dbgi, new_pred, mode_M, pn_ia32_Store_M); + case pn_Store_X_except: + return new_rd_Proj(dbgi, new_pred, mode_X, pn_ia32_Store_X_except); + case pn_Store_X_regular: + return new_rd_Proj(dbgi, new_pred, mode_X, pn_ia32_Store_X_regular); + } + } else if (is_ia32_vfist(new_pred)) { + switch ((pn_Store)pn) { + case pn_Store_M: + return new_rd_Proj(dbgi, new_pred, mode_M, pn_ia32_vfist_M); + case pn_Store_X_except: + return new_rd_Proj(dbgi, new_pred, mode_X, pn_ia32_vfist_X_except); + case pn_Store_X_regular: + return new_rd_Proj(dbgi, new_pred, mode_X, pn_ia32_vfist_X_regular); + } + } else if (is_ia32_vfisttp(new_pred)) { + switch ((pn_Store)pn) { + case pn_Store_M: + return new_rd_Proj(dbgi, new_pred, mode_M, pn_ia32_vfisttp_M); + case pn_Store_X_except: + return new_rd_Proj(dbgi, new_pred, mode_X, pn_ia32_vfisttp_X_except); + case pn_Store_X_regular: + return new_rd_Proj(dbgi, new_pred, mode_X, pn_ia32_vfisttp_X_regular); + } + } else if 
(is_ia32_vfst(new_pred)) { + switch ((pn_Store)pn) { + case pn_Store_M: + return new_rd_Proj(dbgi, new_pred, mode_M, pn_ia32_vfst_M); + case pn_Store_X_except: + return new_rd_Proj(dbgi, new_pred, mode_X, pn_ia32_vfst_X_except); + case pn_Store_X_regular: + return new_rd_Proj(dbgi, new_pred, mode_X, pn_ia32_vfst_X_regular); + } + } else if (is_ia32_xStore(new_pred)) { + switch ((pn_Store)pn) { + case pn_Store_M: + return new_rd_Proj(dbgi, new_pred, mode_M, pn_ia32_xStore_M); + case pn_Store_X_except: + return new_rd_Proj(dbgi, new_pred, mode_X, pn_ia32_xStore_X_except); + case pn_Store_X_regular: + return new_rd_Proj(dbgi, new_pred, mode_X, pn_ia32_xStore_X_regular); + } + } else if (is_Sync(new_pred)) { + /* hack for the case that gen_float_const_Store produced a Sync */ + if (pn == pn_Store_M) { + return new_pred; + } + panic("exception control flow for gen_float_const_Store not implemented yet"); + } else if (get_ia32_op_type(new_pred) == ia32_AddrModeD) { + /* destination address mode */ + if (pn == pn_Store_M) { + return new_pred; + } + panic("exception control flow for destination AM not implemented yet"); + } + + panic("No idea how to transform Proj(Store) %+F", node); } /** @@ -4671,7 +4776,7 @@ static ir_node *gen_Proj_CopyB(ir_node *node) dbg_info *dbgi = get_irn_dbg_info(node); long proj = get_Proj_proj(node); - switch (proj) { + switch ((pn_CopyB)proj) { case pn_CopyB_M: if (is_ia32_CopyB_i(new_pred)) { return new_rd_Proj(dbgi, new_pred, mode_M, pn_ia32_CopyB_i_M); @@ -4679,7 +4784,19 @@ static ir_node *gen_Proj_CopyB(ir_node *node) return new_rd_Proj(dbgi, new_pred, mode_M, pn_ia32_CopyB_M); } break; - default: + case pn_CopyB_X_regular: + if (is_ia32_CopyB_i(new_pred)) { + return new_rd_Proj(dbgi, new_pred, mode_X, pn_ia32_CopyB_i_X_regular); + } else if (is_ia32_CopyB(new_pred)) { + return new_rd_Proj(dbgi, new_pred, mode_X, pn_ia32_CopyB_X_regular); + } + break; + case pn_CopyB_X_except: + if (is_ia32_CopyB_i(new_pred)) { + return new_rd_Proj(dbgi, new_pred, mode_X, pn_ia32_CopyB_i_X_except); + } else if (is_ia32_CopyB(new_pred)) { + return new_rd_Proj(dbgi, new_pred, mode_X, pn_ia32_CopyB_X_except); + } break; } @@ -4707,6 +4824,7 @@ static ir_node *gen_be_Call(ir_node *node) unsigned const pop = be_Call_get_pop(node); ir_type *const call_tp = be_Call_get_type(node); int old_no_pic_adjust; + int throws_exception = ir_throws_exception(node); /* Run the x87 simulator if the call returns a float value */ if (get_method_n_ress(call_tp) > 0) { @@ -4752,6 +4870,7 @@ static ir_node *gen_be_Call(ir_node *node) mem = transform_AM_mem(block, src_ptr, src_mem, addr->mem); call = new_bd_ia32_Call(dbgi, block, addr->base, addr->index, mem, am.new_op2, sp, fpcw, eax, ecx, edx, pop, call_tp); + ir_set_throws_exception(call, throws_exception); set_am_attributes(call, &am); call = fix_mem_proj(call, &am); @@ -5483,7 +5602,7 @@ static ir_node *gen_Proj_be_Call(ir_node *node) ir_mode *mode = get_irn_mode(node); ir_node *res; - if (proj == pn_be_Call_M_regular) { + if (proj == pn_be_Call_M) { return new_rd_Proj(dbgi, new_call, mode_M, n_ia32_Call_mem); } /* transform call modes */ @@ -5495,8 +5614,12 @@ static ir_node *gen_Proj_be_Call(ir_node *node) /* Map from be_Call to ia32_Call proj number */ if (proj == pn_be_Call_sp) { proj = pn_ia32_Call_stack; - } else if (proj == pn_be_Call_M_regular) { + } else if (proj == pn_be_Call_M) { proj = pn_ia32_Call_M; + } else if (proj == pn_be_Call_X_except) { + proj = pn_ia32_Call_X_except; + } else if (proj == pn_be_Call_X_regular) { + proj = 
pn_ia32_Call_X_regular; } else { arch_register_req_t const *const req = arch_get_register_req_out(node); int const n_outs = arch_irn_get_n_outs(new_call); @@ -5524,13 +5647,13 @@ static ir_node *gen_Proj_be_Call(ir_node *node) /* TODO arch_set_irn_register() only operates on Projs, need variant with index */ switch (proj) { - case pn_ia32_Call_stack: - arch_set_irn_register(res, &ia32_registers[REG_ESP]); - break; + case pn_ia32_Call_stack: + arch_set_irn_register(res, &ia32_registers[REG_ESP]); + break; - case pn_ia32_Call_fpcw: - arch_set_irn_register(res, &ia32_registers[REG_FPCW]); - break; + case pn_ia32_Call_fpcw: + arch_set_irn_register(res, &ia32_registers[REG_FPCW]); + break; } return res; @@ -5575,15 +5698,10 @@ static ir_node *gen_Proj(ir_node *node) long proj; switch (get_irn_opcode(pred)) { - case iro_Store: - proj = get_Proj_proj(node); - if (proj == pn_Store_M) { - return be_transform_node(pred); - } else { - panic("No idea how to transform proj->Store"); - } case iro_Load: return gen_Proj_Load(node); + case iro_Store: + return gen_Proj_Store(node); case iro_ASM: return gen_Proj_ASM(node); case iro_Builtin: @@ -5772,6 +5890,7 @@ static void postprocess_fp_call_results(void) ir_mode *mode = get_ia32_ls_mode(succ); ir_node *st = new_bd_ia32_vfst(db, block, base, index, mem, value, mode); + //ir_node *mem = new_r_Proj(st, mode_M, pn_ia32_vfst_M); set_ia32_am_offs_int(st, get_ia32_am_offs_int(succ)); if (is_ia32_use_frame(succ)) set_ia32_use_frame(st); @@ -5779,36 +5898,43 @@ static void postprocess_fp_call_results(void) set_irn_pinned(st, get_irn_pinned(succ)); set_ia32_op_type(st, ia32_AddrModeD); + assert((long)pn_ia32_xStore_M == (long)pn_ia32_vfst_M); + assert((long)pn_ia32_xStore_X_regular == (long)pn_ia32_vfst_X_regular); + assert((long)pn_ia32_xStore_X_except == (long)pn_ia32_vfst_X_except); + exchange(succ, st); - } else { - if (new_res == NULL) { - dbg_info *db = get_irn_dbg_info(call); - ir_node *block = get_nodes_block(call); - ir_node *frame = get_irg_frame(current_ir_graph); - ir_node *old_mem = be_get_Proj_for_pn(call, pn_ia32_Call_M); - ir_node *call_mem = new_r_Proj(call, mode_M, pn_ia32_Call_M); - ir_node *vfst, *xld, *new_mem; - - /* store st(0) on stack */ - vfst = new_bd_ia32_vfst(db, block, frame, noreg_GP, call_mem, res, mode); - set_ia32_op_type(vfst, ia32_AddrModeD); - set_ia32_use_frame(vfst); - - /* load into SSE register */ - xld = new_bd_ia32_xLoad(db, block, frame, noreg_GP, vfst, mode); - set_ia32_op_type(xld, ia32_AddrModeS); - set_ia32_use_frame(xld); - - new_res = new_r_Proj(xld, mode, pn_ia32_xLoad_res); - new_mem = new_r_Proj(xld, mode_M, pn_ia32_xLoad_M); - - if (old_mem != NULL) { - edges_reroute(old_mem, new_mem); - kill_node(old_mem); - } + } else if (new_res == NULL) { + dbg_info *db = get_irn_dbg_info(call); + ir_node *block = get_nodes_block(call); + ir_node *frame = get_irg_frame(current_ir_graph); + ir_node *old_mem = be_get_Proj_for_pn(call, pn_ia32_Call_M); + ir_node *call_mem = new_r_Proj(call, mode_M, pn_ia32_Call_M); + ir_node *vfst, *xld, *new_mem; + ir_node *vfst_mem; + + /* store st(0) on stack */ + vfst = new_bd_ia32_vfst(db, block, frame, noreg_GP, call_mem, + res, mode); + set_ia32_op_type(vfst, ia32_AddrModeD); + set_ia32_use_frame(vfst); + + vfst_mem = new_r_Proj(vfst, mode_M, pn_ia32_vfst_M); + + /* load into SSE register */ + xld = new_bd_ia32_xLoad(db, block, frame, noreg_GP, vfst_mem, + mode); + set_ia32_op_type(xld, ia32_AddrModeS); + set_ia32_use_frame(xld); + + new_res = new_r_Proj(xld, mode, pn_ia32_xLoad_res); + 
new_mem = new_r_Proj(xld, mode_M, pn_ia32_xLoad_M); + + if (old_mem != NULL) { + edges_reroute(old_mem, new_mem); + kill_node(old_mem); } - set_irn_n(succ, get_edge_src_pos(edge), new_res); } + set_irn_n(succ, get_edge_src_pos(edge), new_res); } } } diff --git a/ir/ir/irnode.c b/ir/ir/irnode.c index 7a6fb3118..770e797e7 100644 --- a/ir/ir/irnode.c +++ b/ir/ir/irnode.c @@ -1328,6 +1328,20 @@ int is_x_regular_Proj(const ir_node *node) return get_Proj_proj(node) == pred->op->pn_x_regular; } +void ir_set_throws_exception(ir_node *node, int throws_exception) +{ + except_attr *attr = &node->attr.except; + assert(is_fragile_op(node)); + attr->throws_exception = throws_exception; +} + +int ir_throws_exception(const ir_node *node) +{ + const except_attr *attr = &node->attr.except; + assert(is_fragile_op(node)); + return attr->throws_exception; +} + ir_node **get_Tuple_preds_arr(ir_node *node) { assert(is_Tuple(node)); @@ -1524,6 +1538,9 @@ int (is_SymConst_addr_ent)(const ir_node *node) /* Returns true if the operation manipulates control flow. */ int is_cfop(const ir_node *node) { + if (is_fragile_op(node) && ir_throws_exception(node)) + return true; + return is_op_cfopcode(get_irn_op(node)); } diff --git a/ir/ir/irnode_t.h b/ir/ir/irnode_t.h index 474fd5524..376a68e48 100644 --- a/ir/ir/irnode_t.h +++ b/ir/ir/irnode_t.h @@ -327,7 +327,7 @@ static inline op_pin_state _get_irn_pinned(const ir_node *node) state = _get_op_pinned(_get_irn_op(node)); if (state >= op_pin_state_exc_pinned) - return node->attr.except.pin_state; + return (op_pin_state)node->attr.except.pin_state; return state; } diff --git a/ir/ir/iropt.c b/ir/ir/iropt.c index 90c491ace..f5aef5f6b 100644 --- a/ir/ir/iropt.c +++ b/ir/ir/iropt.c @@ -5833,7 +5833,7 @@ static ir_node *transform_node_Load(ir_node *n) ir_node *bad = new_r_Bad(irg, mode_X); ir_mode *mode = get_Load_mode(n); ir_node *res = new_r_Proj(pred_load, mode, pn_Load_res); - ir_node *in[pn_Load_max+1] = { mem, jmp, bad, res }; + ir_node *in[pn_Load_max+1] = { mem, res, jmp, bad }; ir_node *tuple = new_r_Tuple(block, ARRAY_SIZE(in), in); return tuple; } @@ -5853,7 +5853,7 @@ static ir_node *transform_node_Load(ir_node *n) ir_graph *irg = get_irn_irg(n); ir_node *bad = new_r_Bad(irg, mode_X); ir_node *res = value; - ir_node *in[pn_Load_max+1] = { mem, jmp, bad, res }; + ir_node *in[pn_Load_max+1] = { mem, res, jmp, bad }; ir_node *tuple = new_r_Tuple(block, ARRAY_SIZE(in), in); return tuple; } diff --git a/ir/ir/irtypes.h b/ir/ir/irtypes.h index 56de92c9a..87a3cb3a7 100644 --- a/ir/ir/irtypes.h +++ b/ir/ir/irtypes.h @@ -202,9 +202,12 @@ typedef struct sel_attr { /** Exception attributes. */ typedef struct except_attr { - op_pin_state pin_state; /**< the pin state for operations that might generate a exception: - If it's know that no exception will be generated, could be set to - op_pin_state_floats. */ + unsigned pin_state : 2; /**< the pin state for operations with + variable pinned state. Contains a + op_pin_state */ + unsigned throws_exception : 1; /**< if true a fragile op throws and + must produce X_except and X_regular + values */ } except_attr; /** Call attributes. 
*/ diff --git a/ir/ir/irverify.c index 4c30c3441..5b2353cf8 100644 --- a/ir/ir/irverify.c +++ b/ir/ir/irverify.c @@ -733,6 +733,17 @@ static int verify_node_Proj_Bound(const ir_node *p) return 1; } +static int verify_node_Proj_fragile(const ir_node *node) +{ + ir_node *pred = get_Proj_pred(node); + int throws_exception = ir_throws_exception(pred); + ASSERT_AND_RET((!is_x_except_Proj(node) || throws_exception) + && (!is_x_regular_Proj(node) || throws_exception), + "X_except and X_regular Proj only allowed when throws_exception is set", + 0); + return 1; +} + /** * verify a Proj node */ @@ -746,6 +757,12 @@ static int verify_node_Proj(const ir_node *p) ASSERT_AND_RET(get_irn_mode(pred) == mode_T, "mode of a 'projed' node is not Tuple", 0); ASSERT_AND_RET(get_irg_pinned(irg) == op_pin_state_floats || get_nodes_block(pred) == get_nodes_block(p), "Proj must be in same block as its predecessor", 0); + if (is_fragile_op(pred)) { + int res = verify_node_Proj_fragile(p); + if (res != 1) + return res; + } + op = get_irn_op(pred); if (op->ops.verify_proj_node) return op->ops.verify_proj_node(p); diff --git a/ir/lower/lower_intrinsics.c index 88cd1386b..957f49ccc 100644 --- a/ir/lower/lower_intrinsics.c +++ b/ir/lower/lower_intrinsics.c @@ -209,28 +209,34 @@ ir_prog_pass_t *lower_intrinsics_pass( * @param reg_jmp new regular control flow, if NULL, a Jmp will be used * @param exc_jmp new exception control flow, if reg_jmp == NULL, a Bad will be used */ -static void replace_call(ir_node *irn, ir_node *call, ir_node *mem, ir_node *reg_jmp, ir_node *exc_jmp) +static void replace_call(ir_node *irn, ir_node *call, ir_node *mem, + ir_node *reg_jmp, ir_node *exc_jmp) { ir_node *block = get_nodes_block(call); ir_graph *irg = get_irn_irg(block); + ir_node *rest = new_r_Tuple(block, 1, &irn); - if (reg_jmp == NULL) { - - /* Beware: do we need here a protection against CSE? Better we do it. */ - int old_cse = get_opt_cse(); - set_opt_cse(0); - reg_jmp = new_r_Jmp(block); - set_opt_cse(old_cse); - exc_jmp = new_r_Bad(irg, mode_X); + if (ir_throws_exception(call)) { + turn_into_tuple(call, pn_Call_max+1); + if (reg_jmp == NULL) { + reg_jmp = new_r_Jmp(block); + } + if (exc_jmp == NULL) { + exc_jmp = new_r_Bad(irg, mode_X); + } + set_Tuple_pred(call, pn_Call_X_regular, reg_jmp); + set_Tuple_pred(call, pn_Call_X_except, exc_jmp); + } else { + assert(reg_jmp == NULL); + assert(exc_jmp == NULL); + turn_into_tuple(call, pn_Call_T_result+1); + assert(pn_Call_M <= pn_Call_T_result); + assert(pn_Call_X_regular > pn_Call_T_result); + assert(pn_Call_X_except > pn_Call_T_result); } - irn = new_r_Tuple(block, 1, &irn); - - turn_into_tuple(call, pn_Call_max+1); set_Tuple_pred(call, pn_Call_M, mem); - set_Tuple_pred(call, pn_Call_X_regular, reg_jmp); - set_Tuple_pred(call, pn_Call_X_except, exc_jmp); - set_Tuple_pred(call, pn_Call_T_result, irn); -} /* replace_call */ + set_Tuple_pred(call, pn_Call_T_result, rest); +} /* A mapper for the integer abs. 
*/ int i_mapper_abs(ir_node *call, void *ctx) @@ -298,9 +304,15 @@ int i_mapper_alloca(ir_node *call, void *ctx) irn = new_rd_Alloc(dbg, block, mem, op, firm_unknown_type, stack_alloc); mem = new_rd_Proj(dbg, irn, mode_M, pn_Alloc_M); - no_exc = new_rd_Proj(dbg, irn, mode_X, pn_Alloc_X_regular); - exc = new_rd_Proj(dbg, irn, mode_X, pn_Alloc_X_except); + if (ir_throws_exception(call)) { + no_exc = new_rd_Proj(dbg, irn, mode_X, pn_Alloc_X_regular); + exc = new_rd_Proj(dbg, irn, mode_X, pn_Alloc_X_except); + ir_set_throws_exception(irn, true); + } else { + no_exc = NULL; + exc = NULL; + } irn = new_rd_Proj(dbg, irn, get_modeP_data(), pn_Alloc_res); DBG_OPT_ALGSIM0(call, irn, FS_OPT_RTS_ALLOCA); replace_call(irn, call, mem, no_exc, exc); @@ -356,13 +368,15 @@ int i_mapper_cbrt(ir_node *call, void *ctx) /* A mapper for the floating point pow. */ int i_mapper_pow(ir_node *call, void *ctx) { + ir_node *left = get_Call_param(call, 0); + ir_node *right = get_Call_param(call, 1); + ir_node *block = get_nodes_block(call); + ir_graph *irg = get_irn_irg(block); + ir_node *reg_jmp = NULL; + ir_node *exc_jmp = NULL; + ir_node *irn; dbg_info *dbg; ir_node *mem; - ir_node *left = get_Call_param(call, 0); - ir_node *right = get_Call_param(call, 1); - ir_node *block = get_nodes_block(call); - ir_graph *irg = get_irn_irg(block); - ir_node *irn, *reg_jmp = NULL, *exc_jmp = NULL; (void) ctx; if (is_Const(left) && is_Const_one(left)) { @@ -397,8 +411,11 @@ int i_mapper_pow(ir_node *call, void *ctx) div = new_rd_Div(dbg, block, mem, irn, left, mode, op_pin_state_pinned); mem = new_r_Proj(div, mode_M, pn_Div_M); irn = new_r_Proj(div, mode, pn_Div_res); - reg_jmp = new_r_Proj(div, mode_X, pn_Div_X_regular); - exc_jmp = new_r_Proj(div, mode_X, pn_Div_X_except); + if (ir_throws_exception(call)) { + reg_jmp = new_r_Proj(div, mode_X, pn_Div_X_regular); + exc_jmp = new_r_Proj(div, mode_X, pn_Div_X_except); + ir_set_throws_exception(div, true); + } } DBG_OPT_ALGSIM0(call, irn, FS_OPT_RTS_POW); replace_call(irn, call, mem, reg_jmp, exc_jmp); @@ -919,9 +936,15 @@ replace_by_call: /* replace the strcmp by (*x) */ irn = new_rd_Load(dbg, block, mem, v, mode, cons_none); mem = new_r_Proj(irn, mode_M, pn_Load_M); - exc = new_r_Proj(irn, mode_X, pn_Load_X_except); - reg = new_r_Proj(irn, mode_X, pn_Load_X_regular); + if (ir_throws_exception(call)) { + exc = new_r_Proj(irn, mode_X, pn_Load_X_except); + reg = new_r_Proj(irn, mode_X, pn_Load_X_regular); + ir_set_throws_exception(irn, true); + } else { + exc = NULL; + reg = NULL; + } irn = new_r_Proj(irn, mode, pn_Load_res); /* conv to the result mode */ mode = get_type_mode(res_tp); diff --git a/scripts/gen_ir.py index 690da1f5b..3696854ea 100755 --- a/scripts/gen_ir.py +++ b/scripts/gen_ir.py @@ -272,6 +272,11 @@ def preprocess_node(node): fqname = ".exc.pin_state", init = "pin_state" )) + if hasattr(node, "throws_init"): + initattrs.append(dict( + fqname = ".exc.throws_exception", + init = node.throws_init + )) for arg in node.constructor_args: arguments.append(prepare_attr(arg)) diff --git a/scripts/ir_spec.py index 2ab4c8a7e..f264b82ce 100755 --- a/scripts/ir_spec.py +++ b/scripts/ir_spec.py @@ -39,9 +39,9 @@ class Alloc(Op): ] outs = [ ("M", "memory result"), + ("res", "pointer to newly allocated memory"), ("X_regular", "control flow when no exception occurs"), ("X_except", "control flow when exception occured"), - ("res", "pointer to newly allocated memory"), ] attrs = [ dict( @@ -57,6 +57,7 @@ class Alloc(Op): ] flags = [ "fragile", 
"uses_memory" ] pinned = "exception" + throws_init = "false" pinned_init = "op_pin_state_pinned" attr_struct = "alloc_attr" @@ -203,13 +204,14 @@ class Bound(Op): ] outs = [ ("M", "memory result"), + ("res", "the checked index"), ("X_regular", "control flow when no exception occurs"), ("X_except", "control flow when exception occured"), - ("res", "the checked index"), ] flags = [ "fragile", "highlevel" ] pinned = "exception" pinned_init = "op_pin_state_pinned" + throws_init = "false" attr_struct = "bound_attr" attrs_name = "bound" @@ -255,9 +257,9 @@ class Call(Op): arity = "variable" outs = [ ("M", "memory result"), + ("T_result", "tuple containing all results"), ("X_regular", "control flow when no exception occurs"), ("X_except", "control flow when exception occured"), - ("T_result", "tuple containing all results"), ] flags = [ "fragile", "uses_memory" ] attrs = [ @@ -276,6 +278,7 @@ class Call(Op): attr_struct = "call_attr" pinned = "memory" pinned_init = "op_pin_state_pinned" + throws_init = "false" init = ''' assert((get_unknown_type() == type) || is_Method_type(type)); ''' @@ -434,6 +437,7 @@ class CopyB(Op): attrs_name = "copyb" pinned = "memory" pinned_init = "op_pin_state_pinned" + throws_init = "false" class Div(Op): """returns the quotient of its 2 operands""" @@ -444,9 +448,9 @@ class Div(Op): ] outs = [ ("M", "memory result"), + ("res", "result of computation"), ("X_regular", "control flow when no exception occurs"), ("X_except", "control flow when exception occured"), - ("res", "result of computation"), ] flags = [ "fragile", "uses_memory" ] attrs_name = "div" @@ -464,6 +468,7 @@ class Div(Op): ] attr_struct = "div_attr" pinned = "exception" + throws_init = "false" op_index = 1 arity_override = "oparity_binary" @@ -544,9 +549,9 @@ class InstOf(Op): ] outs = [ ("M", "memory result"), + ("res", "checked object pointer"), ("X_regular", "control flow when no exception occurs"), ("X_except", "control flow when exception occured"), - ("res", "checked object pointer"), ] flags = [ "highlevel" ] attrs = [ @@ -575,9 +580,9 @@ class Load(Op): ] outs = [ ("M", "memory result"), + ("res", "result of load operation"), ("X_regular", "control flow when no exception occurs"), ("X_except", "control flow when exception occured"), - ("res", "result of load operation"), ] flags = [ "fragile", "uses_memory" ] pinned = "exception" @@ -609,6 +614,7 @@ class Load(Op): ), ] pinned_init = "flags & cons_floats ? op_pin_state_floats : op_pin_state_pinned" + throws_init = "(flags & cons_throws_exception) != 0" class Minus(Unop): """returns the difference between its operands""" @@ -630,9 +636,9 @@ class Mod(Op): ] outs = [ ("M", "memory result"), + ("res", "result of computation"), ("X_regular", "control flow when no exception occurs"), ("X_except", "control flow when exception occured"), - ("res", "result of computation"), ] flags = [ "fragile", "uses_memory" ] attrs_name = "mod" @@ -645,6 +651,7 @@ class Mod(Op): ] attr_struct = "mod_attr" pinned = "exception" + throws_init = "false" op_index = 1 arity_override = "oparity_binary" @@ -834,6 +841,7 @@ class Store(Op): pinned = "exception" attr_struct = "store_attr" pinned_init = "flags & cons_floats ? 
op_pin_state_floats : op_pin_state_pinned" + throws_init = "(flags & cons_throws_exception) != 0" attrs = [ dict( type = "ir_volatility", diff --git a/scripts/spec_util.py b/scripts/spec_util.py index 7fd35a4a8..6e15063f8 100644 --- a/scripts/spec_util.py +++ b/scripts/spec_util.py @@ -8,6 +8,9 @@ def isAbstract(nodetype): def is_dynamic_pinned(node): return node.pinned in ["memory", "exception"] +def is_fragile(node): + return hasattr(node, "flags") and "fragile" in node.flags + def inout_contains(l, name): for entry in l: if entry[0] == name: @@ -24,15 +27,23 @@ def verify_node(node): print "WARNING: no flags specified for %s\n" % node.__name__ elif type(node.flags) != list: print "ERROR: flags of %s not a list" % node.__name__ - if hasattr(node, "flags"): - flags = node.flags - if "fragile" in flags: - if not inout_contains(node.ins, "mem"): - print "ERROR: fragile node %s needs an input named 'mem'" % node.__name__ - if not inout_contains(node.outs, "X_regular"): - print "ERROR: fragile node %s needs an output named 'X_regular'" % node.__name__ - if not inout_contains(node.outs, "X_except"): - print "ERROR: fragile node %s needs an output named 'X_except'" % node.__name__ + if hasattr(node, "pinned_init") and not is_dynamic_pinned(node): + print "ERROR: node %s has pinned_init attribute but is not marked as dynamically pinned" % node.__name__ + if is_fragile(node): + if not is_dynamic_pinned(node): + print "ERROR: fragile node %s must be dynamically pinned" % node.__name__ + if not hasattr(node, "throws_init"): + print "ERROR: fragile node %s needs a throws_init attribute" % node.__name__ + if not inout_contains(node.ins, "mem"): + print "ERROR: fragile node %s needs an input named 'mem'" % node.__name__ + if not inout_contains(node.outs, "X_regular"): + print "ERROR: fragile node %s needs an output named 'X_regular'" % node.__name__ + if not inout_contains(node.outs, "X_except"): + print "ERROR: fragile node %s needs an output named 'X_except'" % node.__name__ + else: + if hasattr(node, "throws_init"): + print "ERROR: throws_init only makes sense for fragile nodes" + def setldefault(node, attr, val): # Don't use hasattr, as these things should not be inherited -- 2.20.1
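
Usage note (illustrative sketch only, not part of the patch): after this rework a front end requests exception edges at construction time via cons_throws_exception and queries the attribute with ir_throws_exception(); the new verifier check only accepts X_regular/X_except Projs when the attribute is set. A minimal sketch, assuming the new_r_Load(block, mem, ptr, mode, flags) constructor of this libfirm version; block, mem and ptr are whatever nodes the front end already built:

#include <libfirm/firm.h>

/* Build an integer Load that may fault and hand back its result Proj. */
static ir_node *build_throwing_load(ir_node *block, ir_node *mem, ir_node *ptr)
{
	ir_node *load = new_r_Load(block, mem, ptr, mode_Iu, cons_throws_exception);
	ir_node *res  = new_r_Proj(load, mode_Iu, pn_Load_res);
	ir_node *memp = new_r_Proj(load, mode_M,  pn_Load_M);

	if (ir_throws_exception(load)) {
		/* Only legal when throws_exception is set (enforced by
		 * verify_node_Proj_fragile above). */
		ir_node *x_regular = new_r_Proj(load, mode_X, pn_Load_X_regular);
		ir_node *x_except  = new_r_Proj(load, mode_X, pn_Load_X_except);
		/* ... wire x_regular/x_except into the CFG ... */
		(void)x_regular;
		(void)x_except;
	}
	(void)memp; /* ... thread the memory result into the next memory op ... */
	return res;
}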