* constrained flags for memory operations.
*/
typedef enum ir_cons_flags {
- cons_none = 0, /**< No constrains. */
- cons_volatile = 1U << 0, /**< Memory operation is volatile. */
- cons_unaligned = 1U << 1, /**< Memory operation is unaligned. */
- cons_floats = 1U << 2 /**< Memory operation can float. */
+ cons_none = 0, /**< No constraints. */
+ cons_volatile = 1U << 0, /**< Memory operation is volatile. */
+ cons_unaligned = 1U << 1, /**< Memory operation is unaligned. */
+ cons_floats = 1U << 2, /**< Memory operation can float. */
+ cons_throws_exception = 1U << 3, /**< fragile op throws exception (and
+ produces X_regular and X_except
+ values) */
} ir_cons_flags;
ENUM_BITSET(ir_cons_flags)
*/
FIRM_API int is_x_regular_Proj(const ir_node *node);
+/**
+ * Set throws exception attribute of a fragile node
+ * @p throws_exception must be 0 or 1
+ */
+FIRM_API void ir_set_throws_exception(ir_node *node, int throws_exception);
+
+/**
+ * Returns throws_exception attribute of a fragile node
+ */
+FIRM_API int ir_throws_exception(const ir_node *node);
+
/** returns the name of an ir_relation */
FIRM_API const char *get_relation_string(ir_relation relation);
int *reg_param_idxs;
int *stack_param_idx;
int i, n, destroy_all_regs;
+ int throws_exception;
size_t s;
size_t p;
dbg_info *dbgi;
assert(n_ins == (int) (n_reg_params + ARR_LEN(states)));
/* ins collected, build the call */
+ throws_exception = ir_throws_exception(irn);
if (env->call->flags.bits.call_has_imm && is_SymConst(call_ptr)) {
/* direct call */
low_call = be_new_Call(dbgi, irg, bl, curr_mem, curr_sp, curr_sp,
n_reg_results + pn_be_Call_first_res + ARR_LEN(destroyed_regs),
n_ins, in, get_Call_type(irn));
}
+ ir_set_throws_exception(low_call, throws_exception);
be_Call_set_pop(low_call, call->pop);
/* put the call into the list of all calls for later processing */
/* now handle results */
for (i = 0; i < n_res; ++i) {
- int pn;
ir_node *proj = res_projs[i];
be_abi_call_arg_t *arg = get_call_arg(call, 1, i, 0);
+ long pn = i + pn_be_Call_first_res;
/* returns values on stack not supported yet */
assert(arg->in_reg);
}
if (! mem_proj) {
- mem_proj = new_r_Proj(low_call, mode_M, pn_be_Call_M_regular);
+ mem_proj = new_r_Proj(low_call, mode_M, pn_be_Call_M);
keep_alive(mem_proj);
}
}
op_be_Keep = new_ir_op(beo_Keep, "be_Keep", op_pin_state_exc_pinned, irop_flag_keep, oparity_dynamic, 0, sizeof(be_node_attr_t), &be_node_op_ops);
op_be_CopyKeep = new_ir_op(beo_CopyKeep, "be_CopyKeep", op_pin_state_exc_pinned, irop_flag_keep, oparity_variable, 0, sizeof(be_node_attr_t), &be_node_op_ops);
op_be_Call = new_ir_op(beo_Call, "be_Call", op_pin_state_exc_pinned, irop_flag_fragile|irop_flag_uses_memory, oparity_variable, 0, sizeof(be_call_attr_t), &be_node_op_ops);
+ ir_op_set_fragile_indices(op_be_Call, n_be_Call_mem, pn_be_Call_X_regular, pn_be_Call_X_except);
op_be_Return = new_ir_op(beo_Return, "be_Return", op_pin_state_exc_pinned, irop_flag_cfopcode, oparity_dynamic, 0, sizeof(be_return_attr_t), &be_node_op_ops);
op_be_AddSP = new_ir_op(beo_AddSP, "be_AddSP", op_pin_state_exc_pinned, irop_flag_none, oparity_unary, 0, sizeof(be_node_attr_t), &be_node_op_ops);
op_be_SubSP = new_ir_op(beo_SubSP, "be_SubSP", op_pin_state_exc_pinned, irop_flag_none, oparity_unary, 0, sizeof(be_node_attr_t), &be_node_op_ops);
* Projection numbers for result of be_Call node: use for Proj nodes!
*/
typedef enum {
- pn_be_Call_M_regular = pn_Call_M, /**< The memory result of a be_Call. */
+ pn_be_Call_M = pn_Call_M, /**< The memory result of a be_Call. */
+ pn_be_Call_X_regular = pn_Call_X_regular,
+ pn_be_Call_X_except = pn_Call_X_except,
pn_be_Call_sp = pn_Call_max+1,
- pn_be_Call_first_res /**< The first result proj number of a
- be_Call. */
+ pn_be_Call_first_res /**< The first result proj number of a be_Call. */
} pn_be_Call;
/**
ir_node *nomem = get_irg_no_mem(irg);
ir_node *ptr = get_irg_frame(irg);
ir_node *val = get_irn_n(node, n_be_Spill_val);
+ ir_node *res;
ir_node *store;
ir_node *sched_point = NULL;
}
if (mode_is_float(mode)) {
- if (ia32_cg_config.use_sse2)
+ if (ia32_cg_config.use_sse2) {
store = new_bd_ia32_xStore(dbg, block, ptr, noreg, nomem, val);
- else
+ res = new_r_Proj(store, mode_M, pn_ia32_xStore_M);
+ } else {
store = new_bd_ia32_vfst(dbg, block, ptr, noreg, nomem, val, mode);
+ res = new_r_Proj(store, mode_M, pn_ia32_vfst_M);
+ }
} else if (get_mode_size_bits(mode) == 128) {
/* Spill 128 bit SSE registers */
store = new_bd_ia32_xxStore(dbg, block, ptr, noreg, nomem, val);
+ res = new_r_Proj(store, mode_M, pn_ia32_xxStore_M);
} else if (get_mode_size_bits(mode) == 8) {
store = new_bd_ia32_Store8Bit(dbg, block, ptr, noreg, nomem, val);
+ res = new_r_Proj(store, mode_M, pn_ia32_Store8Bit_M);
} else {
store = new_bd_ia32_Store(dbg, block, ptr, noreg, nomem, val);
+ res = new_r_Proj(store, mode_M, pn_ia32_Store_M);
}
set_ia32_op_type(store, ia32_AddrModeD);
sched_remove(node);
}
- exchange(node, store);
+ exchange(node, res);
}
static ir_node *create_push(ir_node *node, ir_node *schedpoint, ir_node *sp, ir_node *mem, ir_entity *ent)
ir_node *res = NULL;
dbg_info *dbgi = get_irn_dbg_info(node);
int size = get_type_size_bytes(get_CopyB_type(node));
+ int throws_exception = ir_throws_exception(node);
int rem;
/* If we have to copy more than 32 bytes, we use REP MOVSx and */
}
res = new_bd_ia32_CopyB_i(dbgi, block, new_dst, new_src, new_mem, size);
}
+ ir_set_throws_exception(res, throws_exception);
SET_IA32_ORIG_NODE(res, node);
ir_mode *lsmode = ia32_reg_classes[CLASS_ia32_fp_cw].mode;
ir_node *nomem = get_irg_no_mem(irg);
ir_node *cwstore, *load, *load_res, *orn, *store, *fldcw;
+ ir_node *store_proj;
ir_node *or_const;
assert(last_state != NULL);
/* use mode_Iu, as movl has a shorter opcode than movw */
set_ia32_ls_mode(store, mode_Iu);
set_ia32_use_frame(store);
+ store_proj = new_r_Proj(store, mode_M, pn_ia32_Store_M);
sched_add_before(before, store);
- fldcw = new_bd_ia32_FldCW(NULL, block, frame, noreg, store);
+ fldcw = new_bd_ia32_FldCW(NULL, block, frame, noreg, store_proj);
set_ia32_op_type(fldcw, ia32_AddrModeS);
set_ia32_ls_mode(fldcw, lsmode);
set_ia32_use_frame(fldcw);
ir_node *val, *mem, *mem_proj;
ir_node *store = stores[i];
ir_node *noreg = ia32_new_NoReg_gp(irg);
+ const ir_edge_t *edge;
+ const ir_edge_t *next;
val = get_irn_n(store, n_ia32_unary_op);
mem = get_irn_n(store, n_ia32_mem);
spreg = arch_get_irn_register(curr_sp);
- push = new_bd_ia32_Push(get_irn_dbg_info(store), block, noreg, noreg, mem, val, curr_sp);
+ push = new_bd_ia32_Push(get_irn_dbg_info(store), block, noreg, noreg,
+ mem, val, curr_sp);
copy_mark(store, push);
if (first_push == NULL)
/* create memory Proj */
mem_proj = new_r_Proj(push, mode_M, pn_ia32_Push_M);
+ /* rewire Store Projs */
+ foreach_out_edge_safe(store, edge, next) {
+ ir_node *proj = get_edge_src_irn(edge);
+ if (!is_Proj(proj))
+ continue;
+ switch (get_Proj_proj(proj)) {
+ case pn_ia32_Store_M:
+ exchange(proj, mem_proj);
+ break;
+ default:
+ panic("unexpected Proj on Store->IncSp");
+ }
+ }
+
/* use the memproj now */
- be_peephole_exchange(store, mem_proj);
+ be_peephole_exchange(store, push);
inc_ofs -= 4;
}
emit => '. mov%M %SI3, %AM',
latency => 2,
units => [ "GP" ],
- mode => "mode_M",
},
Store8Bit => {
emit => '. mov%M %SB3, %AM',
latency => 2,
units => [ "GP" ],
- mode => "mode_M",
},
Lea => {
},
Call => {
+ op_flags => [ "fragile" ],
state => "exc_pinned",
reg_req => {
in => [ "gp", "gp", "none", "gp", "esp", "fpcw", "eax", "ecx", "edx" ],
- out => [ "esp:I|S", "fpcw:I", "none", "eax", "ecx", "edx", "vf0", "vf1", "vf2", "vf3", "vf4", "vf5", "vf6", "vf7", "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7" ]
+ out => [ "esp:I|S", "fpcw:I", "none", "eax", "ecx", "edx", "vf0", "vf1", "vf2", "vf3", "vf4", "vf5", "vf6", "vf7", "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7", "none", "none" ]
},
ins => [ "base", "index", "mem", "addr", "stack", "fpcw", "eax", "ecx", "edx" ],
- outs => [ "stack", "fpcw", "M", "eax", "ecx", "edx", "vf0", "vf1", "vf2", "vf3", "vf4", "vf5", "vf6", "vf7", "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7" ],
+ outs => [ "stack", "fpcw", "M", "eax", "ecx", "edx", "vf0", "vf1", "vf2", "vf3", "vf4", "vf5", "vf6", "vf7", "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7", "X_regular", "X_except" ],
attr_type => "ia32_call_attr_t",
attr => "unsigned pop, ir_type *call_tp",
am => "source,unary",
emit => '. mov%XXM %S3, %AM',
latency => 0,
units => [ "SSE" ],
- mode => "mode_M",
},
xStoreSimple => {
emit => '. mov%XXM %S3, %AM',
latency => 0,
units => [ "SSE" ],
- mode => "mode_M",
},
CvtSI2SS => {
},
Conv_I2I => {
+ op_flags => [ "fragile" ],
state => "exc_pinned",
reg_req => { in => [ "gp", "gp", "none", "gp" ],
- out => [ "gp", "none", "none" ] },
+ out => [ "gp", "none", "none", "none", "none" ] },
ins => [ "base", "index", "mem", "val" ],
- outs => [ "res", "flags", "M" ],
+ outs => [ "res", "flags", "M", "X_regular", "X_except" ],
am => "source,unary",
units => [ "GP" ],
latency => 1,
},
Conv_I2I8Bit => {
+ op_flags => [ "fragile" ],
state => "exc_pinned",
reg_req => { in => [ "gp", "gp", "none", "eax ebx ecx edx" ],
- out => [ "gp", "none", "none" ] },
+ out => [ "gp", "none", "none", "none", "none" ] },
ins => [ "base", "index", "mem", "val" ],
- outs => [ "res", "flags", "M" ],
+ outs => [ "res", "flags", "M", "X_regular", "X_except" ],
am => "source,unary",
units => [ "GP" ],
latency => 1,
init_attr => "attr->attr.ls_mode = store_mode;",
latency => 2,
units => [ "VFP" ],
- mode => "mode_M",
attr_type => "ia32_x87_attr_t",
},
},
vfist => {
+ op_flags => [ "fragile" ],
state => "exc_pinned",
- reg_req => { in => [ "gp", "gp", "none", "vfp", "fpcw" ], out => [ "none" ] },
+ reg_req => { in => [ "gp", "gp", "none", "vfp", "fpcw" ],
+ out => [ "none", "none", "none", "none" ] },
ins => [ "base", "index", "mem", "val", "fpcw" ],
- outs => [ "M" ],
+ outs => [ "dummy", "M", "X_regular", "X_except" ],
latency => 4,
units => [ "VFP" ],
- mode => "mode_M",
attr_type => "ia32_x87_attr_t",
},
# SSE3 fisttp instruction
vfisttp => {
+ op_flags => [ "fragile" ],
state => "exc_pinned",
- reg_req => { in => [ "gp", "gp", "none", "vfp" ], out => [ "in_r4", "none" ]},
+ reg_req => { in => [ "gp", "gp", "none", "vfp" ],
+ out => [ "in_r4", "none", "none", "none" ]},
ins => [ "base", "index", "mem", "val" ],
- outs => [ "res", "M" ],
+ outs => [ "res", "M", "X_regular", "X_except" ],
latency => 4,
units => [ "VFP" ],
attr_type => "ia32_x87_attr_t",
emit => '. movdqu %binop',
units => [ "SSE" ],
latency => 1,
- mode => "mode_M",
},
); # end of %nodes
*/
static ir_node *create_Div(ir_node *node)
{
- dbg_info *dbgi = get_irn_dbg_info(node);
- ir_node *block = get_nodes_block(node);
- ir_node *new_block = be_transform_node(block);
+ dbg_info *dbgi = get_irn_dbg_info(node);
+ ir_node *block = get_nodes_block(node);
+ ir_node *new_block = be_transform_node(block);
+ int throws_exception = ir_throws_exception(node);
ir_node *mem;
ir_node *new_mem;
ir_node *op1;
addr->index, new_mem, am.new_op2,
am.new_op1, sign_extension);
}
+ ir_set_throws_exception(new_node, throws_exception);
set_irn_pinned(new_node, get_irn_pinned(node));
ir_node *new_mem = be_transform_node(mem);
dbg_info *dbgi = get_irn_dbg_info(node);
ir_mode *mode = get_Load_mode(node);
+ int throws_exception = ir_throws_exception(node);
ir_node *base;
ir_node *index;
ir_node *new_node;
new_node = new_bd_ia32_Load(dbgi, block, base, index, new_mem);
}
}
+ ir_set_throws_exception(new_node, throws_exception);
set_irn_pinned(new_node, get_irn_pinned(node));
set_ia32_op_type(new_node, ia32_AddrModeS);
dbg_info *dbgi = get_irn_dbg_info(node);
int ofs = 0;
int i = 0;
+ int throws_exception = ir_throws_exception(node);
ir_node *ins[4];
ia32_address_t addr;
ir_node *new_node = new_bd_ia32_Store(dbgi, new_block, addr.base,
addr.index, addr.mem, imm);
+ ir_node *mem = new_r_Proj(new_node, mode_M, pn_ia32_Store_M);
+ ir_set_throws_exception(new_node, throws_exception);
set_irn_pinned(new_node, get_irn_pinned(node));
set_ia32_op_type(new_node, ia32_AddrModeD);
set_ia32_ls_mode(new_node, mode_Iu);
SET_IA32_ORIG_NODE(new_node, node);
assert(i < 4);
- ins[i++] = new_node;
+ ins[i++] = mem;
size -= 4;
ofs += 4;
if (i > 1) {
return new_rd_Sync(dbgi, new_block, i, ins);
} else {
- return ins[0];
+ return get_Proj_pred(ins[0]);
}
}
/**
* Generate a vfist or vfisttp instruction.
*/
-static ir_node *gen_vfist(dbg_info *dbgi, ir_node *block, ir_node *base, ir_node *index,
- ir_node *mem, ir_node *val, ir_node **fist)
+static ir_node *gen_vfist(dbg_info *dbgi, ir_node *block, ir_node *base,
+ ir_node *index, ir_node *mem, ir_node *val)
{
- ir_node *new_node;
-
if (ia32_cg_config.use_fisttp) {
/* Note: fisttp ALWAYS pop the tos. We have to ensure here that the value is copied
if other users exists */
ir_node *value = new_r_Proj(vfisttp, mode_E, pn_ia32_vfisttp_res);
be_new_Keep(block, 1, &value);
- new_node = new_r_Proj(vfisttp, mode_M, pn_ia32_vfisttp_M);
- *fist = vfisttp;
+ return vfisttp;
} else {
ir_node *trunc_mode = ia32_new_Fpu_truncate(current_ir_graph);
/* do a fist */
- new_node = new_bd_ia32_vfist(dbgi, block, base, index, mem, val, trunc_mode);
- *fist = new_node;
+ ir_node *vfist = new_bd_ia32_vfist(dbgi, block, base, index, mem, val, trunc_mode);
+ return vfist;
}
- return new_node;
}
+
/**
* Transforms a general (no special case) Store.
*
ir_node *ptr = get_Store_ptr(node);
ir_node *mem = get_Store_mem(node);
dbg_info *dbgi = get_irn_dbg_info(node);
- ir_node *new_val, *new_node, *store;
+ int throws_exception = ir_throws_exception(node);
+ ir_node *new_val;
+ ir_node *new_node;
ia32_address_t addr;
/* check for destination address mode */
new_node = new_bd_ia32_vfst(dbgi, new_block, addr.base,
addr.index, addr.mem, new_val, mode);
}
- store = new_node;
} else if (!ia32_cg_config.use_sse2 && is_float_to_int_conv(val)) {
val = get_Conv_op(val);
/* TODO: is this optimisation still necessary at all (middleend)? */
- /* We can skip ALL float->float up-Convs (and strict-up-Convs) before stores. */
+ /* We can skip ALL float->float up-Convs (and strict-up-Convs) before
+ * stores. */
while (is_Conv(val)) {
ir_node *op = get_Conv_op(val);
if (!mode_is_float(get_irn_mode(op)))
val = op;
}
new_val = be_transform_node(val);
- new_node = gen_vfist(dbgi, new_block, addr.base, addr.index, addr.mem, new_val, &store);
+ new_node = gen_vfist(dbgi, new_block, addr.base, addr.index, addr.mem, new_val);
} else {
new_val = create_immediate_or_transform(val, 0);
assert(mode != mode_b);
new_node = new_bd_ia32_Store(dbgi, new_block, addr.base,
addr.index, addr.mem, new_val);
}
- store = new_node;
}
+ ir_set_throws_exception(new_node, throws_exception);
- set_irn_pinned(store, get_irn_pinned(node));
- set_ia32_op_type(store, ia32_AddrModeD);
- set_ia32_ls_mode(store, mode);
+ set_irn_pinned(new_node, get_irn_pinned(node));
+ set_ia32_op_type(new_node, ia32_AddrModeD);
+ set_ia32_ls_mode(new_node, mode);
- set_address(store, &addr);
- SET_IA32_ORIG_NODE(store, node);
+ set_address(new_node, &addr);
+ SET_IA32_ORIG_NODE(new_node, node);
return new_node;
}
ir_graph *irg = current_ir_graph;
dbg_info *dbgi = get_irn_dbg_info(node);
ir_mode *mode = get_irn_mode(node);
+ ir_node *frame = get_irg_frame(irg);
ir_node *fist, *load, *mem;
- mem = gen_vfist(dbgi, block, get_irg_frame(irg), noreg_GP, nomem, new_op, &fist);
+ fist = gen_vfist(dbgi, block, frame, noreg_GP, nomem, new_op);
set_irn_pinned(fist, op_pin_state_floats);
set_ia32_use_frame(fist);
set_ia32_op_type(fist, ia32_AddrModeD);
+ assert((long)pn_ia32_vfist_M == (long) pn_ia32_vfisttp_M);
+ mem = new_r_Proj(fist, mode_M, pn_ia32_vfist_M);
+
assert(get_mode_size_bits(mode) <= 32);
/* exception we can only store signed 32 bit integers, so for unsigned
we store a 64bit (signed) integer and load the lower bits */
ir_graph *irg = get_Block_irg(block);
dbg_info *dbgi = get_irn_dbg_info(node);
ir_node *frame = get_irg_frame(irg);
+ ir_node *store_mem;
ir_node *store, *load;
ir_node *new_node;
set_ia32_op_type(store, ia32_AddrModeD);
SET_IA32_ORIG_NODE(store, node);
- load = new_bd_ia32_vfld(dbgi, block, frame, noreg_GP, store, tgt_mode);
+ store_mem = new_r_Proj(store, mode_M, pn_ia32_vfst_M);
+
+ load = new_bd_ia32_vfld(dbgi, block, frame, noreg_GP, store_mem, tgt_mode);
set_ia32_use_frame(load);
set_ia32_op_type(load, ia32_AddrModeS);
SET_IA32_ORIG_NODE(load, node);
ir_mode *store_mode;
ir_node *fild;
ir_node *store;
+ ir_node *store_mem;
ir_node *new_node;
/* fild can use source AM if the operand is a signed 16bit or 32bit integer */
set_ia32_op_type(store, ia32_AddrModeD);
set_ia32_ls_mode(store, mode_Iu);
+ store_mem = new_r_Proj(store, mode_M, pn_ia32_Store_M);
+
/* exception for 32bit unsigned, do a 64bit spill+load */
if (!mode_is_signed(mode)) {
ir_node *in[2];
ir_node *zero_store = new_bd_ia32_Store(dbgi, block, get_irg_frame(irg),
noreg_GP, nomem, zero_const);
+ ir_node *zero_store_mem = new_r_Proj(zero_store, mode_M, pn_ia32_Store_M);
set_ia32_use_frame(zero_store);
set_ia32_op_type(zero_store, ia32_AddrModeD);
add_ia32_am_offs_int(zero_store, 4);
set_ia32_ls_mode(zero_store, mode_Iu);
- in[0] = zero_store;
- in[1] = store;
+ in[0] = zero_store_mem;
+ in[1] = store_mem;
- store = new_rd_Sync(dbgi, block, 2, in);
+ store_mem = new_rd_Sync(dbgi, block, 2, in);
store_mode = mode_Ls;
} else {
store_mode = mode_Is;
}
/* do a fild */
- fild = new_bd_ia32_vfild(dbgi, block, get_irg_frame(irg), noreg_GP, store);
+ fild = new_bd_ia32_vfild(dbgi, block, get_irg_frame(irg), noreg_GP, store_mem);
set_ia32_use_frame(fild);
set_ia32_op_type(fild, ia32_AddrModeS);
ir_node *block = be_transform_node(get_nodes_block(node));
ir_type *res_type;
ir_mode *mode;
- ir_node *frame, *sse_store, *fld, *mproj;
+ ir_node *frame;
+ ir_node *sse_store;
+ ir_node *store_mem;
+ ir_node *fld;
+ ir_node *mproj;
int i;
int arity;
unsigned pop;
set_ia32_ls_mode(sse_store, mode);
set_ia32_op_type(sse_store, ia32_AddrModeD);
set_ia32_use_frame(sse_store);
+ store_mem = new_r_Proj(sse_store, mode_M, pn_ia32_xStoreSimple_M);
/* load into x87 register */
- fld = new_bd_ia32_vfld(dbgi, block, frame, noreg_GP, sse_store, mode);
+ fld = new_bd_ia32_vfld(dbgi, block, frame, noreg_GP, store_mem, mode);
set_ia32_op_type(fld, ia32_AddrModeS);
set_ia32_use_frame(fld);
ir_node *new_val_high = be_transform_node(val_high);
ir_node *in[2];
ir_node *sync, *fild, *res;
- ir_node *store_low, *store_high;
+ ir_node *store_low;
+ ir_node *store_high;
+ ir_node *mem_low;
+ ir_node *mem_high;
if (ia32_cg_config.use_sse2) {
panic("ia32_l_LLtoFloat not implemented for SSE2");
SET_IA32_ORIG_NODE(store_low, node);
SET_IA32_ORIG_NODE(store_high, node);
+ mem_low = new_r_Proj(store_low, mode_M, pn_ia32_Store_M);
+ mem_high = new_r_Proj(store_high, mode_M, pn_ia32_Store_M);
+
set_ia32_use_frame(store_low);
set_ia32_use_frame(store_high);
set_ia32_op_type(store_low, ia32_AddrModeD);
set_ia32_ls_mode(store_high, mode_Is);
add_ia32_am_offs_int(store_high, 4);
- in[0] = store_low;
- in[1] = store_high;
+ in[0] = mem_low;
+ in[1] = mem_high;
sync = new_rd_Sync(dbgi, block, 2, in);
/* do a fild */
ir_node *frame = get_irg_frame(irg);
ir_node *val = get_irn_n(node, n_ia32_l_FloattoLL_val);
ir_node *new_val = be_transform_node(val);
- ir_node *fist, *mem;
+ ir_node *fist;
- mem = gen_vfist(dbgi, block, frame, noreg_GP, nomem, new_val, &fist);
+ fist = gen_vfist(dbgi, block, frame, noreg_GP, nomem, new_val);
SET_IA32_ORIG_NODE(fist, node);
set_ia32_use_frame(fist);
set_ia32_op_type(fist, ia32_AddrModeD);
set_ia32_ls_mode(fist, mode_Ls);
- return mem;
+ assert((long)pn_ia32_vfist_M == (long) pn_ia32_vfisttp_M);
+ return new_r_Proj(fist, mode_M, pn_ia32_vfist_M);
}
static ir_node *gen_Proj_l_FloattoLL(ir_node *node)
} else if (is_ia32_Conv_I2I(new_pred) ||
is_ia32_Conv_I2I8Bit(new_pred)) {
set_irn_mode(new_pred, mode_T);
- if (proj == pn_Load_res) {
+ switch ((pn_Load)proj) {
+ case pn_Load_res:
return new_rd_Proj(dbgi, new_pred, mode_Iu, pn_ia32_res);
- } else if (proj == pn_Load_M) {
+ case pn_Load_M:
return new_rd_Proj(dbgi, new_pred, mode_M, pn_ia32_mem);
+ case pn_Load_X_except:
+ /* This Load might raise an exception. Mark it. */
+ set_ia32_exc_label(new_pred, 1);
+ return new_rd_Proj(dbgi, new_pred, mode_X, pn_ia32_Conv_I2I_X_except);
+ case pn_Load_X_regular:
+ return new_rd_Proj(dbgi, new_pred, mode_X, pn_ia32_Conv_I2I_X_regular);
}
} else if (is_ia32_xLoad(new_pred)) {
switch ((pn_Load)proj) {
return new_rd_Proj(dbgi, new_pred, mode_M, 1);
}
- panic("No idea how to transform proj");
+ panic("No idea how to transform Proj(Load) %+F", node);
+}
+
+/**
+ * Transform a Proj(Store): map the middleend Store Proj numbers
+ * (pn_Store_M / pn_Store_X_regular / pn_Store_X_except) onto the
+ * corresponding Proj numbers of the ia32 node the Store was lowered to.
+ */
+static ir_node *gen_Proj_Store(ir_node *node)
+{
+ ir_node *pred = get_Proj_pred(node);
+ ir_node *new_pred = be_transform_node(pred);
+ dbg_info *dbgi = get_irn_dbg_info(node);
+ long pn = get_Proj_proj(node);
+
+ if (is_ia32_Store(new_pred) || is_ia32_Store8Bit(new_pred)) {
+ switch ((pn_Store)pn) {
+ case pn_Store_M:
+ return new_rd_Proj(dbgi, new_pred, mode_M, pn_ia32_Store_M);
+ case pn_Store_X_except:
+ return new_rd_Proj(dbgi, new_pred, mode_X, pn_ia32_Store_X_except);
+ case pn_Store_X_regular:
+ return new_rd_Proj(dbgi, new_pred, mode_X, pn_ia32_Store_X_regular);
+ }
+ } else if (is_ia32_vfist(new_pred)) {
+ switch ((pn_Store)pn) {
+ case pn_Store_M:
+ return new_rd_Proj(dbgi, new_pred, mode_M, pn_ia32_vfist_M);
+ case pn_Store_X_except:
+ return new_rd_Proj(dbgi, new_pred, mode_X, pn_ia32_vfist_X_except);
+ case pn_Store_X_regular:
+ return new_rd_Proj(dbgi, new_pred, mode_X, pn_ia32_vfist_X_regular);
+ }
+ } else if (is_ia32_vfisttp(new_pred)) {
+ switch ((pn_Store)pn) {
+ case pn_Store_M:
+ return new_rd_Proj(dbgi, new_pred, mode_M, pn_ia32_vfisttp_M);
+ case pn_Store_X_except:
+ return new_rd_Proj(dbgi, new_pred, mode_X, pn_ia32_vfisttp_X_except);
+ case pn_Store_X_regular:
+ return new_rd_Proj(dbgi, new_pred, mode_X, pn_ia32_vfisttp_X_regular);
+ }
+ } else if (is_ia32_vfst(new_pred)) {
+ switch ((pn_Store)pn) {
+ case pn_Store_M:
+ return new_rd_Proj(dbgi, new_pred, mode_M, pn_ia32_vfst_M);
+ case pn_Store_X_except:
+ return new_rd_Proj(dbgi, new_pred, mode_X, pn_ia32_vfst_X_except);
+ case pn_Store_X_regular:
+ return new_rd_Proj(dbgi, new_pred, mode_X, pn_ia32_vfst_X_regular);
+ }
+ } else if (is_ia32_xStore(new_pred)) {
+ switch ((pn_Store)pn) {
+ case pn_Store_M:
+ return new_rd_Proj(dbgi, new_pred, mode_M, pn_ia32_xStore_M);
+ case pn_Store_X_except:
+ return new_rd_Proj(dbgi, new_pred, mode_X, pn_ia32_xStore_X_except);
+ case pn_Store_X_regular:
+ return new_rd_Proj(dbgi, new_pred, mode_X, pn_ia32_xStore_X_regular);
+ }
+ } else if (is_Sync(new_pred)) {
+ /* hack for the case that gen_float_const_Store produced a Sync */
+ if (pn == pn_Store_M) {
+ return new_pred;
+ }
+ panic("exception control flow for gen_float_const_Store not implemented yet");
+ } else if (get_ia32_op_type(new_pred) == ia32_AddrModeD) {
+ /* destination address mode: the Store was folded into another node,
+ * which then produces the memory value itself */
+ if (pn == pn_Store_M) {
+ return new_pred;
+ }
+ panic("exception control flow for destination AM not implemented yet");
+ }
+
+ panic("No idea how to transform Proj(Store) %+F", node);
}
/**
dbg_info *dbgi = get_irn_dbg_info(node);
long proj = get_Proj_proj(node);
- switch (proj) {
+ switch ((pn_CopyB)proj) {
case pn_CopyB_M:
if (is_ia32_CopyB_i(new_pred)) {
return new_rd_Proj(dbgi, new_pred, mode_M, pn_ia32_CopyB_i_M);
return new_rd_Proj(dbgi, new_pred, mode_M, pn_ia32_CopyB_M);
}
break;
- default:
+ case pn_CopyB_X_regular:
+ if (is_ia32_CopyB_i(new_pred)) {
+ return new_rd_Proj(dbgi, new_pred, mode_X, pn_ia32_CopyB_i_X_regular);
+ } else if (is_ia32_CopyB(new_pred)) {
+ return new_rd_Proj(dbgi, new_pred, mode_X, pn_ia32_CopyB_X_regular);
+ }
+ break;
+ case pn_CopyB_X_except:
+ if (is_ia32_CopyB_i(new_pred)) {
+ return new_rd_Proj(dbgi, new_pred, mode_X, pn_ia32_CopyB_i_X_except);
+ } else if (is_ia32_CopyB(new_pred)) {
+ return new_rd_Proj(dbgi, new_pred, mode_X, pn_ia32_CopyB_X_except);
+ }
break;
}
unsigned const pop = be_Call_get_pop(node);
ir_type *const call_tp = be_Call_get_type(node);
int old_no_pic_adjust;
+ int throws_exception = ir_throws_exception(node);
/* Run the x87 simulator if the call returns a float value */
if (get_method_n_ress(call_tp) > 0) {
mem = transform_AM_mem(block, src_ptr, src_mem, addr->mem);
call = new_bd_ia32_Call(dbgi, block, addr->base, addr->index, mem,
am.new_op2, sp, fpcw, eax, ecx, edx, pop, call_tp);
+ ir_set_throws_exception(call, throws_exception);
set_am_attributes(call, &am);
call = fix_mem_proj(call, &am);
ir_mode *mode = get_irn_mode(node);
ir_node *res;
- if (proj == pn_be_Call_M_regular) {
+ if (proj == pn_be_Call_M) {
return new_rd_Proj(dbgi, new_call, mode_M, n_ia32_Call_mem);
}
/* transform call modes */
/* Map from be_Call to ia32_Call proj number */
if (proj == pn_be_Call_sp) {
proj = pn_ia32_Call_stack;
- } else if (proj == pn_be_Call_M_regular) {
+ } else if (proj == pn_be_Call_M) {
proj = pn_ia32_Call_M;
+ } else if (proj == pn_be_Call_X_except) {
+ proj = pn_ia32_Call_X_except;
+ } else if (proj == pn_be_Call_X_regular) {
+ proj = pn_ia32_Call_X_regular;
} else {
arch_register_req_t const *const req = arch_get_register_req_out(node);
int const n_outs = arch_irn_get_n_outs(new_call);
/* TODO arch_set_irn_register() only operates on Projs, need variant with index */
switch (proj) {
- case pn_ia32_Call_stack:
- arch_set_irn_register(res, &ia32_registers[REG_ESP]);
- break;
+ case pn_ia32_Call_stack:
+ arch_set_irn_register(res, &ia32_registers[REG_ESP]);
+ break;
- case pn_ia32_Call_fpcw:
- arch_set_irn_register(res, &ia32_registers[REG_FPCW]);
- break;
+ case pn_ia32_Call_fpcw:
+ arch_set_irn_register(res, &ia32_registers[REG_FPCW]);
+ break;
}
return res;
long proj;
switch (get_irn_opcode(pred)) {
- case iro_Store:
- proj = get_Proj_proj(node);
- if (proj == pn_Store_M) {
- return be_transform_node(pred);
- } else {
- panic("No idea how to transform proj->Store");
- }
case iro_Load:
return gen_Proj_Load(node);
+ case iro_Store:
+ return gen_Proj_Store(node);
case iro_ASM:
return gen_Proj_ASM(node);
case iro_Builtin:
ir_mode *mode = get_ia32_ls_mode(succ);
ir_node *st = new_bd_ia32_vfst(db, block, base, index, mem, value, mode);
+ //ir_node *mem = new_r_Proj(st, mode_M, pn_ia32_vfst_M);
set_ia32_am_offs_int(st, get_ia32_am_offs_int(succ));
if (is_ia32_use_frame(succ))
set_ia32_use_frame(st);
set_irn_pinned(st, get_irn_pinned(succ));
set_ia32_op_type(st, ia32_AddrModeD);
+ assert((long)pn_ia32_xStore_M == (long)pn_ia32_vfst_M);
+ assert((long)pn_ia32_xStore_X_regular == (long)pn_ia32_vfst_X_regular);
+ assert((long)pn_ia32_xStore_X_except == (long)pn_ia32_vfst_X_except);
+
exchange(succ, st);
- } else {
- if (new_res == NULL) {
- dbg_info *db = get_irn_dbg_info(call);
- ir_node *block = get_nodes_block(call);
- ir_node *frame = get_irg_frame(current_ir_graph);
- ir_node *old_mem = be_get_Proj_for_pn(call, pn_ia32_Call_M);
- ir_node *call_mem = new_r_Proj(call, mode_M, pn_ia32_Call_M);
- ir_node *vfst, *xld, *new_mem;
-
- /* store st(0) on stack */
- vfst = new_bd_ia32_vfst(db, block, frame, noreg_GP, call_mem, res, mode);
- set_ia32_op_type(vfst, ia32_AddrModeD);
- set_ia32_use_frame(vfst);
-
- /* load into SSE register */
- xld = new_bd_ia32_xLoad(db, block, frame, noreg_GP, vfst, mode);
- set_ia32_op_type(xld, ia32_AddrModeS);
- set_ia32_use_frame(xld);
-
- new_res = new_r_Proj(xld, mode, pn_ia32_xLoad_res);
- new_mem = new_r_Proj(xld, mode_M, pn_ia32_xLoad_M);
-
- if (old_mem != NULL) {
- edges_reroute(old_mem, new_mem);
- kill_node(old_mem);
- }
+ } else if (new_res == NULL) {
+ dbg_info *db = get_irn_dbg_info(call);
+ ir_node *block = get_nodes_block(call);
+ ir_node *frame = get_irg_frame(current_ir_graph);
+ ir_node *old_mem = be_get_Proj_for_pn(call, pn_ia32_Call_M);
+ ir_node *call_mem = new_r_Proj(call, mode_M, pn_ia32_Call_M);
+ ir_node *vfst, *xld, *new_mem;
+ ir_node *vfst_mem;
+
+ /* store st(0) on stack */
+ vfst = new_bd_ia32_vfst(db, block, frame, noreg_GP, call_mem,
+ res, mode);
+ set_ia32_op_type(vfst, ia32_AddrModeD);
+ set_ia32_use_frame(vfst);
+
+ vfst_mem = new_r_Proj(vfst, mode_M, pn_ia32_vfst_M);
+
+ /* load into SSE register */
+ xld = new_bd_ia32_xLoad(db, block, frame, noreg_GP, vfst_mem,
+ mode);
+ set_ia32_op_type(xld, ia32_AddrModeS);
+ set_ia32_use_frame(xld);
+
+ new_res = new_r_Proj(xld, mode, pn_ia32_xLoad_res);
+ new_mem = new_r_Proj(xld, mode_M, pn_ia32_xLoad_M);
+
+ if (old_mem != NULL) {
+ edges_reroute(old_mem, new_mem);
+ kill_node(old_mem);
}
- set_irn_n(succ, get_edge_src_pos(edge), new_res);
}
+ set_irn_n(succ, get_edge_src_pos(edge), new_res);
}
}
}
return get_Proj_proj(node) == pred->op->pn_x_regular;
}
+/* Set the throws_exception attribute of a fragile node; only valid on
+ * fragile ops (they carry an except_attr). */
+void ir_set_throws_exception(ir_node *node, int throws_exception)
+{
+ except_attr *attr = &node->attr.except;
+ assert(is_fragile_op(node));
+ attr->throws_exception = throws_exception;
+}
+
+/* Return the throws_exception attribute of a fragile node; only valid on
+ * fragile ops (they carry an except_attr). */
+int ir_throws_exception(const ir_node *node)
+{
+ const except_attr *attr = &node->attr.except;
+ assert(is_fragile_op(node));
+ return attr->throws_exception;
+}
+
ir_node **get_Tuple_preds_arr(ir_node *node)
{
assert(is_Tuple(node));
/* Returns true if the operation manipulates control flow. */
int is_cfop(const ir_node *node)
{
+ /* a fragile op that is marked as throwing produces X_regular/X_except
+ * outputs and therefore acts as a control flow operation, too */
+ if (is_fragile_op(node) && ir_throws_exception(node))
+ return true;
+
return is_op_cfopcode(get_irn_op(node));
}
state = _get_op_pinned(_get_irn_op(node));
if (state >= op_pin_state_exc_pinned)
- return node->attr.except.pin_state;
+ return (op_pin_state)node->attr.except.pin_state;
return state;
}
ir_node *bad = new_r_Bad(irg, mode_X);
ir_mode *mode = get_Load_mode(n);
ir_node *res = new_r_Proj(pred_load, mode, pn_Load_res);
- ir_node *in[pn_Load_max+1] = { mem, jmp, bad, res };
+ ir_node *in[pn_Load_max+1] = { mem, res, jmp, bad };
ir_node *tuple = new_r_Tuple(block, ARRAY_SIZE(in), in);
return tuple;
}
ir_graph *irg = get_irn_irg(n);
ir_node *bad = new_r_Bad(irg, mode_X);
ir_node *res = value;
- ir_node *in[pn_Load_max+1] = { mem, jmp, bad, res };
+ ir_node *in[pn_Load_max+1] = { mem, res, jmp, bad };
ir_node *tuple = new_r_Tuple(block, ARRAY_SIZE(in), in);
return tuple;
}
/** Exception attributes. */
typedef struct except_attr {
-	op_pin_state pin_state; /**< the pin state for operations that might generate a exception:
-	                             If it's know that no exception will be generated, could be set to
-	                             op_pin_state_floats. */
+ unsigned pin_state : 2; /**< the pin state for operations with
+ variable pinned state. Contains an
+ op_pin_state */
+ unsigned throws_exception : 1; /**< if true a fragile op throws and
+ must produce X_except and X_regular
+ values */
} except_attr;
/** Call attributes. */
return 1;
}
+/**
+ * Verify a Proj of a fragile node: X_regular/X_except Projs are only
+ * allowed when the fragile node is marked as throwing an exception.
+ *
+ * @return 1 on success, 0 (via ASSERT_AND_RET) on verification failure
+ */
+static int verify_node_Proj_fragile(const ir_node *node)
+{
+ ir_node *pred = get_Proj_pred(node);
+ int throws_exception = ir_throws_exception(pred);
+ ASSERT_AND_RET((!is_x_except_Proj(node) || throws_exception)
+ && (!is_x_regular_Proj(node) || throws_exception),
+ "X_except and X_regular Proj only allowed when throws_exception is set",
+ 0);
+ return 1;
+}
+
/**
* verify a Proj node
*/
ASSERT_AND_RET(get_irn_mode(pred) == mode_T, "mode of a 'projed' node is not Tuple", 0);
ASSERT_AND_RET(get_irg_pinned(irg) == op_pin_state_floats || get_nodes_block(pred) == get_nodes_block(p), "Proj must be in same block as its predecessor", 0);
+ if (is_fragile_op(pred)) {
+ int res = verify_node_Proj_fragile(p);
+ if (res != 1)
+ return res;
+ }
+
op = get_irn_op(pred);
if (op->ops.verify_proj_node)
return op->ops.verify_proj_node(p);
* @param reg_jmp new regular control flow, if NULL, a Jmp will be used
* @param exc_jmp new exception control flow, if reg_jmp == NULL, a Bad will be used
*/
-static void replace_call(ir_node *irn, ir_node *call, ir_node *mem, ir_node *reg_jmp, ir_node *exc_jmp)
+static void replace_call(ir_node *irn, ir_node *call, ir_node *mem,
+ ir_node *reg_jmp, ir_node *exc_jmp)
{
ir_node *block = get_nodes_block(call);
ir_graph *irg = get_irn_irg(block);
+ ir_node *rest = new_r_Tuple(block, 1, &irn);
-	if (reg_jmp == NULL) {
-
-	/* Beware: do we need here a protection against CSE? Better we do it. */
-	int old_cse = get_opt_cse();
-	set_opt_cse(0);
-	reg_jmp = new_r_Jmp(block);
-	set_opt_cse(old_cse);
-	exc_jmp = new_r_Bad(irg, mode_X);
+ /* a throwing call needs the full tuple including the X_regular and
+ * X_except control flow outputs; a non-throwing call gets a smaller
+ * tuple without them (the asserts check the Proj number layout) */
+ if (ir_throws_exception(call)) {
+ turn_into_tuple(call, pn_Call_max+1);
+ if (reg_jmp == NULL) {
+ reg_jmp = new_r_Jmp(block);
+ }
+ if (exc_jmp == NULL) {
+ exc_jmp = new_r_Bad(irg, mode_X);
+ }
+ set_Tuple_pred(call, pn_Call_X_regular, reg_jmp);
+ set_Tuple_pred(call, pn_Call_X_except, exc_jmp);
+ } else {
+ assert(reg_jmp == NULL);
+ assert(exc_jmp == NULL);
+ turn_into_tuple(call, pn_Call_T_result+1);
+ assert(pn_Call_M <= pn_Call_T_result);
+ assert(pn_Call_X_regular > pn_Call_T_result);
+ assert(pn_Call_X_except > pn_Call_T_result);
}
-	irn = new_r_Tuple(block, 1, &irn);
-
-	turn_into_tuple(call, pn_Call_max+1);
set_Tuple_pred(call, pn_Call_M, mem);
-	set_Tuple_pred(call, pn_Call_X_regular, reg_jmp);
-	set_Tuple_pred(call, pn_Call_X_except, exc_jmp);
-	set_Tuple_pred(call, pn_Call_T_result, irn);
-} /* replace_call */
+ set_Tuple_pred(call, pn_Call_T_result, rest);
+}
/* A mapper for the integer abs. */
int i_mapper_abs(ir_node *call, void *ctx)
irn = new_rd_Alloc(dbg, block, mem, op, firm_unknown_type, stack_alloc);
mem = new_rd_Proj(dbg, irn, mode_M, pn_Alloc_M);
- no_exc = new_rd_Proj(dbg, irn, mode_X, pn_Alloc_X_regular);
- exc = new_rd_Proj(dbg, irn, mode_X, pn_Alloc_X_except);
+ /* Build the exception Projs on the Alloc node itself, BEFORE irn is
+  * re-assigned to the result Proj below; likewise throws_exception must
+  * be set on the Alloc, not on one of its Projs. */
+ if (ir_throws_exception(call)) {
+ 	no_exc = new_rd_Proj(dbg, irn, mode_X, pn_Alloc_X_regular);
+ 	exc = new_rd_Proj(dbg, irn, mode_X, pn_Alloc_X_except);
+ 	ir_set_throws_exception(irn, true);
+ } else {
+ 	no_exc = NULL;
+ 	exc = NULL;
+ }
irn = new_rd_Proj(dbg, irn, get_modeP_data(), pn_Alloc_res);
DBG_OPT_ALGSIM0(call, irn, FS_OPT_RTS_ALLOCA);
replace_call(irn, call, mem, no_exc, exc);
/* A mapper for the floating point pow. */
int i_mapper_pow(ir_node *call, void *ctx)
{
+ ir_node *left = get_Call_param(call, 0);
+ ir_node *right = get_Call_param(call, 1);
+ ir_node *block = get_nodes_block(call);
+ ir_graph *irg = get_irn_irg(block);
+ ir_node *reg_jmp = NULL;
+ ir_node *exc_jmp = NULL;
+ ir_node *irn;
dbg_info *dbg;
ir_node *mem;
- ir_node *left = get_Call_param(call, 0);
- ir_node *right = get_Call_param(call, 1);
- ir_node *block = get_nodes_block(call);
- ir_graph *irg = get_irn_irg(block);
- ir_node *irn, *reg_jmp = NULL, *exc_jmp = NULL;
(void) ctx;
if (is_Const(left) && is_Const_one(left)) {
div = new_rd_Div(dbg, block, mem, irn, left, mode, op_pin_state_pinned);
mem = new_r_Proj(div, mode_M, pn_Div_M);
irn = new_r_Proj(div, mode, pn_Div_res);
- reg_jmp = new_r_Proj(div, mode_X, pn_Div_X_regular);
- exc_jmp = new_r_Proj(div, mode_X, pn_Div_X_except);
+ if (ir_throws_exception(call)) {
+ reg_jmp = new_r_Proj(div, mode_X, pn_Div_X_regular);
+ exc_jmp = new_r_Proj(div, mode_X, pn_Div_X_except);
+ ir_set_throws_exception(div, true);
+ }
}
DBG_OPT_ALGSIM0(call, irn, FS_OPT_RTS_POW);
replace_call(irn, call, mem, reg_jmp, exc_jmp);
/* replace the strcmp by (*x) */
irn = new_rd_Load(dbg, block, mem, v, mode, cons_none);
mem = new_r_Proj(irn, mode_M, pn_Load_M);
- exc = new_r_Proj(irn, mode_X, pn_Load_X_except);
- reg = new_r_Proj(irn, mode_X, pn_Load_X_regular);
+ /* Build the exception Projs on the Load node itself, BEFORE irn is
+  * re-assigned to the result Proj below; likewise throws_exception must
+  * be set on the Load, not on its result Proj. */
+ if (ir_throws_exception(call)) {
+ 	exc = new_r_Proj(irn, mode_X, pn_Load_X_except);
+ 	reg = new_r_Proj(irn, mode_X, pn_Load_X_regular);
+ 	ir_set_throws_exception(irn, true);
+ } else {
+ 	exc = NULL;
+ 	reg = NULL;
+ }
irn = new_r_Proj(irn, mode, pn_Load_res);
/* conv to the result mode */
mode = get_type_mode(res_tp);
fqname = ".exc.pin_state",
init = "pin_state"
))
+ if hasattr(node, "throws_init"):
+ initattrs.append(dict(
+ fqname = ".exc.throws_exception",
+ init = node.throws_init
+ ))
for arg in node.constructor_args:
arguments.append(prepare_attr(arg))
]
outs = [
("M", "memory result"),
+ ("res", "pointer to newly allocated memory"),
("X_regular", "control flow when no exception occurs"),
("X_except", "control flow when exception occured"),
- ("res", "pointer to newly allocated memory"),
]
attrs = [
dict(
]
flags = [ "fragile", "uses_memory" ]
pinned = "exception"
+ throws_init = "false"
pinned_init = "op_pin_state_pinned"
attr_struct = "alloc_attr"
]
outs = [
("M", "memory result"),
+ ("res", "the checked index"),
("X_regular", "control flow when no exception occurs"),
("X_except", "control flow when exception occured"),
- ("res", "the checked index"),
]
flags = [ "fragile", "highlevel" ]
pinned = "exception"
pinned_init = "op_pin_state_pinned"
+ throws_init = "false"
attr_struct = "bound_attr"
attrs_name = "bound"
arity = "variable"
outs = [
("M", "memory result"),
+ ("T_result", "tuple containing all results"),
("X_regular", "control flow when no exception occurs"),
("X_except", "control flow when exception occured"),
- ("T_result", "tuple containing all results"),
]
flags = [ "fragile", "uses_memory" ]
attrs = [
attr_struct = "call_attr"
pinned = "memory"
pinned_init = "op_pin_state_pinned"
+ throws_init = "false"
init = '''
assert((get_unknown_type() == type) || is_Method_type(type));
'''
attrs_name = "copyb"
pinned = "memory"
pinned_init = "op_pin_state_pinned"
+ throws_init = "false"
class Div(Op):
"""returns the quotient of its 2 operands"""
]
outs = [
("M", "memory result"),
+ ("res", "result of computation"),
("X_regular", "control flow when no exception occurs"),
("X_except", "control flow when exception occured"),
- ("res", "result of computation"),
]
flags = [ "fragile", "uses_memory" ]
attrs_name = "div"
]
attr_struct = "div_attr"
pinned = "exception"
+ throws_init = "false"
op_index = 1
arity_override = "oparity_binary"
]
outs = [
("M", "memory result"),
+ ("res", "checked object pointer"),
("X_regular", "control flow when no exception occurs"),
("X_except", "control flow when exception occured"),
- ("res", "checked object pointer"),
]
flags = [ "highlevel" ]
attrs = [
]
outs = [
("M", "memory result"),
+ ("res", "result of load operation"),
("X_regular", "control flow when no exception occurs"),
("X_except", "control flow when exception occured"),
- ("res", "result of load operation"),
]
flags = [ "fragile", "uses_memory" ]
pinned = "exception"
),
]
pinned_init = "flags & cons_floats ? op_pin_state_floats : op_pin_state_pinned"
+ throws_init = "(flags & cons_throws_exception) != 0"
class Minus(Unop):
"""returns the difference between its operands"""
]
outs = [
("M", "memory result"),
+ ("res", "result of computation"),
("X_regular", "control flow when no exception occurs"),
("X_except", "control flow when exception occured"),
- ("res", "result of computation"),
]
flags = [ "fragile", "uses_memory" ]
attrs_name = "mod"
]
attr_struct = "mod_attr"
pinned = "exception"
+ throws_init = "false"
op_index = 1
arity_override = "oparity_binary"
pinned = "exception"
attr_struct = "store_attr"
pinned_init = "flags & cons_floats ? op_pin_state_floats : op_pin_state_pinned"
+ throws_init = "(flags & cons_throws_exception) != 0"
attrs = [
dict(
type = "ir_volatility",
def is_dynamic_pinned(node):
return node.pinned in ["memory", "exception"]
+def is_fragile(node):
+ # A node spec is "fragile" (may raise an exception) when it carries the
+ # "fragile" flag; guard with hasattr since not every spec defines flags.
+ return hasattr(node, "flags") and "fragile" in node.flags
+
def inout_contains(l, name):
for entry in l:
if entry[0] == name:
print "WARNING: no flags specified for %s\n" % node.__name__
elif type(node.flags) != list:
print "ERROR: flags of %s not a list" % node.__name__
- if hasattr(node, "flags"):
- flags = node.flags
- if "fragile" in flags:
- if not inout_contains(node.ins, "mem"):
- print "ERROR: fragile node %s needs an input named 'mem'" % node.__name__
- if not inout_contains(node.outs, "X_regular"):
- print "ERROR: fragile node %s needs an output named 'X_regular'" % node.__name__
- if not inout_contains(node.outs, "X_except"):
- print "ERROR: fragile node %s needs an output named 'X_except'" % node.__name__
+ if hasattr(node, "pinned_init") and not is_dynamic_pinned(node):
+ print "ERROR: node %s has pinned_init attribute but is not marked as dynamically pinned" % node.__name__
+ if is_fragile(node):
+ if not is_dynamic_pinned(node):
+ print "ERROR: fragile node %s must be dynamically pinned" % node.__name__
+ if not hasattr(node, "throws_init"):
+ print "ERROR: fragile node %s needs a throws_init attribute" % node.__name__
+ if not inout_contains(node.ins, "mem"):
+ print "ERROR: fragile node %s needs an input named 'mem'" % node.__name__
+ if not inout_contains(node.outs, "X_regular"):
+ print "ERROR: fragile node %s needs an output named 'X_regular'" % node.__name__
+ if not inout_contains(node.outs, "X_except"):
+ print "ERROR: fragile node %s needs an output named 'X_except'" % node.__name__
+ else:
+ if hasattr(node, "throws_init"):
+ print "ERROR: throws_init only makes sense for fragile nodes"
+
def setldefault(node, attr, val):
# Don't use hasattr, as these things should not be inherited