am->commutative = commutative;
}
-static void set_transformed_and_mark(ir_node *const old_node, ir_node *const new_node)
-{
- mark_irn_visited(old_node);
- be_set_transformed_node(old_node, new_node);
-}
-
static ir_node *fix_mem_proj(ir_node *node, ia32_address_mode_t *am)
{
ir_mode *mode;
mode = get_irn_mode(node);
load = get_Proj_pred(am->mem_proj);
- set_transformed_and_mark(load, node);
+ be_set_transformed_node(load, node);
if (mode != mode_T) {
set_irn_mode(node, mode_T);
am.new_op1, am.new_op2);
set_am_attributes(new_node, &am);
/* we can't use source address mode anymore when using immediates */
- if (is_ia32_Immediate(am.new_op1) || is_ia32_Immediate(am.new_op2))
- set_ia32_am_support(new_node, ia32_am_None, ia32_am_arity_none);
+ if (!(flags & match_am_and_immediates) &&
+ (is_ia32_Immediate(am.new_op1) || is_ia32_Immediate(am.new_op2)))
+ set_ia32_am_support(new_node, ia32_am_none);
SET_IA32_ORIG_NODE(new_node, ia32_get_old_node_name(env_cg, node));
new_node = fix_mem_proj(new_node, &am);
addr->mem, am.new_op1, am.new_op2, new_eflags);
set_am_attributes(new_node, &am);
/* we can't use source address mode anymore when using immediates */
- if(is_ia32_Immediate(am.new_op1) || is_ia32_Immediate(am.new_op2))
- set_ia32_am_support(new_node, ia32_am_None, ia32_am_arity_none);
+ if (!(flags & match_am_and_immediates) &&
+ (is_ia32_Immediate(am.new_op1) || is_ia32_Immediate(am.new_op2)))
+ set_ia32_am_support(new_node, ia32_am_none);
SET_IA32_ORIG_NODE(new_node, ia32_get_old_node_name(env_cg, node));
new_node = fix_mem_proj(new_node, &am);
*
* @return the created ia32 Mulh node
*/
-static ir_node *gen_Mulh(ir_node *node)
-{
- ir_node *block = get_nodes_block(node);
- ir_node *new_block = be_transform_node(block);
- ir_graph *irg = current_ir_graph;
- dbg_info *dbgi = get_irn_dbg_info(node);
- ir_mode *mode = get_irn_mode(node);
- ir_node *op1 = get_Mulh_left(node);
- ir_node *op2 = get_Mulh_right(node);
- ir_node *proj_res_high;
- ir_node *new_node;
- ia32_address_mode_t am;
- ia32_address_t *addr = &am.addr;
-
- assert(!mode_is_float(mode) && "Mulh with float not supported");
- assert(get_mode_size_bits(mode) == 32);
-
- match_arguments(&am, block, op1, op2, NULL, match_commutative | match_am);
+static ir_node *gen_Mulh(ir_node *node) {
+ ir_node *block = get_nodes_block(node);
+ ir_node *new_block = be_transform_node(block);
+ dbg_info *dbgi = get_irn_dbg_info(node);
+ ir_node *op1 = get_Mulh_left(node);
+ ir_node *op2 = get_Mulh_right(node);
+ ir_mode *mode = get_irn_mode(node);
+ ir_node *new_node;
+ ir_node *proj_res_high;
if (mode_is_signed(mode)) {
- new_node = new_rd_ia32_IMul1OP(dbgi, irg, new_block, addr->base,
- addr->index, addr->mem, am.new_op1,
- am.new_op2);
+ new_node = gen_binop(node, op1, op2, new_rd_ia32_IMul1OP, match_commutative | match_am);
+ proj_res_high = new_rd_Proj(dbgi, current_ir_graph, new_block, new_node,
+ mode_Iu, pn_ia32_IMul1OP_res_high);
} else {
- new_node = new_rd_ia32_Mul(dbgi, irg, new_block, addr->base,
- addr->index, addr->mem, am.new_op1,
- am.new_op2);
+ new_node = gen_binop(node, op1, op2, new_rd_ia32_Mul, match_commutative | match_am);
+ proj_res_high = new_rd_Proj(dbgi, current_ir_graph, new_block, new_node,
+ mode_Iu, pn_ia32_Mul_res_high);
}
-
- set_am_attributes(new_node, &am);
- /* we can't use source address mode anymore when using immediates */
- if(is_ia32_Immediate(am.new_op1) || is_ia32_Immediate(am.new_op2))
- set_ia32_am_support(new_node, ia32_am_None, ia32_am_arity_none);
- SET_IA32_ORIG_NODE(new_node, ia32_get_old_node_name(env_cg, node));
-
- assert(get_irn_mode(new_node) == mode_T);
-
- fix_mem_proj(new_node, &am);
-
- assert(pn_ia32_IMul1OP_res_high == pn_ia32_Mul_res_high);
- proj_res_high = new_rd_Proj(dbgi, irg, block, new_node,
- mode_Iu, pn_ia32_IMul1OP_res_high);
-
return proj_res_high;
}
-
-
/**
* Creates an ia32 And.
*
set_ia32_ls_mode(new_node, mode);
SET_IA32_ORIG_NODE(new_node, ia32_get_old_node_name(env_cg, node));
- set_transformed_and_mark(get_Proj_pred(am.mem_proj), new_node);
+ be_set_transformed_node(get_Proj_pred(am.mem_proj), new_node);
mem_proj = be_transform_node(am.mem_proj);
- set_transformed_and_mark(mem_proj ? mem_proj : am.mem_proj, new_node);
+ be_set_transformed_node(mem_proj ? mem_proj : am.mem_proj, new_node);
return new_node;
}
set_ia32_ls_mode(new_node, mode);
SET_IA32_ORIG_NODE(new_node, ia32_get_old_node_name(env_cg, node));
- set_transformed_and_mark(get_Proj_pred(am.mem_proj), new_node);
+ be_set_transformed_node(get_Proj_pred(am.mem_proj), new_node);
mem_proj = be_transform_node(am.mem_proj);
- set_transformed_and_mark(mem_proj ? mem_proj : am.mem_proj, new_node);
+ be_set_transformed_node(mem_proj ? mem_proj : am.mem_proj, new_node);
return new_node;
}
return new_node;
}
-static int is_float_to_int32_conv(const ir_node *node)
+static int is_float_to_int_conv(const ir_node *node)
{
ir_mode *mode = get_irn_mode(node);
ir_node *conv_op;
ir_mode *conv_mode;
- if(get_mode_size_bits(mode) != 32 || !ia32_mode_needs_gp_reg(mode))
- return 0;
- /* don't report unsigned as conv to 32bit, because we really need to do
- * a vfist with 64bit signed in this case */
- if(!mode_is_signed(mode))
+ if (mode != mode_Is && mode != mode_Hs)
return 0;
if(!is_Conv(node))
addr.index, addr.mem, new_val, mode);
}
store = new_node;
- } else if (!ia32_cg_config.use_sse2 && is_float_to_int32_conv(val)) {
+ } else if (!ia32_cg_config.use_sse2 && is_float_to_int_conv(val)) {
val = get_Conv_op(val);
/* TODO: is this optimisation still necessary at all (middleend)? */
/**
* returns true if it is assured, that the upper bits of a node are "clean"
* which means for a 16 or 8 bit value, that the upper bits in the register
- * are 0 for unsigned and a copy of the last significant bit for unsigned
+ * are 0 for unsigned and a copy of the most significant bit for signed
* numbers.
*/
static bool upper_bits_clean(ir_node *transformed_node, ir_mode *mode)
ir_node *new_node;
int src_bits;
- /* fild can use source AM if the operand is a signed 32bit integer */
- if (src_mode == mode_Is) {
+ /* fild can use source AM if the operand is a signed 16bit or 32bit integer */
+ if (src_mode == mode_Is || src_mode == mode_Hs) {
ia32_address_mode_t am;
match_arguments(&am, src_block, NULL, op, NULL,
- match_am | match_try_am);
+ match_am | match_try_am | match_16bit | match_16bit_am);
if (am.op_type == ia32_AddrModeS) {
ia32_address_t *addr = &am.addr;
arity, in);
copy_node_attr(barrier, new_barrier);
be_duplicate_deps(barrier, new_barrier);
- set_transformed_and_mark(barrier, new_barrier);
+ be_set_transformed_node(barrier, new_barrier);
/* transform normally */
return be_duplicate_node(node);
copy_node_attr(node, phi);
be_duplicate_deps(node, phi);
- be_set_transformed_node(node, phi);
be_enqueue_preds(node);
return phi;
panic("No idea how to transform proj->Quot");
}
-static ir_node *gen_be_Call(ir_node *node) {
- ir_node *res = be_duplicate_node(node);
- ir_type *call_tp;
-
- be_node_add_flags(res, -1, arch_irn_flags_modify_flags);
+static ir_node *gen_be_Call(ir_node *node)
+{
+ dbg_info *const dbgi = get_irn_dbg_info(node);
+ ir_graph *const irg = current_ir_graph;
+ ir_node *const src_block = get_nodes_block(node);
+ ir_node *const block = be_transform_node(src_block);
+ ir_node *const src_mem = get_irn_n(node, be_pos_Call_mem);
+ ir_node *const src_sp = get_irn_n(node, be_pos_Call_sp);
+ ir_node *const sp = be_transform_node(src_sp);
+ ir_node *const src_ptr = get_irn_n(node, be_pos_Call_ptr);
+ ir_node *const noreg = ia32_new_NoReg_gp(env_cg);
+ ia32_address_mode_t am;
+ ia32_address_t *const addr = &am.addr;
+ ir_node * mem;
+ ir_node * call;
+ int i;
+ ir_node * fpcw;
+ ir_node * eax = noreg;
+ ir_node * ecx = noreg;
+ ir_node * edx = noreg;
+ unsigned const pop = be_Call_get_pop(node);
+ ir_type *const call_tp = be_Call_get_type(node);
/* Run the x87 simulator if the call returns a float value */
- call_tp = be_Call_get_type(node);
if (get_method_n_ress(call_tp) > 0) {
ir_type *const res_type = get_method_res_type(call_tp, 0);
ir_mode *const res_mode = get_type_mode(res_type);
}
}
- return res;
+ /* We do not want be_Call direct calls */
+ assert(be_Call_get_entity(node) == NULL);
+
+ match_arguments(&am, src_block, NULL, src_ptr, src_mem,
+ match_am | match_immediate);
+
+ i = get_irn_arity(node) - 1;
+ fpcw = be_transform_node(get_irn_n(node, i--));
+ for (; i >= be_pos_Call_first_arg; --i) {
+ arch_register_req_t const *const req =
+ arch_get_register_req(env_cg->arch_env, node, i);
+ ir_node *const reg_parm = be_transform_node(get_irn_n(node, i));
+
+ assert(req->type == arch_register_req_type_limited);
+ assert(req->cls == &ia32_reg_classes[CLASS_ia32_gp]);
+
+ switch (*req->limited) {
+ case 1 << REG_EAX: assert(eax == noreg); eax = reg_parm; break;
+ case 1 << REG_ECX: assert(ecx == noreg); ecx = reg_parm; break;
+ case 1 << REG_EDX: assert(edx == noreg); edx = reg_parm; break;
+ default: panic("Invalid GP register for register parameter");
+ }
+ }
+
+ mem = transform_AM_mem(irg, block, src_ptr, src_mem, addr->mem);
+ call = new_rd_ia32_Call(dbgi, irg, block, addr->base, addr->index, mem,
+ am.new_op2, sp, fpcw, eax, ecx, edx, pop, call_tp);
+ set_am_attributes(call, &am);
+ call = fix_mem_proj(call, &am);
+
+ if (get_irn_pinned(node) == op_pin_state_pinned)
+ set_irn_pinned(call, op_pin_state_pinned);
+
+ SET_IA32_ORIG_NODE(call, ia32_get_old_node_name(env_cg, node));
+ return call;
}
static ir_node *gen_be_IncSP(ir_node *node) {
/**
* Transform the Projs from a be_Call.
*/
-static ir_node *gen_Proj_be_Call(ir_node *node) {
+static ir_node *gen_Proj_be_Call(ir_node *node)
+{
ir_node *block = be_transform_node(get_nodes_block(node));
ir_node *call = get_Proj_pred(node);
ir_node *new_call = be_transform_node(call);
ir_mode *mode = get_irn_mode(node);
ir_node *sse_load;
const arch_register_class_t *cls;
+ ir_node *res;
/* The following is kinda tricky: If we're using SSE, then we have to
* move the result value of the call in floating point registers to an
call_res_pred = get_Proj_pred(call_res_new);
}
- if (call_res_pred == NULL || be_is_Call(call_res_pred)) {
+ if (call_res_pred == NULL || is_ia32_Call(call_res_pred)) {
return new_rd_Proj(dbgi, irg, block, new_call, mode_M,
- pn_be_Call_M_regular);
+ n_ia32_Call_mem);
} else {
assert(is_ia32_xLoad(call_res_pred));
return new_rd_Proj(dbgi, irg, block, call_res_pred, mode_M,
mode = cls->mode;
}
- return new_rd_Proj(dbgi, irg, block, new_call, mode, proj);
+ /* Map from be_Call to ia32_Call proj number */
+ if (proj == pn_be_Call_sp) {
+ proj = pn_ia32_Call_stack;
+ } else if (proj == pn_be_Call_M_regular) {
+ proj = pn_ia32_Call_M;
+ } else {
+ arch_register_req_t const *const req = arch_get_register_req(env_cg->arch_env, node, BE_OUT_POS(proj));
+ int const n_outs = get_ia32_n_res(new_call);
+ int i;
+
+ assert(proj >= pn_be_Call_first_res);
+ assert(req->type == arch_register_req_type_limited);
+
+ for (i = 0; i < n_outs; ++i) {
+ arch_register_req_t const *const new_req = get_ia32_out_req(new_call, i);
+
+ if (new_req->type != arch_register_req_type_limited ||
+ new_req->cls != req->cls ||
+ *new_req->limited != *req->limited)
+ continue;
+
+ proj = i;
+ break;
+ }
+ assert(i < n_outs);
+ }
+
+ res = new_rd_Proj(dbgi, irg, block, new_call, mode, proj);
+
+ /* TODO arch_set_irn_register() only operates on Projs, need variant with index */
+ switch (proj) {
+ case pn_ia32_Call_stack:
+ arch_set_irn_register(env_cg->arch_env, res, &ia32_gp_regs[REG_ESP]);
+ break;
+
+ case pn_ia32_Call_fpcw:
+ arch_set_irn_register(env_cg->arch_env, res, &ia32_fp_cw_regs[REG_FPCW]);
+ break;
+ }
+
+ return res;
}
/**
return gen_Proj_Bound(node);
case iro_Start:
proj = get_Proj_proj(node);
- if (proj == pn_Start_X_initial_exec) {
- ir_node *block = get_nodes_block(pred);
- dbg_info *dbgi = get_irn_dbg_info(node);
- ir_node *jump;
-
- /* we exchange the ProjX with a jump */
- block = be_transform_node(block);
- jump = new_rd_Jmp(dbgi, current_ir_graph, block);
- return jump;
- }
- if (node == be_get_old_anchor(anchor_tls)) {
- return gen_Proj_tls(node);
+ switch (proj) {
+ case pn_Start_X_initial_exec: {
+ ir_node *block = get_nodes_block(pred);
+ ir_node *new_block = be_transform_node(block);
+ dbg_info *dbgi = get_irn_dbg_info(node);
+ /* we exchange the ProjX with a jump */
+ ir_node *jump = new_rd_Jmp(dbgi, current_ir_graph, new_block);
+
+ return jump;
+ }
+
+ case pn_Start_P_tls:
+ return gen_Proj_tls(node);
}
break;
/**
* Pre-transform all unknown and noreg nodes.
*/
-static void ia32_pretransform_node(void *arch_cg) {
- ia32_code_gen_t *cg = arch_cg;
+static void ia32_pretransform_node(void)
+{
+ ia32_code_gen_t *cg = env_cg;
cg->unknown_gp = be_pre_transform_node(cg->unknown_gp);
cg->unknown_vfp = be_pre_transform_node(cg->unknown_vfp);
assert(n_outs < (int) sizeof(unsigned) * 8);
foreach_out_edge(node, edge) {
ir_node *proj = get_edge_src_irn(edge);
- int pn = get_Proj_proj(proj);
+ int pn;
+
+ /* Skip keep-alive edges to the End node */
+ if (is_End(proj))
+ continue;
if (get_irn_mode(proj) == mode_M)
continue;
+ pn = get_Proj_proj(proj);
assert(pn < n_outs);
found_projs |= 1 << pn;
}
}
/* do the transformation */
-void ia32_transform_graph(ia32_code_gen_t *cg) {
+void ia32_transform_graph(ia32_code_gen_t *cg)
+{
int cse_last;
- ir_graph *irg = cg->irg;
register_transformers();
env_cg = cg;
initial_fpcw = NULL;
BE_TIMER_PUSH(t_heights);
- heights = heights_new(irg);
+ heights = heights_new(cg->irg);
BE_TIMER_POP(t_heights);
ia32_calculate_non_address_mode_nodes(cg->birg);
cse_last = get_opt_cse();
set_opt_cse(0);
- be_transform_graph(cg->birg, ia32_pretransform_node, cg);
+ be_transform_graph(cg->birg, ia32_pretransform_node);
set_opt_cse(cse_last);