#include "array_t.h"
#include "height.h"
-#include "../benode_t.h"
+#include "../benode.h"
#include "../besched.h"
#include "../beabi.h"
#include "../beutil.h"
-#include "../beirg_t.h"
+#include "../beirg.h"
#include "../betranshlp.h"
#include "../be_t.h"
set_ia32_op_type(load, ia32_AddrModeS);
set_ia32_am_sc(load, floatent);
arch_irn_add_flags(load, arch_irn_flags_rematerializable);
- res = new_r_Proj(current_ir_graph, block, load, mode_xmm, pn_ia32_xLoad_res);
+ res = new_r_Proj(block, load, mode_xmm, pn_ia32_xLoad_res);
}
} else {
if (is_Const_null(node)) {
set_ia32_op_type(load, ia32_AddrModeS);
set_ia32_am_sc(load, floatent);
arch_irn_add_flags(load, arch_irn_flags_rematerializable);
- res = new_r_Proj(current_ir_graph, block, load, mode_vfp, pn_ia32_vfld_res);
+ res = new_r_Proj(block, load, mode_vfp, pn_ia32_vfld_res);
}
}
#ifdef CONSTRUCT_SSE_CONST
* @param mode the mode for the float type (might be integer mode for SSE2 types)
* @param align alignment
*/
-static ir_type *ia32_create_float_type(ir_mode *mode, unsigned align) {
- char buf[32];
+static ir_type *ia32_create_float_type(ir_mode *mode, unsigned align)
+{
ir_type *tp;
assert(align <= 16);
static ir_type *int_Iu[16] = {NULL, };
if (int_Iu[align] == NULL) {
- snprintf(buf, sizeof(buf), "int_Iu_%u", align);
- int_Iu[align] = tp = new_type_primitive(new_id_from_str(buf), mode);
+ int_Iu[align] = tp = new_type_primitive(mode);
/* set the specified alignment */
set_type_alignment_bytes(tp, align);
}
static ir_type *int_Lu[16] = {NULL, };
if (int_Lu[align] == NULL) {
- snprintf(buf, sizeof(buf), "int_Lu_%u", align);
- int_Lu[align] = tp = new_type_primitive(new_id_from_str(buf), mode);
+ int_Lu[align] = tp = new_type_primitive(mode);
/* set the specified alignment */
set_type_alignment_bytes(tp, align);
}
static ir_type *float_F[16] = {NULL, };
if (float_F[align] == NULL) {
- snprintf(buf, sizeof(buf), "float_F_%u", align);
- float_F[align] = tp = new_type_primitive(new_id_from_str(buf), mode);
+ float_F[align] = tp = new_type_primitive(mode);
/* set the specified alignment */
set_type_alignment_bytes(tp, align);
}
static ir_type *float_D[16] = {NULL, };
if (float_D[align] == NULL) {
- snprintf(buf, sizeof(buf), "float_D_%u", align);
- float_D[align] = tp = new_type_primitive(new_id_from_str(buf), mode);
+ float_D[align] = tp = new_type_primitive(mode);
/* set the specified alignment */
set_type_alignment_bytes(tp, align);
}
static ir_type *float_E[16] = {NULL, };
if (float_E[align] == NULL) {
- snprintf(buf, sizeof(buf), "float_E_%u", align);
- float_E[align] = tp = new_type_primitive(new_id_from_str(buf), mode);
+ float_E[align] = tp = new_type_primitive(mode);
/* set the specified alignment */
set_type_alignment_bytes(tp, align);
}
*
* @param tp the atomic type
*/
-static ir_type *ia32_create_float_array(ir_type *tp) {
- char buf[32];
+static ir_type *ia32_create_float_array(ir_type *tp)
+{
ir_mode *mode = get_type_mode(tp);
unsigned align = get_type_alignment_bytes(tp);
ir_type *arr;
if (float_F[align] != NULL)
return float_F[align];
- snprintf(buf, sizeof(buf), "arr_float_F_%u", align);
- arr = float_F[align] = new_type_array(new_id_from_str(buf), 1, tp);
+ arr = float_F[align] = new_type_array(1, tp);
} else if (mode == mode_D) {
static ir_type *float_D[16] = {NULL, };
if (float_D[align] != NULL)
return float_D[align];
- snprintf(buf, sizeof(buf), "arr_float_D_%u", align);
- arr = float_D[align] = new_type_array(new_id_from_str(buf), 1, tp);
+ arr = float_D[align] = new_type_array(1, tp);
} else {
static ir_type *float_E[16] = {NULL, };
if (float_E[align] != NULL)
return float_E[align];
- snprintf(buf, sizeof(buf), "arr_float_E_%u", align);
- arr = float_E[align] = new_type_array(new_id_from_str(buf), 1, tp);
+ arr = float_E[align] = new_type_array(1, tp);
}
set_type_alignment_bytes(arr, align);
set_type_size_bytes(arr, 2 * get_type_size_bytes(tp));
}
am->op_type = ia32_AddrModeS;
} else {
+ ir_mode *mode;
am->op_type = ia32_Normal;
if (flags & match_try_am) {
return;
}
- new_op1 = (op1 == NULL ? NULL : be_transform_node(op1));
- if (new_op2 == NULL)
- new_op2 = be_transform_node(op2);
- am->ls_mode =
- (flags & match_mode_neutral ? mode_Iu : get_irn_mode(op2));
+ mode = get_irn_mode(op2);
+ if (flags & match_upconv_32 && get_mode_size_bits(mode) != 32) {
+ new_op1 = (op1 == NULL ? NULL : create_upconv(op1, NULL));
+ if (new_op2 == NULL)
+ new_op2 = create_upconv(op2, NULL);
+ am->ls_mode = mode_Iu;
+ } else {
+ new_op1 = (op1 == NULL ? NULL : be_transform_node(op1));
+ if (new_op2 == NULL)
+ new_op2 = be_transform_node(op2);
+ am->ls_mode = (flags & match_mode_neutral) ? mode_Iu : mode;
+ }
}
if (addr->base == NULL)
addr->base = noreg_GP;
if (mode != mode_T) {
set_irn_mode(node, mode_T);
- return new_rd_Proj(NULL, current_ir_graph, get_nodes_block(node), node, mode, pn_ia32_res);
+ return new_rd_Proj(NULL, get_nodes_block(node), node, mode, pn_ia32_res);
} else {
return node;
}
static ir_node *gen_binop_x87_float(ir_node *node, ir_node *op1, ir_node *op2,
construct_binop_float_func *func)
{
- ir_mode *mode = get_irn_mode(node);
+ ir_mode *mode = get_irn_mode(node);
dbg_info *dbgi;
ir_node *block, *new_block, *new_node;
ia32_address_mode_t am;
* variants */
match_flags_t flags = match_commutative;
+ /* happens for div nodes... */
+ if (mode == mode_T)
+ mode = get_divop_resmod(node);
+
/* cannot use address mode with long double on x87 */
if (get_mode_size_bits(mode) <= 64)
flags |= match_am;
ir_node *new_node;
ir_node *proj_res_high;
+ if (get_mode_size_bits(mode) != 32) {
+ panic("Mulh without 32bit size not supported in ia32 backend (%+F)", node);
+ }
+
if (mode_is_signed(mode)) {
new_node = gen_binop(node, op1, op2, new_bd_ia32_IMul1OP, match_commutative | match_am);
- proj_res_high = new_rd_Proj(dbgi, current_ir_graph, new_block, new_node,
- mode_Iu, pn_ia32_IMul1OP_res_high);
+ proj_res_high = new_rd_Proj(dbgi, new_block, new_node, mode_Iu, pn_ia32_IMul1OP_res_high);
} else {
new_node = gen_binop(node, op1, op2, new_bd_ia32_Mul, match_commutative | match_am);
- proj_res_high = new_rd_Proj(dbgi, current_ir_graph, new_block, new_node,
- mode_Iu, pn_ia32_Mul_res_high);
+ proj_res_high = new_rd_Proj(dbgi, new_block, new_node, mode_Iu, pn_ia32_Mul_res_high);
}
return proj_res_high;
}
| match_am | match_immediate);
}
-static ir_node *transform_AM_mem(ir_graph *const irg, ir_node *const block,
+static ir_node *transform_AM_mem(ir_node *const block,
ir_node *const src_val,
ir_node *const src_mem,
ir_node *const am_mem)
ins[n++] = am_mem;
- return new_r_Sync(irg, block, n, ins);
+ return new_r_Sync(block, n, ins);
} else {
ir_node *ins[2];
ins[0] = be_transform_node(src_mem);
ins[1] = am_mem;
- return new_r_Sync(irg, block, 2, ins);
+ return new_r_Sync(block, 2, ins);
}
}
panic("invalid divmod node %+F", node);
}
- match_arguments(&am, block, op1, op2, NULL, match_am);
+ match_arguments(&am, block, op1, op2, NULL, match_am | match_upconv_32);
/* Beware: We don't need a Sync, if the memory predecessor of the Div node
is the memory of the consumed address. We can have only the second op as address
in Div nodes, so check only op2. */
- new_mem = transform_AM_mem(current_ir_graph, block, op2, mem, addr->mem);
+ new_mem = transform_AM_mem(block, op2, mem, addr->mem);
if (mode_is_signed(mode)) {
sign_extension = create_sex_32_64(dbgi, new_block, am.new_op1, node);
dbgi = get_irn_dbg_info(node);
block = be_transform_node(src_block);
- new_mem = transform_AM_mem(current_ir_graph, block, am.am_node, mem, addr->mem);
+ new_mem = transform_AM_mem(block, am.am_node, mem, addr->mem);
if (get_mode_size_bits(mode) == 8) {
new_node = func8bit(dbgi, block, addr->base, addr->index, new_mem, new_op);
dbgi = get_irn_dbg_info(node);
block = be_transform_node(src_block);
- new_mem = transform_AM_mem(current_ir_graph, block, am.am_node, mem, addr->mem);
+ new_mem = transform_AM_mem(block, am.am_node, mem, addr->mem);
new_node = func(dbgi, block, addr->base, addr->index, new_mem);
set_address(new_node, addr);
set_ia32_op_type(new_node, ia32_AddrModeD);
} while (size != 0);
if (i > 1) {
- return new_rd_Sync(dbgi, current_ir_graph, new_block, i, ins);
+ return new_rd_Sync(dbgi, new_block, i, ins);
} else {
return ins[0];
}
/**
* Generate a vfist or vfisttp instruction.
*/
-static ir_node *gen_vfist(dbg_info *dbgi, ir_graph *irg, ir_node *block, ir_node *base, ir_node *index,
+static ir_node *gen_vfist(dbg_info *dbgi, ir_node *block, ir_node *base, ir_node *index,
ir_node *mem, ir_node *val, ir_node **fist)
{
ir_node *new_node;
if (ia32_cg_config.use_fisttp) {
/* Note: fisttp ALWAYS pop the tos. We have to ensure here that the value is copied
if other users exists */
- const arch_register_class_t *reg_class = &ia32_reg_classes[CLASS_ia32_vfp];
ir_node *vfisttp = new_bd_ia32_vfisttp(dbgi, block, base, index, mem, val);
- ir_node *value = new_r_Proj(irg, block, vfisttp, mode_E, pn_ia32_vfisttp_res);
- be_new_Keep(reg_class, irg, block, 1, &value);
+ ir_node *value = new_r_Proj(block, vfisttp, mode_E, pn_ia32_vfisttp_res);
+ be_new_Keep(block, 1, &value);
- new_node = new_r_Proj(irg, block, vfisttp, mode_M, pn_ia32_vfisttp_M);
+ new_node = new_r_Proj(block, vfisttp, mode_M, pn_ia32_vfisttp_M);
*fist = vfisttp;
} else {
ir_node *trunc_mode = ia32_new_Fpu_truncate(env_cg);
val = op;
}
new_val = be_transform_node(val);
- new_node = gen_vfist(dbgi, current_ir_graph, new_block, addr.base, addr.index, addr.mem, new_val, &store);
+ new_node = gen_vfist(dbgi, new_block, addr.base, addr.index, addr.mem, new_val, &store);
} else {
new_val = create_immediate_or_transform(val, 0);
assert(mode != mode_b);
switch_max = pn;
}
- if ((unsigned long) (switch_max - switch_min) > 256000) {
- panic("Size of switch %+F bigger than 256000", node);
+ if ((unsigned long) (switch_max - switch_min) > 128000) {
+ panic("Size of switch %+F bigger than 128000", node);
}
if (switch_min != 0) {
*/
static ir_node *create_Doz(ir_node *psi, ir_node *a, ir_node *b)
{
- ir_graph *irg = current_ir_graph;
ir_mode *mode = get_irn_mode(psi);
ir_node *new_node, *sub, *sbb, *eflags, *block;
} else {
sub = new_node;
set_irn_mode(sub, mode_T);
- new_node = new_rd_Proj(NULL, irg, block, sub, mode, pn_ia32_res);
+ new_node = new_rd_Proj(NULL, block, sub, mode, pn_ia32_res);
}
- eflags = new_rd_Proj(NULL, irg, block, sub, mode_Iu, pn_ia32_Sub_flags);
+ eflags = new_rd_Proj(NULL, block, sub, mode_Iu, pn_ia32_Sub_flags);
dbgi = get_irn_dbg_info(psi);
sbb = new_bd_ia32_Sbb0(dbgi, block, eflags);
unsigned scale;
flags = get_flags_node(cond, &pnc);
- new_node = create_set_32bit(dbgi, new_block, flags, pnc, node, /*is_premuted=*/0);
+ new_node = create_set_32bit(dbgi, new_block, flags, pnc, node, /*is_permuted=*/0);
if (ia32_cg_config.use_sse2) {
/* cannot load from different mode on SSE */
load = new_bd_ia32_vfld(dbgi, block, am.addr.base, am.addr.index, am.addr.mem, new_mode);
set_am_attributes(load, &am);
- return new_rd_Proj(NULL, current_ir_graph, block, load, mode_vfp, pn_ia32_res);
+ return new_rd_Proj(NULL, block, load, mode_vfp, pn_ia32_res);
}
panic("cannot transform floating point Mux");
ir_mode *mode = get_irn_mode(node);
ir_node *fist, *load, *mem;
- mem = gen_vfist(dbgi, irg, block, get_irg_frame(irg), noreg_GP, nomem, new_op, &fist);
+ mem = gen_vfist(dbgi, block, get_irg_frame(irg), noreg_GP, nomem, new_op, &fist);
set_irn_pinned(fist, op_pin_state_floats);
set_ia32_use_frame(fist);
set_ia32_op_type(fist, ia32_AddrModeD);
}
SET_IA32_ORIG_NODE(load, node);
- return new_r_Proj(irg, block, load, mode_Iu, pn_ia32_Load_res);
+ return new_r_Proj(block, load, mode_Iu, pn_ia32_Load_res);
}
/**
static ir_node *gen_x87_strict_conv(ir_mode *tgt_mode, ir_node *node)
{
ir_node *block = get_nodes_block(node);
- ir_graph *irg = current_ir_graph;
+ ir_graph *irg = get_Block_irg(block);
dbg_info *dbgi = get_irn_dbg_info(node);
ir_node *frame = get_irg_frame(irg);
ir_node *store, *load;
set_ia32_op_type(load, ia32_AddrModeS);
SET_IA32_ORIG_NODE(load, node);
- new_node = new_r_Proj(irg, block, load, mode_E, pn_ia32_vfld_res);
+ new_node = new_r_Proj(block, load, mode_E, pn_ia32_vfld_res);
return new_node;
}
{
ir_node *src_block = get_nodes_block(node);
ir_node *block = be_transform_node(src_block);
- ir_graph *irg = current_ir_graph;
+ ir_graph *irg = get_Block_irg(block);
dbg_info *dbgi = get_irn_dbg_info(node);
ir_node *op = get_Conv_op(node);
ir_node *new_op = NULL;
ia32_address_t *addr = &am.addr;
fild = new_bd_ia32_vfild(dbgi, block, addr->base, addr->index, addr->mem);
- new_node = new_r_Proj(irg, block, fild, mode_vfp, pn_ia32_vfild_res);
+ new_node = new_r_Proj(block, fild, mode_vfp, pn_ia32_vfild_res);
set_am_attributes(fild, &am);
SET_IA32_ORIG_NODE(fild, node);
in[0] = zero_store;
in[1] = store;
- store = new_rd_Sync(dbgi, irg, block, 2, in);
+ store = new_rd_Sync(dbgi, block, 2, in);
store_mode = mode_Ls;
} else {
store_mode = mode_Is;
set_ia32_op_type(fild, ia32_AddrModeS);
set_ia32_ls_mode(fild, store_mode);
- new_node = new_r_Proj(irg, block, fild, mode_vfp, pn_ia32_vfild_res);
+ new_node = new_r_Proj(block, fild, mode_vfp, pn_ia32_vfild_res);
return new_node;
}
assert(!mode_is_int(src_mode) || src_bits <= 32);
assert(!mode_is_int(tgt_mode) || tgt_bits <= 32);
+ /* modeB -> X should already be lowered by the lower_mode_b pass */
if (src_mode == mode_b) {
- assert(mode_is_int(tgt_mode) || mode_is_reference(tgt_mode));
- /* nothing to do, we already model bools as 0/1 ints */
- return be_transform_node(op);
+ panic("ConvB not lowered %+F", node);
}
if (src_mode == tgt_mode) {
set_ia32_op_type(fld, ia32_AddrModeS);
set_ia32_use_frame(fld);
- mproj = new_r_Proj(irg, block, fld, mode_M, pn_ia32_vfld_M);
- fld = new_r_Proj(irg, block, fld, mode_vfp, pn_ia32_vfld_res);
+ mproj = new_r_Proj(block, fld, mode_M, pn_ia32_vfld_M);
+ fld = new_r_Proj(block, fld, mode_vfp, pn_ia32_vfld_res);
/* create a new barrier */
arity = get_irn_arity(barrier);
*/
static ir_node *gen_Phi(ir_node *node)
{
+ const arch_register_req_t *req;
ir_node *block = be_transform_node(get_nodes_block(node));
ir_graph *irg = current_ir_graph;
dbg_info *dbgi = get_irn_dbg_info(node);
assert(get_mode_size_bits(mode) <= 32);
/* all integer operations are on 32bit registers now */
mode = mode_Iu;
+ req = ia32_reg_classes[CLASS_ia32_gp].class_req;
} else if (mode_is_float(mode)) {
if (ia32_cg_config.use_sse2) {
mode = mode_xmm;
+ req = ia32_reg_classes[CLASS_ia32_xmm].class_req;
} else {
mode = mode_vfp;
+ req = ia32_reg_classes[CLASS_ia32_vfp].class_req;
}
+ } else {
+ req = arch_no_register_req;
}
/* phi nodes allow loops, so we use the old arguments for now
copy_node_attr(node, phi);
be_duplicate_deps(node, phi);
+ arch_set_out_register_req(phi, 0, req);
+
be_enqueue_preds(node);
return phi;
}
+/**
+ * Transform a Jmp node.
+ *
+ * Creates an ia32 Jmp in the transformed block (be_transform_node of the
+ * Jmp's block) and tags it with the original node for debugging/ORIG_NODE
+ * bookkeeping.  Registered via GEN(Jmp) in the transformer dispatch table.
+ *
+ * @param node  the (middleend) Jmp node to transform
+ * @return      the new ia32 Jmp node
+ */
+static ir_node *gen_Jmp(ir_node *node)
+{
+ ir_node *block = get_nodes_block(node);
+ ir_node *new_block = be_transform_node(block);
+ dbg_info *dbgi = get_irn_dbg_info(node);
+ ir_node *new_node;
+
+ new_node = new_bd_ia32_Jmp(dbgi, new_block);
+ SET_IA32_ORIG_NODE(new_node, node);
+
+ return new_node;
+}
+
/**
* Transform IJmp
*/
if (is_Const_0(lower)) {
/* typical case for Java */
ir_node *sub, *res, *flags, *block;
- ir_graph *irg = current_ir_graph;
res = gen_binop(node, get_Bound_index(node), get_Bound_upper(node),
new_bd_ia32_Sub, match_mode_neutral | match_am | match_immediate);
if (! is_Proj(res)) {
sub = res;
set_irn_mode(sub, mode_T);
- res = new_rd_Proj(NULL, irg, block, sub, mode_Iu, pn_ia32_res);
+ res = new_rd_Proj(NULL, block, sub, mode_Iu, pn_ia32_res);
} else {
sub = get_Proj_pred(res);
}
- flags = new_rd_Proj(NULL, irg, block, sub, mode_Iu, pn_ia32_Sub_flags);
+ flags = new_rd_Proj(NULL, block, sub, mode_Iu, pn_ia32_Sub_flags);
new_node = new_bd_ia32_Jcc(dbgi, block, flags, pn_Cmp_Lt | ia32_pn_Cmp_unsigned);
SET_IA32_ORIG_NODE(new_node, node);
} else {
in[0] = store_low;
in[1] = store_high;
- sync = new_rd_Sync(dbgi, irg, block, 2, in);
+ sync = new_rd_Sync(dbgi, block, 2, in);
/* do a fild */
fild = new_bd_ia32_vfild(dbgi, block, frame, noreg_GP, sync);
SET_IA32_ORIG_NODE(fild, node);
- res = new_r_Proj(irg, block, fild, mode_vfp, pn_ia32_vfild_res);
+ res = new_r_Proj(block, fild, mode_vfp, pn_ia32_vfild_res);
if (! mode_is_signed(get_irn_mode(val_high))) {
ia32_address_mode_t am;
set_am_attributes(fadd, &am);
set_irn_mode(fadd, mode_T);
- res = new_rd_Proj(NULL, irg, block, fadd, mode_vfp, pn_ia32_res);
+ res = new_rd_Proj(NULL, block, fadd, mode_vfp, pn_ia32_res);
}
return res;
}
{
ir_node *src_block = get_nodes_block(node);
ir_node *block = be_transform_node(src_block);
- ir_graph *irg = current_ir_graph;
+ ir_graph *irg = get_Block_irg(block);
dbg_info *dbgi = get_irn_dbg_info(node);
ir_node *frame = get_irg_frame(irg);
ir_node *val = get_irn_n(node, n_ia32_l_FloattoLL_val);
ir_node *new_val = be_transform_node(val);
ir_node *fist, *mem;
- mem = gen_vfist(dbgi, irg, block, frame, noreg_GP, nomem, new_val, &fist);
+ mem = gen_vfist(dbgi, block, frame, noreg_GP, nomem, new_val, &fist);
SET_IA32_ORIG_NODE(fist, node);
set_ia32_use_frame(fist);
set_ia32_op_type(fist, ia32_AddrModeD);
static ir_node *gen_Proj_l_FloattoLL(ir_node *node)
{
- ir_graph *irg = current_ir_graph;
ir_node *block = be_transform_node(get_nodes_block(node));
+ ir_graph *irg = get_Block_irg(block);
ir_node *pred = get_Proj_pred(node);
ir_node *new_pred = be_transform_node(pred);
ir_node *frame = get_irg_frame(irg);
assert(pn == pn_ia32_l_FloattoLL_res_low);
}
- proj = new_r_Proj(irg, block, load, mode_Iu, pn_ia32_Load_res);
+ proj = new_r_Proj(block, load, mode_Iu, pn_ia32_Load_res);
return proj;
}
ir_node *block = be_transform_node(get_nodes_block(node));
ir_node *pred = get_Proj_pred(node);
ir_node *new_pred = be_transform_node(pred);
- ir_graph *irg = current_ir_graph;
dbg_info *dbgi = get_irn_dbg_info(node);
long proj = get_Proj_proj(node);
if (proj == pn_be_AddSP_sp) {
- ir_node *res = new_rd_Proj(dbgi, irg, block, new_pred, mode_Iu,
+ ir_node *res = new_rd_Proj(dbgi, block, new_pred, mode_Iu,
pn_ia32_SubSP_stack);
arch_set_irn_register(res, &ia32_gp_regs[REG_ESP]);
return res;
} else if (proj == pn_be_AddSP_res) {
- return new_rd_Proj(dbgi, irg, block, new_pred, mode_Iu,
+ return new_rd_Proj(dbgi, block, new_pred, mode_Iu,
pn_ia32_SubSP_addr);
} else if (proj == pn_be_AddSP_M) {
- return new_rd_Proj(dbgi, irg, block, new_pred, mode_M, pn_ia32_SubSP_M);
+ return new_rd_Proj(dbgi, block, new_pred, mode_M, pn_ia32_SubSP_M);
}
panic("No idea how to transform proj->AddSP");
ir_node *block = be_transform_node(get_nodes_block(node));
ir_node *pred = get_Proj_pred(node);
ir_node *new_pred = be_transform_node(pred);
- ir_graph *irg = current_ir_graph;
dbg_info *dbgi = get_irn_dbg_info(node);
long proj = get_Proj_proj(node);
if (proj == pn_be_SubSP_sp) {
- ir_node *res = new_rd_Proj(dbgi, irg, block, new_pred, mode_Iu,
+ ir_node *res = new_rd_Proj(dbgi, block, new_pred, mode_Iu,
pn_ia32_AddSP_stack);
arch_set_irn_register(res, &ia32_gp_regs[REG_ESP]);
return res;
} else if (proj == pn_be_SubSP_M) {
- return new_rd_Proj(dbgi, irg, block, new_pred, mode_M, pn_ia32_AddSP_M);
+ return new_rd_Proj(dbgi, block, new_pred, mode_M, pn_ia32_AddSP_M);
}
panic("No idea how to transform proj->SubSP");
ir_node *new_pred;
ir_node *block = be_transform_node(get_nodes_block(node));
ir_node *pred = get_Proj_pred(node);
- ir_graph *irg = current_ir_graph;
dbg_info *dbgi = get_irn_dbg_info(node);
long proj = get_Proj_proj(node);
*/
if (is_Load(pred) && proj == pn_Load_M && get_irn_n_edges(pred) > 1) {
ir_node *res;
+ ir_node *old_block = get_nodes_block(node);
/* this is needed, because sometimes we have loops that are only
reachable through the ProjM */
be_enqueue_preds(node);
/* do it in 2 steps, to silence firm verifier */
- res = new_rd_Proj(dbgi, irg, block, pred, mode_M, pn_Load_M);
+ res = new_rd_Proj(dbgi, old_block, pred, mode_M, pn_Load_M);
set_Proj_proj(res, pn_ia32_mem);
return res;
}
if (is_ia32_Load(new_pred)) {
switch (proj) {
case pn_Load_res:
- return new_rd_Proj(dbgi, irg, block, new_pred, mode_Iu, pn_ia32_Load_res);
+ return new_rd_Proj(dbgi, block, new_pred, mode_Iu, pn_ia32_Load_res);
case pn_Load_M:
- return new_rd_Proj(dbgi, irg, block, new_pred, mode_M, pn_ia32_Load_M);
+ return new_rd_Proj(dbgi, block, new_pred, mode_M, pn_ia32_Load_M);
case pn_Load_X_regular:
- return new_rd_Jmp(dbgi, irg, block);
+ return new_rd_Jmp(dbgi, block);
case pn_Load_X_except:
/* This Load might raise an exception. Mark it. */
set_ia32_exc_label(new_pred, 1);
- return new_rd_Proj(dbgi, irg, block, new_pred, mode_X, pn_ia32_Load_X_exc);
+ return new_rd_Proj(dbgi, block, new_pred, mode_X, pn_ia32_Load_X_exc);
default:
break;
}
is_ia32_Conv_I2I8Bit(new_pred)) {
set_irn_mode(new_pred, mode_T);
if (proj == pn_Load_res) {
- return new_rd_Proj(dbgi, irg, block, new_pred, mode_Iu, pn_ia32_res);
+ return new_rd_Proj(dbgi, block, new_pred, mode_Iu, pn_ia32_res);
} else if (proj == pn_Load_M) {
- return new_rd_Proj(dbgi, irg, block, new_pred, mode_M, pn_ia32_mem);
+ return new_rd_Proj(dbgi, block, new_pred, mode_M, pn_ia32_mem);
}
} else if (is_ia32_xLoad(new_pred)) {
switch (proj) {
case pn_Load_res:
- return new_rd_Proj(dbgi, irg, block, new_pred, mode_xmm, pn_ia32_xLoad_res);
+ return new_rd_Proj(dbgi, block, new_pred, mode_xmm, pn_ia32_xLoad_res);
case pn_Load_M:
- return new_rd_Proj(dbgi, irg, block, new_pred, mode_M, pn_ia32_xLoad_M);
+ return new_rd_Proj(dbgi, block, new_pred, mode_M, pn_ia32_xLoad_M);
case pn_Load_X_regular:
- return new_rd_Jmp(dbgi, irg, block);
+ return new_rd_Jmp(dbgi, block);
case pn_Load_X_except:
/* This Load might raise an exception. Mark it. */
set_ia32_exc_label(new_pred, 1);
- return new_rd_Proj(dbgi, irg, block, new_pred, mode_X, pn_ia32_xLoad_X_exc);
+ return new_rd_Proj(dbgi, block, new_pred, mode_X, pn_ia32_xLoad_X_exc);
default:
break;
}
} else if (is_ia32_vfld(new_pred)) {
switch (proj) {
case pn_Load_res:
- return new_rd_Proj(dbgi, irg, block, new_pred, mode_vfp, pn_ia32_vfld_res);
+ return new_rd_Proj(dbgi, block, new_pred, mode_vfp, pn_ia32_vfld_res);
case pn_Load_M:
- return new_rd_Proj(dbgi, irg, block, new_pred, mode_M, pn_ia32_vfld_M);
+ return new_rd_Proj(dbgi, block, new_pred, mode_M, pn_ia32_vfld_M);
case pn_Load_X_regular:
- return new_rd_Jmp(dbgi, irg, block);
+ return new_rd_Jmp(dbgi, block);
case pn_Load_X_except:
/* This Load might raise an exception. Mark it. */
set_ia32_exc_label(new_pred, 1);
- return new_rd_Proj(dbgi, irg, block, new_pred, mode_X, pn_ia32_xLoad_X_exc);
+ return new_rd_Proj(dbgi, block, new_pred, mode_X, pn_ia32_vfld_X_exc);
default:
break;
}
if (proj != pn_Load_M) {
panic("internal error: transformed node not a Load");
}
- return new_rd_Proj(dbgi, irg, block, new_pred, mode_M, 1);
+ return new_rd_Proj(dbgi, block, new_pred, mode_M, 1);
}
panic("No idea how to transform proj");
ir_node *block = be_transform_node(get_nodes_block(node));
ir_node *pred = get_Proj_pred(node);
ir_node *new_pred = be_transform_node(pred);
- ir_graph *irg = current_ir_graph;
dbg_info *dbgi = get_irn_dbg_info(node);
long proj = get_Proj_proj(node);
case iro_Div:
switch (proj) {
case pn_Div_M:
- return new_rd_Proj(dbgi, irg, block, new_pred, mode_M, pn_ia32_Div_M);
+ return new_rd_Proj(dbgi, block, new_pred, mode_M, pn_ia32_Div_M);
case pn_Div_res:
- return new_rd_Proj(dbgi, irg, block, new_pred, mode_Iu, pn_ia32_Div_div_res);
+ return new_rd_Proj(dbgi, block, new_pred, mode_Iu, pn_ia32_Div_div_res);
case pn_Div_X_regular:
- return new_rd_Jmp(dbgi, irg, block);
+ return new_rd_Jmp(dbgi, block);
case pn_Div_X_except:
set_ia32_exc_label(new_pred, 1);
- return new_rd_Proj(dbgi, irg, block, new_pred, mode_X, pn_ia32_Div_X_exc);
+ return new_rd_Proj(dbgi, block, new_pred, mode_X, pn_ia32_Div_X_exc);
default:
break;
}
case iro_Mod:
switch (proj) {
case pn_Mod_M:
- return new_rd_Proj(dbgi, irg, block, new_pred, mode_M, pn_ia32_Div_M);
+ return new_rd_Proj(dbgi, block, new_pred, mode_M, pn_ia32_Div_M);
case pn_Mod_res:
- return new_rd_Proj(dbgi, irg, block, new_pred, mode_Iu, pn_ia32_Div_mod_res);
+ return new_rd_Proj(dbgi, block, new_pred, mode_Iu, pn_ia32_Div_mod_res);
case pn_Mod_X_except:
set_ia32_exc_label(new_pred, 1);
- return new_rd_Proj(dbgi, irg, block, new_pred, mode_X, pn_ia32_Div_X_exc);
+ return new_rd_Proj(dbgi, block, new_pred, mode_X, pn_ia32_Div_X_exc);
default:
break;
}
case iro_DivMod:
switch (proj) {
case pn_DivMod_M:
- return new_rd_Proj(dbgi, irg, block, new_pred, mode_M, pn_ia32_Div_M);
+ return new_rd_Proj(dbgi, block, new_pred, mode_M, pn_ia32_Div_M);
case pn_DivMod_res_div:
- return new_rd_Proj(dbgi, irg, block, new_pred, mode_Iu, pn_ia32_Div_div_res);
+ return new_rd_Proj(dbgi, block, new_pred, mode_Iu, pn_ia32_Div_div_res);
case pn_DivMod_res_mod:
- return new_rd_Proj(dbgi, irg, block, new_pred, mode_Iu, pn_ia32_Div_mod_res);
+ return new_rd_Proj(dbgi, block, new_pred, mode_Iu, pn_ia32_Div_mod_res);
case pn_DivMod_X_regular:
- return new_rd_Jmp(dbgi, irg, block);
+ return new_rd_Jmp(dbgi, block);
case pn_DivMod_X_except:
set_ia32_exc_label(new_pred, 1);
- return new_rd_Proj(dbgi, irg, block, new_pred, mode_X, pn_ia32_Div_X_exc);
+ return new_rd_Proj(dbgi, block, new_pred, mode_X, pn_ia32_Div_X_exc);
default:
break;
}
ir_node *block = be_transform_node(get_nodes_block(node));
ir_node *pred = get_Proj_pred(node);
ir_node *new_pred = be_transform_node(pred);
- ir_graph *irg = current_ir_graph;
dbg_info *dbgi = get_irn_dbg_info(node);
long proj = get_Proj_proj(node);
switch (proj) {
case pn_CopyB_M_regular:
if (is_ia32_CopyB_i(new_pred)) {
- return new_rd_Proj(dbgi, irg, block, new_pred, mode_M, pn_ia32_CopyB_i_M);
+ return new_rd_Proj(dbgi, block, new_pred, mode_M, pn_ia32_CopyB_i_M);
} else if (is_ia32_CopyB(new_pred)) {
- return new_rd_Proj(dbgi, irg, block, new_pred, mode_M, pn_ia32_CopyB_M);
+ return new_rd_Proj(dbgi, block, new_pred, mode_M, pn_ia32_CopyB_M);
}
break;
default:
ir_node *block = be_transform_node(get_nodes_block(node));
ir_node *pred = get_Proj_pred(node);
ir_node *new_pred = be_transform_node(pred);
- ir_graph *irg = current_ir_graph;
dbg_info *dbgi = get_irn_dbg_info(node);
long proj = get_Proj_proj(node);
switch (proj) {
case pn_Quot_M:
if (is_ia32_xDiv(new_pred)) {
- return new_rd_Proj(dbgi, irg, block, new_pred, mode_M, pn_ia32_xDiv_M);
+ return new_rd_Proj(dbgi, block, new_pred, mode_M, pn_ia32_xDiv_M);
} else if (is_ia32_vfdiv(new_pred)) {
- return new_rd_Proj(dbgi, irg, block, new_pred, mode_M, pn_ia32_vfdiv_M);
+ return new_rd_Proj(dbgi, block, new_pred, mode_M, pn_ia32_vfdiv_M);
}
break;
case pn_Quot_res:
if (is_ia32_xDiv(new_pred)) {
- return new_rd_Proj(dbgi, irg, block, new_pred, mode_xmm, pn_ia32_xDiv_res);
+ return new_rd_Proj(dbgi, block, new_pred, mode_xmm, pn_ia32_xDiv_res);
} else if (is_ia32_vfdiv(new_pred)) {
- return new_rd_Proj(dbgi, irg, block, new_pred, mode_vfp, pn_ia32_vfdiv_res);
+ return new_rd_Proj(dbgi, block, new_pred, mode_vfp, pn_ia32_vfdiv_res);
}
break;
case pn_Quot_X_regular:
static ir_node *gen_be_Call(ir_node *node)
{
dbg_info *const dbgi = get_irn_dbg_info(node);
- ir_graph *const irg = current_ir_graph;
ir_node *const src_block = get_nodes_block(node);
ir_node *const block = be_transform_node(src_block);
ir_node *const src_mem = get_irn_n(node, be_pos_Call_mem);
}
}
- mem = transform_AM_mem(irg, block, src_ptr, src_mem, addr->mem);
+ mem = transform_AM_mem(block, src_ptr, src_mem, addr->mem);
call = new_bd_ia32_Call(dbgi, block, addr->base, addr->index, mem,
am.new_op2, sp, fpcw, eax, ecx, edx, pop, call_tp);
set_am_attributes(call, &am);
}
SET_IA32_ORIG_NODE(load, node);
- return new_r_Proj(current_ir_graph, block, load, mode_Iu, pn_ia32_Load_res);
+ return new_r_Proj(block, load, mode_Iu, pn_ia32_Load_res);
}
/**
}
SET_IA32_ORIG_NODE(load, node);
- return new_r_Proj(current_ir_graph, block, load, mode_Iu, pn_ia32_Load_res);
+ return new_r_Proj(block, load, mode_Iu, pn_ia32_Load_res);
}
/**
SET_IA32_ORIG_NODE(new_node, node);
be_dep_on_frame(new_node);
- return new_r_Proj(current_ir_graph, block, new_node, mode_M, pn_ia32_Prefetch_M);
+ return new_r_Proj(block, new_node, mode_M, pn_ia32_Prefetch_M);
}
/**
/* bsf x */
if (get_irn_mode(real) != mode_T) {
set_irn_mode(real, mode_T);
- bsf = new_r_Proj(current_ir_graph, block, real, mode_Iu, pn_ia32_res);
+ bsf = new_r_Proj(block, real, mode_Iu, pn_ia32_res);
}
- flag = new_r_Proj(current_ir_graph, block, real, mode_b, pn_ia32_flags);
+ flag = new_r_Proj(block, real, mode_b, pn_ia32_flags);
/* sete */
set = new_bd_ia32_Set(dbgi, block, flag, pn_Cmp_Eq, 0);
in[0] = store;
in[1] = trampoline;
- return new_r_Tuple(current_ir_graph, new_block, 2, in);
+ return new_r_Tuple(new_block, 2, in);
}
/**
return new_node;
case ir_bk_inport:
if (get_Proj_proj(proj) == pn_Builtin_1_result) {
- return new_r_Proj(current_ir_graph, get_nodes_block(new_node),
+ return new_r_Proj(get_nodes_block(new_node),
new_node, get_irn_mode(proj), pn_ia32_Inport_res);
} else {
assert(get_Proj_proj(proj) == pn_Builtin_M);
- return new_r_Proj(current_ir_graph, get_nodes_block(new_node),
+ return new_r_Proj(get_nodes_block(new_node),
new_node, mode_M, pn_ia32_Inport_M);
}
case ir_bk_inner_trampoline:
ir_node *block = be_transform_node(get_nodes_block(node));
ir_node *call = get_Proj_pred(node);
ir_node *new_call = be_transform_node(call);
- ir_graph *irg = current_ir_graph;
dbg_info *dbgi = get_irn_dbg_info(node);
long proj = get_Proj_proj(node);
ir_mode *mode = get_irn_mode(node);
ir_node *res;
if (proj == pn_be_Call_M_regular) {
- return new_rd_Proj(dbgi, irg, block, new_call, mode_M, n_ia32_Call_mem);
+ return new_rd_Proj(dbgi, block, new_call, mode_M, n_ia32_Call_mem);
}
/* transform call modes */
if (mode_is_data(mode)) {
assert(req->type & arch_register_req_type_limited);
for (i = 0; i < n_outs; ++i) {
- arch_register_req_t const *const new_req = get_ia32_out_req(new_call, i);
+ arch_register_req_t const *const new_req
+ = arch_get_out_register_req(new_call, i);
if (!(new_req->type & arch_register_req_type_limited) ||
new_req->cls != req->cls ||
assert(i < n_outs);
}
- res = new_rd_Proj(dbgi, irg, block, new_call, mode, proj);
+ res = new_rd_Proj(dbgi, block, new_call, mode, proj);
/* TODO arch_set_irn_register() only operates on Projs, need variant with index */
switch (proj) {
case pn_Bound_X_regular:
new_node = be_transform_node(pred);
block = get_nodes_block(new_node);
- return new_r_Proj(current_ir_graph, block, new_node, mode_X, pn_ia32_Jcc_true);
+ return new_r_Proj(block, new_node, mode_X, pn_ia32_Jcc_true);
case pn_Bound_X_except:
new_node = be_transform_node(pred);
block = get_nodes_block(new_node);
- return new_r_Proj(current_ir_graph, block, new_node, mode_X, pn_ia32_Jcc_false);
+ return new_r_Proj(block, new_node, mode_X, pn_ia32_Jcc_false);
case pn_Bound_res:
return be_transform_node(get_Bound_index(pred));
default:
long pos = get_Proj_proj(node);
if (mode == mode_M) {
- pos = arch_irn_get_n_outs(new_pred) + 1;
+ pos = arch_irn_get_n_outs(new_pred)-1;
} else if (mode_is_int(mode) || mode_is_reference(mode)) {
mode = mode_Iu;
} else if (mode_is_float(mode)) {
panic("unexpected proj mode at ASM");
}
- return new_r_Proj(current_ir_graph, block, new_pred, mode, pos);
+ return new_r_Proj(block, new_pred, mode, pos);
}
/**
ir_node *new_block = be_transform_node(block);
dbg_info *dbgi = get_irn_dbg_info(node);
/* we exchange the ProjX with a jump */
- ir_node *jump = new_rd_Jmp(dbgi, current_ir_graph, new_block);
+ ir_node *jump = new_rd_Jmp(dbgi, new_block);
return jump;
}
if (ia32_mode_needs_gp_reg(mode)) {
ir_node *new_pred = be_transform_node(pred);
ir_node *block = be_transform_node(get_nodes_block(node));
- ir_node *new_proj = new_r_Proj(current_ir_graph, block, new_pred,
+ ir_node *new_proj = new_r_Proj(block, new_pred,
mode_Iu, get_Proj_proj(node));
new_proj->node_nr = node->node_nr;
return new_proj;
GEN(Mux);
GEN(Proj);
GEN(Phi);
+ GEN(Jmp);
GEN(IJmp);
GEN(Bound);
continue;
}
- req = get_ia32_out_req(node, i);
+ req = arch_get_out_register_req(node, i);
cls = req->cls;
if (cls == NULL) {
continue;
}
block = get_nodes_block(node);
- in[0] = new_r_Proj(current_ir_graph, block, node,
- arch_register_class_mode(cls), i);
+ in[0] = new_r_Proj(block, node, arch_register_class_mode(cls), i);
if (last_keep != NULL) {
be_Keep_add_node(last_keep, cls, in[0]);
} else {
- last_keep = be_new_Keep(cls, current_ir_graph, block, 1, in);
+ last_keep = be_new_Keep(block, 1, in);
if (sched_is_scheduled(node)) {
sched_add_after(node, last_keep);
}
ir_node *block = get_nodes_block(call);
ir_node *frame = get_irg_frame(current_ir_graph);
ir_node *old_mem = be_get_Proj_for_pn(call, pn_ia32_Call_M);
- ir_node *call_mem = new_r_Proj(current_ir_graph, block, call, mode_M, pn_ia32_Call_M);
+ ir_node *call_mem = new_r_Proj(block, call, mode_M, pn_ia32_Call_M);
ir_node *vfst, *xld, *new_mem;
/* store st(0) on stack */
set_ia32_op_type(xld, ia32_AddrModeS);
set_ia32_use_frame(xld);
- new_res = new_r_Proj(current_ir_graph, block, xld, mode, pn_ia32_xLoad_res);
- new_mem = new_r_Proj(current_ir_graph, block, xld, mode_M, pn_ia32_xLoad_M);
+ new_res = new_r_Proj(block, xld, mode, pn_ia32_xLoad_res);
+ new_mem = new_r_Proj(block, xld, mode_M, pn_ia32_xLoad_M);
if (old_mem != NULL) {
edges_reroute(old_mem, new_mem, current_ir_graph);
initial_fpcw = NULL;
no_pic_adjust = 0;
- BE_TIMER_PUSH(t_heights);
+ be_timer_push(T_HEIGHTS);
heights = heights_new(cg->irg);
- BE_TIMER_POP(t_heights);
+ be_timer_pop(T_HEIGHTS);
ia32_calculate_non_address_mode_nodes(cg->birg);
/* the transform phase is not safe for CSE (yet) because several nodes get