* @return the created TEMPLATE Add node
*/
static ir_node *gen_Add(TEMPLATE_transform_env_t *env, ir_node *op1, ir_node *op2) {
- return new_rd_TEMPLATE_Add(env->dbg, env->irg, env->block, op1, op2, env->mode);
+ return new_bd_TEMPLATE_Add(env->dbg, env->block, op1, op2, env->mode);
}
*/
static ir_node *gen_Mul(TEMPLATE_transform_env_t *env, ir_node *op1, ir_node *op2) {
if (mode_is_float(env->mode)) {
- return new_rd_TEMPLATE_fMul(env->dbg, env->irg, env->block, op1, op2, env->mode);
+ return new_bd_TEMPLATE_fMul(env->dbg, env->block, op1, op2, env->mode);
}
else {
- return new_rd_TEMPLATE_Mul(env->dbg, env->irg, env->block, op1, op2, env->mode);
+ return new_bd_TEMPLATE_Mul(env->dbg, env->block, op1, op2, env->mode);
}
}
* @return the created TEMPLATE And node
*/
static ir_node *gen_And(TEMPLATE_transform_env_t *env, ir_node *op1, ir_node *op2) {
- return new_rd_TEMPLATE_And(env->dbg, env->irg, env->block, op1, op2, env->mode);
+ return new_bd_TEMPLATE_And(env->dbg, env->block, op1, op2, env->mode);
}
* @return the created TEMPLATE Or node
*/
static ir_node *gen_Or(TEMPLATE_transform_env_t *env, ir_node *op1, ir_node *op2) {
- return new_rd_TEMPLATE_Or(env->dbg, env->irg, env->block, op1, op2, env->mode);
+ return new_bd_TEMPLATE_Or(env->dbg, env->block, op1, op2, env->mode);
}
* @return the created TEMPLATE Eor node
*/
static ir_node *gen_Eor(TEMPLATE_transform_env_t *env, ir_node *op1, ir_node *op2) {
- return new_rd_TEMPLATE_Eor(env->dbg, env->irg, env->block, op1, op2, env->mode);
+ return new_bd_TEMPLATE_Eor(env->dbg, env->block, op1, op2, env->mode);
}
*/
static ir_node *gen_Sub(TEMPLATE_transform_env_t *env, ir_node *op1, ir_node *op2) {
if (mode_is_float(env->mode)) {
- return new_rd_TEMPLATE_fSub(env->dbg, env->irg, env->block, op1, op2, env->mode);
+ return new_bd_TEMPLATE_fSub(env->dbg, env->block, op1, op2, env->mode);
}
else {
- return new_rd_TEMPLATE_Sub(env->dbg, env->irg, env->block, op1, op2, env->mode);
+ return new_bd_TEMPLATE_Sub(env->dbg, env->block, op1, op2, env->mode);
}
}
* @return the created TEMPLATE fDiv node
*/
static ir_node *gen_Quot(TEMPLATE_transform_env_t *env, ir_node *op1, ir_node *op2) {
- return new_rd_TEMPLATE_fDiv(env->dbg, env->irg, env->block, op1, op2, env->mode);
+ return new_bd_TEMPLATE_fDiv(env->dbg, env->block, op1, op2, env->mode);
}
* @return the created TEMPLATE Shl node
*/
static ir_node *gen_Shl(TEMPLATE_transform_env_t *env, ir_node *op1, ir_node *op2) {
- return new_rd_TEMPLATE_Shl(env->dbg, env->irg, env->block, op1, op2, env->mode);
+ return new_bd_TEMPLATE_Shl(env->dbg, env->block, op1, op2, env->mode);
}
* @return the created TEMPLATE Shr node
*/
static ir_node *gen_Shr(TEMPLATE_transform_env_t *env, ir_node *op1, ir_node *op2) {
- return new_rd_TEMPLATE_Shr(env->dbg, env->irg, env->block, op1, op2, env->mode);
+ return new_bd_TEMPLATE_Shr(env->dbg, env->block, op1, op2, env->mode);
}
*/
static ir_node *gen_Minus(TEMPLATE_transform_env_t *env, ir_node *op) {
if (mode_is_float(env->mode)) {
- return new_rd_TEMPLATE_fMinus(env->dbg, env->irg, env->block, op, env->mode);
+ return new_bd_TEMPLATE_fMinus(env->dbg, env->block, op, env->mode);
}
- return new_rd_TEMPLATE_Minus(env->dbg, env->irg, env->block, op, env->mode);
+ return new_bd_TEMPLATE_Minus(env->dbg, env->block, op, env->mode);
}
* @return the created TEMPLATE Not node
*/
static ir_node *gen_Not(TEMPLATE_transform_env_t *env, ir_node *op) {
- return new_rd_TEMPLATE_Not(env->dbg, env->irg, env->block, op, env->mode);
+ return new_bd_TEMPLATE_Not(env->dbg, env->block, op, env->mode);
}
ir_node *node = env->irn;
if (mode_is_float(env->mode)) {
- return new_rd_TEMPLATE_fLoad(env->dbg, env->irg, env->block, get_Load_ptr(node), get_Load_mem(node), env->mode);
+ return new_bd_TEMPLATE_fLoad(env->dbg, env->block, get_Load_ptr(node), get_Load_mem(node), env->mode);
}
- return new_rd_TEMPLATE_Load(env->dbg, env->irg, env->block, get_Load_ptr(node), get_Load_mem(node), env->mode);
+ return new_bd_TEMPLATE_Load(env->dbg, env->block, get_Load_ptr(node), get_Load_mem(node), env->mode);
}
ir_node *node = env->irn;
if (mode_is_float(env->mode)) {
- return new_rd_TEMPLATE_fStore(env->dbg, env->irg, env->block, get_Store_ptr(node), get_Store_value(node), get_Store_mem(node), env->mode);
+ return new_bd_TEMPLATE_fStore(env->dbg, env->block, get_Store_ptr(node), get_Store_value(node), get_Store_mem(node), env->mode);
}
- return new_rd_TEMPLATE_Store(env->dbg, env->irg, env->block, get_Store_ptr(node), get_Store_value(node), get_Store_mem(node), env->mode);
+ return new_bd_TEMPLATE_Store(env->dbg, env->block, get_Store_ptr(node), get_Store_value(node), get_Store_mem(node), env->mode);
}
*/
static ir_node *gen_ptr_add(ir_node *node, ir_node *frame, arm_vals *v)
{
- ir_graph *irg = current_ir_graph;
dbg_info *dbg = get_irn_dbg_info(node);
ir_node *block = get_nodes_block(node);
int cnt;
ir_node *ptr;
- ptr = new_rd_arm_Add_i(dbg, irg, block, frame, mode_Iu, arm_encode_imm_w_shift(v->shifts[0], v->values[0]));
+ ptr = new_bd_arm_Add_i(dbg, block, frame, mode_Iu, arm_encode_imm_w_shift(v->shifts[0], v->values[0]));
arch_set_irn_register(ptr, &arm_gp_regs[REG_R12]);
sched_add_before(node, ptr);
for (cnt = 1; cnt < v->ops; ++cnt) {
long value = arm_encode_imm_w_shift(v->shifts[cnt], v->values[cnt]);
- ir_node *next = new_rd_arm_Add_i(dbg, irg, block, ptr, mode_Iu, value);
+ ir_node *next = new_bd_arm_Add_i(dbg, block, ptr, mode_Iu, value);
arch_set_irn_register(next, &arm_gp_regs[REG_R12]);
sched_add_before(node, next);
ptr = next;
*/
static ir_node *gen_ptr_sub(ir_node *node, ir_node *frame, arm_vals *v)
{
- ir_graph *irg = current_ir_graph;
dbg_info *dbg = get_irn_dbg_info(node);
ir_node *block = get_nodes_block(node);
int cnt;
ir_node *ptr;
- ptr = new_rd_arm_Sub_i(dbg, irg, block, frame, mode_Iu, arm_encode_imm_w_shift(v->shifts[0], v->values[0]));
+ ptr = new_bd_arm_Sub_i(dbg, block, frame, mode_Iu, arm_encode_imm_w_shift(v->shifts[0], v->values[0]));
arch_set_irn_register(ptr, &arm_gp_regs[REG_R12]);
sched_add_before(node, ptr);
for (cnt = 1; cnt < v->ops; ++cnt) {
long value = arm_encode_imm_w_shift(v->shifts[cnt], v->values[cnt]);
- ir_node *next = new_rd_arm_Sub_i(dbg, irg, block, ptr, mode_Iu, value);
+ ir_node *next = new_bd_arm_Sub_i(dbg, block, ptr, mode_Iu, value);
arch_set_irn_register(next, &arm_gp_regs[REG_R12]);
sched_add_before(node, next);
ptr = next;
if (mode_is_float(mode)) {
if (USE_FPA(cg->isa)) {
/* transform into fpaStf */
- store = new_rd_arm_fpaStf(dbg, irg, block, ptr, value, get_irg_no_mem(irg), mode);
+ store = new_bd_arm_fpaStf(dbg, block, ptr, value, get_irg_no_mem(irg), mode);
sched_add_before(node, store);
} else {
panic("peephole_be_Spill: spill not supported for this mode");
}
} else if (mode_is_dataM(mode)) {
/* transform into Store */;
- store = new_rd_arm_Store(dbg, irg, block, ptr, value, get_irg_no_mem(irg));
+ store = new_bd_arm_Store(dbg, block, ptr, value, get_irg_no_mem(irg));
sched_add_before(node, store);
} else {
panic("peephole_be_Spill: spill not supported for this mode");
if (mode_is_float(mode)) {
if (USE_FPA(cg->isa)) {
/* transform into fpaLdf */
- load = new_rd_arm_fpaLdf(dbg, irg, block, ptr, mem, mode);
+ load = new_bd_arm_fpaLdf(dbg, block, ptr, mem, mode);
sched_add_before(node, load);
proj = new_rd_Proj(dbg, irg, block, load, mode, pn_arm_fpaLdf_res);
arch_set_irn_register(proj, reg);
}
} else if (mode_is_dataM(mode)) {
/* transform into Store */;
- load = new_rd_arm_Load(dbg, irg, block, ptr, mem);
+ load = new_bd_arm_Load(dbg, block, ptr, mem);
sched_add_before(node, load);
proj = new_rd_Proj(dbg, irg, block, load, mode_Iu, pn_arm_Load_res);
arch_set_irn_register(proj, reg);
*/
static ir_node *create_mov_node(dbg_info *dbg, ir_node *block, long value) {
ir_mode *mode = mode_Iu;
- ir_graph *irg = current_ir_graph;
ir_node *res;
if (mode_needs_gp_reg(mode))
mode = mode_Iu;
- res = new_rd_arm_Mov_i(dbg, irg, block, mode, value);
+ res = new_bd_arm_Mov_i(dbg, block, mode, value);
be_dep_on_frame(res);
return res;
}
*/
static ir_node *create_mvn_node(dbg_info *dbg, ir_node *block, long value) {
ir_mode *mode = mode_Iu;
- ir_graph *irg = current_ir_graph;
ir_node *res;
if (mode_needs_gp_reg(mode))
mode = mode_Iu;
- res = new_rd_arm_Mvn_i(dbg, irg, block, mode, value);
+ res = new_bd_arm_Mvn_i(dbg, block, mode, value);
be_dep_on_frame(res);
return res;
}
-#define NEW_BINOP_NODE(opname, env, op1, op2) new_rd_arm_##opname(env->dbg, current_ir_graph, env->block, op1, op2, env->mode)
+#define NEW_BINOP_NODE(opname, env, op1, op2) new_bd_arm_##opname(env->dbg, env->block, op1, op2, env->mode)
/**
* Creates a possible DAG for an constant.
for (cnt = 1; cnt < vn.ops; ++cnt) {
long value = arm_encode_imm_w_shift(vn.shifts[cnt], vn.values[cnt]);
- ir_node *bic_i_node = new_rd_arm_Bic_i(dbg, current_ir_graph, block, result, mode, value);
+ ir_node *bic_i_node = new_bd_arm_Bic_i(dbg, block, result, mode, value);
result = bic_i_node;
}
}
for (cnt = 1; cnt < v.ops; ++cnt) {
long value = arm_encode_imm_w_shift(v.shifts[cnt], v.values[cnt]);
- ir_node *orr_i_node = new_rd_arm_Or_i(dbg, current_ir_graph, block, result, mode, value);
+ ir_node *orr_i_node = new_bd_arm_Or_i(dbg, block, result, mode, value);
result = orr_i_node;
}
}
static ir_node *gen_zero_extension(dbg_info *dbg, ir_node *block, ir_node *op, int result_bits) {
unsigned mask_bits = (1 << result_bits) - 1;
ir_node *mask_node = create_const_graph_value(dbg, block, mask_bits);
- return new_rd_arm_And(dbg, current_ir_graph, block, op, mask_node, mode_Iu, ARM_SHF_NONE, 0);
+ return new_bd_arm_And(dbg, block, op, mask_node, mode_Iu, ARM_SHF_NONE, 0);
}
/**
* Generate code for a sign extension.
*/
static ir_node *gen_sign_extension(dbg_info *dbg, ir_node *block, ir_node *op, int result_bits) {
- ir_graph *irg = current_ir_graph;
int shift_width = 32 - result_bits;
ir_node *shift_const_node = create_const_graph_value(dbg, block, shift_width);
- ir_node *lshift_node = new_rd_arm_Shl(dbg, irg, block, op, shift_const_node, mode_Iu);
- ir_node *rshift_node = new_rd_arm_Shrs(dbg, irg, block, lshift_node, shift_const_node, mode_Iu);
+ ir_node *lshift_node = new_bd_arm_Shl(dbg, block, op, shift_const_node, mode_Iu);
+ ir_node *rshift_node = new_bd_arm_Shrs(dbg, block, lshift_node, shift_const_node, mode_Iu);
return rshift_node;
}
ir_node *block = be_transform_node(get_nodes_block(node));
ir_node *op = get_Conv_op(node);
ir_node *new_op = be_transform_node(op);
- ir_graph *irg = current_ir_graph;
ir_mode *src_mode = get_irn_mode(op);
ir_mode *dst_mode = get_irn_mode(node);
dbg_info *dbg = get_irn_dbg_info(node);
if (mode_is_float(src_mode)) {
if (mode_is_float(dst_mode)) {
/* from float to float */
- return new_rd_arm_fpaMvf(dbg, irg, block, new_op, dst_mode);
+ return new_bd_arm_fpaMvf(dbg, block, new_op, dst_mode);
}
else {
/* from float to int */
- return new_rd_arm_fpaFix(dbg, irg, block, new_op, dst_mode);
+ return new_bd_arm_fpaFix(dbg, block, new_op, dst_mode);
}
}
else {
/* from int to float */
- return new_rd_arm_fpaFlt(dbg, irg, block, new_op, dst_mode);
+ return new_bd_arm_fpaFlt(dbg, block, new_op, dst_mode);
}
}
else if (USE_VFP(env_cg->isa)) {
ir_node *op2 = get_Add_right(node);
ir_node *new_op2 = be_transform_node(op2);
ir_mode *mode = get_irn_mode(node);
- ir_graph *irg = current_ir_graph;
ir_node *new_op3;
int v;
arm_shift_modifier mod;
env_cg->have_fp_insn = 1;
if (USE_FPA(env_cg->isa)) {
if (is_arm_fpaMvf_i(new_op1))
- return new_rd_arm_fpaAdf_i(dbg, irg, block, new_op2, mode, get_arm_imm_value(new_op1));
+ return new_bd_arm_fpaAdf_i(dbg, block, new_op2, mode, get_arm_imm_value(new_op1));
if (is_arm_fpaMvf_i(new_op2))
- return new_rd_arm_fpaAdf_i(dbg, irg, block, new_op1, mode, get_arm_imm_value(new_op2));
- return new_rd_arm_fpaAdf(dbg, irg, block, new_op1, new_op2, mode);
+ return new_bd_arm_fpaAdf_i(dbg, block, new_op1, mode, get_arm_imm_value(new_op2));
+ return new_bd_arm_fpaAdf(dbg, block, new_op1, new_op2, mode);
} else if (USE_VFP(env_cg->isa)) {
assert(mode != mode_E && "IEEE Extended FP not supported");
panic("VFP not supported yet");
mode = mode_Iu;
if (is_arm_Mov_i(new_op1))
- return new_rd_arm_Add_i(dbg, irg, block, new_op2, mode, get_arm_imm_value(new_op1));
+ return new_bd_arm_Add_i(dbg, block, new_op2, mode, get_arm_imm_value(new_op1));
if (is_arm_Mov_i(new_op2))
- return new_rd_arm_Add_i(dbg, irg, block, new_op1, mode, get_arm_imm_value(new_op2));
+ return new_bd_arm_Add_i(dbg, block, new_op1, mode, get_arm_imm_value(new_op2));
/* check for MLA */
if (is_arm_Mul(new_op1) && get_irn_n_edges(op1) == 1) {
new_op2 = get_irn_n(new_op1, 1);
new_op1 = get_irn_n(new_op1, 0);
- return new_rd_arm_Mla(dbg, irg, block, new_op1, new_op2, new_op3, mode);
+ return new_bd_arm_Mla(dbg, block, new_op1, new_op2, new_op3, mode);
}
if (is_arm_Mul(new_op2) && get_irn_n_edges(op2) == 1) {
new_op3 = new_op1;
new_op1 = get_irn_n(new_op2, 0);
new_op2 = get_irn_n(new_op2, 1);
- return new_rd_arm_Mla(dbg, irg, block, new_op1, new_op2, new_op3, mode);
+ return new_bd_arm_Mla(dbg, block, new_op1, new_op2, new_op3, mode);
}
/* is the first a shifter */
v = is_shifter_operand(new_op1, &mod);
if (v) {
new_op1 = get_irn_n(new_op1, 0);
- return new_rd_arm_Add(dbg, irg, block, new_op2, new_op1, mode, mod, v);
+ return new_bd_arm_Add(dbg, block, new_op2, new_op1, mode, mod, v);
}
/* is the second a shifter */
v = is_shifter_operand(new_op2, &mod);
if (v) {
new_op2 = get_irn_n(new_op2, 0);
- return new_rd_arm_Add(dbg, irg, block, new_op1, new_op2, mode, mod, v);
+ return new_bd_arm_Add(dbg, block, new_op1, new_op2, mode, mod, v);
}
/* normal ADD */
- return new_rd_arm_Add(dbg, irg, block, new_op1, new_op2, mode, ARM_SHF_NONE, 0);
+ return new_bd_arm_Add(dbg, block, new_op1, new_op2, mode, ARM_SHF_NONE, 0);
}
}
ir_node *op2 = get_Mul_right(node);
ir_node *new_op2 = be_transform_node(op2);
ir_mode *mode = get_irn_mode(node);
- ir_graph *irg = current_ir_graph;
dbg_info *dbg = get_irn_dbg_info(node);
if (mode_is_float(mode)) {
env_cg->have_fp_insn = 1;
if (USE_FPA(env_cg->isa)) {
if (is_arm_Mov_i(new_op1))
- return new_rd_arm_fpaMuf_i(dbg, irg, block, new_op2, mode, get_arm_imm_value(new_op1));
+ return new_bd_arm_fpaMuf_i(dbg, block, new_op2, mode, get_arm_imm_value(new_op1));
if (is_arm_Mov_i(new_op2))
- return new_rd_arm_fpaMuf_i(dbg, irg, block, new_op1, mode, get_arm_imm_value(new_op2));
- return new_rd_arm_fpaMuf(dbg, irg, block, new_op1, new_op2, mode);
+ return new_bd_arm_fpaMuf_i(dbg, block, new_op1, mode, get_arm_imm_value(new_op2));
+ return new_bd_arm_fpaMuf(dbg, block, new_op1, new_op2, mode);
}
else if (USE_VFP(env_cg->isa)) {
assert(mode != mode_E && "IEEE Extended FP not supported");
}
assert(mode_is_data(mode));
mode = mode_Iu;
- return new_rd_arm_Mul(dbg, irg, block, new_op1, new_op2, mode);
+ return new_bd_arm_Mul(dbg, block, new_op1, new_op2, mode);
}
/**
env_cg->have_fp_insn = 1;
if (USE_FPA(env_cg->isa)) {
if (is_arm_Mov_i(new_op1))
- return new_rd_arm_fpaRdf_i(dbg, current_ir_graph, block, new_op2, mode, get_arm_imm_value(new_op1));
+ return new_bd_arm_fpaRdf_i(dbg, block, new_op2, mode, get_arm_imm_value(new_op1));
if (is_arm_Mov_i(new_op2))
- return new_rd_arm_fpaDvf_i(dbg, current_ir_graph, block, new_op1, mode, get_arm_imm_value(new_op2));
- return new_rd_arm_fpaDvf(dbg, current_ir_graph, block, new_op1, new_op2, mode);
+ return new_bd_arm_fpaDvf_i(dbg, block, new_op1, mode, get_arm_imm_value(new_op2));
+ return new_bd_arm_fpaDvf(dbg, block, new_op1, new_op2, mode);
} else if (USE_VFP(env_cg->isa)) {
assert(mode != mode_E && "IEEE Extended FP not supported");
panic("VFP not supported yet");
ir_node *new_op1 = be_transform_node(op1); \
ir_node *op2 = get_ ## op ## _right(node); \
ir_node *new_op2 = be_transform_node(op2); \
- ir_graph *irg = current_ir_graph; \
ir_mode *mode = mode_Iu; \
dbg_info *dbg = get_irn_dbg_info(node); \
int v; \
arm_shift_modifier mod; \
\
if (is_arm_Mov_i(new_op1)) \
- return new_rd_arm_ ## op ## _i(dbg, irg, block, new_op2, mode, get_arm_imm_value(new_op1)); \
+ return new_bd_arm_ ## op ## _i(dbg, block, new_op2, mode, get_arm_imm_value(new_op1)); \
if (is_arm_Mov_i(new_op2)) \
- return new_rd_arm_ ## op ## _i(dbg, irg, block, new_op1, mode, get_arm_imm_value(new_op2)); \
+ return new_bd_arm_ ## op ## _i(dbg, block, new_op1, mode, get_arm_imm_value(new_op2)); \
/* is the first a shifter */ \
v = is_shifter_operand(new_op1, &mod); \
if (v) { \
new_op1 = get_irn_n(new_op1, 0); \
- return new_rd_arm_ ## op(dbg, irg, block, new_op2, new_op1, mode, mod, v); \
+ return new_bd_arm_ ## op(dbg, block, new_op2, new_op1, mode, mod, v); \
} \
/* is the second a shifter */ \
v = is_shifter_operand(new_op2, &mod); \
if (v) { \
new_op2 = get_irn_n(new_op2, 0); \
- return new_rd_arm_ ## op(dbg, irg, block, new_op1, new_op2, mode, mod, v); \
+ return new_bd_arm_ ## op(dbg, block, new_op1, new_op2, mode, mod, v); \
} \
/* Normal op */ \
- return new_rd_arm_ ## op(dbg, irg, block, new_op1, new_op2, mode, ARM_SHF_NONE, 0) \
+ return new_bd_arm_ ## op(dbg, block, new_op1, new_op2, mode, ARM_SHF_NONE, 0) \
/**
* Creates an ARM And.
ir_node *op2 = get_Sub_right(node);
ir_node *new_op2 = be_transform_node(op2);
ir_mode *mode = get_irn_mode(node);
- ir_graph *irg = current_ir_graph;
dbg_info *dbg = get_irn_dbg_info(node);
int v;
arm_shift_modifier mod;
env_cg->have_fp_insn = 1;
if (USE_FPA(env_cg->isa)) {
if (is_arm_Mov_i(new_op1))
- return new_rd_arm_fpaRsf_i(dbg, irg, block, new_op2, mode, get_arm_imm_value(new_op1));
+ return new_bd_arm_fpaRsf_i(dbg, block, new_op2, mode, get_arm_imm_value(new_op1));
if (is_arm_Mov_i(new_op2))
- return new_rd_arm_fpaSuf_i(dbg, irg, block, new_op1, mode, get_arm_imm_value(new_op2));
- return new_rd_arm_fpaSuf(dbg, irg, block, new_op1, new_op2, mode);
+ return new_bd_arm_fpaSuf_i(dbg, block, new_op1, mode, get_arm_imm_value(new_op2));
+ return new_bd_arm_fpaSuf(dbg, block, new_op1, new_op2, mode);
} else if (USE_VFP(env_cg->isa)) {
assert(mode != mode_E && "IEEE Extended FP not supported");
panic("VFP not supported yet");
mode = mode_Iu;
if (is_arm_Mov_i(new_op1))
- return new_rd_arm_Rsb_i(dbg, irg, block, new_op2, mode, get_arm_imm_value(new_op1));
+ return new_bd_arm_Rsb_i(dbg, block, new_op2, mode, get_arm_imm_value(new_op1));
if (is_arm_Mov_i(new_op2))
- return new_rd_arm_Sub_i(dbg, irg, block, new_op1, mode, get_arm_imm_value(new_op2));
+ return new_bd_arm_Sub_i(dbg, block, new_op1, mode, get_arm_imm_value(new_op2));
/* is the first a shifter */
v = is_shifter_operand(new_op1, &mod);
if (v) {
new_op1 = get_irn_n(new_op1, 0);
- return new_rd_arm_Rsb(dbg, irg, block, new_op2, new_op1, mode, mod, v);
+ return new_bd_arm_Rsb(dbg, block, new_op2, new_op1, mode, mod, v);
}
/* is the second a shifter */
v = is_shifter_operand(new_op2, &mod);
if (v) {
new_op2 = get_irn_n(new_op2, 0);
- return new_rd_arm_Sub(dbg, irg, block, new_op1, new_op2, mode, mod, v);
+ return new_bd_arm_Sub(dbg, block, new_op1, new_op2, mode, mod, v);
}
/* normal sub */
- return new_rd_arm_Sub(dbg, irg, block, new_op1, new_op2, mode, ARM_SHF_NONE, 0);
+ return new_bd_arm_Sub(dbg, block, new_op1, new_op2, mode, ARM_SHF_NONE, 0);
}
}
dbg_info *dbg = get_irn_dbg_info(node);
if (is_arm_Mov_i(new_op2)) {
- return new_rd_arm_Mov(dbg, current_ir_graph, block, new_op1, mode, ARM_SHF_LSL, get_arm_imm_value(new_op2));
+ return new_bd_arm_Mov(dbg, block, new_op1, mode, ARM_SHF_LSL, get_arm_imm_value(new_op2));
}
- return new_rd_arm_Shl(dbg, current_ir_graph, block, new_op1, new_op2, mode);
+ return new_bd_arm_Shl(dbg, block, new_op1, new_op2, mode);
}
/**
dbg_info *dbg = get_irn_dbg_info(node);
if (is_arm_Mov_i(new_op2)) {
- return new_rd_arm_Mov(dbg, current_ir_graph, block, new_op1, mode, ARM_SHF_LSR, get_arm_imm_value(new_op2));
+ return new_bd_arm_Mov(dbg, block, new_op1, mode, ARM_SHF_LSR, get_arm_imm_value(new_op2));
}
- return new_rd_arm_Shr(dbg, current_ir_graph, block, new_op1, new_op2, mode);
+ return new_bd_arm_Shr(dbg, block, new_op1, new_op2, mode);
}
/**
dbg_info *dbg = get_irn_dbg_info(node);
if (is_arm_Mov_i(new_op2)) {
- return new_rd_arm_Mov(dbg, current_ir_graph, block, new_op1, mode, ARM_SHF_ASR, get_arm_imm_value(new_op2));
+ return new_bd_arm_Mov(dbg, block, new_op1, mode, ARM_SHF_ASR, get_arm_imm_value(new_op2));
}
- return new_rd_arm_Shrs(dbg, current_ir_graph, block, new_op1, new_op2, mode);
+ return new_bd_arm_Shrs(dbg, block, new_op1, new_op2, mode);
}
/**
dbg_info *dbg = get_irn_dbg_info(node);
if (is_arm_Mov_i(new_op2)) {
- return new_rd_arm_Mov(dbg, current_ir_graph, block, new_op1, mode, ARM_SHF_ROR, get_arm_imm_value(new_op2));
+ return new_bd_arm_Mov(dbg, block, new_op1, mode, ARM_SHF_ROR, get_arm_imm_value(new_op2));
}
- return new_rd_arm_Ror(dbg, current_ir_graph, block, new_op1, new_op2, mode);
+ return new_bd_arm_Ror(dbg, block, new_op1, new_op2, mode);
}
/**
dbg_info *dbg = get_irn_dbg_info(node);
ir_node *new_op2 = be_transform_node(op2);
- new_op2 = new_rd_arm_Rsb_i(dbg, current_ir_graph, block, new_op2, mode, 32);
- return new_rd_arm_Ror(dbg, current_ir_graph, block, new_op1, new_op2, mode);
+ new_op2 = new_bd_arm_Rsb_i(dbg, block, new_op2, mode, 32);
+ return new_bd_arm_Ror(dbg, block, new_op1, new_op2, mode);
}
/**
dbg_info *dbg = get_irn_dbg_info(node);
bits = (bits - get_tarval_long(tv)) & 31;
- rotate = new_rd_arm_Mov(dbg, current_ir_graph, block, new_op1, mode, ARM_SHF_ROR, bits);
+ rotate = new_bd_arm_Mov(dbg, block, new_op1, mode, ARM_SHF_ROR, bits);
}
}
if (v) {
new_op = get_irn_n(new_op, 0);
}
- return new_rd_arm_Mvn(dbg, current_ir_graph, block, new_op, mode, mod, v);
+ return new_bd_arm_Mvn(dbg, block, new_op, mode, mod, v);
}
/**
if (mode_is_float(mode)) {
env_cg->have_fp_insn = 1;
if (USE_FPA(env_cg->isa))
- return new_rd_arm_fpaAbs(dbg, current_ir_graph, block, new_op, mode);
+ return new_bd_arm_fpaAbs(dbg, block, new_op, mode);
else if (USE_VFP(env_cg->isa)) {
assert(mode != mode_E && "IEEE Extended FP not supported");
panic("VFP not supported yet");
}
assert(mode_is_data(mode));
mode = mode_Iu;
- return new_rd_arm_Abs(dbg, current_ir_graph, block, new_op, mode);
+ return new_bd_arm_Abs(dbg, block, new_op, mode);
}
/**
if (mode_is_float(mode)) {
env_cg->have_fp_insn = 1;
if (USE_FPA(env_cg->isa))
- return new_rd_arm_fpaMvf(dbg, current_ir_graph, block, op, mode);
+ return new_bd_arm_fpaMvf(dbg, block, op, mode);
else if (USE_VFP(env_cg->isa)) {
assert(mode != mode_E && "IEEE Extended FP not supported");
panic("VFP not supported yet");
}
assert(mode_is_data(mode));
mode = mode_Iu;
- return new_rd_arm_Rsb_i(dbg, current_ir_graph, block, new_op, mode, 0);
+ return new_bd_arm_Rsb_i(dbg, block, new_op, mode, 0);
}
/**
ir_node *mem = get_Load_mem(node);
ir_node *new_mem = be_transform_node(mem);
ir_mode *mode = get_Load_mode(node);
- ir_graph *irg = current_ir_graph;
dbg_info *dbg = get_irn_dbg_info(node);
ir_node *new_load = NULL;
if (mode_is_float(mode)) {
env_cg->have_fp_insn = 1;
if (USE_FPA(env_cg->isa))
- new_load = new_rd_arm_fpaLdf(dbg, irg, block, new_ptr, new_mem, mode);
+ new_load = new_bd_arm_fpaLdf(dbg, block, new_ptr, new_mem, mode);
else if (USE_VFP(env_cg->isa)) {
assert(mode != mode_E && "IEEE Extended FP not supported");
panic("VFP not supported yet");
/* sign extended loads */
switch (get_mode_size_bits(mode)) {
case 8:
- new_load = new_rd_arm_Loadbs(dbg, irg, block, new_ptr, new_mem);
+ new_load = new_bd_arm_Loadbs(dbg, block, new_ptr, new_mem);
break;
case 16:
- new_load = new_rd_arm_Loadhs(dbg, irg, block, new_ptr, new_mem);
+ new_load = new_bd_arm_Loadhs(dbg, block, new_ptr, new_mem);
break;
case 32:
- new_load = new_rd_arm_Load(dbg, irg, block, new_ptr, new_mem);
+ new_load = new_bd_arm_Load(dbg, block, new_ptr, new_mem);
break;
default:
panic("mode size not supported");
/* zero extended loads */
switch (get_mode_size_bits(mode)) {
case 8:
- new_load = new_rd_arm_Loadb(dbg, irg, block, new_ptr, new_mem);
+ new_load = new_bd_arm_Loadb(dbg, block, new_ptr, new_mem);
break;
case 16:
- new_load = new_rd_arm_Loadh(dbg, irg, block, new_ptr, new_mem);
+ new_load = new_bd_arm_Loadh(dbg, block, new_ptr, new_mem);
break;
case 32:
- new_load = new_rd_arm_Load(dbg, irg, block, new_ptr, new_mem);
+ new_load = new_bd_arm_Load(dbg, block, new_ptr, new_mem);
break;
default:
panic("mode size not supported");
/* check for special case: the loaded value might not be used */
if (be_get_Proj_for_pn(node, pn_Load_res) == NULL) {
+ ir_graph *irg = current_ir_graph;
+
/* add a result proj and a Keep to produce a pseudo use */
ir_node *proj = new_r_Proj(irg, block, new_load, mode_Iu, pn_arm_Load_res);
be_new_Keep(arch_get_irn_reg_class_out(proj), irg, block, 1, &proj);
ir_node *val = get_Store_value(node);
ir_node *new_val = be_transform_node(val);
ir_mode *mode = get_irn_mode(val);
- ir_graph *irg = current_ir_graph;
dbg_info *dbg = get_irn_dbg_info(node);
ir_node *new_store = NULL;
if (mode_is_float(mode)) {
env_cg->have_fp_insn = 1;
if (USE_FPA(env_cg->isa))
- new_store = new_rd_arm_fpaStf(dbg, irg, block, new_ptr, new_val, new_mem, mode);
+ new_store = new_bd_arm_fpaStf(dbg, block, new_ptr, new_val, new_mem, mode);
else if (USE_VFP(env_cg->isa)) {
assert(mode != mode_E && "IEEE Extended FP not supported");
panic("VFP not supported yet");
assert(mode_is_data(mode) && "unsupported mode for Store");
switch (get_mode_size_bits(mode)) {
case 8:
- new_store = new_rd_arm_Storeb(dbg, irg, block, new_ptr, new_val, new_mem);
+ new_store = new_bd_arm_Storeb(dbg, block, new_ptr, new_val, new_mem);
case 16:
- new_store = new_rd_arm_Storeh(dbg, irg, block, new_ptr, new_val, new_mem);
+ new_store = new_bd_arm_Storeh(dbg, block, new_ptr, new_val, new_mem);
default:
- new_store = new_rd_arm_Store(dbg, irg, block, new_ptr, new_val, new_mem);
+ new_store = new_bd_arm_Store(dbg, block, new_ptr, new_val, new_mem);
}
}
set_irn_pinned(new_store, get_irn_pinned(node));
static ir_node *gen_Cond(ir_node *node) {
ir_node *block = be_transform_node(get_nodes_block(node));
ir_node *selector = get_Cond_selector(node);
- ir_graph *irg = current_ir_graph;
dbg_info *dbg = get_irn_dbg_info(node);
ir_mode *mode = get_irn_mode(selector);
if (pnc & pn_Cmp_Uo) {
/* check for unordered, need cmf */
- return new_rd_arm_fpaCmfBra(dbg, irg, block, new_op1, new_op2, pnc);
+ return new_bd_arm_fpaCmfBra(dbg, block, new_op1, new_op2, pnc);
}
/* Hmm: use need cmfe */
- return new_rd_arm_fpaCmfeBra(dbg, irg, block, new_op1, new_op2, pnc);
+ return new_bd_arm_fpaCmfeBra(dbg, block, new_op1, new_op2, pnc);
} else if (is_Const(op2) && tarval_is_null(get_Const_tarval(op2))) {
/* compare with 0 */
- return new_rd_arm_TstBra(dbg, irg, block, new_op1, new_op1, get_Proj_proj(selector));
+ return new_bd_arm_TstBra(dbg, block, new_op1, new_op1, get_Proj_proj(selector));
} else {
/* integer compare */
ir_node *new_op2 = be_transform_node(op2);
- return new_rd_arm_CmpBra(dbg, irg, block, new_op1, new_op2, get_Proj_proj(selector));
+ return new_bd_arm_CmpBra(dbg, block, new_op1, new_op2, get_Proj_proj(selector));
}
} else {
/* SwitchJmp */
}
const_graph = create_const_graph_value(dbg, block, translation);
- sub = new_rd_arm_Sub(dbg, irg, block, new_op, const_graph, mode, ARM_SHF_NONE, 0);
- return new_rd_arm_SwitchJmp(dbg, irg, block, sub, n_projs, get_Cond_defaultProj(node) - translation);
+ sub = new_bd_arm_Sub(dbg, block, new_op, const_graph, mode, ARM_SHF_NONE, 0);
+ return new_bd_arm_SwitchJmp(dbg, block, sub, n_projs, get_Cond_defaultProj(node) - translation);
}
}
*/
static ir_node *gen_Const(ir_node *node) {
ir_node *block = be_transform_node(get_nodes_block(node));
- ir_graph *irg = current_ir_graph;
ir_mode *mode = get_irn_mode(node);
dbg_info *dbg = get_irn_dbg_info(node);
if (imm != fpa_max) {
if (imm > 0)
- node = new_rd_arm_fpaMvf_i(dbg, irg, block, mode, imm);
+ node = new_bd_arm_fpaMvf_i(dbg, block, mode, imm);
else
- node = new_rd_arm_fpaMnf_i(dbg, irg, block, mode, -imm);
+ node = new_bd_arm_fpaMnf_i(dbg, block, mode, -imm);
} else {
- node = new_rd_arm_fpaConst(dbg, irg, block, tv);
+ node = new_bd_arm_fpaConst(dbg, block, tv);
}
be_dep_on_frame(node);
return node;
ir_node *block = be_transform_node(get_nodes_block(node));
ir_mode *mode = mode_Iu;
dbg_info *dbg = get_irn_dbg_info(node);
- ir_graph *irg = current_ir_graph;
ir_node *res;
- res = new_rd_arm_SymConst(dbg, irg, block, mode, get_sc_ident(node));
+ res = new_bd_arm_SymConst(dbg, block, mode, get_sc_ident(node));
be_dep_on_frame(res);
return res;
}
src_copy = be_new_Copy(&arm_reg_classes[CLASS_arm_gp], irg, block, new_src);
dst_copy = be_new_Copy(&arm_reg_classes[CLASS_arm_gp], irg, block, new_dst);
- return new_rd_arm_CopyB(dbg, irg, block, dst_copy, src_copy,
- new_rd_arm_EmptyReg(dbg, irg, block, mode_Iu),
- new_rd_arm_EmptyReg(dbg, irg, block, mode_Iu),
- new_rd_arm_EmptyReg(dbg, irg, block, mode_Iu),
+ return new_bd_arm_CopyB(dbg, block, dst_copy, src_copy,
+ new_bd_arm_EmptyReg(dbg, block, mode_Iu),
+ new_bd_arm_EmptyReg(dbg, block, mode_Iu),
+ new_bd_arm_EmptyReg(dbg, block, mode_Iu),
new_mem, size);
}
}
cnst = create_const_graph_value(dbg, block, (unsigned)offset);
if (is_arm_Mov_i(cnst))
- return new_rd_arm_Add_i(dbg, current_ir_graph, block, new_op, mode, get_arm_imm_value(cnst));
- return new_rd_arm_Add(dbg, current_ir_graph, block, new_op, cnst, mode, ARM_SHF_NONE, 0);
+ return new_bd_arm_Add_i(dbg, block, new_op, mode, get_arm_imm_value(cnst));
+ return new_bd_arm_Add(dbg, block, new_op, cnst, mode, ARM_SHF_NONE, 0);
}
/**
ir_node *new_sz = be_transform_node(sz);
ir_node *sp = get_irn_n(node, be_pos_AddSP_old_sp);
ir_node *new_sp = be_transform_node(sp);
- ir_graph *irg = current_ir_graph;
dbg_info *dbgi = get_irn_dbg_info(node);
ir_node *nomem = new_NoMem();
ir_node *new_op;
/* ARM stack grows in reverse direction, make a SubSPandCopy */
- new_op = new_rd_arm_SubSPandCopy(dbgi, irg, block, new_sp, new_sz, nomem);
+ new_op = new_bd_arm_SubSPandCopy(dbgi, block, new_sp, new_sz, nomem);
return new_op;
}
ir_node *new_sz = be_transform_node(sz);
ir_node *sp = get_irn_n(node, be_pos_SubSP_old_sp);
ir_node *new_sp = be_transform_node(sp);
- ir_graph *irg = current_ir_graph;
dbg_info *dbgi = get_irn_dbg_info(node);
ir_node *nomem = new_NoMem();
ir_node *new_op;
/* ARM stack grows in reverse direction, make an AddSP */
- new_op = new_rd_arm_AddSP(dbgi, irg, block, new_sp, new_sz, nomem);
+ new_op = new_bd_arm_AddSP(dbgi, block, new_sp, new_sz, nomem);
return new_op;
}
*/
static ir_node *gen_Proj_tls(ir_node *node) {
ir_node *block = be_transform_node(get_nodes_block(node));
- ir_graph *irg = current_ir_graph;
dbg_info *dbgi = NULL;
- return new_rd_arm_LdTls(dbgi, irg, block, mode_Iu);
+ return new_bd_arm_LdTls(dbgi, block, mode_Iu);
}
/**
return be_duplicate_node(node);
}
-typedef ir_node *(*create_const_node_func)(dbg_info *db, ir_graph *irg, ir_node *block);
+typedef ir_node *(*create_const_node_func)(dbg_info *db, ir_node *block);
static inline ir_node *create_const(ir_node **place,
create_const_node_func func,
return *place;
block = get_irg_start_block(env_cg->irg);
- res = func(NULL, env_cg->irg, block);
+ res = func(NULL, block);
arch_set_irn_register(res, reg);
*place = res;
}
static ir_node *arm_new_Unknown_gp(void) {
- return create_const(&env_cg->unknown_gp, new_rd_arm_Unknown_GP,
+ return create_const(&env_cg->unknown_gp, new_bd_arm_Unknown_GP,
&arm_gp_regs[REG_GP_UKNWN]);
}
static ir_node *arm_new_Unknown_fpa(void) {
- return create_const(&env_cg->unknown_fpa, new_rd_arm_Unknown_FPA,
+ return create_const(&env_cg->unknown_fpa, new_bd_arm_Unknown_FPA,
&arm_fpa_regs[REG_FPA_UKNWN]);
}
ir_graph *irg = current_ir_graph;
ir_node *conv;
- conv = new_rd_arm_fpaDbl2GP(NULL, irg, bl, arg, mem);
+ conv = new_bd_arm_fpaDbl2GP(NULL, bl, arg, mem);
/* move high/low */
*resL = new_r_Proj(irg, bl, conv, mode_Is, pn_arm_fpaDbl2GP_low);
*resH = new_r_Proj(irg, bl, conv, mode_Is, pn_arm_fpaDbl2GP_high);
ip = be_new_Copy(gp, irg, block, sp);
be_set_constr_single_reg_out(ip, 0, &arm_gp_regs[REG_R12], arch_register_req_type_produces_sp);
- store = new_rd_arm_StoreStackM4Inc(NULL, irg, block, sp, fp, ip, lr, pc, *mem);
+ store = new_bd_arm_StoreStackM4Inc(NULL, block, sp, fp, ip, lr, pc, *mem);
sp = new_r_Proj(irg, block, store, env->arch_env->sp->reg_class->mode, pn_arm_StoreStackM4Inc_ptr);
arch_set_irn_register(sp, env->arch_env->sp);
be_node_set_reg_class_in(keep, 1, gp);
be_set_constr_single_reg_out(keep, 0, &arm_gp_regs[REG_R12], arch_register_req_type_produces_sp);
- fp = new_rd_arm_Sub_i(NULL, irg, block, keep, get_irn_mode(fp), 4);
+ fp = new_bd_arm_Sub_i(NULL, block, keep, get_irn_mode(fp), 4);
arch_set_irn_register(fp, env->arch_env->bp);
fp = be_new_Copy(gp, irg, block, fp); // XXX Gammelfix: only be_ have custom register requirements
be_set_constr_single_reg_out(fp, 0, env->arch_env->bp, 0);
} else {
ir_node *sub12_node;
ir_node *load_node;
- sub12_node = new_rd_arm_Sub_i(NULL, env->irg, bl, curr_bp, mode_Iu, 12);
+ sub12_node = new_bd_arm_Sub_i(NULL, bl, curr_bp, mode_Iu, 12);
// FIXME
//set_arm_req_out_all(sub12_node, sub12_req);
arch_set_irn_register(sub12_node, env->arch_env->sp);
- load_node = new_rd_arm_LoadStackM3( NULL, env->irg, bl, sub12_node, *mem );
+ load_node = new_bd_arm_LoadStackM3(NULL, bl, sub12_node, *mem);
// FIXME
//set_arm_req_out(load_node, &arm_default_req_arm_gp_r11, 0);
//set_arm_req_out(load_node, &arm_default_req_arm_gp_sp, 1);
};
-typedef ir_node *(*create_const_node_func) (dbg_info *dbg, ir_graph *irg, ir_node *block);
+typedef ir_node *(*create_const_node_func) (dbg_info *dbg, ir_node *block);
static inline ir_node *create_const(ia32_code_gen_t *cg, ir_node **place,
create_const_node_func func,
return *place;
block = get_irg_start_block(cg->irg);
- res = func(NULL, cg->irg, block);
+ res = func(NULL, block);
arch_set_irn_register(res, reg);
*place = res;
/* Creates the unique per irg GP NoReg node. */
ir_node *ia32_new_NoReg_gp(ia32_code_gen_t *cg) {
- return create_const(cg, &cg->noreg_gp, new_rd_ia32_NoReg_GP,
+ return create_const(cg, &cg->noreg_gp, new_bd_ia32_NoReg_GP,
&ia32_gp_regs[REG_GP_NOREG]);
}
ir_node *ia32_new_NoReg_vfp(ia32_code_gen_t *cg) {
- return create_const(cg, &cg->noreg_vfp, new_rd_ia32_NoReg_VFP,
+ return create_const(cg, &cg->noreg_vfp, new_bd_ia32_NoReg_VFP,
&ia32_vfp_regs[REG_VFP_NOREG]);
}
ir_node *ia32_new_NoReg_xmm(ia32_code_gen_t *cg) {
- return create_const(cg, &cg->noreg_xmm, new_rd_ia32_NoReg_XMM,
+ return create_const(cg, &cg->noreg_xmm, new_bd_ia32_NoReg_XMM,
&ia32_xmm_regs[REG_XMM_NOREG]);
}
ir_node *ia32_new_Unknown_gp(ia32_code_gen_t *cg) {
- return create_const(cg, &cg->unknown_gp, new_rd_ia32_Unknown_GP,
+ return create_const(cg, &cg->unknown_gp, new_bd_ia32_Unknown_GP,
&ia32_gp_regs[REG_GP_UKNWN]);
}
ir_node *ia32_new_Unknown_vfp(ia32_code_gen_t *cg) {
- return create_const(cg, &cg->unknown_vfp, new_rd_ia32_Unknown_VFP,
+ return create_const(cg, &cg->unknown_vfp, new_bd_ia32_Unknown_VFP,
&ia32_vfp_regs[REG_VFP_UKNWN]);
}
ir_node *ia32_new_Unknown_xmm(ia32_code_gen_t *cg) {
- return create_const(cg, &cg->unknown_xmm, new_rd_ia32_Unknown_XMM,
+ return create_const(cg, &cg->unknown_xmm, new_bd_ia32_Unknown_XMM,
&ia32_xmm_regs[REG_XMM_UKNWN]);
}
ir_node *ia32_new_Fpu_truncate(ia32_code_gen_t *cg) {
- return create_const(cg, &cg->fpu_trunc_mode, new_rd_ia32_ChangeCW,
+ return create_const(cg, &cg->fpu_trunc_mode, new_bd_ia32_ChangeCW,
&ia32_fp_cw_regs[REG_FPCW]);
}
get_Proj_proj(curr_bp), arch_env->bp, arch_register_req_type_ignore);
/* push ebp */
- push = new_rd_ia32_Push(NULL, irg, bl, noreg, noreg, *mem, curr_bp, curr_sp);
+ push = new_bd_ia32_Push(NULL, bl, noreg, noreg, *mem, curr_bp, curr_sp);
curr_sp = new_r_Proj(irg, bl, push, get_irn_mode(curr_sp), pn_ia32_Push_stack);
*mem = new_r_Proj(irg, bl, push, mode_M, pn_ia32_Push_M);
ir_node *leave;
/* leave */
- leave = new_rd_ia32_Leave(NULL, irg, bl, curr_bp);
+ leave = new_bd_ia32_Leave(NULL, bl, curr_bp);
curr_bp = new_r_Proj(irg, bl, leave, mode_bp, pn_ia32_Leave_frame);
curr_sp = new_r_Proj(irg, bl, leave, get_irn_mode(curr_sp), pn_ia32_Leave_stack);
} else {
arch_register_req_type_ignore);
/* pop ebp */
- pop = new_rd_ia32_PopEbp(NULL, env->irg, bl, *mem, curr_sp);
+ pop = new_bd_ia32_PopEbp(NULL, bl, *mem, curr_sp);
curr_bp = new_r_Proj(irg, bl, pop, mode_bp, pn_ia32_Pop_res);
curr_sp = new_r_Proj(irg, bl, pop, get_irn_mode(curr_sp), pn_ia32_Pop_stack);
* @return The inverse operation or NULL if operation invertible
*/
static arch_inverse_t *ia32_get_inverse(const ir_node *irn, int i, arch_inverse_t *inverse, struct obstack *obst) {
- ir_graph *irg;
ir_mode *mode;
ir_mode *irn_mode;
ir_node *block, *noreg, *nomem;
irn);
return NULL;
- irg = get_irn_irg(irn);
block = get_nodes_block(irn);
mode = get_irn_mode(irn);
irn_mode = get_irn_mode(irn);
if (get_ia32_immop_type(irn) == ia32_ImmConst) {
/* we have an add with a const here */
/* invers == add with negated const */
- inverse->nodes[0] = new_rd_ia32_Add(dbg, irg, block, noreg, noreg, nomem, get_irn_n(irn, i), noreg);
+ inverse->nodes[0] = new_bd_ia32_Add(dbg, block, noreg, noreg, nomem, get_irn_n(irn, i), noreg);
inverse->costs += 1;
copy_ia32_Immop_attr(inverse->nodes[0], (ir_node *)irn);
set_ia32_Immop_tarval(inverse->nodes[0], tarval_neg(get_ia32_Immop_tarval(irn)));
else if (get_ia32_immop_type(irn) == ia32_ImmSymConst) {
/* we have an add with a symconst here */
/* invers == sub with const */
- inverse->nodes[0] = new_rd_ia32_Sub(dbg, irg, block, noreg, noreg, nomem, get_irn_n(irn, i), noreg);
+ inverse->nodes[0] = new_bd_ia32_Sub(dbg, block, noreg, noreg, nomem, get_irn_n(irn, i), noreg);
inverse->costs += 2;
copy_ia32_Immop_attr(inverse->nodes[0], (ir_node *)irn);
}
else {
/* normal add: inverse == sub */
- inverse->nodes[0] = new_rd_ia32_Sub(dbg, irg, block, noreg, noreg, nomem, (ir_node*) irn, get_irn_n(irn, i ^ 1));
+ inverse->nodes[0] = new_bd_ia32_Sub(dbg, block, noreg, noreg, nomem, (ir_node*) irn, get_irn_n(irn, i ^ 1));
inverse->costs += 2;
}
#endif
if (get_ia32_immop_type(irn) != ia32_ImmNone) {
/* we have a sub with a const/symconst here */
/* invers == add with this const */
- inverse->nodes[0] = new_rd_ia32_Add(dbg, irg, block, noreg, noreg, nomem, get_irn_n(irn, i), noreg);
+ inverse->nodes[0] = new_bd_ia32_Add(dbg, block, noreg, noreg, nomem, get_irn_n(irn, i), noreg);
inverse->costs += (get_ia32_immop_type(irn) == ia32_ImmSymConst) ? 5 : 1;
copy_ia32_Immop_attr(inverse->nodes[0], (ir_node *)irn);
}
else {
/* normal sub */
if (i == n_ia32_binary_left) {
- inverse->nodes[0] = new_rd_ia32_Add(dbg, irg, block, noreg, noreg, nomem, (ir_node*) irn, get_irn_n(irn, 3));
+ inverse->nodes[0] = new_bd_ia32_Add(dbg, block, noreg, noreg, nomem, (ir_node*) irn, get_irn_n(irn, 3));
}
else {
- inverse->nodes[0] = new_rd_ia32_Sub(dbg, irg, block, noreg, noreg, nomem, get_irn_n(irn, n_ia32_binary_left), (ir_node*) irn);
+ inverse->nodes[0] = new_bd_ia32_Sub(dbg, block, noreg, noreg, nomem, get_irn_n(irn, n_ia32_binary_left), (ir_node*) irn);
}
inverse->costs += 1;
}
#if 0
if (get_ia32_immop_type(irn) != ia32_ImmNone) {
/* xor with const: inverse = xor */
- inverse->nodes[0] = new_rd_ia32_Xor(dbg, irg, block, noreg, noreg, nomem, get_irn_n(irn, i), noreg);
+ inverse->nodes[0] = new_bd_ia32_Xor(dbg, block, noreg, noreg, nomem, get_irn_n(irn, i), noreg);
inverse->costs += (get_ia32_immop_type(irn) == ia32_ImmSymConst) ? 5 : 1;
copy_ia32_Immop_attr(inverse->nodes[0], (ir_node *)irn);
}
else {
/* normal xor */
- inverse->nodes[0] = new_rd_ia32_Xor(dbg, irg, block, noreg, noreg, nomem, (ir_node *) irn, get_irn_n(irn, i));
+ inverse->nodes[0] = new_bd_ia32_Xor(dbg, block, noreg, noreg, nomem, (ir_node *) irn, get_irn_n(irn, i));
inverse->costs += 1;
}
#endif
break;
case iro_ia32_Not: {
- inverse->nodes[0] = new_rd_ia32_Not(dbg, irg, block, (ir_node*) irn);
+ inverse->nodes[0] = new_bd_ia32_Not(dbg, block, (ir_node*) irn);
inverse->costs += 1;
break;
}
case iro_ia32_Neg: {
- inverse->nodes[0] = new_rd_ia32_Neg(dbg, irg, block, (ir_node*) irn);
+ inverse->nodes[0] = new_bd_ia32_Neg(dbg, block, (ir_node*) irn);
inverse->costs += 1;
break;
}
ir_node *mem = get_irn_n(node, n_ia32_mem);
ir_node *noreg;
- ir_node *load = new_rd_ia32_Load(dbgi, irg, block, base, index, mem);
+ ir_node *load = new_bd_ia32_Load(dbgi, block, base, index, mem);
ir_node *load_res = new_rd_Proj(dbgi, irg, block, load, mode_Iu, pn_ia32_Load_res);
ia32_copy_am_attrs(load, node);
if (mode_is_float(spillmode)) {
if (ia32_cg_config.use_sse2)
- new_op = new_rd_ia32_xLoad(dbg, irg, block, ptr, noreg, mem, spillmode);
+ new_op = new_bd_ia32_xLoad(dbg, block, ptr, noreg, mem, spillmode);
else
- new_op = new_rd_ia32_vfld(dbg, irg, block, ptr, noreg, mem, spillmode);
+ new_op = new_bd_ia32_vfld(dbg, block, ptr, noreg, mem, spillmode);
}
else if (get_mode_size_bits(spillmode) == 128) {
/* Reload 128 bit SSE registers */
- new_op = new_rd_ia32_xxLoad(dbg, irg, block, ptr, noreg, mem);
+ new_op = new_bd_ia32_xxLoad(dbg, block, ptr, noreg, mem);
}
else
- new_op = new_rd_ia32_Load(dbg, irg, block, ptr, noreg, mem);
+ new_op = new_bd_ia32_Load(dbg, block, ptr, noreg, mem);
set_ia32_op_type(new_op, ia32_AddrModeS);
set_ia32_ls_mode(new_op, spillmode);
if (mode_is_float(mode)) {
if (ia32_cg_config.use_sse2)
- store = new_rd_ia32_xStore(dbg, irg, block, ptr, noreg, nomem, val);
+ store = new_bd_ia32_xStore(dbg, block, ptr, noreg, nomem, val);
else
- store = new_rd_ia32_vfst(dbg, irg, block, ptr, noreg, nomem, val, mode);
+ store = new_bd_ia32_vfst(dbg, block, ptr, noreg, nomem, val, mode);
} else if (get_mode_size_bits(mode) == 128) {
/* Spill 128 bit SSE registers */
- store = new_rd_ia32_xxStore(dbg, irg, block, ptr, noreg, nomem, val);
+ store = new_bd_ia32_xxStore(dbg, block, ptr, noreg, nomem, val);
} else if (get_mode_size_bits(mode) == 8) {
- store = new_rd_ia32_Store8Bit(dbg, irg, block, ptr, noreg, nomem, val);
+ store = new_bd_ia32_Store8Bit(dbg, block, ptr, noreg, nomem, val);
} else {
- store = new_rd_ia32_Store(dbg, irg, block, ptr, noreg, nomem, val);
+ store = new_bd_ia32_Store(dbg, block, ptr, noreg, nomem, val);
}
set_ia32_op_type(store, ia32_AddrModeD);
}
static ir_node *create_push(ia32_code_gen_t *cg, ir_node *node, ir_node *schedpoint, ir_node *sp, ir_node *mem, ir_entity *ent) {
- ir_graph *irg = get_irn_irg(node);
dbg_info *dbg = get_irn_dbg_info(node);
ir_node *block = get_nodes_block(node);
ir_node *noreg = ia32_new_NoReg_gp(cg);
+ ir_graph *irg = get_irn_irg(node);
ir_node *frame = get_irg_frame(irg);
- ir_node *push = new_rd_ia32_Push(dbg, irg, block, frame, noreg, mem, noreg, sp);
+ ir_node *push = new_bd_ia32_Push(dbg, block, frame, noreg, mem, noreg, sp);
set_ia32_frame_ent(push, ent);
set_ia32_use_frame(push);
}
static ir_node *create_pop(ia32_code_gen_t *cg, ir_node *node, ir_node *schedpoint, ir_node *sp, ir_entity *ent) {
- ir_graph *irg = get_irn_irg(node);
dbg_info *dbg = get_irn_dbg_info(node);
ir_node *block = get_nodes_block(node);
ir_node *noreg = ia32_new_NoReg_gp(cg);
+ ir_graph *irg = get_irn_irg(node);
ir_node *frame = get_irg_frame(irg);
- ir_node *pop = new_rd_ia32_PopMem(dbg, irg, block, frame, noreg, new_NoMem(), sp);
+ ir_node *pop = new_bd_ia32_PopMem(dbg, block, frame, noreg, new_NoMem(), sp);
set_ia32_frame_ent(pop, ent);
set_ia32_use_frame(pop);
return get_eip;
block = get_irg_start_block(cg->irg);
- get_eip = new_rd_ia32_GetEIP(NULL, cg->irg, block);
+ get_eip = new_bd_ia32_GetEIP(NULL, block);
cg->get_eip = get_eip;
be_dep_on_frame(get_eip);
{
ir_graph *irg = current_ir_graph;
ir_node *start_block = get_irg_start_block(irg);
- ir_node *immediate = new_rd_ia32_Immediate(NULL, irg, start_block,
- symconst, symconst_sign, val);
+ ir_node *immediate = new_bd_ia32_Immediate(NULL, start_block, symconst,
+ symconst_sign, val);
arch_set_irn_register(immediate, &ia32_gp_regs[REG_GP_NOREG]);
return immediate;
ir_node *gen_ASM(ir_node *node)
{
- ir_graph *irg = current_ir_graph;
ir_node *block = NULL;
ir_node *new_block = NULL;
dbg_info *dbgi = get_irn_dbg_info(node);
}
++reg_map_size;
- obst = get_irg_obstack(irg);
+ obst = get_irg_obstack(current_ir_graph);
register_map = NEW_ARR_D(ia32_asm_reg_t, obst, reg_map_size);
memset(register_map, 0, reg_map_size * sizeof(register_map[0]));
++out_idx;
}
- new_node = new_rd_ia32_Asm(dbgi, irg, new_block, arity, in, out_arity,
+ new_node = new_bd_ia32_Asm(dbgi, new_block, arity, in, out_arity,
get_ASM_text(node), register_map);
if (arity == 0)
ir_node *mem = NULL;
ir_node *new_mem = NULL;
ir_node *res = NULL;
- ir_graph *irg = current_ir_graph;
dbg_info *dbgi = get_irn_dbg_info(node);
int size = get_type_size_bytes(get_CopyB_type(node));
int rem;
rem = size & 0x3; /* size % 4 */
size >>= 2;
- res = new_rd_ia32_Const(dbgi, irg, block, NULL, 0, size);
+ res = new_bd_ia32_Const(dbgi, block, NULL, 0, size);
be_dep_on_frame(res);
- res = new_rd_ia32_CopyB(dbgi, irg, block, new_dst, new_src, res, new_mem, rem);
+ res = new_bd_ia32_CopyB(dbgi, block, new_dst, new_src, res, new_mem, rem);
} else {
if(size == 0) {
ir_fprintf(stderr, "Optimization warning copyb %+F with size <4\n",
node);
}
- res = new_rd_ia32_CopyB_i(dbgi, irg, block, new_dst, new_src, new_mem, size);
+ res = new_bd_ia32_CopyB_i(dbgi, block, new_dst, new_src, new_mem, size);
}
SET_IA32_ORIG_NODE(res, node);
ir_node *gen_Proj_tls(ir_node *node) {
ir_node *block = NULL;
- ir_graph *irg = current_ir_graph;
dbg_info *dbgi = NULL;
ir_node *res = NULL;
default: panic("invalid transformer");
}
- res = new_rd_ia32_LdTls(dbgi, irg, block, mode_Iu);
+ res = new_bd_ia32_LdTls(dbgi, block, mode_Iu);
return res;
}
ir_graph *irg = current_ir_graph;
dbg_info *dbgi = get_irn_dbg_info(node);
ir_node *block = get_irg_start_block(irg);
- ir_node *ret = new_rd_ia32_vfldz(dbgi, irg, block);
+ ir_node *ret = new_bd_ia32_vfldz(dbgi, block);
be_dep_on_frame(ret);
return ret;
assert(get_irn_mode(irn) != mode_T);
- res = new_rd_ia32_xXor(dbg, irg, block, noreg, noreg, nomem, in2, noreg_fp);
+ res = new_bd_ia32_xXor(dbg, block, noreg, noreg, nomem, in2, noreg_fp);
size = get_mode_size_bits(op_mode);
entity = ia32_gen_fp_known_const(size == 32 ? ia32_SSIGN : ia32_DSIGN);
set_ia32_am_sc(res, entity);
sched_add_before(irn, res);
/* generate the add */
- res = new_rd_ia32_xAdd(dbg, irg, block, noreg, noreg, nomem, res, in1);
+ res = new_bd_ia32_xAdd(dbg, block, noreg, noreg, nomem, res, in1);
set_ia32_ls_mode(res, get_ia32_ls_mode(irn));
/* exchange the add and the sub */
}
if (flags_proj == NULL) {
- res = new_rd_ia32_Neg(dbg, irg, block, in2);
+ res = new_bd_ia32_Neg(dbg, block, in2);
arch_set_irn_register(res, in2_reg);
/* add to schedule */
sched_add_before(irn, res);
/* generate the add */
- res = new_rd_ia32_Add(dbg, irg, block, noreg, noreg, nomem, res, in1);
+ res = new_bd_ia32_Add(dbg, block, noreg, noreg, nomem, res, in1);
arch_set_irn_register(res, out_reg);
set_ia32_commutative(res);
*
* a + -b = a + (~b + 1) would set the carry flag IF a == b ...
*/
- not = new_rd_ia32_Not(dbg, irg, block, in2);
+ not = new_bd_ia32_Not(dbg, block, in2);
arch_set_irn_register(not, in2_reg);
sched_add_before(irn, not);
- stc = new_rd_ia32_Stc(dbg, irg, block);
+ stc = new_bd_ia32_Stc(dbg, block);
arch_set_irn_register(stc, &ia32_flags_regs[REG_EFLAGS]);
sched_add_before(irn, stc);
- adc = new_rd_ia32_Adc(dbg, irg, block, noreg, noreg, nomem, not,
- in1, stc);
+ adc = new_bd_ia32_Adc(dbg, block, noreg, noreg, nomem, not, in1, stc);
arch_set_irn_register(adc, out_reg);
sched_add_before(irn, adc);
adc_flags = new_r_Proj(irg, block, adc, mode_Iu, pn_ia32_Adc_flags);
arch_set_irn_register(adc_flags, &ia32_flags_regs[REG_EFLAGS]);
- cmc = new_rd_ia32_Cmc(dbg, irg, block, adc_flags);
+ cmc = new_bd_ia32_Cmc(dbg, block, adc_flags);
arch_set_irn_register(cmc, &ia32_flags_regs[REG_EFLAGS]);
sched_add_before(irn, cmc);
/* we don't spill the fpcw in unsafe mode */
if(ia32_cg_config.use_unsafe_floatconv) {
- ir_graph *irg = get_irn_irg(state);
ir_node *block = get_nodes_block(state);
if(force == 1 || !is_ia32_ChangeCW(state)) {
- ir_node *spill = new_rd_ia32_FnstCWNOP(NULL, irg, block, state);
+ ir_node *spill = new_bd_ia32_FnstCWNOP(NULL, block, state);
sched_add_after(after, spill);
return spill;
}
ir_node *nomem = new_NoMem();
ir_node *frame = get_irg_frame(irg);
- spill = new_rd_ia32_FnstCW(NULL, irg, block, frame, noreg, nomem, state);
+ spill = new_bd_ia32_FnstCW(NULL, block, frame, noreg, nomem, state);
set_ia32_op_type(spill, ia32_AddrModeD);
/* use mode_Iu, as movl has a shorter opcode than movw */
set_ia32_ls_mode(spill, mode_Iu);
static ir_node *create_fldcw_ent(ia32_code_gen_t *cg, ir_node *block,
ir_entity *entity)
{
- ir_graph *irg = get_irn_irg(block);
ir_node *nomem = new_NoMem();
ir_node *noreg = ia32_new_NoReg_gp(cg);
ir_node *reload;
- reload = new_rd_ia32_FldCW(NULL, irg, block, noreg, noreg, nomem);
+ reload = new_bd_ia32_FldCW(NULL, block, noreg, noreg, nomem);
set_ia32_op_type(reload, ia32_AddrModeS);
set_ia32_ls_mode(reload, ia32_reg_classes[CLASS_ia32_fp_cw].mode);
set_ia32_am_sc(reload, entity);
}
if(spill != NULL) {
- reload = new_rd_ia32_FldCW(NULL, irg, block, frame, noreg, spill);
+ reload = new_bd_ia32_FldCW(NULL, block, frame, noreg, spill);
set_ia32_op_type(reload, ia32_AddrModeS);
set_ia32_ls_mode(reload, ia32_reg_classes[CLASS_ia32_fp_cw].mode);
set_ia32_use_frame(reload);
ir_node *or_const;
assert(last_state != NULL);
- cwstore = new_rd_ia32_FnstCW(NULL, irg, block, frame, noreg, nomem,
+ cwstore = new_bd_ia32_FnstCW(NULL, block, frame, noreg, nomem,
last_state);
set_ia32_op_type(cwstore, ia32_AddrModeD);
set_ia32_ls_mode(cwstore, lsmode);
set_ia32_use_frame(cwstore);
sched_add_before(before, cwstore);
- load = new_rd_ia32_Load(NULL, irg, block, frame, noreg, cwstore);
+ load = new_bd_ia32_Load(NULL, block, frame, noreg, cwstore);
set_ia32_op_type(load, ia32_AddrModeS);
set_ia32_ls_mode(load, lsmode);
set_ia32_use_frame(load);
load_res = new_r_Proj(irg, block, load, mode_Iu, pn_ia32_Load_res);
/* TODO: make the actual mode configurable in ChangeCW... */
- or_const = new_rd_ia32_Immediate(NULL, irg, get_irg_start_block(irg),
+ or_const = new_bd_ia32_Immediate(NULL, get_irg_start_block(irg),
NULL, 0, 3072);
arch_set_irn_register(or_const, &ia32_gp_regs[REG_GP_NOREG]);
- or = new_rd_ia32_Or(NULL, irg, block, noreg, noreg, nomem, load_res,
+ or = new_bd_ia32_Or(NULL, block, noreg, noreg, nomem, load_res,
or_const);
sched_add_before(before, or);
- store = new_rd_ia32_Store(NULL, irg, block, frame, noreg, nomem, or);
+ store = new_bd_ia32_Store(NULL, block, frame, noreg, nomem, or);
set_ia32_op_type(store, ia32_AddrModeD);
/* use mode_Iu, as movl has a shorter opcode than movw */
set_ia32_ls_mode(store, mode_Iu);
set_ia32_use_frame(store);
sched_add_before(before, store);
- fldcw = new_rd_ia32_FldCW(NULL, irg, block, frame, noreg, store);
+ fldcw = new_bd_ia32_FldCW(NULL, block, frame, noreg, store);
set_ia32_op_type(fldcw, ia32_AddrModeS);
set_ia32_ls_mode(fldcw, lsmode);
set_ia32_use_frame(fldcw);
/* l_res = a_l + b_l */
/* h_res = a_h + b_h + carry */
- add_low = new_rd_ia32_l_Add(dbg, irg, block, a_l, b_l, mode_T);
+ add_low = new_bd_ia32_l_Add(dbg, block, a_l, b_l, mode_T);
flags = new_r_Proj(irg, block, add_low, mode_flags, pn_ia32_flags);
- add_high = new_rd_ia32_l_Adc(dbg, irg, block, a_h, b_h, flags, h_mode);
+ add_high = new_bd_ia32_l_Adc(dbg, block, a_h, b_h, flags, h_mode);
l_res = new_r_Proj(irg, block, add_low, l_mode, pn_ia32_res);
h_res = add_high;
/* l_res = a_l - b_l */
/* h_res = a_h - b_h - carry */
- sub_low = new_rd_ia32_l_Sub(dbg, irg, block, a_l, b_l, mode_T);
+ sub_low = new_bd_ia32_l_Sub(dbg, block, a_l, b_l, mode_T);
flags = new_r_Proj(irg, block, sub_low, mode_flags, pn_ia32_flags);
- sub_high = new_rd_ia32_l_Sbb(dbg, irg, block, a_h, b_h, flags, h_mode);
+ sub_high = new_bd_ia32_l_Sbb(dbg, block, a_h, b_h, flags, h_mode);
l_res = new_r_Proj(irg, block, sub_low, l_mode, pn_ia32_res);
h_res = sub_high;
} else {
/* h_res = SHLD a_h, a_l, cnt */
- h_res = new_rd_ia32_l_ShlD(dbg, irg, block, a_h, a_l, cnt, h_mode);
+ h_res = new_bd_ia32_l_ShlD(dbg, block, a_h, a_l, cnt, h_mode);
/* l_res = SHL a_l, cnt */
- l_res = new_rd_ia32_l_ShlDep(dbg, irg, block, a_l, cnt, h_res, l_mode);
+ l_res = new_bd_ia32_l_ShlDep(dbg, block, a_l, cnt, h_res, l_mode);
}
resolve_call(call, l_res, h_res, irg, block);
upper = get_nodes_block(call);
/* h_res = SHLD a_h, a_l, cnt */
- h1 = new_rd_ia32_l_ShlD(dbg, irg, upper, a_h, a_l, cnt, h_mode);
+ h1 = new_bd_ia32_l_ShlD(dbg, upper, a_h, a_l, cnt, h_mode);
/* l_res = SHL a_l, cnt */
- l1 = new_rd_ia32_l_ShlDep(dbg, irg, upper, a_l, cnt, h1, l_mode);
+ l1 = new_bd_ia32_l_ShlDep(dbg, upper, a_l, cnt, h1, l_mode);
c_mode = get_irn_mode(cnt);
irn = new_r_Const_long(irg, upper, c_mode, 32);
l_res = new_rd_Shr(dbg, irg, block, conv, cnt, l_mode);
} else {
/* l_res = SHRD a_h:a_l, cnt */
- l_res = new_rd_ia32_l_ShrD(dbg, irg, block, a_l, a_h, cnt, l_mode);
+ l_res = new_bd_ia32_l_ShrD(dbg, block, a_l, a_h, cnt, l_mode);
/* h_res = SHR a_h, cnt */
- h_res = new_rd_ia32_l_ShrDep(dbg, irg, block, a_h, cnt, l_res, h_mode);
+ h_res = new_bd_ia32_l_ShrDep(dbg, block, a_h, cnt, l_res, h_mode);
}
resolve_call(call, l_res, h_res, irg, block);
return 1;
upper = get_nodes_block(call);
/* l_res = SHRD a_h:a_l, cnt */
- l1 = new_rd_ia32_l_ShrD(dbg, irg, upper, a_l, a_h, cnt, l_mode);
+ l1 = new_bd_ia32_l_ShrD(dbg, upper, a_l, a_h, cnt, l_mode);
/* h_res = SHR a_h, cnt */
- h1 = new_rd_ia32_l_ShrDep(dbg, irg, upper, a_h, cnt, l1, h_mode);
+ h1 = new_bd_ia32_l_ShrDep(dbg, upper, a_h, cnt, l1, h_mode);
c_mode = get_irn_mode(cnt);
irn = new_r_Const_long(irg, upper, c_mode, 32);
l_res = new_rd_Shrs(dbg, irg, block, conv, cnt, l_mode);
} else {
/* l_res = SHRD a_h:a_l, cnt */
- l_res = new_rd_ia32_l_ShrD(dbg, irg, block, a_l, a_h, cnt, l_mode);
+ l_res = new_bd_ia32_l_ShrD(dbg, block, a_l, a_h, cnt, l_mode);
/* h_res = SAR a_h, cnt */
- h_res = new_rd_ia32_l_SarDep(dbg, irg, block, a_h, cnt, l_res, h_mode);
+ h_res = new_bd_ia32_l_SarDep(dbg, block, a_h, cnt, l_res, h_mode);
}
resolve_call(call, l_res, h_res, irg, block);
return 1;
upper = get_nodes_block(call);
/* l_res = SHRD a_h:a_l, cnt */
- l1 = new_rd_ia32_l_ShrD(dbg, irg, upper, a_l, a_h, cnt, l_mode);
+ l1 = new_bd_ia32_l_ShrD(dbg, upper, a_l, a_h, cnt, l_mode);
/* h_res = SAR a_h, cnt */
- h1 = new_rd_ia32_l_SarDep(dbg, irg, upper, a_h, cnt, l1, h_mode);
+ h1 = new_bd_ia32_l_SarDep(dbg, upper, a_h, cnt, l1, h_mode);
c_mode = get_irn_mode(cnt);
irn = new_r_Const_long(irg, upper, c_mode, 32);
/* handle the often used case of 32x32=64 mul */
if (is_sign_extend(a_l, a_h) && is_sign_extend(b_l, b_h)) {
- mul = new_rd_ia32_l_IMul(dbg, irg, block, a_l, b_l);
+ mul = new_bd_ia32_l_IMul(dbg, block, a_l, b_l);
h_res = new_rd_Proj(dbg, irg, block, mul, h_mode, pn_ia32_l_Mul_EDX);
l_res = new_rd_Proj(dbg, irg, block, mul, l_mode, pn_ia32_l_Mul_EAX);
goto end;
}
- mul = new_rd_ia32_l_Mul(dbg, irg, block, a_l, b_l);
+ mul = new_bd_ia32_l_Mul(dbg, block, a_l, b_l);
pEDX = new_rd_Proj(dbg, irg, block, mul, h_mode, pn_ia32_l_Mul_EDX);
l_res = new_rd_Proj(dbg, irg, block, mul, l_mode, pn_ia32_l_Mul_EAX);
ir_node *l_res, *h_res, *res;
(void) ctx;
- res = new_rd_ia32_Minus64Bit(dbg, irg, block, a_l, a_h);
+ res = new_bd_ia32_Minus64Bit(dbg, block, a_l, a_h);
l_res = new_r_Proj(irg, block, res, l_mode, pn_ia32_Minus64Bit_low_res);
h_res = new_r_Proj(irg, block, res, h_mode, pn_ia32_Minus64Bit_high_res);
sub_l = new_rd_Eor(dbg, irg, block, a_l, sign_l, l_mode);
sub_h = new_rd_Eor(dbg, irg, block, a_h, sign, h_mode);
- l_sub = new_rd_ia32_l_Sub(dbg, irg, block, sub_l, sign_l, mode_T);
+ l_sub = new_bd_ia32_l_Sub(dbg, block, sub_l, sign_l, mode_T);
l_res = new_r_Proj(irg, block, l_sub, l_mode, pn_ia32_res);
flags = new_r_Proj(irg, block, l_sub, mode_flags, pn_ia32_flags);
- h_res = new_rd_ia32_l_Sbb(dbg, irg, block, sub_h, sign, flags, h_mode);
+ h_res = new_bd_ia32_l_Sbb(dbg, block, sub_h, sign, flags, h_mode);
resolve_call(call, l_res, h_res, irg, block);
assert(mode_is_float(get_irn_mode(a_f)) && "unexpected Conv call");
- float_to_ll = new_rd_ia32_l_FloattoLL(dbg, irg, block, a_f);
+ float_to_ll = new_bd_ia32_l_FloattoLL(dbg, block, a_f);
l_res = new_r_Proj(irg, block, float_to_ll, l_res_mode,
pn_ia32_l_FloattoLL_res_low);
assert(! mode_is_float(get_irn_mode(a_l))
&& ! mode_is_float(get_irn_mode(a_h)));
- ll_to_float = new_rd_ia32_l_LLtoFloat(dbg, irg, block, a_h, a_l,
- fres_mode);
+ ll_to_float = new_bd_ia32_l_LLtoFloat(dbg, block, a_h, a_l, fres_mode);
/* lower the call */
resolve_call(call, ll_to_float, NULL, irg, block);
ir_node *right;
ia32_immediate_attr_t const *imm;
dbg_info *dbgi;
- ir_graph *irg;
ir_node *block;
ir_node *noreg;
ir_node *nomem;
return;
dbgi = get_irn_dbg_info(node);
- irg = current_ir_graph;
block = get_nodes_block(node);
noreg = ia32_new_NoReg_gp(cg);
- nomem = get_irg_no_mem(irg);
+ nomem = get_irg_no_mem(current_ir_graph);
op = get_irn_n(node, n_ia32_Cmp_left);
attr = get_irn_generic_attr(node);
ins_permuted = attr->data.ins_permuted;
cmp_unsigned = attr->data.cmp_unsigned;
if (is_ia32_Cmp(node)) {
- test = new_rd_ia32_Test(dbgi, irg, block, noreg, noreg, nomem,
+ test = new_bd_ia32_Test(dbgi, block, noreg, noreg, nomem,
op, op, ins_permuted, cmp_unsigned);
} else {
- test = new_rd_ia32_Test8Bit(dbgi, irg, block, noreg, noreg, nomem,
+ test = new_bd_ia32_Test8Bit(dbgi, block, noreg, noreg, nomem,
op, op, ins_permuted, cmp_unsigned);
}
set_ia32_ls_mode(test, get_ia32_ls_mode(node));
mem = get_irn_n(store, n_ia32_mem);
spreg = arch_get_irn_register(curr_sp);
- push = new_rd_ia32_Push(get_irn_dbg_info(store), irg, block, noreg, noreg, mem, val, curr_sp);
+ push = new_bd_ia32_Push(get_irn_dbg_info(store), block, noreg, noreg, mem, val, curr_sp);
copy_mark(store, push);
if (first_push == NULL)
noreg = ia32_new_NoReg_gp(cg);
val = get_irn_n(store, n_ia32_Store_val);
- push = new_rd_ia32_Push(dbgi, irg, block, noreg, noreg, mem,
+ push = new_bd_ia32_Push(dbgi, block, noreg, noreg, mem,
create_push(dbgi, current_ir_graph, block, am_base, store);
}
mem = get_irn_n(load, n_ia32_mem);
reg = arch_irn_get_register(load, pn_ia32_Load_res);
- pop = new_rd_ia32_Pop(get_irn_dbg_info(load), irg, block, mem, pred_sp);
+ pop = new_bd_ia32_Pop(get_irn_dbg_info(load), block, mem, pred_sp);
arch_irn_set_register(pop, pn_ia32_Load_res, reg);
copy_mark(load, pop);
ir_node *val;
ir_node *in[1];
- pop = new_rd_ia32_Pop(dbgi, irg, block, new_NoMem(), stack);
+ pop = new_bd_ia32_Pop(dbgi, block, new_NoMem(), stack);
stack = new_r_Proj(irg, block, pop, mode_Iu, pn_ia32_Pop_stack);
arch_set_irn_register(stack, esp);
ir_node *val = ia32_new_Unknown_gp(cg);
ir_node *noreg = ia32_new_NoReg_gp(cg);
ir_node *nomem = get_irg_no_mem(irg);
- ir_node *push = new_rd_ia32_Push(dbgi, irg, block, noreg, noreg, nomem, val, stack);
+ ir_node *push = new_bd_ia32_Push(dbgi, block, noreg, noreg, nomem, val, stack);
sched_add_before(schedpoint, push);
stack = new_r_Proj(irg, block, push, mode_Iu, pn_ia32_Push_stack);
{
const ia32_immediate_attr_t *attr = get_ia32_immediate_attr_const(node);
const arch_register_t *reg;
- ir_graph *irg = current_ir_graph;
ir_node *block;
dbg_info *dbgi;
ir_node *produceval;
/* create xor(produceval, produceval) */
block = get_nodes_block(node);
dbgi = get_irn_dbg_info(node);
- produceval = new_rd_ia32_ProduceVal(dbgi, irg, block);
+ produceval = new_bd_ia32_ProduceVal(dbgi, block);
arch_set_irn_register(produceval, reg);
noreg = ia32_new_NoReg_gp(cg);
- xor = new_rd_ia32_Xor(dbgi, irg, block, noreg, noreg, new_NoMem(),
- produceval, produceval);
+ xor = new_bd_ia32_Xor(dbgi, block, noreg, noreg, new_NoMem(), produceval,
+ produceval);
arch_set_irn_register(xor, reg);
sched_add_before(node, produceval);
{
ir_graph *irg = current_ir_graph;
ir_node *start_block = get_irg_start_block(irg);
- ir_node *immediate = new_rd_ia32_Immediate(NULL, irg, start_block, NULL,
- 0, val);
+ ir_node *immediate = new_bd_ia32_Immediate(NULL, start_block, NULL, 0,
+ val);
arch_set_irn_register(immediate, &ia32_gp_regs[REG_GP_NOREG]);
return immediate;
static ir_node *create_immediate_from_am(const ir_node *node)
{
- ir_graph *irg = get_irn_irg(node);
ir_node *block = get_nodes_block(node);
int offset = get_ia32_am_offs_int(node);
int sc_sign = is_ia32_am_sc_sign(node);
ir_entity *entity = get_ia32_am_sc(node);
ir_node *res;
- res = new_rd_ia32_Immediate(NULL, irg, block, entity, sc_sign, offset);
+ res = new_bd_ia32_Immediate(NULL, block, entity, sc_sign, offset);
arch_set_irn_register(res, &ia32_gp_regs[REG_GP_NOREG]);
return res;
}
*/
static void peephole_ia32_Lea(ir_node *node)
{
- ir_graph *irg = current_ir_graph;
ir_node *base;
ir_node *index;
const arch_register_t *base_reg;
if(is_am_one(node)) {
dbgi = get_irn_dbg_info(node);
block = get_nodes_block(node);
- res = new_rd_ia32_Inc(dbgi, irg, block, op1);
+ res = new_bd_ia32_Inc(dbgi, block, op1);
arch_set_irn_register(res, out_reg);
goto exchange;
}
if(is_am_minus_one(node)) {
dbgi = get_irn_dbg_info(node);
block = get_nodes_block(node);
- res = new_rd_ia32_Dec(dbgi, irg, block, op1);
+ res = new_bd_ia32_Dec(dbgi, block, op1);
arch_set_irn_register(res, out_reg);
goto exchange;
}
block = get_nodes_block(node);
noreg = ia32_new_NoReg_gp(cg);
nomem = new_NoMem();
- res = new_rd_ia32_Add(dbgi, irg, block, noreg, noreg, nomem, op1, op2);
+ res = new_bd_ia32_Add(dbgi, block, noreg, noreg, nomem, op1, op2);
arch_set_irn_register(res, out_reg);
set_ia32_commutative(res);
goto exchange;
block = get_nodes_block(node);
noreg = ia32_new_NoReg_gp(cg);
nomem = new_NoMem();
- res = new_rd_ia32_Shl(dbgi, irg, block, op1, op2);
+ res = new_bd_ia32_Shl(dbgi, block, op1, op2);
arch_set_irn_register(res, out_reg);
goto exchange;
extern ir_op *get_op_Mulh(void);
-typedef ir_node *construct_binop_func(dbg_info *db, ir_graph *irg,
- ir_node *block, ir_node *base, ir_node *index, ir_node *mem,
- ir_node *op1, ir_node *op2);
+typedef ir_node *construct_binop_func(dbg_info *db, ir_node *block,
+ ir_node *base, ir_node *index, ir_node *mem, ir_node *op1,
+ ir_node *op2);
-typedef ir_node *construct_binop_flags_func(dbg_info *db, ir_graph *irg,
- ir_node *block, ir_node *base, ir_node *index, ir_node *mem,
- ir_node *op1, ir_node *op2, ir_node *flags);
+typedef ir_node *construct_binop_flags_func(dbg_info *db, ir_node *block,
+ ir_node *base, ir_node *index, ir_node *mem, ir_node *op1, ir_node *op2,
+ ir_node *flags);
-typedef ir_node *construct_shift_func(dbg_info *db, ir_graph *irg,
- ir_node *block, ir_node *op1, ir_node *op2);
+typedef ir_node *construct_shift_func(dbg_info *db, ir_node *block,
+ ir_node *op1, ir_node *op2);
-typedef ir_node *construct_binop_dest_func(dbg_info *db, ir_graph *irg,
- ir_node *block, ir_node *base, ir_node *index, ir_node *mem,
- ir_node *op);
+typedef ir_node *construct_binop_dest_func(dbg_info *db, ir_node *block,
+ ir_node *base, ir_node *index, ir_node *mem, ir_node *op);
-typedef ir_node *construct_unop_dest_func(dbg_info *db, ir_graph *irg,
- ir_node *block, ir_node *base, ir_node *index, ir_node *mem);
+typedef ir_node *construct_unop_dest_func(dbg_info *db, ir_node *block,
+ ir_node *base, ir_node *index, ir_node *mem);
-typedef ir_node *construct_binop_float_func(dbg_info *db, ir_graph *irg,
- ir_node *block, ir_node *base, ir_node *index, ir_node *mem,
- ir_node *op1, ir_node *op2, ir_node *fpcw);
+typedef ir_node *construct_binop_float_func(dbg_info *db, ir_node *block,
+ ir_node *base, ir_node *index, ir_node *mem, ir_node *op1, ir_node *op2,
+ ir_node *fpcw);
-typedef ir_node *construct_unop_func(dbg_info *db, ir_graph *irg,
- ir_node *block, ir_node *op);
+typedef ir_node *construct_unop_func(dbg_info *db, ir_node *block, ir_node *op);
static ir_node *create_immediate_or_transform(ir_node *node,
char immediate_constraint_type);
*/
static ir_node *gen_Const(ir_node *node)
{
- ir_graph *irg = current_ir_graph;
- ir_node *old_block = get_nodes_block(node);
- ir_node *block = be_transform_node(old_block);
- dbg_info *dbgi = get_irn_dbg_info(node);
- ir_mode *mode = get_irn_mode(node);
+ ir_node *old_block = get_nodes_block(node);
+ ir_node *block = be_transform_node(old_block);
+ dbg_info *dbgi = get_irn_dbg_info(node);
+ ir_mode *mode = get_irn_mode(node);
assert(is_Const(node));
if (ia32_cg_config.use_sse2) {
tarval *tv = get_Const_tarval(node);
if (tarval_is_null(tv)) {
- load = new_rd_ia32_xZero(dbgi, irg, block);
+ load = new_bd_ia32_xZero(dbgi, block);
set_ia32_ls_mode(load, mode);
res = load;
} else if (tarval_is_one(tv)) {
ir_node *imm2 = create_Immediate(NULL, 0, 2);
ir_node *pslld, *psrld;
- load = new_rd_ia32_xAllOnes(dbgi, irg, block);
+ load = new_bd_ia32_xAllOnes(dbgi, block);
set_ia32_ls_mode(load, mode);
- pslld = new_rd_ia32_xPslld(dbgi, irg, block, load, imm1);
+ pslld = new_bd_ia32_xPslld(dbgi, block, load, imm1);
set_ia32_ls_mode(pslld, mode);
- psrld = new_rd_ia32_xPsrld(dbgi, irg, block, pslld, imm2);
+ psrld = new_bd_ia32_xPsrld(dbgi, block, pslld, imm2);
set_ia32_ls_mode(psrld, mode);
res = psrld;
} else if (mode == mode_F) {
(get_tarval_sub_bits(tv, 1) << 8) |
(get_tarval_sub_bits(tv, 2) << 16) |
(get_tarval_sub_bits(tv, 3) << 24);
- ir_node *cnst = new_rd_ia32_Const(dbgi, irg, block, NULL, 0, val);
- load = new_rd_ia32_xMovd(dbgi, irg, block, cnst);
+ ir_node *cnst = new_bd_ia32_Const(dbgi, block, NULL, 0, val);
+ load = new_bd_ia32_xMovd(dbgi, block, cnst);
set_ia32_ls_mode(load, mode);
res = load;
} else {
(get_tarval_sub_bits(tv, 5) << 8) |
(get_tarval_sub_bits(tv, 6) << 16) |
(get_tarval_sub_bits(tv, 7) << 24);
- cnst = new_rd_ia32_Const(dbgi, irg, block, NULL, 0, val);
- load = new_rd_ia32_xMovd(dbgi, irg, block, cnst);
+ cnst = new_bd_ia32_Const(dbgi, block, NULL, 0, val);
+ load = new_bd_ia32_xMovd(dbgi, block, cnst);
set_ia32_ls_mode(load, mode);
- psllq = new_rd_ia32_xPsllq(dbgi, irg, block, load, imm32);
+ psllq = new_bd_ia32_xPsllq(dbgi, block, load, imm32);
set_ia32_ls_mode(psllq, mode);
res = psllq;
goto end;
}
floatent = create_float_const_entity(node);
- load = new_rd_ia32_xLoad(dbgi, irg, block, noreg, noreg, nomem,
+ load = new_bd_ia32_xLoad(dbgi, block, noreg, noreg, nomem,
mode);
set_ia32_op_type(load, ia32_AddrModeS);
set_ia32_am_sc(load, floatent);
arch_irn_add_flags(load, arch_irn_flags_rematerializable);
- res = new_r_Proj(irg, block, load, mode_xmm, pn_ia32_xLoad_res);
+ res = new_r_Proj(current_ir_graph, block, load, mode_xmm, pn_ia32_xLoad_res);
}
} else {
if (is_Const_null(node)) {
- load = new_rd_ia32_vfldz(dbgi, irg, block);
+ load = new_bd_ia32_vfldz(dbgi, block);
res = load;
set_ia32_ls_mode(load, mode);
} else if (is_Const_one(node)) {
- load = new_rd_ia32_vfld1(dbgi, irg, block);
+ load = new_bd_ia32_vfld1(dbgi, block);
res = load;
set_ia32_ls_mode(load, mode);
} else {
floatent = create_float_const_entity(node);
- load = new_rd_ia32_vfld(dbgi, irg, block, noreg, noreg, nomem, mode);
+ load = new_bd_ia32_vfld(dbgi, block, noreg, noreg, nomem, mode);
set_ia32_op_type(load, ia32_AddrModeS);
set_ia32_am_sc(load, floatent);
arch_irn_add_flags(load, arch_irn_flags_rematerializable);
- res = new_r_Proj(irg, block, load, mode_vfp, pn_ia32_vfld_res);
+ res = new_r_Proj(current_ir_graph, block, load, mode_vfp, pn_ia32_vfld_res);
/* take the mode from the entity */
set_ia32_ls_mode(load, get_type_mode(get_entity_type(floatent)));
}
}
val = get_tarval_long(tv);
- cnst = new_rd_ia32_Const(dbgi, irg, block, NULL, 0, val);
+ cnst = new_bd_ia32_Const(dbgi, block, NULL, 0, val);
SET_IA32_ORIG_NODE(cnst, node);
be_dep_on_frame(cnst);
*/
static ir_node *gen_SymConst(ir_node *node)
{
- ir_graph *irg = current_ir_graph;
ir_node *old_block = get_nodes_block(node);
ir_node *block = be_transform_node(old_block);
dbg_info *dbgi = get_irn_dbg_info(node);
ir_node *nomem = new_NoMem();
if (ia32_cg_config.use_sse2)
- cnst = new_rd_ia32_xLoad(dbgi, irg, block, noreg, noreg, nomem, mode_E);
+ cnst = new_bd_ia32_xLoad(dbgi, block, noreg, noreg, nomem, mode_E);
else
- cnst = new_rd_ia32_vfld(dbgi, irg, block, noreg, noreg, nomem, mode_E);
+ cnst = new_bd_ia32_vfld(dbgi, block, noreg, noreg, nomem, mode_E);
set_ia32_am_sc(cnst, get_SymConst_entity(node));
set_ia32_use_frame(cnst);
} else {
panic("backend only support symconst_addr_ent (at %+F)", node);
}
entity = get_SymConst_entity(node);
- cnst = new_rd_ia32_Const(dbgi, irg, block, entity, 0, 0);
+ cnst = new_bd_ia32_Const(dbgi, block, entity, 0, 0);
}
SET_IA32_ORIG_NODE(cnst, node);
dbgi = get_irn_dbg_info(node);
new_block = be_transform_node(block);
- new_node = func(dbgi, current_ir_graph, new_block,
- addr->base, addr->index, addr->mem,
- am.new_op1, am.new_op2);
+ new_node = func(dbgi, new_block, addr->base, addr->index, addr->mem,
+ am.new_op1, am.new_op2);
set_am_attributes(new_node, &am);
/* we can't use source address mode anymore when using immediates */
if (!(flags & match_am_and_immediates) &&
dbgi = get_irn_dbg_info(node);
block = be_transform_node(src_block);
new_eflags = be_transform_node(eflags);
- new_node = func(dbgi, current_ir_graph, block, addr->base, addr->index,
- addr->mem, am.new_op1, am.new_op2, new_eflags);
+ new_node = func(dbgi, block, addr->base, addr->index, addr->mem,
+ am.new_op1, am.new_op2, new_eflags);
set_am_attributes(new_node, &am);
/* we can't use source address mode anymore when using immediates */
if (!(flags & match_am_and_immediates) &&
dbgi = get_irn_dbg_info(node);
new_block = be_transform_node(block);
- new_node = func(dbgi, current_ir_graph, new_block,
- addr->base, addr->index, addr->mem,
- am.new_op1, am.new_op2, get_fpcw());
+ new_node = func(dbgi, new_block, addr->base, addr->index, addr->mem,
+ am.new_op1, am.new_op2, get_fpcw());
set_am_attributes(new_node, &am);
attr = get_ia32_x87_attr(new_node);
dbgi = get_irn_dbg_info(node);
block = get_nodes_block(node);
new_block = be_transform_node(block);
- new_node = func(dbgi, current_ir_graph, new_block, new_op1, new_op2);
+ new_node = func(dbgi, new_block, new_op1, new_op2);
SET_IA32_ORIG_NODE(new_node, node);
/* lowered shift instruction may have a dependency operand, handle it here */
dbgi = get_irn_dbg_info(node);
block = get_nodes_block(node);
new_block = be_transform_node(block);
- new_node = func(dbgi, current_ir_graph, new_block, new_op);
+ new_node = func(dbgi, new_block, new_op);
SET_IA32_ORIG_NODE(new_node, node);
index = be_transform_node(index);
}
- res = new_rd_ia32_Lea(dbgi, current_ir_graph, block, base, index);
+ res = new_bd_ia32_Lea(dbgi, block, base, index);
set_address(res, addr);
return res;
if (mode_is_float(mode)) {
if (ia32_cg_config.use_sse2)
- return gen_binop(node, op1, op2, new_rd_ia32_xAdd,
+ return gen_binop(node, op1, op2, new_bd_ia32_xAdd,
match_commutative | match_am);
else
- return gen_binop_x87_float(node, op1, op2, new_rd_ia32_vfadd);
+ return gen_binop_x87_float(node, op1, op2, new_bd_ia32_vfadd);
}
ia32_mark_non_am(node);
/* a constant? */
if (addr.base == NULL && addr.index == NULL) {
- ir_graph *irg = current_ir_graph;
- new_node = new_rd_ia32_Const(dbgi, irg, new_block, addr.symconst_ent,
+ new_node = new_bd_ia32_Const(dbgi, new_block, addr.symconst_ent,
addr.symconst_sign, addr.offset);
be_dep_on_frame(new_node);
SET_IA32_ORIG_NODE(new_node, node);
/* construct an Add with source address mode */
if (am.op_type == ia32_AddrModeS) {
- ir_graph *irg = current_ir_graph;
ia32_address_t *am_addr = &am.addr;
- new_node = new_rd_ia32_Add(dbgi, irg, new_block, am_addr->base,
+ new_node = new_bd_ia32_Add(dbgi, new_block, am_addr->base,
am_addr->index, am_addr->mem, am.new_op1,
am.new_op2);
set_am_attributes(new_node, &am);
if (mode_is_float(mode)) {
if (ia32_cg_config.use_sse2)
- return gen_binop(node, op1, op2, new_rd_ia32_xMul,
+ return gen_binop(node, op1, op2, new_bd_ia32_xMul,
match_commutative | match_am);
else
- return gen_binop_x87_float(node, op1, op2, new_rd_ia32_vfmul);
+ return gen_binop_x87_float(node, op1, op2, new_bd_ia32_vfmul);
}
- return gen_binop(node, op1, op2, new_rd_ia32_IMul,
+ return gen_binop(node, op1, op2, new_bd_ia32_IMul,
match_commutative | match_am | match_mode_neutral |
match_immediate | match_am_and_immediates);
}
ir_node *proj_res_high;
if (mode_is_signed(mode)) {
- new_node = gen_binop(node, op1, op2, new_rd_ia32_IMul1OP, match_commutative | match_am);
+ new_node = gen_binop(node, op1, op2, new_bd_ia32_IMul1OP, match_commutative | match_am);
proj_res_high = new_rd_Proj(dbgi, current_ir_graph, new_block, new_node,
mode_Iu, pn_ia32_IMul1OP_res_high);
} else {
- new_node = gen_binop(node, op1, op2, new_rd_ia32_Mul, match_commutative | match_am);
+ new_node = gen_binop(node, op1, op2, new_bd_ia32_Mul, match_commutative | match_am);
proj_res_high = new_rd_Proj(dbgi, current_ir_graph, new_block, new_node,
mode_Iu, pn_ia32_Mul_res_high);
}
return res;
}
}
- return gen_binop(node, op1, op2, new_rd_ia32_And,
- match_commutative | match_mode_neutral | match_am
- | match_immediate);
+ return gen_binop(node, op1, op2, new_bd_ia32_And,
+ match_commutative | match_mode_neutral | match_am | match_immediate);
}
ir_node *op2 = get_Or_right(node);
assert (! mode_is_float(get_irn_mode(node)));
- return gen_binop(node, op1, op2, new_rd_ia32_Or, match_commutative
+ return gen_binop(node, op1, op2, new_bd_ia32_Or, match_commutative
| match_mode_neutral | match_am | match_immediate);
}
ir_node *op2 = get_Eor_right(node);
assert(! mode_is_float(get_irn_mode(node)));
- return gen_binop(node, op1, op2, new_rd_ia32_Xor, match_commutative
+ return gen_binop(node, op1, op2, new_bd_ia32_Xor, match_commutative
| match_mode_neutral | match_am | match_immediate);
}
if (mode_is_float(mode)) {
if (ia32_cg_config.use_sse2)
- return gen_binop(node, op1, op2, new_rd_ia32_xSub, match_am);
+ return gen_binop(node, op1, op2, new_bd_ia32_xSub, match_am);
else
- return gen_binop_x87_float(node, op1, op2, new_rd_ia32_vfsub);
+ return gen_binop_x87_float(node, op1, op2, new_bd_ia32_vfsub);
}
if (is_Const(op2)) {
node);
}
- return gen_binop(node, op1, op2, new_rd_ia32_Sub, match_mode_neutral
+ return gen_binop(node, op1, op2, new_bd_ia32_Sub, match_mode_neutral
| match_am | match_immediate);
}
}
}
-static ir_node *create_sex_32_64(dbg_info *dbgi, ir_graph *irg, ir_node *block,
+static ir_node *create_sex_32_64(dbg_info *dbgi, ir_node *block,
ir_node *val, const ir_node *orig)
{
ir_node *res;
(void)orig;
if (ia32_cg_config.use_short_sex_eax) {
- ir_node *pval = new_rd_ia32_ProduceVal(dbgi, irg, block);
+ ir_node *pval = new_bd_ia32_ProduceVal(dbgi, block);
be_dep_on_frame(pval);
- res = new_rd_ia32_Cltd(dbgi, irg, block, val, pval);
+ res = new_bd_ia32_Cltd(dbgi, block, val, pval);
} else {
ir_node *imm31 = create_Immediate(NULL, 0, 31);
- res = new_rd_ia32_Sar(dbgi, irg, block, val, imm31);
+ res = new_bd_ia32_Sar(dbgi, block, val, imm31);
}
SET_IA32_ORIG_NODE(res, orig);
return res;
*/
static ir_node *create_Div(ir_node *node)
{
- ir_graph *irg = current_ir_graph;
dbg_info *dbgi = get_irn_dbg_info(node);
ir_node *block = get_nodes_block(node);
ir_node *new_block = be_transform_node(block);
/* Beware: We don't need a Sync, if the memory predecessor of the Div node
is the memory of the consumed address. We can have only the second op as address
in Div nodes, so check only op2. */
- new_mem = transform_AM_mem(irg, block, op2, mem, addr->mem);
+ new_mem = transform_AM_mem(current_ir_graph, block, op2, mem, addr->mem);
if (mode_is_signed(mode)) {
- sign_extension = create_sex_32_64(dbgi, irg, new_block, am.new_op1, node);
- new_node = new_rd_ia32_IDiv(dbgi, irg, new_block, addr->base,
+ sign_extension = create_sex_32_64(dbgi, new_block, am.new_op1, node);
+ new_node = new_bd_ia32_IDiv(dbgi, new_block, addr->base,
addr->index, new_mem, am.new_op2, am.new_op1, sign_extension);
} else {
- sign_extension = new_rd_ia32_Const(dbgi, irg, new_block, NULL, 0, 0);
+ sign_extension = new_bd_ia32_Const(dbgi, new_block, NULL, 0, 0);
be_dep_on_frame(sign_extension);
- new_node = new_rd_ia32_Div(dbgi, irg, new_block, addr->base,
+ new_node = new_bd_ia32_Div(dbgi, new_block, addr->base,
addr->index, new_mem, am.new_op2,
am.new_op1, sign_extension);
}
ir_node *op2 = get_Quot_right(node);
if (ia32_cg_config.use_sse2) {
- return gen_binop(node, op1, op2, new_rd_ia32_xDiv, match_am);
+ return gen_binop(node, op1, op2, new_bd_ia32_xDiv, match_am);
} else {
- return gen_binop_x87_float(node, op1, op2, new_rd_ia32_vfdiv);
+ return gen_binop_x87_float(node, op1, op2, new_bd_ia32_vfdiv);
}
}
ir_node *left = get_Shl_left(node);
ir_node *right = get_Shl_right(node);
- return gen_shift_binop(node, left, right, new_rd_ia32_Shl,
+ return gen_shift_binop(node, left, right, new_bd_ia32_Shl,
match_mode_neutral | match_immediate);
}
ir_node *left = get_Shr_left(node);
ir_node *right = get_Shr_right(node);
- return gen_shift_binop(node, left, right, new_rd_ia32_Shr, match_immediate);
+ return gen_shift_binop(node, left, right, new_bd_ia32_Shr, match_immediate);
}
ir_node *block = be_transform_node(get_nodes_block(node));
ir_node *new_op = be_transform_node(left);
- return create_sex_32_64(dbgi, current_ir_graph, block, new_op, node);
+ return create_sex_32_64(dbgi, block, new_op, node);
}
}
}
}
- return gen_shift_binop(node, left, right, new_rd_ia32_Sar, match_immediate);
+ return gen_shift_binop(node, left, right, new_bd_ia32_Sar, match_immediate);
}
*/
static ir_node *gen_Rol(ir_node *node, ir_node *op1, ir_node *op2)
{
- return gen_shift_binop(node, op1, op2, new_rd_ia32_Rol, match_immediate);
+ return gen_shift_binop(node, op1, op2, new_bd_ia32_Rol, match_immediate);
}
*/
static ir_node *gen_Ror(ir_node *node, ir_node *op1, ir_node *op2)
{
- return gen_shift_binop(node, op1, op2, new_rd_ia32_Ror, match_immediate);
+ return gen_shift_binop(node, op1, op2, new_bd_ia32_Ror, match_immediate);
}
{
ir_node *op = get_Minus_op(node);
ir_node *block = be_transform_node(get_nodes_block(node));
- ir_graph *irg = current_ir_graph;
dbg_info *dbgi = get_irn_dbg_info(node);
ir_mode *mode = get_irn_mode(node);
ir_entity *ent;
ir_node *noreg_xmm = ia32_new_NoReg_xmm(env_cg);
ir_node *nomem = new_NoMem();
- new_node = new_rd_ia32_xXor(dbgi, irg, block, noreg_gp, noreg_gp,
+ new_node = new_bd_ia32_xXor(dbgi, block, noreg_gp, noreg_gp,
nomem, new_op, noreg_xmm);
size = get_mode_size_bits(mode);
set_ia32_op_type(new_node, ia32_AddrModeS);
set_ia32_ls_mode(new_node, mode);
} else {
- new_node = new_rd_ia32_vfchs(dbgi, irg, block, new_op);
+ new_node = new_bd_ia32_vfchs(dbgi, block, new_op);
}
} else {
- new_node = gen_unop(node, op, new_rd_ia32_Neg, match_mode_neutral);
+ new_node = gen_unop(node, op, new_bd_ia32_Neg, match_mode_neutral);
}
SET_IA32_ORIG_NODE(new_node, node);
assert(get_irn_mode(node) != mode_b); /* should be lowered already */
assert (! mode_is_float(get_irn_mode(node)));
- return gen_unop(node, op, new_rd_ia32_Not, match_mode_neutral);
+ return gen_unop(node, op, new_bd_ia32_Not, match_mode_neutral);
}
ir_node *block = get_nodes_block(node);
ir_node *new_block = be_transform_node(block);
ir_node *op = get_Abs_op(node);
- ir_graph *irg = current_ir_graph;
dbg_info *dbgi = get_irn_dbg_info(node);
ir_mode *mode = get_irn_mode(node);
ir_node *noreg_gp = ia32_new_NoReg_gp(env_cg);
if (ia32_cg_config.use_sse2) {
ir_node *noreg_fp = ia32_new_NoReg_xmm(env_cg);
- new_node = new_rd_ia32_xAnd(dbgi,irg, new_block, noreg_gp, noreg_gp,
+ new_node = new_bd_ia32_xAnd(dbgi, new_block, noreg_gp, noreg_gp,
nomem, new_op, noreg_fp);
size = get_mode_size_bits(mode);
set_ia32_op_type(new_node, ia32_AddrModeS);
set_ia32_ls_mode(new_node, mode);
} else {
- new_node = new_rd_ia32_vfabs(dbgi, irg, new_block, new_op);
+ new_node = new_bd_ia32_vfabs(dbgi, new_block, new_op);
SET_IA32_ORIG_NODE(new_node, node);
}
} else {
new_op = create_I2I_Conv(mode, mode_Is, dbgi, block, op, node);
}
- sign_extension = create_sex_32_64(dbgi, irg, new_block, new_op, node);
+ sign_extension = create_sex_32_64(dbgi, new_block, new_op, node);
- xor = new_rd_ia32_Xor(dbgi, irg, new_block, noreg_gp, noreg_gp,
+ xor = new_bd_ia32_Xor(dbgi, new_block, noreg_gp, noreg_gp,
nomem, new_op, sign_extension);
SET_IA32_ORIG_NODE(xor, node);
- new_node = new_rd_ia32_Sub(dbgi, irg, new_block, noreg_gp, noreg_gp,
+ new_node = new_bd_ia32_Sub(dbgi, new_block, noreg_gp, noreg_gp,
nomem, xor, sign_extension);
SET_IA32_ORIG_NODE(new_node, node);
}
ir_node *op1 = be_transform_node(x);
ir_node *op2 = be_transform_node(n);
- return new_rd_ia32_Bt(dbgi, current_ir_graph, new_block, op1, op2);
+ return new_bd_ia32_Bt(dbgi, new_block, op1, op2);
}
/**
new_op = be_transform_node(node);
noreg = ia32_new_NoReg_gp(env_cg);
nomem = new_NoMem();
- flags = new_rd_ia32_Test(dbgi, current_ir_graph, new_block, noreg, noreg, nomem,
- new_op, new_op, /*is_permuted=*/0, /*cmp_unsigned=*/0);
+ flags = new_bd_ia32_Test(dbgi, new_block, noreg, noreg, nomem, new_op,
+ new_op, /*is_permuted=*/0, /*cmp_unsigned=*/0);
*pnc_out = pn_Cmp_Lg;
return flags;
}
ir_node *new_mem = be_transform_node(mem);
ir_node *base;
ir_node *index;
- ir_graph *irg = current_ir_graph;
dbg_info *dbgi = get_irn_dbg_info(node);
ir_node *noreg = ia32_new_NoReg_gp(env_cg);
ir_mode *mode = get_Load_mode(node);
if (mode_is_float(mode)) {
if (ia32_cg_config.use_sse2) {
- new_node = new_rd_ia32_xLoad(dbgi, irg, block, base, index, new_mem,
+ new_node = new_bd_ia32_xLoad(dbgi, block, base, index, new_mem,
mode);
res_mode = mode_xmm;
} else {
- new_node = new_rd_ia32_vfld(dbgi, irg, block, base, index, new_mem,
+ new_node = new_bd_ia32_vfld(dbgi, block, base, index, new_mem,
mode);
res_mode = mode_vfp;
}
/* create a conv node with address mode for smaller modes */
if (get_mode_size_bits(mode) < 32) {
- new_node = new_rd_ia32_Conv_I2I(dbgi, irg, block, base, index,
+ new_node = new_bd_ia32_Conv_I2I(dbgi, block, base, index,
new_mem, noreg, mode);
} else {
- new_node = new_rd_ia32_Load(dbgi, irg, block, base, index, new_mem);
+ new_node = new_bd_ia32_Load(dbgi, block, base, index, new_mem);
}
res_mode = mode_Iu;
}
ir_node *src_block = get_nodes_block(node);
ir_node *block;
ir_node *noreg_gp = ia32_new_NoReg_gp(env_cg);
- ir_graph *irg = current_ir_graph;
dbg_info *dbgi;
ir_node *new_mem;
ir_node *new_node;
dbgi = get_irn_dbg_info(node);
block = be_transform_node(src_block);
- new_mem = transform_AM_mem(irg, block, am.am_node, mem, addr->mem);
+ new_mem = transform_AM_mem(current_ir_graph, block, am.am_node, mem, addr->mem);
if (get_mode_size_bits(mode) == 8) {
- new_node = func8bit(dbgi, irg, block, addr->base, addr->index,
- new_mem, new_op);
+ new_node = func8bit(dbgi, block, addr->base, addr->index, new_mem, new_op);
} else {
- new_node = func(dbgi, irg, block, addr->base, addr->index, new_mem,
- new_op);
+ new_node = func(dbgi, block, addr->base, addr->index, new_mem, new_op);
}
set_address(new_node, addr);
set_ia32_op_type(new_node, ia32_AddrModeD);
ir_node *ptr, ir_mode *mode,
construct_unop_dest_func *func)
{
- ir_graph *irg = current_ir_graph;
ir_node *src_block = get_nodes_block(node);
ir_node *block;
dbg_info *dbgi;
dbgi = get_irn_dbg_info(node);
block = be_transform_node(src_block);
- new_mem = transform_AM_mem(irg, block, am.am_node, mem, addr->mem);
- new_node = func(dbgi, irg, block, addr->base, addr->index, new_mem);
+ new_mem = transform_AM_mem(current_ir_graph, block, am.am_node, mem, addr->mem);
+ new_node = func(dbgi, block, addr->base, addr->index, new_mem);
set_address(new_node, addr);
set_ia32_op_type(new_node, ia32_AddrModeD);
set_ia32_ls_mode(new_node, mode);
ir_mode *mode = get_irn_mode(node);
ir_node *mux_true = get_Mux_true(node);
ir_node *mux_false = get_Mux_false(node);
- ir_graph *irg;
ir_node *cond;
ir_node *new_mem;
dbg_info *dbgi;
build_address_ptr(&addr, ptr, mem);
- irg = current_ir_graph;
dbgi = get_irn_dbg_info(node);
block = get_nodes_block(node);
new_block = be_transform_node(block);
cond = get_Mux_sel(node);
flags = get_flags_node(cond, &pnc);
new_mem = be_transform_node(mem);
- new_node = new_rd_ia32_SetMem(dbgi, irg, new_block, addr.base,
+ new_node = new_bd_ia32_SetMem(dbgi, new_block, addr.base,
addr.index, addr.mem, flags, pnc, negated);
set_address(new_node, &addr);
set_ia32_op_type(new_node, ia32_AddrModeD);
op2 = get_Add_right(val);
if (ia32_cg_config.use_incdec) {
if (is_Const_1(op2)) {
- new_node = dest_am_unop(val, op1, mem, ptr, mode, new_rd_ia32_IncMem);
+ new_node = dest_am_unop(val, op1, mem, ptr, mode, new_bd_ia32_IncMem);
break;
} else if (is_Const_Minus_1(op2)) {
- new_node = dest_am_unop(val, op1, mem, ptr, mode, new_rd_ia32_DecMem);
+ new_node = dest_am_unop(val, op1, mem, ptr, mode, new_bd_ia32_DecMem);
break;
}
}
new_node = dest_am_binop(val, op1, op2, mem, ptr, mode,
- new_rd_ia32_AddMem, new_rd_ia32_AddMem8Bit,
+ new_bd_ia32_AddMem, new_bd_ia32_AddMem8Bit,
match_dest_am | match_commutative |
match_immediate);
break;
ir_fprintf(stderr, "Optimisation warning: not-normalized sub ,C found\n");
}
new_node = dest_am_binop(val, op1, op2, mem, ptr, mode,
- new_rd_ia32_SubMem, new_rd_ia32_SubMem8Bit,
+ new_bd_ia32_SubMem, new_bd_ia32_SubMem8Bit,
match_dest_am | match_immediate);
break;
case iro_And:
op1 = get_And_left(val);
op2 = get_And_right(val);
new_node = dest_am_binop(val, op1, op2, mem, ptr, mode,
- new_rd_ia32_AndMem, new_rd_ia32_AndMem8Bit,
+ new_bd_ia32_AndMem, new_bd_ia32_AndMem8Bit,
match_dest_am | match_commutative |
match_immediate);
break;
op1 = get_Or_left(val);
op2 = get_Or_right(val);
new_node = dest_am_binop(val, op1, op2, mem, ptr, mode,
- new_rd_ia32_OrMem, new_rd_ia32_OrMem8Bit,
+ new_bd_ia32_OrMem, new_bd_ia32_OrMem8Bit,
match_dest_am | match_commutative |
match_immediate);
break;
op1 = get_Eor_left(val);
op2 = get_Eor_right(val);
new_node = dest_am_binop(val, op1, op2, mem, ptr, mode,
- new_rd_ia32_XorMem, new_rd_ia32_XorMem8Bit,
+ new_bd_ia32_XorMem, new_bd_ia32_XorMem8Bit,
match_dest_am | match_commutative |
match_immediate);
break;
op1 = get_Shl_left(val);
op2 = get_Shl_right(val);
new_node = dest_am_binop(val, op1, op2, mem, ptr, mode,
- new_rd_ia32_ShlMem, new_rd_ia32_ShlMem,
+ new_bd_ia32_ShlMem, new_bd_ia32_ShlMem,
match_dest_am | match_immediate);
break;
case iro_Shr:
op1 = get_Shr_left(val);
op2 = get_Shr_right(val);
new_node = dest_am_binop(val, op1, op2, mem, ptr, mode,
- new_rd_ia32_ShrMem, new_rd_ia32_ShrMem,
+ new_bd_ia32_ShrMem, new_bd_ia32_ShrMem,
match_dest_am | match_immediate);
break;
case iro_Shrs:
op1 = get_Shrs_left(val);
op2 = get_Shrs_right(val);
new_node = dest_am_binop(val, op1, op2, mem, ptr, mode,
- new_rd_ia32_SarMem, new_rd_ia32_SarMem,
+ new_bd_ia32_SarMem, new_bd_ia32_SarMem,
match_dest_am | match_immediate);
break;
case iro_Rotl:
op1 = get_Rotl_left(val);
op2 = get_Rotl_right(val);
new_node = dest_am_binop(val, op1, op2, mem, ptr, mode,
- new_rd_ia32_RolMem, new_rd_ia32_RolMem,
+ new_bd_ia32_RolMem, new_bd_ia32_RolMem,
match_dest_am | match_immediate);
break;
/* TODO: match ROR patterns... */
break;
case iro_Minus:
op1 = get_Minus_op(val);
- new_node = dest_am_unop(val, op1, mem, ptr, mode, new_rd_ia32_NegMem);
+ new_node = dest_am_unop(val, op1, mem, ptr, mode, new_bd_ia32_NegMem);
break;
case iro_Not:
/* should be lowered already */
assert(mode != mode_b);
op1 = get_Not_op(val);
- new_node = dest_am_unop(val, op1, mem, ptr, mode, new_rd_ia32_NotMem);
+ new_node = dest_am_unop(val, op1, mem, ptr, mode, new_bd_ia32_NotMem);
break;
default:
return NULL;
ir_node *new_block = be_transform_node(block);
ir_node *ptr = get_Store_ptr(node);
ir_node *mem = get_Store_mem(node);
- ir_graph *irg = current_ir_graph;
dbg_info *dbgi = get_irn_dbg_info(node);
int ofs = 0;
size_t i = 0;
(get_tarval_sub_bits(tv, ofs + 3) << 24);
ir_node *imm = create_Immediate(NULL, 0, val);
- ir_node *new_node = new_rd_ia32_Store(dbgi, irg, new_block, addr.base,
+ ir_node *new_node = new_bd_ia32_Store(dbgi, new_block, addr.base,
addr.index, addr.mem, imm);
set_irn_pinned(new_node, get_irn_pinned(node));
} while (size != 0);
if (i > 1) {
- return new_rd_Sync(dbgi, irg, new_block, i, ins);
+ return new_rd_Sync(dbgi, current_ir_graph, new_block, i, ins);
} else {
return ins[0];
}
/* Note: fisttp ALWAYS pop the tos. We have to ensure here that the value is copied
if other users exists */
const arch_register_class_t *reg_class = &ia32_reg_classes[CLASS_ia32_vfp];
- ir_node *vfisttp = new_rd_ia32_vfisttp(dbgi, irg, block, base, index, mem, val);
+ ir_node *vfisttp = new_bd_ia32_vfisttp(dbgi, block, base, index, mem, val);
ir_node *value = new_r_Proj(irg, block, vfisttp, mode_E, pn_ia32_vfisttp_res);
be_new_Keep(reg_class, irg, block, 1, &value);
ir_node *trunc_mode = ia32_new_Fpu_truncate(env_cg);
/* do a fist */
- new_node = new_rd_ia32_vfist(dbgi, irg, block, base, index, mem, val, trunc_mode);
+ new_node = new_bd_ia32_vfist(dbgi, block, base, index, mem, val, trunc_mode);
*fist = new_node;
}
return new_node;
ir_node *new_block = be_transform_node(block);
ir_node *ptr = get_Store_ptr(node);
ir_node *mem = get_Store_mem(node);
- ir_graph *irg = current_ir_graph;
dbg_info *dbgi = get_irn_dbg_info(node);
ir_node *noreg = ia32_new_NoReg_gp(env_cg);
ir_node *new_val, *new_node, *store;
}
new_val = be_transform_node(val);
if (ia32_cg_config.use_sse2) {
- new_node = new_rd_ia32_xStore(dbgi, irg, new_block, addr.base,
+ new_node = new_bd_ia32_xStore(dbgi, new_block, addr.base,
addr.index, addr.mem, new_val);
} else {
- new_node = new_rd_ia32_vfst(dbgi, irg, new_block, addr.base,
+ new_node = new_bd_ia32_vfst(dbgi, new_block, addr.base,
addr.index, addr.mem, new_val, mode);
}
store = new_node;
val = op;
}
new_val = be_transform_node(val);
- new_node = gen_vfist(dbgi, irg, new_block, addr.base, addr.index, addr.mem, new_val, &store);
+ new_node = gen_vfist(dbgi, current_ir_graph, new_block, addr.base, addr.index, addr.mem, new_val, &store);
} else {
new_val = create_immediate_or_transform(val, 0);
assert(mode != mode_b);
if (get_mode_size_bits(mode) == 8) {
- new_node = new_rd_ia32_Store8Bit(dbgi, irg, new_block, addr.base,
+ new_node = new_bd_ia32_Store8Bit(dbgi, new_block, addr.base,
addr.index, addr.mem, new_val);
} else {
- new_node = new_rd_ia32_Store(dbgi, irg, new_block, addr.base,
+ new_node = new_bd_ia32_Store(dbgi, new_block, addr.base,
addr.index, addr.mem, new_val);
}
store = new_node;
*/
static ir_node *create_Switch(ir_node *node)
{
- ir_graph *irg = current_ir_graph;
dbg_info *dbgi = get_irn_dbg_info(node);
ir_node *block = be_transform_node(get_nodes_block(node));
ir_node *sel = get_Cond_selector(node);
ir_node *noreg = ia32_new_NoReg_gp(env_cg);
/* if smallest switch case is not 0 we need an additional sub */
- new_sel = new_rd_ia32_Lea(dbgi, irg, block, new_sel, noreg);
+ new_sel = new_bd_ia32_Lea(dbgi, block, new_sel, noreg);
add_ia32_am_offs_int(new_sel, -switch_min);
set_ia32_op_type(new_sel, ia32_AddrModeS);
SET_IA32_ORIG_NODE(new_sel, node);
}
- new_node = new_rd_ia32_SwitchJmp(dbgi, irg, block, new_sel, default_pn);
+ new_node = new_bd_ia32_SwitchJmp(dbgi, block, new_sel, default_pn);
SET_IA32_ORIG_NODE(new_node, node);
return new_node;
{
ir_node *block = get_nodes_block(node);
ir_node *new_block = be_transform_node(block);
- ir_graph *irg = current_ir_graph;
dbg_info *dbgi = get_irn_dbg_info(node);
ir_node *sel = get_Cond_selector(node);
ir_mode *sel_mode = get_irn_mode(sel);
/* we get flags from a Cmp */
flags = get_flags_node(sel, &pnc);
- new_node = new_rd_ia32_Jcc(dbgi, irg, new_block, flags, pnc);
+ new_node = new_bd_ia32_Jcc(dbgi, new_block, flags, pnc);
SET_IA32_ORIG_NODE(new_node, node);
return new_node;
static ir_node *create_Fucom(ir_node *node)
{
- ir_graph *irg = current_ir_graph;
dbg_info *dbgi = get_irn_dbg_info(node);
ir_node *block = get_nodes_block(node);
ir_node *new_block = be_transform_node(block);
if (ia32_cg_config.use_fucomi) {
new_right = be_transform_node(right);
- new_node = new_rd_ia32_vFucomi(dbgi, irg, new_block, new_left,
+ new_node = new_bd_ia32_vFucomi(dbgi, new_block, new_left,
new_right, 0);
set_ia32_commutative(new_node);
SET_IA32_ORIG_NODE(new_node, node);
} else {
if (ia32_cg_config.use_ftst && is_Const_0(right)) {
- new_node = new_rd_ia32_vFtstFnstsw(dbgi, irg, new_block, new_left,
- 0);
+ new_node = new_bd_ia32_vFtstFnstsw(dbgi, new_block, new_left, 0);
} else {
new_right = be_transform_node(right);
- new_node = new_rd_ia32_vFucomFnstsw(dbgi, irg, new_block, new_left,
- new_right, 0);
+ new_node = new_bd_ia32_vFucomFnstsw(dbgi, new_block, new_left, new_right, 0);
}
set_ia32_commutative(new_node);
SET_IA32_ORIG_NODE(new_node, node);
- new_node = new_rd_ia32_Sahf(dbgi, irg, new_block, new_node);
+ new_node = new_bd_ia32_Sahf(dbgi, new_block, new_node);
SET_IA32_ORIG_NODE(new_node, node);
}
static ir_node *create_Ucomi(ir_node *node)
{
- ir_graph *irg = current_ir_graph;
dbg_info *dbgi = get_irn_dbg_info(node);
ir_node *src_block = get_nodes_block(node);
ir_node *new_block = be_transform_node(src_block);
match_arguments(&am, src_block, left, right, NULL,
match_commutative | match_am);
- new_node = new_rd_ia32_Ucomi(dbgi, irg, new_block, addr->base, addr->index,
+ new_node = new_bd_ia32_Ucomi(dbgi, new_block, addr->base, addr->index,
addr->mem, am.new_op1, am.new_op2,
am.ins_permuted);
set_am_attributes(new_node, &am);
*/
static ir_node *gen_Cmp(ir_node *node)
{
- ir_graph *irg = current_ir_graph;
dbg_info *dbgi = get_irn_dbg_info(node);
ir_node *block = get_nodes_block(node);
ir_node *new_block = be_transform_node(block);
}
if (get_mode_size_bits(cmp_mode) == 8) {
- new_node = new_rd_ia32_Test8Bit(dbgi, irg, new_block, addr->base,
- addr->index, addr->mem, am.new_op1,
- am.new_op2, am.ins_permuted,
- cmp_unsigned);
+ new_node = new_bd_ia32_Test8Bit(dbgi, new_block, addr->base,
+ addr->index, addr->mem, am.new_op1, am.new_op2, am.ins_permuted,
+ cmp_unsigned);
} else {
- new_node = new_rd_ia32_Test(dbgi, irg, new_block, addr->base,
- addr->index, addr->mem, am.new_op1,
- am.new_op2, am.ins_permuted,
- cmp_unsigned);
+ new_node = new_bd_ia32_Test(dbgi, new_block, addr->base, addr->index,
+ addr->mem, am.new_op1, am.new_op2, am.ins_permuted, cmp_unsigned);
}
} else {
/* Cmp(left, right) */
}
if (get_mode_size_bits(cmp_mode) == 8) {
- new_node = new_rd_ia32_Cmp8Bit(dbgi, irg, new_block, addr->base,
+ new_node = new_bd_ia32_Cmp8Bit(dbgi, new_block, addr->base,
addr->index, addr->mem, am.new_op1,
am.new_op2, am.ins_permuted,
cmp_unsigned);
} else {
- new_node = new_rd_ia32_Cmp(dbgi, irg, new_block, addr->base,
- addr->index, addr->mem, am.new_op1,
- am.new_op2, am.ins_permuted, cmp_unsigned);
+ new_node = new_bd_ia32_Cmp(dbgi, new_block, addr->base, addr->index,
+ addr->mem, am.new_op1, am.new_op2, am.ins_permuted, cmp_unsigned);
}
}
set_am_attributes(new_node, &am);
static ir_node *create_CMov(ir_node *node, ir_node *flags, ir_node *new_flags,
pn_Cmp pnc)
{
- ir_graph *irg = current_ir_graph;
dbg_info *dbgi = get_irn_dbg_info(node);
ir_node *block = get_nodes_block(node);
ir_node *new_block = be_transform_node(block);
match_arguments(&am, block, val_false, val_true, flags, match_flags);
- new_node = new_rd_ia32_CMov(dbgi, irg, new_block, addr->base, addr->index,
+ new_node = new_bd_ia32_CMov(dbgi, new_block, addr->base, addr->index,
addr->mem, am.new_op1, am.new_op2, new_flags,
am.ins_permuted, pnc);
set_am_attributes(new_node, &am);
ir_node *flags, pn_Cmp pnc, ir_node *orig_node,
int ins_permuted)
{
- ir_graph *irg = current_ir_graph;
- ir_node *noreg = ia32_new_NoReg_gp(env_cg);
- ir_node *nomem = new_NoMem();
- ir_mode *mode = get_irn_mode(orig_node);
- ir_node *new_node;
+ ir_node *noreg = ia32_new_NoReg_gp(env_cg);
+ ir_node *nomem = new_NoMem();
+ ir_mode *mode = get_irn_mode(orig_node);
+ ir_node *new_node;
- new_node = new_rd_ia32_Set(dbgi, irg, new_block, flags, pnc, ins_permuted);
+ new_node = new_bd_ia32_Set(dbgi, new_block, flags, pnc, ins_permuted);
SET_IA32_ORIG_NODE(new_node, orig_node);
/* we might need to conv the result up */
if (get_mode_size_bits(mode) > 8) {
- new_node = new_rd_ia32_Conv_I2I8Bit(dbgi, irg, new_block, noreg, noreg,
+ new_node = new_bd_ia32_Conv_I2I8Bit(dbgi, new_block, noreg, noreg,
nomem, new_node, mode_Bu);
SET_IA32_ORIG_NODE(new_node, orig_node);
}
ir_node *new_node, *sub, *sbb, *eflags, *block, *noreg, *tmpreg, *nomem;
dbg_info *dbgi;
- new_node = gen_binop(psi, a, b, new_rd_ia32_Sub,
+ new_node = gen_binop(psi, a, b, new_bd_ia32_Sub,
match_mode_neutral | match_am | match_immediate | match_two_users);
block = get_nodes_block(new_node);
dbgi = get_irn_dbg_info(psi);
noreg = ia32_new_NoReg_gp(env_cg);
- tmpreg = new_rd_ia32_ProduceVal(dbgi, irg, block);
+ tmpreg = new_bd_ia32_ProduceVal(dbgi, block);
nomem = new_NoMem();
- sbb = new_rd_ia32_Sbb(dbgi, irg, block, noreg, noreg, nomem, tmpreg, tmpreg, eflags);
+ sbb = new_bd_ia32_Sbb(dbgi, block, noreg, noreg, nomem, tmpreg, tmpreg, eflags);
- new_node = new_rd_ia32_And(dbgi, irg, block, noreg, noreg, nomem, new_node, sbb);
+ new_node = new_bd_ia32_And(dbgi, block, noreg, noreg, nomem, new_node, sbb);
set_ia32_commutative(new_node);
return new_node;
}
if (pnc == pn_Cmp_Lt || pnc == pn_Cmp_Le) {
if (cmp_left == mux_true && cmp_right == mux_false) {
/* Mux(a <= b, a, b) => MIN */
- return gen_binop(node, cmp_left, cmp_right, new_rd_ia32_xMin,
+ return gen_binop(node, cmp_left, cmp_right, new_bd_ia32_xMin,
match_commutative | match_am | match_two_users);
} else if (cmp_left == mux_false && cmp_right == mux_true) {
/* Mux(a <= b, b, a) => MAX */
- return gen_binop(node, cmp_left, cmp_right, new_rd_ia32_xMax,
+ return gen_binop(node, cmp_left, cmp_right, new_bd_ia32_xMax,
match_commutative | match_am | match_two_users);
}
} else if (pnc == pn_Cmp_Gt || pnc == pn_Cmp_Ge) {
if (cmp_left == mux_true && cmp_right == mux_false) {
/* Mux(a >= b, a, b) => MAX */
- return gen_binop(node, cmp_left, cmp_right, new_rd_ia32_xMax,
+ return gen_binop(node, cmp_left, cmp_right, new_bd_ia32_xMax,
match_commutative | match_am | match_two_users);
} else if (cmp_left == mux_false && cmp_right == mux_true) {
/* Mux(a >= b, b, a) => MIN */
- return gen_binop(node, cmp_left, cmp_right, new_rd_ia32_xMin,
+ return gen_binop(node, cmp_left, cmp_right, new_bd_ia32_xMin,
match_commutative | match_am | match_two_users);
}
}
SET_IA32_ORIG_NODE(fist, node);
/* do a Load */
- load = new_rd_ia32_Load(dbgi, irg, block, get_irg_frame(irg), noreg, mem);
+ load = new_bd_ia32_Load(dbgi, block, get_irg_frame(irg), noreg, mem);
set_irn_pinned(load, op_pin_state_floats);
set_ia32_use_frame(load);
ir_node *store, *load;
ir_node *new_node;
- store = new_rd_ia32_vfst(dbgi, irg, block, frame, noreg, nomem, node,
- tgt_mode);
+ store = new_bd_ia32_vfst(dbgi, block, frame, noreg, nomem, node, tgt_mode);
set_ia32_use_frame(store);
set_ia32_op_type(store, ia32_AddrModeD);
SET_IA32_ORIG_NODE(store, node);
- load = new_rd_ia32_vfld(dbgi, irg, block, frame, noreg, store,
- tgt_mode);
+ load = new_bd_ia32_vfld(dbgi, block, frame, noreg, store, tgt_mode);
set_ia32_use_frame(load);
set_ia32_op_type(load, ia32_AddrModeS);
SET_IA32_ORIG_NODE(load, node);
if (am.op_type == ia32_AddrModeS) {
ia32_address_t *addr = &am.addr;
- fild = new_rd_ia32_vfild(dbgi, irg, block, addr->base,
- addr->index, addr->mem);
+ fild = new_bd_ia32_vfild(dbgi, block, addr->base, addr->index,
+ addr->mem);
new_node = new_r_Proj(irg, block, fild, mode_vfp,
pn_ia32_vfild_res);
/* first convert to 32 bit signed if necessary */
src_bits = get_mode_size_bits(src_mode);
if (src_bits == 8) {
- new_op = new_rd_ia32_Conv_I2I8Bit(dbgi, irg, block, noreg, noreg, nomem,
+ new_op = new_bd_ia32_Conv_I2I8Bit(dbgi, block, noreg, noreg, nomem,
new_op, src_mode);
SET_IA32_ORIG_NODE(new_op, node);
mode = mode_Is;
} else if (src_bits < 32) {
- new_op = new_rd_ia32_Conv_I2I(dbgi, irg, block, noreg, noreg, nomem,
+ new_op = new_bd_ia32_Conv_I2I(dbgi, block, noreg, noreg, nomem,
new_op, src_mode);
SET_IA32_ORIG_NODE(new_op, node);
mode = mode_Is;
assert(get_mode_size_bits(mode) == 32);
/* do a store */
- store = new_rd_ia32_Store(dbgi, irg, block, get_irg_frame(irg), noreg, nomem,
+ store = new_bd_ia32_Store(dbgi, block, get_irg_frame(irg), noreg, nomem,
new_op);
set_ia32_use_frame(store);
/* store a zero */
ir_node *zero_const = create_Immediate(NULL, 0, 0);
- ir_node *zero_store = new_rd_ia32_Store(dbgi, irg, block,
- get_irg_frame(irg), noreg, nomem,
- zero_const);
+ ir_node *zero_store = new_bd_ia32_Store(dbgi, block, get_irg_frame(irg),
+ noreg, nomem, zero_const);
set_ia32_use_frame(zero_store);
set_ia32_op_type(zero_store, ia32_AddrModeD);
}
/* do a fild */
- fild = new_rd_ia32_vfild(dbgi, irg, block, get_irg_frame(irg), noreg, store);
+ fild = new_bd_ia32_vfild(dbgi, block, get_irg_frame(irg), noreg, store);
set_ia32_use_frame(fild);
set_ia32_op_type(fild, ia32_AddrModeS);
dbg_info *dbgi, ir_node *block, ir_node *op,
ir_node *node)
{
- ir_graph *irg = current_ir_graph;
int src_bits = get_mode_size_bits(src_mode);
int tgt_bits = get_mode_size_bits(tgt_mode);
ir_node *new_block = be_transform_node(block);
}
if (smaller_bits == 8) {
- new_node = new_rd_ia32_Conv_I2I8Bit(dbgi, irg, new_block, addr->base,
+ new_node = new_bd_ia32_Conv_I2I8Bit(dbgi, new_block, addr->base,
addr->index, addr->mem, am.new_op2,
smaller_mode);
} else {
- new_node = new_rd_ia32_Conv_I2I(dbgi, irg, new_block, addr->base,
+ new_node = new_bd_ia32_Conv_I2I(dbgi, new_block, addr->base,
addr->index, addr->mem, am.new_op2,
smaller_mode);
}
ir_node *new_block = be_transform_node(block);
ir_node *op = get_Conv_op(node);
ir_node *new_op = NULL;
- ir_graph *irg = current_ir_graph;
dbg_info *dbgi = get_irn_dbg_info(node);
ir_mode *src_mode = get_irn_mode(op);
ir_mode *tgt_mode = get_irn_mode(node);
/* ... to float */
if (ia32_cg_config.use_sse2) {
DB((dbg, LEVEL_1, "create Conv(float, float) ..."));
- res = new_rd_ia32_Conv_FP2FP(dbgi, irg, new_block, noreg, noreg,
+ res = new_bd_ia32_Conv_FP2FP(dbgi, new_block, noreg, noreg,
nomem, new_op);
set_ia32_ls_mode(res, tgt_mode);
} else {
/* ... to int */
DB((dbg, LEVEL_1, "create Conv(float, int) ..."));
if (ia32_cg_config.use_sse2) {
- res = new_rd_ia32_Conv_FP2I(dbgi, irg, new_block, noreg, noreg,
+ res = new_bd_ia32_Conv_FP2I(dbgi, new_block, noreg, noreg,
nomem, new_op);
set_ia32_ls_mode(res, src_mode);
} else {
DB((dbg, LEVEL_1, "create Conv(int, float) ..."));
if (ia32_cg_config.use_sse2) {
new_op = be_transform_node(op);
- res = new_rd_ia32_Conv_I2FP(dbgi, irg, new_block, noreg, noreg,
+ res = new_bd_ia32_Conv_I2FP(dbgi, new_block, noreg, noreg,
nomem, new_op);
set_ia32_ls_mode(res, tgt_mode);
} else {
ir_node *block = be_transform_node(get_nodes_block(node));
ir_node *op = be_get_FrameAddr_frame(node);
ir_node *new_op = be_transform_node(op);
- ir_graph *irg = current_ir_graph;
dbg_info *dbgi = get_irn_dbg_info(node);
ir_node *noreg = ia32_new_NoReg_gp(env_cg);
ir_node *new_node;
- new_node = new_rd_ia32_Lea(dbgi, irg, block, new_op, noreg);
+ new_node = new_bd_ia32_Lea(dbgi, block, new_op, noreg);
set_ia32_frame_ent(new_node, arch_get_frame_entity(node));
set_ia32_use_frame(new_node);
noreg = ia32_new_NoReg_gp(env_cg);
/* store xmm0 onto stack */
- sse_store = new_rd_ia32_xStoreSimple(dbgi, irg, block, frame, noreg,
+ sse_store = new_bd_ia32_xStoreSimple(dbgi, block, frame, noreg,
new_ret_mem, new_ret_val);
set_ia32_ls_mode(sse_store, mode);
set_ia32_op_type(sse_store, ia32_AddrModeD);
set_ia32_use_frame(sse_store);
/* load into x87 register */
- fld = new_rd_ia32_vfld(dbgi, irg, block, frame, noreg, sse_store, mode);
+ fld = new_bd_ia32_vfld(dbgi, block, frame, noreg, sse_store, mode);
set_ia32_op_type(fld, ia32_AddrModeS);
set_ia32_use_frame(fld);
ir_node *sz = get_irn_n(node, be_pos_AddSP_size);
ir_node *sp = get_irn_n(node, be_pos_AddSP_old_sp);
- return gen_binop(node, sp, sz, new_rd_ia32_SubSP,
+ return gen_binop(node, sp, sz, new_bd_ia32_SubSP,
match_am | match_immediate);
}
ir_node *sz = get_irn_n(node, be_pos_SubSP_size);
ir_node *sp = get_irn_n(node, be_pos_SubSP_old_sp);
- return gen_binop(node, sp, sz, new_rd_ia32_AddSP,
+ return gen_binop(node, sp, sz, new_bd_ia32_AddSP,
match_am | match_immediate);
}
match_am | match_8bit_am | match_16bit_am |
match_immediate | match_8bit | match_16bit);
- new_node = new_rd_ia32_IJmp(dbgi, current_ir_graph, new_block,
- addr->base, addr->index, addr->mem,
- am.new_op2);
+ new_node = new_bd_ia32_IJmp(dbgi, new_block, addr->base, addr->index,
+ addr->mem, am.new_op2);
set_am_attributes(new_node, &am);
SET_IA32_ORIG_NODE(new_node, node);
ir_graph *irg = current_ir_graph;
res = gen_binop(node, get_Bound_index(node), get_Bound_upper(node),
- new_rd_ia32_Sub, match_mode_neutral | match_am | match_immediate);
+ new_bd_ia32_Sub, match_mode_neutral | match_am | match_immediate);
block = get_nodes_block(res);
if (! is_Proj(res)) {
sub = get_Proj_pred(res);
}
flags = new_rd_Proj(NULL, irg, block, sub, mode_Iu, pn_ia32_Sub_flags);
- new_node = new_rd_ia32_Jcc(dbgi, irg, block, flags, pn_Cmp_Lt | ia32_pn_Cmp_unsigned);
+ new_node = new_bd_ia32_Jcc(dbgi, block, flags, pn_Cmp_Lt | ia32_pn_Cmp_unsigned);
SET_IA32_ORIG_NODE(new_node, node);
} else {
panic("generic Bound not supported in ia32 Backend");
ir_node *left = get_irn_n(node, n_ia32_l_ShlDep_val);
ir_node *right = get_irn_n(node, n_ia32_l_ShlDep_count);
- return gen_shift_binop(node, left, right, new_rd_ia32_Shl,
+ return gen_shift_binop(node, left, right, new_bd_ia32_Shl,
match_immediate | match_mode_neutral);
}
{
ir_node *left = get_irn_n(node, n_ia32_l_ShrDep_val);
ir_node *right = get_irn_n(node, n_ia32_l_ShrDep_count);
- return gen_shift_binop(node, left, right, new_rd_ia32_Shr,
+ return gen_shift_binop(node, left, right, new_bd_ia32_Shr,
match_immediate);
}
{
ir_node *left = get_irn_n(node, n_ia32_l_SarDep_val);
ir_node *right = get_irn_n(node, n_ia32_l_SarDep_count);
- return gen_shift_binop(node, left, right, new_rd_ia32_Sar,
+ return gen_shift_binop(node, left, right, new_bd_ia32_Sar,
match_immediate);
}
{
ir_node *left = get_irn_n(node, n_ia32_l_Add_left);
ir_node *right = get_irn_n(node, n_ia32_l_Add_right);
- ir_node *lowered = gen_binop(node, left, right, new_rd_ia32_Add,
+ ir_node *lowered = gen_binop(node, left, right, new_bd_ia32_Add,
match_commutative | match_am | match_immediate |
match_mode_neutral);
static ir_node *gen_ia32_l_Adc(ir_node *node)
{
- return gen_binop_flags(node, new_rd_ia32_Adc,
+ return gen_binop_flags(node, new_bd_ia32_Adc,
match_commutative | match_am | match_immediate |
match_mode_neutral);
}
ir_node *left = get_binop_left(node);
ir_node *right = get_binop_right(node);
- return gen_binop(node, left, right, new_rd_ia32_Mul,
+ return gen_binop(node, left, right, new_bd_ia32_Mul,
match_commutative | match_am | match_mode_neutral);
}
ir_node *left = get_binop_left(node);
ir_node *right = get_binop_right(node);
- return gen_binop(node, left, right, new_rd_ia32_IMul1OP,
+ return gen_binop(node, left, right, new_bd_ia32_IMul1OP,
match_commutative | match_am | match_mode_neutral);
}
{
ir_node *left = get_irn_n(node, n_ia32_l_Sub_minuend);
ir_node *right = get_irn_n(node, n_ia32_l_Sub_subtrahend);
- ir_node *lowered = gen_binop(node, left, right, new_rd_ia32_Sub,
+ ir_node *lowered = gen_binop(node, left, right, new_bd_ia32_Sub,
match_am | match_immediate | match_mode_neutral);
if (is_Proj(lowered)) {
static ir_node *gen_ia32_l_Sbb(ir_node *node)
{
- return gen_binop_flags(node, new_rd_ia32_Sbb,
+ return gen_binop_flags(node, new_bd_ia32_Sbb,
match_am | match_immediate | match_mode_neutral);
}
{
ir_node *block = get_nodes_block(node);
ir_node *new_block = be_transform_node(block);
- ir_graph *irg = current_ir_graph;
dbg_info *dbgi = get_irn_dbg_info(node);
ir_node *new_high = be_transform_node(high);
ir_node *new_low = be_transform_node(low);
new_count = create_immediate_or_transform(count, 0);
if (is_ia32_l_ShlD(node)) {
- new_node = new_rd_ia32_ShlD(dbgi, irg, new_block, new_high, new_low,
+ new_node = new_bd_ia32_ShlD(dbgi, new_block, new_high, new_low,
new_count);
} else {
- new_node = new_rd_ia32_ShrD(dbgi, irg, new_block, new_high, new_low,
+ new_node = new_bd_ia32_ShrD(dbgi, new_block, new_high, new_low,
new_count);
}
SET_IA32_ORIG_NODE(new_node, node);
}
/* do a store */
- store_low = new_rd_ia32_Store(dbgi, irg, block, frame, noreg, nomem,
+ store_low = new_bd_ia32_Store(dbgi, block, frame, noreg, nomem,
new_val_low);
- store_high = new_rd_ia32_Store(dbgi, irg, block, frame, noreg, nomem,
+ store_high = new_bd_ia32_Store(dbgi, block, frame, noreg, nomem,
new_val_high);
SET_IA32_ORIG_NODE(store_low, node);
SET_IA32_ORIG_NODE(store_high, node);
sync = new_rd_Sync(dbgi, irg, block, 2, in);
/* do a fild */
- fild = new_rd_ia32_vfild(dbgi, irg, block, frame, noreg, sync);
+ fild = new_bd_ia32_vfild(dbgi, block, frame, noreg, sync);
set_ia32_use_frame(fild);
set_ia32_op_type(fild, ia32_AddrModeS);
ir_node *proj;
ia32_attr_t *attr;
- load = new_rd_ia32_Load(dbgi, irg, block, frame, noreg, new_pred);
+ load = new_bd_ia32_Load(dbgi, block, frame, noreg, new_pred);
SET_IA32_ORIG_NODE(load, node);
set_ia32_use_frame(load);
set_ia32_op_type(load, ia32_AddrModeS);
}
mem = transform_AM_mem(irg, block, src_ptr, src_mem, addr->mem);
- call = new_rd_ia32_Call(dbgi, irg, block, addr->base, addr->index, mem,
+ call = new_bd_ia32_Call(dbgi, block, addr->base, addr->index, mem,
am.new_op2, sp, fpcw, eax, ecx, edx, pop, call_tp);
set_am_attributes(call, &am);
call = fix_mem_proj(call, &am);
pn_be_Call_first_res);
/* store st(0) onto stack */
- fstp = new_rd_ia32_vfst(dbgi, irg, block, frame, noreg, call_mem,
+ fstp = new_bd_ia32_vfst(dbgi, block, frame, noreg, call_mem,
call_res, mode);
set_ia32_op_type(fstp, ia32_AddrModeD);
set_ia32_use_frame(fstp);
/* load into SSE register */
- sse_load = new_rd_ia32_xLoad(dbgi, irg, block, frame, noreg, fstp,
- mode);
+ sse_load = new_bd_ia32_xLoad(dbgi, block, frame, noreg, fstp, mode);
set_ia32_op_type(sse_load, ia32_AddrModeS);
set_ia32_use_frame(sse_load);
ir_node *fxch;
ia32_x87_attr_t *attr;
- fxch = new_rd_ia32_fxch(NULL, get_irn_irg(block), block);
+ fxch = new_bd_ia32_fxch(NULL, block);
attr = get_ia32_x87_attr(fxch);
attr->x87[0] = &ia32_st_regs[pos];
attr->x87[2] = &ia32_st_regs[0];
{
ir_node *fxch;
ia32_x87_attr_t *attr;
- ir_graph *irg = get_irn_irg(n);
ir_node *block = get_nodes_block(n);
x87_fxch(state, pos);
- fxch = new_rd_ia32_fxch(NULL, irg, block);
+ fxch = new_bd_ia32_fxch(NULL, block);
attr = get_ia32_x87_attr(fxch);
attr->x87[0] = &ia32_st_regs[pos];
attr->x87[2] = &ia32_st_regs[0];
x87_push_dbl(state, arch_register_get_index(out), pred);
- fpush = new_rd_ia32_fpush(NULL, get_irn_irg(n), get_nodes_block(n));
+ fpush = new_bd_ia32_fpush(NULL, get_nodes_block(n));
attr = get_ia32_x87_attr(fpush);
attr->x87[0] = &ia32_st_regs[pos];
attr->x87[2] = &ia32_st_regs[0];
while (num > 0) {
x87_pop(state);
if (ia32_cg_config.use_ffreep)
- fpop = new_rd_ia32_ffreep(NULL, get_irn_irg(n), get_nodes_block(n));
+ fpop = new_bd_ia32_ffreep(NULL, get_nodes_block(n));
else
- fpop = new_rd_ia32_fpop(NULL, get_irn_irg(n), get_nodes_block(n));
+ fpop = new_bd_ia32_fpop(NULL, get_nodes_block(n));
attr = get_ia32_x87_attr(fpop);
attr->x87[0] = &ia32_st_regs[0];
attr->x87[1] = &ia32_st_regs[0];
*/
static ir_node *x87_create_fldz(x87_state *state, ir_node *n, int regidx)
{
- ir_graph *irg = get_irn_irg(n);
ir_node *block = get_nodes_block(n);
ir_node *fldz;
- fldz = new_rd_ia32_fldz(NULL, irg, block, mode_E);
+ fldz = new_bd_ia32_fldz(NULL, block, mode_E);
sched_add_before(n, fldz);
DB((dbg, LEVEL_1, "<<< %s\n", get_irn_opname(fldz)));
x87_patch_insn(n, op_p);
block = get_nodes_block(n);
- irg = get_irn_irg(n);
- vfld = new_rd_ia32_vfld(NULL, irg, block, get_irn_n(n, 0), get_irn_n(n, 1), new_NoMem(), get_ia32_ls_mode(n));
+ vfld = new_bd_ia32_vfld(NULL, block, get_irn_n(n, 0), get_irn_n(n, 1), new_NoMem(), get_ia32_ls_mode(n));
/* copy all attributes */
set_ia32_frame_ent(vfld, get_ia32_frame_ent(n));
set_ia32_am_sc(vfld, get_ia32_am_sc(n));
set_ia32_ls_mode(vfld, get_ia32_ls_mode(n));
+ irg = get_irn_irg(n);
rproj = new_r_Proj(irg, block, vfld, get_ia32_ls_mode(vfld), pn_ia32_vfld_res);
mproj = new_r_Proj(irg, block, vfld, mode_M, pn_ia32_vfld_M);
mem = get_irn_Proj_for_mode(n, mode_M);
*/
static ir_node *create_Copy(x87_state *state, ir_node *n)
{
- ir_graph *irg = get_irn_irg(n);
dbg_info *n_dbg = get_irn_dbg_info(n);
ir_mode *mode = get_irn_mode(n);
ir_node *block = get_nodes_block(n);
ir_node *pred = get_irn_n(n, 0);
- ir_node *(*cnstr)(dbg_info *, ir_graph *, ir_node *, ir_mode *) = NULL;
+ ir_node *(*cnstr)(dbg_info *, ir_node *, ir_mode *) = NULL;
ir_node *res;
const arch_register_t *out;
const arch_register_t *op1;
switch (get_ia32_irn_opcode(pred)) {
case iro_ia32_Unknown_VFP:
case iro_ia32_fldz:
- cnstr = new_rd_ia32_fldz;
+ cnstr = new_bd_ia32_fldz;
break;
case iro_ia32_fld1:
- cnstr = new_rd_ia32_fld1;
+ cnstr = new_bd_ia32_fld1;
break;
case iro_ia32_fldpi:
- cnstr = new_rd_ia32_fldpi;
+ cnstr = new_bd_ia32_fldpi;
break;
case iro_ia32_fldl2e:
- cnstr = new_rd_ia32_fldl2e;
+ cnstr = new_bd_ia32_fldl2e;
break;
case iro_ia32_fldl2t:
- cnstr = new_rd_ia32_fldl2t;
+ cnstr = new_bd_ia32_fldl2t;
break;
case iro_ia32_fldlg2:
- cnstr = new_rd_ia32_fldlg2;
+ cnstr = new_bd_ia32_fldlg2;
break;
case iro_ia32_fldln2:
- cnstr = new_rd_ia32_fldln2;
+ cnstr = new_bd_ia32_fldln2;
break;
default:
break;
if (cnstr != NULL) {
/* copy a constant */
- res = (*cnstr)(n_dbg, irg, block, mode);
+ res = (*cnstr)(n_dbg, block, mode);
x87_push(state, arch_register_get_index(out), res);
} else {
int op1_idx = x87_on_stack(state, arch_register_get_index(op1));
- res = new_rd_ia32_fpushCopy(n_dbg, irg, block, pred, mode);
+ res = new_bd_ia32_fpushCopy(n_dbg, block, pred, mode);
x87_push(state, arch_register_get_index(out), res);
/* create a zero */
block = get_nodes_block(node);
- zero = new_rd_ia32_fldz(NULL, current_ir_graph, block, mode_E);
+ zero = new_bd_ia32_fldz(NULL, block, mode_E);
x87_push(state, arch_register_get_index(reg), zero);
attr = get_ia32_x87_attr(zero);
if (ia32_cg_config.use_femms || ia32_cg_config.use_emms) {
if (ia32_cg_config.use_femms) {
/* use FEMMS on AMD processors to clear all */
- keep = new_rd_ia32_femms(NULL, get_irn_irg(block), block);
+ keep = new_bd_ia32_femms(NULL, block);
} else {
/* use EMMS to clear all */
- keep = new_rd_ia32_emms(NULL, get_irn_irg(block), block);
+ keep = new_bd_ia32_emms(NULL, block);
}
sched_add_before(first_insn, keep);
keep_alive(keep);
reg = arch_get_irn_register(node);
/* create a zero at end of pred block */
- zero = new_rd_ia32_fldz(NULL, current_ir_graph, pred_block, mode_E);
+ zero = new_bd_ia32_fldz(NULL, pred_block, mode_E);
x87_push(state, arch_register_get_index(reg), zero);
attr = get_ia32_x87_attr(zero);
{
mips_abi_env_t *env = self;
ir_graph *irg = env->irg;
- ir_node *block = get_irg_start_block(env->irg);
+ ir_node *block = get_irg_start_block(irg);
ir_node *sp = be_abi_reg_map_get(reg_map, &mips_gp_regs[REG_SP]);
ir_node *fp = be_abi_reg_map_get(reg_map, &mips_gp_regs[REG_FP]);
int initialstackframesize;
initialstackframesize = 24;
// - setup first part of stackframe
- sp = new_rd_mips_addu(NULL, irg, block, sp,
+ sp = new_bd_mips_addu(NULL, block, sp,
mips_create_Immediate(initialstackframesize));
mips_set_irn_reg(sp, &mips_gp_regs[REG_SP]);
panic("FIXME Use IncSP or set register requirement with ignore");
int i;
for(i = 0; i < 4; ++i) {
ir_node *reg = be_abi_reg_map_get(reg_map, &mips_gp_regs[REG_A0 + i]);
- ir_node *store = new_rd_mips_store_r(dbg, irg, block, *mem, sp, reg, mode_T);
+ ir_node *store = new_bd_mips_store_r(dbg, block, *mem, sp, reg, mode_T);
attr = get_mips_attr(store);
attr->load_store_mode = mode_Iu;
attr->tv = new_tarval_from_long(i * 4, mode_Is);
*/
reg = be_abi_reg_map_get(reg_map, &mips_gp_regs[REG_FP]);
- store = new_rd_mips_sw(NULL, irg, block, sp, reg, *mem, NULL, 16);
+ store = new_bd_mips_sw(NULL, block, sp, reg, *mem, NULL, 16);
mm[4] = store;
reg = be_abi_reg_map_get(reg_map, &mips_gp_regs[REG_RA]);
- store = new_rd_mips_sw(NULL, irg, block, sp, reg, *mem, NULL, 20);
+ store = new_bd_mips_sw(NULL, block, sp, reg, *mem, NULL, 20);
mm[5] = store;
initialstackframesize = 4;
// save old framepointer
- sp = new_rd_mips_addu(NULL, irg, block, sp,
+ sp = new_bd_mips_addu(NULL, block, sp,
mips_create_Immediate(-initialstackframesize));
mips_set_irn_reg(sp, &mips_gp_regs[REG_SP]);
panic("FIXME Use IncSP or set register requirement with ignore");
reg = be_abi_reg_map_get(reg_map, &mips_gp_regs[REG_FP]);
- store = new_rd_mips_sw(NULL, irg, block, sp, reg, *mem, NULL, 0);
+ store = new_bd_mips_sw(NULL, block, sp, reg, *mem, NULL, 0);
*mem = store;
}
// setup framepointer
- fp = new_rd_mips_addu(NULL, irg, block, sp,
+ fp = new_bd_mips_addu(NULL, block, sp,
mips_create_Immediate(-initialstackframesize));
mips_set_irn_reg(fp, &mips_gp_regs[REG_FP]);
panic("FIXME Use IncSP or set register requirement with ignore");
int fp_save_offset = env->debug ? 16 : 0;
// copy fp to sp
- sp = new_rd_mips_or(NULL, irg, block, fp, mips_create_zero());
+ sp = new_bd_mips_or(NULL, block, fp, mips_create_zero());
mips_set_irn_reg(sp, &mips_gp_regs[REG_SP]);
panic("FIXME Use be_Copy or set register requirement with ignore");
// 1. restore fp
- load = new_rd_mips_lw(NULL, irg, block, sp, *mem, NULL,
+ load = new_bd_mips_lw(NULL, block, sp, *mem, NULL,
fp_save_offset - initial_frame_size);
panic("FIXME register requirement with ignore");
*
****************************************************************************************************/
-typedef ir_node *construct_binop_func(dbg_info *db, ir_graph *irg,
- ir_node *block, ir_node *left, ir_node *right);
+typedef ir_node *construct_binop_func(dbg_info *db, ir_node *block,
+ ir_node *left, ir_node *right);
static inline int mode_needs_gp_reg(ir_mode *mode) {
return mode_is_int(mode) || mode_is_reference(mode);
ir_node *res;
assert(val >= -32768 && val <= 32767);
- res = new_rd_mips_Immediate(NULL, irg, block, MIPS_IMM_CONST, NULL,
- val);
+ res = new_bd_mips_Immediate(NULL, block, MIPS_IMM_CONST, NULL, val);
slots = get_mips_slots(res);
slots[0] = &mips_gp_regs[REG_GP_NOREG];
{
ir_graph *irg = current_ir_graph;
ir_node *block = get_irg_start_block(irg);
- ir_node *zero = new_rd_mips_zero(NULL, irg, block);
+ ir_node *zero = new_bd_mips_zero(NULL, block);
const arch_register_t **slots = get_mips_slots(zero);
slots[0] = &mips_gp_regs[REG_ZERO];
static ir_node *gen_binop(ir_node *node, ir_node *left, ir_node *right,
construct_binop_func func, int supports_immediate)
{
- ir_graph *irg = current_ir_graph;
dbg_info *dbgi = get_irn_dbg_info(node);
ir_node *block = be_transform_node(get_nodes_block(node));
ir_node *res;
new_right = be_transform_node(right);
}
- res = func(dbgi, irg, block, new_left, new_right);
+ res = func(dbgi, block, new_left, new_right);
return res;
}
{
/* TODO: match add(symconst, const) */
return gen_binop(node, get_Add_left(node), get_Add_right(node),
- new_rd_mips_addu, 1);
+ new_bd_mips_addu, 1);
}
static ir_node *gen_Sub(ir_node *node)
{
return gen_binop(node, get_Sub_left(node), get_Sub_right(node),
- new_rd_mips_addu, 0);
+ new_bd_mips_addu, 0);
}
static ir_node *gen_And(ir_node *node)
{
return gen_binop(node, get_Add_left(node), get_Add_right(node),
- new_rd_mips_and, 1);
+ new_bd_mips_and, 1);
}
static ir_node *gen_Or(ir_node *node)
{
return gen_binop(node, get_Add_left(node), get_Add_right(node),
- new_rd_mips_or, 1);
+ new_bd_mips_or, 1);
}
static ir_node *gen_Eor(ir_node *node)
{
return gen_binop(node, get_Add_left(node), get_Add_right(node),
- new_rd_mips_xor, 1);
+ new_bd_mips_xor, 1);
}
static ir_node *gen_Shl(ir_node *node)
{
return gen_binop(node, get_Add_left(node), get_Add_right(node),
- new_rd_mips_sll, 1);
+ new_bd_mips_sll, 1);
}
static ir_node *gen_Shr(ir_node *node)
{
return gen_binop(node, get_Add_left(node), get_Add_right(node),
- new_rd_mips_srl, 1);
+ new_bd_mips_srl, 1);
}
static ir_node *gen_Shrs(ir_node *node)
{
return gen_binop(node, get_Add_left(node), get_Add_right(node),
- new_rd_mips_sra, 1);
+ new_bd_mips_sra, 1);
}
static ir_node *gen_Not(ir_node *node)
{
- ir_graph *irg = current_ir_graph;
dbg_info *dbgi = get_irn_dbg_info(node);
ir_node *block = be_transform_node(get_nodes_block(node));
ir_node *op = get_Not_op(node);
/* we can transform not->or to nor */
if(is_Or(op)) {
return gen_binop(op, get_Or_left(op), get_Or_right(op),
- new_rd_mips_nor, 1);
+ new_bd_mips_nor, 1);
}
/* construct (op < 1) */
one = mips_create_Immediate(1);
new_op = be_transform_node(op);
- res = new_rd_mips_sltu(dbgi, irg, block, new_op, one);
+ res = new_bd_mips_sltu(dbgi, block, new_op, one);
return res;
}
static ir_node *gen_Minus(ir_node *node)
{
- ir_graph *irg = current_ir_graph;
dbg_info *dbgi = get_irn_dbg_info(node);
ir_node *block = be_transform_node(get_nodes_block(node));
ir_node *op = get_Minus_op(node);
/* construct (0 - op) */
zero = mips_create_zero();
- res = new_rd_mips_subu(dbgi, irg, block, zero, new_op);
+ res = new_bd_mips_subu(dbgi, block, zero, new_op);
return res;
}
static ir_node *gen_Abs(ir_node *node)
{
- ir_graph *irg = current_ir_graph;
dbg_info *dbgi = get_irn_dbg_info(node);
ir_node *block = be_transform_node(get_nodes_block(node));
ir_node *op = get_Abs_op(node);
/* TODO: support other bit sizes... */
assert(get_mode_size_bits(get_irn_mode(node)) == 32);
sra_const = mips_create_Immediate(31);
- sra = new_rd_mips_sra(dbgi, irg, block, new_op, sra_const);
- add = new_rd_mips_addu(dbgi, irg, block, new_op, sra);
- xor = new_rd_mips_xor(dbgi, irg, block, sra, add);
+ sra = new_bd_mips_sra( dbgi, block, new_op, sra_const);
+ add = new_bd_mips_addu(dbgi, block, new_op, sra);
+ xor = new_bd_mips_xor( dbgi, block, sra, add);
return xor;
}
static ir_node* gen_Const(ir_node *node)
{
- ir_graph *irg = current_ir_graph;
dbg_info *dbgi = get_irn_dbg_info(node);
ir_node *block = be_transform_node(get_nodes_block(node));
tarval *tv = get_Const_tarval(node);
if(upper == 0) {
upper_node = mips_create_zero();
} else {
- upper_node = new_rd_mips_lui(dbgi, irg, block, MIPS_IMM_CONST, NULL,
- upper);
+ upper_node = new_bd_mips_lui(dbgi, block, MIPS_IMM_CONST, NULL, upper);
}
if(lower == 0)
return upper_node;
or_const = mips_create_Immediate(lower);
- lower_node = new_rd_mips_or(dbgi, irg, block, upper_node, or_const);
+ lower_node = new_bd_mips_or(dbgi, block, upper_node, or_const);
return lower_node;
}
static ir_node* gen_SymConst(ir_node *node)
{
- ir_graph *irg = current_ir_graph;
dbg_info *dbgi = get_irn_dbg_info(node);
ir_node *block = be_transform_node(get_nodes_block(node));
ir_entity *entity;
entity = get_SymConst_entity(node);
- lui = new_rd_mips_lui(dbgi, irg, block, MIPS_IMM_SYMCONST_HI,
- entity, 0);
- or_const = new_rd_mips_Immediate(dbgi, irg, block,
- MIPS_IMM_SYMCONST_LO, entity, 0);
- or = new_rd_mips_or(dbgi, irg, block, lui, or_const);
+ lui = new_bd_mips_lui(dbgi, block, MIPS_IMM_SYMCONST_HI, entity, 0);
+ or_const = new_bd_mips_Immediate(dbgi, block, MIPS_IMM_SYMCONST_LO, entity, 0);
+ or = new_bd_mips_or(dbgi, block, lui, or_const);
slots = get_mips_slots(or_const);
slots[0] = &mips_gp_regs[REG_GP_NOREG];
return or;
}
-typedef ir_node* (*gen_load_func) (dbg_info *dbg, ir_graph *irg,
- ir_node *block, ir_node *ptr, ir_node *mem,
- ir_entity *entity, long offset);
+typedef ir_node* (*gen_load_func)(dbg_info *dbg, ir_node *block, ir_node *ptr,
+ ir_node *mem, ir_entity *entity, long offset);
/**
* Generates a mips node for a firm Load node
*/
static ir_node *gen_Load(ir_node *node)
{
- ir_graph *irg = current_ir_graph;
dbg_info *dbgi = get_irn_dbg_info(node);
ir_node *block = be_transform_node(get_nodes_block(node));
ir_node *mem = get_Load_mem(node);
switch(get_mode_size_bits(mode)) {
case 32:
- func = new_rd_mips_lw;
+ func = new_bd_mips_lw;
break;
case 16:
- func = sign ? new_rd_mips_lh : new_rd_mips_lhu;
+ func = sign ? new_bd_mips_lh : new_bd_mips_lhu;
break;
case 8:
- func = sign ? new_rd_mips_lb : new_rd_mips_lbu;
+ func = sign ? new_bd_mips_lb : new_bd_mips_lbu;
break;
default:
panic("mips backend only support 32, 16, 8 bit loads");
}
- res = func(dbgi, irg, block, new_ptr, new_mem, NULL, 0);
+ res = func(dbgi, block, new_ptr, new_mem, NULL, 0);
set_irn_pinned(res, get_irn_pinned(node));
return res;
}
-typedef ir_node* (*gen_store_func) (dbg_info *dbg, ir_graph *irg,
- ir_node *block, ir_node *ptr, ir_node *val,
- ir_node *mem, ir_entity *ent, long offset);
+typedef ir_node* (*gen_store_func)(dbg_info *dbg, ir_node *block, ir_node *ptr,
+ ir_node *val, ir_node *mem, ir_entity *ent, long offset);
/**
* Generates a mips node for a firm Store node
*/
static ir_node *gen_Store(ir_node *node)
{
- ir_graph *irg = current_ir_graph;
dbg_info *dbgi = get_irn_dbg_info(node);
ir_node *block = be_transform_node(get_nodes_block(node));
ir_node *mem = get_Store_mem(node);
switch(get_mode_size_bits(mode)) {
case 32:
- func = new_rd_mips_sw;
+ func = new_bd_mips_sw;
break;
case 16:
- func = new_rd_mips_sh;
+ func = new_bd_mips_sh;
break;
case 8:
- func = new_rd_mips_sb;
+ func = new_bd_mips_sb;
break;
default:
panic("store only supported for 32, 16, 8 bit values in mips backend");
}
- res = func(dbgi, irg, block, new_ptr, new_val, new_mem, NULL, 0);
+ res = func(dbgi, block, new_ptr, new_val, new_mem, NULL, 0);
set_irn_pinned(res, get_irn_pinned(node));
return res;
case pn_Div_res:
proj = new_rd_Proj(dbgi, irg, block, new_div, mode_M,
pn_mips_div_lohi);
- return new_rd_mips_mflo(dbgi, irg, block, proj);
+ return new_bd_mips_mflo(dbgi, block, proj);
default:
break;
}
case pn_Mod_res:
proj = new_rd_Proj(dbgi, irg, block, new_div, mode_M,
pn_mips_div_lohi);
- return new_rd_mips_mfhi(dbgi, irg, block, proj);
+ return new_bd_mips_mfhi(dbgi, block, proj);
default:
break;
}
case pn_DivMod_res_div:
proj = new_rd_Proj(dbgi, irg, block, new_div, mode_M,
pn_mips_div_lohi);
- return new_rd_mips_mflo(dbgi, irg, block, proj);
+ return new_bd_mips_mflo(dbgi, block, proj);
case pn_DivMod_res_mod:
proj = new_rd_Proj(dbgi, irg, block, new_div, mode_M,
pn_mips_div_lohi);
- return new_rd_mips_mfhi(dbgi, irg, block, proj);
+ return new_bd_mips_mfhi(dbgi, block, proj);
default:
break;
}
if(minval != 0) {
minval_const = new_rd_Const(dbg, irg, block, selector_mode, new_tarval_from_long(minval, selector_mode));
minval_const = gen_node_for_Const(env, dbg, irg, block, minval_const);
- sub = new_rd_mips_sub(dbg, irg, block, selector, minval_const);
+ sub = new_bd_mips_sub(dbg, block, selector, minval_const);
} else {
sub = selector;
}
max_const = new_rd_Const(dbg, irg, block, unsigned_mode, new_tarval_from_long(maxval - minval + 1, unsigned_mode));
max_const = gen_node_for_Const(env, dbg, irg, block, max_const);
- sltu = new_rd_mips_slt(dbg, irg, block, sub, max_const);
+ sltu = new_bd_mips_slt(dbg, block, sub, max_const);
zero = gen_zero_node(env, dbg, irg, block);
- beq = new_rd_mips_beq(dbg, irg, block, sltu, zero, mode_T);
+ beq = new_bd_mips_beq(dbg, block, sltu, zero, mode_T);
// attach defaultproj to beq now
set_irn_n(defaultproj, 0, beq);
two_const = new_rd_Const(dbg, irg, block, unsigned_mode, new_tarval_from_long(2, unsigned_mode));
two_const = gen_node_for_Const(env, dbg, irg, block, two_const);
- sl = new_rd_mips_sl(dbg, irg, block, sub, two_const);
+ sl = new_bd_mips_sl(dbg, block, sub, two_const);
- la = new_rd_mips_la(dbg, irg, block);
- add = new_rd_mips_addu(dbg, irg, block, sl, la);
- load = new_rd_mips_load_r(dbg, irg, block, new_NoMem(), add, mode_T);
+ la = new_bd_mips_la( dbg, block);
+ add = new_bd_mips_addu( dbg, block, sl, la);
+ load = new_bd_mips_load_r(dbg, block, new_NoMem(), add, mode_T);
attr = get_mips_attr(load);
attr->modes.load_store_mode = mode_Iu;
attr->tv = new_tarval_from_long(0, mode_Iu);
proj = new_rd_Proj(dbg, irg, block, load, mode_Iu, pn_Load_res);
- switchjmp = new_rd_mips_SwitchJump(dbg, irg, block, proj, mode_T);
+ switchjmp = new_bd_mips_SwitchJump(dbg, block, proj, mode_T);
attr = get_mips_attr(switchjmp);
attr->switch_default_pn = defaultprojn;
static ir_node *gen_Cond(ir_node *node)
{
- ir_graph *irg = current_ir_graph;
dbg_info *dbgi = get_irn_dbg_info(node);
ir_node *block = get_nodes_block(node);
ir_node *sel_proj = get_Cond_selector(node);
panic("mips backend can't handle unoptimized constant Cond");
case pn_Cmp_Eq:
- res = new_rd_mips_beq(dbgi, irg, block, new_left, new_right);
+ res = new_bd_mips_beq(dbgi, block, new_left, new_right);
break;
case pn_Cmp_Lt:
zero = mips_create_zero();
- slt = new_rd_mips_slt(dbgi, irg, block, new_left, new_right);
- res = new_rd_mips_bne(dbgi, irg, block, slt, zero);
+ slt = new_bd_mips_slt(dbgi, block, new_left, new_right);
+ res = new_bd_mips_bne(dbgi, block, slt, zero);
break;
case pn_Cmp_Le:
zero = mips_create_zero();
- slt = new_rd_mips_slt(dbgi, irg, block, new_right, new_left);
- res = new_rd_mips_beq(dbgi, irg, block, slt, zero);
+ slt = new_bd_mips_slt(dbgi, block, new_right, new_left);
+ res = new_bd_mips_beq(dbgi, block, slt, zero);
break;
case pn_Cmp_Gt:
zero = mips_create_zero();
- slt = new_rd_mips_slt(dbgi, irg, block, new_right, new_left);
- res = new_rd_mips_bne(dbgi, irg, block, slt, zero);
+ slt = new_bd_mips_slt(dbgi, block, new_right, new_left);
+ res = new_bd_mips_bne(dbgi, block, slt, zero);
break;
case pn_Cmp_Ge:
zero = mips_create_zero();
- slt = new_rd_mips_slt(dbgi, irg, block, new_right, new_left);
- res = new_rd_mips_bne(dbgi, irg, block, slt, zero);
+ slt = new_bd_mips_slt(dbgi, block, new_right, new_left);
+ res = new_bd_mips_bne(dbgi, block, slt, zero);
break;
case pn_Cmp_Lg:
- res = new_rd_mips_bne(dbgi, irg, block, new_left, new_right);
+ res = new_bd_mips_bne(dbgi, block, new_left, new_right);
break;
default:
static ir_node *gen_Conv(ir_node *node)
{
- ir_graph *irg = current_ir_graph;
dbg_info *dbgi = get_irn_dbg_info(node);
ir_node *block = be_transform_node(get_nodes_block(node));
ir_node *op = get_Conv_op(node);
if(mode_is_signed(src_mode)) {
if(src_size == 8) {
- res = new_rd_mips_seb(dbgi, irg, block, new_op);
+ res = new_bd_mips_seb(dbgi, block, new_op);
} else if(src_size == 16) {
- res = new_rd_mips_seh(dbgi, irg, block, new_op);
+ res = new_bd_mips_seh(dbgi, block, new_op);
} else {
panic("invalid conv %+F", node);
}
} else {
panic("invalid conv %+F", node);
}
- res = new_rd_mips_and(dbgi, irg, block, new_op, and_const);
+ res = new_bd_mips_and(dbgi, block, new_op, and_const);
}
return res;
static ir_node *create_div(ir_node *node, ir_node *left, ir_node *right,
ir_mode *mode)
{
- ir_graph *irg = current_ir_graph;
dbg_info *dbgi = get_irn_dbg_info(node);
ir_node *block = be_transform_node(get_nodes_block(node));
ir_node *new_left = be_transform_node(left);
ir_node *res;
if(mode_is_signed(mode)) {
- res = new_rd_mips_div(dbgi, irg, block, new_left, new_right);
+ res = new_bd_mips_div(dbgi, block, new_left, new_right);
} else {
- res = new_rd_mips_divu(dbgi, irg, block, new_left, new_right);
+ res = new_bd_mips_divu(dbgi, block, new_left, new_right);
}
set_irn_pinned(res, get_irn_pinned(node));
assert(get_mode_size_bits(get_irn_mode(op2)) == get_mode_size_bits(env->mode));
if(mode_is_signed(mode)) {
- mul = new_rd_mips_mult(env->dbg, env->irg, env->block, get_Mul_left(node), get_Mul_right(node));
+ mul = new_bd_mips_mult(env->dbg, env->block, get_Mul_left(node), get_Mul_right(node));
} else {
- mul = new_rd_mips_multu(env->dbg, env->irg, env->block, get_Mul_left(node), get_Mul_right(node));
+ mul = new_bd_mips_multu(env->dbg, env->block, get_Mul_left(node), get_Mul_right(node));
}
- mflo = new_rd_mips_mflo(env->dbg, env->irg, env->block, mul);
+ mflo = new_bd_mips_mflo(env->dbg, env->block, mul);
return mflo;
}
static
ir_node *gen_node_for_IJmp(mips_transform_env_t *env) {
- ir_graph *irg = env->irg;
ir_node *node = env->irn;
dbg_info *dbg = get_irn_dbg_info(node);
ir_node *block = get_nodes_block(node);
ir_node *target = get_IJmp_target(node);
- return new_rd_mips_jr(dbg, irg, block, target);
+ return new_bd_mips_jr(dbg, block, target);
}
static
ir_node *node = env->irn;
ir_node *subu, *srlv, *sllv, *or;
- subu = new_rd_mips_subuzero(env->dbg, env->irg, env->block, get_Rot_right(node));
- srlv = new_rd_mips_srlv(env->dbg, env->irg, env->block, get_Rot_left(node), subu);
- sllv = new_rd_mips_sllv(env->dbg, env->irg, env->block, get_Rot_left(node), get_Rot_right(node));
- or = new_rd_mips_or(env->dbg, env->irg, env->block, sllv, srlv);
+ subu = new_bd_mips_subuzero(env->dbg, env->block, get_Rot_right(node));
+ srlv = new_bd_mips_srlv(env->dbg, env->block, get_Rot_left(node), subu);
+ sllv = new_bd_mips_sllv(env->dbg, env->block, get_Rot_left(node), get_Rot_right(node));
+ or = new_bd_mips_or(env->dbg, env->block, sllv, srlv);
return or;
}
for (i = 0; i < 4; ++i) {
ir_node *load;
- load = new_rd_mips_load_r(dbg, irg, new_bl, mem_phi, src, mode_T);
+ load = new_bd_mips_load_r(dbg, new_bl, mem_phi, src, mode_T);
attr = get_mips_attr(load);
attr->modes.load_store_mode = mode_Iu;
attr->tv = new_tarval_from_long(i * 4, mode_Iu);
for (i = 0; i < 4; ++i) {
ir_node *store;
- store = new_rd_mips_store_r(dbg, irg, new_bl, mem_phi, dst, ld[i], mode_T);
+ store = new_bd_mips_store_r(dbg, new_bl, mem_phi, dst, ld[i], mode_T);
attr = get_mips_attr(store);
attr->modes.load_store_mode = mode_Iu;
attr->tv = new_tarval_from_long(i * 4, mode_Iu);
i -= 1;
}
- load = new_rd_mips_load_r(dbg, irg, block, mem, src, mode_T);
+ load = new_bd_mips_load_r(dbg, block, mem, src, mode_T);
attr = get_mips_attr(load);
attr->modes.load_store_mode = mode;
attr->tv = new_tarval_from_long(offset, mode_Iu);
projv = new_rd_Proj(dbg, irg, block, load, mode, pn_Load_res);
- store = new_rd_mips_store_r(dbg, irg, block, mem, dst, projv, mode_T);
+ store = new_bd_mips_store_r(dbg, block, mem, dst, projv, mode_T);
attr = get_mips_attr(store);
attr->modes.load_store_mode = mode;
attr->tv = new_tarval_from_long(offset, mode_Iu);
sched_point = sched_prev(node);
}
- store = new_rd_mips_sw(env->dbg, env->irg, env->block, ptr, val, nomem,
- ent, 0);
+ store = new_bd_mips_sw(env->dbg, env->block, ptr, val, nomem, ent, 0);
if (sched_point) {
sched_add_after(sched_point, store);
sched_point = sched_prev(node);
}
- load = new_rd_mips_lw(env->dbg, env->irg, env->block, ptr, mem, ent, 0);
+ load = new_bd_mips_lw(env->dbg, env->block, ptr, mem, ent, 0);
proj = new_rd_Proj(env->dbg, env->irg, env->block, load, mode_Iu, pn_mips_lw_res);
op1 = get_irn_n(node, 0);
op2 = get_irn_n(node, 1);
- add = new_rd_mips_addu(env->dbg, env->irg, env->block, op1, op2);
+ add = new_bd_mips_addu(env->dbg, env->block, op1, op2);
/* copy the register requirements from the old node to the new node */
reg = arch_get_irn_register(node);
if (regclass == &ppc32_reg_classes[CLASS_ppc32_gp])
{
- store = new_rd_ppc32_Stw(dbg, current_ir_graph, block,
+ store = new_bd_ppc32_Stw(dbg, block,
get_irn_n(node, 0), get_irn_n(node, 1), new_NoMem());
}
else if (regclass == &ppc32_reg_classes[CLASS_ppc32_fp])
{
- store = new_rd_ppc32_Stfd(dbg, current_ir_graph, block,
+ store = new_bd_ppc32_Stfd(dbg, block,
get_irn_n(node, 0), get_irn_n(node, 1), new_NoMem());
}
else panic("Spill for register class not supported yet!");
if (regclass == &ppc32_reg_classes[CLASS_ppc32_gp])
{
- load = new_rd_ppc32_Lwz(dbg, current_ir_graph, block, get_irn_n(node, 0), get_irn_n(node, 1));
+ load = new_bd_ppc32_Lwz(dbg, block, get_irn_n(node, 0), get_irn_n(node, 1));
}
else if (regclass == &ppc32_reg_classes[CLASS_ppc32_fp])
{
- load = new_rd_ppc32_Lfd(dbg, current_ir_graph, block, get_irn_n(node, 0), get_irn_n(node, 1));
+ load = new_bd_ppc32_Lfd(dbg, block, get_irn_n(node, 0), get_irn_n(node, 1));
}
else panic("Reload for register class not supported yet!");
switch(get_nice_modecode(env->mode)){
case irm_D:
- return new_rd_ppc32_fAdd(env->dbg, env->irg, env->block, op1, op2, env->mode);
+ return new_bd_ppc32_fAdd(env->dbg, env->block, op1, op2, env->mode);
case irm_F:
- return new_rd_ppc32_fAdds(env->dbg, env->irg, env->block, op1, op2, env->mode);
+ return new_bd_ppc32_fAdds(env->dbg, env->block, op1, op2, env->mode);
case irm_Is:
case irm_Iu:
case irm_Hs:
case irm_P:
if(is_16bit_signed_const(op1))
{
- ir_node *addnode = new_rd_ppc32_Addi(env->dbg, env->irg, env->block, op2, env->mode);
+ ir_node *addnode = new_bd_ppc32_Addi(env->dbg, env->block, op2, env->mode);
set_ppc32_constant_tarval(addnode, get_ppc32_constant_tarval(op1));
set_ppc32_offset_mode(addnode, ppc32_ao_None);
return addnode;
}
if(is_16bit_signed_const(op2))
{
- ir_node *addnode = new_rd_ppc32_Addi(env->dbg, env->irg, env->block, op1, env->mode);
+ ir_node *addnode = new_bd_ppc32_Addi(env->dbg, env->block, op1, env->mode);
set_ppc32_constant_tarval(addnode, get_ppc32_constant_tarval(op2));
set_ppc32_offset_mode(addnode, ppc32_ao_None);
return addnode;
}
- return new_rd_ppc32_Add(env->dbg, env->irg, env->block, op1, op2, env->mode);
+ return new_bd_ppc32_Add(env->dbg, env->block, op1, op2, env->mode);
default:
panic("Mode for Add not supported: %F", env->mode);
switch(get_nice_modecode(env->mode)){
case irm_D:
- return new_rd_ppc32_fMul(env->dbg, env->irg, env->block, op1, op2, env->mode);
+ return new_bd_ppc32_fMul(env->dbg, env->block, op1, op2, env->mode);
case irm_F:
- return new_rd_ppc32_fMuls(env->dbg, env->irg, env->block, op1, op2, env->mode);
+ return new_bd_ppc32_fMuls(env->dbg, env->block, op1, op2, env->mode);
case irm_Is:
case irm_Iu:
case irm_Hs:
case irm_Hu:
case irm_Bs:
case irm_Bu:
- return new_rd_ppc32_Mullw(env->dbg, env->irg, env->block, op1, op2, env->mode);
+ return new_bd_ppc32_Mullw(env->dbg, env->block, op1, op2, env->mode);
case irm_P:
default:
case irm_Is:
case irm_Hs:
case irm_Bs:
- return new_rd_ppc32_Mulhw(env->dbg, env->irg, env->block, op1, op2, env->mode);
+ return new_bd_ppc32_Mulhw(env->dbg, env->block, op1, op2, env->mode);
case irm_Iu:
case irm_Hu:
case irm_Bu:
- return new_rd_ppc32_Mulhwu(env->dbg, env->irg, env->block, op1, op2, env->mode);
+ return new_bd_ppc32_Mulhwu(env->dbg, env->block, op1, op2, env->mode);
case irm_D:
case irm_F:
ir_node *op1 = get_And_left(env->irn);
ir_node *op2 = get_And_right(env->irn);
- return new_rd_ppc32_And(env->dbg, env->irg, env->block, op1, op2, env->mode);
+ return new_bd_ppc32_And(env->dbg, env->block, op1, op2, env->mode);
}
/**
ir_node *op1 = get_Or_left(env->irn);
ir_node *op2 = get_Or_right(env->irn);
- return new_rd_ppc32_Or(env->dbg, env->irg, env->block, op1, op2, env->mode);
+ return new_bd_ppc32_Or(env->dbg, env->block, op1, op2, env->mode);
}
/**
ir_node *op1 = get_Eor_left(env->irn);
ir_node *op2 = get_Eor_right(env->irn);
- return new_rd_ppc32_Xor(env->dbg, env->irg, env->block, op1, op2, env->mode);
+ return new_bd_ppc32_Xor(env->dbg, env->block, op1, op2, env->mode);
}
/**
switch(get_nice_modecode(env->mode)){
case irm_D:
- return new_rd_ppc32_fSub(env->dbg, env->irg, env->block, op1, op2, env->mode);
+ return new_bd_ppc32_fSub(env->dbg, env->block, op1, op2, env->mode);
case irm_F:
- return new_rd_ppc32_fSubs(env->dbg, env->irg, env->block, op1, op2, env->mode);
+ return new_bd_ppc32_fSubs(env->dbg, env->block, op1, op2, env->mode);
case irm_Is:
case irm_Iu:
case irm_Hs:
case irm_Bs:
case irm_Bu:
case irm_P:
- return new_rd_ppc32_Sub(env->dbg, env->irg, env->block, op1, op2, env->mode);
+ return new_bd_ppc32_Sub(env->dbg, env->block, op1, op2, env->mode);
default:
panic("Mode for Sub not supported: %F", env->mode);
switch(get_nice_modecode(env->mode)){
case irm_D:
- return new_rd_ppc32_fDiv(env->dbg, env->irg, env->block, op1, op2, env->mode);
+ return new_bd_ppc32_fDiv(env->dbg, env->block, op1, op2, env->mode);
case irm_F:
- return new_rd_ppc32_fDivs(env->dbg, env->irg, env->block, op1, op2, env->mode);
+ return new_bd_ppc32_fDivs(env->dbg, env->block, op1, op2, env->mode);
default:
panic("Mode for Quot not supported: %F", env->mode);
case irm_Is:
case irm_Hs:
case irm_Bs:
- return new_rd_ppc32_Divw(env->dbg, env->irg, env->block, op1, op2, mode_T);
+ return new_bd_ppc32_Divw(env->dbg, env->block, op1, op2, mode_T);
case irm_Iu:
case irm_Hu:
case irm_Bu:
- return new_rd_ppc32_Divwu(env->dbg, env->irg, env->block, op1, op2, mode_T);
+ return new_bd_ppc32_Divwu(env->dbg, env->block, op1, op2, mode_T);
default:
panic("Mode for Div not supported: %F", get_irn_mode(op1));
case irm_Is:
case irm_Hs:
case irm_Bs:
- div_result = new_rd_ppc32_Divw(env->dbg, env->irg, env->block, op1, op2, mode_T);
+ div_result = new_bd_ppc32_Divw(env->dbg, env->block, op1, op2, mode_T);
break;
case irm_Iu:
case irm_Hu:
case irm_Bu:
- div_result = new_rd_ppc32_Divwu(env->dbg, env->irg, env->block, op1, op2, mode_T);
+ div_result = new_bd_ppc32_Divwu(env->dbg, env->block, op1, op2, mode_T);
break;
default:
ir_node *mul_result;
ir_node *mod_result;
- mul_result = new_rd_ppc32_Mullw(env->dbg, env->irg, env->block, proj_div, op2, res_mode);
- mod_result = new_rd_ppc32_Sub(env->dbg, env->irg, env->block, op1, mul_result, res_mode);
+ mul_result = new_bd_ppc32_Mullw(env->dbg, env->block, proj_div, op2, res_mode);
+ mod_result = new_bd_ppc32_Sub(env->dbg, env->block, op1, mul_result, res_mode);
exchange(proj_mod, mod_result);
}
case irm_Is:
case irm_Hs:
case irm_Bs:
- div_result = new_rd_ppc32_Divw(env->dbg, env->irg, env->block, op1, op2, mode_T);
+ div_result = new_bd_ppc32_Divw(env->dbg, env->block, op1, op2, mode_T);
break;
case irm_Iu:
case irm_Hu:
case irm_Bu:
- div_result = new_rd_ppc32_Divwu(env->dbg, env->irg, env->block, op1, op2, mode_T);
+ div_result = new_bd_ppc32_Divwu(env->dbg, env->block, op1, op2, mode_T);
break;
default:
proj_div = new_rd_Proj(env->dbg, env->irg, env->block, div_result, res_mode, pn_DivMod_res_div);
- mul_result = new_rd_ppc32_Mullw(env->dbg, env->irg, env->block, proj_div, op2, res_mode);
- mod_result = new_rd_ppc32_Sub(env->dbg, env->irg, env->block, op1, mul_result, res_mode);
+ mul_result = new_bd_ppc32_Mullw(env->dbg, env->block, proj_div, op2, res_mode);
+ mod_result = new_bd_ppc32_Sub(env->dbg, env->block, op1, mul_result, res_mode);
exchange(proj_mod, mod_result);
if(is_ppc32_Const(op2))
{
- ir_node *shift = new_rd_ppc32_Rlwinm(env->dbg, env->irg, env->block, op1, env->mode);
+ ir_node *shift = new_bd_ppc32_Rlwinm(env->dbg, env->block, op1, env->mode);
tarval *tv_const = get_ppc32_constant_tarval(op2);
int sh = get_tarval_long(tv_const);
assert(0<=sh && sh<=31);
set_ppc32_rlwimi_const(shift, sh, 0, 31-sh);
return shift;
}
- return new_rd_ppc32_Slw(env->dbg, env->irg, env->block, op1, op2, env->mode);
+ return new_bd_ppc32_Slw(env->dbg, env->block, op1, op2, env->mode);
}
/**
if(is_ppc32_Const(op2))
{
- ir_node *shift = new_rd_ppc32_Rlwinm(env->dbg, env->irg, env->block, op1, env->mode);
+ ir_node *shift = new_bd_ppc32_Rlwinm(env->dbg, env->block, op1, env->mode);
tarval *tv_const = get_ppc32_constant_tarval(op2);
int sh = get_tarval_long(tv_const);
assert(0<=sh && sh<=31);
set_ppc32_rlwimi_const(shift, 32-sh, sh, 31);
return shift;
}
- return new_rd_ppc32_Srw(env->dbg, env->irg, env->block, op1, op2, env->mode);
+ return new_bd_ppc32_Srw(env->dbg, env->block, op1, op2, env->mode);
}
/**
if(is_ppc32_Const(op2))
{
- ir_node *shift = new_rd_ppc32_Srawi(env->dbg, env->irg, env->block, op1, env->mode);
+ ir_node *shift = new_bd_ppc32_Srawi(env->dbg, env->block, op1, env->mode);
tarval *tv_const = get_ppc32_constant_tarval(op2);
int sh = get_tarval_long(tv_const);
assert(0<=sh && sh<=31);
set_ppc32_offset_mode(shift, ppc32_ao_None);
return shift;
}
- return new_rd_ppc32_Sraw(env->dbg, env->irg, env->block, op1, op2, env->mode);
+ return new_bd_ppc32_Sraw(env->dbg, env->block, op1, op2, env->mode);
}
/**
if(is_ppc32_Const(op2))
{
- ir_node *rot = new_rd_ppc32_Rlwinm(env->dbg, env->irg, env->block, op1, env->mode);
+ ir_node *rot = new_bd_ppc32_Rlwinm(env->dbg, env->block, op1, env->mode);
tarval *tv_const = get_ppc32_constant_tarval(op2);
int sh = get_tarval_long(tv_const);
assert(0<=sh && sh<=31);
set_ppc32_rlwimi_const(rot, sh, 0, 31);
return rot;
}
- return new_rd_ppc32_Rlwnm(env->dbg, env->irg, env->block, op1, op2, env->mode);
+ return new_bd_ppc32_Rlwnm(env->dbg, env->block, op1, op2, env->mode);
}
/**
}
if(mode_is_float(env->mode))
- return new_rd_ppc32_fCmpu(env->dbg, env->irg, env->block, op1, op2, env->mode);
+ return new_bd_ppc32_fCmpu(env->dbg, env->block, op1, op2, env->mode);
else if(mode_is_signed(env->mode))
{
if(is_16bit_signed_const(op2))
{
- ir_node *cmp = new_rd_ppc32_Cmpi(env->dbg, env->irg, env->block, op1, env->mode);
+ ir_node *cmp = new_bd_ppc32_Cmpi(env->dbg, env->block, op1, env->mode);
tarval *tv_const = get_ppc32_constant_tarval(op2);
set_ppc32_constant_tarval(cmp, tv_const);
set_ppc32_offset_mode(cmp, ppc32_ao_None);
}
else
{
- return new_rd_ppc32_Cmp(env->dbg, env->irg, env->block, op1, op2, env->mode);
+ return new_bd_ppc32_Cmp(env->dbg, env->block, op1, op2, env->mode);
}
}
else
{
if(is_16bit_unsigned_const(op2))
{
- ir_node *cmp = new_rd_ppc32_Cmpli(env->dbg, env->irg, env->block, op1, env->mode);
+ ir_node *cmp = new_bd_ppc32_Cmpli(env->dbg, env->block, op1, env->mode);
tarval *tv_const = get_ppc32_constant_tarval(op2);
set_ppc32_constant_tarval(cmp, tv_const);
set_ppc32_offset_mode(cmp, ppc32_ao_None);
}
else
{
- return new_rd_ppc32_Cmpl(env->dbg, env->irg, env->block, op1, op2, env->mode);
+ return new_bd_ppc32_Cmpl(env->dbg, env->block, op1, op2, env->mode);
}
}
}
switch(get_nice_modecode(env->mode)){
case irm_D:
case irm_F:
- return new_rd_ppc32_fNeg(env->dbg, env->irg, env->block, op, env->mode);
+ return new_bd_ppc32_fNeg(env->dbg, env->block, op, env->mode);
case irm_Is:
case irm_Iu:
case irm_Hs:
case irm_Bs:
case irm_Bu:
case irm_P:
- return new_rd_ppc32_Neg(env->dbg, env->irg, env->block, op, env->mode);
+ return new_bd_ppc32_Neg(env->dbg, env->block, op, env->mode);
default:
panic("Mode for Neg not supported: %F", env->mode);
* @return the created ppc Not node
*/
static ir_node *gen_Not(ppc32_transform_env_t *env) {
- return new_rd_ppc32_Not(env->dbg, env->irg, env->block, get_Not_op(env->irn), env->mode);
+ return new_bd_ppc32_Not(env->dbg, env->block, get_Not_op(env->irn), env->mode);
}
static ir_node *own_gen_Andi_dot_lo16(ppc32_transform_env_t *env, ir_node *op, int mask)
{
- ir_node *andi = new_rd_ppc32_Andi_dot(env->dbg, env->irg, env->block, op, mode_T);
+ ir_node *andi = new_bd_ppc32_Andi_dot(env->dbg, env->block, op, mode_T);
ir_node* in[1];
set_ppc32_offset_mode(andi, ppc32_ao_Lo16);
set_ppc32_constant_tarval(andi, new_tarval_from_long(mask, mode_Is));
switch(to_mode)
{
case irm_F:
- return new_rd_ppc32_fRsp(env->dbg, env->irg, env->block, op, env->mode);
+ return new_bd_ppc32_fRsp(env->dbg, env->block, op, env->mode);
default:
break;
}
switch(to_mode)
{
case irm_Hs:
- return new_rd_ppc32_Extsh(env->dbg, env->irg, env->block, op, env->mode);
+ return new_bd_ppc32_Extsh(env->dbg, env->block, op, env->mode);
case irm_Hu:
return own_gen_Andi_dot_lo16(env, op, 0xffff);
case irm_Bs:
- return new_rd_ppc32_Extsb(env->dbg, env->irg, env->block, op, env->mode);
+ return new_bd_ppc32_Extsb(env->dbg, env->block, op, env->mode);
case irm_Bu:
return own_gen_Andi_dot_lo16(env, op, 0xff);
case irm_Is:
case irm_Is:
SKIP;
case irm_Bs:
- return new_rd_ppc32_Extsb(env->dbg, env->irg, env->block, op, env->mode);
+ return new_bd_ppc32_Extsb(env->dbg, env->block, op, env->mode);
case irm_Bu:
return own_gen_Andi_dot_lo16(env, op, 0xff);
case irm_Hs:
- return new_rd_ppc32_Extsh(env->dbg, env->irg, env->block, op, env->mode);
+ return new_bd_ppc32_Extsh(env->dbg, env->block, op, env->mode);
default:
break;
}
case irm_Hs:
SKIP;
case irm_Bs:
- return new_rd_ppc32_Extsb(env->dbg, env->irg, env->block, op, env->mode);
+ return new_bd_ppc32_Extsb(env->dbg, env->block, op, env->mode);
default:
break;
}
{
case irm_F:
case irm_D:
- return new_rd_ppc32_fAbs(env->dbg, env->irg, env->block, op, env->mode);
+ return new_bd_ppc32_fAbs(env->dbg, env->block, op, env->mode);
case irm_Is:
shift += 16;
case irm_Hs:
shift += 8;
case irm_Bs:
- n1 = new_rd_ppc32_Srawi(env->dbg, env->irg, env->block, op, env->mode);
+ n1 = new_bd_ppc32_Srawi(env->dbg, env->block, op, env->mode);
set_ppc32_constant_tarval(n1, new_tarval_from_long(shift, mode_Is));
set_ppc32_offset_mode(n1, ppc32_ao_None);
- n2 = new_rd_ppc32_Add(env->dbg, env->irg, env->block, op, n1, env->mode);
- return new_rd_ppc32_Xor(env->dbg, env->irg, env->block, n2, n1, env->mode);
+ n2 = new_bd_ppc32_Add(env->dbg, env->block, op, n1, env->mode);
+ return new_bd_ppc32_Xor(env->dbg, env->block, n2, n1, env->mode);
default:
break;
}
if(is_Proj(selector) && projmode==get_ppc32_mode_Cond())
{
int projnum = get_Proj_proj(selector);
- ir_node *branch = new_rd_ppc32_Branch(env->dbg, env->irg, env->block, selector, env->mode);
+ ir_node *branch = new_bd_ppc32_Branch(env->dbg, env->block, selector, env->mode);
set_ppc32_proj_nr(branch, projnum);
return branch;
}
else
{
- ir_node *unknown_gpr = new_rd_ppc32_Unknown(env->dbg, env->irg, env->block, mode_Is);
- ir_node *unknown_cond = new_rd_ppc32_cUnknown(env->dbg, env->irg, env->block, get_ppc32_mode_Cond());
+ ir_node *unknown_gpr = new_bd_ppc32_Unknown(env->dbg, env->block, mode_Is);
+ ir_node *unknown_cond = new_bd_ppc32_cUnknown(env->dbg, env->block, get_ppc32_mode_Cond());
- ir_node *switch_node = new_rd_ppc32_Switch(env->dbg, env->irg, env->block, selector,
+ ir_node *switch_node = new_bd_ppc32_Switch(env->dbg, env->block, selector,
unknown_gpr, unknown_cond, env->mode);
set_ppc32_proj_nr(switch_node, get_Cond_defaultProj(env->irn));
*/
static ir_node *gen_Unknown(ppc32_transform_env_t *env) {
if(mode_is_float(env->mode))
- return new_rd_ppc32_fUnknown(env->dbg, env->irg, env->block, env->mode);
+ return new_bd_ppc32_fUnknown(env->dbg, env->block, env->mode);
else if (mode_is_int(env->mode))
- return new_rd_ppc32_Unknown(env->dbg, env->irg, env->block, env->mode);
+ return new_bd_ppc32_Unknown(env->dbg, env->block, env->mode);
else
panic("Mode %F for unknown value not supported.", env->mode);
}
if(is_ppc32_Const(ptr))
{
tv_const = get_ppc32_constant_tarval(ptr);
- ptr = new_rd_ppc32_Addis_zero(env->dbg, env->irg, env->block, mode_P, ppc32_ao_Ha16, tv_const, NULL);
+ ptr = new_bd_ppc32_Addis_zero(env->dbg, env->block, mode_P, ppc32_ao_Ha16, tv_const, NULL);
}
else if(is_ppc32_SymConst(ptr))
{
if(is_direct_entity(ent))
{
id_symconst = get_entity_ident(ent);
- ptr = new_rd_ppc32_Addis_zero(env->dbg, env->irg, env->block, mode_P, ppc32_ao_Ha16, NULL, id_symconst);
+ ptr = new_bd_ppc32_Addis_zero(env->dbg, env->block, mode_P, ppc32_ao_Ha16, NULL, id_symconst);
}
}
*ptv = tv_const;
loadptr = ldst_insert_const(loadptr, &tv_const, &id_symconst, env);
switch(get_nice_modecode(mode)){
case irm_Bu:
- load = new_rd_ppc32_Lbz(env->dbg, env->irg, env->block, loadptr, get_Load_mem(node));
+ load = new_bd_ppc32_Lbz(env->dbg, env->block, loadptr, get_Load_mem(node));
break;
case irm_Bs:
{
ir_node *proj_load, *extsb_node;
- load = new_rd_ppc32_Lbz(env->dbg, env->irg, env->block, loadptr, get_Load_mem(node));
+ load = new_bd_ppc32_Lbz(env->dbg, env->block, loadptr, get_Load_mem(node));
proj_load = new_rd_Proj(env->dbg, env->irg, env->block, load, mode, pn_Load_res);
- extsb_node = new_rd_ppc32_Extsb(env->dbg, env->irg, env->block, proj_load, mode);
+ extsb_node = new_bd_ppc32_Extsb(env->dbg, env->block, proj_load, mode);
exchange(get_succ_Proj(env->irn, pn_Load_res), extsb_node);
break;
}
case irm_Hu:
- load = new_rd_ppc32_Lhz(env->dbg, env->irg, env->block, loadptr, get_Load_mem(node));
+ load = new_bd_ppc32_Lhz(env->dbg, env->block, loadptr, get_Load_mem(node));
break;
case irm_Hs:
- load =new_rd_ppc32_Lha(env->dbg, env->irg, env->block, loadptr, get_Load_mem(node));
+	load = new_bd_ppc32_Lha(env->dbg, env->block, loadptr, get_Load_mem(node));
break;
case irm_Is:
case irm_Iu:
case irm_P:
- load = new_rd_ppc32_Lwz(env->dbg, env->irg, env->block, loadptr, get_Load_mem(node));
+ load = new_bd_ppc32_Lwz(env->dbg, env->block, loadptr, get_Load_mem(node));
break;
case irm_D:
- load = new_rd_ppc32_Lfd(env->dbg, env->irg, env->block, loadptr, get_Load_mem(node));
+ load = new_bd_ppc32_Lfd(env->dbg, env->block, loadptr, get_Load_mem(node));
break;
case irm_F:
- load = new_rd_ppc32_Lfs(env->dbg, env->irg, env->block, loadptr, get_Load_mem(node));
+ load = new_bd_ppc32_Lfs(env->dbg, env->block, loadptr, get_Load_mem(node));
break;
default:
switch(get_nice_modecode(mode)){
case irm_Bu:
case irm_Bs:
- store = new_rd_ppc32_Stb(env->dbg, env->irg, env->block, storeptr, get_Store_value(node), get_Store_mem(node));
+ store = new_bd_ppc32_Stb(env->dbg, env->block, storeptr, get_Store_value(node), get_Store_mem(node));
break;
case irm_Hu:
case irm_Hs:
- store = new_rd_ppc32_Sth(env->dbg, env->irg, env->block, storeptr, get_Store_value(node), get_Store_mem(node));
+ store = new_bd_ppc32_Sth(env->dbg, env->block, storeptr, get_Store_value(node), get_Store_mem(node));
break;
case irm_Is:
case irm_Iu:
case irm_P:
- store = new_rd_ppc32_Stw(env->dbg, env->irg, env->block, storeptr, get_Store_value(node), get_Store_mem(node));
+ store = new_bd_ppc32_Stw(env->dbg, env->block, storeptr, get_Store_value(node), get_Store_mem(node));
break;
case irm_D:
- store = new_rd_ppc32_Stfd(env->dbg, env->irg, env->block, storeptr, get_Store_value(node), get_Store_mem(node));
+ store = new_bd_ppc32_Stfd(env->dbg, env->block, storeptr, get_Store_value(node), get_Store_mem(node));
break;
case irm_F:
- store = new_rd_ppc32_Stfs(env->dbg, env->irg, env->block, storeptr, get_Store_value(node), get_Store_mem(node));
+ store = new_bd_ppc32_Stfs(env->dbg, env->block, storeptr, get_Store_value(node), get_Store_mem(node));
break;
default:
tarval *offset0 = new_tarval_from_long(0, mode_Is);
tarval *offset4 = new_tarval_from_long(4, mode_Is);
- load = new_rd_ppc32_Lwz(env->dbg, env->irg, env->block, src, mem);
+ load = new_bd_ppc32_Lwz(env->dbg, env->block, src, mem);
set_ppc32_constant_tarval(load, offset0);
set_ppc32_offset_mode(load, ppc32_ao_None);
mem = new_rd_Proj(env->dbg, env->irg, env->block, load, mode_M, pn_Load_M);
res = new_rd_Proj(env->dbg, env->irg, env->block, load, mode_Is, pn_Load_res);
- store = new_rd_ppc32_Stw(env->dbg, env->irg, env->block, dest, res, mem);
+ store = new_bd_ppc32_Stw(env->dbg, env->block, dest, res, mem);
set_ppc32_constant_tarval(store, offset0);
set_ppc32_offset_mode(store, ppc32_ao_None);
mem = new_rd_Proj(env->dbg, env->irg, env->block, store, mode_M, pn_Store_M);
if(size/4==2)
{
- load = new_rd_ppc32_Lwz(env->dbg, env->irg, env->block, src, mem);
+ load = new_bd_ppc32_Lwz(env->dbg, env->block, src, mem);
set_ppc32_constant_tarval(load, offset4);
set_ppc32_offset_mode(load, ppc32_ao_None);
mem = new_rd_Proj(env->dbg, env->irg, env->block, load, mode_M, pn_Load_M);
res = new_rd_Proj(env->dbg, env->irg, env->block, load, mode_Is, pn_Load_res);
- store = new_rd_ppc32_Stw(env->dbg, env->irg, env->block, dest, res, mem);
+ store = new_bd_ppc32_Stw(env->dbg, env->block, dest, res, mem);
set_ppc32_constant_tarval(store, offset4);
set_ppc32_offset_mode(store, ppc32_ao_None);
mem = new_rd_Proj(env->dbg, env->irg, env->block, store, mode_M, pn_Store_M);
assert(size/4-1<=0xffff);
if(size/4-1<0x8000)
{
- ornode = new_rd_ppc32_Addi_zero(env->dbg, env->irg, env->block, mode_Is);
+ ornode = new_bd_ppc32_Addi_zero(env->dbg, env->block, mode_Is);
set_ppc32_offset_mode(ornode, ppc32_ao_None);
}
else
{
- ir_node *zeroreg = new_rd_ppc32_Addi_zero(env->dbg, env->irg, env->block, mode_Is);
+ ir_node *zeroreg = new_bd_ppc32_Addi_zero(env->dbg, env->block, mode_Is);
set_ppc32_offset_mode(zeroreg, ppc32_ao_None);
set_ppc32_constant_tarval(zeroreg, new_tarval_from_long(0, mode_Is));
- ornode = new_rd_ppc32_Ori(env->dbg, env->irg, env->block, zeroreg, mode_Is);
+ ornode = new_bd_ppc32_Ori(env->dbg, env->block, zeroreg, mode_Is);
set_ppc32_offset_mode(ornode, ppc32_ao_Lo16);
}
set_ppc32_constant_tarval(ornode, new_tarval_from_long(size/4-1, mode_Is));
- mtctrnode = new_rd_ppc32_Mtctr(env->dbg, env->irg, env->block, ornode, mode_Is);
- store = new_rd_ppc32_LoopCopy(env->dbg, env->irg, env->block, src, dest, mtctrnode, mem, mode_T);
+ mtctrnode = new_bd_ppc32_Mtctr(env->dbg, env->block, ornode, mode_Is);
+ store = new_bd_ppc32_LoopCopy(env->dbg, env->block, src, dest, mtctrnode, mem, mode_T);
in[0] = new_rd_Proj(env->dbg, env->irg, env->block, store, mode_Is, 1); // src
in[1] = new_rd_Proj(env->dbg, env->irg, env->block, store, mode_Is, 2); // dest
{
ir_node *res;
tarval* offset_tarval = new_tarval_from_long(offset, mode_Is);
- load = new_rd_ppc32_Lhz(env->dbg, env->irg, env->block, src, mem);
+ load = new_bd_ppc32_Lhz(env->dbg, env->block, src, mem);
set_ppc32_constant_tarval(load, offset_tarval);
set_ppc32_offset_mode(load, ppc32_ao_None);
mem = new_rd_Proj(env->dbg, env->irg, env->block, load, mode_M, pn_Load_M);
res = new_rd_Proj(env->dbg, env->irg, env->block, load, mode_Is, pn_Load_res);
- store = new_rd_ppc32_Sth(env->dbg, env->irg, env->block, dest, res, mem);
+ store = new_bd_ppc32_Sth(env->dbg, env->block, dest, res, mem);
set_ppc32_constant_tarval(store, offset_tarval);
set_ppc32_offset_mode(store, ppc32_ao_None);
mem = new_rd_Proj(env->dbg, env->irg, env->block, store, mode_M, pn_Store_M);
{
ir_node *res;
tarval* offset_tarval = new_tarval_from_long(offset, mode_Is);
- load = new_rd_ppc32_Lbz(env->dbg, env->irg, env->block, src, mem);
+ load = new_bd_ppc32_Lbz(env->dbg, env->block, src, mem);
set_ppc32_constant_tarval(load, offset_tarval);
set_ppc32_offset_mode(load, ppc32_ao_None);
mem = new_rd_Proj(env->dbg, env->irg, env->block, load, mode_M, pn_Load_M);
res = new_rd_Proj(env->dbg, env->irg, env->block, load, mode_Is, pn_Load_res);
- store = new_rd_ppc32_Stb(env->dbg, env->irg, env->block, dest, res, mem);
+ store = new_bd_ppc32_Stb(env->dbg, env->block, dest, res, mem);
set_ppc32_constant_tarval(store, offset_tarval);
set_ppc32_offset_mode(store, ppc32_ao_None);
// mem = new_rd_Proj(env->dbg, env->irg, env->block, store, mode_M, pn_Store_M);
*/
static ir_node *gen_be_FrameAddr(ppc32_transform_env_t *env) {
ir_node *op = get_irn_n(env->irn, 0);
- ir_node *add = new_rd_ppc32_Addi(env->dbg, env->irg, env->block, op, mode_P);
+ ir_node *add = new_bd_ppc32_Addi(env->dbg, env->block, op, mode_P);
set_ppc32_frame_entity(add, be_get_frame_entity(env->irn));
return add;
}
entry->ent = ent;
} // TODO: Wird nicht richtig in global type gesteckt, ppc32_gen_decls.c findet ihn nicht
- symcnst = new_rd_ppc32_SymConst(env->dbg, env->irg, env->block, env->mode);
+ symcnst = new_bd_ppc32_SymConst(env->dbg, env->block, env->mode);
set_ppc32_frame_entity(symcnst, ent);
return symcnst;
}
unsigned char val1 = get_tarval_sub_bits(tv_const, 1);
if(val1&0x80)
{
- ir_node *zeroreg = new_rd_ppc32_Addi_zero(env->dbg, env->irg, env->block, mode_Is);
+ ir_node *zeroreg = new_bd_ppc32_Addi_zero(env->dbg, env->block, mode_Is);
set_ppc32_constant_tarval(zeroreg, new_tarval_from_long(0, mode_Is));
set_ppc32_offset_mode(zeroreg, ppc32_ao_None);
- node = new_rd_ppc32_Ori(env->dbg, env->irg, env->block, zeroreg, mode_Is);
+ node = new_bd_ppc32_Ori(env->dbg, env->block, zeroreg, mode_Is);
set_ppc32_offset_mode(node, ppc32_ao_Lo16);
break;
}
case irm_Bu:
case irm_Bs:
case irm_Hs:
- node = new_rd_ppc32_Addi_zero(env->dbg, env->irg, env->block, env->mode);
+ node = new_bd_ppc32_Addi_zero(env->dbg, env->block, env->mode);
set_ppc32_offset_mode(node, ppc32_ao_None);
break;
case irm_Is:
unsigned char val1 = get_tarval_sub_bits(tv_const, 1);
if(val1&0x80)
{
- ir_node *zeroreg = new_rd_ppc32_Addi_zero(env->dbg, env->irg, env->block, mode_Is);
+ ir_node *zeroreg = new_bd_ppc32_Addi_zero(env->dbg, env->block, mode_Is);
set_ppc32_constant_tarval(zeroreg, new_tarval_from_long(0, mode_Is));
set_ppc32_offset_mode(zeroreg, ppc32_ao_None);
- node = new_rd_ppc32_Ori(env->dbg, env->irg, env->block, zeroreg, mode_Is);
+ node = new_bd_ppc32_Ori(env->dbg, env->block, zeroreg, mode_Is);
set_ppc32_offset_mode(node, ppc32_ao_Lo16);
}
else
{
- node = new_rd_ppc32_Addi_zero(env->dbg, env->irg, env->block, env->mode);
+ node = new_bd_ppc32_Addi_zero(env->dbg, env->block, env->mode);
set_ppc32_offset_mode(node, ppc32_ao_None);
}
}
{
unsigned char val0 = get_tarval_sub_bits(tv_const,0);
unsigned char val1 = get_tarval_sub_bits(tv_const,1);
- node = new_rd_ppc32_Addis_zero(env->dbg, env->irg, env->block, env->mode, ppc32_ao_Hi16, tv_const, NULL);
+ node = new_bd_ppc32_Addis_zero(env->dbg, env->block, env->mode, ppc32_ao_Hi16, tv_const, NULL);
if(val0 || val1)
{
set_ppc32_constant_tarval(node, tv_const);
- node = new_rd_ppc32_Ori(env->dbg, env->irg, env->block, node, env->mode);
+ node = new_bd_ppc32_Ori(env->dbg, env->block, node, env->mode);
set_ppc32_offset_mode(node, ppc32_ao_Lo16);
}
}
if(is_direct_entity(ent))
{
ident *id_symconst = get_entity_ident(ent);
- ir_node *node_addis = new_rd_ppc32_Addis_zero(env->dbg, env->irg, env->block, env->mode, ppc32_ao_Ha16, NULL, id_symconst);
+ ir_node *node_addis = new_bd_ppc32_Addis_zero(env->dbg, env->block, env->mode, ppc32_ao_Ha16, NULL, id_symconst);
if(mode==mode_D)
- load = new_rd_ppc32_Lfd(env->dbg, env->irg, env->block, node_addis, new_NoMem());
+ load = new_bd_ppc32_Lfd(env->dbg, env->block, node_addis, new_NoMem());
else // mode_F
- load = new_rd_ppc32_Lfs(env->dbg, env->irg, env->block, node_addis, new_NoMem());
+ load = new_bd_ppc32_Lfs(env->dbg, env->block, node_addis, new_NoMem());
set_ppc32_symconst_ident(load, id_symconst);
set_ppc32_offset_mode(load, ppc32_ao_Lo16);
{
addr = gen_ppc32_SymConst (env);
if(mode==mode_D)
- load = new_rd_ppc32_Lfd(env->dbg, env->irg, env->block, addr, new_NoMem());
+ load = new_bd_ppc32_Lfd(env->dbg, env->block, addr, new_NoMem());
else // mode_F
- load = new_rd_ppc32_Lfs(env->dbg, env->irg, env->block, addr, new_NoMem());
+ load = new_bd_ppc32_Lfs(env->dbg, env->block, addr, new_NoMem());
}
return new_rd_Proj(env->dbg, env->irg, env->block, load, mode, pn_Load_res);
}
{
if (is_direct_entity(ent))
{
- ir_node *node_addis = new_rd_ppc32_Addis_zero(env->dbg, env->irg, env->block, env->mode, ppc32_ao_Hi16, NULL, id_symconst);
- node = new_rd_ppc32_Ori(env->dbg, env->irg, env->block, node_addis, env->mode);
+ ir_node *node_addis = new_bd_ppc32_Addis_zero(env->dbg, env->block, env->mode, ppc32_ao_Hi16, NULL, id_symconst);
+ node = new_bd_ppc32_Ori(env->dbg, env->block, node_addis, env->mode);
set_ppc32_symconst_ident(node, id_symconst);
set_ppc32_offset_mode(node, ppc32_ao_Lo16);
}
else
{
- ir_node *node_addis = new_rd_ppc32_Addis_zero(env->dbg, env->irg, env->block, env->mode, ppc32_ao_Ha16, NULL, id_symconst);
- node = new_rd_ppc32_Lwz(env->dbg, env->irg, env->block, node_addis, new_NoMem());
+ ir_node *node_addis = new_bd_ppc32_Addis_zero(env->dbg, env->block, env->mode, ppc32_ao_Ha16, NULL, id_symconst);
+ node = new_bd_ppc32_Lwz(env->dbg, env->block, node_addis, new_NoMem());
set_ppc32_symconst_ident(node, id_symconst);
set_ppc32_offset_mode(node, ppc32_ao_Lo16);
node = new_rd_Proj(env->dbg, env->irg, env->block, node, env->mode, pn_Load_res);
ir_node *res;
if (mode_is_signed(to_mode)) // Float to integer
{
- ir_node *fctiw = new_rd_ppc32_fCtiw(env->dbg, env->irg, env->block, op, from_mode);
- ir_node *stfd = new_rd_ppc32_Stfd(env->dbg, env->irg, env->block, get_irg_frame(env->irg),
+ ir_node *fctiw = new_bd_ppc32_fCtiw(env->dbg, env->block, op, from_mode);
+ ir_node *stfd = new_bd_ppc32_Stfd(env->dbg, env->block, get_irg_frame(env->irg),
fctiw, memory);
ir_node *storememproj = new_rd_Proj(env->dbg, env->irg, env->block, stfd, mode_M, pn_Store_M);
- ir_node *lwz = new_rd_ppc32_Lwz(env->dbg, env->irg, env->block, get_irg_frame(env->irg),
+ ir_node *lwz = new_bd_ppc32_Lwz(env->dbg, env->block, get_irg_frame(env->irg),
storememproj);
set_ppc32_frame_entity(stfd, memslot);
set_ppc32_offset_mode(stfd, ppc32_ao_Lo16); // TODO: only allows a 16-bit offset on stack
ir_node *constant;
if (mode_is_float(env->mode))
- constant = new_rd_ppc32_fConst(env->dbg, env->irg, env->block, env->mode);
+ constant = new_bd_ppc32_fConst(env->dbg, env->block, env->mode);
else
- constant = new_rd_ppc32_Const(env->dbg, env->irg, env->block, env->mode);
+ constant = new_bd_ppc32_Const(env->dbg, env->block, env->mode);
set_ppc32_constant_tarval(constant, tv_const);
return constant;
}
*/
static ir_node *gen_SymConst(ppc32_transform_env_t *env) {
ir_node *symconst;
- symconst = new_rd_ppc32_SymConst(env->dbg, env->irg, env->block, env->mode);
+ symconst = new_bd_ppc32_SymConst(env->dbg, env->block, env->mode);
set_ppc32_frame_entity(symconst, get_SymConst_entity(env->irn));
return symconst;
}
my $complete_args = "";
$temp = "";
- $temp = "ir_node *new_rd_$op(dbg_info *db, ir_graph *irg, ir_node *block";
+ $temp = "ir_node *new_bd_$op(dbg_info *db, ir_node *block";
if (!exists($n{"args"})) { # default args
if ($arity == $ARITY_VARIABLE) {
$complete_args = ", int arity, ir_node *in[]";
$temp .= "\t/* create node */\n";
$temp .= "\tassert(op != NULL);\n";
- $temp .= "\tres = new_ir_node(db, irg, block, op, mode, arity, in);\n";
+ $temp .= "\tres = new_ir_node(db, current_ir_graph, block, op, mode, arity, in);\n";
$temp .= "\n";
$temp .= "\t/* init node attributes */\n";
$temp .= "\t/* optimize node */\n";
$temp .= "\tres = optimize_node(res);\n";
- $temp .= "\tirn_vrfy_irg(res, irg);\n";
+ $temp .= "\tirn_vrfy_irg(res, current_ir_graph);\n";
$temp .= "\n";
$temp .= "\treturn res;\n";