transformer_t be_transformer = TRANSFORMER_DEFAULT;
#endif
-DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
-
-ir_mode *ia32_mode_fpcw = NULL;
+ir_mode *ia32_mode_fpcw = NULL;
/** The current omit-fp state */
static ir_type *omit_fp_between_type = NULL;
};
-typedef ir_node *(*create_const_node_func) (dbg_info *dbg, ir_node *block);
+typedef ir_node *(*create_const_node_func) (dbg_info *dbgi, ir_node *block);
/**
* Used to create per-graph unique pseudo nodes.
ir_mode *mode;
ir_mode *irn_mode;
ir_node *block, *noreg, *nomem;
- dbg_info *dbg;
+ dbg_info *dbgi;
/* we cannot invert non-ia32 irns */
if (! is_ia32_irn(irn))
irn_mode = get_irn_mode(irn);
noreg = get_irn_n(irn, 0);
nomem = get_irg_no_mem(irg);
- dbg = get_irn_dbg_info(irn);
+ dbgi = get_irn_dbg_info(irn);
/* initialize structure */
inverse->nodes = obstack_alloc(obst, 2 * sizeof(inverse->nodes[0]));
if (get_ia32_immop_type(irn) == ia32_ImmConst) {
/* we have an add with a const here */
/* inverse == add with negated const */
- inverse->nodes[0] = new_bd_ia32_Add(dbg, block, noreg, noreg, nomem, get_irn_n(irn, i), noreg);
+ inverse->nodes[0] = new_bd_ia32_Add(dbgi, block, noreg, noreg, nomem, get_irn_n(irn, i), noreg);
inverse->costs += 1;
copy_ia32_Immop_attr(inverse->nodes[0], (ir_node *)irn);
set_ia32_Immop_tarval(inverse->nodes[0], tarval_neg(get_ia32_Immop_tarval(irn)));
else if (get_ia32_immop_type(irn) == ia32_ImmSymConst) {
/* we have an add with a symconst here */
/* inverse == sub with const */
- inverse->nodes[0] = new_bd_ia32_Sub(dbg, block, noreg, noreg, nomem, get_irn_n(irn, i), noreg);
+ inverse->nodes[0] = new_bd_ia32_Sub(dbgi, block, noreg, noreg, nomem, get_irn_n(irn, i), noreg);
inverse->costs += 2;
copy_ia32_Immop_attr(inverse->nodes[0], (ir_node *)irn);
}
else {
/* normal add: inverse == sub */
- inverse->nodes[0] = new_bd_ia32_Sub(dbg, block, noreg, noreg, nomem, (ir_node*) irn, get_irn_n(irn, i ^ 1));
+ inverse->nodes[0] = new_bd_ia32_Sub(dbgi, block, noreg, noreg, nomem, (ir_node*) irn, get_irn_n(irn, i ^ 1));
inverse->costs += 2;
}
break;
if (get_ia32_immop_type(irn) != ia32_ImmNone) {
/* we have a sub with a const/symconst here */
/* inverse == add with this const */
- inverse->nodes[0] = new_bd_ia32_Add(dbg, block, noreg, noreg, nomem, get_irn_n(irn, i), noreg);
+ inverse->nodes[0] = new_bd_ia32_Add(dbgi, block, noreg, noreg, nomem, get_irn_n(irn, i), noreg);
inverse->costs += (get_ia32_immop_type(irn) == ia32_ImmSymConst) ? 5 : 1;
copy_ia32_Immop_attr(inverse->nodes[0], (ir_node *)irn);
}
else {
/* normal sub */
if (i == n_ia32_binary_left) {
- inverse->nodes[0] = new_bd_ia32_Add(dbg, block, noreg, noreg, nomem, (ir_node*) irn, get_irn_n(irn, 3));
+ inverse->nodes[0] = new_bd_ia32_Add(dbgi, block, noreg, noreg, nomem, (ir_node*) irn, get_irn_n(irn, 3));
}
else {
- inverse->nodes[0] = new_bd_ia32_Sub(dbg, block, noreg, noreg, nomem, get_irn_n(irn, n_ia32_binary_left), (ir_node*) irn);
+ inverse->nodes[0] = new_bd_ia32_Sub(dbgi, block, noreg, noreg, nomem, get_irn_n(irn, n_ia32_binary_left), (ir_node*) irn);
}
inverse->costs += 1;
}
case iro_ia32_Xor:
if (get_ia32_immop_type(irn) != ia32_ImmNone) {
/* xor with const: inverse = xor */
- inverse->nodes[0] = new_bd_ia32_Xor(dbg, block, noreg, noreg, nomem, get_irn_n(irn, i), noreg);
+ inverse->nodes[0] = new_bd_ia32_Xor(dbgi, block, noreg, noreg, nomem, get_irn_n(irn, i), noreg);
inverse->costs += (get_ia32_immop_type(irn) == ia32_ImmSymConst) ? 5 : 1;
copy_ia32_Immop_attr(inverse->nodes[0], (ir_node *)irn);
}
else {
/* normal xor */
- inverse->nodes[0] = new_bd_ia32_Xor(dbg, block, noreg, noreg, nomem, (ir_node *) irn, get_irn_n(irn, i));
+ inverse->nodes[0] = new_bd_ia32_Xor(dbgi, block, noreg, noreg, nomem, (ir_node *) irn, get_irn_n(irn, i));
inverse->costs += 1;
}
break;
case iro_ia32_Not: {
- inverse->nodes[0] = new_bd_ia32_Not(dbg, block, (ir_node*) irn);
+ inverse->nodes[0] = new_bd_ia32_Not(dbgi, block, (ir_node*) irn);
inverse->costs += 1;
break;
}
case iro_ia32_Neg: {
- inverse->nodes[0] = new_bd_ia32_Neg(dbg, block, (ir_node*) irn);
+ inverse->nodes[0] = new_bd_ia32_Neg(dbgi, block, (ir_node*) irn);
inverse->costs += 1;
break;
}
ir_graph *irg = get_irn_irg(node);
ir_node *block = get_nodes_block(node);
ir_node *base = get_irn_n(node, n_ia32_base);
- ir_node *index = get_irn_n(node, n_ia32_index);
+ ir_node *idx = get_irn_n(node, n_ia32_index);
ir_node *mem = get_irn_n(node, n_ia32_mem);
ir_node *noreg;
- ir_node *load = new_bd_ia32_Load(dbgi, block, base, index, mem);
+ ir_node *load = new_bd_ia32_Load(dbgi, block, base, idx, mem);
ir_node *load_res = new_rd_Proj(dbgi, load, mode_Iu, pn_ia32_Load_res);
ia32_copy_am_attrs(load, node);
case ia32_AddrModeD:
/* TODO implement this later... */
panic("found DestAM with flag user %+F this should not happen", node);
- break;
default: assert(type == ia32_Normal); break;
}
static void transform_to_Load(ir_node *node)
{
ir_graph *irg = get_irn_irg(node);
- dbg_info *dbg = get_irn_dbg_info(node);
+ dbg_info *dbgi = get_irn_dbg_info(node);
ir_node *block = get_nodes_block(node);
ir_entity *ent = be_get_frame_entity(node);
ir_mode *mode = get_irn_mode(node);
if (mode_is_float(spillmode)) {
if (ia32_cg_config.use_sse2)
- new_op = new_bd_ia32_xLoad(dbg, block, ptr, noreg, mem, spillmode);
+ new_op = new_bd_ia32_xLoad(dbgi, block, ptr, noreg, mem, spillmode);
else
- new_op = new_bd_ia32_vfld(dbg, block, ptr, noreg, mem, spillmode);
+ new_op = new_bd_ia32_vfld(dbgi, block, ptr, noreg, mem, spillmode);
}
else if (get_mode_size_bits(spillmode) == 128) {
/* Reload 128 bit SSE registers */
- new_op = new_bd_ia32_xxLoad(dbg, block, ptr, noreg, mem);
+ new_op = new_bd_ia32_xxLoad(dbgi, block, ptr, noreg, mem);
}
else
- new_op = new_bd_ia32_Load(dbg, block, ptr, noreg, mem);
+ new_op = new_bd_ia32_Load(dbgi, block, ptr, noreg, mem);
set_ia32_op_type(new_op, ia32_AddrModeS);
set_ia32_ls_mode(new_op, spillmode);
DBG_OPT_RELOAD2LD(node, new_op);
- proj = new_rd_Proj(dbg, new_op, mode, pn_ia32_Load_res);
+ proj = new_rd_Proj(dbgi, new_op, mode, pn_ia32_Load_res);
if (sched_point) {
sched_add_after(sched_point, new_op);
static void transform_to_Store(ir_node *node)
{
ir_graph *irg = get_irn_irg(node);
- dbg_info *dbg = get_irn_dbg_info(node);
+ dbg_info *dbgi = get_irn_dbg_info(node);
ir_node *block = get_nodes_block(node);
ir_entity *ent = be_get_frame_entity(node);
const ir_node *spillval = get_irn_n(node, n_be_Spill_val);
if (mode_is_float(mode)) {
if (ia32_cg_config.use_sse2) {
- store = new_bd_ia32_xStore(dbg, block, ptr, noreg, nomem, val);
+ store = new_bd_ia32_xStore(dbgi, block, ptr, noreg, nomem, val);
res = new_r_Proj(store, mode_M, pn_ia32_xStore_M);
} else {
- store = new_bd_ia32_vfst(dbg, block, ptr, noreg, nomem, val, mode);
+ store = new_bd_ia32_vfst(dbgi, block, ptr, noreg, nomem, val, mode);
res = new_r_Proj(store, mode_M, pn_ia32_vfst_M);
}
} else if (get_mode_size_bits(mode) == 128) {
/* Spill 128 bit SSE registers */
- store = new_bd_ia32_xxStore(dbg, block, ptr, noreg, nomem, val);
+ store = new_bd_ia32_xxStore(dbgi, block, ptr, noreg, nomem, val);
res = new_r_Proj(store, mode_M, pn_ia32_xxStore_M);
} else if (get_mode_size_bits(mode) == 8) {
- store = new_bd_ia32_Store8Bit(dbg, block, ptr, noreg, nomem, val);
+ store = new_bd_ia32_Store8Bit(dbgi, block, ptr, noreg, nomem, val);
res = new_r_Proj(store, mode_M, pn_ia32_Store8Bit_M);
} else {
- store = new_bd_ia32_Store(dbg, block, ptr, noreg, nomem, val);
+ store = new_bd_ia32_Store(dbgi, block, ptr, noreg, nomem, val);
res = new_r_Proj(store, mode_M, pn_ia32_Store_M);
}
static ir_node *create_push(ir_node *node, ir_node *schedpoint, ir_node *sp, ir_node *mem, ir_entity *ent)
{
- dbg_info *dbg = get_irn_dbg_info(node);
- ir_node *block = get_nodes_block(node);
- ir_graph *irg = get_irn_irg(node);
- ir_node *noreg = ia32_new_NoReg_gp(irg);
- ir_node *frame = get_irg_frame(irg);
+ dbg_info *dbgi = get_irn_dbg_info(node);
+ ir_node *block = get_nodes_block(node);
+ ir_graph *irg = get_irn_irg(node);
+ ir_node *noreg = ia32_new_NoReg_gp(irg);
+ ir_node *frame = get_irg_frame(irg);
- ir_node *push = new_bd_ia32_Push(dbg, block, frame, noreg, mem, noreg, sp);
+ ir_node *push = new_bd_ia32_Push(dbgi, block, frame, noreg, mem, noreg, sp);
set_ia32_frame_ent(push, ent);
set_ia32_use_frame(push);
static ir_node *create_pop(ir_node *node, ir_node *schedpoint, ir_node *sp, ir_entity *ent)
{
- dbg_info *dbg = get_irn_dbg_info(node);
+ dbg_info *dbgi = get_irn_dbg_info(node);
ir_node *block = get_nodes_block(node);
ir_graph *irg = get_irn_irg(node);
ir_node *noreg = ia32_new_NoReg_gp(irg);
ir_node *frame = get_irg_frame(irg);
- ir_node *pop = new_bd_ia32_PopMem(dbg, block, frame, noreg,
+ ir_node *pop = new_bd_ia32_PopMem(dbgi, block, frame, noreg,
get_irg_no_mem(irg), sp);
set_ia32_frame_ent(pop, ent);
static ir_node* create_spproj(ir_node *node, ir_node *pred, int pos)
{
- dbg_info *dbg = get_irn_dbg_info(node);
- ir_mode *spmode = mode_Iu;
+ dbg_info *dbgi = get_irn_dbg_info(node);
+ ir_mode *spmode = mode_Iu;
const arch_register_t *spreg = &ia32_registers[REG_ESP];
ir_node *sp;
- sp = new_rd_Proj(dbg, pred, spmode, pos);
+ sp = new_rd_Proj(dbgi, pred, spmode, pos);
arch_set_irn_register(sp, spreg);
return sp;
static void ia32_get_call_abi(const void *self, ir_type *method_type,
be_abi_call_t *abi)
{
- ir_type *tp;
- ir_mode *mode;
unsigned cc;
int n, i, regnum;
int pop_amount = 0;
n = get_method_n_params(method_type);
for (i = regnum = 0; i < n; i++) {
- ir_mode *mode;
- const arch_register_t *reg = NULL;
+ const arch_register_t *reg = NULL;
+ ir_type *tp = get_method_param_type(method_type, i);
+ ir_mode *mode = get_type_mode(tp);
- tp = get_method_param_type(method_type, i);
- mode = get_type_mode(tp);
if (mode != NULL) {
reg = ia32_get_RegParam_reg(cc, regnum, mode);
}
/* In case of 64bit returns, we will have two 32bit values */
if (n == 2) {
- tp = get_method_res_type(method_type, 0);
- mode = get_type_mode(tp);
+ ir_type *tp = get_method_res_type(method_type, 0);
+ ir_mode *mode = get_type_mode(tp);
assert(!mode_is_float(mode) && "two FP results not supported");
be_abi_call_res_reg(abi, 1, &ia32_registers[REG_EDX], ABI_CONTEXT_BOTH);
}
else if (n == 1) {
+ ir_type *tp = get_method_res_type(method_type, 0);
+ ir_mode *mode = get_type_mode(tp);
const arch_register_t *reg;
-
- tp = get_method_res_type(method_type, 0);
assert(is_atomic_type(tp));
- mode = get_type_mode(tp);
reg = mode_is_float(mode) ? &ia32_registers[REG_VF0] : &ia32_registers[REG_EAX];
lc_opt_add_table(ia32_grp, ia32_options);
be_register_isa_if("ia32", &ia32_isa_if);
- FIRM_DBG_REGISTER(dbg, "firm.be.ia32.cg");
-
ia32_init_emitter();
ia32_init_finish();
ia32_init_optimize();
case 'e': /* not available in 32 bit mode */
panic("unsupported asm constraint '%c' found in (%+F)",
*c, current_ir_graph);
- break;
default:
panic("unknown asm constraint '%c' found in (%+F)", *c,
current_ir_graph);
- break;
}
++c;
}
}
if (input == NULL) {
- ir_node *pred = get_irn_n(node, i);
input = get_new_node(pred);
if (parsed_constraint.cls == NULL
/* count inputs which are real values (and not memory) */
value_arity = 0;
for (i = 0; i < arity; ++i) {
- ir_node *in = get_irn_n(node, i);
- if (get_irn_mode(in) == mode_M)
+ ir_node *node_in = get_irn_n(node, i);
+ if (get_irn_mode(node_in) == mode_M)
continue;
++value_arity;
}
int o;
bitset_t *used_ins = bitset_alloca(arity);
for (o = 0; o < out_arity; ++o) {
- int i;
const arch_register_req_t *outreq = out_reg_reqs[o];
if (outreq->cls == NULL) {
++arity;
}
} else {
- int i;
bitset_t *used_outs = bitset_alloca(out_arity);
int orig_out_arity = out_arity;
for (i = 0; i < arity; ++i) {
int offs = get_ia32_am_offs_int(node);
ir_node *base = get_irn_n(node, n_ia32_base);
int has_base = !is_ia32_NoReg_GP(base);
- ir_node *index = get_irn_n(node, n_ia32_index);
- int has_index = !is_ia32_NoReg_GP(index);
+ ir_node *idx = get_irn_n(node, n_ia32_index);
+ int has_index = !is_ia32_NoReg_GP(idx);
/* just to be sure... */
assert(!is_ia32_use_frame(node) || get_ia32_frame_ent(node) != NULL);
assert(get_ia32_op_type(node) == ia32_Normal);
goto emit_S;
}
- break;
default: goto unknown;
}
*/
static void emit_ia32_Conv_I2I(const ir_node *node)
{
- ir_mode *smaller_mode = get_ia32_ls_mode(node);
- int signed_mode = mode_is_signed(smaller_mode);
+ ir_mode *smaller_mode = get_ia32_ls_mode(node);
+ int signed_mode = mode_is_signed(smaller_mode);
const char *sign_suffix;
assert(!mode_is_float(smaller_mode));
*/
static void ia32_emit_block_header(ir_node *block)
{
- ir_graph *irg = current_ir_graph;
+ ir_graph *irg = current_ir_graph;
int need_label = block_needs_label(block);
- int i, arity;
- ir_exec_freq *exec_freq = be_get_irg_exec_freq(irg);
+ ir_exec_freq *exec_freq = be_get_irg_exec_freq(irg);
+ int arity;
if (block == get_irg_end_block(irg))
return;
if (arity <= 0) {
be_emit_cstring(" none");
} else {
+ int i;
for (i = 0; i < arity; ++i) {
ir_node *predblock = get_Block_cfgpred_block(block, i);
be_emit_irprintf(" %d", get_irn_node_nr(predblock));
Those are ascending with ascending addresses. */
qsort(exc_list, ARR_LEN(exc_list), sizeof(exc_list[0]), cmp_exc_entry);
{
- size_t i;
+ size_t e;
- for (i = 0; i < ARR_LEN(exc_list); ++i) {
+ for (e = 0; e < ARR_LEN(exc_list); ++e) {
be_emit_cstring("\t.long ");
- ia32_emit_exc_label(exc_list[i].exc_instr);
+ ia32_emit_exc_label(exc_list[e].exc_instr);
be_emit_char('\n');
be_emit_cstring("\t.long ");
- be_gas_emit_block_name(exc_list[i].block);
+ be_gas_emit_block_name(exc_list[e].block);
be_emit_char('\n');
}
}
int offs = get_ia32_am_offs_int(node);
ir_node *base = get_irn_n(node, n_ia32_base);
int has_base = !is_ia32_NoReg_GP(base);
- ir_node *index = get_irn_n(node, n_ia32_index);
- int has_index = !is_ia32_NoReg_GP(index);
+ ir_node *idx = get_irn_n(node, n_ia32_index);
+ int has_index = !is_ia32_NoReg_GP(idx);
unsigned modrm = 0;
unsigned sib = 0;
unsigned emitoffs = 0;
/* Determine if we need a SIB byte. */
if (has_index) {
- const arch_register_t *reg_index = arch_get_irn_register(index);
+ const arch_register_t *reg_index = arch_get_irn_register(idx);
int scale = get_ia32_am_scale(node);
assert(scale < 4);
/* R/M set to ESP means SIB in 32bit mode. */
if (out->index == REG_GP_EAX) {
ir_node *base = get_irn_n(node, n_ia32_base);
int has_base = !is_ia32_NoReg_GP(base);
- ir_node *index = get_irn_n(node, n_ia32_index);
- int has_index = !is_ia32_NoReg_GP(index);
+ ir_node *idx = get_irn_n(node, n_ia32_index);
+ int has_index = !is_ia32_NoReg_GP(idx);
if (!has_base && !has_index) {
ir_entity *ent = get_ia32_am_sc(node);
int offs = get_ia32_am_offs_int(node);
if (in->index == REG_GP_EAX) {
ir_node *base = get_irn_n(node, n_ia32_base);
int has_base = !is_ia32_NoReg_GP(base);
- ir_node *index = get_irn_n(node, n_ia32_index);
- int has_index = !is_ia32_NoReg_GP(index);
+ ir_node *idx = get_irn_n(node, n_ia32_index);
+ int has_index = !is_ia32_NoReg_GP(idx);
if (!has_base && !has_index) {
ir_entity *ent = get_ia32_am_sc(node);
int offs = get_ia32_am_offs_int(node);
ir_graph *irg;
ir_node *in1, *in2, *noreg, *nomem, *res;
ir_node *noreg_fp, *block;
- dbg_info *dbg;
+ dbg_info *dbgi;
const arch_register_t *in1_reg, *in2_reg, *out_reg;
/* fix_am will solve this for AddressMode variants */
return;
block = get_nodes_block(irn);
- dbg = get_irn_dbg_info(irn);
+ dbgi = get_irn_dbg_info(irn);
/* generate the neg src2 */
if (is_ia32_xSub(irn)) {
assert(get_irn_mode(irn) != mode_T);
- res = new_bd_ia32_xXor(dbg, block, noreg, noreg, nomem, in2, noreg_fp);
+ res = new_bd_ia32_xXor(dbgi, block, noreg, noreg, nomem, in2, noreg_fp);
size = get_mode_size_bits(op_mode);
entity = ia32_gen_fp_known_const(size == 32 ? ia32_SSIGN : ia32_DSIGN);
set_ia32_am_sc(res, entity);
sched_add_before(irn, res);
/* generate the add */
- res = new_bd_ia32_xAdd(dbg, block, noreg, noreg, nomem, res, in1);
+ res = new_bd_ia32_xAdd(dbgi, block, noreg, noreg, nomem, res, in1);
set_ia32_ls_mode(res, get_ia32_ls_mode(irn));
/* exchange the add and the sub */
}
if (flags_proj == NULL) {
- res = new_bd_ia32_Neg(dbg, block, in2);
+ res = new_bd_ia32_Neg(dbgi, block, in2);
arch_set_irn_register(res, in2_reg);
/* add to schedule */
sched_add_before(irn, res);
/* generate the add */
- res = new_bd_ia32_Add(dbg, block, noreg, noreg, nomem, res, in1);
+ res = new_bd_ia32_Add(dbgi, block, noreg, noreg, nomem, res, in1);
arch_set_irn_register(res, out_reg);
set_ia32_commutative(res);
*
* a + -b = a + (~b + 1) would set the carry flag IF a == b ...
*/
- nnot = new_bd_ia32_Not(dbg, block, in2);
+ nnot = new_bd_ia32_Not(dbgi, block, in2);
arch_set_irn_register(nnot, in2_reg);
sched_add_before(irn, nnot);
- stc = new_bd_ia32_Stc(dbg, block);
+ stc = new_bd_ia32_Stc(dbgi, block);
arch_set_irn_register(stc, &ia32_registers[REG_EFLAGS]);
sched_add_before(irn, stc);
- adc = new_bd_ia32_Adc(dbg, block, noreg, noreg, nomem, nnot, in1, stc);
+ adc = new_bd_ia32_Adc(dbgi, block, noreg, noreg, nomem, nnot, in1, stc);
arch_set_irn_register(adc, out_reg);
sched_add_before(irn, adc);
adc_flags = new_r_Proj(adc, mode_Iu, pn_ia32_Adc_flags);
arch_set_irn_register(adc_flags, &ia32_registers[REG_EFLAGS]);
- cmc = new_bd_ia32_Cmc(dbg, block, adc_flags);
+ cmc = new_bd_ia32_Cmc(dbgi, block, adc_flags);
arch_set_irn_register(cmc, &ia32_registers[REG_EFLAGS]);
sched_add_before(irn, cmc);
uses_out_reg_pos = -1;
for (i2 = 0; i2 < arity; ++i2) {
ir_node *in = get_irn_n(node, i2);
- const arch_register_t *in_reg;
+ const arch_register_t *other_in_reg;
if (!mode_is_data(get_irn_mode(in)))
continue;
- in_reg = arch_get_irn_register(in);
+ other_in_reg = arch_get_irn_register(in);
- if (in_reg != out_reg)
+ if (other_in_reg != out_reg)
continue;
if (uses_out_reg != NULL && in != uses_out_reg) {
static ir_node *create_fpu_mode_spill(void *env, ir_node *state, int force,
ir_node *after)
{
- ir_node *spill = NULL;
(void) env;
/* we don't spill the fpcw in unsafe mode */
ir_node *noreg = ia32_new_NoReg_gp(irg);
ir_node *nomem = get_irg_no_mem(irg);
ir_node *frame = get_irg_frame(irg);
-
- spill = new_bd_ia32_FnstCW(NULL, block, frame, noreg, nomem, state);
+ ir_node *spill
+ = new_bd_ia32_FnstCW(NULL, block, frame, noreg, nomem, state);
set_ia32_op_type(spill, ia32_AddrModeD);
/* use mode_Iu, as movl has a shorter opcode than movw */
set_ia32_ls_mode(spill, mode_Iu);
set_ia32_use_frame(spill);
sched_add_after(skip_Proj(after), spill);
+ return spill;
}
- return spill;
+ return NULL;
}
static ir_node *create_fldcw_ent(ir_node *block, ir_entity *entity)
/**
* Reroute edges from the pn_Call_T_result proj of a call.
*
- * @param proj the pn_Call_T_result Proj
- * @param l_res the lower 32 bit result
- * @param h_res the upper 32 bit result or NULL
+ * @param resproj the pn_Call_T_result Proj
+ * @param l_res the lower 32 bit result
+ * @param h_res the upper 32 bit result or NULL
*/
-static void reroute_result(ir_node *proj, ir_node *l_res, ir_node *h_res)
+static void reroute_result(ir_node *resproj, ir_node *l_res, ir_node *h_res)
{
const ir_edge_t *edge, *next;
- foreach_out_edge_safe(proj, edge, next) {
+ foreach_out_edge_safe(resproj, edge, next) {
ir_node *proj = get_edge_src_irn(edge);
long pn = get_Proj_proj(proj);
if ((offset & 0xFFFFFF00) == 0) {
/* attr->am_offs += 0; */
} else if ((offset & 0xFFFF00FF) == 0) {
- ir_node *imm = ia32_create_Immediate(NULL, 0, offset >> 8);
- set_irn_n(node, n_ia32_Test_right, imm);
+ ir_node *imm_node = ia32_create_Immediate(NULL, 0, offset>>8);
+ set_irn_n(node, n_ia32_Test_right, imm_node);
attr->am_offs += 1;
} else if ((offset & 0xFF00FFFF) == 0) {
- ir_node *imm = ia32_create_Immediate(NULL, 0, offset >> 16);
- set_irn_n(node, n_ia32_Test_right, imm);
+ ir_node *imm_node = ia32_create_Immediate(NULL, 0, offset>>16);
+ set_irn_n(node, n_ia32_Test_right, imm_node);
attr->am_offs += 2;
} else if ((offset & 0x00FFFFFF) == 0) {
- ir_node *imm = ia32_create_Immediate(NULL, 0, offset >> 24);
- set_irn_n(node, n_ia32_Test_right, imm);
+ ir_node *imm_node = ia32_create_Immediate(NULL, 0, offset>>24);
+ set_irn_n(node, n_ia32_Test_right, imm_node);
attr->am_offs += 3;
} else {
return;
ir_node *val, *mem, *mem_proj;
ir_node *store = stores[i];
ir_node *noreg = ia32_new_NoReg_gp(irg);
- const ir_edge_t *edge;
- const ir_edge_t *next;
val = get_irn_n(store, n_ia32_unary_op);
mem = get_irn_n(store, n_ia32_mem);
if (mode_is_float(mode)) {
ir_node *res = NULL;
ir_node *load;
- ir_node *base;
ir_entity *floatent;
if (ia32_cg_config.use_sse2) {
set_ia32_ls_mode(load, mode);
res = load;
} else {
+ ir_node *base;
#ifdef CONSTRUCT_SSE_CONST
if (mode == mode_D) {
unsigned val = get_tarval_sub_bits(tv, 0) |
}
am->op_type = ia32_AddrModeS;
} else {
- ir_mode *mode;
am->op_type = ia32_Normal;
if (flags & match_try_am) {
static ir_node *create_lea_from_address(dbg_info *dbgi, ir_node *block,
ia32_address_t *addr)
{
- ir_node *base, *index, *res;
+ ir_node *base;
+ ir_node *idx;
+ ir_node *res;
base = addr->base;
if (base == NULL) {
base = be_transform_node(base);
}
- index = addr->index;
- if (index == NULL) {
- index = noreg_GP;
+ idx = addr->index;
+ if (idx == NULL) {
+ idx = noreg_GP;
} else {
- index = be_transform_node(index);
+ idx = be_transform_node(idx);
}
/* segment overrides are ineffective for Leas :-( so we have to patch
addr->tls_segment = false;
}
- res = new_bd_ia32_Lea(dbgi, block, base, index);
+ res = new_bd_ia32_Lea(dbgi, block, base, idx);
set_address(res, addr);
return res;
return gen_unop(node, op, new_bd_ia32_Not, match_mode_neutral);
}
-static ir_node *create_abs(dbg_info *dbgi, ir_node *block, ir_node *op,
- bool negate, ir_node *node)
+static ir_node *create_float_abs(dbg_info *dbgi, ir_node *block, ir_node *op,
+ bool negate, ir_node *node)
{
ir_node *new_block = be_transform_node(block);
ir_mode *mode = get_irn_mode(op);
- ir_node *new_op;
+ ir_node *new_op = be_transform_node(op);
ir_node *new_node;
int size;
ir_entity *ent;
- if (mode_is_float(mode)) {
- new_op = be_transform_node(op);
+ assert(mode_is_float(mode));
- if (ia32_cg_config.use_sse2) {
- ir_node *noreg_fp = ia32_new_NoReg_xmm(current_ir_graph);
- new_node = new_bd_ia32_xAnd(dbgi, new_block, get_symconst_base(),
- noreg_GP, nomem, new_op, noreg_fp);
+ if (ia32_cg_config.use_sse2) {
+ ir_node *noreg_fp = ia32_new_NoReg_xmm(current_ir_graph);
+ new_node = new_bd_ia32_xAnd(dbgi, new_block, get_symconst_base(),
+ noreg_GP, nomem, new_op, noreg_fp);
- size = get_mode_size_bits(mode);
- ent = ia32_gen_fp_known_const(size == 32 ? ia32_SABS : ia32_DABS);
+ size = get_mode_size_bits(mode);
+ ent = ia32_gen_fp_known_const(size == 32 ? ia32_SABS : ia32_DABS);
- set_ia32_am_sc(new_node, ent);
+ set_ia32_am_sc(new_node, ent);
- SET_IA32_ORIG_NODE(new_node, node);
+ SET_IA32_ORIG_NODE(new_node, node);
- set_ia32_op_type(new_node, ia32_AddrModeS);
- set_ia32_ls_mode(new_node, mode);
+ set_ia32_op_type(new_node, ia32_AddrModeS);
+ set_ia32_ls_mode(new_node, mode);
- /* TODO, implement -Abs case */
- assert(!negate);
- } else {
- new_node = new_bd_ia32_vfabs(dbgi, new_block, new_op);
+ /* TODO, implement -Abs case */
+ assert(!negate);
+ } else {
+ new_node = new_bd_ia32_vfabs(dbgi, new_block, new_op);
+ SET_IA32_ORIG_NODE(new_node, node);
+ if (negate) {
+ new_node = new_bd_ia32_vfchs(dbgi, new_block, new_node);
SET_IA32_ORIG_NODE(new_node, node);
- if (negate) {
- new_node = new_bd_ia32_vfchs(dbgi, new_block, new_node);
- SET_IA32_ORIG_NODE(new_node, node);
- }
}
}
ir_mode *mode = get_Load_mode(node);
int throws_exception = ir_throws_exception(node);
ir_node *base;
- ir_node *index;
+ ir_node *idx;
ir_node *new_node;
ia32_address_t addr;
/* construct load address */
memset(&addr, 0, sizeof(addr));
ia32_create_address_mode(&addr, ptr, ia32_create_am_normal);
- base = addr.base;
- index = addr.index;
+ base = addr.base;
+ idx = addr.index;
if (base == NULL) {
base = noreg_GP;
base = be_transform_node(base);
}
- if (index == NULL) {
- index = noreg_GP;
+ if (idx == NULL) {
+ idx = noreg_GP;
} else {
- index = be_transform_node(index);
+ idx = be_transform_node(idx);
}
if (mode_is_float(mode)) {
if (ia32_cg_config.use_sse2) {
- new_node = new_bd_ia32_xLoad(dbgi, block, base, index, new_mem,
+ new_node = new_bd_ia32_xLoad(dbgi, block, base, idx, new_mem,
mode);
} else {
- new_node = new_bd_ia32_vfld(dbgi, block, base, index, new_mem,
+ new_node = new_bd_ia32_vfld(dbgi, block, base, idx, new_mem,
mode);
}
} else {
/* create a conv node with address mode for smaller modes */
if (get_mode_size_bits(mode) < 32) {
- new_node = new_bd_ia32_Conv_I2I(dbgi, block, base, index,
+ new_node = new_bd_ia32_Conv_I2I(dbgi, block, base, idx,
new_mem, noreg_GP, mode);
} else {
- new_node = new_bd_ia32_Load(dbgi, block, base, index, new_mem);
+ new_node = new_bd_ia32_Load(dbgi, block, base, idx, new_mem);
}
}
ir_set_throws_exception(new_node, throws_exception);
ir_node *new_node = new_bd_ia32_Store(dbgi, new_block, addr.base,
addr.index, addr.mem, imm);
- ir_node *mem = new_r_Proj(new_node, mode_M, pn_ia32_Store_M);
+ ir_node *new_mem = new_r_Proj(new_node, mode_M, pn_ia32_Store_M);
ir_set_throws_exception(new_node, throws_exception);
set_irn_pinned(new_node, get_irn_pinned(node));
SET_IA32_ORIG_NODE(new_node, node);
assert(i < 4);
- ins[i++] = mem;
+ ins[i++] = new_mem;
size -= 4;
ofs += 4;
++step;
res->steps[step].transform = SETCC_TR_NEG;
} else {
- int v = get_tarval_lowest_bit(t);
- assert(v >= 0);
+ int val = get_tarval_lowest_bit(t);
+ assert(val >= 0);
res->steps[step].transform = SETCC_TR_SHL;
- res->steps[step].scale = v;
+ res->steps[step].scale = val;
}
}
++step;
node);
} else {
ir_node *op = ir_get_abs_op(sel, mux_true, mux_false);
- return create_abs(dbgi, block, op, is_abs < 0, node);
+ return create_float_abs(dbgi, block, op, is_abs < 0, node);
}
}
static ir_node *gen_prefetch(ir_node *node)
{
dbg_info *dbgi;
- ir_node *ptr, *block, *mem, *base, *index;
+ ir_node *ptr, *block, *mem, *base, *idx;
ir_node *param, *new_node;
long rw, locality;
ir_tarval *tv;
memset(&addr, 0, sizeof(addr));
ptr = get_Builtin_param(node, 0);
ia32_create_address_mode(&addr, ptr, ia32_create_am_normal);
- base = addr.base;
- index = addr.index;
+ base = addr.base;
+ idx = addr.index;
if (base == NULL) {
base = noreg_GP;
base = be_transform_node(base);
}
- if (index == NULL) {
- index = noreg_GP;
+ if (idx == NULL) {
+ idx = noreg_GP;
} else {
- index = be_transform_node(index);
+ idx = be_transform_node(idx);
}
dbgi = get_irn_dbg_info(node);
if (rw == 1 && ia32_cg_config.use_3dnow_prefetch) {
/* we have 3DNow!, this was already checked above */
- new_node = new_bd_ia32_PrefetchW(dbgi, block, base, index, mem);
+ new_node = new_bd_ia32_PrefetchW(dbgi, block, base, idx, mem);
} else if (ia32_cg_config.use_sse_prefetch) {
/* note: rw == 1 is IGNORED in that case */
param = get_Builtin_param(node, 2);
/* SSE style prefetch */
switch (locality) {
case 0:
- new_node = new_bd_ia32_PrefetchNTA(dbgi, block, base, index, mem);
+ new_node = new_bd_ia32_PrefetchNTA(dbgi, block, base, idx, mem);
break;
case 1:
- new_node = new_bd_ia32_Prefetch2(dbgi, block, base, index, mem);
+ new_node = new_bd_ia32_Prefetch2(dbgi, block, base, idx, mem);
break;
case 2:
- new_node = new_bd_ia32_Prefetch1(dbgi, block, base, index, mem);
+ new_node = new_bd_ia32_Prefetch1(dbgi, block, base, idx, mem);
break;
default:
- new_node = new_bd_ia32_Prefetch0(dbgi, block, base, index, mem);
+ new_node = new_bd_ia32_Prefetch0(dbgi, block, base, idx, mem);
break;
}
} else {
assert(ia32_cg_config.use_3dnow_prefetch);
/* 3DNow! style prefetch */
- new_node = new_bd_ia32_Prefetch(dbgi, block, base, index, mem);
+ new_node = new_bd_ia32_Prefetch(dbgi, block, base, idx, mem);
}
set_irn_pinned(new_node, get_irn_pinned(node));
ir_type *res_tp = get_method_res_type(mtp, j);
ir_node *res, *new_res;
const ir_edge_t *edge, *next;
- ir_mode *mode;
+ ir_mode *res_mode;
if (! is_atomic_type(res_tp)) {
/* no floating point return */
continue;
}
- mode = get_type_mode(res_tp);
- if (! mode_is_float(mode)) {
+ res_mode = get_type_mode(res_tp);
+ if (! mode_is_float(res_mode)) {
/* no floating point return */
continue;
}
dbg_info *db = get_irn_dbg_info(succ);
ir_node *block = get_nodes_block(succ);
ir_node *base = get_irn_n(succ, n_ia32_xStore_base);
- ir_node *index = get_irn_n(succ, n_ia32_xStore_index);
+ ir_node *idx = get_irn_n(succ, n_ia32_xStore_index);
ir_node *mem = get_irn_n(succ, n_ia32_xStore_mem);
ir_node *value = get_irn_n(succ, n_ia32_xStore_val);
ir_mode *mode = get_ia32_ls_mode(succ);
- ir_node *st = new_bd_ia32_vfst(db, block, base, index, mem, value, mode);
+ ir_node *st = new_bd_ia32_vfst(db, block, base, idx, mem, value, mode);
//ir_node *mem = new_r_Proj(st, mode_M, pn_ia32_vfst_M);
set_ia32_am_offs_int(st, get_ia32_am_offs_int(succ));
if (is_ia32_use_frame(succ))
/* store st(0) on stack */
vfst = new_bd_ia32_vfst(db, block, frame, noreg_GP, call_mem,
- res, mode);
+ res, res_mode);
set_ia32_op_type(vfst, ia32_AddrModeD);
set_ia32_use_frame(vfst);
/* load into SSE register */
xld = new_bd_ia32_xLoad(db, block, frame, noreg_GP, vfst_mem,
- mode);
+ res_mode);
set_ia32_op_type(xld, ia32_AddrModeS);
set_ia32_use_frame(xld);
- new_res = new_r_Proj(xld, mode, pn_ia32_xLoad_res);
+ new_res = new_r_Proj(xld, res_mode, pn_ia32_xLoad_res);
new_mem = new_r_Proj(xld, mode_M, pn_ia32_xLoad_M);
if (old_mem != NULL) {
op1_idx = x87_on_stack(state, arch_register_get_index(op1));
if (is_vfp_live(arch_register_get_index(op1), live)) {
- ir_node *pred = get_irn_n(n, 0);
-
/* Operand is still live, a real copy. We need here an fpush that can
hold a register, so use the fpushCopy or recreate constants */
node = create_Copy(state, n);
push(@obst_unit_tp_defs, "\t$tp_name,\n");
push(@obst_init, "\n\t\t/* init of execution unit type $tp_name */\n");
push(@obst_init, "\t\tcur_unit_tp = &$arch\_execution_unit_types[$tp_name];\n");
- push(@obst_init, "\t\t(void) cur_unit_tp; /* avoid warning */\n");
push(@obst_unit_defs, "/* enum for execution units of type $unit_type */\n");
push(@obst_unit_defs, "enum $arch\_execunit_tp_$unit_type\_vals {\n");
if ($num_unit_types > 0) {
print OUT<<EOF;
-be_execution_unit_type_t $arch\_execution_unit_types[] = {
+static be_execution_unit_type_t $arch\_execution_unit_types[] = {
EOF
}
}
print OUT<<EOF;
-be_machine_t $arch\_cpu = {
+static be_machine_t $arch\_cpu = {
$bundle_size,
$bundles_per_cycle,
$num_unit_types,
static int initialized = 0;
if (! initialized) {
- be_execution_unit_type_t *cur_unit_tp;
+ be_execution_unit_type_t *cur_unit_tp = NULL;
(void) cur_unit_tp; /* avoid warning */
be_machine_init_dummy_unit();
NULL, /* register_saved_by */
};
-BE_REGISTER_MODULE_CONSTRUCTOR(be_init_arch_sparc);
+BE_REGISTER_MODULE_CONSTRUCTOR(be_init_arch_sparc)
void be_init_arch_sparc(void)
{
be_register_isa_if("sparc", &sparc_isa_if);
reg = map_i_to_o_reg(reg);
param->reg1 = reg;
} else {
- ir_mode *mode = param_regs[0]->reg_class->mode;
- ir_type *type = get_type_for_mode(mode);
- param->type = type;
- param->offset = stack_offset;
- assert(get_mode_size_bits(mode) == 32);
+ ir_mode *regmode = param_regs[0]->reg_class->mode;
+ ir_type *type = get_type_for_mode(regmode);
+ param->type = type;
+ param->offset = stack_offset;
+ assert(get_mode_size_bits(regmode) == 32);
stack_offset += 4;
}
}
static const arch_register_t *get_next_fp_reg(const arch_register_t *reg)
{
- unsigned index = reg->global_index;
- assert(reg == &sparc_registers[index]);
- index++;
- assert(index - REG_F0 < N_sparc_fp_REGS);
- return &sparc_registers[index];
+ unsigned idx = reg->global_index;
+ assert(reg == &sparc_registers[idx]);
+ idx++;
+ assert(idx - REG_F0 < N_sparc_fp_REGS);
+ return &sparc_registers[idx];
}
static void emit_be_Copy(const ir_node *node)
*/
static void sparc_dump_node(FILE *F, ir_node *n, dump_reason_t reason)
{
- const sparc_attr_t *attr;
+ const sparc_attr_t *sparc_attr;
switch (reason) {
case dump_node_opcode_txt:
break;
arch_dump_reqs_and_registers(F, n);
- attr = get_sparc_attr_const(n);
- if (attr->immediate_value_entity) {
+ sparc_attr = get_sparc_attr_const(n);
+ if (sparc_attr->immediate_value_entity) {
ir_fprintf(F, "entity: %+F (offset %d)\n",
- attr->immediate_value_entity, attr->immediate_value);
+ sparc_attr->immediate_value_entity,
+ sparc_attr->immediate_value);
} else {
- ir_fprintf(F, "immediate value: %d\n", attr->immediate_value);
+ ir_fprintf(F, "immediate value: %d\n", sparc_attr->immediate_value);
}
if (sparc_has_load_store_attr(n)) {
const sparc_load_store_attr_t *attr = get_sparc_load_store_attr_const(n);
return 1;
return attr_a->src_mode != attr_b->src_mode
- || attr_a->dest_mode != attr_b->dest_mode;;
+ || attr_a->dest_mode != attr_b->dest_mode;
}
/* Include the generated constructor functions */
static beabi_helper_env_t *abihelper;
static const arch_register_t *sp_reg = &sparc_registers[REG_SP];
static const arch_register_t *fp_reg = &sparc_registers[REG_FRAME_POINTER];
-static calling_convention_t *cconv = NULL;
+static calling_convention_t *current_cconv = NULL;
static ir_mode *mode_gp;
static ir_mode *mode_flags;
static ir_mode *mode_fp;
assert(get_mode_size_bits(mode2) <= 32);
if (is_imm_encodeable(op2)) {
- ir_node *new_op1 = be_transform_node(op1);
int32_t immediate = get_tarval_long(get_Const_tarval(op2));
+ new_op1 = be_transform_node(op1);
if (! (flags & MATCH_MODE_NEUTRAL) && needs_extension(mode1)) {
new_op1 = gen_extension(dbgi, block, new_op1, mode1);
}
assert(match_flags & MATCH_MODE_NEUTRAL);
if (is_imm_encodeable(op2)) {
- ir_node *new_op1 = be_transform_node(op1);
int32_t immediate = get_tarval_long(get_Const_tarval(op2));
+ new_op1 = be_transform_node(op1);
return new_binopx_imm(dbgi, block, new_op1, new_flags, NULL, immediate);
}
new_op2 = be_transform_node(op2);
long default_pn = get_Cond_default_proj(node);
ir_entity *entity;
ir_node *table_address;
- ir_node *index;
+ ir_node *idx;
ir_node *load;
ir_node *address;
/* construct base address */
table_address = make_address(dbgi, block, entity, 0);
/* scale index */
- index = new_bd_sparc_Sll_imm(dbgi, block, new_selector, NULL, 2);
+ idx = new_bd_sparc_Sll_imm(dbgi, block, new_selector, NULL, 2);
/* load from jumptable */
- load = new_bd_sparc_Ld_reg(dbgi, block, table_address, index,
+ load = new_bd_sparc_Ld_reg(dbgi, block, table_address, idx,
get_irg_no_mem(current_ir_graph),
mode_gp);
address = new_r_Proj(load, mode_gp, pn_sparc_Ld_res);
ir_node *op = get_Conv_op(node);
ir_mode *src_mode = get_irn_mode(op);
ir_mode *dst_mode = get_irn_mode(node);
- dbg_info *dbg = get_irn_dbg_info(node);
+ dbg_info *dbgi = get_irn_dbg_info(node);
ir_node *new_op;
int src_bits = get_mode_size_bits(src_mode);
if (mode_is_float(src_mode)) {
if (mode_is_float(dst_mode)) {
/* float -> float conv */
- return create_fftof(dbg, block, new_op, src_mode, dst_mode);
+ return create_fftof(dbgi, block, new_op, src_mode, dst_mode);
} else {
/* float -> int conv */
if (!mode_is_signed(dst_mode))
panic("float to unsigned not implemented yet");
- return create_ftoi(dbg, block, new_op, src_mode);
+ return create_ftoi(dbgi, block, new_op, src_mode);
}
} else {
/* int -> float conv */
if (src_bits < 32) {
- new_op = gen_extension(dbg, block, new_op, src_mode);
+ new_op = gen_extension(dbgi, block, new_op, src_mode);
} else if (src_bits == 32 && !mode_is_signed(src_mode)) {
panic("unsigned to float not lowered!");
}
- return create_itof(dbg, block, new_op, dst_mode);
+ return create_itof(dbgi, block, new_op, dst_mode);
}
} else if (src_mode == mode_b) {
panic("ConvB not lowered %+F", node);
}
if (mode_is_signed(min_mode)) {
- return gen_sign_extension(dbg, block, new_op, min_bits);
+ return gen_sign_extension(dbgi, block, new_op, min_bits);
} else {
- return gen_zero_extension(dbg, block, new_op, min_bits);
+ return gen_zero_extension(dbgi, block, new_op, min_bits);
}
}
}
static ir_type *between_type = NULL;
static ir_type *between_type0 = NULL;
- if (cconv->omit_fp) {
+ if (current_cconv->omit_fp) {
if (between_type0 == NULL) {
between_type0
= new_type_class(new_id_from_str("sparc_between_type"));
int n_params;
/* calling conventions must be decided by now */
- assert(cconv != NULL);
+ assert(current_cconv != NULL);
/* construct argument type */
arg_type = new_type_struct(id_mangle_u(get_entity_ident(entity), new_id_from_chars("arg_type", 8)));
n_params = get_method_n_params(function_type);
for (p = 0; p < n_params; ++p) {
- reg_or_stackslot_t *param = &cconv->parameters[p];
+ reg_or_stackslot_t *param = &current_cconv->parameters[p];
char buf[128];
ident *id;
layout->arg_type = arg_type;
layout->initial_offset = 0;
layout->initial_bias = 0;
- layout->sp_relative = cconv->omit_fp;
+ layout->sp_relative = current_cconv->omit_fp;
assert(N_FRAME_TYPES == 3);
layout->order[0] = layout->frame_type;
arch_register_req_type_ignore);
/* function parameters in registers */
for (i = 0; i < get_method_n_params(function_type); ++i) {
- const reg_or_stackslot_t *param = &cconv->parameters[i];
+ const reg_or_stackslot_t *param = &current_cconv->parameters[i];
if (param->reg0 != NULL) {
be_prolog_add_reg(abihelper, param->reg0,
arch_register_req_type_none);
}
/* we need the values of the callee saves (Note: non omit-fp mode has no
* callee saves) */
- if (cconv->omit_fp) {
+ if (current_cconv->omit_fp) {
size_t n_callee_saves = ARRAY_SIZE(omit_fp_callee_saves);
size_t c;
for (c = 0; c < n_callee_saves; ++c) {
for (i = 0; i < n_res; ++i) {
ir_node *res_value = get_Return_res(node, i);
ir_node *new_res_value = be_transform_node(res_value);
- const reg_or_stackslot_t *slot = &cconv->results[i];
+ const reg_or_stackslot_t *slot = &current_cconv->results[i];
const arch_register_t *reg = slot->reg0;
assert(slot->reg1 == NULL);
be_epilog_add_reg(abihelper, reg, arch_register_req_type_none,
new_res_value);
}
/* callee saves */
- if (cconv->omit_fp) {
+ if (current_cconv->omit_fp) {
size_t n_callee_saves = ARRAY_SIZE(omit_fp_callee_saves);
- size_t i;
for (i = 0; i < n_callee_saves; ++i) {
const arch_register_t *reg = omit_fp_callee_saves[i];
ir_node *value
static ir_node *get_frame_base(void)
{
- const arch_register_t *reg = cconv->omit_fp ? sp_reg : fp_reg;
+ const arch_register_t *reg = current_cconv->omit_fp ? sp_reg : fp_reg;
return be_prolog_get_reg_value(abihelper, reg);
}
/* Proj->Proj->Start must be a method argument */
assert(get_Proj_proj(get_Proj_pred(node)) == pn_Start_T_args);
- param = &cconv->parameters[pn];
+ param = &current_cconv->parameters[pn];
if (param->reg0 != NULL) {
/* argument transmitted in register */
abihelper = be_abihelper_prepare(irg);
be_collect_stacknodes(abihelper);
- cconv = sparc_decide_calling_convention(get_entity_type(entity), irg);
+ current_cconv
+ = sparc_decide_calling_convention(get_entity_type(entity), irg);
create_stacklayout(irg);
be_transform_graph(irg, NULL);
be_abihelper_finish(abihelper);
- sparc_free_calling_convention(cconv);
+ sparc_free_calling_convention(current_cconv);
frame_type = get_irg_frame_type(irg);
if (get_type_state(frame_type) == layout_undefined)