static TEMPLATE_isa_t TEMPLATE_isa_template = {
{
&TEMPLATE_isa_if, /* isa interface implementation */
- &TEMPLATE_gp_regs[REG_SP], /* stack pointer register */
- &TEMPLATE_gp_regs[REG_BP], /* base pointer register */
+ N_TEMPLATE_REGISTERS,
+ TEMPLATE_registers,
+ &TEMPLATE_registers[REG_SP], /* stack pointer register */
+ &TEMPLATE_registers[REG_BP], /* base pointer register */
&TEMPLATE_reg_classes[CLASS_TEMPLATE_gp], /* link pointer register class */
-1, /* stack direction */
2, /* power of two stack alignment for calls, 2^2 == 4 */
static unsigned TEMPLATE_get_n_reg_class(void)
{
- return N_CLASSES;
+ return N_TEMPLATE_CLASSES;
}
/**
 * Returns the i-th register class of the TEMPLATE backend.
 * i must be below N_TEMPLATE_CLASSES.
 */
static const arch_register_class_t *TEMPLATE_get_reg_class(unsigned i)
{
	assert(i < N_TEMPLATE_CLASSES);
	return &TEMPLATE_reg_classes[i];
}
mode = get_type_mode(tp);
be_abi_call_res_reg(abi, 0,
- mode_is_float(mode) ? &TEMPLATE_fp_regs[REG_F0] : &TEMPLATE_gp_regs[REG_R0], ABI_CONTEXT_BOTH);
+ mode_is_float(mode) ? &TEMPLATE_registers[REG_F0] : &TEMPLATE_registers[REG_R0], ABI_CONTEXT_BOTH);
}
}
static amd64_isa_t amd64_isa_template = {
{
&amd64_isa_if, /* isa interface implementation */
- &amd64_gp_regs[REG_RSP], /* stack pointer register */
- &amd64_gp_regs[REG_RBP], /* base pointer register */
+ N_AMD64_REGISTERS,
+ amd64_registers,
+ &amd64_registers[REG_RSP], /* stack pointer register */
+ &amd64_registers[REG_RBP], /* base pointer register */
&amd64_reg_classes[CLASS_amd64_gp], /* link pointer register class */
-1, /* stack direction */
3, /* power of two stack alignment for calls, 2^2 == 4 */
static unsigned amd64_get_n_reg_class(void)
{
- return N_CLASSES;
+ return N_AMD64_CLASSES;
}
/**
 * Returns the i-th register class of the amd64 backend.
 * i must be below N_AMD64_CLASSES.
 */
static const arch_register_class_t *amd64_get_reg_class(unsigned i)
{
	assert(i < N_AMD64_CLASSES);
	return &amd64_reg_classes[i];
}
};
/**
 * GP registers used for integer/pointer parameter passing, in ABI order
 * (matches the amd64 SysV calling convention: rdi, rsi, rdx, rcx, r8, r9).
 */
static const arch_register_t *gpreg_param_reg_std[] = {
	&amd64_registers[REG_RDI],
	&amd64_registers[REG_RSI],
	&amd64_registers[REG_RDX],
	&amd64_registers[REG_RCX],
	&amd64_registers[REG_R8],
	&amd64_registers[REG_R9],
};
static const arch_register_t *amd64_get_RegParam_reg(int n)
/* FIXME: No floating point yet */
/* be_abi_call_res_reg(abi, 0,
- mode_is_float(mode) ? &amd64_fp_regs[REG_F0] : &amd64_gp_regs[REG_R0], ABI_CONTEXT_BOTH) */;
+ mode_is_float(mode) ? &amd64_fp_regs[REG_F0] : &amd64_registers[REG_R0], ABI_CONTEXT_BOTH) */;
be_abi_call_res_reg(abi, 0,
- &amd64_gp_regs[REG_RAX], ABI_CONTEXT_BOTH);
+ &amd64_registers[REG_RAX], ABI_CONTEXT_BOTH);
}
}
#include "gen_arm_regalloc_if.h"
/** ARM registers that a called function must preserve (callee-saved). */
static const arch_register_t *const callee_saves[] = {
	&arm_registers[REG_R4],
	&arm_registers[REG_R5],
	&arm_registers[REG_R6],
	&arm_registers[REG_R7],
	&arm_registers[REG_R8],
	&arm_registers[REG_R9],
	&arm_registers[REG_R10],
	&arm_registers[REG_R11],
	&arm_registers[REG_LR],
};
/** ARM registers a caller must assume to be clobbered across a call
 *  (caller-saved): r0-r3, lr and the FPA float registers f0-f7. */
static const arch_register_t *const caller_saves[] = {
	&arm_registers[REG_R0],
	&arm_registers[REG_R1],
	&arm_registers[REG_R2],
	&arm_registers[REG_R3],
	&arm_registers[REG_LR],
	&arm_registers[REG_F0],
	&arm_registers[REG_F1],
	&arm_registers[REG_F2],
	&arm_registers[REG_F3],
	&arm_registers[REG_F4],
	&arm_registers[REG_F5],
	&arm_registers[REG_F6],
	&arm_registers[REG_F7],
};
/** GP registers used for parameter passing (ARM AAPCS: r0-r3). */
static const arch_register_t* const param_regs[] = {
	&arm_registers[REG_R0],
	&arm_registers[REG_R1],
	&arm_registers[REG_R2],
	&arm_registers[REG_R3]
};
/** GP registers used for integer result values (r0-r3). */
static const arch_register_t* const result_regs[] = {
	&arm_registers[REG_R0],
	&arm_registers[REG_R1],
	&arm_registers[REG_R2],
	&arm_registers[REG_R3]
};
/** FPA registers used for floating-point result values (f0, f1). */
static const arch_register_t* const float_result_regs[] = {
	&arm_registers[REG_F0],
	&arm_registers[REG_F1]
};
/** information about a single parameter or result */
tmpregs[0] = get_in_reg(irn, 2);
tmpregs[1] = get_in_reg(irn, 3);
tmpregs[2] = get_in_reg(irn, 4);
- tmpregs[3] = &arm_gp_regs[REG_R12];
+ tmpregs[3] = &arm_registers[REG_R12];
/* Note: R12 is always the last register because the RA did not assign higher ones */
qsort((void *)tmpregs, 3, sizeof(tmpregs[0]), reg_cmp);
/** GP registers used for standard parameter passing on ARM (r0-r3). */
static const arch_register_t *gpreg_param_reg_std[] = {
	&arm_registers[REG_R0],
	&arm_registers[REG_R1],
	&arm_registers[REG_R2],
	&arm_registers[REG_R3],
};
const arch_register_t *arm_get_RegParam_reg(int n)
block = get_nodes_block(node);
for (cnt = 1; cnt < v.ops; ++cnt) {
int value = sign * arm_ror(v.values[cnt], v.rors[cnt]);
- ir_node *next = be_new_IncSP(&arm_gp_regs[REG_SP], block, node,
+ ir_node *next = be_new_IncSP(&arm_registers[REG_SP], block, node,
value, 1);
sched_add_after(node, next);
node = next;
ir_node *ptr;
ptr = new_bd_arm_Add_imm(dbgi, block, frame, v->values[0], v->rors[0]);
- arch_set_irn_register(ptr, &arm_gp_regs[REG_R12]);
+ arch_set_irn_register(ptr, &arm_registers[REG_R12]);
sched_add_before(node, ptr);
for (cnt = 1; cnt < v->ops; ++cnt) {
ir_node *next = new_bd_arm_Add_imm(dbgi, block, ptr, v->values[cnt],
v->rors[cnt]);
- arch_set_irn_register(next, &arm_gp_regs[REG_R12]);
+ arch_set_irn_register(next, &arm_registers[REG_R12]);
sched_add_before(node, next);
ptr = next;
}
ir_node *ptr;
ptr = new_bd_arm_Sub_imm(dbgi, block, frame, v->values[0], v->rors[0]);
- arch_set_irn_register(ptr, &arm_gp_regs[REG_R12]);
+ arch_set_irn_register(ptr, &arm_registers[REG_R12]);
sched_add_before(node, ptr);
for (cnt = 1; cnt < v->ops; ++cnt) {
ir_node *next = new_bd_arm_Sub_imm(dbgi, block, ptr, v->values[cnt],
v->rors[cnt]);
- arch_set_irn_register(next, &arm_gp_regs[REG_R12]);
+ arch_set_irn_register(next, &arm_registers[REG_R12]);
sched_add_before(node, next);
ptr = next;
}
DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
-static const arch_register_t *sp_reg = &arm_gp_regs[REG_SP];
+static const arch_register_t *sp_reg = &arm_registers[REG_SP];
static ir_mode *mode_gp;
static ir_mode *mode_fp;
static beabi_helper_env_t *abihelper;
static arm_isa_t arm_isa_template = {
{
&arm_isa_if, /* isa interface */
- &arm_gp_regs[REG_SP], /* stack pointer */
- &arm_gp_regs[REG_R11], /* base pointer */
+ N_ARM_REGISTERS,
+ arm_registers,
+ &arm_registers[REG_SP], /* stack pointer */
+ &arm_registers[REG_R11], /* base pointer */
&arm_reg_classes[CLASS_arm_gp], /* static link pointer class */
-1, /* stack direction */
2, /* power of two stack alignment for calls, 2^2 == 4 */
*/
static unsigned arm_get_n_reg_class(void)
{
- return N_CLASSES;
+ return N_ARM_CLASSES;
}
/**
*/
/**
 * Returns the i-th register class of the ARM backend.
 * i must be below N_ARM_CLASSES.
 */
static const arch_register_class_t *arm_get_reg_class(unsigned i)
{
	assert(i < N_ARM_CLASSES);
	return &arm_reg_classes[i];
}
struct arch_register_t {
const char *name; /**< The name of the register. */
const arch_register_class_t *reg_class; /**< The class of the register */
- unsigned index; /**< The index of the register in
+ unsigned short index; /**< The index of the register in
the class. */
+ unsigned short global_index;
arch_register_type_t type; /**< The type of the register. */
/** register constraint allowing just this register */
const arch_register_req_t *single_req;
*/
struct arch_env_t {
const arch_isa_if_t *impl;
+ unsigned n_registers; /**< number of registers */
+ const arch_register_t *registers; /**< register array */
const arch_register_t *sp; /**< The stack pointer register. */
const arch_register_t *bp; /**< The base pointer register. */
const arch_register_class_t *link_class; /**< The static link pointer
int spill_cost; /**< cost for a be_Spill node */
int reload_cost; /**< cost for a be_Reload node */
bool custom_abi : 1; /**< backend does all abi handling
- and does not need the generic stuff
- from beabi.h/.c */
+ and does not need the generic
+ stuff from beabi.h/.c */
};
static inline unsigned arch_irn_get_n_outs(const ir_node *node)
{
ia32_irg_data_t *irg_data = ia32_get_irg_data(irg);
return create_const(irg, &irg_data->noreg_gp, new_bd_ia32_NoReg_GP,
- &ia32_gp_regs[REG_GP_NOREG]);
+ &ia32_registers[REG_GP_NOREG]);
}
ir_node *ia32_new_NoReg_vfp(ir_graph *irg)
{
ia32_irg_data_t *irg_data = ia32_get_irg_data(irg);
return create_const(irg, &irg_data->noreg_vfp, new_bd_ia32_NoReg_VFP,
- &ia32_vfp_regs[REG_VFP_NOREG]);
+ &ia32_registers[REG_VFP_NOREG]);
}
ir_node *ia32_new_NoReg_xmm(ir_graph *irg)
{
ia32_irg_data_t *irg_data = ia32_get_irg_data(irg);
return create_const(irg, &irg_data->noreg_xmm, new_bd_ia32_NoReg_XMM,
- &ia32_xmm_regs[REG_XMM_NOREG]);
+ &ia32_registers[REG_XMM_NOREG]);
}
ir_node *ia32_new_Fpu_truncate(ir_graph *irg)
{
ia32_irg_data_t *irg_data = ia32_get_irg_data(irg);
return create_const(irg, &irg_data->fpu_trunc_mode, new_bd_ia32_ChangeCW,
- &ia32_fp_cw_regs[REG_FPCW]);
+ &ia32_registers[REG_FPCW]);
}
{
dbg_info *dbg = get_irn_dbg_info(node);
ir_mode *spmode = mode_Iu;
- const arch_register_t *spreg = &ia32_gp_regs[REG_ESP];
+ const arch_register_t *spreg = &ia32_registers[REG_ESP];
ir_node *sp;
sp = new_rd_Proj(dbg, pred, spmode, pos);
{
ir_node *block = get_nodes_block(node);
ir_graph *irg = get_irn_irg(node);
- ir_node *sp = be_abi_get_ignore_irn(be_get_irg_abi(irg), &ia32_gp_regs[REG_ESP]);
+ ir_node *sp = be_abi_get_ignore_irn(be_get_irg_abi(irg), &ia32_registers[REG_ESP]);
int arity = be_get_MemPerm_entity_arity(node);
ir_node **pops = ALLOCAN(ir_node*, arity);
ir_node *in[1];
static ia32_isa_t ia32_isa_template = {
{
&ia32_isa_if, /* isa interface implementation */
- &ia32_gp_regs[REG_ESP], /* stack pointer register */
- &ia32_gp_regs[REG_EBP], /* base pointer register */
+ N_IA32_REGISTERS,
+ ia32_registers,
+ &ia32_registers[REG_ESP], /* stack pointer register */
+ &ia32_registers[REG_EBP], /* base pointer register */
&ia32_reg_classes[CLASS_ia32_gp], /* static link pointer register class */
-1, /* stack direction */
2, /* power of two stack alignment, 2^2 == 4 */
*/
static unsigned ia32_get_n_reg_class(void)
{
- return N_CLASSES;
+ return N_IA32_CLASSES;
}
/**
*/
/**
 * Returns the i-th register class of the ia32 backend.
 * i must be below N_IA32_CLASSES.
 */
static const arch_register_class_t *ia32_get_reg_class(unsigned i)
{
	assert(i < N_IA32_CLASSES);
	return &ia32_reg_classes[i];
}
const ir_mode *mode)
{
/** GP registers used for fastcall parameter passing (ecx, edx);
 *  NULL-terminated so callers can detect the end of the list. */
static const arch_register_t *gpreg_param_reg_fastcall[] = {
	&ia32_registers[REG_ECX],
	&ia32_registers[REG_EDX],
	NULL
};
static const unsigned MAXNUM_GPREG_ARGS = 3;
/** GP registers used for regparam parameter passing (eax, edx, ecx). */
static const arch_register_t *gpreg_param_reg_regparam[] = {
	&ia32_registers[REG_EAX],
	&ia32_registers[REG_EDX],
	&ia32_registers[REG_ECX]
};
/** GP register used for the thiscall 'this' pointer (ecx only);
 *  trailing NULLs pad the list to the common length. */
static const arch_register_t *gpreg_param_reg_this[] = {
	&ia32_registers[REG_ECX],
	NULL,
	NULL
};
/** SSE registers used for floating-point parameter passing (xmm0-xmm7). */
static const arch_register_t *fpreg_sse_param_reg_std[] = {
	&ia32_registers[REG_XMM0],
	&ia32_registers[REG_XMM1],
	&ia32_registers[REG_XMM2],
	&ia32_registers[REG_XMM3],
	&ia32_registers[REG_XMM4],
	&ia32_registers[REG_XMM5],
	&ia32_registers[REG_XMM6],
	&ia32_registers[REG_XMM7]
};
static const arch_register_t *fpreg_sse_param_reg_this[] = {
assert(!mode_is_float(mode) && "mixed INT, FP results not supported");
- be_abi_call_res_reg(abi, 0, &ia32_gp_regs[REG_EAX], ABI_CONTEXT_BOTH);
- be_abi_call_res_reg(abi, 1, &ia32_gp_regs[REG_EDX], ABI_CONTEXT_BOTH);
+ be_abi_call_res_reg(abi, 0, &ia32_registers[REG_EAX], ABI_CONTEXT_BOTH);
+ be_abi_call_res_reg(abi, 1, &ia32_registers[REG_EDX], ABI_CONTEXT_BOTH);
}
else if (n == 1) {
const arch_register_t *reg;
assert(is_atomic_type(tp));
mode = get_type_mode(tp);
- reg = mode_is_float(mode) ? &ia32_vfp_regs[REG_VF0] : &ia32_gp_regs[REG_EAX];
+ reg = mode_is_float(mode) ? &ia32_registers[REG_VF0] : &ia32_registers[REG_EAX];
be_abi_call_res_reg(abi, 0, reg, ABI_CONTEXT_BOTH);
}
ir_node *start_block = get_irg_start_block(irg);
ir_node *immediate = new_bd_ia32_Immediate(NULL, start_block, symconst,
symconst_sign, no_pic_adjust, val);
- arch_set_irn_register(immediate, &ia32_gp_regs[REG_GP_NOREG]);
+ arch_set_irn_register(immediate, &ia32_registers[REG_GP_NOREG]);
return immediate;
}
/* TODO: construct a hashmap instead of doing linear search for clobber
* register */
- for (c = 0; c < N_CLASSES; ++c) {
+ for (c = 0; c < N_IA32_CLASSES; ++c) {
cls = & ia32_reg_classes[c];
for (r = 0; r < cls->n_regs; ++r) {
const arch_register_t *temp_reg = arch_register_for_index(cls, r);
case 'a':
assert(cls == NULL || cls == &ia32_reg_classes[CLASS_ia32_gp]);
cls = &ia32_reg_classes[CLASS_ia32_gp];
- limited |= 1 << REG_EAX;
+ limited |= 1 << REG_GP_EAX;
break;
case 'b':
assert(cls == NULL || cls == &ia32_reg_classes[CLASS_ia32_gp]);
cls = &ia32_reg_classes[CLASS_ia32_gp];
- limited |= 1 << REG_EBX;
+ limited |= 1 << REG_GP_EBX;
break;
case 'c':
assert(cls == NULL || cls == &ia32_reg_classes[CLASS_ia32_gp]);
cls = &ia32_reg_classes[CLASS_ia32_gp];
- limited |= 1 << REG_ECX;
+ limited |= 1 << REG_GP_ECX;
break;
case 'd':
assert(cls == NULL || cls == &ia32_reg_classes[CLASS_ia32_gp]);
cls = &ia32_reg_classes[CLASS_ia32_gp];
- limited |= 1 << REG_EDX;
+ limited |= 1 << REG_GP_EDX;
break;
case 'D':
assert(cls == NULL || cls == &ia32_reg_classes[CLASS_ia32_gp]);
cls = &ia32_reg_classes[CLASS_ia32_gp];
- limited |= 1 << REG_EDI;
+ limited |= 1 << REG_GP_EDI;
break;
case 'S':
assert(cls == NULL || cls == &ia32_reg_classes[CLASS_ia32_gp]);
cls = &ia32_reg_classes[CLASS_ia32_gp];
- limited |= 1 << REG_ESI;
+ limited |= 1 << REG_GP_ESI;
break;
case 'Q':
case 'q':
* difference to Q for us (we only assign whole registers) */
assert(cls == NULL || cls == &ia32_reg_classes[CLASS_ia32_gp]);
cls = &ia32_reg_classes[CLASS_ia32_gp];
- limited |= 1 << REG_EAX | 1 << REG_EBX | 1 << REG_ECX |
- 1 << REG_EDX;
+ limited |= 1 << REG_GP_EAX | 1 << REG_GP_EBX | 1 << REG_GP_ECX |
+ 1 << REG_GP_EDX;
break;
case 'A':
assert(cls == NULL || cls == &ia32_reg_classes[CLASS_ia32_gp]);
cls = &ia32_reg_classes[CLASS_ia32_gp];
- limited |= 1 << REG_EAX | 1 << REG_EDX;
+ limited |= 1 << REG_GP_EAX | 1 << REG_GP_EDX;
break;
case 'l':
assert(cls == NULL || cls == &ia32_reg_classes[CLASS_ia32_gp]);
cls = &ia32_reg_classes[CLASS_ia32_gp];
- limited |= 1 << REG_EAX | 1 << REG_EBX | 1 << REG_ECX |
- 1 << REG_EDX | 1 << REG_ESI | 1 << REG_EDI |
- 1 << REG_EBP;
+ limited |= 1 << REG_GP_EAX | 1 << REG_GP_EBX | 1 << REG_GP_ECX |
+ 1 << REG_GP_EDX | 1 << REG_GP_ESI | 1 << REG_GP_EDI |
+ 1 << REG_GP_EBP;
break;
case 'R':
const ir_asm_constraint *out_constraints;
ident **clobbers;
int clobbers_flags = 0;
- unsigned clobber_bits[N_CLASSES];
+ unsigned clobber_bits[N_IA32_CLASSES];
int out_size;
backend_info_t *info;
assert(reg && "no in register found");
- if (reg == &ia32_gp_regs[REG_GP_NOREG])
+ if (reg == &ia32_registers[REG_GP_NOREG])
panic("trying to emit noreg for %+F input %d", irn, pos);
return reg;
static void build_reg_map(void)
{
- reg_gp_map[REG_EAX] = 0x0;
- reg_gp_map[REG_ECX] = 0x1;
- reg_gp_map[REG_EDX] = 0x2;
- reg_gp_map[REG_EBX] = 0x3;
- reg_gp_map[REG_ESP] = 0x4;
- reg_gp_map[REG_EBP] = 0x5;
- reg_gp_map[REG_ESI] = 0x6;
- reg_gp_map[REG_EDI] = 0x7;
+ reg_gp_map[REG_GP_EAX] = 0x0;
+ reg_gp_map[REG_GP_ECX] = 0x1;
+ reg_gp_map[REG_GP_EDX] = 0x2;
+ reg_gp_map[REG_GP_EBX] = 0x3;
+ reg_gp_map[REG_GP_ESP] = 0x4;
+ reg_gp_map[REG_GP_EBP] = 0x5;
+ reg_gp_map[REG_GP_ESI] = 0x6;
+ reg_gp_map[REG_GP_EDI] = 0x7;
pnc_map_signed[pn_Cmp_Eq] = 0x04;
pnc_map_signed[pn_Cmp_Lt] = 0x0C;
bemit_mod_am(ruval, node);
} else {
const arch_register_t *reg = get_in_reg(node, n_ia32_binary_left);
- if (reg->index == REG_EAX) {
+ if (reg->index == REG_GP_EAX) {
bemit8(opcode_ax);
} else {
bemit8(opcode);
assert(cls0 == arch_register_get_class(in1) && "Register class mismatch at Perm");
if (cls0 == &ia32_reg_classes[CLASS_ia32_gp]) {
- if (in0->index == REG_EAX) {
+ if (in0->index == REG_GP_EAX) {
bemit8(0x90 + reg_gp_map[in1->index]);
- } else if (in1->index == REG_EAX) {
+ } else if (in1->index == REG_GP_EAX) {
bemit8(0x90 + reg_gp_map[in0->index]);
} else {
bemit8(0x87);
bemit_mod_am(7, node);
} else {
const arch_register_t *reg = get_in_reg(node, n_ia32_binary_left);
- if (reg->index == REG_EAX) {
+ if (reg->index == REG_GP_EAX) {
bemit8(0x3D);
} else {
bemit8(0x81);
if (is_ia32_Immediate(right)) {
if (get_ia32_op_type(node) == ia32_Normal) {
const arch_register_t *out = get_in_reg(node, n_ia32_Cmp_left);
- if (out->index == REG_EAX) {
+ if (out->index == REG_GP_EAX) {
bemit8(0x3C);
} else {
bemit8(0x80);
if (is_ia32_Immediate(right)) {
if (get_ia32_op_type(node) == ia32_Normal) {
const arch_register_t *out = get_in_reg(node, n_ia32_Test8Bit_left);
- if (out->index == REG_EAX) {
+ if (out->index == REG_GP_EAX) {
bemit8(0xA8);
} else {
bemit8(0xF6);
const arch_register_t *out = get_out_reg(node, 0);
bemit8(0x65); // gs:
- if (out->index == REG_EAX) {
+ if (out->index == REG_GP_EAX) {
bemit8(0xA1); // movl 0, %eax
} else {
bemit8(0x8B); // movl 0, %reg
/* helper function for bemit_minus64bit */
static void bemit_helper_xchg(const arch_register_t *src, const arch_register_t *dst)
{
- if (src->index == REG_EAX) {
+ if (src->index == REG_GP_EAX) {
bemit8(0x90 + reg_gp_map[dst->index]); // xchgl %eax, %dst
- } else if (dst->index == REG_EAX) {
+ } else if (dst->index == REG_GP_EAX) {
bemit8(0x90 + reg_gp_map[src->index]); // xchgl %src, %eax
} else {
bemit8(0x87); // xchgl %src, %dst
{
const arch_register_t *out = get_out_reg(node, 0);
- if (out->index == REG_EAX) {
+ if (out->index == REG_GP_EAX) {
ir_node *base = get_irn_n(node, n_ia32_base);
int has_base = !is_ia32_NoReg_GP(base);
ir_node *index = get_irn_n(node, n_ia32_index);
} else {
const arch_register_t *in = get_in_reg(node, n_ia32_Store_val);
- if (in->index == REG_EAX) {
+ if (in->index == REG_GP_EAX) {
ir_node *base = get_irn_n(node, n_ia32_base);
int has_base = !is_ia32_NoReg_GP(base);
ir_node *index = get_irn_n(node, n_ia32_index);
sched_add_before(irn, not);
stc = new_bd_ia32_Stc(dbg, block);
- arch_set_irn_register(stc, &ia32_flags_regs[REG_EFLAGS]);
+ arch_set_irn_register(stc, &ia32_registers[REG_EFLAGS]);
sched_add_before(irn, stc);
adc = new_bd_ia32_Adc(dbg, block, noreg, noreg, nomem, not, in1, stc);
set_irn_mode(adc, mode_T);
adc_flags = new_r_Proj(adc, mode_Iu, pn_ia32_Adc_flags);
- arch_set_irn_register(adc_flags, &ia32_flags_regs[REG_EFLAGS]);
+ arch_set_irn_register(adc_flags, &ia32_registers[REG_EFLAGS]);
cmc = new_bd_ia32_Cmc(dbg, block, adc_flags);
- arch_set_irn_register(cmc, &ia32_flags_regs[REG_EFLAGS]);
+ arch_set_irn_register(cmc, &ia32_registers[REG_EFLAGS]);
sched_add_before(irn, cmc);
exchange(flags_proj, cmc);
set_ia32_ls_mode(reload, ia32_reg_classes[CLASS_ia32_fp_cw].mode);
set_ia32_am_sc(reload, entity);
set_ia32_use_frame(reload);
- arch_set_irn_register(reload, &ia32_fp_cw_regs[REG_FPCW]);
+ arch_set_irn_register(reload, &ia32_registers[REG_FPCW]);
return reload;
}
set_ia32_op_type(reload, ia32_AddrModeS);
set_ia32_ls_mode(reload, ia32_reg_classes[CLASS_ia32_fp_cw].mode);
set_ia32_use_frame(reload);
- arch_set_irn_register(reload, &ia32_fp_cw_regs[REG_FPCW]);
+ arch_set_irn_register(reload, &ia32_registers[REG_FPCW]);
sched_add_before(before, reload);
} else {
/* TODO: make the actual mode configurable in ChangeCW... */
or_const = new_bd_ia32_Immediate(NULL, get_irg_start_block(irg),
NULL, 0, 0, 3072);
- arch_set_irn_register(or_const, &ia32_gp_regs[REG_GP_NOREG]);
+ arch_set_irn_register(or_const, &ia32_registers[REG_GP_NOREG]);
or = new_bd_ia32_Or(NULL, block, noreg, noreg, nomem, load_res,
or_const);
sched_add_before(before, or);
set_ia32_op_type(fldcw, ia32_AddrModeS);
set_ia32_ls_mode(fldcw, lsmode);
set_ia32_use_frame(fldcw);
- arch_set_irn_register(fldcw, &ia32_fp_cw_regs[REG_FPCW]);
+ arch_set_irn_register(fldcw, &ia32_registers[REG_FPCW]);
sched_add_before(before, fldcw);
reload = fldcw;
return;
reg = arch_get_irn_register(node);
- if (reg == &ia32_fp_cw_regs[REG_FPCW] && !is_ia32_ChangeCW(node)) {
+ if (reg == &ia32_registers[REG_FPCW] && !is_ia32_ChangeCW(node)) {
ARR_APP1(ir_node*, env->state_nodes, node);
}
}
{
collect_fpu_mode_nodes_env_t env;
be_ssa_construction_env_t senv;
- const arch_register_t *reg = &ia32_fp_cw_regs[REG_FPCW];
+ const arch_register_t *reg = &ia32_registers[REG_FPCW];
ir_node *initial_value;
ir_node **phis;
be_lv_t *lv = be_get_irg_liveness(irg);
rewire_fpu_mode_nodes(irg);
/* ensure correct fpu mode for operations */
- be_assure_state(irg, &ia32_fp_cw_regs[REG_FPCW],
+ be_assure_state(irg, &ia32_registers[REG_FPCW],
NULL, create_fpu_mode_spill, create_fpu_mode_reload);
}
/**
 * Fills reg_map with the 16-bit names ("ax", "bx", ...) of the ia32
 * GP registers, keyed by the register descriptor.
 */
void ia32_build_16bit_reg_map(pmap *reg_map)
{
	pmap_insert(reg_map, &ia32_registers[REG_EAX], "ax");
	pmap_insert(reg_map, &ia32_registers[REG_EBX], "bx");
	pmap_insert(reg_map, &ia32_registers[REG_ECX], "cx");
	pmap_insert(reg_map, &ia32_registers[REG_EDX], "dx");
	pmap_insert(reg_map, &ia32_registers[REG_ESI], "si");
	pmap_insert(reg_map, &ia32_registers[REG_EDI], "di");
	pmap_insert(reg_map, &ia32_registers[REG_EBP], "bp");
	pmap_insert(reg_map, &ia32_registers[REG_ESP], "sp");
}
/**
 * Fills reg_map with the low 8-bit names ("al", "bl", ...) of the
 * byte-addressable ia32 GP registers, keyed by the register descriptor.
 */
void ia32_build_8bit_reg_map(pmap *reg_map)
{
	pmap_insert(reg_map, &ia32_registers[REG_EAX], "al");
	pmap_insert(reg_map, &ia32_registers[REG_EBX], "bl");
	pmap_insert(reg_map, &ia32_registers[REG_ECX], "cl");
	pmap_insert(reg_map, &ia32_registers[REG_EDX], "dl");
}
/**
 * Fills reg_map with the high 8-bit names ("ah", "bh", ...) of the
 * byte-addressable ia32 GP registers, keyed by the register descriptor.
 */
void ia32_build_8bit_reg_map_high(pmap *reg_map)
{
	pmap_insert(reg_map, &ia32_registers[REG_EAX], "ah");
	pmap_insert(reg_map, &ia32_registers[REG_EBX], "bh");
	pmap_insert(reg_map, &ia32_registers[REG_ECX], "ch");
	pmap_insert(reg_map, &ia32_registers[REG_EDX], "dh");
}
const char *ia32_get_mapped_reg_name(pmap *reg_map, const arch_register_t *reg)
flags_mode = ia32_reg_classes[CLASS_ia32_flags].mode;
flags_proj = new_r_Proj(left, flags_mode, pn_ia32_flags);
- arch_set_irn_register(flags_proj, &ia32_flags_regs[REG_EFLAGS]);
+ arch_set_irn_register(flags_proj, &ia32_registers[REG_EFLAGS]);
assert(get_irn_mode(node) != mode_T);
} else if (offset < 256) {
arch_register_t const* const reg = arch_get_irn_register(left);
- if (reg != &ia32_gp_regs[REG_EAX] &&
- reg != &ia32_gp_regs[REG_EBX] &&
- reg != &ia32_gp_regs[REG_ECX] &&
- reg != &ia32_gp_regs[REG_EDX]) {
+ if (reg != &ia32_registers[REG_EAX] &&
+ reg != &ia32_registers[REG_EBX] &&
+ reg != &ia32_registers[REG_ECX] &&
+ reg != &ia32_registers[REG_EDX]) {
return;
}
} else {
static ir_node *create_push(dbg_info *dbgi, ir_node *block,
ir_node *stack, ir_node *schedpoint)
{
- const arch_register_t *esp = &ia32_gp_regs[REG_ESP];
+ const arch_register_t *esp = &ia32_registers[REG_ESP];
ir_node *val = ia32_new_NoReg_gp(cg);
ir_node *noreg = ia32_new_NoReg_gp(cg);
*/
static void peephole_Load_IncSP_to_pop(ir_node *irn)
{
- const arch_register_t *esp = &ia32_gp_regs[REG_ESP];
+ const arch_register_t *esp = &ia32_registers[REG_ESP];
int i, maxslot, inc_ofs, ofs;
ir_node *node, *pred_sp, *block;
ir_node *loads[MAXPUSH_OPTIMIZE];
int i;
for (i = 0; i < N_ia32_gp_REGS; ++i) {
- const arch_register_t *reg = &ia32_gp_regs[i];
+ const arch_register_t *reg = &ia32_reg_classes[CLASS_ia32_gp].regs[i];
if (arch_register_type_is(reg, ignore))
continue;
if (be_peephole_get_value(CLASS_ia32_gp, i) == NULL)
- return &ia32_gp_regs[i];
+ return reg;
}
return NULL;
ir_node *stack, ir_node *schedpoint,
const arch_register_t *reg)
{
- const arch_register_t *esp = &ia32_gp_regs[REG_ESP];
+ const arch_register_t *esp = &ia32_registers[REG_ESP];
ir_graph *irg = get_irn_irg(block);
ir_node *pop;
ir_node *keep;
*/
static void peephole_be_IncSP(ir_node *node)
{
- const arch_register_t *esp = &ia32_gp_regs[REG_ESP];
+ const arch_register_t *esp = &ia32_registers[REG_ESP];
const arch_register_t *reg;
dbg_info *dbgi;
ir_node *block;
if (ia32_cg_config.use_mov_0)
return;
/* xor destroys the flags, so no-one must be using them */
- if (be_peephole_get_value(CLASS_ia32_flags, REG_EFLAGS) != NULL)
+ if (be_peephole_get_value(CLASS_ia32_flags, REG_FLAGS_EFLAGS) != NULL)
return;
reg = arch_get_irn_register(node);
ir_node *start_block = get_irg_start_block(irg);
ir_node *immediate
= new_bd_ia32_Immediate(NULL, start_block, NULL, 0, 0, val);
- arch_set_irn_register(immediate, &ia32_gp_regs[REG_GP_NOREG]);
+ arch_set_irn_register(immediate, &ia32_registers[REG_GP_NOREG]);
return immediate;
}
res = new_bd_ia32_Immediate(NULL, block, entity, sc_sign, sc_no_pic_adjust,
offset);
- arch_set_irn_register(res, &ia32_gp_regs[REG_GP_NOREG]);
+ arch_set_irn_register(res, &ia32_registers[REG_GP_NOREG]);
return res;
}
assert(is_ia32_Lea(node));
/* we can only do this if it is allowed to clobber the flags */
- if (be_peephole_get_value(CLASS_ia32_flags, REG_EFLAGS) != NULL)
+ if (be_peephole_get_value(CLASS_ia32_flags, REG_FLAGS_EFLAGS) != NULL)
return;
base = get_irn_n(node, n_ia32_Lea_base);
*/
static void peephole_ia32_Conv_I2I(ir_node *node)
{
- const arch_register_t *eax = &ia32_gp_regs[REG_EAX];
+ const arch_register_t *eax = &ia32_registers[REG_EAX];
ir_mode *smaller_mode = get_ia32_ls_mode(node);
ir_node *val = get_irn_n(node, n_ia32_Conv_I2I_val);
dbg_info *dbgi;
return initial_fpcw;
fpcw = be_abi_get_ignore_irn(be_get_irg_abi(current_ir_graph),
- &ia32_fp_cw_regs[REG_FPCW]);
+ &ia32_registers[REG_FPCW]);
initial_fpcw = be_transform_node(fpcw);
return initial_fpcw;
if (proj == pn_be_AddSP_sp) {
ir_node *res = new_rd_Proj(dbgi, new_pred, mode_Iu,
pn_ia32_SubSP_stack);
- arch_set_irn_register(res, &ia32_gp_regs[REG_ESP]);
+ arch_set_irn_register(res, &ia32_registers[REG_ESP]);
return res;
} else if (proj == pn_be_AddSP_res) {
return new_rd_Proj(dbgi, new_pred, mode_Iu,
if (proj == pn_be_SubSP_sp) {
ir_node *res = new_rd_Proj(dbgi, new_pred, mode_Iu,
pn_ia32_AddSP_stack);
- arch_set_irn_register(res, &ia32_gp_regs[REG_ESP]);
+ arch_set_irn_register(res, &ia32_registers[REG_ESP]);
return res;
} else if (proj == pn_be_SubSP_M) {
return new_rd_Proj(dbgi, new_pred, mode_M, pn_ia32_AddSP_M);
assert(req->cls == &ia32_reg_classes[CLASS_ia32_gp]);
switch (*req->limited) {
- case 1 << REG_EAX: assert(eax == noreg_GP); eax = reg_parm; break;
- case 1 << REG_ECX: assert(ecx == noreg_GP); ecx = reg_parm; break;
- case 1 << REG_EDX: assert(edx == noreg_GP); edx = reg_parm; break;
+ case 1 << REG_GP_EAX: assert(eax == noreg_GP); eax = reg_parm; break;
+ case 1 << REG_GP_ECX: assert(ecx == noreg_GP); ecx = reg_parm; break;
+ case 1 << REG_GP_EDX: assert(edx == noreg_GP); edx = reg_parm; break;
default: panic("Invalid GP register for register parameter");
}
}
/* TODO arch_set_irn_register() only operates on Projs, need variant with index */
switch (proj) {
case pn_ia32_Call_stack:
- arch_set_irn_register(res, &ia32_gp_regs[REG_ESP]);
+ arch_set_irn_register(res, &ia32_registers[REG_ESP]);
break;
case pn_ia32_Call_fpcw:
- arch_set_irn_register(res, &ia32_fp_cw_regs[REG_FPCW]);
+ arch_set_irn_register(res, &ia32_registers[REG_FPCW]);
break;
}
#include "ia32_x87.h"
#include "ia32_architecture.h"
/** Mask a stack position into the x87 register-stack range.
 *  N_ia32_st_REGS is a power of two, so the AND implements a modulo. */
#define MASK_TOS(x) ((x) & (N_ia32_st_REGS - 1))
/** the debug handle */
DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
* The x87 state.
*/
typedef struct x87_state {
- st_entry st[N_x87_REGS]; /**< the register stack */
- int depth; /**< the current stack depth */
- int tos; /**< position of the tos */
- x87_simulator *sim; /**< The simulator. */
+ st_entry st[N_ia32_st_REGS]; /**< the register stack */
+ int depth; /**< the current stack depth */
+ int tos; /**< position of the tos */
+ x87_simulator *sim; /**< The simulator. */
} x87_state;
/** An empty state, used for blocks without fp instructions. */
state->st[MASK_TOS(state->tos + pos)] = state->st[MASK_TOS(state->tos)];
state->st[MASK_TOS(state->tos)] = entry;
- DB((dbg, LEVEL_2, "After FXCH: ")); DEBUG_ONLY(x87_dump_stack(state));
+ DB((dbg, LEVEL_2, "After FXCH: "));
+ DEBUG_ONLY(x87_dump_stack(state));
} /* x87_fxch */
/**
*/
static void x87_push_dbl(x87_state *state, int reg_idx, ir_node *node)
{
- assert(state->depth < N_x87_REGS && "stack overrun");
+ assert(state->depth < N_ia32_st_REGS && "stack overrun");
++state->depth;
state->tos = MASK_TOS(state->tos - 1);
{
const arch_register_t *res = arch_get_irn_register(irn);
- assert(res->reg_class->regs == ia32_vfp_regs);
+ assert(res->reg_class == &ia32_reg_classes[CLASS_ia32_vfp]);
return res;
} /* x87_get_irn_register */
{
const arch_register_t *res = arch_irn_get_register(irn, pos);
- assert(res->reg_class->regs == ia32_vfp_regs);
+ assert(res->reg_class == &ia32_reg_classes[CLASS_ia32_vfp]);
return res;
} /* x87_irn_get_register */
+static inline const arch_register_t *get_st_reg(int index)
+{
+ return &ia32_registers[REG_ST0 + index];
+}
+
/* -------------- x87 perm --------------- */
/**
fxch = new_bd_ia32_fxch(NULL, block);
attr = get_ia32_x87_attr(fxch);
- attr->x87[0] = &ia32_st_regs[pos];
- attr->x87[2] = &ia32_st_regs[0];
+ attr->x87[0] = get_st_reg(pos);
+ attr->x87[2] = get_st_reg(0);
keep_alive(fxch);
fxch = new_bd_ia32_fxch(NULL, block);
attr = get_ia32_x87_attr(fxch);
- attr->x87[0] = &ia32_st_regs[pos];
- attr->x87[2] = &ia32_st_regs[0];
+ attr->x87[0] = get_st_reg(pos);
+ attr->x87[2] = get_st_reg(0);
keep_alive(fxch);
fpush = new_bd_ia32_fpush(NULL, get_nodes_block(n));
attr = get_ia32_x87_attr(fpush);
- attr->x87[0] = &ia32_st_regs[pos];
- attr->x87[2] = &ia32_st_regs[0];
+ attr->x87[0] = get_st_reg(pos);
+ attr->x87[2] = get_st_reg(0);
keep_alive(fpush);
sched_add_before(n, fpush);
else
fpop = new_bd_ia32_fpop(NULL, get_nodes_block(n));
attr = get_ia32_x87_attr(fpop);
- attr->x87[0] = &ia32_st_regs[0];
- attr->x87[1] = &ia32_st_regs[0];
- attr->x87[2] = &ia32_st_regs[0];
+ attr->x87[0] = get_st_reg(0);
+ attr->x87[1] = get_st_reg(0);
+ attr->x87[2] = get_st_reg(0);
keep_alive(fpop);
sched_add_before(n, fpop);
op1_idx = x87_on_stack(state, reg_index_1);
assert(op1_idx >= 0);
- op1_live_after = is_vfp_live(arch_register_get_index(op1_reg), live);
+ op1_live_after = is_vfp_live(reg_index_1, live);
attr = get_ia32_x87_attr(n);
permuted = attr->attr.data.ins_permuted;
- if (reg_index_2 != REG_VFP_NOREG) {
+ if (reg_index_2 != REG_VFP_VFP_NOREG) {
assert(!permuted);
/* second operand is a vfp register */
op2_idx = x87_on_stack(state, reg_index_2);
assert(op2_idx >= 0);
- op2_live_after = is_vfp_live(arch_register_get_index(op2_reg), live);
+ op2_live_after = is_vfp_live(reg_index_2, live);
if (op2_live_after) {
/* Second operand is live. */
}
/* patch the operation */
- attr->x87[0] = op1_reg = &ia32_st_regs[op1_idx];
- if (reg_index_2 != REG_VFP_NOREG) {
- attr->x87[1] = op2_reg = &ia32_st_regs[op2_idx];
+ attr->x87[0] = op1_reg = get_st_reg(op1_idx);
+ if (reg_index_2 != REG_VFP_VFP_NOREG) {
+ attr->x87[1] = op2_reg = get_st_reg(op2_idx);
}
- attr->x87[2] = out = &ia32_st_regs[out_idx];
+ attr->x87[2] = out = get_st_reg(out_idx);
- if (reg_index_2 != REG_VFP_NOREG) {
+ if (reg_index_2 != REG_VFP_VFP_NOREG) {
DB((dbg, LEVEL_1, "<<< %s %s, %s -> %s\n", get_irn_opname(n),
arch_register_get_name(op1_reg), arch_register_get_name(op2_reg),
arch_register_get_name(out)));
{
int op1_idx;
x87_simulator *sim = state->sim;
- const arch_register_t *op1 = x87_get_irn_register(get_irn_n(n, UNOP_IDX));
+ const arch_register_t *op1 = x87_get_irn_register(get_irn_n(n, 0));
const arch_register_t *out = x87_get_irn_register(n);
ia32_x87_attr_t *attr;
unsigned live = vfp_live_args_after(sim, n, REGMASK(out));
if (is_vfp_live(arch_register_get_index(op1), live)) {
/* push the operand here */
- x87_create_fpush(state, n, op1_idx, UNOP_IDX);
+ x87_create_fpush(state, n, op1_idx, 0);
op1_idx = 0;
}
else {
x87_set_tos(state, arch_register_get_index(out), x87_patch_insn(n, op));
attr = get_ia32_x87_attr(n);
- attr->x87[0] = op1 = &ia32_st_regs[0];
- attr->x87[2] = out = &ia32_st_regs[0];
+ attr->x87[0] = op1 = get_st_reg(0);
+ attr->x87[2] = out = get_st_reg(0);
DB((dbg, LEVEL_1, "<<< %s -> %s\n", get_irn_opname(n), out->name));
return NO_NODE_ADDED;
x87_push(state, arch_register_get_index(out), x87_patch_insn(n, op));
assert(out == x87_irn_get_register(n, res_pos));
attr = get_ia32_x87_attr(n);
- attr->x87[2] = out = &ia32_st_regs[0];
+ attr->x87[2] = out = get_st_reg(0);
DB((dbg, LEVEL_1, "<<< %s -> %s\n", get_irn_opname(n), arch_register_get_name(out)));
return NO_NODE_ADDED;
Note that we cannot test on mode_E, because floats might be 96bit ...
*/
if (get_mode_size_bits(mode) > 64 || (mode_is_int(mode) && get_mode_size_bits(mode) > 32)) {
- if (depth < N_x87_REGS) {
+ if (depth < N_ia32_st_REGS) {
/* ok, we have a free register: push + fstp */
x87_create_fpush(state, n, op2_idx, n_ia32_vfst_val);
x87_pop(state);
}
attr = get_ia32_x87_attr(n);
- attr->x87[1] = op2 = &ia32_st_regs[0];
+ attr->x87[1] = op2 = get_st_reg(0);
DB((dbg, LEVEL_1, "<<< %s %s ->\n", get_irn_opname(n), arch_register_get_name(op2)));
return insn;
x87_patch_insn(n, op_ia32_fisttp);
attr = get_ia32_x87_attr(n);
- attr->x87[1] = op2 = &ia32_st_regs[0];
+ attr->x87[1] = op2 = get_st_reg(0);
DB((dbg, LEVEL_1, "<<< %s %s ->\n", get_irn_opname(n), arch_register_get_name(op2)));
return NO_NODE_ADDED;
/* patch the operation */
x87_patch_insn(n, op_ia32_FtstFnstsw);
- reg1 = &ia32_st_regs[op1_idx];
+ reg1 = get_st_reg(op1_idx);
attr->x87[0] = reg1;
attr->x87[1] = NULL;
attr->x87[2] = NULL;
assert(op1_idx >= 0);
/* BEWARE: check for comp a,a cases, they might happen */
- if (reg_index_2 != REG_VFP_NOREG) {
+ if (reg_index_2 != REG_VFP_VFP_NOREG) {
/* second operand is a vfp register */
op2_idx = x87_on_stack(state, reg_index_2);
assert(op2_idx >= 0);
op2_idx = tmp;
}
- op1 = &ia32_st_regs[op1_idx];
+ op1 = get_st_reg(op1_idx);
attr->x87[0] = op1;
if (op2_idx >= 0) {
- op2 = &ia32_st_regs[op2_idx];
+ op2 = get_st_reg(op2_idx);
attr->x87[1] = op2;
}
attr->x87[2] = NULL;
x87_push(state, arch_register_get_index(out), res);
attr = get_ia32_x87_attr(res);
- attr->x87[2] = &ia32_st_regs[0];
+ attr->x87[2] = get_st_reg(0);
} else {
int op1_idx = x87_on_stack(state, arch_register_get_index(op1));
x87_push(state, arch_register_get_index(out), res);
attr = get_ia32_x87_attr(res);
- attr->x87[0] = &ia32_st_regs[op1_idx];
- attr->x87[2] = &ia32_st_regs[0];
+ attr->x87[0] = get_st_reg(op1_idx);
+ attr->x87[2] = get_st_reg(0);
}
arch_set_irn_register(res, out);
unsigned live;
cls = arch_get_irn_reg_class_out(n);
- if (cls->regs != ia32_vfp_regs)
+ if (cls != &ia32_reg_classes[CLASS_ia32_vfp])
return 0;
pred = get_irn_n(n, 0);
/* best case, simple remove and rename */
x87_patch_insn(n, op_ia32_Pop);
attr = get_ia32_x87_attr(n);
- attr->x87[0] = op1 = &ia32_st_regs[0];
+ attr->x87[0] = op1 = get_st_reg(0);
x87_pop(state);
x87_set_st(state, arch_register_get_index(out), n, op1_idx - 1);
}
x87_patch_insn(n, op_ia32_Pop);
attr = get_ia32_x87_attr(n);
- attr->x87[0] = op1 = &ia32_st_regs[out_idx];
+ attr->x87[0] = op1 = get_st_reg(out_idx);
x87_pop(state);
x87_set_st(state, arch_register_get_index(out), n, out_idx - 1);
#ifdef BIT
#undef BIT
#endif
-#define BIT(x) (1 << (x % 32))
+#define BIT(x) (1 << (x))
END
} elsif(defined($temp)) {
$temp .= " | ";
}
- $temp .= "BIT(REG_".uc(${reg}).")";
+ my $firstreg = uc($reg_classes{$class}[0]->{"name"});
+ my $classuc = uc($class);
+ my $reguc = uc($reg);
+ $temp .= "BIT(REG_${classuc}_${reguc})";
}
if(defined($temp)) {
push(@obst_limit_func, "${temp}");
my @regclasses; # stack for the register class variables
my $classdef; # stack to define a name for a class index
my $regdef; # stack to define a name for a register index
+my $regdef2;
+my $regcounts;
my $reginit; # stack for the register type inits
my $single_constraints_decls;
my $single_constraints;
-my $numregs;
my $class_ptr;
my $class_idx = 0;
-my $tmp;
-
my %regclass2len = ();
my %reg2class = ();
my $limitedbitsetlen = $regclass2len{$regclass};
my $arraylen = ($limitedbitsetlen+31) / 32;
+ my $firstreg = uc($reg_classes{$regclass}[0]->{"name"});
+ my $classuc = uc($regclass);
my $first = 1;
for (my $i = 0; $i < $arraylen; ++$i) {
if ($first) {
my $index = $reg2class{"$reg"}{"index"};
if ($index >= $i*32 && $index < ($i+1)*32) {
if ($i > 0) {
- $result .= "(1 << (REG_${ucname} % 32))";
+ $result .= "(1 << (REG_${classuc}_${ucname} % 32))";
} else {
- $result .= "(1 << REG_${ucname})";
+ $result .= "(1 << REG_${classuc}_${ucname})";
}
} else {
$result .= "0";
my $old_classname = $class_name;
$class_name = $arch."_".$class_name;
- $numregs = "N_".$class_name."_REGS";
$class_ptr = "&".$arch."_reg_classes[CLASS_".$class_name."]";
my $flags = pop(@class);
$class_mode = $flags->{"mode"};
};
EOF
- $regtypes_decl .= "extern const arch_register_t ${class_name}_regs[$numregs];\n";
-
$classdef .= "\tCLASS_$class_name = $class_idx,\n";
- push(@regclasses, "{ $class_idx, \"$class_name\", $numregs, NULL, ".$class_name."_regs, $flags_prepared, &${arch}_class_reg_req_${old_classname} }");
+ my $numregs = @class;
+ my $first_reg = "&${arch}_registers[REG_". uc($class[0]->{"name"}) . "]";
+ push(@regclasses, "{ $class_idx, \"$class_name\", $numregs, NULL, $first_reg, $flags_prepared, &${arch}_class_reg_req_${old_classname} }");
my $idx = 0;
$reginit .= "\t$arch\_reg_classes[CLASS_".$class_name."].mode = $class_mode;\n";
- $regtypes_def .= "const arch_register_t ${class_name}_regs[$numregs] = {\n";
-
- $regdef .= "enum reg_${class_name}_indices {\n";
+ my $lastreg;
foreach (@class) {
my $name = $_->{"name"};
my $ucname = uc($name);
# realname is name if not set by user
$_->{"realname"} = $_->{"name"} if (! exists($_->{"realname"}));
my $realname = $_->{realname};
+ my $classuc = uc($old_classname);
- $regdef .= "\tREG_${ucname},\n";
+ $regdef .= "\tREG_${ucname},\n";
+ $regdef2 .= "\tREG_${classuc}_${ucname} = $idx,\n";
$regtypes_def .= <<EOF;
{
"${realname}",
${class_ptr},
+ REG_${classuc}_${ucname},
REG_${ucname},
${type},
&${arch}_single_reg_req_${old_classname}_${name}
};
EOF
+ $lastreg = $ucname;
$idx++;
}
- $regtypes_def .= "};\n";
-
- $regdef .= "\t$numregs = $idx\n";
- $regdef .= "};\n\n";
+ $regcounts .= "\tN_${class_name}_REGS = $numregs,\n";
$class_idx++;
}
-$classdef .= "\tN_CLASSES = ".scalar(keys(%reg_classes))."\n";
-$classdef .= "};\n\n";
+my $archuc = uc($arch);
-$tmp = uc($arch);
+$classdef .= "\tN_${archuc}_CLASSES = ".scalar(keys(%reg_classes))."\n";
+$classdef .= "};\n\n";
# generate header (external usage) file
open(OUT, ">$target_h") || die("Fatal error: Could not open $target_h, reason: $!\n");
* created by: $0 $specfile $target_dir
* \@date $creation_time
*/
-#ifndef FIRM_BE_${tmp}_GEN_${tmp}_REGALLOC_IF_H
-#define FIRM_BE_${tmp}_GEN_${tmp}_REGALLOC_IF_H
+#ifndef FIRM_BE_${archuc}_GEN_${archuc}_REGALLOC_IF_H
+#define FIRM_BE_${archuc}_GEN_${archuc}_REGALLOC_IF_H
#include "../bearch.h"
#include "${arch}_nodes_attr.h"
+enum reg_indices {
${regdef}
+ N_${archuc}_REGISTERS
+};
+enum {
+${regdef2}
+};
+
+enum {
+${regcounts}
+};
${classdef}
-${regtypes_decl}
-extern arch_register_class_t ${arch}_reg_classes[N_CLASSES];
+extern const arch_register_t ${arch}_registers[N_${archuc}_REGISTERS];
+
+extern arch_register_class_t ${arch}_reg_classes[N_${archuc}_CLASSES];
void ${arch}_register_init(void);
unsigned ${arch}_get_n_regs(void);
print OUT<<EOF;
${single_constraints}
+
+const arch_register_t ${arch}_registers[] = {
${regtypes_def}
+};
void ${arch}_register_init(void)
{
static sparc_isa_t sparc_isa_template = {
{
&sparc_isa_if, /* isa interface implementation */
- &sparc_gp_regs[REG_SP], /* stack pointer register */
- &sparc_gp_regs[REG_FRAME_POINTER], /* base pointer register */
+ N_SPARC_REGISTERS,
+ sparc_registers,
+ &sparc_registers[REG_SP], /* stack pointer register */
+ &sparc_registers[REG_FRAME_POINTER],/* base pointer register */
&sparc_reg_classes[CLASS_sparc_gp], /* link pointer register class */
-1, /* stack direction */
3, /* power of two stack alignment
static unsigned sparc_get_n_reg_class(void)
{
- return N_CLASSES;
+ return N_SPARC_CLASSES;
}
static const arch_register_class_t *sparc_get_reg_class(unsigned i)
{
- assert(i < N_CLASSES);
+ assert(i < N_SPARC_CLASSES);
return &sparc_reg_classes[i];
}
*/
static const arch_register_t *map_i_to_o_reg(const arch_register_t *reg)
{
- unsigned idx = arch_register_get_index(reg);
+ unsigned idx = reg->global_index;
assert(REG_I0 <= idx && idx <= REG_I7);
idx += REG_O0 - REG_I0;
assert(REG_O0 <= idx && idx <= REG_O7);
- return &sparc_gp_regs[idx];
+ return &sparc_registers[idx];
}
calling_convention_t *sparc_decide_calling_convention(ir_type *function_type,
#include "gen_sparc_regalloc_if.h"
static const arch_register_t *const caller_saves[] = {
- &sparc_gp_regs[REG_G1],
- &sparc_gp_regs[REG_G2],
- &sparc_gp_regs[REG_G3],
- &sparc_gp_regs[REG_G4],
- &sparc_gp_regs[REG_O0],
- &sparc_gp_regs[REG_O1],
- &sparc_gp_regs[REG_O2],
- &sparc_gp_regs[REG_O3],
- &sparc_gp_regs[REG_O4],
- &sparc_gp_regs[REG_O5],
+ &sparc_registers[REG_G1],
+ &sparc_registers[REG_G2],
+ &sparc_registers[REG_G3],
+ &sparc_registers[REG_G4],
+ &sparc_registers[REG_O0],
+ &sparc_registers[REG_O1],
+ &sparc_registers[REG_O2],
+ &sparc_registers[REG_O3],
+ &sparc_registers[REG_O4],
+ &sparc_registers[REG_O5],
- &sparc_fp_regs[REG_F0],
- &sparc_fp_regs[REG_F1],
- &sparc_fp_regs[REG_F2],
- &sparc_fp_regs[REG_F3],
- &sparc_fp_regs[REG_F4],
- &sparc_fp_regs[REG_F5],
- &sparc_fp_regs[REG_F6],
- &sparc_fp_regs[REG_F7],
- &sparc_fp_regs[REG_F8],
- &sparc_fp_regs[REG_F9],
- &sparc_fp_regs[REG_F10],
- &sparc_fp_regs[REG_F11],
- &sparc_fp_regs[REG_F12],
- &sparc_fp_regs[REG_F13],
- &sparc_fp_regs[REG_F14],
- &sparc_fp_regs[REG_F15],
- &sparc_fp_regs[REG_F16],
- &sparc_fp_regs[REG_F17],
- &sparc_fp_regs[REG_F18],
- &sparc_fp_regs[REG_F19],
- &sparc_fp_regs[REG_F20],
- &sparc_fp_regs[REG_F21],
- &sparc_fp_regs[REG_F22],
- &sparc_fp_regs[REG_F23],
- &sparc_fp_regs[REG_F24],
- &sparc_fp_regs[REG_F25],
- &sparc_fp_regs[REG_F26],
- &sparc_fp_regs[REG_F27],
- &sparc_fp_regs[REG_F28],
- &sparc_fp_regs[REG_F29],
- &sparc_fp_regs[REG_F30],
- &sparc_fp_regs[REG_F31],
+ &sparc_registers[REG_F0],
+ &sparc_registers[REG_F1],
+ &sparc_registers[REG_F2],
+ &sparc_registers[REG_F3],
+ &sparc_registers[REG_F4],
+ &sparc_registers[REG_F5],
+ &sparc_registers[REG_F6],
+ &sparc_registers[REG_F7],
+ &sparc_registers[REG_F8],
+ &sparc_registers[REG_F9],
+ &sparc_registers[REG_F10],
+ &sparc_registers[REG_F11],
+ &sparc_registers[REG_F12],
+ &sparc_registers[REG_F13],
+ &sparc_registers[REG_F14],
+ &sparc_registers[REG_F15],
+ &sparc_registers[REG_F16],
+ &sparc_registers[REG_F17],
+ &sparc_registers[REG_F18],
+ &sparc_registers[REG_F19],
+ &sparc_registers[REG_F20],
+ &sparc_registers[REG_F21],
+ &sparc_registers[REG_F22],
+ &sparc_registers[REG_F23],
+ &sparc_registers[REG_F24],
+ &sparc_registers[REG_F25],
+ &sparc_registers[REG_F26],
+ &sparc_registers[REG_F27],
+ &sparc_registers[REG_F28],
+ &sparc_registers[REG_F29],
+ &sparc_registers[REG_F30],
+ &sparc_registers[REG_F31],
};
static const arch_register_t* const param_regs[] = {
- &sparc_gp_regs[REG_I0],
- &sparc_gp_regs[REG_I1],
- &sparc_gp_regs[REG_I2],
- &sparc_gp_regs[REG_I3],
- &sparc_gp_regs[REG_I4],
- &sparc_gp_regs[REG_I5],
+ &sparc_registers[REG_I0],
+ &sparc_registers[REG_I1],
+ &sparc_registers[REG_I2],
+ &sparc_registers[REG_I3],
+ &sparc_registers[REG_I4],
+ &sparc_registers[REG_I5],
};
static const arch_register_t* const float_result_regs[] = {
- &sparc_fp_regs[REG_F0],
- &sparc_fp_regs[REG_F1],
- &sparc_fp_regs[REG_F2],
- &sparc_fp_regs[REG_F3],
+ &sparc_registers[REG_F0],
+ &sparc_registers[REG_F1],
+ &sparc_registers[REG_F2],
+ &sparc_registers[REG_F3],
};
/** information about a single parameter or result */
static bool is_stack_pointer_relative(const ir_node *node)
{
- const arch_register_t *sp = &sparc_gp_regs[REG_SP];
+ const arch_register_t *sp = &sparc_registers[REG_SP];
return (is_sparc_St(node) && get_in_reg(node, n_sparc_St_ptr) == sp)
|| (is_sparc_Ld(node) && get_in_reg(node, n_sparc_Ld_ptr) == sp);
}
static const arch_register_t *get_next_fp_reg(const arch_register_t *reg)
{
unsigned index = reg->index;
- assert(reg == &sparc_fp_regs[index]);
+ assert(reg == &sparc_registers[index]);
index++;
- assert(index < N_sparc_fp_REGS);
- return &sparc_fp_regs[index];
+ assert(index - REG_F0 < N_sparc_fp_REGS);
+ return &sparc_registers[index];
}
static void emit_be_Copy(const ir_node *node)
DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
static beabi_helper_env_t *abihelper;
-static const arch_register_t *sp_reg = &sparc_gp_regs[REG_SP];
-static const arch_register_t *fp_reg = &sparc_gp_regs[REG_FRAME_POINTER];
+static const arch_register_t *sp_reg = &sparc_registers[REG_SP];
+static const arch_register_t *fp_reg = &sparc_registers[REG_FRAME_POINTER];
static calling_convention_t *cconv = NULL;
static ir_mode *mode_gp;
static ir_mode *mode_fp;
static ir_node *get_g0(void)
{
- return be_prolog_get_reg_value(abihelper, &sparc_gp_regs[REG_G0]);
+ return be_prolog_get_reg_value(abihelper, &sparc_registers[REG_G0]);
}
typedef struct address_t {
/* stackpointer is important at function prolog */
be_prolog_add_reg(abihelper, sp_reg,
arch_register_req_type_produces_sp | arch_register_req_type_ignore);
- be_prolog_add_reg(abihelper, &sparc_gp_regs[REG_G0],
+ be_prolog_add_reg(abihelper, &sparc_registers[REG_G0],
arch_register_req_type_ignore);
/* function parameters in registers */
for (i = 0; i < get_method_n_params(function_type); ++i) {