#define SNPRINTF_BUF_LEN 128
static const ia32_isa_t *isa;
-static ia32_code_gen_t *cg;
static char pic_base_label[128];
static ir_label_t exc_label_id;
static int mark_spill_reload = 0;
/** Return the previous block in the block schedule.
 *  NOTE(review): the old comment said "next block", but the link field
 *  is filled with the *previous* scheduled block (see the
 *  set_irn_link(block, prev) loops in ia32_gen_routine /
 *  ia32_gen_binary_routine), which matches the function name.
 *  The '+' hunk line only adds an explicit cast from the void* link. */
static ir_node *get_prev_block_sched(const ir_node *block)
{
- return get_irn_link(block);
+ return (ir_node*)get_irn_link(block);
}
/** Checks if the current block is a fall-through target. */
assert(reg && "no in register found");
- if (reg == &ia32_gp_regs[REG_GP_NOREG])
+ if (reg == &ia32_registers[REG_GP_NOREG])
panic("trying to emit noreg for %+F input %d", irn, pos);
return reg;
return buf;
}
-/*************************************************************
- * _ _ __ _ _
- * (_) | | / _| | | | |
- * _ __ _ __ _ _ __ | |_| |_ | |__ ___| |_ __ ___ _ __
- * | '_ \| '__| | '_ \| __| _| | '_ \ / _ \ | '_ \ / _ \ '__|
- * | |_) | | | | | | | |_| | | | | | __/ | |_) | __/ |
- * | .__/|_| |_|_| |_|\__|_| |_| |_|\___|_| .__/ \___|_|
- * | | | |
- * |_| |_|
- *************************************************************/
/**
* Emit the name of the 8bit low register
/* Return the jump-target block of a control-flow op's mode_X output;
   the destination block was stored in the node's link field beforehand.
   (The '+' hunk line only adds an explicit cast from the void* link.) */
static ir_node *get_cfop_target_block(const ir_node *irn)
{
assert(get_irn_mode(irn) == mode_X);
- return get_irn_link(irn);
+ return (ir_node*)get_irn_link(irn);
}
/**
}
/* Modifier flags for the '%' directives of the ia32_emitf() format
   string; combined as a bitset (hence ENUM_BITSET below).  The hunk
   adds EMIT_NONE so the parser can initialize 'mod' with a typed value
   instead of a raw 0 (see the 'mod = EMIT_NONE' hunk further down).
   NOTE(review): individual flag semantics (load/store suffix, alternate
   address mode, long form, high/low 8-bit register name) are suggested
   by the names only — confirm against the format-string parser. */
typedef enum ia32_emit_mod_t {
+ EMIT_NONE = 0,
EMIT_RESPECT_LS = 1U << 0,
EMIT_ALTERNATE_AM = 1U << 1,
EMIT_LONG = 1U << 2,
EMIT_HIGH_REG = 1U << 3,
EMIT_LOW_REG = 1U << 4
} ia32_emit_mod_t;
+ENUM_BITSET(ia32_emit_mod_t)
/**
* Emits address mode.
for (;;) {
const char *start = fmt;
- ia32_emit_mod_t mod = 0;
+ ia32_emit_mod_t mod = EMIT_NONE;
while (*fmt != '%' && *fmt != '\n' && *fmt != '\0')
++fmt;
return pnc;
}
-static pn_Cmp ia32_get_negated_pnc(pn_Cmp pnc)
+static int ia32_get_negated_pnc(int pnc)
{
ir_mode *mode = pnc & ia32_pn_Cmp_float ? mode_F : mode_Iu;
return get_negated_pnc(pnc, mode);
void ia32_emit_cmp_suffix_node(const ir_node *node, int flags_pos)
{
- pn_Cmp pnc = get_ia32_condcode(node);
+ int pnc = get_ia32_condcode(node);
pnc = determine_final_pnc(node, flags_pos, pnc);
ia32_emit_cmp_suffix(pnc);
int need_parity_label = 0;
const ir_node *proj_true;
const ir_node *proj_false;
- pn_Cmp pnc = get_ia32_condcode(node);
+ int pnc = get_ia32_condcode(node);
pnc = determine_final_pnc(node, 0, pnc);
{
const arch_register_t *dreg = get_out_reg(node, pn_ia32_Setcc_res);
- pn_Cmp pnc = get_ia32_condcode(node);
- pnc = determine_final_pnc(node, n_ia32_Setcc_eflags, pnc);
+ int pnc = get_ia32_condcode(node);
+ pnc = determine_final_pnc(node, n_ia32_Setcc_eflags, pnc);
if (pnc & ia32_pn_Cmp_float) {
switch (pnc & 0x0f) {
case pn_Cmp_Uo:
static void emit_ia32_CMovcc(const ir_node *node)
{
- const ia32_attr_t *attr = get_ia32_attr_const(node);
- const arch_register_t *out = arch_irn_get_register(node, pn_ia32_res);
- pn_Cmp pnc = get_ia32_condcode(node);
+ const ia32_attr_t *attr = get_ia32_attr_const(node);
+ const arch_register_t *out = arch_irn_get_register(node, pn_ia32_res);
+ int pnc = get_ia32_condcode(node);
const arch_register_t *in_true;
const arch_register_t *in_false;
ia32_emitf(node, "\tcmov%P %#AR, %#R\n", pnc, in_true, out);
}
-/*********************************************************
- * _ _ _
- * (_) | (_)
- * ___ _ __ ___ _| |_ _ _ _ _ __ ___ _ __ ___
- * / _ \ '_ ` _ \| | __| | | | | | '_ ` _ \| '_ \/ __|
- * | __/ | | | | | | |_ | | |_| | | | | | | |_) \__ \
- * \___|_| |_| |_|_|\__| | |\__,_|_| |_| |_| .__/|___/
- * _/ | | |
- * |__/ |_|
- *********************************************************/
/* jump table entry (target and corresponding number) */
/* NOTE(review): the hunk renames the struct tag from _branch_t to
   branch_t — file-scope identifiers beginning with an underscore are
   reserved for the implementation (C11 7.1.3), so the rename is correct. */
-typedef struct _branch_t {
+typedef struct branch_t {
ir_node *target;
int value;
} branch_t;
/* jump table for switch generation */
-typedef struct _jmp_tbl_t {
+typedef struct jmp_tbl_t {
ir_node *defProj; /**< default target */
long min_value; /**< smallest switch case */
long max_value; /**< largest switch case */
ia32_emitf(NULL, "\n#NO_APP\n");
}
-/**********************************
- * _____ ____
- * / ____| | _ \
- * | | ___ _ __ _ _| |_) |
- * | | / _ \| '_ \| | | | _ <
- * | |___| (_) | |_) | |_| | |_) |
- * \_____\___/| .__/ \__, |____/
- * | | __/ |
- * |_| |___/
- **********************************/
/**
* Emit movsb/w instructions to make mov count divideable by 4
}
-
-/***************************
- * _____
- * / ____|
- * | | ___ _ ____ __
- * | | / _ \| '_ \ \ / /
- * | |___| (_) | | | \ V /
- * \_____\___/|_| |_|\_/
- *
- ***************************/
-
/**
* Emit code for conversions (I, FP), (FP, I) and (FP, FP).
*/
}
-/*******************************************
- * _ _
- * | | | |
- * | |__ ___ _ __ ___ __| | ___ ___
- * | '_ \ / _ \ '_ \ / _ \ / _` |/ _ \/ __|
- * | |_) | __/ | | | (_) | (_| | __/\__ \
- * |_.__/ \___|_| |_|\___/ \__,_|\___||___/
- *
- *******************************************/
-
/**
* Emits code to increase stack pointer.
*/
}
-/***********************************************************************************
- * _ __ _
- * (_) / _| | |
- * _ __ ___ __ _ _ _ __ | |_ _ __ __ _ _ __ ___ _____ _____ _ __| | __
- * | '_ ` _ \ / _` | | '_ \ | _| '__/ _` | '_ ` _ \ / _ \ \ /\ / / _ \| '__| |/ /
- * | | | | | | (_| | | | | | | | | | | (_| | | | | | | __/\ V V / (_) | | | <
- * |_| |_| |_|\__,_|_|_| |_| |_| |_| \__,_|_| |_| |_|\___| \_/\_/ \___/|_| |_|\_\
- *
- ***********************************************************************************/
-
/**
* Enters the emitter functions for handled nodes into the generic
* pointer of an opcode.
#define IA32_EMIT2(a,b) op_ia32_##a->ops.generic = (op_func)emit_ia32_##b
#define IA32_EMIT(a) IA32_EMIT2(a,a)
#define EMIT(a) op_##a->ops.generic = (op_func)emit_##a
-#define IGN(a) op_##a->ops.generic = (op_func)emit_Nothing
+#define IGN(a) op_##a->ops.generic = (op_func)emit_Nothing
#define BE_EMIT(a) op_be_##a->ops.generic = (op_func)emit_be_##a
-#define BE_IGN(a) op_be_##a->ops.generic = (op_func)emit_Nothing
+#define BE_IGN(a) op_be_##a->ops.generic = (op_func)emit_Nothing
/* first clear the generic function pointer for all ops */
clear_irp_opcodes_generic_func();
static int should_align_block(const ir_node *block)
{
static const double DELTA = .0001;
- ir_exec_freq *exec_freq = cg->birg->exec_freq;
+ ir_graph *irg = get_irn_irg(block);
+ ir_exec_freq *exec_freq = be_get_irg_exec_freq(irg);
ir_node *prev = get_prev_block_sched(block);
double block_freq;
double prev_freq = 0; /**< execfreq of the fallthrough block */
ir_graph *irg = current_ir_graph;
int need_label = block_needs_label(block);
int i, arity;
- ir_exec_freq *exec_freq = cg->birg->exec_freq;
+ ir_exec_freq *exec_freq = be_get_irg_exec_freq(irg);
if (block == get_irg_end_block(irg))
return;
*/
static void ia32_gen_labels(ir_node *block, void *data)
{
- exc_entry **exc_list = data;
+ exc_entry **exc_list = (exc_entry**)data;
ir_node *pred;
int n;
*/
static int cmp_exc_entry(const void *a, const void *b)
{
- const exc_entry *ea = a;
- const exc_entry *eb = b;
+ const exc_entry *ea = (const exc_entry*)a;
+ const exc_entry *eb = (const exc_entry*)b;
if (get_ia32_exc_label_id(ea->exc_instr) < get_ia32_exc_label_id(eb->exc_instr))
return -1;
/**
* Main driver. Emits the code for one routine.
*/
-void ia32_gen_routine(ia32_code_gen_t *ia32_cg, ir_graph *irg)
+void ia32_gen_routine(ir_graph *irg)
{
- ir_entity *entity = get_irg_entity(irg);
- exc_entry *exc_list = NEW_ARR_F(exc_entry, 0);
+ ir_entity *entity = get_irg_entity(irg);
+ exc_entry *exc_list = NEW_ARR_F(exc_entry, 0);
+ const arch_env_t *arch_env = be_get_irg_arch_env(irg);
+ ia32_irg_data_t *irg_data = ia32_get_irg_data(irg);
+ ir_node **blk_sched = irg_data->blk_sched;
int i, n;
- cg = ia32_cg;
- isa = cg->isa;
- do_pic = cg->birg->main_env->options->pic;
+ isa = (ia32_isa_t*) arch_env;
+ do_pic = be_get_irg_options(irg)->pic;
be_gas_elf_type_char = '@';
get_unique_label(pic_base_label, sizeof(pic_base_label), "PIC_BASE");
- be_dbg_method_begin(entity, be_abi_get_stack_layout(cg->birg->abi));
+ be_dbg_method_begin(entity);
be_gas_emit_function_prolog(entity, ia32_cg_config.function_alignment);
/* we use links to point to target blocks */
irg_block_walk_graph(irg, ia32_gen_labels, NULL, &exc_list);
/* initialize next block links */
- n = ARR_LEN(cg->blk_sched);
+ n = ARR_LEN(blk_sched);
for (i = 0; i < n; ++i) {
- ir_node *block = cg->blk_sched[i];
- ir_node *prev = i > 0 ? cg->blk_sched[i-1] : NULL;
+ ir_node *block = blk_sched[i];
+ ir_node *prev = i > 0 ? blk_sched[i-1] : NULL;
set_irn_link(block, prev);
}
for (i = 0; i < n; ++i) {
- ir_node *block = cg->blk_sched[i];
+ ir_node *block = blk_sched[i];
ia32_gen_block(block);
}
static void build_reg_map(void)
{
- reg_gp_map[REG_EAX] = 0x0;
- reg_gp_map[REG_ECX] = 0x1;
- reg_gp_map[REG_EDX] = 0x2;
- reg_gp_map[REG_EBX] = 0x3;
- reg_gp_map[REG_ESP] = 0x4;
- reg_gp_map[REG_EBP] = 0x5;
- reg_gp_map[REG_ESI] = 0x6;
- reg_gp_map[REG_EDI] = 0x7;
+ reg_gp_map[REG_GP_EAX] = 0x0;
+ reg_gp_map[REG_GP_ECX] = 0x1;
+ reg_gp_map[REG_GP_EDX] = 0x2;
+ reg_gp_map[REG_GP_EBX] = 0x3;
+ reg_gp_map[REG_GP_ESP] = 0x4;
+ reg_gp_map[REG_GP_EBP] = 0x5;
+ reg_gp_map[REG_GP_ESI] = 0x6;
+ reg_gp_map[REG_GP_EDI] = 0x7;
pnc_map_signed[pn_Cmp_Eq] = 0x04;
pnc_map_signed[pn_Cmp_Lt] = 0x0C;
bemit_mod_am(ruval, node);
} else {
const arch_register_t *reg = get_in_reg(node, n_ia32_binary_left);
- if (reg->index == REG_EAX) {
+ if (reg->index == REG_GP_EAX) {
bemit8(opcode_ax);
} else {
bemit8(opcode);
assert(cls0 == arch_register_get_class(in1) && "Register class mismatch at Perm");
if (cls0 == &ia32_reg_classes[CLASS_ia32_gp]) {
- if (in0->index == REG_EAX) {
+ if (in0->index == REG_GP_EAX) {
bemit8(0x90 + reg_gp_map[in1->index]);
- } else if (in1->index == REG_EAX) {
+ } else if (in1->index == REG_GP_EAX) {
bemit8(0x90 + reg_gp_map[in0->index]);
} else {
bemit8(0x87);
{
const arch_register_t *dreg = get_out_reg(node, pn_ia32_Setcc_res);
- pn_Cmp pnc = get_ia32_condcode(node);
- pnc = determine_final_pnc(node, n_ia32_Setcc_eflags, pnc);
+ int pnc = get_ia32_condcode(node);
+ pnc = determine_final_pnc(node, n_ia32_Setcc_eflags, pnc);
if (pnc & ia32_pn_Cmp_float) {
switch (pnc & 0x0f) {
case pn_Cmp_Uo:
const ia32_attr_t *attr = get_ia32_attr_const(node);
int ins_permuted = attr->data.ins_permuted;
const arch_register_t *out = arch_irn_get_register(node, pn_ia32_res);
- pn_Cmp pnc = get_ia32_condcode(node);
+ int pnc = get_ia32_condcode(node);
const arch_register_t *in_true;
const arch_register_t *in_false;
bemit_mod_am(7, node);
} else {
const arch_register_t *reg = get_in_reg(node, n_ia32_binary_left);
- if (reg->index == REG_EAX) {
+ if (reg->index == REG_GP_EAX) {
bemit8(0x3D);
} else {
bemit8(0x81);
if (is_ia32_Immediate(right)) {
if (get_ia32_op_type(node) == ia32_Normal) {
const arch_register_t *out = get_in_reg(node, n_ia32_Cmp_left);
- if (out->index == REG_EAX) {
+ if (out->index == REG_GP_EAX) {
bemit8(0x3C);
} else {
bemit8(0x80);
if (is_ia32_Immediate(right)) {
if (get_ia32_op_type(node) == ia32_Normal) {
const arch_register_t *out = get_in_reg(node, n_ia32_Test8Bit_left);
- if (out->index == REG_EAX) {
+ if (out->index == REG_GP_EAX) {
bemit8(0xA8);
} else {
bemit8(0xF6);
const arch_register_t *out = get_out_reg(node, 0);
bemit8(0x65); // gs:
- if (out->index == REG_EAX) {
+ if (out->index == REG_GP_EAX) {
bemit8(0xA1); // movl 0, %eax
} else {
bemit8(0x8B); // movl 0, %reg
/* helper function for bemit_minus64bit */
static void bemit_helper_xchg(const arch_register_t *src, const arch_register_t *dst)
{
- if (src->index == REG_EAX) {
+ if (src->index == REG_GP_EAX) {
bemit8(0x90 + reg_gp_map[dst->index]); // xchgl %eax, %dst
- } else if (dst->index == REG_EAX) {
+ } else if (dst->index == REG_GP_EAX) {
bemit8(0x90 + reg_gp_map[src->index]); // xchgl %src, %eax
} else {
bemit8(0x87); // xchgl %src, %dst
{
const arch_register_t *out = get_out_reg(node, 0);
- if (out->index == REG_EAX) {
+ if (out->index == REG_GP_EAX) {
ir_node *base = get_irn_n(node, n_ia32_base);
int has_base = !is_ia32_NoReg_GP(base);
ir_node *index = get_irn_n(node, n_ia32_index);
} else {
const arch_register_t *in = get_in_reg(node, n_ia32_Store_val);
- if (in->index == REG_EAX) {
+ if (in->index == REG_GP_EAX) {
ir_node *base = get_irn_n(node, n_ia32_base);
int has_base = !is_ia32_NoReg_GP(base);
ir_node *index = get_irn_n(node, n_ia32_index);
}
}
/**
 * Emit the raw binary encoding for one routine.
 *
 * NOTE(review): this hunk drops the ia32_code_gen_t parameter and the
 * global 'cg'; the block schedule now comes from the per-irg data
 * (ia32_get_irg_data(irg)->blk_sched) and 'isa' is derived from the
 * irg's arch_env.  The first loop chains every scheduled block to its
 * schedule predecessor via the link field (consumed by
 * get_prev_block_sched); the second loop emits the blocks in schedule
 * order.
 */
-void ia32_gen_binary_routine(ia32_code_gen_t *ia32_cg, ir_graph *irg)
+void ia32_gen_binary_routine(ir_graph *irg)
{
- ir_entity *entity = get_irg_entity(irg);
+ ir_entity *entity = get_irg_entity(irg);
+ const arch_env_t *arch_env = be_get_irg_arch_env(irg);
+ ia32_irg_data_t *irg_data = ia32_get_irg_data(irg);
+ ir_node **blk_sched = irg_data->blk_sched;
int i, n;
- cg = ia32_cg;
- isa = cg->isa;
+ isa = (ia32_isa_t*) arch_env;
ia32_register_binary_emitters();
irg_block_walk_graph(irg, ia32_gen_labels, NULL, NULL);
/* link each block to its schedule *predecessor* (read back by
   get_prev_block_sched) */
- n = ARR_LEN(cg->blk_sched);
+ n = ARR_LEN(blk_sched);
for (i = 0; i < n; ++i) {
- ir_node *block = cg->blk_sched[i];
- ir_node *prev = i > 0 ? cg->blk_sched[i-1] : NULL;
+ ir_node *block = blk_sched[i];
+ ir_node *prev = i > 0 ? blk_sched[i-1] : NULL;
set_irn_link(block, prev);
}
for (i = 0; i < n; ++i) {
- ir_node *block = cg->blk_sched[i];
+ ir_node *block = blk_sched[i];
gen_binary_block(block);
}
}
-
-
void ia32_init_emitter(void)
{
lc_opt_entry_t *be_grp;