static char pic_base_label[128];
static ir_label_t exc_label_id;
+/** Return the next block in Block schedule */
+static ir_node *get_prev_block_sched(const ir_node *block)
+{
+ return get_irn_link(block);
+}
+
+static int is_fallthrough(const ir_node *cfgpred)
+{
+ ir_node *pred;
+
+ if (!is_Proj(cfgpred))
+ return 1;
+ pred = get_Proj_pred(cfgpred);
+ if (is_ia32_SwitchJmp(pred))
+ return 0;
+
+ return 1;
+}
+
+static int block_needs_label(const ir_node *block)
+{
+ int need_label = 1;
+ int n_cfgpreds = get_Block_n_cfgpreds(block);
+
+ if (n_cfgpreds == 0) {
+ need_label = 0;
+ } else if (n_cfgpreds == 1) {
+ ir_node *cfgpred = get_Block_cfgpred(block, 0);
+ ir_node *cfgpred_block = get_nodes_block(cfgpred);
+
+ if (get_prev_block_sched(block) == cfgpred_block
+ && is_fallthrough(cfgpred)) {
+ need_label = 0;
+ }
+ }
+
+ return need_label;
+}
+
/**
* Returns the register at in position pos.
*/
assert(reg && "no in register found");
- if(reg == &ia32_gp_regs[REG_GP_NOREG])
+ if (reg == &ia32_gp_regs[REG_GP_NOREG])
panic("trying to emit noreg for %+F input %d", irn, pos);
/* in case of unknown register: just return a valid register */
{
const char *reg_name;
- if(mode != NULL) {
+ if (mode != NULL) {
int size = get_mode_size_bits(mode);
- if(size == 8) {
- emit_8bit_register(reg);
- return;
- } else if(size == 16) {
- emit_16bit_register(reg);
- return;
- } else {
- assert(mode_is_float(mode) || size == 32);
+ switch (size) {
+ case 8: emit_8bit_register(reg); return;
+ case 16: emit_16bit_register(reg); return;
}
+ assert(mode_is_float(mode) || size == 32);
}
reg_name = arch_register_get_name(reg);
/** Emits the name of the in register at position @p pos of node @p node. */
void ia32_emit_source_register(const ir_node *node, int pos)
{
	emit_register(get_in_reg(node, pos), NULL);
}
{
const arch_register_t *reg;
ir_node *in = get_irn_n(node, pos);
- if(is_ia32_Immediate(in)) {
+ if (is_ia32_Immediate(in)) {
emit_ia32_Immediate(in);
return;
}
static void ia32_emit_mode_suffix_mode(const ir_mode *mode)
{
- if(mode_is_float(mode)) {
+ if (mode_is_float(mode)) {
switch(get_mode_size_bits(mode)) {
case 32: be_emit_char('s'); return;
case 64: be_emit_char('l'); return;
} else {
assert(mode_is_int(mode) || mode_is_reference(mode));
switch(get_mode_size_bits(mode)) {
+ /* gas docu says q is the suffix but gcc, objdump and icc use ll
+ * apparently */
case 64: be_emit_cstring("ll"); return;
- /* gas docu says q is the suffix but gcc, objdump and icc use
- ll apparently */
- case 32: be_emit_char('l'); return;
- case 16: be_emit_char('w'); return;
- case 8: be_emit_char('b'); return;
+ case 32: be_emit_char('l'); return;
+ case 16: be_emit_char('w'); return;
+ case 8: be_emit_char('b'); return;
}
}
- panic("Can't output mode_suffix for %+F\n", mode);
+ panic("Can't output mode_suffix for %+F", mode);
}
void ia32_emit_mode_suffix(const ir_node *node)
{
ir_mode *mode = get_ia32_ls_mode(node);
- if(mode == NULL)
+ if (mode == NULL)
mode = mode_Iu;
ia32_emit_mode_suffix_mode(mode);
void ia32_emit_x87_mode_suffix(const ir_node *node)
{
- ir_mode *mode = get_ia32_ls_mode(node);
- assert(mode != NULL);
/* we only need to emit the mode on address mode */
- if(get_ia32_op_type(node) != ia32_Normal)
+ if (get_ia32_op_type(node) != ia32_Normal) {
+ ir_mode *mode = get_ia32_ls_mode(node);
+ assert(mode != NULL);
ia32_emit_mode_suffix_mode(mode);
+ }
}
/**
 * Returns the SSE instruction suffix character for a float mode:
 * 's' for 32 bit (single), 'd' for 64 bit (double).
 */
static char get_xmm_mode_suffix(ir_mode *mode)
{
	int bits = get_mode_size_bits(mode);

	assert(mode_is_float(mode));
	if (bits == 32)
		return 's';
	if (bits == 64)
		return 'd';
	panic("Invalid XMM mode");
}
void ia32_emit_xmm_mode_suffix(const ir_node *node)
/**
 * Emits the extension suffix ('s' for sign-, 'z' for zero-extension);
 * 32 bit values need no extension and get no suffix.
 */
void ia32_emit_extend_suffix(const ir_mode *mode)
{
	if (get_mode_size_bits(mode) != 32) {
		if (mode_is_signed(mode)) {
			be_emit_char('s');
		} else {
			be_emit_char('z');
		}
	}
}
void ia32_emit_source_register_or_immediate(const ir_node *node, int pos)
{
ir_node *in = get_irn_n(node, pos);
- if(is_ia32_Immediate(in)) {
+ if (is_ia32_Immediate(in)) {
emit_ia32_Immediate(in);
} else {
const ir_mode *mode = get_ia32_ls_mode(node);
/**
* Emits registers and/or address mode of a binary operation.
*/
-void ia32_emit_binop(const ir_node *node) {
+void ia32_emit_binop(const ir_node *node)
+{
const ir_node *right_op = get_irn_n(node, n_ia32_binary_right);
const ir_mode *mode = get_ia32_ls_mode(node);
const arch_register_t *reg_left;
switch(get_ia32_op_type(node)) {
case ia32_Normal:
reg_left = get_in_reg(node, n_ia32_binary_left);
- if(is_ia32_Immediate(right_op)) {
+ if (is_ia32_Immediate(right_op)) {
emit_ia32_Immediate(right_op);
be_emit_cstring(", ");
emit_register(reg_left, mode);
}
break;
case ia32_AddrModeS:
- if(is_ia32_Immediate(right_op)) {
+ if (is_ia32_Immediate(right_op)) {
emit_ia32_Immediate(right_op);
be_emit_cstring(", ");
ia32_emit_am(node);
/**
* Emits registers and/or address mode of a binary operation.
*/
-void ia32_emit_x87_binop(const ir_node *node) {
+void ia32_emit_x87_binop(const ir_node *node)
+{
switch(get_ia32_op_type(node)) {
case ia32_Normal:
{
/**
* Emits registers and/or address mode of a unary operation.
*/
-void ia32_emit_unop(const ir_node *node, int pos) {
+void ia32_emit_unop(const ir_node *node, int pos)
+{
const ir_node *op;
switch(get_ia32_op_type(node)) {
/**
* Emits address mode.
*/
-void ia32_emit_am(const ir_node *node) {
+void ia32_emit_am(const ir_node *node)
+{
ir_entity *ent = get_ia32_am_sc(node);
int offs = get_ia32_am_offs_int(node);
- ir_node *base = get_irn_n(node, 0);
+ ir_node *base = get_irn_n(node, n_ia32_base);
int has_base = !is_ia32_NoReg_GP(base);
- ir_node *index = get_irn_n(node, 1);
+ ir_node *index = get_irn_n(node, n_ia32_index);
int has_index = !is_ia32_NoReg_GP(index);
/* just to be sure... */
ia32_emit_entity(ent, 0);
}
- if(offs != 0) {
- if(ent != NULL) {
+ /* also handle special case if nothing is set */
+ if (offs != 0 || (ent == NULL && !has_base && !has_index)) {
+ if (ent != NULL) {
be_emit_irprintf("%+d", offs);
} else {
be_emit_irprintf("%d", offs);
scale = get_ia32_am_scale(node);
if (scale > 0) {
- be_emit_irprintf(",%d", 1 << get_ia32_am_scale(node));
+ be_emit_irprintf(",%d", 1 << scale);
}
}
be_emit_char(')');
}
-
- /* special case if nothing is set */
- if(ent == NULL && offs == 0 && !has_base && !has_index) {
- be_emit_char('0');
- }
}
static void emit_ia32_IMul(const ir_node *node)
ia32_emit_binop(node);
/* do we need the 3-address form? */
- if(is_ia32_NoReg_GP(left) ||
+ if (is_ia32_NoReg_GP(left) ||
get_in_reg(node, n_ia32_IMul_left) != out_reg) {
be_emit_cstring(", ");
emit_register(out_reg, get_ia32_ls_mode(node));
*/
/**
 * Walks backwards over value-preserving nodes (copies, perms, spills and
 * reloads, phis) to find the node that originally produced a value.
 * Uses the visited flag to avoid running in circles; the caller must call
 * inc_irg_visited() before the first invocation.
 * Returns NULL when only already-visited nodes are reachable.
 */
static ir_node *find_original_value(ir_node *node)
{
	for (;;) {
		if (irn_visited(node))
			return NULL;
		mark_irn_visited(node);

		if (be_is_Copy(node)) {
			node = be_get_Copy_op(node);
		} else if (be_is_CopyKeep(node)) {
			node = be_get_CopyKeep_op(node);
		} else if (is_Proj(node)) {
			ir_node *pred = get_Proj_pred(node);
			if (be_is_Perm(pred)) {
				node = get_irn_n(pred, get_Proj_proj(node));
			} else if (be_is_MemPerm(pred)) {
				/* MemPerm has an extra (frame) input before the values */
				node = get_irn_n(pred, get_Proj_proj(node) + 1);
			} else if (is_ia32_Load(pred)) {
				node = get_irn_n(pred, n_ia32_Load_mem);
			} else {
				return node;
			}
		} else if (is_ia32_Store(node)) {
			node = get_irn_n(node, n_ia32_Store_val);
		} else if (is_Phi(node)) {
			/* try all phi predecessors; real recursion needed here */
			int i;
			int arity = get_irn_arity(node);
			for (i = 0; i < arity; ++i) {
				ir_node *res = find_original_value(get_irn_n(node, i));
				if (res != NULL)
					return res;
			}
			return NULL;
		} else {
			return node;
		}
	}
}
const ia32_attr_t *flags_attr;
flags = skip_Proj(flags);
- if(is_ia32_Sahf(flags)) {
+ if (is_ia32_Sahf(flags)) {
ir_node *cmp = get_irn_n(flags, n_ia32_Sahf_val);
- if(!(is_ia32_FucomFnstsw(cmp) || is_ia32_FucompFnstsw(cmp)
+ if (!(is_ia32_FucomFnstsw(cmp) || is_ia32_FucompFnstsw(cmp)
|| is_ia32_FucomppFnstsw(cmp) || is_ia32_FtstFnstsw(cmp))) {
+ inc_irg_visited(current_ir_graph);
cmp = find_original_value(cmp);
+ assert(cmp != NULL);
assert(is_ia32_FucomFnstsw(cmp) || is_ia32_FucompFnstsw(cmp)
|| is_ia32_FucomppFnstsw(cmp) || is_ia32_FtstFnstsw(cmp));
}
flags_attr = get_ia32_attr_const(cmp);
- if(flags_attr->data.ins_permuted)
+ if (flags_attr->data.ins_permuted)
pnc = get_mirrored_pnc(pnc);
pnc |= ia32_pn_Cmp_float;
- } else if(is_ia32_Ucomi(flags) || is_ia32_Fucomi(flags)
+ } else if (is_ia32_Ucomi(flags) || is_ia32_Fucomi(flags)
|| is_ia32_Fucompi(flags)) {
flags_attr = get_ia32_attr_const(flags);
- if(flags_attr->data.ins_permuted)
+ if (flags_attr->data.ins_permuted)
pnc = get_mirrored_pnc(pnc);
pnc |= ia32_pn_Cmp_float;
} else {
#endif
flags_attr = get_ia32_attr_const(flags);
- if(flags_attr->data.ins_permuted)
+ if (flags_attr->data.ins_permuted)
pnc = get_mirrored_pnc(pnc);
- if(flags_attr->data.cmp_unsigned)
+ if (flags_attr->data.cmp_unsigned)
pnc |= ia32_pn_Cmp_unsigned;
}
{
const char *str;
- if((pnc & ia32_pn_Cmp_float) || (pnc & ia32_pn_Cmp_unsigned)) {
+ if ((pnc & ia32_pn_Cmp_float) || (pnc & ia32_pn_Cmp_unsigned)) {
pnc = pnc & 7;
assert(cmp2condition_u[pnc].num == pnc);
str = cmp2condition_u[pnc].name;
pn_Cmp pnc = get_ia32_condcode(node);
pnc = determine_final_pnc(node, flags_pos, pnc);
- if(attr->data.ins_permuted) {
- if(pnc & ia32_pn_Cmp_float) {
+ if (attr->data.ins_permuted) {
+ if (pnc & ia32_pn_Cmp_float) {
pnc = get_negated_pnc(pnc, mode_F);
} else {
pnc = get_negated_pnc(pnc, mode_Iu);
/**
* Returns the target block for a control flow node.
*/
-static ir_node *get_cfop_target_block(const ir_node *irn) {
+static ir_node *get_cfop_target_block(const ir_node *irn)
+{
+ assert(get_irn_mode(irn) == mode_X);
return get_irn_link(irn);
}
ia32_emit_block_name(block);
}
-/** Return the next block in Block schedule */
-static ir_node *next_blk_sched(const ir_node *block)
-{
- return get_irn_link(block);
-}
-
/**
* Returns the Proj with projection number proj and NOT mode_M
*/
-static ir_node *get_proj(const ir_node *node, long proj) {
+static ir_node *get_proj(const ir_node *node, long proj)
+{
const ir_edge_t *edge;
ir_node *src;
return NULL;
}
+static int can_be_fallthrough(const ir_node *node)
+{
+ ir_node *target_block = get_cfop_target_block(node);
+ ir_node *block = get_nodes_block(node);
+ return get_prev_block_sched(target_block) == block;
+}
+
/**
* Emits the jump sequence for a conditional jump (cmp + jmp_true + jmp_false)
*/
const ir_node *proj_true;
const ir_node *proj_false;
const ir_node *block;
- const ir_node *next_block;
pn_Cmp pnc = get_ia32_condcode(node);
pnc = determine_final_pnc(node, 0, pnc);
assert(proj_false && "Jcc without false Proj");
block = get_nodes_block(node);
- next_block = next_blk_sched(block);
- if (get_cfop_target_block(proj_true) == next_block) {
+ if (can_be_fallthrough(proj_true)) {
/* exchange both proj's so the second one can be omitted */
const ir_node *t = proj_true;
proj_true = proj_false;
proj_false = t;
- if(pnc & ia32_pn_Cmp_float) {
+ if (pnc & ia32_pn_Cmp_float) {
pnc = get_negated_pnc(pnc, mode_F);
} else {
pnc = get_negated_pnc(pnc, mode_Iu);
case pn_Cmp_Le:
/* we need a local label if the false proj is a fallthrough
* as the falseblock might have no label emitted then */
- if (get_cfop_target_block(proj_false) == next_block) {
+ if (can_be_fallthrough(proj_false)) {
need_parity_label = 1;
be_emit_cstring("\tjp 1f");
} else {
be_emit_finish_line_gas(proj_true);
}
- if(need_parity_label) {
+ if (need_parity_label) {
be_emit_cstring("1:");
be_emit_write_line();
}
/* the second Proj might be a fallthrough */
- if (get_cfop_target_block(proj_false) != next_block) {
- be_emit_cstring("\tjmp ");
+ if (can_be_fallthrough(proj_false)) {
+ be_emit_cstring("\t/* fallthrough to ");
ia32_emit_cfop_target(proj_false);
+ be_emit_cstring(" */");
be_emit_finish_line_gas(proj_false);
} else {
- be_emit_cstring("\t/* fallthrough to ");
+ be_emit_cstring("\tjmp ");
ia32_emit_cfop_target(proj_false);
- be_emit_cstring(" */");
be_emit_finish_line_gas(proj_false);
}
}
get_irn_n(node, n_ia32_CMov_val_false));
/* should be same constraint fullfilled? */
- if(out == in_false) {
+ if (out == in_false) {
/* yes -> nothing to do */
- } else if(out == in_true) {
+ } else if (out == in_true) {
const arch_register_t *tmp;
assert(get_ia32_op_type(node) == ia32_Normal);
be_emit_finish_line_gas(node);
}
- if(ins_permuted) {
- if(pnc & ia32_pn_Cmp_float) {
+ if (ins_permuted) {
+ if (pnc & ia32_pn_Cmp_float) {
pnc = get_negated_pnc(pnc, mode_F);
} else {
pnc = get_negated_pnc(pnc, mode_Iu);
be_emit_cstring("\tcmov");
ia32_emit_cmp_suffix(pnc);
be_emit_char(' ');
- if(get_ia32_op_type(node) == ia32_AddrModeS) {
+ if (get_ia32_op_type(node) == ia32_AddrModeS) {
ia32_emit_am(node);
} else {
emit_register(in_true, get_ia32_ls_mode(node));
/**
* Compare two variables of type branch_t. Used to sort all switch cases
*/
-static int ia32_cmp_branch_t(const void *a, const void *b) {
+static int ia32_cmp_branch_t(const void *a, const void *b)
+{
branch_t *b1 = (branch_t *)a;
branch_t *b2 = (branch_t *)b;
*/
/**
 * Emits code for a Jmp: either a real jmp instruction, or only a
 * fallthrough comment when the target block directly follows this one
 * in the block schedule.
 */
static void emit_Jmp(const ir_node *node)
{
	/* the local 'block' of the old code was dead after the rewrite to
	 * can_be_fallthrough(); it has been removed */
	if (can_be_fallthrough(node)) {
		be_emit_cstring("\t/* fallthrough to ");
		ia32_emit_cfop_target(node);
		be_emit_cstring(" */");
	} else {
		be_emit_cstring("\tjmp ");
		ia32_emit_cfop_target(node);
	}
	be_emit_finish_line_gas(node);
}
const ia32_immediate_attr_t *attr = get_ia32_immediate_attr_const(node);
be_emit_char('$');
- if(attr->symconst != NULL) {
- if(attr->sc_sign)
+ if (attr->symconst != NULL) {
+ if (attr->sc_sign)
be_emit_char('-');
ia32_emit_entity(attr->symconst, 0);
}
- if(attr->symconst == NULL || attr->offset != 0) {
- if(attr->symconst != NULL) {
+ if (attr->symconst == NULL || attr->offset != 0) {
+ if (attr->symconst != NULL) {
be_emit_irprintf("%+d", attr->offset);
} else {
be_emit_irprintf("0x%X", attr->offset);
/* parse number */
sscanf(s, "%d%n", &num, &p);
- if(num < 0) {
+ if (num < 0) {
ir_fprintf(stderr, "Warning: Couldn't parse assembler operand (%+F)\n",
node);
return s;
s += p;
}
- if(num < 0 || num >= ARR_LEN(asm_regs)) {
+ if (num < 0 || num >= ARR_LEN(asm_regs)) {
ir_fprintf(stderr, "Error: Custom assembler references invalid "
"input/output (%+F)\n", node);
return s;
assert(asm_reg->valid);
/* get register */
- if(asm_reg->use_input == 0) {
+ if (asm_reg->use_input == 0) {
reg = get_out_reg(node, asm_reg->inout_pos);
} else {
ir_node *pred = get_irn_n(node, asm_reg->inout_pos);
/* might be an immediate value */
- if(is_ia32_Immediate(pred)) {
+ if (is_ia32_Immediate(pred)) {
emit_ia32_Immediate(pred);
return s;
}
reg = get_in_reg(node, asm_reg->inout_pos);
}
- if(reg == NULL) {
+ if (reg == NULL) {
ir_fprintf(stderr, "Warning: no register assigned for %d asm op "
"(%+F)\n", num, node);
return s;
}
- if(asm_reg->memory) {
+ if (asm_reg->memory) {
be_emit_char('(');
}
/* emit it */
- if(modifier != 0) {
+ if (modifier != 0) {
be_emit_char('%');
switch(modifier) {
case 'b':
emit_register(reg, asm_reg->mode);
}
- if(asm_reg->memory) {
+ if (asm_reg->memory) {
be_emit_char(')');
}
ident *asm_text = attr->asm_text;
const char *s = get_id_str(asm_text);
- be_emit_cstring("# Begin ASM \t");
+ be_emit_cstring("#APP\t");
be_emit_finish_line_gas(node);
if (s[0] != '\t')
be_emit_char('\t');
while(*s != 0) {
- if(*s == '%') {
+ if (*s == '%') {
s = emit_asm_operand(node, s);
- continue;
} else {
- be_emit_char(*s);
+ be_emit_char(*s++);
}
- ++s;
}
be_emit_char('\n');
be_emit_write_line();
- be_emit_cstring("# End ASM\n");
+ be_emit_cstring("#NO_APP\n");
be_emit_write_line();
}
/**
* Emit movsb/w instructions to make mov count divideable by 4
*/
-static void emit_CopyB_prolog(unsigned size) {
+static void emit_CopyB_prolog(unsigned size)
+{
be_emit_cstring("\tcld");
be_emit_finish_line_gas(NULL);
be_emit_cstring("\tcvt");
- if(is_ia32_Conv_I2FP(node)) {
- if(ls_bits == 32) {
+ if (is_ia32_Conv_I2FP(node)) {
+ if (ls_bits == 32) {
be_emit_cstring("si2ss");
} else {
be_emit_cstring("si2sd");
}
- } else if(is_ia32_Conv_FP2I(node)) {
- if(ls_bits == 32) {
+ } else if (is_ia32_Conv_FP2I(node)) {
+ if (ls_bits == 32) {
be_emit_cstring("ss2si");
} else {
be_emit_cstring("sd2si");
}
} else {
assert(is_ia32_Conv_FP2FP(node));
- if(ls_bits == 32) {
+ if (ls_bits == 32) {
be_emit_cstring("sd2ss");
} else {
be_emit_cstring("ss2sd");
const arch_register_t *in_reg, *out_reg;
assert(!mode_is_float(smaller_mode));
- assert(smaller_bits == 8 || smaller_bits == 16 || smaller_bits == 32);
+ assert(smaller_bits == 8 || smaller_bits == 16);
signed_mode = mode_is_signed(smaller_mode);
- if(smaller_bits == 32) {
- // this should not happen as it's no convert
- assert(0);
- sign_suffix = "";
- } else {
- sign_suffix = signed_mode ? "s" : "z";
- }
+ sign_suffix = signed_mode ? "s" : "z";
out_reg = get_out_reg(node, 0);
break;
}
default:
- assert(0 && "unsupported op type for Conv");
+ panic("unsupported op type for Conv");
}
be_emit_finish_line_gas(node);
}
const arch_register_t *out = arch_get_irn_register(arch_env, node);
ir_mode *mode;
- if(in == out) {
+ if (in == out) {
return;
}
- if(is_unknown_reg(in))
+ if (is_unknown_reg(in))
return;
/* copies of vf nodes aren't real... */
- if(arch_register_get_class(in) == &ia32_reg_classes[CLASS_ia32_vfp])
+ if (arch_register_get_class(in) == &ia32_reg_classes[CLASS_ia32_vfp])
return;
mode = get_irn_mode(node);
} else if (cls0 == &ia32_reg_classes[CLASS_ia32_st]) {
/* is a NOP */
} else {
- panic("unexpected register class in be_Perm (%+F)\n", node);
+ panic("unexpected register class in be_Perm (%+F)", node);
}
}
* Enters the emitter functions for handled nodes into the generic
* pointer of an opcode.
*/
-static void ia32_register_emitters(void) {
-
+static void ia32_register_emitters(void)
+{
#define IA32_EMIT2(a,b) op_ia32_##a->ops.generic = (op_func)emit_ia32_##b
#define IA32_EMIT(a) IA32_EMIT2(a,a)
#define EMIT(a) op_##a->ops.generic = (op_func)emit_##a
/**
* Emits code for a node.
*/
-static void ia32_emit_node(const ir_node *node)
+static void ia32_emit_node(ir_node *node)
{
ir_op *op = get_irn_op(node);
* 16 bytes. However we should only do that if the alignment nops before the
* label aren't executed more often than we have jumps to the label.
*/
-static int should_align_block(ir_node *block, ir_node *prev)
+static int should_align_block(const ir_node *block)
{
static const double DELTA = .0001;
ir_exec_freq *exec_freq = cg->birg->exec_freq;
+ ir_node *prev = get_prev_block_sched(block);
double block_freq;
double prev_freq = 0; /**< execfreq of the fallthrough block */
double jmp_freq = 0; /**< execfreq of all non-fallthrough blocks */
int i, n_cfgpreds;
- if(exec_freq == NULL)
+ if (exec_freq == NULL)
return 0;
- if(ia32_cg_config.label_alignment_factor <= 0)
+ if (ia32_cg_config.label_alignment_factor <= 0)
return 0;
block_freq = get_block_execfreq(exec_freq, block);
- if(block_freq < DELTA)
+ if (block_freq < DELTA)
return 0;
n_cfgpreds = get_Block_n_cfgpreds(block);
for(i = 0; i < n_cfgpreds; ++i) {
- ir_node *pred = get_Block_cfgpred_block(block, i);
- double pred_freq = get_block_execfreq(exec_freq, pred);
+ const ir_node *pred = get_Block_cfgpred_block(block, i);
+ double pred_freq = get_block_execfreq(exec_freq, pred);
- if(pred == prev) {
+ if (pred == prev) {
prev_freq += pred_freq;
} else {
jmp_freq += pred_freq;
}
}
- if(prev_freq < DELTA && !(jmp_freq < DELTA))
+ if (prev_freq < DELTA && !(jmp_freq < DELTA))
return 1;
jmp_freq /= prev_freq;
return jmp_freq > ia32_cg_config.label_alignment_factor;
}
-/**
- * Return non-zero, if a instruction in a fall-through.
- */
-static int is_fallthrough(ir_node *cfgpred)
-{
- ir_node *pred;
-
- if(!is_Proj(cfgpred))
- return 1;
- pred = get_Proj_pred(cfgpred);
- if(is_ia32_SwitchJmp(pred))
- return 0;
-
- return 1;
-}
-
/**
* Emit the block header for a block.
*
* @param block the block
* @param prev_block the previous block
*/
-static void ia32_emit_block_header(ir_node *block, ir_node *prev_block)
+static void ia32_emit_block_header(ir_node *block)
{
ir_graph *irg = current_ir_graph;
- int n_cfgpreds;
- int need_label = 1;
+ int need_label = block_needs_label(block);
int i, arity;
ir_exec_freq *exec_freq = cg->birg->exec_freq;
if (block == get_irg_end_block(irg) || block == get_irg_start_block(irg))
return;
- n_cfgpreds = get_Block_n_cfgpreds(block);
-
- if (n_cfgpreds == 0) {
- need_label = 0;
- } else if (n_cfgpreds == 1) {
- ir_node *cfgpred = get_Block_cfgpred(block, 0);
- if (get_nodes_block(cfgpred) == prev_block && is_fallthrough(cfgpred)) {
- need_label = 0;
- }
- }
-
if (ia32_cg_config.label_alignment > 0) {
/* align the current block if:
* a) if should be aligned due to its execution frequency
* b) there is no fall-through here
*/
- if (should_align_block(block, prev_block)) {
+ if (should_align_block(block)) {
ia32_emit_align_label();
} else {
/* if the predecessor block has no fall-through,
we can always align the label. */
int i;
- ir_node *check_node = NULL;
+ int has_fallthrough = 0;
- for (i = n_cfgpreds - 1; i >= 0; --i) {
+ for (i = get_Block_n_cfgpreds(block) - 1; i >= 0; --i) {
ir_node *cfg_pred = get_Block_cfgpred(block, i);
-
- if (get_nodes_block(skip_Proj(cfg_pred)) == prev_block) {
- check_node = cfg_pred;
+ if (can_be_fallthrough(cfg_pred)) {
+ has_fallthrough = 1;
break;
}
}
- if (check_node == NULL || !is_fallthrough(check_node))
+
+ if (!has_fallthrough)
ia32_emit_align_label();
}
}
* Walks over the nodes in a block connected by scheduling edges
* and emits code for each node.
*/
-static void ia32_gen_block(ir_node *block, ir_node *last_block)
+static void ia32_gen_block(ir_node *block)
{
- const ir_node *node;
+ ir_node *node;
- ia32_emit_block_header(block, last_block);
+ ia32_emit_block_header(block);
/* emit the contents of the block */
be_dbg_set_dbg_info(get_irn_dbg_info(block));
}
/**
- * Emit an exception label if the current instruction can fail.
+ * Assign and emit an exception label if the current instruction can fail.
*/
-void ia32_assign_exc_label(const ir_node *node)
+void ia32_assign_exc_label(ir_node *node)
{
if (get_ia32_exc_label(node)) {
/* assign a new ID to the instruction */
/**
* Compare two exception_entries.
*/
-static int cmp_exc_entry(const void *a, const void *b) {
+static int cmp_exc_entry(const void *a, const void *b)
+{
const exc_entry *ea = a;
const exc_entry *eb = b;
*/
void ia32_gen_routine(ia32_code_gen_t *ia32_cg, ir_graph *irg)
{
- ir_node *block;
- ir_node *last_block = NULL;
ir_entity *entity = get_irg_entity(irg);
exc_entry *exc_list = NEW_ARR_F(exc_entry, 0);
int i, n;
be_gas_emit_function_prolog(entity, ia32_cg_config.function_alignment);
/* we use links to point to target blocks */
- set_using_irn_link(irg);
+ ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK);
irg_block_walk_graph(irg, ia32_gen_labels, NULL, &exc_list);
+ /* initialize next block links */
n = ARR_LEN(cg->blk_sched);
- for (i = 0; i < n;) {
- ir_node *next_bl;
+ for (i = 0; i < n; ++i) {
+ ir_node *block = cg->blk_sched[i];
+ ir_node *prev = i > 0 ? cg->blk_sched[i-1] : NULL;
+
+ set_irn_link(block, prev);
+ }
- block = cg->blk_sched[i];
- ++i;
- next_bl = i < n ? cg->blk_sched[i] : NULL;
+ for (i = 0; i < n; ++i) {
+ ir_node *block = cg->blk_sched[i];
- /* set here the link. the emitter expects to find the next block here */
- set_irn_link(block, next_bl);
- ia32_gen_block(block, last_block);
- last_block = block;
+ ia32_gen_block(block);
}
be_gas_emit_function_epilog(entity);
be_emit_char('\n');
be_emit_write_line();
- clear_using_irn_link(irg);
+ ir_free_resources(irg, IR_RESOURCE_IRN_LINK);
/* Sort the exception table using the exception label id's.
Those are ascending with ascending addresses. */