/*
- * Copyright (C) 1995-2007 University of Karlsruhe. All right reserved.
+ * Copyright (C) 1995-2008 University of Karlsruhe. All right reserved.
*
* This file is part of libFirm.
*
#include "execfreq.h"
#include "error.h"
#include "raw_bitset.h"
+#include "dbginfo.h"
#include "../besched_t.h"
#include "../benode_t.h"
#include "../beemitter.h"
#include "../begnuas.h"
#include "../beirg_t.h"
+#include "../be_dbgout.h"
#include "ia32_emitter.h"
#include "gen_ia32_emitter.h"
#include "ia32_nodes_attr.h"
#include "ia32_new_nodes.h"
#include "ia32_map_regs.h"
+#include "ia32_architecture.h"
#include "bearch_ia32_t.h"
DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
static const arch_env_t *arch_env;
static const ia32_isa_t *isa;
static ia32_code_gen_t *cg;
+static int do_pic;
+static char pic_base_label[128];
+static ir_label_t exc_label_id;
+
+/** Return the block scheduled immediately before the given block (stored in the block's link field) */
+static ir_node *get_prev_block_sched(const ir_node *block)
+{
+ return get_irn_link(block);
+}
+
+static int is_fallthrough(const ir_node *cfgpred)
+{
+ ir_node *pred;
+
+ if (!is_Proj(cfgpred))
+ return 1;
+ pred = get_Proj_pred(cfgpred);
+ if (is_ia32_SwitchJmp(pred))
+ return 0;
+
+ return 1;
+}
+
+static int block_needs_label(const ir_node *block)
+{
+ int need_label = 1;
+ int n_cfgpreds = get_Block_n_cfgpreds(block);
+
+ if (n_cfgpreds == 0) {
+ need_label = 0;
+ } else if (n_cfgpreds == 1) {
+ ir_node *cfgpred = get_Block_cfgpred(block, 0);
+ ir_node *cfgpred_block = get_nodes_block(cfgpred);
+
+ if (get_prev_block_sched(block) == cfgpred_block
+ && is_fallthrough(cfgpred)) {
+ need_label = 0;
+ }
+ }
+
+ return need_label;
+}
/**
* Returns the register at in position pos.
assert(reg && "no in register found");
- if(reg == &ia32_gp_regs[REG_GP_NOREG])
+ if (reg == &ia32_gp_regs[REG_GP_NOREG])
panic("trying to emit noreg for %+F input %d", irn, pos);
/* in case of unknown register: just return a valid register */
{
const char *reg_name;
- if(mode != NULL) {
+ if (mode != NULL) {
int size = get_mode_size_bits(mode);
- if(size == 8) {
- emit_8bit_register(reg);
- return;
- } else if(size == 16) {
- emit_16bit_register(reg);
- return;
- } else {
- assert(mode_is_float(mode) || size == 32);
+ switch (size) {
+ case 8: emit_8bit_register(reg); return;
+ case 16: emit_16bit_register(reg); return;
}
+ assert(mode_is_float(mode) || size == 32);
}
reg_name = arch_register_get_name(reg);
void ia32_emit_source_register(const ir_node *node, int pos)
{
- const arch_register_t *reg = get_in_reg(node, pos);
+ const arch_register_t *reg = get_in_reg(node, pos);
emit_register(reg, NULL);
}
{
const arch_register_t *reg;
ir_node *in = get_irn_n(node, pos);
- if(is_ia32_Immediate(in)) {
+ if (is_ia32_Immediate(in)) {
emit_ia32_Immediate(in);
return;
}
static void ia32_emit_mode_suffix_mode(const ir_mode *mode)
{
- if(mode_is_float(mode)) {
+ if (mode_is_float(mode)) {
switch(get_mode_size_bits(mode)) {
case 32: be_emit_char('s'); return;
case 64: be_emit_char('l'); return;
- case 80: be_emit_char('t'); return;
+ case 80:
+ case 96: be_emit_char('t'); return;
}
} else {
assert(mode_is_int(mode) || mode_is_reference(mode));
switch(get_mode_size_bits(mode)) {
+ /* gas docu says q is the suffix but gcc, objdump and icc use ll
+ * apparently */
case 64: be_emit_cstring("ll"); return;
- /* gas docu says q is the suffix but gcc, objdump and icc use
- ll apparently */
- case 32: be_emit_char('l'); return;
- case 16: be_emit_char('w'); return;
- case 8: be_emit_char('b'); return;
+ case 32: be_emit_char('l'); return;
+ case 16: be_emit_char('w'); return;
+ case 8: be_emit_char('b'); return;
}
}
- panic("Can't output mode_suffix for %+F\n", mode);
+ panic("Can't output mode_suffix for %+F", mode);
}
void ia32_emit_mode_suffix(const ir_node *node)
{
ir_mode *mode = get_ia32_ls_mode(node);
- if(mode == NULL)
+ if (mode == NULL)
mode = mode_Iu;
ia32_emit_mode_suffix_mode(mode);
void ia32_emit_x87_mode_suffix(const ir_node *node)
{
- ir_mode *mode = get_ia32_ls_mode(node);
- assert(mode != NULL);
/* we only need to emit the mode on address mode */
- if(get_ia32_op_type(node) != ia32_Normal)
+ if (get_ia32_op_type(node) != ia32_Normal) {
+ ir_mode *mode = get_ia32_ls_mode(node);
+ assert(mode != NULL);
ia32_emit_mode_suffix_mode(mode);
+ }
}
-static
-char get_xmm_mode_suffix(ir_mode *mode)
+static char get_xmm_mode_suffix(ir_mode *mode)
{
assert(mode_is_float(mode));
switch(get_mode_size_bits(mode)) {
- case 32:
- return 's';
- case 64:
- return 'd';
- default:
- assert(0);
+ case 32: return 's';
+ case 64: return 'd';
+ default: panic("Invalid XMM mode");
}
- return '%';
}
void ia32_emit_xmm_mode_suffix(const ir_node *node)
void ia32_emit_extend_suffix(const ir_mode *mode)
{
- if(get_mode_size_bits(mode) == 32)
+ if (get_mode_size_bits(mode) == 32)
return;
- if(mode_is_signed(mode)) {
- be_emit_char('s');
- } else {
- be_emit_char('z');
- }
+ be_emit_char(mode_is_signed(mode) ? 's' : 'z');
}
-static
-void ia32_emit_function_object(const char *name)
-{
- switch (be_gas_flavour) {
- case GAS_FLAVOUR_NORMAL:
- be_emit_cstring("\t.type\t");
- be_emit_string(name);
- be_emit_cstring(", @function\n");
- be_emit_write_line();
- break;
- case GAS_FLAVOUR_MINGW:
- be_emit_cstring("\t.def\t");
- be_emit_string(name);
- be_emit_cstring(";\t.scl\t2;\t.type\t32;\t.endef\n");
- be_emit_write_line();
- break;
- default:
- break;
- }
-}
-
-static
-void ia32_emit_function_size(const char *name)
-{
- switch (be_gas_flavour) {
- case GAS_FLAVOUR_NORMAL:
- be_emit_cstring("\t.size\t");
- be_emit_string(name);
- be_emit_cstring(", .-");
- be_emit_string(name);
- be_emit_char('\n');
- be_emit_write_line();
- break;
- default:
- break;
- }
-}
-
-
void ia32_emit_source_register_or_immediate(const ir_node *node, int pos)
{
ir_node *in = get_irn_n(node, pos);
- if(is_ia32_Immediate(in)) {
+ if (is_ia32_Immediate(in)) {
emit_ia32_Immediate(in);
} else {
const ir_mode *mode = get_ia32_ls_mode(node);
/**
* Emits registers and/or address mode of a binary operation.
*/
-void ia32_emit_binop(const ir_node *node) {
+void ia32_emit_binop(const ir_node *node)
+{
const ir_node *right_op = get_irn_n(node, n_ia32_binary_right);
const ir_mode *mode = get_ia32_ls_mode(node);
const arch_register_t *reg_left;
switch(get_ia32_op_type(node)) {
case ia32_Normal:
reg_left = get_in_reg(node, n_ia32_binary_left);
- if(is_ia32_Immediate(right_op)) {
+ if (is_ia32_Immediate(right_op)) {
emit_ia32_Immediate(right_op);
be_emit_cstring(", ");
emit_register(reg_left, mode);
}
break;
case ia32_AddrModeS:
- if(is_ia32_Immediate(right_op)) {
+ if (is_ia32_Immediate(right_op)) {
emit_ia32_Immediate(right_op);
be_emit_cstring(", ");
ia32_emit_am(node);
/**
* Emits registers and/or address mode of a binary operation.
*/
-void ia32_emit_x87_binop(const ir_node *node) {
+void ia32_emit_x87_binop(const ir_node *node)
+{
switch(get_ia32_op_type(node)) {
case ia32_Normal:
{
}
}
-void ia32_emit_am_or_dest_register(const ir_node *node,
- int pos) {
- if(get_ia32_op_type(node) == ia32_Normal) {
- ia32_emit_dest_register(node, pos);
- } else {
- assert(get_ia32_op_type(node) == ia32_AddrModeD);
- ia32_emit_am(node);
- }
-}
-
/**
* Emits registers and/or address mode of a unary operation.
*/
-void ia32_emit_unop(const ir_node *node, int pos) {
+void ia32_emit_unop(const ir_node *node, int pos)
+{
const ir_node *op;
switch(get_ia32_op_type(node)) {
}
}
+static void ia32_emit_entity(ir_entity *entity, int no_pic_adjust)
+{
+ ident *id;
+
+ set_entity_backend_marked(entity, 1);
+ id = get_entity_ld_ident(entity);
+ be_emit_ident(id);
+
+ if (get_entity_owner(entity) == get_tls_type()) {
+ if (get_entity_visibility(entity) == visibility_external_allocated) {
+ be_emit_cstring("@INDNTPOFF");
+ } else {
+ be_emit_cstring("@NTPOFF");
+ }
+ }
+
+ if (!no_pic_adjust && do_pic) {
+ /* TODO: only do this when necessary */
+ be_emit_char('-');
+ be_emit_string(pic_base_label);
+ }
+}
+
/**
* Emits address mode.
*/
-void ia32_emit_am(const ir_node *node) {
+void ia32_emit_am(const ir_node *node)
+{
ir_entity *ent = get_ia32_am_sc(node);
int offs = get_ia32_am_offs_int(node);
- ir_node *base = get_irn_n(node, 0);
+ ir_node *base = get_irn_n(node, n_ia32_base);
int has_base = !is_ia32_NoReg_GP(base);
- ir_node *index = get_irn_n(node, 1);
+ ir_node *index = get_irn_n(node, n_ia32_index);
int has_index = !is_ia32_NoReg_GP(index);
/* just to be sure... */
/* emit offset */
if (ent != NULL) {
- ident *id;
-
- set_entity_backend_marked(ent, 1);
- id = get_entity_ld_ident(ent);
if (is_ia32_am_sc_sign(node))
be_emit_char('-');
- be_emit_ident(id);
-
- if(get_entity_owner(ent) == get_tls_type()) {
- if (get_entity_visibility(ent) == visibility_external_allocated) {
- be_emit_cstring("@INDNTPOFF");
- } else {
- be_emit_cstring("@NTPOFF");
- }
- }
+ ia32_emit_entity(ent, 0);
}
- if(offs != 0) {
- if(ent != NULL) {
+ /* also handle special case if nothing is set */
+ if (offs != 0 || (ent == NULL && !has_base && !has_index)) {
+ if (ent != NULL) {
be_emit_irprintf("%+d", offs);
} else {
be_emit_irprintf("%d", offs);
scale = get_ia32_am_scale(node);
if (scale > 0) {
- be_emit_irprintf(",%d", 1 << get_ia32_am_scale(node));
+ be_emit_irprintf(",%d", 1 << scale);
}
}
be_emit_char(')');
}
+}
+
+static void emit_ia32_IMul(const ir_node *node)
+{
+ ir_node *left = get_irn_n(node, n_ia32_IMul_left);
+ const arch_register_t *out_reg = get_out_reg(node, pn_ia32_IMul_res);
- /* special case if nothing is set */
- if(ent == NULL && offs == 0 && !has_base && !has_index) {
- be_emit_char('0');
+ be_emit_cstring("\timul");
+ ia32_emit_mode_suffix(node);
+ be_emit_char(' ');
+
+ ia32_emit_binop(node);
+
+ /* do we need the 3-address form? */
+ if (is_ia32_NoReg_GP(left) ||
+ get_in_reg(node, n_ia32_IMul_left) != out_reg) {
+ be_emit_cstring(", ");
+ emit_register(out_reg, get_ia32_ls_mode(node));
}
+ be_emit_finish_line_gas(node);
}
/*************************************************
{ NULL, pn_Cmp_Leg }, /* always true */
};
-enum {
- ia32_pn_Cmp_unsigned = 0x1000,
- ia32_pn_Cmp_float = 0x2000,
-};
-
/**
* walks up a tree of copies/perms/spills/reloads to find the original value
* that is moved around
*/
static ir_node *find_original_value(ir_node *node)
{
- inc_irg_visited(current_ir_graph);
- while(1) {
- mark_irn_visited(node);
- if(be_is_Copy(node)) {
- node = be_get_Copy_op(node);
- } else if(be_is_CopyKeep(node)) {
- node = be_get_CopyKeep_op(node);
- } else if(is_Proj(node)) {
- ir_node *pred = get_Proj_pred(node);
- if(be_is_Perm(pred)) {
- node = get_irn_n(pred, get_Proj_proj(node));
- } else if(be_is_MemPerm(pred)) {
- node = get_irn_n(pred, get_Proj_proj(node) + 1);
- } else if(is_ia32_Load(pred)) {
- node = get_irn_n(pred, n_ia32_Load_mem);
- } else {
- return node;
- }
- } else if(is_ia32_Store(node)) {
- node = get_irn_n(node, n_ia32_Store_val);
- } else if(is_Phi(node)) {
- int i, arity;
- arity = get_irn_arity(node);
- for(i = 0; i < arity; ++i) {
- ir_node *in = get_irn_n(node, i);
- if(irn_visited(in))
- continue;
- node = in;
- break;
- }
- assert(i < arity);
+ if (irn_visited(node))
+ return NULL;
+
+ mark_irn_visited(node);
+ if (be_is_Copy(node)) {
+ return find_original_value(be_get_Copy_op(node));
+ } else if (be_is_CopyKeep(node)) {
+ return find_original_value(be_get_CopyKeep_op(node));
+ } else if (is_Proj(node)) {
+ ir_node *pred = get_Proj_pred(node);
+ if (be_is_Perm(pred)) {
+ return find_original_value(get_irn_n(pred, get_Proj_proj(node)));
+ } else if (be_is_MemPerm(pred)) {
+ return find_original_value(get_irn_n(pred, get_Proj_proj(node) + 1));
+ } else if (is_ia32_Load(pred)) {
+ return find_original_value(get_irn_n(pred, n_ia32_Load_mem));
} else {
return node;
}
+ } else if (is_ia32_Store(node)) {
+ return find_original_value(get_irn_n(node, n_ia32_Store_val));
+ } else if (is_Phi(node)) {
+ int i, arity;
+ arity = get_irn_arity(node);
+ for (i = 0; i < arity; ++i) {
+ ir_node *in = get_irn_n(node, i);
+ ir_node *res = find_original_value(in);
+
+ if (res != NULL)
+ return res;
+ }
+ return NULL;
+ } else {
+ return node;
}
}
const ia32_attr_t *flags_attr;
flags = skip_Proj(flags);
- if(is_ia32_Sahf(flags)) {
+ if (is_ia32_Sahf(flags)) {
ir_node *cmp = get_irn_n(flags, n_ia32_Sahf_val);
- if(!(is_ia32_FucomFnstsw(cmp) || is_ia32_FucompFnstsw(cmp)
+ if (!(is_ia32_FucomFnstsw(cmp) || is_ia32_FucompFnstsw(cmp)
|| is_ia32_FucomppFnstsw(cmp) || is_ia32_FtstFnstsw(cmp))) {
+ inc_irg_visited(current_ir_graph);
cmp = find_original_value(cmp);
+ assert(cmp != NULL);
assert(is_ia32_FucomFnstsw(cmp) || is_ia32_FucompFnstsw(cmp)
|| is_ia32_FucomppFnstsw(cmp) || is_ia32_FtstFnstsw(cmp));
}
flags_attr = get_ia32_attr_const(cmp);
- if(flags_attr->data.ins_permuted)
+ if (flags_attr->data.ins_permuted)
pnc = get_mirrored_pnc(pnc);
pnc |= ia32_pn_Cmp_float;
- } else if(is_ia32_Ucomi(flags) || is_ia32_Fucomi(flags)
+ } else if (is_ia32_Ucomi(flags) || is_ia32_Fucomi(flags)
|| is_ia32_Fucompi(flags)) {
flags_attr = get_ia32_attr_const(flags);
- if(flags_attr->data.ins_permuted)
+ if (flags_attr->data.ins_permuted)
pnc = get_mirrored_pnc(pnc);
pnc |= ia32_pn_Cmp_float;
} else {
+#if 0
assert(is_ia32_Cmp(flags) || is_ia32_Test(flags)
|| is_ia32_Cmp8Bit(flags) || is_ia32_Test8Bit(flags));
+#endif
flags_attr = get_ia32_attr_const(flags);
- if(flags_attr->data.ins_permuted)
+ if (flags_attr->data.ins_permuted)
pnc = get_mirrored_pnc(pnc);
- if(flags_attr->data.cmp_unsigned)
+ if (flags_attr->data.cmp_unsigned)
pnc |= ia32_pn_Cmp_unsigned;
}
{
const char *str;
- if((pnc & ia32_pn_Cmp_float) || (pnc & ia32_pn_Cmp_unsigned)) {
+ if ((pnc & ia32_pn_Cmp_float) || (pnc & ia32_pn_Cmp_unsigned)) {
pnc = pnc & 7;
assert(cmp2condition_u[pnc].num == pnc);
str = cmp2condition_u[pnc].name;
pn_Cmp pnc = get_ia32_condcode(node);
pnc = determine_final_pnc(node, flags_pos, pnc);
- if(attr->data.ins_permuted) {
- if(pnc & ia32_pn_Cmp_float) {
+ if (attr->data.ins_permuted) {
+ if (pnc & ia32_pn_Cmp_float) {
pnc = get_negated_pnc(pnc, mode_F);
} else {
pnc = get_negated_pnc(pnc, mode_Iu);
/**
* Returns the target block for a control flow node.
*/
-static
-ir_node *get_cfop_target_block(const ir_node *irn) {
+static ir_node *get_cfop_target_block(const ir_node *irn)
+{
+ assert(get_irn_mode(irn) == mode_X);
return get_irn_link(irn);
}
/**
* Emits a block label for the given block.
*/
-static
-void ia32_emit_block_name(const ir_node *block)
+static void ia32_emit_block_name(const ir_node *block)
{
if (has_Block_label(block)) {
- be_emit_string(be_gas_label_prefix());
- be_emit_irprintf("%u", (unsigned)get_Block_label(block));
+ be_emit_string(be_gas_block_label_prefix());
+ be_emit_irprintf("%lu", get_Block_label(block));
} else {
be_emit_cstring(BLOCK_PREFIX);
- be_emit_irprintf("%d", get_irn_node_nr(block));
+ be_emit_irprintf("%ld", get_irn_node_nr(block));
}
}
+/**
+ * Emits an exception label for a given node.
+ */
+static void ia32_emit_exc_label(const ir_node *node)
+{
+ be_emit_string(be_gas_insn_label_prefix());
+ be_emit_irprintf("%lu", get_ia32_exc_label_id(node));
+}
+
/**
* Emits the target label for a control flow node.
*/
ia32_emit_block_name(block);
}
-/** Return the next block in Block schedule */
-static ir_node *next_blk_sched(const ir_node *block)
-{
- return get_irn_link(block);
-}
-
/**
* Returns the Proj with projection number proj and NOT mode_M
*/
-static ir_node *get_proj(const ir_node *node, long proj) {
+static ir_node *get_proj(const ir_node *node, long proj)
+{
const ir_edge_t *edge;
ir_node *src;
return NULL;
}
+static int can_be_fallthrough(const ir_node *node)
+{
+ ir_node *target_block = get_cfop_target_block(node);
+ ir_node *block = get_nodes_block(node);
+ return get_prev_block_sched(target_block) == block;
+}
+
/**
* Emits the jump sequence for a conditional jump (cmp + jmp_true + jmp_false)
*/
static void emit_ia32_Jcc(const ir_node *node)
{
+ int need_parity_label = 0;
const ir_node *proj_true;
const ir_node *proj_false;
const ir_node *block;
- const ir_node *next_block;
pn_Cmp pnc = get_ia32_condcode(node);
pnc = determine_final_pnc(node, 0, pnc);
assert(proj_false && "Jcc without false Proj");
block = get_nodes_block(node);
- next_block = next_blk_sched(block);
- if (get_cfop_target_block(proj_true) == next_block) {
+ if (can_be_fallthrough(proj_true)) {
/* exchange both proj's so the second one can be omitted */
const ir_node *t = proj_true;
proj_true = proj_false;
proj_false = t;
- if(pnc & ia32_pn_Cmp_float) {
+ if (pnc & ia32_pn_Cmp_float) {
pnc = get_negated_pnc(pnc, mode_F);
} else {
pnc = get_negated_pnc(pnc, mode_Iu);
/* Some floating point comparisons require a test of the parity flag,
* which indicates that the result is unordered */
switch (pnc & 15) {
- case pn_Cmp_Uo:
+ case pn_Cmp_Uo: {
be_emit_cstring("\tjp ");
ia32_emit_cfop_target(proj_true);
be_emit_finish_line_gas(proj_true);
break;
+ }
case pn_Cmp_Leg:
be_emit_cstring("\tjnp ");
case pn_Cmp_Eq:
case pn_Cmp_Lt:
case pn_Cmp_Le:
- be_emit_cstring("\tjp ");
- ia32_emit_cfop_target(proj_false);
+ /* we need a local label if the false proj is a fallthrough
+ * as the falseblock might have no label emitted then */
+ if (can_be_fallthrough(proj_false)) {
+ need_parity_label = 1;
+ be_emit_cstring("\tjp 1f");
+ } else {
+ be_emit_cstring("\tjp ");
+ ia32_emit_cfop_target(proj_false);
+ }
be_emit_finish_line_gas(proj_false);
goto emit_jcc;
be_emit_finish_line_gas(proj_true);
}
+ if (need_parity_label) {
+ be_emit_cstring("1:");
+ be_emit_write_line();
+ }
+
/* the second Proj might be a fallthrough */
- if (get_cfop_target_block(proj_false) != next_block) {
- be_emit_cstring("\tjmp ");
+ if (can_be_fallthrough(proj_false)) {
+ be_emit_cstring("\t/* fallthrough to ");
ia32_emit_cfop_target(proj_false);
+ be_emit_cstring(" */");
be_emit_finish_line_gas(proj_false);
} else {
- be_emit_cstring("\t/* fallthrough to ");
+ be_emit_cstring("\tjmp ");
ia32_emit_cfop_target(proj_false);
- be_emit_cstring(" */");
be_emit_finish_line_gas(proj_false);
}
}
get_irn_n(node, n_ia32_CMov_val_false));
/* should be same constraint fullfilled? */
- if(out == in_false) {
+ if (out == in_false) {
/* yes -> nothing to do */
- } else if(out == in_true) {
+ } else if (out == in_true) {
const arch_register_t *tmp;
assert(get_ia32_op_type(node) == ia32_Normal);
be_emit_finish_line_gas(node);
}
- if(ins_permuted) {
- if(pnc & ia32_pn_Cmp_float) {
+ if (ins_permuted) {
+ if (pnc & ia32_pn_Cmp_float) {
pnc = get_negated_pnc(pnc, mode_F);
} else {
pnc = get_negated_pnc(pnc, mode_Iu);
be_emit_cstring("\tcmov");
ia32_emit_cmp_suffix(pnc);
be_emit_char(' ');
- if(get_ia32_op_type(node) == ia32_AddrModeS) {
+ if (get_ia32_op_type(node) == ia32_AddrModeS) {
ia32_emit_am(node);
} else {
emit_register(in_true, get_ia32_ls_mode(node));
/**
* Compare two variables of type branch_t. Used to sort all switch cases
*/
-static
-int ia32_cmp_branch_t(const void *a, const void *b) {
+static int ia32_cmp_branch_t(const void *a, const void *b)
+{
branch_t *b1 = (branch_t *)a;
branch_t *b2 = (branch_t *)b;
* possible otherwise a cmp-jmp cascade). Port from
* cggg ia32 backend
*/
-static
-void emit_ia32_SwitchJmp(const ir_node *node) {
+static void emit_ia32_SwitchJmp(const ir_node *node)
+{
unsigned long interval;
int last_value, i;
long pnc;
+ long default_pn;
jmp_tbl_t tbl;
ir_node *proj;
const ir_edge_t *edge;
tbl.label = xmalloc(SNPRINTF_BUF_LEN);
tbl.label = get_unique_label(tbl.label, SNPRINTF_BUF_LEN, ".TBL_");
tbl.defProj = NULL;
- tbl.num_branches = get_irn_n_edges(node);
+ tbl.num_branches = get_irn_n_edges(node) - 1;
tbl.branches = xcalloc(tbl.num_branches, sizeof(tbl.branches[0]));
tbl.min_value = INT_MAX;
tbl.max_value = INT_MIN;
+ default_pn = get_ia32_condcode(node);
i = 0;
/* go over all proj's and collect them */
foreach_out_edge(node, edge) {
pnc = get_Proj_proj(proj);
- /* create branch entry */
- tbl.branches[i].target = proj;
- tbl.branches[i].value = pnc;
-
- tbl.min_value = pnc < tbl.min_value ? pnc : tbl.min_value;
- tbl.max_value = pnc > tbl.max_value ? pnc : tbl.max_value;
-
/* check for default proj */
- if (pnc == get_ia32_condcode(node)) {
- assert(tbl.defProj == NULL && "found two defProjs at SwitchJmp");
+ if (pnc == default_pn) {
+ assert(tbl.defProj == NULL && "found two default Projs at SwitchJmp");
tbl.defProj = proj;
+ } else {
+ tbl.min_value = pnc < tbl.min_value ? pnc : tbl.min_value;
+ tbl.max_value = pnc > tbl.max_value ? pnc : tbl.max_value;
+
+ /* create branch entry */
+ tbl.branches[i].target = proj;
+ tbl.branches[i].value = pnc;
+ ++i;
}
- i++;
}
+ assert(i == tbl.num_branches);
/* sort the branches by their number */
qsort(tbl.branches, tbl.num_branches, sizeof(tbl.branches[0]), ia32_cmp_branch_t);
*/
static void emit_Jmp(const ir_node *node)
{
- ir_node *block, *next_block;
+ ir_node *block;
/* for now, the code works for scheduled and non-schedules blocks */
block = get_nodes_block(node);
/* we have a block schedule */
- next_block = next_blk_sched(block);
- if (get_cfop_target_block(node) != next_block) {
- be_emit_cstring("\tjmp ");
- ia32_emit_cfop_target(node);
- } else {
+ if (can_be_fallthrough(node)) {
be_emit_cstring("\t/* fallthrough to ");
ia32_emit_cfop_target(node);
be_emit_cstring(" */");
+ } else {
+ be_emit_cstring("\tjmp ");
+ ia32_emit_cfop_target(node);
}
be_emit_finish_line_gas(node);
}
const ia32_immediate_attr_t *attr = get_ia32_immediate_attr_const(node);
be_emit_char('$');
- if(attr->symconst != NULL) {
- ident *id = get_entity_ld_ident(attr->symconst);
-
- if(attr->sc_sign)
+ if (attr->symconst != NULL) {
+ if (attr->sc_sign)
be_emit_char('-');
- be_emit_ident(id);
+ ia32_emit_entity(attr->symconst, 0);
}
- if(attr->symconst == NULL || attr->offset != 0) {
- if(attr->symconst != NULL) {
+ if (attr->symconst == NULL || attr->offset != 0) {
+ if (attr->symconst != NULL) {
be_emit_irprintf("%+d", attr->offset);
} else {
be_emit_irprintf("0x%X", attr->offset);
/* parse number */
sscanf(s, "%d%n", &num, &p);
- if(num < 0) {
+ if (num < 0) {
ir_fprintf(stderr, "Warning: Couldn't parse assembler operand (%+F)\n",
node);
return s;
s += p;
}
- if(num < 0 || num >= ARR_LEN(asm_regs)) {
+ if (num < 0 || num >= ARR_LEN(asm_regs)) {
ir_fprintf(stderr, "Error: Custom assembler references invalid "
"input/output (%+F)\n", node);
return s;
assert(asm_reg->valid);
/* get register */
- if(asm_reg->use_input == 0) {
+ if (asm_reg->use_input == 0) {
reg = get_out_reg(node, asm_reg->inout_pos);
} else {
ir_node *pred = get_irn_n(node, asm_reg->inout_pos);
/* might be an immediate value */
- if(is_ia32_Immediate(pred)) {
+ if (is_ia32_Immediate(pred)) {
emit_ia32_Immediate(pred);
return s;
}
reg = get_in_reg(node, asm_reg->inout_pos);
}
- if(reg == NULL) {
+ if (reg == NULL) {
ir_fprintf(stderr, "Warning: no register assigned for %d asm op "
"(%+F)\n", num, node);
return s;
}
- if(asm_reg->memory) {
+ if (asm_reg->memory) {
be_emit_char('(');
}
/* emit it */
- if(modifier != 0) {
+ if (modifier != 0) {
be_emit_char('%');
switch(modifier) {
case 'b':
emit_register(reg, asm_reg->mode);
}
- if(asm_reg->memory) {
+ if (asm_reg->memory) {
be_emit_char(')');
}
ident *asm_text = attr->asm_text;
const char *s = get_id_str(asm_text);
- be_emit_cstring("# Begin ASM \t");
+ be_emit_cstring("#APP\t");
be_emit_finish_line_gas(node);
if (s[0] != '\t')
be_emit_char('\t');
while(*s != 0) {
- if(*s == '%') {
+ if (*s == '%') {
s = emit_asm_operand(node, s);
- continue;
} else {
- be_emit_char(*s);
+ be_emit_char(*s++);
}
- ++s;
}
be_emit_char('\n');
be_emit_write_line();
- be_emit_cstring("# End ASM\n");
+ be_emit_cstring("#NO_APP\n");
be_emit_write_line();
}
/**
* Emit movsb/w instructions to make mov count divideable by 4
*/
-static void emit_CopyB_prolog(unsigned size) {
+static void emit_CopyB_prolog(unsigned size)
+{
be_emit_cstring("\tcld");
be_emit_finish_line_gas(NULL);
be_emit_cstring("\tcvt");
- if(is_ia32_Conv_I2FP(node)) {
- if(ls_bits == 32) {
+ if (is_ia32_Conv_I2FP(node)) {
+ if (ls_bits == 32) {
be_emit_cstring("si2ss");
} else {
be_emit_cstring("si2sd");
}
- } else if(is_ia32_Conv_FP2I(node)) {
- if(ls_bits == 32) {
+ } else if (is_ia32_Conv_FP2I(node)) {
+ if (ls_bits == 32) {
be_emit_cstring("ss2si");
} else {
be_emit_cstring("sd2si");
}
} else {
assert(is_ia32_Conv_FP2FP(node));
- if(ls_bits == 32) {
+ if (ls_bits == 32) {
be_emit_cstring("sd2ss");
} else {
be_emit_cstring("ss2sd");
const arch_register_t *in_reg, *out_reg;
assert(!mode_is_float(smaller_mode));
- assert(smaller_bits == 8 || smaller_bits == 16 || smaller_bits == 32);
+ assert(smaller_bits == 8 || smaller_bits == 16);
signed_mode = mode_is_signed(smaller_mode);
- if(smaller_bits == 32) {
- // this should not happen as it's no convert
- assert(0);
- sign_suffix = "";
- } else {
- sign_suffix = signed_mode ? "s" : "z";
- }
+ sign_suffix = signed_mode ? "s" : "z";
out_reg = get_out_reg(node, 0);
break;
}
default:
- assert(0 && "unsupported op type for Conv");
+ panic("unsupported op type for Conv");
}
be_emit_finish_line_gas(node);
}
be_emit_cstring("\tcall ");
if (ent) {
- set_entity_backend_marked(ent, 1);
- be_emit_string(get_entity_ld_name(ent));
+ ia32_emit_entity(ent, 1);
} else {
const arch_register_t *reg = get_in_reg(node, be_pos_Call_ptr);
be_emit_char('*');
const arch_register_t *out = arch_get_irn_register(arch_env, node);
ir_mode *mode;
- if(in == out) {
+ if (in == out) {
return;
}
- if(is_unknown_reg(in))
+ if (is_unknown_reg(in))
return;
/* copies of vf nodes aren't real... */
- if(arch_register_get_class(in) == &ia32_reg_classes[CLASS_ia32_vfp])
+ if (arch_register_get_class(in) == &ia32_reg_classes[CLASS_ia32_vfp])
return;
mode = get_irn_mode(node);
} else if (cls0 == &ia32_reg_classes[CLASS_ia32_st]) {
/* is a NOP */
} else {
- panic("unexpected register class in be_Perm (%+F)\n", node);
+ panic("unexpected register class in be_Perm (%+F)", node);
}
}
emit_sbb( node, in_hi, out_hi);
}
+static void emit_ia32_GetEIP(const ir_node *node)
+{
+ be_emit_cstring("\tcall ");
+ be_emit_string(pic_base_label);
+ be_emit_finish_line_gas(node);
+
+ be_emit_string(pic_base_label);
+ be_emit_cstring(":\n");
+ be_emit_write_line();
+
+ be_emit_cstring("\tpopl ");
+ ia32_emit_dest_register(node, 0);
+ be_emit_char('\n');
+ be_emit_write_line();
+}
+
static void emit_be_Return(const ir_node *node)
{
unsigned pop;
be_emit_cstring("\tret");
pop = be_Return_get_pop(node);
- if(pop > 0) {
+ if (pop > 0 || be_Return_get_emit_pop(node)) {
be_emit_irprintf(" $%d", pop);
}
be_emit_finish_line_gas(node);
* Enters the emitter functions for handled nodes into the generic
* pointer of an opcode.
*/
-static
-void ia32_register_emitters(void) {
-
+static void ia32_register_emitters(void)
+{
#define IA32_EMIT2(a,b) op_ia32_##a->ops.generic = (op_func)emit_ia32_##b
#define IA32_EMIT(a) IA32_EMIT2(a,a)
#define EMIT(a) op_##a->ops.generic = (op_func)emit_##a
/* other ia32 emitter functions */
IA32_EMIT(Asm);
IA32_EMIT(CMov);
+ IA32_EMIT(IMul);
IA32_EMIT(SwitchJmp);
IA32_EMIT(CopyB);
IA32_EMIT(CopyB_i);
IA32_EMIT(LdTls);
IA32_EMIT(Minus64Bit);
IA32_EMIT(Jcc);
+ IA32_EMIT(GetEIP);
/* benode emitter */
BE_EMIT(Call);
#undef IA32_EMIT
}
-static const char *last_name = NULL;
-static unsigned last_line = -1;
-static unsigned num = -1;
-
-/**
- * Emit the debug support for node node.
- */
-static void ia32_emit_dbg(const ir_node *node)
-{
- dbg_info *db = get_irn_dbg_info(node);
- unsigned lineno;
- const char *fname = be_retrieve_dbg_info(db, &lineno);
-
- if (! cg->birg->main_env->options->stabs_debug_support)
- return;
-
- if (fname) {
- if (last_name != fname) {
- last_line = -1;
- be_dbg_include_begin(cg->birg->main_env->db_handle, fname);
- last_name = fname;
- }
- if (last_line != lineno) {
- char name[64];
-
- snprintf(name, sizeof(name), ".LM%u", ++num);
- last_line = lineno;
- be_dbg_line(cg->birg->main_env->db_handle, lineno, name);
- be_emit_string(name);
- be_emit_cstring(":\n");
- be_emit_write_line();
- }
- }
-}
-
typedef void (*emit_func_ptr) (const ir_node *);
/**
* Emits code for a node.
*/
-static void ia32_emit_node(const ir_node *node)
+static void ia32_emit_node(ir_node *node)
{
ir_op *op = get_irn_op(node);
DBG((dbg, LEVEL_1, "emitting code for %+F\n", node));
+ if (is_ia32_irn(node) && get_ia32_exc_label(node)) {
+ /* emit the exception label of this instruction */
+ ia32_assign_exc_label(node);
+ }
if (op->ops.generic) {
emit_func_ptr func = (emit_func_ptr) op->ops.generic;
- ia32_emit_dbg(node);
+
+ be_dbg_set_dbg_info(get_irn_dbg_info(node));
+
(*func) (node);
} else {
emit_Nothing(node);
be_emit_write_line();
}
-/**
- * Emits gas alignment directives for Functions depended on cpu architecture.
- */
-static void ia32_emit_align_func(cpu_support cpu)
-{
- unsigned align;
- unsigned maximum_skip;
-
- switch (cpu) {
- case arch_i386:
- align = 2;
- break;
- case arch_i486:
- align = 4;
- break;
- case arch_k6:
- align = 5;
- break;
- default:
- align = 4;
- }
- maximum_skip = (1 << align) - 1;
- ia32_emit_alignment(align, maximum_skip);
-}
-
/**
* Emits gas alignment directives for Labels depended on cpu architecture.
*/
-static void ia32_emit_align_label(cpu_support cpu)
+static void ia32_emit_align_label(void)
{
- unsigned align; unsigned maximum_skip;
-
- switch (cpu) {
- case arch_i386:
- align = 2;
- break;
- case arch_i486:
- align = 4;
- break;
- case arch_k6:
- align = 5;
- break;
- default:
- align = 4;
- }
- maximum_skip = (1 << align) - 1;
+ unsigned align = ia32_cg_config.label_alignment;
+ unsigned maximum_skip = ia32_cg_config.label_alignment_max_skip;
ia32_emit_alignment(align, maximum_skip);
}
/**
- * Test wether a block should be aligned.
+ * Test whether a block should be aligned.
* For cpus in the P4/Athlon class it is useful to align jump labels to
* 16 bytes. However we should only do that if the alignment nops before the
* label aren't executed more often than we have jumps to the label.
*/
-static int should_align_block(ir_node *block, ir_node *prev)
+static int should_align_block(const ir_node *block)
{
static const double DELTA = .0001;
ir_exec_freq *exec_freq = cg->birg->exec_freq;
+ ir_node *prev = get_prev_block_sched(block);
double block_freq;
double prev_freq = 0; /**< execfreq of the fallthrough block */
double jmp_freq = 0; /**< execfreq of all non-fallthrough blocks */
- cpu_support cpu = isa->opt_arch;
int i, n_cfgpreds;
- if(exec_freq == NULL)
+ if (exec_freq == NULL)
return 0;
- if(cpu == arch_i386 || cpu == arch_i486)
+ if (ia32_cg_config.label_alignment_factor <= 0)
return 0;
block_freq = get_block_execfreq(exec_freq, block);
- if(block_freq < DELTA)
+ if (block_freq < DELTA)
return 0;
n_cfgpreds = get_Block_n_cfgpreds(block);
for(i = 0; i < n_cfgpreds; ++i) {
- ir_node *pred = get_Block_cfgpred_block(block, i);
- double pred_freq = get_block_execfreq(exec_freq, pred);
+ const ir_node *pred = get_Block_cfgpred_block(block, i);
+ double pred_freq = get_block_execfreq(exec_freq, pred);
- if(pred == prev) {
+ if (pred == prev) {
prev_freq += pred_freq;
} else {
jmp_freq += pred_freq;
}
}
- if(prev_freq < DELTA && !(jmp_freq < DELTA))
+ if (prev_freq < DELTA && !(jmp_freq < DELTA))
return 1;
jmp_freq /= prev_freq;
- switch (cpu) {
- case arch_athlon:
- case arch_athlon_64:
- case arch_k6:
- return jmp_freq > 3;
- default:
- return jmp_freq > 2;
- }
+ return jmp_freq > ia32_cg_config.label_alignment_factor;
}
-static void ia32_emit_block_header(ir_node *block, ir_node *prev)
+/**
+ * Emit the block header for a block.
+ *
+ * @param block the block
+ * @param prev_block the previous block
+ */
+static void ia32_emit_block_header(ir_node *block)
{
- int n_cfgpreds;
- int need_label;
+ ir_graph *irg = current_ir_graph;
+ int need_label = block_needs_label(block);
int i, arity;
- ir_exec_freq *exec_freq = cg->birg->exec_freq;
+ ir_exec_freq *exec_freq = cg->birg->exec_freq;
- n_cfgpreds = get_Block_n_cfgpreds(block);
- need_label = (n_cfgpreds != 0);
+ if (block == get_irg_end_block(irg) || block == get_irg_start_block(irg))
+ return;
- if (should_align_block(block, prev)) {
- assert(need_label);
- ia32_emit_align_label(isa->opt_arch);
+ if (ia32_cg_config.label_alignment > 0) {
+ /* align the current block if:
+ * a) it should be aligned due to its execution frequency
+ * b) there is no fall-through here
+ */
+ if (should_align_block(block)) {
+ ia32_emit_align_label();
+ } else {
+ /* if the predecessor block has no fall-through,
+ we can always align the label. */
+ int i;
+ int has_fallthrough = 0;
+
+ for (i = get_Block_n_cfgpreds(block) - 1; i >= 0; --i) {
+ ir_node *cfg_pred = get_Block_cfgpred(block, i);
+ if (can_be_fallthrough(cfg_pred)) {
+ has_fallthrough = 1;
+ break;
+ }
+ }
+
+ if (!has_fallthrough)
+ ia32_emit_align_label();
+ }
}
- if(need_label) {
+ if (need_label || has_Block_label(block)) {
ia32_emit_block_name(block);
be_emit_char(':');
be_emit_pad_comment();
- be_emit_cstring(" /* preds:");
-
- /* emit list of pred blocks in comment */
- arity = get_irn_arity(block);
- for (i = 0; i < arity; ++i) {
- ir_node *predblock = get_Block_cfgpred_block(block, i);
- be_emit_irprintf(" %d", get_irn_node_nr(predblock));
- }
+ be_emit_cstring(" /* ");
} else {
be_emit_cstring("\t/* ");
ia32_emit_block_name(block);
be_emit_cstring(": ");
}
+
+ be_emit_cstring("preds:");
+
+ /* emit list of pred blocks in comment */
+ arity = get_irn_arity(block);
+ for (i = 0; i < arity; ++i) {
+ ir_node *predblock = get_Block_cfgpred_block(block, i);
+ be_emit_irprintf(" %d", get_irn_node_nr(predblock));
+ }
if (exec_freq != NULL) {
be_emit_irprintf(" freq: %f",
get_block_execfreq(exec_freq, block));
* Walks over the nodes in a block connected by scheduling edges
* and emits code for each node.
*/
-static void ia32_gen_block(ir_node *block, ir_node *last_block)
+static void ia32_gen_block(ir_node *block)
{
- const ir_node *node;
+ ir_node *node;
- ia32_emit_block_header(block, last_block);
+ ia32_emit_block_header(block);
/* emit the contents of the block */
- ia32_emit_dbg(block);
+ be_dbg_set_dbg_info(get_irn_dbg_info(block));
sched_foreach(block, node) {
ia32_emit_node(node);
}
}
-/**
- * Emits code for function start.
- */
-static void ia32_emit_func_prolog(ir_graph *irg)
-{
- ir_entity *irg_ent = get_irg_entity(irg);
- const char *irg_name = get_entity_ld_name(irg_ent);
- cpu_support cpu = isa->opt_arch;
- const be_irg_t *birg = cg->birg;
-
- /* write the begin line (used by scripts processing the assembler... */
- be_emit_write_line();
- be_emit_cstring("# -- Begin ");
- be_emit_string(irg_name);
- be_emit_char('\n');
- be_emit_write_line();
-
- be_gas_emit_switch_section(GAS_SECTION_TEXT);
- be_dbg_method_begin(birg->main_env->db_handle, irg_ent, be_abi_get_stack_layout(birg->abi));
- ia32_emit_align_func(cpu);
- if (get_entity_visibility(irg_ent) == visibility_external_visible) {
- be_emit_cstring(".global ");
- be_emit_string(irg_name);
- be_emit_char('\n');
- be_emit_write_line();
- }
- ia32_emit_function_object(irg_name);
- be_emit_string(irg_name);
- be_emit_cstring(":\n");
- be_emit_write_line();
-}
-
-/**
- * Emits code for function end
- */
-static void ia32_emit_func_epilog(ir_graph *irg)
-{
- const char *irg_name = get_entity_ld_name(get_irg_entity(irg));
- const be_irg_t *birg = cg->birg;
-
- ia32_emit_function_size(irg_name);
- be_dbg_method_end(birg->main_env->db_handle);
-
- be_emit_cstring("# -- End ");
- be_emit_string(irg_name);
- be_emit_char('\n');
- be_emit_write_line();
-
- be_emit_char('\n');
- be_emit_write_line();
-}
+typedef struct exc_entry {
+ ir_node *exc_instr; /**< The instruction that can issue an exception. */
+ ir_node *block; /**< The block to branch to when the exception is raised. */
+} exc_entry;
/**
* Block-walker:
- * Sets labels for control flow nodes (jump target)
+ * Sets labels for control flow nodes (jump target).
+ * Links control predecessors to their destination blocks.
*/
static void ia32_gen_labels(ir_node *block, void *data)
{
+ exc_entry **exc_list = data;
ir_node *pred;
- int n = get_Block_n_cfgpreds(block);
- (void) data;
+ int n;
- for (n--; n >= 0; n--) {
+ for (n = get_Block_n_cfgpreds(block) - 1; n >= 0; --n) {
pred = get_Block_cfgpred(block, n);
set_irn_link(pred, block);
+
+ pred = skip_Proj(pred);
+ if (is_ia32_irn(pred) && get_ia32_exc_label(pred)) {
+ exc_entry e;
+
+ e.exc_instr = pred;
+ e.block = block;
+ ARR_APP1(exc_entry, *exc_list, e);
+ set_irn_link(pred, block);
+ }
}
}
/**
- * Emit an exception label if the current instruction can fail.
+ * Assign and emit an exception label if the current instruction can fail.
*/
-void ia32_emit_exc_label(const ir_node *node)
+void ia32_assign_exc_label(ir_node *node)
{
if (get_ia32_exc_label(node)) {
- be_emit_irprintf(".EXL%u\n", 0);
+ /* assign a new ID to the instruction */
+ set_ia32_exc_label_id(node, ++exc_label_id);
+ /* print it */
+ ia32_emit_exc_label(node);
+ be_emit_char(':');
+ be_emit_pad_comment();
+ be_emit_cstring("/* exception to Block ");
+ ia32_emit_cfop_target(node);
+ be_emit_cstring(" */\n");
be_emit_write_line();
}
}
+/**
+ * Compare two exception_entries.
+ */
+static int cmp_exc_entry(const void *a, const void *b)
+{
+ const exc_entry *ea = a;
+ const exc_entry *eb = b;
+
+ if (get_ia32_exc_label_id(ea->exc_instr) < get_ia32_exc_label_id(eb->exc_instr))
+ return -1;
+ return +1;
+}
+
/**
* Main driver. Emits the code for one routine.
*/
void ia32_gen_routine(ia32_code_gen_t *ia32_cg, ir_graph *irg)
{
- ir_node *block;
- ir_node *last_block = NULL;
+ ir_entity *entity = get_irg_entity(irg);
+ exc_entry *exc_list = NEW_ARR_F(exc_entry, 0);
int i, n;
cg = ia32_cg;
- isa = (const ia32_isa_t*) cg->arch_env->isa;
+ isa = (const ia32_isa_t*) cg->arch_env;
arch_env = cg->arch_env;
+ do_pic = cg->birg->main_env->options->pic;
ia32_register_emitters();
- ia32_emit_func_prolog(irg);
- irg_block_walk_graph(irg, ia32_gen_labels, NULL, NULL);
+ get_unique_label(pic_base_label, sizeof(pic_base_label), ".PIC_BASE");
+
+ be_dbg_method_begin(entity, be_abi_get_stack_layout(cg->birg->abi));
+ be_gas_emit_function_prolog(entity, ia32_cg_config.function_alignment);
+
+ /* we use links to point to target blocks */
+ ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK);
+ irg_block_walk_graph(irg, ia32_gen_labels, NULL, &exc_list);
+ /* initialize next block links */
n = ARR_LEN(cg->blk_sched);
- for (i = 0; i < n;) {
- ir_node *next_bl;
+ for (i = 0; i < n; ++i) {
+ ir_node *block = cg->blk_sched[i];
+ ir_node *prev = i > 0 ? cg->blk_sched[i-1] : NULL;
- block = cg->blk_sched[i];
- ++i;
- next_bl = i < n ? cg->blk_sched[i] : NULL;
+ set_irn_link(block, prev);
+ }
+
+ for (i = 0; i < n; ++i) {
+ ir_node *block = cg->blk_sched[i];
- /* set here the link. the emitter expects to find the next block here */
- set_irn_link(block, next_bl);
- ia32_gen_block(block, last_block);
- last_block = block;
+ ia32_gen_block(block);
}
- ia32_emit_func_epilog(irg);
+ be_gas_emit_function_epilog(entity);
+ be_dbg_method_end();
+ be_emit_char('\n');
+ be_emit_write_line();
+
+ ir_free_resources(irg, IR_RESOURCE_IRN_LINK);
+
+ /* Sort the exception table using the exception label IDs.
+ These ascend with ascending code addresses. */
+ qsort(exc_list, ARR_LEN(exc_list), sizeof(exc_list[0]), cmp_exc_entry);
+ {
+ int i;
+
+ for (i = 0; i < ARR_LEN(exc_list); ++i) {
+ be_emit_cstring("\t.long ");
+ ia32_emit_exc_label(exc_list[i].exc_instr);
+ be_emit_char('\n');
+ be_emit_cstring("\t.long ");
+ ia32_emit_block_name(exc_list[i].block);
+ be_emit_char('\n');
+ }
+ }
+ DEL_ARR_F(exc_list);
}
void ia32_init_emitter(void)