/*
- * Copyright (C) 1995-2007 University of Karlsruhe. All right reserved.
+ * Copyright (C) 1995-2008 University of Karlsruhe. All right reserved.
*
* This file is part of libFirm.
*
#include "config.h"
#endif
-#include <libcore/lc_opts.h>
-#include <libcore/lc_opts_enum.h>
+#include "lc_opts.h"
+#include "lc_opts_enum.h"
#include <math.h>
#include "irgopt.h"
#include "irbitset.h"
#include "irgopt.h"
+#include "irdump_grgen.h"
#include "pdeq.h"
#include "pset.h"
#include "debug.h"
#include "error.h"
#include "xmalloc.h"
#include "irtools.h"
+#include "iroptimize.h"
+#include "instrument.h"
#include "../beabi.h"
#include "../beirg_t.h"
#include "ia32_finish.h"
#include "ia32_util.h"
#include "ia32_fpu.h"
+#include "ia32_architecture.h"
+
+#ifdef FIRM_GRGEN_BE
+#include "ia32_pbqp_transform.h"
+#endif
DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
&ia32_xmm_regs[REG_XMM_NOREG]);
}
-/* Creates the unique per irg FP NoReg node. */
-ir_node *ia32_new_NoReg_fp(ia32_code_gen_t *cg) {
- return USE_SSE2(cg) ? ia32_new_NoReg_xmm(cg) : ia32_new_NoReg_vfp(cg);
-}
-
ir_node *ia32_new_Unknown_gp(ia32_code_gen_t *cg) {
return create_const(cg, &cg->unknown_gp, new_rd_ia32_Unknown_GP,
&ia32_gp_regs[REG_GP_UKNWN]);
if (req->cls == &ia32_reg_classes[CLASS_ia32_gp])
return ia32_new_NoReg_gp(cg);
- return ia32_new_NoReg_fp(cg);
+ if (ia32_cg_config.use_sse2) {
+ return ia32_new_NoReg_xmm(cg);
+ } else {
+ return ia32_new_NoReg_vfp(cg);
+ }
}
/**************************************************
set_ia32_frame_ent(irn, ent);
}
-static void ia32_set_frame_offset(const void *self, ir_node *irn, int bias) {
+static void ia32_set_frame_offset(const void *self, ir_node *irn, int bias)
+{
const ia32_irn_ops_t *ops = self;
- if (get_ia32_frame_ent(irn)) {
- if (is_ia32_Pop(irn)) {
- int omit_fp = be_abi_omit_fp(ops->cg->birg->abi);
- if (omit_fp) {
- /* Pop nodes modify the stack pointer before calculating the destination
- * address, so fix this here
- */
- bias -= 4;
- }
- }
+ if (get_ia32_frame_ent(irn) == NULL)
+ return;
- add_ia32_am_offs_int(irn, bias);
+ if (is_ia32_Pop(irn)) {
+ int omit_fp = be_abi_omit_fp(ops->cg->birg->abi);
+ if (omit_fp) {
+ /* Pop nodes modify the stack pointer before calculating the
+ * destination address, so fix this here
+ */
+ bias -= 4;
+ }
}
+ add_ia32_am_offs_int(irn, bias);
}
static int ia32_get_sp_bias(const void *self, const ir_node *node)
if (env->flags.try_omit_fp) {
/* simply remove the stack frame here */
- curr_sp = be_new_IncSP(env->isa->sp, env->irg, bl, curr_sp, BE_STACK_FRAME_SIZE_SHRINK);
+ curr_sp = be_new_IncSP(env->isa->sp, env->irg, bl, curr_sp, BE_STACK_FRAME_SIZE_SHRINK, 0);
add_irn_dep(curr_sp, *mem);
} else {
- const ia32_isa_t *isa = (ia32_isa_t *)env->isa;
- ia32_code_gen_t *cg = isa->cg;
ir_mode *mode_bp = env->isa->bp->reg_class->mode;
ir_graph *irg = current_ir_graph;
- if (ARCH_AMD(isa->opt_arch)) {
+ if (ia32_cg_config.use_leave) {
ir_node *leave;
/* leave */
curr_bp = new_r_Proj(irg, bl, leave, mode_bp, pn_ia32_Leave_frame);
curr_sp = new_r_Proj(irg, bl, leave, get_irn_mode(curr_sp), pn_ia32_Leave_stack);
} else {
- ir_node *noreg = ia32_new_NoReg_gp(cg);
ir_node *pop;
/* the old SP is not needed anymore (kill the proj) */
be_node_set_flags(curr_sp, BE_OUT_POS(0), arch_irn_flags_ignore);
/* pop ebp */
- pop = new_rd_ia32_Pop(NULL, env->irg, bl, noreg, noreg, *mem, curr_sp);
+ pop = new_rd_ia32_Pop(NULL, env->irg, bl, *mem, curr_sp);
set_ia32_flags(pop, arch_irn_flags_ignore);
curr_bp = new_r_Proj(irg, bl, pop, mode_bp, pn_ia32_Pop_res);
curr_sp = new_r_Proj(irg, bl, pop, get_irn_mode(curr_sp), pn_ia32_Pop_stack);
*/
static int ia32_get_op_estimated_cost(const void *self, const ir_node *irn)
{
- int cost;
+ int cost;
ia32_op_type_t op_tp;
- const ia32_irn_ops_t *ops = self;
+ (void) self;
if (is_Proj(irn))
return 0;
if (is_ia32_CopyB(irn)) {
cost = 250;
- if (ARCH_INTEL(ops->cg->arch))
- cost += 150;
}
else if (is_ia32_CopyB_i(irn)) {
int size = get_ia32_copyb_size(irn);
cost = 20 + (int)ceil((4/3) * size);
- if (ARCH_INTEL(ops->cg->arch))
- cost += 150;
}
/* in case of address mode operations add additional cycles */
else if (op_tp == ia32_AddrModeD || op_tp == ia32_AddrModeS) {
}
/**
- * Check if irn can load it's operand at position i from memory (source addressmode).
+ * Check if irn can load its operand at position i from memory (source addressmode).
* @param self Pointer to irn ops itself
* @param irn The irn to be checked
* @param i The operands position
ia32_perform_memory_operand,
};
-ia32_irn_ops_t ia32_irn_ops = {
+static ia32_irn_ops_t ia32_irn_ops = {
&ia32_irn_ops_if,
NULL
};
* |___/
**************************************************/
+static ir_entity *mcount = NULL;
+
+#define ID(s) new_id_from_chars(s, sizeof(s) - 1)
+
static void ia32_before_abi(void *self) {
+ lower_mode_b_config_t lower_mode_b_config = {
+ mode_Iu, /* lowered mode */
+ mode_Bu, /* preferred mode for set */
+ 0, /* don't lower direct compares */
+ };
ia32_code_gen_t *cg = self;
- ir_lower_mode_b(cg->irg, mode_Iu, 0);
- if(cg->dump)
+ ir_lower_mode_b(cg->irg, &lower_mode_b_config);
+ if (cg->dump)
be_dump(cg->irg, "-lower_modeb", dump_ir_block_graph_sched);
+ if (cg->gprof) {
+ if (mcount == NULL) {
+ ir_type *tp = new_type_method(ID("FKT.mcount"), 0, 0);
+ mcount = new_entity(get_glob_type(), ID("mcount"), tp);
+ /* FIXME: enter the right ld_ident here */
+ set_entity_ld_ident(mcount, get_entity_ident(mcount));
+ set_entity_visibility(mcount, visibility_external_allocated);
+ }
+ instrument_initcall(cg->irg, mcount);
+ }
}
/**
edges_activate(cg->irg);
#endif
- if(cg->dump)
+ if (cg->dump)
be_dump(cg->irg, "-pre_transform", dump_ir_block_graph_sched);
- /* transform nodes into assembler instructions */
+#ifdef FIRM_GRGEN_BE
+ /* transform nodes into assembler instructions by PBQP magic */
+ ia32_transform_graph_by_pbqp(cg);
+#endif
+
+ if (cg->dump)
+ be_dump(cg->irg, "-after_pbqp_transform", dump_ir_block_graph_sched);
+
+ /* transform remaining nodes into assembler instructions */
ia32_transform_graph(cg);
/* do local optimisations (mainly CSE) */
clear_ia32_am_sc_sign(node);
/* rewire mem-proj */
- if(get_irn_mode(node) == mode_T) {
+ if (get_irn_mode(node) == mode_T) {
mem_proj = NULL;
foreach_out_edge(node, edge) {
ir_node *out = get_edge_src_irn(edge);
}
set_ia32_op_type(node, ia32_Normal);
- if(sched_is_scheduled(node))
+ if (sched_is_scheduled(node))
sched_add_before(node, load);
}
ir_node *block;
ir_node *copy;
- if(is_Block(after)) {
+ if (is_Block(after)) {
block = after;
} else {
block = get_nodes_block(after);
}
if (mode_is_float(spillmode)) {
- if (USE_SSE2(cg))
+ if (ia32_cg_config.use_sse2)
new_op = new_rd_ia32_xLoad(dbg, irg, block, ptr, noreg, mem, spillmode);
else
new_op = new_rd_ia32_vfld(dbg, irg, block, ptr, noreg, mem, spillmode);
}
if (mode_is_float(mode)) {
- if (USE_SSE2(cg))
+ if (ia32_cg_config.use_sse2)
store = new_rd_ia32_xStore(dbg, irg, block, ptr, noreg, nomem, val);
else
store = new_rd_ia32_vfst(dbg, irg, block, ptr, noreg, nomem, val, mode);
ir_node *noreg = ia32_new_NoReg_gp(cg);
ir_node *frame = get_irg_frame(irg);
- ir_node *pop = new_rd_ia32_Pop(dbg, irg, block, frame, noreg, new_NoMem(), sp);
+ ir_node *pop = new_rd_ia32_PopMem(dbg, irg, block, frame, noreg, new_NoMem(), sp);
set_ia32_frame_ent(pop, ent);
set_ia32_use_frame(pop);
arity = be_get_MemPerm_entity_arity(node);
pops = alloca(arity * sizeof(pops[0]));
- // create pushs
+ /* create Pushs */
for(i = 0; i < arity; ++i) {
ir_entity *inent = be_get_MemPerm_in_entity(node, i);
ir_entity *outent = be_get_MemPerm_out_entity(node, i);
ir_type *enttype = get_entity_type(inent);
- int entbits = get_type_size_bits(enttype);
- int entbits2 = get_type_size_bits(get_entity_type(outent));
+ unsigned entsize = get_type_size_bytes(enttype);
+ unsigned entsize2 = get_type_size_bytes(get_entity_type(outent));
ir_node *mem = get_irn_n(node, i + 1);
ir_node *push;
/* work around cases where entities have different sizes */
- if(entbits2 < entbits)
- entbits = entbits2;
- assert( (entbits == 32 || entbits == 64) && "spillslot on x86 should be 32 or 64 bit");
+ if(entsize2 < entsize)
+ entsize = entsize2;
+ assert( (entsize == 4 || entsize == 8) && "spillslot on x86 should be 32 or 64 bit");
push = create_push(cg, node, node, sp, mem, inent);
sp = create_spproj(cg, node, push, pn_ia32_Push_stack);
- if(entbits == 64) {
- // add another push after the first one
+ if(entsize == 8) {
+ /* add another push after the first one */
push = create_push(cg, node, node, sp, mem, inent);
add_ia32_am_offs_int(push, 4);
sp = create_spproj(cg, node, push, pn_ia32_Push_stack);
set_irn_n(node, i, new_Bad());
}
- // create pops
+ /* create pops */
for(i = arity - 1; i >= 0; --i) {
ir_entity *inent = be_get_MemPerm_in_entity(node, i);
ir_entity *outent = be_get_MemPerm_out_entity(node, i);
ir_type *enttype = get_entity_type(outent);
- int entbits = get_type_size_bits(enttype);
- int entbits2 = get_type_size_bits(get_entity_type(inent));
+ unsigned entsize = get_type_size_bytes(enttype);
+ unsigned entsize2 = get_type_size_bytes(get_entity_type(inent));
ir_node *pop;
/* work around cases where entities have different sizes */
- if(entbits2 < entbits)
- entbits = entbits2;
- assert( (entbits == 32 || entbits == 64) && "spillslot on x86 should be 32 or 64 bit");
+ if(entsize2 < entsize)
+ entsize = entsize2;
+ assert( (entsize == 4 || entsize == 8) && "spillslot on x86 should be 32 or 64 bit");
pop = create_pop(cg, node, node, sp, outent);
sp = create_spproj(cg, node, pop, pn_ia32_Pop_stack);
- if(entbits == 64) {
+ if(entsize == 8) {
add_ia32_am_offs_int(pop, 4);
- // add another pop after the first one
+ /* add another pop after the first one */
pop = create_pop(cg, node, node, sp, outent);
sp = create_spproj(cg, node, pop, pn_ia32_Pop_stack);
}
keep = be_new_Keep(&ia32_reg_classes[CLASS_ia32_gp], irg, block, 1, in);
sched_add_before(node, keep);
- // exchange memprojs
+ /* exchange memprojs */
foreach_out_edge_safe(node, edge, next) {
ir_node *proj = get_edge_src_irn(edge);
int p = get_Proj_proj(proj);
set_Proj_proj(proj, pn_ia32_Pop_M);
}
- // remove memperm
+ /* remove memperm */
arity = get_irn_arity(node);
for(i = 0; i < arity; ++i) {
set_irn_n(node, i, new_Bad());
be_node_needs_frame_entity(env, node, mode, align);
} else if (is_ia32_vfild(node) || is_ia32_xLoad(node)
|| is_ia32_vfld(node)) {
- const ir_mode *mode = get_ia32_ls_mode(node);
- int align = 4;
+ const ir_mode *mode = get_ia32_ls_mode(node);
+ int align = 4;
be_node_needs_frame_entity(env, node, mode, align);
} else if(is_ia32_FldCW(node)) {
- const ir_mode *mode = ia32_reg_classes[CLASS_ia32_fp_cw].mode;
- int align = 4;
+ /* although 2 bytes would be enough, 4 bytes performs best */
+ const ir_mode *mode = mode_Iu;
+ int align = 4;
be_node_needs_frame_entity(env, node, mode, align);
} else {
#ifndef NDEBUG
free(cg);
}
+/**
+ * Returns the node representing the PIC base.
+ */
+static ir_node *ia32_get_pic_base(void *self) {
+ ir_node *block;
+ ia32_code_gen_t *cg = self;
+ ir_node *get_eip = cg->get_eip;
+ if (get_eip != NULL)
+ return get_eip;
+
+ block = get_irg_start_block(cg->irg);
+ get_eip = new_rd_ia32_GetEIP(NULL, cg->irg, block);
+ cg->get_eip = get_eip;
+
+ add_irn_dep(get_eip, get_irg_frame(cg->irg));
+
+ return get_eip;
+}
+
static void *ia32_cg_init(be_irg_t *birg);
static const arch_code_generator_if_t ia32_code_gen_if = {
ia32_cg_init,
+ ia32_get_pic_base, /* return node used as base in pic code addresses */
ia32_before_abi, /* before abi introduce hook */
ia32_prepare_graph,
NULL, /* spill */
* Initializes a IA32 code generator.
*/
static void *ia32_cg_init(be_irg_t *birg) {
- ia32_isa_t *isa = (ia32_isa_t *)birg->main_env->arch_env->isa;
+ ia32_isa_t *isa = (ia32_isa_t *)birg->main_env->arch_env.isa;
ia32_code_gen_t *cg = xcalloc(1, sizeof(*cg));
cg->impl = &ia32_code_gen_if;
cg->irg = birg->irg;
cg->reg_set = new_set(ia32_cmp_irn_reg_assoc, 1024);
- cg->arch_env = birg->main_env->arch_env;
+ cg->arch_env = &birg->main_env->arch_env;
cg->isa = isa;
cg->birg = birg;
cg->blk_sched = NULL;
- cg->fp_kind = isa->fp_kind;
cg->dump = (birg->main_env->options->dump_flags & DUMP_BE) ? 1 : 0;
+ cg->gprof = (birg->main_env->options->gprof) ? 1 : 0;
- /* copy optimizations from isa for easier access */
- cg->opt = isa->opt;
- cg->arch = isa->arch;
- cg->opt_arch = isa->opt_arch;
+ if (cg->gprof) {
+ /* Linux gprof implementation needs base pointer */
+ birg->main_env->options->omit_fp = 0;
+ }
/* enter it */
isa->cg = cg;
&ia32_gp_regs[REG_ESP], /* stack pointer register */
&ia32_gp_regs[REG_EBP], /* base pointer register */
-1, /* stack direction */
+ 16, /* stack alignment */
NULL, /* main environment */
7, /* costs for a spill instruction */
5, /* costs for a reload instruction */
NULL, /* 8bit register names high */
NULL, /* types */
NULL, /* tv_ents */
- (0 |
- IA32_OPT_INCDEC | /* optimize add 1, sub 1 into inc/dec default: on */
- IA32_OPT_CC),
- arch_pentium_4, /* instruction architecture */
- arch_pentium_4, /* optimize for architecture */
- fp_x87, /* floating point mode */
NULL, /* current code generator */
+ NULL, /* abstract machine */
#ifndef NDEBUG
NULL, /* name obstack */
- 0 /* name obst size */
#endif
};
-static void set_arch_costs(enum cpu_support arch);
-
/**
* Initializes the backend ISA.
*/
ia32_register_init();
ia32_create_opcodes();
- set_arch_costs(isa->opt_arch);
-
- if ((ARCH_INTEL(isa->arch) && isa->arch < arch_pentium_4) ||
- (ARCH_AMD(isa->arch) && isa->arch < arch_athlon))
- /* no SSE2 for these cpu's */
- isa->fp_kind = fp_x87;
-
- if (ARCH_INTEL(isa->opt_arch) && isa->opt_arch >= arch_pentium_4) {
- /* Pentium 4 don't like inc and dec instructions */
- isa->opt &= ~IA32_OPT_INCDEC;
- }
-
be_emit_init(file_handle);
isa->regs_16bit = pmap_create();
isa->regs_8bit = pmap_create();
* @param mode The mode in question.
* @return A register class which can hold values of the given mode.
*/
-const arch_register_class_t *ia32_get_reg_class_for_mode(const void *self, const ir_mode *mode) {
- const ia32_isa_t *isa = self;
+const arch_register_class_t *ia32_get_reg_class_for_mode(const void *self,
+ const ir_mode *mode)
+{
+ (void) self;
+
if (mode_is_float(mode)) {
- return USE_SSE2(isa) ? &ia32_reg_classes[CLASS_ia32_xmm] : &ia32_reg_classes[CLASS_ia32_vfp];
+ return ia32_cg_config.use_sse2 ? &ia32_reg_classes[CLASS_ia32_xmm] : &ia32_reg_classes[CLASS_ia32_vfp];
}
else
return &ia32_reg_classes[CLASS_ia32_gp];
* @param method_type The type of the method (procedure) in question.
* @param abi The abi object to be modified
*/
-static void ia32_get_call_abi(const void *self, ir_type *method_type, be_abi_call_t *abi) {
- const ia32_isa_t *isa = self;
+static void ia32_get_call_abi(const void *self, ir_type *method_type,
+ be_abi_call_t *abi)
+{
ir_type *tp;
ir_mode *mode;
unsigned cc;
int n, i, regnum;
be_abi_call_flags_t call_flags = be_abi_call_get_flags(abi);
+ (void) self;
/* set abi flags for calls */
call_flags.bits.left_to_right = 0; /* always last arg first on stack */
} else {
cc = get_method_calling_convention(method_type);
if (get_method_additional_properties(method_type) & mtp_property_private
- && (ia32_isa_template.opt & IA32_OPT_CC)) {
+ && (ia32_cg_config.optimize_cc)) {
/* set the calling conventions to register parameter */
cc = (cc & ~cc_bits) | cc_reg_param;
}
tp = get_method_param_type(method_type, i);
mode = get_type_mode(tp);
if (mode != NULL) {
- reg = ia32_get_RegParam_reg(isa->cg, cc, regnum, mode);
+ reg = ia32_get_RegParam_reg(cc, regnum, mode);
}
if (reg != NULL) {
be_abi_call_param_reg(abi, i, reg);
(void)i;
(void)j;
+ if(!ia32_cg_config.use_cmov) {
+ /* TODO: we could still handle abs(x)... */
+ return 0;
+ }
+
/* we can't handle psis with 64bit compares yet */
if(is_Proj(sel)) {
ir_node *pred = get_Proj_pred(sel);
return 1;
}
-typedef struct insn_const {
- int add_cost; /**< cost of an add instruction */
- int lea_cost; /**< cost of a lea instruction */
- int const_shf_cost; /**< cost of a constant shift instruction */
- int cost_mul_start; /**< starting cost of a multiply instruction */
- int cost_mul_bit; /**< cost of multiply for every set bit */
-} insn_const;
-
-/* costs for the i386 */
-static const insn_const i386_cost = {
- 1, /* cost of an add instruction */
- 1, /* cost of a lea instruction */
- 2, /* cost of a constant shift instruction */
- 6, /* starting cost of a multiply instruction */
- 1 /* cost of multiply for every set bit */
-};
-
-/* costs for the i486 */
-static const insn_const i486_cost = {
- 1, /* cost of an add instruction */
- 1, /* cost of a lea instruction */
- 2, /* cost of a constant shift instruction */
- 12, /* starting cost of a multiply instruction */
- 1 /* cost of multiply for every set bit */
-};
-
-/* costs for the Pentium */
-static const insn_const pentium_cost = {
- 1, /* cost of an add instruction */
- 1, /* cost of a lea instruction */
- 1, /* cost of a constant shift instruction */
- 11, /* starting cost of a multiply instruction */
- 0 /* cost of multiply for every set bit */
-};
-
-/* costs for the Pentium Pro */
-static const insn_const pentiumpro_cost = {
- 1, /* cost of an add instruction */
- 1, /* cost of a lea instruction */
- 1, /* cost of a constant shift instruction */
- 4, /* starting cost of a multiply instruction */
- 0 /* cost of multiply for every set bit */
-};
-
-/* costs for the K6 */
-static const insn_const k6_cost = {
- 1, /* cost of an add instruction */
- 2, /* cost of a lea instruction */
- 1, /* cost of a constant shift instruction */
- 3, /* starting cost of a multiply instruction */
- 0 /* cost of multiply for every set bit */
-};
-
-/* costs for the Athlon */
-static const insn_const athlon_cost = {
- 1, /* cost of an add instruction */
- 2, /* cost of a lea instruction */
- 1, /* cost of a constant shift instruction */
- 5, /* starting cost of a multiply instruction */
- 0 /* cost of multiply for every set bit */
-};
-
-/* costs for the Pentium 4 */
-static const insn_const pentium4_cost = {
- 1, /* cost of an add instruction */
- 3, /* cost of a lea instruction */
- 4, /* cost of a constant shift instruction */
- 15, /* starting cost of a multiply instruction */
- 0 /* cost of multiply for every set bit */
-};
-
-/* costs for the Core */
-static const insn_const core_cost = {
- 1, /* cost of an add instruction */
- 1, /* cost of a lea instruction */
- 1, /* cost of a constant shift instruction */
- 10, /* starting cost of a multiply instruction */
- 0 /* cost of multiply for every set bit */
-};
-
-/* costs for the generic */
-static const insn_const generic_cost = {
- 1, /* cost of an add instruction */
- 2, /* cost of a lea instruction */
- 1, /* cost of a constant shift instruction */
- 4, /* starting cost of a multiply instruction */
- 0 /* cost of multiply for every set bit */
-};
-
-static const insn_const *arch_costs = &generic_cost;
-
-static void set_arch_costs(enum cpu_support arch) {
- switch (arch) {
- case arch_i386:
- arch_costs = &i386_cost;
- break;
- case arch_i486:
- arch_costs = &i486_cost;
- break;
- case arch_pentium:
- case arch_pentium_mmx:
- arch_costs = &pentium_cost;
- break;
- case arch_pentium_pro:
- case arch_pentium_2:
- case arch_pentium_3:
- arch_costs = &pentiumpro_cost;
- break;
- case arch_pentium_4:
- arch_costs = &pentium4_cost;
- break;
- case arch_pentium_m:
- arch_costs = &pentiumpro_cost;
- break;
- case arch_core:
- arch_costs = &core_cost;
- break;
- case arch_k6:
- arch_costs = &k6_cost;
- break;
- case arch_athlon:
- case arch_athlon_xp:
- case arch_athlon_64:
- case arch_opteron:
- arch_costs = &athlon_cost;
- break;
- case arch_generic:
- default:
- arch_costs = &generic_cost;
- }
-}
-
-/**
- * Evaluate a given simple instruction.
- */
-static int ia32_evaluate_insn(insn_kind kind, tarval *tv) {
- int cost;
-
- switch (kind) {
- case MUL:
- cost = arch_costs->cost_mul_start;
- if (arch_costs->cost_mul_bit > 0) {
- char *bitstr = get_tarval_bitpattern(tv);
- int i;
-
- for (i = 0; bitstr[i] != '\0'; ++i) {
- if (bitstr[i] == '1') {
- cost += arch_costs->cost_mul_bit;
- }
- }
- free(bitstr);
- }
- return cost;
- case LEA:
- return arch_costs->lea_cost;
- case ADD:
- case SUB:
- return arch_costs->add_cost;
- case SHIFT:
- return arch_costs->const_shf_cost;
- case ZERO:
- return arch_costs->add_cost;
- default:
- return 1;
- }
-}
-
/**
* Returns the libFirm configuration parameter for this backend.
*/
NULL, /* will be set below */
};
+ ia32_setup_cg_config();
+
p.dep_param = &ad;
p.if_conv_info = &ifconv;
return &p;
}
-/* instruction set architectures. */
-static const lc_opt_enum_int_items_t arch_items[] = {
- { "386", arch_i386, },
- { "486", arch_i486, },
- { "pentium", arch_pentium, },
- { "586", arch_pentium, },
- { "pentiumpro", arch_pentium_pro, },
- { "686", arch_pentium_pro, },
- { "pentiummmx", arch_pentium_mmx, },
- { "pentium2", arch_pentium_2, },
- { "p2", arch_pentium_2, },
- { "pentium3", arch_pentium_3, },
- { "p3", arch_pentium_3, },
- { "pentium4", arch_pentium_4, },
- { "p4", arch_pentium_4, },
- { "pentiumm", arch_pentium_m, },
- { "pm", arch_pentium_m, },
- { "core", arch_core, },
- { "k6", arch_k6, },
- { "athlon", arch_athlon, },
- { "athlon-xp", arch_athlon_xp, },
- { "athlon64", arch_athlon_64, },
- { "opteron", arch_opteron, },
- { "generic", arch_generic, },
- { NULL, 0 }
-};
-
-static lc_opt_enum_int_var_t arch_var = {
- &ia32_isa_template.arch, arch_items
-};
-
-static lc_opt_enum_int_var_t opt_arch_var = {
- &ia32_isa_template.opt_arch, arch_items
-};
-
-static const lc_opt_enum_int_items_t fp_unit_items[] = {
- { "x87" , fp_x87 },
- { "sse2", fp_sse2 },
- { NULL, 0 }
-};
-
-static lc_opt_enum_int_var_t fp_unit_var = {
- &ia32_isa_template.fp_kind, fp_unit_items
-};
-
static const lc_opt_enum_int_items_t gas_items[] = {
- { "normal", GAS_FLAVOUR_NORMAL },
+ { "elf", GAS_FLAVOUR_ELF },
{ "mingw", GAS_FLAVOUR_MINGW },
+ { "yasm", GAS_FLAVOUR_YASM },
+ { "macho", GAS_FLAVOUR_MACH_O },
{ NULL, 0 }
};
};
static const lc_opt_table_entry_t ia32_options[] = {
- LC_OPT_ENT_ENUM_INT("arch", "select the instruction architecture", &arch_var),
- LC_OPT_ENT_ENUM_INT("opt", "optimize for instruction architecture", &opt_arch_var),
- LC_OPT_ENT_ENUM_INT("fpunit", "select the floating point unit", &fp_unit_var),
- LC_OPT_ENT_NEGBIT("nooptcc", "do not optimize calling convention", &ia32_isa_template.opt, IA32_OPT_CC),
- LC_OPT_ENT_ENUM_INT("gasmode", "set the GAS compatibility mode", &gas_var),
+ LC_OPT_ENT_ENUM_INT("gasmode", "set the GAS compatibility mode", &gas_var),
+ LC_OPT_ENT_INT("stackalign", "set stack alignment for calls",
+ &ia32_isa_template.arch_isa.stack_alignment),
LC_OPT_LAST
};
void be_init_arch_ia32(void)
{
- lc_opt_entry_t *be_grp = lc_opt_get_grp(firm_opt_get_root(), "be");
+ lc_opt_entry_t *be_grp = lc_opt_get_grp(firm_opt_get_root(), "be");
lc_opt_entry_t *ia32_grp = lc_opt_get_grp(be_grp, "ia32");
lc_opt_add_table(ia32_grp, ia32_options);
ia32_init_optimize();
ia32_init_transform();
ia32_init_x87();
+ ia32_init_architecture();
}
BE_REGISTER_MODULE_CONSTRUCTOR(be_init_arch_ia32);