#include "instrument.h"
#include "../beabi.h"
-#include "../beirg_t.h"
-#include "../benode_t.h"
+#include "../beirg.h"
+#include "../benode.h"
#include "../belower.h"
-#include "../besched_t.h"
+#include "../besched.h"
#include "be.h"
#include "../be_t.h"
#include "../beirgmod.h"
#include "../bestate.h"
#include "../beflags.h"
#include "../betranshlp.h"
+#include "../belistsched.h"
#include "bearch_ia32_t.h"
DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
-/* TODO: ugly */
-static set *cur_reg_set = NULL;
-
ir_mode *mode_fpcw = NULL;
ia32_code_gen_t *ia32_current_cg = NULL;
typedef ir_node *(*create_const_node_func) (dbg_info *dbg, ir_node *block);
/**
- * Used to create a Pseudo-Register or Unknown node.
+ * Used to create per-graph unique pseudo nodes.
*/
static inline ir_node *create_const(ia32_code_gen_t *cg, ir_node **place,
create_const_node_func func,
}
/* Creates the unique per irg GP NoReg node. */
-ir_node *ia32_new_NoReg_gp(ia32_code_gen_t *cg) {
+ir_node *ia32_new_NoReg_gp(ia32_code_gen_t *cg)
+{
return create_const(cg, &cg->noreg_gp, new_bd_ia32_NoReg_GP,
&ia32_gp_regs[REG_GP_NOREG]);
}
-ir_node *ia32_new_NoReg_vfp(ia32_code_gen_t *cg) {
+ir_node *ia32_new_NoReg_vfp(ia32_code_gen_t *cg)
+{
return create_const(cg, &cg->noreg_vfp, new_bd_ia32_NoReg_VFP,
&ia32_vfp_regs[REG_VFP_NOREG]);
}
-ir_node *ia32_new_NoReg_xmm(ia32_code_gen_t *cg) {
+ir_node *ia32_new_NoReg_xmm(ia32_code_gen_t *cg)
+{
return create_const(cg, &cg->noreg_xmm, new_bd_ia32_NoReg_XMM,
&ia32_xmm_regs[REG_XMM_NOREG]);
}
-ir_node *ia32_new_Unknown_gp(ia32_code_gen_t *cg) {
+ir_node *ia32_new_Unknown_gp(ia32_code_gen_t *cg)
+{
return create_const(cg, &cg->unknown_gp, new_bd_ia32_Unknown_GP,
&ia32_gp_regs[REG_GP_UKNWN]);
}
-ir_node *ia32_new_Unknown_vfp(ia32_code_gen_t *cg) {
+ir_node *ia32_new_Unknown_vfp(ia32_code_gen_t *cg)
+{
return create_const(cg, &cg->unknown_vfp, new_bd_ia32_Unknown_VFP,
&ia32_vfp_regs[REG_VFP_UKNWN]);
}
-ir_node *ia32_new_Unknown_xmm(ia32_code_gen_t *cg) {
+ir_node *ia32_new_Unknown_xmm(ia32_code_gen_t *cg)
+{
return create_const(cg, &cg->unknown_xmm, new_bd_ia32_Unknown_XMM,
&ia32_xmm_regs[REG_XMM_UKNWN]);
}
-ir_node *ia32_new_Fpu_truncate(ia32_code_gen_t *cg) {
+ir_node *ia32_new_Fpu_truncate(ia32_code_gen_t *cg)
+{
return create_const(cg, &cg->fpu_trunc_mode, new_bd_ia32_ChangeCW,
&ia32_fp_cw_regs[REG_FPCW]);
}
* |___/
**************************************************/
-/**
- * Return register requirements for an ia32 node.
- * If the node returns a tuple (mode_T) then the proj's
- * will be asked for this information.
- */
-static const arch_register_req_t *ia32_get_irn_reg_req(const ir_node *node,
- int pos)
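+/**
+ * SwitchJmp Projs carry no register constraints,
+ * so every request is answered with arch_no_register_req.
+ */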
+static const arch_register_req_t *get_ia32_SwitchJmp_out_req(
+ const ir_node *node, int pos)
{
- ir_mode *mode = get_irn_mode(node);
- long node_pos;
-
- if (mode == mode_X || is_Block(node)) {
- return arch_no_register_req;
- }
-
- if (mode == mode_T && pos < 0) {
- return arch_no_register_req;
- }
-
- node_pos = pos == -1 ? 0 : pos;
- if (is_Proj(node)) {
- if (mode == mode_M || pos >= 0) {
- return arch_no_register_req;
- }
-
- node_pos = (pos == -1) ? get_Proj_proj(node) : pos;
- node = skip_Proj_const(node);
- }
-
- if (is_ia32_irn(node)) {
- const arch_register_req_t *req;
- if (pos >= 0)
- req = get_ia32_in_req(node, pos);
- else
- req = get_ia32_out_req(node, node_pos);
-
- assert(req != NULL);
-
- return req;
- }
-
- /* unknowns should be transformed already */
+ (void) node;
+ (void) pos;
return arch_no_register_req;
}
-static arch_irn_class_t ia32_classify(const ir_node *irn) {
+static arch_irn_class_t ia32_classify(const ir_node *irn)
+{
arch_irn_class_t classification = 0;
- irn = skip_Proj_const(irn);
-
- if (is_cfop(irn))
- classification |= arch_irn_class_branch;
-
- if (! is_ia32_irn(irn))
- return classification;
+ assert(is_ia32_irn(irn));
if (is_ia32_is_reload(irn))
classification |= arch_irn_class_reload;
ir_graph *irg; /**< The associated graph. */
} ia32_abi_env_t;
-static ir_entity *ia32_get_frame_entity(const ir_node *irn) {
+static ir_entity *ia32_get_frame_entity(const ir_node *irn)
+{
return is_ia32_irn(irn) ? get_ia32_frame_ent(irn) : NULL;
}
-static void ia32_set_frame_entity(ir_node *irn, ir_entity *ent) {
+static void ia32_set_frame_entity(ir_node *irn, ir_entity *ent)
+{
set_ia32_frame_ent(irn, ent);
}
ia32_curr_fp_ommitted = env->flags.try_omit_fp;
if (! env->flags.try_omit_fp) {
- ir_graph *irg = env->irg;
- ir_node *bl = get_irg_start_block(irg);
+ ir_node *bl = get_irg_start_block(env->irg);
ir_node *curr_sp = be_abi_reg_map_get(reg_map, arch_env->sp);
ir_node *curr_bp = be_abi_reg_map_get(reg_map, arch_env->bp);
ir_node *noreg = ia32_new_NoReg_gp(cg);
/* push ebp */
push = new_bd_ia32_Push(NULL, bl, noreg, noreg, *mem, curr_bp, curr_sp);
- curr_sp = new_r_Proj(irg, bl, push, get_irn_mode(curr_sp), pn_ia32_Push_stack);
- *mem = new_r_Proj(irg, bl, push, mode_M, pn_ia32_Push_M);
+ curr_sp = new_r_Proj(bl, push, get_irn_mode(curr_sp), pn_ia32_Push_stack);
+ *mem = new_r_Proj(bl, push, mode_M, pn_ia32_Push_M);
/* the push must have SP out register */
arch_set_irn_register(curr_sp, arch_env->sp);
*stack_bias -= 4;
/* move esp to ebp */
- curr_bp = be_new_Copy(arch_env->bp->reg_class, irg, bl, curr_sp);
+ curr_bp = be_new_Copy(arch_env->bp->reg_class, bl, curr_sp);
be_set_constr_single_reg_out(curr_bp, 0, arch_env->bp,
arch_register_req_type_ignore);
/* beware: the copy must be done before any other sp use */
- curr_sp = be_new_CopyKeep_single(arch_env->sp->reg_class, irg, bl, curr_sp, curr_bp, get_irn_mode(curr_sp));
+ curr_sp = be_new_CopyKeep_single(arch_env->sp->reg_class, bl, curr_sp, curr_bp, get_irn_mode(curr_sp));
be_set_constr_single_reg_out(curr_sp, 0, arch_env->sp,
arch_register_req_type_produces_sp);
const arch_env_t *arch_env = env->aenv;
ir_node *curr_sp = be_abi_reg_map_get(reg_map, arch_env->sp);
ir_node *curr_bp = be_abi_reg_map_get(reg_map, arch_env->bp);
- ir_graph *irg = env->irg;
if (env->flags.try_omit_fp) {
/* simply remove the stack frame here */
- curr_sp = be_new_IncSP(arch_env->sp, irg, bl, curr_sp, BE_STACK_FRAME_SIZE_SHRINK, 0);
+ curr_sp = be_new_IncSP(arch_env->sp, bl, curr_sp, BE_STACK_FRAME_SIZE_SHRINK, 0);
} else {
ir_mode *mode_bp = arch_env->bp->reg_class->mode;
/* leave */
leave = new_bd_ia32_Leave(NULL, bl, curr_bp);
- curr_bp = new_r_Proj(irg, bl, leave, mode_bp, pn_ia32_Leave_frame);
- curr_sp = new_r_Proj(irg, bl, leave, get_irn_mode(curr_sp), pn_ia32_Leave_stack);
+ curr_bp = new_r_Proj(bl, leave, mode_bp, pn_ia32_Leave_frame);
+ curr_sp = new_r_Proj(bl, leave, get_irn_mode(curr_sp), pn_ia32_Leave_stack);
} else {
ir_node *pop;
kill_node(curr_sp);
/* copy ebp to esp */
- curr_sp = be_new_Copy(&ia32_reg_classes[CLASS_ia32_gp], irg, bl, curr_bp);
+ curr_sp = be_new_Copy(&ia32_reg_classes[CLASS_ia32_gp], bl, curr_bp);
arch_set_irn_register(curr_sp, arch_env->sp);
be_set_constr_single_reg_out(curr_sp, 0, arch_env->sp,
arch_register_req_type_ignore);
/* pop ebp */
pop = new_bd_ia32_PopEbp(NULL, bl, *mem, curr_sp);
- curr_bp = new_r_Proj(irg, bl, pop, mode_bp, pn_ia32_Pop_res);
- curr_sp = new_r_Proj(irg, bl, pop, get_irn_mode(curr_sp), pn_ia32_Pop_stack);
+ curr_bp = new_r_Proj(bl, pop, mode_bp, pn_ia32_Pop_res);
+ curr_sp = new_r_Proj(bl, pop, get_irn_mode(curr_sp), pn_ia32_Pop_stack);
- *mem = new_r_Proj(irg, bl, pop, mode_M, pn_ia32_Pop_M);
+ *mem = new_r_Proj(bl, pop, mode_M, pn_ia32_Pop_M);
}
arch_set_irn_register(curr_sp, arch_env->sp);
arch_set_irn_register(curr_bp, arch_env->bp);
* Destroy the callback object.
* @param self The callback object.
*/
-static void ia32_abi_done(void *self) {
+static void ia32_abi_done(void *self)
+{
free(self);
}
/**
* Build the between type and entities if not already built.
*/
-static void ia32_build_between_type(void) {
+static void ia32_build_between_type(void)
+{
#define IDENT(s) new_id_from_chars(s, sizeof(s)-1)
if (! between_type) {
ir_type *old_bp_type = new_type_primitive(IDENT("bp"), mode_Iu);
/**
* Return the stack entity that contains the return address.
*/
-ir_entity *ia32_get_return_address_entity(void) {
+ir_entity *ia32_get_return_address_entity(void)
+{
ia32_build_between_type();
return ia32_curr_fp_ommitted ? omit_fp_ret_addr_ent : ret_addr_ent;
}
/**
* Return the stack entity that contains the frame address.
*/
-ir_entity *ia32_get_frame_address_entity(void) {
+ir_entity *ia32_get_frame_address_entity(void)
+{
ia32_build_between_type();
return ia32_curr_fp_ommitted ? NULL : old_bp_ent;
}
* @param obstack The obstack to use for allocation of the returned nodes array
* @return The inverse operation or NULL if the operation is not invertible
*/
-static arch_inverse_t *ia32_get_inverse(const ir_node *irn, int i, arch_inverse_t *inverse, struct obstack *obst) {
+static arch_inverse_t *ia32_get_inverse(const ir_node *irn, int i, arch_inverse_t *inverse, struct obstack *obst)
+{
ir_mode *mode;
ir_mode *irn_mode;
ir_node *block, *noreg, *nomem;
ia32_abi_epilogue
};
-/* fill register allocator interface */
-
+/* register allocator interface */
static const arch_irn_ops_t ia32_irn_ops = {
- ia32_get_irn_reg_req,
+ get_ia32_in_req,
+ get_ia32_out_req,
+ ia32_classify,
+ ia32_get_frame_entity,
+ ia32_set_frame_entity,
+ ia32_set_frame_offset,
+ ia32_get_sp_bias,
+ ia32_get_inverse,
+ ia32_get_op_estimated_cost,
+ ia32_possible_memory_operand,
+ ia32_perform_memory_operand,
+};
+
+/* Special register allocator interface for SwitchJmp,
+   because it can have a wide range of Proj numbers.
+   We do not want to allocate register constraint outputs
+   for all of them. */
+static const arch_irn_ops_t ia32_SwitchJmp_irn_ops = {
+ /* Note: we also use SwitchJmp_out_req for the inputs, because the
+    bearch API currently has a conceptual problem: querying for negative
+    Proj numbers, which can happen for switches, is not possible, so the
+    inputs end up being queried instead. */
+ get_ia32_SwitchJmp_out_req,
+ get_ia32_SwitchJmp_out_req,
ia32_classify,
ia32_get_frame_entity,
ia32_set_frame_entity,
#define ID(s) new_id_from_chars(s, sizeof(s) - 1)
-static void ia32_before_abi(void *self) {
+static void ia32_before_abi(void *self)
+{
lower_mode_b_config_t lower_mode_b_config = {
mode_Iu, /* lowered mode */
mode_Bu, /* preferred mode for set */
*/
static void ia32_prepare_graph(void *self)
{
- ia32_code_gen_t *cg = self;
- ir_graph *irg = cg->irg;
-
- /* do local optimizations */
- optimize_graph_df(irg);
-
- /* we have to do cfopt+remove_critical_edges as we can't have Bad-blocks
- * or critical edges in the backend */
- optimize_cf(irg);
- remove_critical_cf_edges(irg);
-
- /* TODO: we often have dead code reachable through out-edges here. So for
- * now we rebuild edges (as we need correct user count for code selection)
- */
-#if 1
- edges_deactivate(cg->irg);
- edges_activate(cg->irg);
-#endif
-
- if (cg->dump)
- be_dump(cg->irg, "-pre_transform", dump_ir_block_graph_sched);
+ ia32_code_gen_t *cg = self;
switch (be_transformer) {
case TRANSFORMER_DEFAULT:
ir_node *turn_back_am(ir_node *node)
{
- ir_graph *irg = current_ir_graph;
dbg_info *dbgi = get_irn_dbg_info(node);
ir_node *block = get_nodes_block(node);
ir_node *base = get_irn_n(node, n_ia32_base);
ir_node *noreg;
ir_node *load = new_bd_ia32_Load(dbgi, block, base, index, mem);
- ir_node *load_res = new_rd_Proj(dbgi, irg, block, load, mode_Iu, pn_ia32_Load_res);
+ ir_node *load_res = new_rd_Proj(dbgi, block, load, mode_Iu, pn_ia32_Load_res);
ia32_copy_am_attrs(load, node);
if (is_ia32_is_reload(node))
/**
* Called before the register allocator.
*/
-static void ia32_before_ra(void *self) {
+static void ia32_before_ra(void *self)
+{
ia32_code_gen_t *cg = self;
/* setup fpu rounding modes */
/**
* Transforms a be_Reload into an ia32 Load.
*/
-static void transform_to_Load(ia32_code_gen_t *cg, ir_node *node) {
+static void transform_to_Load(ia32_code_gen_t *cg, ir_node *node)
+{
ir_graph *irg = get_irn_irg(node);
dbg_info *dbg = get_irn_dbg_info(node);
ir_node *block = get_nodes_block(node);
DBG_OPT_RELOAD2LD(node, new_op);
- proj = new_rd_Proj(dbg, irg, block, new_op, mode, pn_ia32_Load_res);
+ proj = new_rd_Proj(dbg, block, new_op, mode, pn_ia32_Load_res);
if (sched_point) {
sched_add_after(sched_point, new_op);
/**
* Transforms a be_Spill node into an ia32 Store.
*/
-static void transform_to_Store(ia32_code_gen_t *cg, ir_node *node) {
+static void transform_to_Store(ia32_code_gen_t *cg, ir_node *node)
+{
ir_graph *irg = get_irn_irg(node);
dbg_info *dbg = get_irn_dbg_info(node);
ir_node *block = get_nodes_block(node);
exchange(node, store);
}
-static ir_node *create_push(ia32_code_gen_t *cg, ir_node *node, ir_node *schedpoint, ir_node *sp, ir_node *mem, ir_entity *ent) {
+static ir_node *create_push(ia32_code_gen_t *cg, ir_node *node, ir_node *schedpoint, ir_node *sp, ir_node *mem, ir_entity *ent)
+{
dbg_info *dbg = get_irn_dbg_info(node);
ir_node *block = get_nodes_block(node);
ir_node *noreg = ia32_new_NoReg_gp(cg);
return push;
}
-static ir_node *create_pop(ia32_code_gen_t *cg, ir_node *node, ir_node *schedpoint, ir_node *sp, ir_entity *ent) {
+static ir_node *create_pop(ia32_code_gen_t *cg, ir_node *node, ir_node *schedpoint, ir_node *sp, ir_entity *ent)
+{
dbg_info *dbg = get_irn_dbg_info(node);
ir_node *block = get_nodes_block(node);
ir_node *noreg = ia32_new_NoReg_gp(cg);
- ir_graph *irg = get_irn_irg(node);
+ ir_graph *irg = get_irn_irg(node);
ir_node *frame = get_irg_frame(irg);
ir_node *pop = new_bd_ia32_PopMem(dbg, block, frame, noreg, new_NoMem(), sp);
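/**
 * Creates a Proj of pred that yields the stack pointer (register ESP).
 */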
static ir_node* create_spproj(ir_node *node, ir_node *pred, int pos)
{
- ir_graph *irg = get_irn_irg(node);
dbg_info *dbg = get_irn_dbg_info(node);
ir_node *block = get_nodes_block(node);
ir_mode *spmode = mode_Iu;
const arch_register_t *spreg = &ia32_gp_regs[REG_ESP];
ir_node *sp;
- sp = new_rd_Proj(dbg, irg, block, pred, spmode, pos);
+ sp = new_rd_Proj(dbg, block, pred, spmode, pos);
arch_set_irn_register(sp, spreg);
return sp;
*/
static void transform_MemPerm(ia32_code_gen_t *cg, ir_node *node)
{
- ir_graph *irg = get_irn_irg(node);
ir_node *block = get_nodes_block(node);
ir_node *sp = be_abi_get_ignore_irn(cg->birg->abi, &ia32_gp_regs[REG_ESP]);
int arity = be_get_MemPerm_entity_arity(node);
}
in[0] = sp;
- keep = be_new_Keep(&ia32_reg_classes[CLASS_ia32_gp], irg, block, 1, in);
+ keep = be_new_Keep(block, 1, in);
sched_add_before(node, keep);
/* exchange memprojs */
/**
* Block-Walker: calls the transform functions for Spill and Reload nodes.
*/
-static void ia32_after_ra_walker(ir_node *block, void *env) {
+static void ia32_after_ra_walker(ir_node *block, void *env)
+{
ir_node *node, *prev;
ia32_code_gen_t *cg = env;
* We transform Spill and Reload here. This needs to be done before
* stack biasing otherwise we would miss the corrected offset for these nodes.
*/
-static void ia32_after_ra(void *self) {
+static void ia32_after_ra(void *self)
+{
ia32_code_gen_t *cg = self;
ir_graph *irg = cg->irg;
be_fec_env_t *fec_env = be_new_frame_entity_coalescer(cg->birg);
* virtual with real x87 instructions, creating a block schedule and peephole
* optimisations.
*/
-static void ia32_finish(void *self) {
+static void ia32_finish(void *self)
+{
ia32_code_gen_t *cg = self;
ir_graph *irg = cg->irg;
* Emits the code, closes the output file and frees
* the code generator interface.
*/
-static void ia32_codegen(void *self) {
+static void ia32_codegen(void *self)
+{
ia32_code_gen_t *cg = self;
ir_graph *irg = cg->irg;
- ia32_gen_routine(cg, irg);
-
- cur_reg_set = NULL;
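+ /* emit the routine either as raw machine code or as assembler text */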
+ if (ia32_cg_config.emit_machcode) {
+ ia32_gen_binary_routine(cg, irg);
+ } else {
+ ia32_gen_routine(cg, irg);
+ }
/* remove it from the isa */
cg->isa->cg = NULL;
ia32_current_cg = NULL;
/* de-allocate code generator */
- del_set(cg->reg_set);
free(cg);
}
/**
* Returns the node representing the PIC base.
*/
-static ir_node *ia32_get_pic_base(void *self) {
+static ir_node *ia32_get_pic_base(void *self)
+{
ir_node *block;
ia32_code_gen_t *cg = self;
ir_node *get_eip = cg->get_eip;
/**
* Initializes an IA32 code generator.
*/
-static void *ia32_cg_init(be_irg_t *birg) {
+static void *ia32_cg_init(be_irg_t *birg)
+{
ia32_isa_t *isa = (ia32_isa_t *)birg->main_env->arch_env;
ia32_code_gen_t *cg = XMALLOCZ(ia32_code_gen_t);
cg->impl = &ia32_code_gen_if;
cg->irg = birg->irg;
- cg->reg_set = new_set(ia32_cmp_irn_reg_assoc, 1024);
cg->isa = isa;
cg->birg = birg;
cg->blk_sched = NULL;
}
#endif /* NDEBUG */
- cur_reg_set = cg->reg_set;
-
assert(ia32_current_cg == NULL);
ia32_current_cg = cg;
&ia32_isa_if, /* isa interface implementation */
&ia32_gp_regs[REG_ESP], /* stack pointer register */
&ia32_gp_regs[REG_EBP], /* base pointer register */
+ &ia32_reg_classes[CLASS_ia32_gp], /* static link pointer register class */
-1, /* stack direction */
2, /* power of two stack alignment, 2^2 == 4 */
NULL, /* main environment */
/**
* Initializes the backend ISA.
*/
-static arch_env_t *ia32_init(FILE *file_handle) {
+static arch_env_t *ia32_init(FILE *file_handle)
+{
static int inited = 0;
ia32_isa_t *isa;
int i, n;
ia32_register_init();
ia32_create_opcodes(&ia32_irn_ops);
+ /* special handling for SwitchJmp */
+ op_ia32_SwitchJmp->ops.be_ops = &ia32_SwitchJmp_irn_ops;
be_emit_init(file_handle);
isa->regs_16bit = pmap_create();
/**
* Closes the output file and frees the ISA structure.
*/
-static void ia32_done(void *self) {
+static void ia32_done(void *self)
+{
ia32_isa_t *isa = self;
/* emit now all global declarations */
* - the virtual floating point registers
* - the SSE vector register set
*/
-static unsigned ia32_get_n_reg_class(const void *self) {
- (void) self;
+static unsigned ia32_get_n_reg_class(void)
+{
return N_CLASSES;
}
/**
* Return the register class for index i.
*/
-static const arch_register_class_t *ia32_get_reg_class(const void *self,
- unsigned i)
+static const arch_register_class_t *ia32_get_reg_class(unsigned i)
{
- (void) self;
assert(i < N_CLASSES);
return &ia32_reg_classes[i];
}
* @param mode The mode in question.
* @return A register class which can hold values of the given mode.
*/
-const arch_register_class_t *ia32_get_reg_class_for_mode(const void *self,
- const ir_mode *mode)
+const arch_register_class_t *ia32_get_reg_class_for_mode(const ir_mode *mode)
{
- (void) self;
-
if (mode_is_float(mode)) {
return ia32_cg_config.use_sse2 ? &ia32_reg_classes[CLASS_ia32_xmm] : &ia32_reg_classes[CLASS_ia32_vfp];
}
return &ia32_reg_classes[CLASS_ia32_gp];
}
+/**
+ * Returns the register in which parameter nr is passed,
+ * or NULL if it is not passed in a register.
+ */
+static const arch_register_t *ia32_get_RegParam_reg(unsigned cc, unsigned nr,
+ const ir_mode *mode)
+{
+ static const arch_register_t *gpreg_param_reg_fastcall[] = {
+ &ia32_gp_regs[REG_ECX],
+ &ia32_gp_regs[REG_EDX],
+ NULL
+ };
+ static const unsigned MAXNUM_GPREG_ARGS = 3;
+
+ static const arch_register_t *gpreg_param_reg_regparam[] = {
+ &ia32_gp_regs[REG_EAX],
+ &ia32_gp_regs[REG_EDX],
+ &ia32_gp_regs[REG_ECX]
+ };
+
+ static const arch_register_t *gpreg_param_reg_this[] = {
+ &ia32_gp_regs[REG_ECX],
+ NULL,
+ NULL
+ };
+
+ static const arch_register_t *fpreg_sse_param_reg_std[] = {
+ &ia32_xmm_regs[REG_XMM0],
+ &ia32_xmm_regs[REG_XMM1],
+ &ia32_xmm_regs[REG_XMM2],
+ &ia32_xmm_regs[REG_XMM3],
+ &ia32_xmm_regs[REG_XMM4],
+ &ia32_xmm_regs[REG_XMM5],
+ &ia32_xmm_regs[REG_XMM6],
+ &ia32_xmm_regs[REG_XMM7]
+ };
+
+ static const arch_register_t *fpreg_sse_param_reg_this[] = {
+ NULL, /* in case of a "this" pointer, the first parameter must not be a float */
+ };
+ static const unsigned MAXNUM_SSE_ARGS = 8;
+
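+ /* for this-calls, the "this" pointer (parameter 0) is passed in ECX */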
+ if ((cc & cc_this_call) && nr == 0)
+ return gpreg_param_reg_this[0];
+
+ if (! (cc & cc_reg_param))
+ return NULL;
+
+ if (mode_is_float(mode)) {
+ if (!ia32_cg_config.use_sse2 || (cc & cc_fpreg_param) == 0)
+ return NULL;
+ if (nr >= MAXNUM_SSE_ARGS)
+ return NULL;
+
+ if (cc & cc_this_call) {
+ return fpreg_sse_param_reg_this[nr];
+ }
+ return fpreg_sse_param_reg_std[nr];
+ } else if (mode_is_int(mode) || mode_is_reference(mode)) {
+ unsigned num_regparam;
+
+ if (get_mode_size_bits(mode) > 32)
+ return NULL;
+
+ if (nr >= MAXNUM_GPREG_ARGS)
+ return NULL;
+
+ if (cc & cc_this_call) {
+ return gpreg_param_reg_this[nr];
+ }
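+ /* the number of register parameters is encoded in the cc bits outside cc_bits */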
+ num_regparam = cc & ~cc_bits;
+ if (num_regparam == 0) {
+ /* default fastcall */
+ return gpreg_param_reg_fastcall[nr];
+ }
+ if (nr < num_regparam)
+ return gpreg_param_reg_regparam[nr];
+ return NULL;
+ }
+
+ panic("unknown argument mode");
+}
+
/**
* Get the ABI restrictions for procedure calls.
* @param self The this pointer.
call_flags.bits.store_args_sequential = 0;
/* call_flags.bits.try_omit_fp not changed: can handle both settings */
call_flags.bits.fp_free = 0; /* the frame pointer is fixed in IA32 */
- call_flags.bits.call_has_imm = 0; /* No call immediates, we handle this by ourselves */
+ call_flags.bits.call_has_imm = 0; /* No call immediate, we handle this by ourselves */
/* set parameter passing style */
be_abi_call_set_flags(abi, call_flags, &ia32_abi_callbacks);
} else {
if (get_method_additional_properties(method_type) & mtp_property_private &&
ia32_cg_config.optimize_cc) {
- /* set the calling conventions to register parameter */
- cc = (cc & ~(cc_bits|cc_this_call)) | cc_reg_param;
+ /* set the fast calling convention (allowing up to 3 register parameters) */
+ cc = SET_FASTCALL(cc) | 3;
}
}
/**
* Returns the estimated execution time of an ia32 irn.
*/
-static sched_timestep_t ia32_sched_exectime(void *env, const ir_node *irn) {
+static sched_timestep_t ia32_sched_exectime(void *env, const ir_node *irn)
+{
(void) env;
return is_ia32_irn(irn) ? ia32_get_op_estimated_cost(irn) : 1;
}
/**
* Returns the necessary byte alignment for storing a register of a given class.
*/
-static int ia32_get_reg_class_alignment(const void *self,
- const arch_register_class_t *cls)
+static int ia32_get_reg_class_alignment(const arch_register_class_t *cls)
{
ir_mode *mode = arch_register_class_mode(cls);
int bytes = get_mode_size_bytes(mode);
- (void) self;
if (mode_is_float(mode) && bytes > 8)
return 16;
}
static const be_execution_unit_t ***ia32_get_allowed_execution_units(
- const void *self, const ir_node *irn)
+ const ir_node *irn)
{
static const be_execution_unit_t *_allowed_units_BRANCH[] = {
&ia32_execution_units_BRANCH[IA32_EXECUNIT_TP_BRANCH_BRANCH1],
NULL
};
const be_execution_unit_t ***ret;
- (void) self;
if (is_ia32_irn(irn)) {
ret = get_ia32_exec_units(irn);
/**
* Return the abstract ia32 machine.
*/
-static const be_machine_t *ia32_get_machine(const void *self) {
+static const be_machine_t *ia32_get_machine(const void *self)
+{
const ia32_isa_t *isa = self;
return isa->cpu;
}
return NULL;
}
-static void ia32_mark_remat(const void *self, ir_node *node) {
- (void) self;
+static void ia32_mark_remat(ir_node *node)
+{
if (is_ia32_irn(node)) {
set_ia32_is_remat(node);
}
/**
* Check for Abs or -Abs.
*/
-static int psi_is_Abs_or_Nabs(ir_node *cmp, ir_node *sel, ir_node *t, ir_node *f) {
+static int psi_is_Abs_or_Nabs(ir_node *cmp, ir_node *sel, ir_node *t, ir_node *f)
+{
ir_node *l, *r;
pn_Cmp pnc;
/**
* Check for Abs only
*/
-static int psi_is_Abs(ir_node *cmp, ir_node *sel, ir_node *t, ir_node *f) {
+static int psi_is_Abs(ir_node *cmp, ir_node *sel, ir_node *t, ir_node *f)
+{
ir_node *l, *r;
pn_Cmp pnc;
ir_node *f = get_Phi_pred(phi, j);
/* always support Mux(!float, C1, C2) */
- if (is_Const(t) && is_Const(f) && !mode_is_float(get_irn_mode(cl)))
- continue;
+ if (is_Const(t) && is_Const(f) && !mode_is_float(get_irn_mode(cl))) {
+ switch (be_transformer) {
+ case TRANSFORMER_DEFAULT:
+ /* always support Mux(!float, C1, C2) */
+ continue;
+#ifdef FIRM_GRGEN_BE
+ case TRANSFORMER_PBQP:
+ case TRANSFORMER_RAND:
+ /* no support for Mux(*, C1, C2) */
+ return 0;
+#endif
+ default:
+ panic("invalid transformer");
+ }
+ }
/* only abs or nabs supported */
if (! psi_is_Abs_or_Nabs(cmp, sel, t, f))
return 0;
return 0;
}
-static asm_constraint_flags_t ia32_parse_asm_constraint(const void *self, const char **c)
+static asm_constraint_flags_t ia32_parse_asm_constraint(const char **c)
{
- (void) self;
(void) c;
/* we already added all our simple flags to the flags modifier list in
return ASM_CONSTRAINT_FLAG_INVALID;
}
-static int ia32_is_valid_clobber(const void *self, const char *clobber)
+static int ia32_is_valid_clobber(const char *clobber)
{
- (void) self;
-
return ia32_get_clobber_register(clobber) != NULL;
}
*/
static ir_node *ia32_create_trampoline_fkt(ir_node *block, ir_node *mem, ir_node *trampoline, ir_node *env, ir_node *callee)
{
- ir_graph *irg = get_Block_irg(block);
ir_node *st, *p = trampoline;
ir_mode *mode = get_irn_mode(p);
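/* The trampoline consists of two instructions
 * (0xb9 is the opcode of "mov ecx, imm32", 0xe9 of a near "jmp"):
 *   mov ecx, <env>
 *   jmp <callee>
 */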
/* mov ecx,<env> */
- st = new_r_Store(irg, block, mem, p, new_Const_long(mode_Bu, 0xb9), 0);
- mem = new_r_Proj(irg, block, st, mode_M, pn_Store_M);
- p = new_r_Add(irg, block, p, new_Const_long(mode_Iu, 1), mode);
- st = new_r_Store(irg, block, mem, p, env, 0);
- mem = new_r_Proj(irg, block, st, mode_M, pn_Store_M);
- p = new_r_Add(irg, block, p, new_Const_long(mode_Iu, 4), mode);
+ st = new_r_Store(block, mem, p, new_Const_long(mode_Bu, 0xb9), 0);
+ mem = new_r_Proj(block, st, mode_M, pn_Store_M);
+ p = new_r_Add(block, p, new_Const_long(mode_Iu, 1), mode);
+ st = new_r_Store(block, mem, p, env, 0);
+ mem = new_r_Proj(block, st, mode_M, pn_Store_M);
+ p = new_r_Add(block, p, new_Const_long(mode_Iu, 4), mode);
/* jmp <callee> */
- st = new_r_Store(irg, block, mem, p, new_Const_long(mode_Bu, 0xe9), 0);
- mem = new_r_Proj(irg, block, st, mode_M, pn_Store_M);
- p = new_r_Add(irg, block, p, new_Const_long(mode_Iu, 1), mode);
- st = new_r_Store(irg, block, mem, p, callee, 0);
- mem = new_r_Proj(irg, block, st, mode_M, pn_Store_M);
- p = new_r_Add(irg, block, p, new_Const_long(mode_Iu, 4), mode);
+ st = new_r_Store(block, mem, p, new_Const_long(mode_Bu, 0xe9), 0);
+ mem = new_r_Proj(block, st, mode_M, pn_Store_M);
+ p = new_r_Add(block, p, new_Const_long(mode_Iu, 1), mode);
+ st = new_r_Store(block, mem, p, callee, 0);
+ mem = new_r_Proj(block, st, mode_M, pn_Store_M);
+ p = new_r_Add(block, p, new_Const_long(mode_Iu, 4), mode);
return mem;
}
/**
* Returns the libFirm configuration parameters for this backend.
*/
-static const backend_params *ia32_get_libfirm_params(void) {
+static const backend_params *ia32_get_libfirm_params(void)
+{
static const ir_settings_if_conv_t ifconv = {
4, /* maxdepth, doesn't matter for Mux-conversion */
ia32_is_mux_allowed /* allows or disallows Mux creation for given selector */