#include "irprintf.h"
#include "iredges_t.h"
#include "ircons.h"
+#include "irflag.h"
#include "irgmod.h"
#include "irgopt.h"
#include "irbitset.h"
#include "irgopt.h"
-#include "irdump_grgen.h"
#include "pdeq.h"
#include "pset.h"
#include "debug.h"
 * If the node returns a tuple (mode_T) then the proj's
 * will be asked for this information.
 */
/* Hunk: the unused `self` context parameter is removed from this irn-ops
 * callback.  NOTE(review): the body is elided by the diff — only the
 * mode_X / Block early-out (no register requirement) is visible here. */
-static const arch_register_req_t *ia32_get_irn_reg_req(const void *self,
- const ir_node *node,
+static const arch_register_req_t *ia32_get_irn_reg_req(const ir_node *node,
int pos)
{
ir_mode *mode = get_irn_mode(node);
long node_pos;
- (void)self;
if (mode == mode_X || is_Block(node)) {
return arch_no_register_req;
}
return arch_no_register_req;
}
/* Hunk: remove the unused `self` argument.  NOTE(review): body elided by the
 * diff; only the mode_X early-return is visible. */
-static void ia32_set_irn_reg(const void *self, ir_node *irn,
- const arch_register_t *reg)
+static void ia32_set_irn_reg(ir_node *irn, const arch_register_t *reg)
{
int pos = 0;
- (void) self;
if (get_irn_mode(irn) == mode_X) {
return;
}
}
/* Hunk: remove the unused `self` argument.  NOTE(review): the function is
 * truncated by the diff — its closing brace is not visible in this hunk. */
-static const arch_register_t *ia32_get_irn_reg(const void *self,
- const ir_node *irn)
+static const arch_register_t *ia32_get_irn_reg(const ir_node *irn)
{
int pos = 0;
const arch_register_t *reg = NULL;
- (void) self;
if (is_Proj(irn)) {
return reg;
}
/* Hunk: remove the unused `self` argument from the classify callback.
 * NOTE(review): interior elided; default classification is "normal". */
-static arch_irn_class_t ia32_classify(const void *self, const ir_node *irn) {
+static arch_irn_class_t ia32_classify(const ir_node *irn) {
arch_irn_class_t classification = arch_irn_class_normal;
- (void) self;
irn = skip_Proj_const(irn);
return classification;
}
/* Hunk: remove the unused `self` argument.  NOTE(review): truncated — only
 * the Unknown-node case (ignore flag) is visible. */
-static arch_irn_flags_t ia32_get_flags(const void *self, const ir_node *irn) {
+static arch_irn_flags_t ia32_get_flags(const ir_node *irn) {
arch_irn_flags_t flags = arch_irn_flags_none;
- (void) self;
if (is_Unknown(irn))
return arch_irn_flags_ignore;
 */
/* Hunk: the cached ISA handle is dropped from the ABI environment; all users
 * below now reach sp/bp through the architecture environment (`aenv`). */
typedef struct {
be_abi_call_flags_bits_t flags; /**< The call flags. */
- const arch_isa_t *isa; /**< The ISA handle. */
const arch_env_t *aenv; /**< The architecture environment. */
ir_graph *irg; /**< The associated graph. */
} ia32_abi_env_t;
/* Hunk: `self` removed.  Returns the frame entity of an ia32 node, or NULL
 * for non-ia32 nodes. */
-static ir_entity *ia32_get_frame_entity(const void *self, const ir_node *irn) {
- (void) self;
+static ir_entity *ia32_get_frame_entity(const ir_node *irn) {
return is_ia32_irn(irn) ? get_ia32_frame_ent(irn) : NULL;
}
/* Hunk: `self` removed.  Attaches a frame entity to an ia32 node. */
-static void ia32_set_frame_entity(const void *self, ir_node *irn, ir_entity *ent) {
- (void) self;
+static void ia32_set_frame_entity(ir_node *irn, ir_entity *ent) {
set_ia32_frame_ent(irn, ent);
}
/* Hunk: `self` removed; the code generator is now obtained from the global
 * ia32_current_cg instead of the per-ops context.  NOTE(review): body is
 * partially elided (the omit_fp Pop adjustment is cut short). */
-static void ia32_set_frame_offset(const void *self, ir_node *irn, int bias)
+static void ia32_set_frame_offset(ir_node *irn, int bias)
{
- const ia32_irn_ops_t *ops = self;
-
if (get_ia32_frame_ent(irn) == NULL)
return;
if (is_ia32_Pop(irn) || is_ia32_PopMem(irn)) {
- int omit_fp = be_abi_omit_fp(ops->cg->birg->abi);
+ ia32_code_gen_t *cg = ia32_current_cg;
+ int omit_fp = be_abi_omit_fp(cg->birg->abi);
if (omit_fp) {
/* Pop nodes modify the stack pointer before calculating the
 * destination address, so fix this here
add_ia32_am_offs_int(irn, bias);
}
/* Hunk: `self` removed.  Returns the stack-pointer bias of a node.
 * NOTE(review): truncated — only the Push case (+4 bytes) is visible. */
-static int ia32_get_sp_bias(const void *self, const ir_node *node)
+static int ia32_get_sp_bias(const ir_node *node)
{
- (void) self;
-
if (is_ia32_Push(node))
return 4;
/* NOTE(review): fragment — the enclosing ABI callback's signature is elided
 * by the diff.  The base-pointer register is now fetched from the arch_env
 * instead of the removed isa handle. */
{
ia32_abi_env_t *env = self;
if(env->flags.try_omit_fp)
- pset_insert_ptr(s, env->isa->bp);
+ pset_insert_ptr(s, env->aenv->bp);
}
/**
 */
/* Hunk: ia32_abi_prologue now reads sp/bp from env->aenv (arch_env_t) and the
 * code generator from the global ia32_current_cg; the removed env->isa handle
 * is gone.  When the frame pointer is not omitted it emits push ebp /
 * mov esp->ebp and returns the register serving as frame base (bp), otherwise
 * sp.  NOTE(review): `irg =env->irg` on the added line is missing a space
 * after `=` — cosmetic only. */
static const arch_register_t *ia32_abi_prologue(void *self, ir_node **mem, pmap *reg_map)
{
- ia32_abi_env_t *env = self;
- const ia32_isa_t *isa = (ia32_isa_t *)env->isa;
- ia32_code_gen_t *cg = isa->cg;
+ ia32_abi_env_t *env = self;
+ ia32_code_gen_t *cg = ia32_current_cg;
+ const arch_env_t *arch_env = env->aenv;
if (! env->flags.try_omit_fp) {
- ir_node *bl = get_irg_start_block(env->irg);
- ir_node *curr_sp = be_abi_reg_map_get(reg_map, env->isa->sp);
- ir_node *curr_bp = be_abi_reg_map_get(reg_map, env->isa->bp);
- ir_node *noreg = ia32_new_NoReg_gp(cg);
- ir_node *push;
+ ir_graph *irg =env->irg;
+ ir_node *bl = get_irg_start_block(irg);
+ ir_node *curr_sp = be_abi_reg_map_get(reg_map, arch_env->sp);
+ ir_node *curr_bp = be_abi_reg_map_get(reg_map, arch_env->bp);
+ ir_node *noreg = ia32_new_NoReg_gp(cg);
+ ir_node *push;
/* ALL nodes representing bp must be set to ignore. */
be_node_set_flags(get_Proj_pred(curr_bp), BE_OUT_POS(get_Proj_proj(curr_bp)), arch_irn_flags_ignore);
/* push ebp */
- push = new_rd_ia32_Push(NULL, env->irg, bl, noreg, noreg, *mem, curr_bp, curr_sp);
- curr_sp = new_r_Proj(env->irg, bl, push, get_irn_mode(curr_sp), pn_ia32_Push_stack);
- *mem = new_r_Proj(env->irg, bl, push, mode_M, pn_ia32_Push_M);
+ push = new_rd_ia32_Push(NULL, irg, bl, noreg, noreg, *mem, curr_bp, curr_sp);
+ curr_sp = new_r_Proj(irg, bl, push, get_irn_mode(curr_sp), pn_ia32_Push_stack);
+ *mem = new_r_Proj(irg, bl, push, mode_M, pn_ia32_Push_M);
/* the push must have SP out register */
- arch_set_irn_register(env->aenv, curr_sp, env->isa->sp);
+ arch_set_irn_register(arch_env, curr_sp, arch_env->sp);
set_ia32_flags(push, arch_irn_flags_ignore);
/* move esp to ebp */
- curr_bp = be_new_Copy(env->isa->bp->reg_class, env->irg, bl, curr_sp);
- be_set_constr_single_reg(curr_bp, BE_OUT_POS(0), env->isa->bp);
- arch_set_irn_register(env->aenv, curr_bp, env->isa->bp);
+ curr_bp = be_new_Copy(arch_env->bp->reg_class, irg, bl, curr_sp);
+ be_set_constr_single_reg(curr_bp, BE_OUT_POS(0), arch_env->bp);
+ arch_set_irn_register(arch_env, curr_bp, arch_env->bp);
be_node_set_flags(curr_bp, BE_OUT_POS(0), arch_irn_flags_ignore);
/* beware: the copy must be done before any other sp use */
- curr_sp = be_new_CopyKeep_single(env->isa->sp->reg_class, env->irg, bl, curr_sp, curr_bp, get_irn_mode(curr_sp));
- be_set_constr_single_reg(curr_sp, BE_OUT_POS(0), env->isa->sp);
- arch_set_irn_register(env->aenv, curr_sp, env->isa->sp);
+ curr_sp = be_new_CopyKeep_single(arch_env->sp->reg_class, irg, bl, curr_sp, curr_bp, get_irn_mode(curr_sp));
+ be_set_constr_single_reg(curr_sp, BE_OUT_POS(0), arch_env->sp);
+ arch_set_irn_register(arch_env, curr_sp, arch_env->sp);
be_node_set_flags(curr_sp, BE_OUT_POS(0), arch_irn_flags_ignore);
- be_abi_reg_map_set(reg_map, env->isa->sp, curr_sp);
- be_abi_reg_map_set(reg_map, env->isa->bp, curr_bp);
+ be_abi_reg_map_set(reg_map, arch_env->sp, curr_sp);
+ be_abi_reg_map_set(reg_map, arch_env->bp, curr_bp);
- return env->isa->bp;
+ return arch_env->bp;
}
- return env->isa->sp;
+ return arch_env->sp;
}
/**
 */
/* Hunk: ia32_abi_epilogue switches from env->isa to env->aenv, and — a real
 * fix — uses env->irg instead of the global current_ir_graph (previously the
 * non-omit-fp branch relied on that global being correct).  NOTE(review):
 * the non-leave pop path is partially elided by the diff. */
static void ia32_abi_epilogue(void *self, ir_node *bl, ir_node **mem, pmap *reg_map)
{
- ia32_abi_env_t *env = self;
- ir_node *curr_sp = be_abi_reg_map_get(reg_map, env->isa->sp);
- ir_node *curr_bp = be_abi_reg_map_get(reg_map, env->isa->bp);
+ ia32_abi_env_t *env = self;
+ const arch_env_t *arch_env = env->aenv;
+ ir_node *curr_sp = be_abi_reg_map_get(reg_map, arch_env->sp);
+ ir_node *curr_bp = be_abi_reg_map_get(reg_map, arch_env->bp);
+ ir_graph *irg = env->irg;
if (env->flags.try_omit_fp) {
/* simply remove the stack frame here */
- curr_sp = be_new_IncSP(env->isa->sp, env->irg, bl, curr_sp, BE_STACK_FRAME_SIZE_SHRINK, 0);
+ curr_sp = be_new_IncSP(arch_env->sp, irg, bl, curr_sp, BE_STACK_FRAME_SIZE_SHRINK, 0);
add_irn_dep(curr_sp, *mem);
} else {
- ir_mode *mode_bp = env->isa->bp->reg_class->mode;
- ir_graph *irg = current_ir_graph;
+ ir_mode *mode_bp = arch_env->bp->reg_class->mode;
if (ia32_cg_config.use_leave) {
ir_node *leave;
/* copy ebp to esp */
curr_sp = be_new_Copy(&ia32_reg_classes[CLASS_ia32_gp], irg, bl, curr_bp);
- arch_set_irn_register(env->aenv, curr_sp, env->isa->sp);
+ arch_set_irn_register(arch_env, curr_sp, arch_env->sp);
be_node_set_flags(curr_sp, BE_OUT_POS(0), arch_irn_flags_ignore);
/* pop ebp */
*mem = new_r_Proj(irg, bl, pop, mode_M, pn_ia32_Pop_M);
}
- arch_set_irn_register(env->aenv, curr_sp, env->isa->sp);
- arch_set_irn_register(env->aenv, curr_bp, env->isa->bp);
+ arch_set_irn_register(arch_env, curr_sp, arch_env->sp);
+ arch_set_irn_register(arch_env, curr_bp, arch_env->bp);
}
- be_abi_reg_map_set(reg_map, env->isa->sp, curr_sp);
- be_abi_reg_map_set(reg_map, env->isa->bp, curr_bp);
+ be_abi_reg_map_set(reg_map, arch_env->sp, curr_sp);
+ be_abi_reg_map_set(reg_map, arch_env->bp, curr_bp);
}
/**
/* NOTE(review): fragment of the ABI-environment constructor — signature and
 * allocation elided by the diff.  The env->isa assignment is dropped together
 * with the removed struct field. */
env->flags = fl.bits;
env->irg = irg;
env->aenv = aenv;
- env->isa = aenv->isa;
return env;
}
*
 * @return The estimated cycle count for this operation
 */
/* Hunk: `self` removed.  NOTE(review): truncated — only the Proj early-out
 * (cost 0) is visible. */
-static int ia32_get_op_estimated_cost(const void *self, const ir_node *irn)
+static int ia32_get_op_estimated_cost(const ir_node *irn)
{
int cost;
ia32_op_type_t op_tp;
- (void) self;
if (is_Proj(irn))
return 0;
* @param obstack The obstack to use for allocation of the returned nodes array
 * @return The inverse operation or NULL if the operation is not invertible
 */
/* Hunk: `self` removed from the get_inverse callback.  NOTE(review): body
 * truncated after the non-ia32 guard. */
-static arch_inverse_t *ia32_get_inverse(const void *self, const ir_node *irn, int i, arch_inverse_t *inverse, struct obstack *obst) {
+static arch_inverse_t *ia32_get_inverse(const ir_node *irn, int i, arch_inverse_t *inverse, struct obstack *obst) {
ir_graph *irg;
ir_mode *mode;
ir_mode *irn_mode;
ir_node *block, *noreg, *nomem;
dbg_info *dbg;
- (void) self;
/* we cannot invert non-ia32 irns */
if (! is_ia32_irn(irn))
* @param i The operands position
 * @return Non-Zero if operand can be loaded
 */
/* Hunk: `self` removed.  NOTE(review): the condition list deciding whether
 * operand i can be folded into memory is elided by the diff. */
-static int ia32_possible_memory_operand(const void *self, const ir_node *irn, unsigned int i) {
+static int ia32_possible_memory_operand(const ir_node *irn, unsigned int i) {
ir_node *op = get_irn_n(irn, i);
const ir_mode *mode = get_irn_mode(op);
const ir_mode *spillmode = get_spill_mode(op);
- (void) self;
if (
(i != n_ia32_binary_left && i != n_ia32_binary_right) || /* a "real" operand position must be requested */
return 1;
}
/* Hunk: `self` removed; the code generator is taken from the global
 * ia32_current_cg.  The assert call site is updated to the new one-argument
 * ia32_possible_memory_operand signature. */
-static void ia32_perform_memory_operand(const void *self, ir_node *irn,
- ir_node *spill, unsigned int i)
+static void ia32_perform_memory_operand(ir_node *irn, ir_node *spill,
+ unsigned int i)
{
- const ia32_irn_ops_t *ops = self;
+ ia32_code_gen_t *cg = ia32_current_cg;
- assert(ia32_possible_memory_operand(self, irn, i) && "Cannot perform memory operand change");
+ assert(ia32_possible_memory_operand(irn, i) && "Cannot perform memory operand change");
if (i == n_ia32_binary_left) {
ia32_swap_left_right(irn);
set_ia32_need_stackent(irn);
set_irn_n(irn, n_ia32_base, get_irg_frame(get_irn_irg(irn)));
- set_irn_n(irn, n_ia32_binary_right, ia32_get_admissible_noreg(ops->cg, irn, n_ia32_binary_right));
+ set_irn_n(irn, n_ia32_binary_right, ia32_get_admissible_noreg(cg, irn, n_ia32_binary_right));
set_irn_n(irn, n_ia32_mem, spill);
/* immediates are only allowed on the right side */
/* fill register allocator interface */
/* Hunk: the two-level indirection (const ops-if vtable + mutable ia32_irn_ops
 * carrying a cg back-pointer) collapses into one constant arch_irn_ops_t
 * vtable; the callbacks above now use ia32_current_cg instead of the removed
 * per-ops state. */
-static const arch_irn_ops_if_t ia32_irn_ops_if = {
+static const arch_irn_ops_t ia32_irn_ops = {
ia32_get_irn_reg_req,
ia32_set_irn_reg,
ia32_get_irn_reg,
ia32_perform_memory_operand,
};
-static ia32_irn_ops_t ia32_irn_ops = {
- &ia32_irn_ops_if,
- NULL
-};
-
-
-
/**************************************************
 * _ _ __
 * | | (_)/ _|
/* Hunk: in the FIRM_GRGEN_BE build, CSE is now explicitly disabled around the
 * PBQP-based transformation (its two-step node construction breaks under CSE)
 * and re-enabled afterwards; the plain ia32_transform_graph path becomes the
 * #else branch instead of always running.  Also fixes the spelling
 * "optimisations" -> "optimizations". */
static void ia32_prepare_graph(void *self) {
ia32_code_gen_t *cg = self;
- /* do local optimisations */
+ /* do local optimizations */
optimize_graph_df(cg->irg);
/* TODO: we often have dead code reachable through out-edges here. So for
be_dump(cg->irg, "-pre_transform", dump_ir_block_graph_sched);
#ifdef FIRM_GRGEN_BE
+ // disable CSE, because of two-step node-construction
+ set_opt_cse(0);
+
/* transform nodes into assembler instructions by PBQP magic */
ia32_transform_graph_by_pbqp(cg);
-#endif
if (cg->dump)
be_dump(cg->irg, "-after_pbqp_transform", dump_ir_block_graph_sched);
+ set_opt_cse(1);
+#else
/* transform remaining nodes into assembler instructions */
ia32_transform_graph(cg);
+#endif
- /* do local optimisations (mainly CSE) */
+ /* do local optimizations (mainly CSE) */
optimize_graph_df(cg->irg);
if (cg->dump)
/* NOTE(review): fragment — the enclosing function is elided by the diff.
 * The memory-Proj search now matches by mode_M instead of a fixed proj number
 * and asserts uniqueness instead of stopping at the first hit; the AddrModeS
 * case of the switch is reformatted onto multiple lines (no behavior
 * change there). */
mem_proj = NULL;
foreach_out_edge(node, edge) {
ir_node *out = get_edge_src_irn(edge);
- if(get_Proj_proj(out) == pn_ia32_mem) {
+ if(get_irn_mode(out) == mode_M) {
+ assert(mem_proj == NULL);
mem_proj = out;
- break;
}
}
type = get_ia32_op_type(node);
switch (type) {
- case ia32_AddrModeS: turn_back_am(node); break;
+ case ia32_AddrModeS:
+ turn_back_am(node);
+ break;
case ia32_AddrModeD:
/* TODO implement this later... */
* Initializes a IA32 code generator.
 */
/* Hunk: main_env->arch_env is now a pointer (no more embedded struct with an
 * .isa member), and the per-ops cg back-pointer setup is dropped — callbacks
 * use the ia32_current_cg global, which is asserted unset before being
 * claimed here.  NOTE(review): tail of the function elided by the diff. */
static void *ia32_cg_init(be_irg_t *birg) {
- ia32_isa_t *isa = (ia32_isa_t *)birg->main_env->arch_env.isa;
+ ia32_isa_t *isa = (ia32_isa_t *)birg->main_env->arch_env;
ia32_code_gen_t *cg = xcalloc(1, sizeof(*cg));
cg->impl = &ia32_code_gen_if;
cg->irg = birg->irg;
cg->reg_set = new_set(ia32_cmp_irn_reg_assoc, 1024);
- cg->arch_env = &birg->main_env->arch_env;
cg->isa = isa;
+ cg->arch_env = birg->main_env->arch_env;
cg->birg = birg;
cg->blk_sched = NULL;
cg->dump = (birg->main_env->options->dump_flags & DUMP_BE) ? 1 : 0;
cur_reg_set = cg->reg_set;
- ia32_irn_ops.cg = cg;
-
assert(ia32_current_cg == NULL);
ia32_current_cg = cg;
/* NOTE(review): fragment of the ia32_isa_template initializer — the stack
 * alignment field changes encoding from a byte count (16) to a power-of-two
 * exponent (4, i.e. 2^4 == 16 bytes); the matching "stackalign" option text
 * is updated in a later hunk. */
&ia32_gp_regs[REG_ESP], /* stack pointer register */
&ia32_gp_regs[REG_EBP], /* base pointer register */
-1, /* stack direction */
- 16, /* stack alignment */
+ 4, /* power of two stack alignment, 2^4 == 16 */
NULL, /* main environment */
7, /* costs for a spill instruction */
5, /* costs for a reload instruction */
/**
 * Initializes the backend ISA.
 */
/* Hunk: ia32_init now returns a typed arch_env_t* (the env embedded in the
 * isa struct) instead of void*, and passes the new single irn-ops vtable to
 * opcode creation so every ia32 opcode carries it directly.  NOTE(review):
 * large parts of the body are elided by the diff. */
-static void *ia32_init(FILE *file_handle) {
+static arch_env_t *ia32_init(FILE *file_handle) {
static int inited = 0;
ia32_isa_t *isa;
}
ia32_register_init();
- ia32_create_opcodes();
+ ia32_create_opcodes(&ia32_irn_ops);
be_emit_init(file_handle);
isa->regs_16bit = pmap_create();
*/
inc_master_type_visited();
- return isa;
+ return &isa->arch_env;
}
/* NOTE(review): fragment of the ISA destructor — signature elided.  The
 * embedded arch-env member is renamed arch_isa -> arch_env to match the
 * refactoring in the other hunks. */
ia32_isa_t *isa = self;
/* emit now all global declarations */
- be_gas_emit_decls(isa->arch_isa.main_env, 1);
+ be_gas_emit_decls(isa->arch_env.main_env, 1);
pmap_destroy(isa->regs_16bit);
pmap_destroy(isa->regs_8bit);
}
}
-
/* Hunk: the arch_irn_handler indirection is deleted entirely — node ops are
 * now reached through the single constant ia32_irn_ops vtable attached to the
 * opcodes, so the handler lookup and its accessor become dead code. */
-static const void *ia32_get_irn_ops(const arch_irn_handler_t *self,
- const ir_node *irn)
-{
- (void) self;
- (void) irn;
- return &ia32_irn_ops;
-}
-
-const arch_irn_handler_t ia32_irn_handler = {
- ia32_get_irn_ops
-};
-
-const arch_irn_handler_t *ia32_get_irn_handler(const void *self)
-{
- (void) self;
- return &ia32_irn_handler;
-}
-
/* NOTE(review): ia32_to_appear_in_schedule is truncated by the diff — only
 * its opening lines are visible. */
int ia32_to_appear_in_schedule(void *block_env, const ir_node *irn)
{
(void) block_env;
* Returns the estimated execution time of an ia32 irn.
 */
/* Hunk: the exectime hook no longer needs an arch_env to look up per-node
 * ops; it calls the one-argument cost function directly and ignores env. */
static sched_timestep_t ia32_sched_exectime(void *env, const ir_node *irn) {
- const arch_env_t *arch_env = env;
- return is_ia32_irn(irn) ? ia32_get_op_estimated_cost(arch_get_irn_ops(arch_env, irn), irn) : 1;
+ (void) env;
+ return is_ia32_irn(irn) ? ia32_get_op_estimated_cost(irn) : 1;
}
list_sched_selector_t ia32_sched_selector;
 */
/* Hunk: the 64bit-compare rejection is hoisted out of the use_cmov branch so
 * both the cmov and the no-cmov paths share one Cmp extraction; Phi chains
 * are walked with get_Phi_next instead of the generic link field; and the
 * no-cmov path reuses the already-extracted `cmp` (checking cmp == NULL)
 * instead of re-deriving it from sel.  NOTE(review): several branch bodies
 * are elided by the diff. */
static int ia32_is_psi_allowed(ir_node *sel, ir_node *phi_list, int i, int j)
{
- ir_node *phi, *left;
+ ir_node *phi;
ir_node *cmp = NULL;
- ir_mode *cmp_mode;
- if (ia32_cg_config.use_cmov) {
- /* we can't handle psis with 64bit compares yet */
- if (is_Proj(sel)) {
- cmp = get_Proj_pred(sel);
- if (is_Cmp(cmp)) {
- left = get_Cmp_left(cmp);
- cmp_mode = get_irn_mode(left);
- if (!mode_is_float(cmp_mode) && get_mode_size_bits(cmp_mode) > 32)
- return 0;
- } else {
- cmp = NULL;
- }
+ /* we can't handle psis with 64bit compares yet */
+ if (is_Proj(sel)) {
+ cmp = get_Proj_pred(sel);
+ if (is_Cmp(cmp)) {
+ ir_node *left = get_Cmp_left(cmp);
+ ir_mode *cmp_mode = get_irn_mode(left);
+ if (!mode_is_float(cmp_mode) && get_mode_size_bits(cmp_mode) > 32)
+ return 0;
+ } else {
+ cmp = NULL;
+ }
}
+ }
+ if (ia32_cg_config.use_cmov) {
if (ia32_cg_config.use_sse2 && cmp != NULL) {
pn_Cmp pn = get_Proj_proj(sel);
ir_node *cl = get_Cmp_left(cmp);
ir_node *cr = get_Cmp_right(cmp);
/* check the Phi nodes: no 64bit and no floating point cmov */
- for (phi = phi_list; phi; phi = get_irn_link(phi)) {
+ for (phi = phi_list; phi; phi = get_Phi_next(phi)) {
ir_mode *mode = get_irn_mode(phi);
if (mode_is_float(mode)) {
}
} else {
/* check the Phi nodes: no 64bit and no floating point cmov */
- for (phi = phi_list; phi; phi = get_irn_link(phi)) {
+ for (phi = phi_list; phi; phi = get_Phi_next(phi)) {
ir_mode *mode = get_irn_mode(phi);
if (mode_is_float(mode) || get_mode_size_bits(mode) > 32)
pn_Cmp pn;
/* No cmov, only some special cases */
- if (! is_Proj(sel))
+ if (cmp == NULL)
return 0;
- cmp = get_Proj_pred(sel);
- if (! is_Cmp(cmp))
- return 0;
-
- left = get_Cmp_left(cmp);
- cmp_mode = get_irn_mode(left);
/* Now some supported cases here */
pn = get_Proj_proj(sel);
cl = get_Cmp_left(cmp);
cr = get_Cmp_right(cmp);
- for (phi = phi_list; phi; phi = get_irn_link(phi)) {
+ for (phi = phi_list; phi; phi = get_Phi_next(phi)) {
ir_mode *mode = get_irn_mode(phi);
int res = 0;
ir_node *t, *f;
/* NOTE(review): fragment of the static backend_params initializer — two new
 * fields are appended: a 0 flag ("no immediate floating point mode") and an
 * extra trailing slot filled in later.  Positional initialization must keep
 * matching the backend_params struct layout. */
static backend_params p = {
1, /* need dword lowering */
1, /* support inline assembly */
+ 0, /* no immediate floating point mode. */
NULL, /* no additional opcodes */
NULL, /* will be set later */
ia32_create_intrinsic_fkt,
&intrinsic_env, /* context for ia32_create_intrinsic_fkt */
NULL, /* will be set below */
+ NULL /* will be set below */
};
ia32_setup_cg_config();
/* Hunk: the "stackalign" option help text now documents the power-of-two
 * encoding and the stored field is reached through the renamed arch_env
 * member (matching the alignment hunk above); the deleted irn-handler
 * accessor is removed from the isa interface vtable. */
static const lc_opt_table_entry_t ia32_options[] = {
LC_OPT_ENT_ENUM_INT("gasmode", "set the GAS compatibility mode", &gas_var),
- LC_OPT_ENT_INT("stackalign", "set stack alignment for calls",
- &ia32_isa_template.arch_isa.stack_alignment),
+ LC_OPT_ENT_INT("stackalign", "set power of two stack alignment for calls",
+ &ia32_isa_template.arch_env.stack_alignment),
LC_OPT_LAST
};
ia32_get_reg_class,
ia32_get_reg_class_for_mode,
ia32_get_call_abi,
- ia32_get_irn_handler,
ia32_get_code_generator_if,
ia32_get_list_sched_selector,
ia32_get_ilp_sched_selector,