- Removed the #ifdef WITH_LIBCORE guards from the backend; compilation without libcore was broken anyway...
[libfirm] / ir / be / ia32 / bearch_ia32.c
index 0c7b189..699df5c 100644 (file)
@@ -3,9 +3,8 @@
  * @author Christian Wuerdig
  * $Id$
  */
-
 #ifdef HAVE_CONFIG_H
-#include "config.h"
+#include <config.h>
 #endif
 
 #ifdef HAVE_MALLOC_H
 #include <alloca.h>
 #endif
 
-#ifdef WITH_LIBCORE
 #include <libcore/lc_opts.h>
 #include <libcore/lc_opts_enum.h>
-#endif /* WITH_LIBCORE */
 
 #include <math.h>
 
@@ -76,7 +73,9 @@ static INLINE ir_node *create_const(ia32_code_gen_t *cg, ir_node **place,
                                     create_const_node_func func, arch_register_t* reg)
 {
        ir_node *block, *res;
+       ir_node *in[1];
        ir_node *startnode;
+       ir_node *keep;
 
        if(*place != NULL)
                return *place;
@@ -86,9 +85,15 @@ static INLINE ir_node *create_const(ia32_code_gen_t *cg, ir_node **place,
        arch_set_irn_register(cg->arch_env, res, reg);
        *place = res;
 
+       /* keep the node so it isn't accidentally removed when unused ... */
+       in[0] = res;
+       keep = be_new_Keep(arch_register_get_class(reg), cg->irg, block, 1, in);
+
+       /* schedule the node if we already have a scheduled program */
        startnode = get_irg_start(cg->irg);
        if(sched_is_scheduled(startnode)) {
-               sched_add_before(startnode, res);
+               sched_add_after(startnode, res);
+               sched_add_after(res, keep);
        }
 
        return res;
@@ -317,7 +322,7 @@ static arch_irn_class_t ia32_classify(const void *self, const ir_node *irn) {
        if (is_ia32_St(irn) || is_ia32_Store8Bit(irn))
                classification |= arch_irn_class_store;
 
-       if (is_ia32_got_reload(irn))
+       if (is_ia32_need_stackent(irn))
                classification |= arch_irn_class_reload;
 
        return classification;
@@ -368,7 +373,9 @@ static void ia32_set_frame_offset(const void *self, ir_node *irn, int bias) {
        const ia32_irn_ops_t *ops = self;
 
        if (get_ia32_frame_ent(irn)) {
-               if(is_ia32_Pop(irn)) {
+               ia32_am_flavour_t am_flav;
+
+               if (is_ia32_Pop(irn)) {
                        int omit_fp = be_abi_omit_fp(ops->cg->birg->abi);
                        if (omit_fp) {
                                /* Pop nodes modify the stack pointer before calculating the destination
@@ -380,7 +387,7 @@ static void ia32_set_frame_offset(const void *self, ir_node *irn, int bias) {
 
                DBG((ops->cg->mod, LEVEL_1, "stack biased %+F with %d\n", irn, bias));
 
-               ia32_am_flavour_t am_flav = get_ia32_am_flavour(irn);
+               am_flav  = get_ia32_am_flavour(irn);
                am_flav |= ia32_O;
                set_ia32_am_flavour(irn, am_flav);
 
@@ -415,6 +422,161 @@ static void ia32_abi_dont_save_regs(void *self, pset *s)
                pset_insert_ptr(s, env->isa->bp);
 }
 
+#if 0
+static unsigned count_callee_saves(ia32_code_gen_t *cg)
+{
+       unsigned callee_saves = 0;
+       int c, num_reg_classes;
+       arch_isa_if_t *isa;
+
+       num_reg_classes = arch_isa_get_n_reg_class(isa);
+       for(c = 0; c < num_reg_classes; ++c) {
+               int r, num_registers;
+               arch_register_class_t *regclass = arch_isa_get_reg_class(isa, c);
+
+               num_registers = arch_register_class_n_regs(regclass);
+               for(r = 0; r < num_registers; ++r) {
+                       arch_register_t *reg = arch_register_for_index(regclass, r);
+                       if(arch_register_type_is(reg, callee_save))
+                               callee_saves++;
+               }
+       }
+
+       return callee_saves;
+}
+
+static void create_callee_save_regprojs(ia32_code_gen_t *cg, ir_node *regparams)
+{
+       int c, num_reg_classes;
+       arch_isa_if_t *isa;
+       long n = 0;
+
+       num_reg_classes = arch_isa_get_n_reg_class(isa);
+       cg->initial_regs = obstack_alloc(cg->obst,
+                                        num_reg_classes * sizeof(cg->initial_regs[0]));
+
+       for(c = 0; c < num_reg_classes; ++c) {
+               int r, num_registers;
+               ir_node **initial_regclass;
+               arch_register_class_t *regclass = arch_isa_get_reg_class(isa, c);
+
+               num_registers = arch_register_class_n_regs(regclass);
+               initial_regclass = obstack_alloc(num_registers * sizeof(initial_regclass[0]));
+               for(r = 0; r < num_registers; ++r) {
+                       ir_node *proj;
+                       arch_register_t *reg = arch_register_for_index(regclass, r);
+                       if(!arch_register_type_is(reg, callee_save))
+                               continue;
+
+                       proj = new_r_Proj(irg, start_block, regparams, n);
+                       be_set_constr_single_reg(regparams, n, reg);
+                       arch_set_irn_register(cg->arch_env, proj, reg);
+
+                       initial_regclass[r] = proj;
+                       n++;
+               }
+               cg->initial_regs[c] = initial_regclass;
+       }
+}
+
+static void callee_saves_obstack_grow(ia32_code_gen_t *cg)
+{
+       int c, num_reg_classes;
+       arch_isa_if_t *isa;
+
+       for(c = 0; c < num_reg_classes; ++c) {
+               int r, num_registers;
+
+               num_registers = arch_register_class_n_regs(regclass);
+               for(r = 0; r < num_registers; ++r) {
+                       ir_node *proj;
+                       arch_register_t *reg = arch_register_for_index(regclass, r);
+                       if(!arch_register_type_is(reg, callee_save))
+                               continue;
+
+                       proj = cg->initial_regs[c][r];
+                       obstack_ptr_grow(cg->obst, proj);
+               }
+       }
+}
+
+static unsigned count_parameters_in_regs(ia32_code_gen_t *cg)
+{
+       return 0;
+}
+
+static void ia32_gen_prologue(ia32_code_gen_t *cg)
+{
+       ir_graph *irg = cg->irg;
+       ir_node *start_block = get_irg_start_block(irg);
+       ir_node *sp;
+       ir_node *regparams;
+       int n_regparams_out;
+
+       /* Create the regparams node */
+       n_regparams_out = count_callee_saves(cg) + count_parameters_in_regs(cg);
+       regparams = be_new_RegParams(irg, start_block, n_regparams_out);
+
+       create_callee_save_regprojs(cg, regparams);
+
+       /* Setup the stack */
+       if(!omit_fp) {
+               ir_node *bl      = get_irg_start_block(env->irg);
+               ir_node *curr_sp = be_abi_reg_map_get(reg_map, env->isa->sp);
+               ir_node *curr_bp = be_abi_reg_map_get(reg_map, env->isa->bp);
+               ir_node *noreg = ia32_new_NoReg_gp(cg);
+               ir_node *push;
+
+               /* push ebp */
+               push    = new_rd_ia32_Push(NULL, env->irg, bl, noreg, noreg, curr_bp, curr_sp, *mem);
+               curr_sp = new_r_Proj(env->irg, bl, push, get_irn_mode(curr_sp), pn_ia32_Push_stack);
+               *mem    = new_r_Proj(env->irg, bl, push, mode_M, pn_ia32_Push_M);
+
+               /* the push must have SP out register */
+               arch_set_irn_register(env->aenv, curr_sp, env->isa->sp);
+               set_ia32_flags(push, arch_irn_flags_ignore);
+
+               /* move esp to ebp */
+               curr_bp  = be_new_Copy(env->isa->bp->reg_class, env->irg, bl, curr_sp);
+               be_set_constr_single_reg(curr_bp, BE_OUT_POS(0), env->isa->bp);
+               arch_set_irn_register(env->aenv, curr_bp, env->isa->bp);
+               be_node_set_flags(curr_bp, BE_OUT_POS(0), arch_irn_flags_ignore);
+
+               /* beware: the copy must be done before any other sp use */
+               curr_sp = be_new_CopyKeep_single(env->isa->sp->reg_class, env->irg, bl, curr_sp, curr_bp, get_irn_mode(curr_sp));
+               be_set_constr_single_reg(curr_sp, BE_OUT_POS(0), env->isa->sp);
+               arch_set_irn_register(env->aenv, curr_sp, env->isa->sp);
+               be_node_set_flags(curr_sp, BE_OUT_POS(0), arch_irn_flags_ignore);
+
+               be_abi_reg_map_set(reg_map, env->isa->sp, curr_sp);
+               be_abi_reg_map_set(reg_map, env->isa->bp, curr_bp);
+       }
+
+       sp = be_new_IncSP(sp, irg, start_block, initialsp, BE_STACK_FRAME_SIZE_EXPAND);
+       set_irg_frame(irg, sp);
+}
+
+static void ia32_gen_epilogue(ia32_code_gen_t *cg)
+{
+       int n_callee_saves = count_callee_saves(cg);
+       int n_results_regs = 0;
+       int barrier_size;
+       ir_node *barrier;
+       ir_node *end_block = get_irg_end_block(irg);
+       ir_node **in;
+
+       /* We have to make sure that all reloads occur before the stack frame
+          gets destroyed, so we create a barrier for all callee-save and return
+          values here */
+       barrier_size = n_callee_saves + n_results_regs;
+       barrier = be_new_Barrier(irg, end_block, barrier_size,
+
+       /* simply remove the stack frame here */
+       curr_sp = be_new_IncSP(env->isa->sp, env->irg, bl, curr_sp, BE_STACK_FRAME_SIZE_SHRINK);
+       add_irn_dep(curr_sp, *mem);
+}
+#endif
+
 /**
  * Generate the routine prologue.
  *
@@ -725,16 +887,16 @@ static arch_inverse_t *ia32_get_inverse(const void *self, const ir_node *irn, in
                                inverse->costs += 1;
                        }
                        break;
-               case iro_ia32_Eor:
+               case iro_ia32_Xor:
                        if (get_ia32_immop_type(irn) != ia32_ImmNone) {
                                /* xor with const: inverse = xor */
-                               inverse->nodes[0] = new_rd_ia32_Eor(dbg, irg, block, noreg, noreg, get_irn_n(irn, i), noreg, nomem);
+                               inverse->nodes[0] = new_rd_ia32_Xor(dbg, irg, block, noreg, noreg, get_irn_n(irn, i), noreg, nomem);
                                inverse->costs   += (get_ia32_immop_type(irn) == ia32_ImmSymConst) ? 5 : 1;
                                copy_ia32_Immop_attr(inverse->nodes[0], (ir_node *)irn);
                        }
                        else {
                                /* normal xor */
-                               inverse->nodes[0] = new_rd_ia32_Eor(dbg, irg, block, noreg, noreg, (ir_node *) irn, get_irn_n(irn, i), nomem);
+                               inverse->nodes[0] = new_rd_ia32_Xor(dbg, irg, block, noreg, noreg, (ir_node *) irn, get_irn_n(irn, i), nomem);
                                inverse->costs   += 1;
                        }
                        break;
@@ -743,8 +905,8 @@ static arch_inverse_t *ia32_get_inverse(const void *self, const ir_node *irn, in
                        inverse->costs   += 1;
                        break;
                }
-               case iro_ia32_Minus: {
-                       inverse->nodes[0] = new_rd_ia32_Minus(dbg, irg, block, noreg, noreg, (ir_node*) irn, nomem);
+               case iro_ia32_Neg: {
+                       inverse->nodes[0] = new_rd_ia32_Neg(dbg, irg, block, noreg, noreg, (ir_node*) irn, nomem);
                        inverse->costs   += 1;
                        break;
                }
@@ -756,28 +918,21 @@ static arch_inverse_t *ia32_get_inverse(const void *self, const ir_node *irn, in
        return inverse;
 }
 
+static ir_mode *get_spill_mode_mode(const ir_mode *mode)
+{
+       if(mode_is_float(mode))
+               return mode_D;
+
+       return mode_Iu;
+}
+
 /**
  * Get the mode that should be used for spilling value node
  */
-static ir_mode *get_spill_mode(ia32_code_gen_t *cg, const ir_node *node)
+static ir_mode *get_spill_mode(const ir_node *node)
 {
        ir_mode *mode = get_irn_mode(node);
-       if (mode_is_float(mode)) {
-#if 0
-               // super exact spilling...
-               if (USE_SSE2(cg))
-                       return mode_D;
-               else
-                       return mode_E;
-#else
-               return mode_D;
-#endif
-       }
-       else
-               return mode_Is;
-
-       assert(0);
-       return mode;
+       return get_spill_mode_mode(mode);
 }
 
 /**
@@ -801,11 +956,9 @@ static int ia32_is_spillmode_compatible(const ir_mode *mode, const ir_mode *spil
  * @return Non-Zero if operand can be loaded
  */
 static int ia32_possible_memory_operand(const void *self, const ir_node *irn, unsigned int i) {
-       const ia32_irn_ops_t *ops = self;
-       ia32_code_gen_t      *cg  = ops->cg;
        ir_node *op = get_irn_n(irn, i);
        const ir_mode *mode = get_irn_mode(op);
-       const ir_mode *spillmode = get_spill_mode(cg, op);
+       const ir_mode *spillmode = get_spill_mode(op);
 
        if (! is_ia32_irn(irn)                            ||  /* must be an ia32 irn */
                get_irn_arity(irn) != 5                       ||  /* must be a binary operation */
@@ -837,7 +990,7 @@ static void ia32_perform_memory_operand(const void *self, ir_node *irn, ir_node
        set_ia32_am_flavour(irn, ia32_B);
        set_ia32_ls_mode(irn, get_irn_mode(get_irn_n(irn, i)));
        set_ia32_use_frame(irn);
-       set_ia32_got_reload(irn);
+       set_ia32_need_stackent(irn);
 
        set_irn_n(irn, 0, get_irg_frame(get_irn_irg(irn)));
        set_irn_n(irn, 3, ia32_get_admissible_noreg(cg, irn, 3));
@@ -891,23 +1044,6 @@ ia32_irn_ops_t ia32_irn_ops = {
  *                       |___/
  **************************************************/
 
-/**
- * Transform the Thread Local Store base.
- */
-static void transform_tls(ir_graph *irg) {
-       ir_node *irn = get_irg_tls(irg);
-
-       if (irn) {
-               dbg_info *dbg = get_irn_dbg_info(irn);
-               ir_node  *blk = get_nodes_block(irn);
-               ir_node  *newn;
-               newn = new_rd_ia32_LdTls(dbg, irg, blk, get_irn_mode(irn));
-
-               exchange(irn, newn);
-               set_irg_tls(irg, newn);
-       }
-}
-
 /**
  * Transforms the standard firm graph into
  * an ia32 firm graph
@@ -918,17 +1054,15 @@ static void ia32_prepare_graph(void *self) {
 
        FIRM_DBG_REGISTER(cg->mod, "firm.be.ia32.transform");
 
-       /* 1st: transform constants and psi condition trees */
+       /* 1st: transform psi condition trees */
        ia32_pre_transform_phase(cg);
 
        /* 2nd: transform all remaining nodes */
-       transform_tls(cg->irg);
        ia32_transform_graph(cg);
        // Matze: disabled for now. Because after transformation start block has no
-       // self-loop anymore so it will probably melt with its successor block.
-       //
-       // This will bring several nodes to the startblock and we still can't
-       // handle spill before the initial IncSP nicely
+       // self-loop anymore so it might be merged with its successor block. This
+       // will bring several nodes to the startblock which sometimes get scheduled
+       // before the initial IncSP/Barrier
        //local_optimize_graph(cg->irg);
 
        if (cg->dump)
@@ -1049,7 +1183,7 @@ static void transform_to_Load(ia32_code_gen_t *cg, ir_node *node) {
        ir_node *block       = get_nodes_block(node);
        ir_entity *ent       = be_get_frame_entity(node);
        ir_mode *mode        = get_irn_mode(node);
-       ir_mode *spillmode   = get_spill_mode(cg, node);
+       ir_mode *spillmode   = get_spill_mode(node);
        ir_node *noreg       = ia32_new_NoReg_gp(cg);
        ir_node *sched_point = NULL;
        ir_node *ptr         = get_irg_frame(irg);
@@ -1106,7 +1240,7 @@ static void transform_to_Store(ia32_code_gen_t *cg, ir_node *node) {
        ir_node *block = get_nodes_block(node);
        ir_entity *ent = be_get_frame_entity(node);
        const ir_node *spillval = get_irn_n(node, be_pos_Spill_val);
-       ir_mode *mode  = get_spill_mode(cg, spillval);
+       ir_mode *mode  = get_spill_mode(spillval);
        ir_node *noreg = ia32_new_NoReg_gp(cg);
        ir_node *nomem = new_rd_NoMem(irg);
        ir_node *ptr   = get_irg_frame(irg);
@@ -1319,16 +1453,16 @@ static void ia32_collect_frame_entity_nodes(ir_node *node, void *data)
        be_fec_env_t *env = data;
 
        if (be_is_Reload(node) && be_get_frame_entity(node) == NULL) {
-               const ir_mode *mode = get_irn_mode(node);
+               const ir_mode *mode = get_spill_mode_mode(get_irn_mode(node));
                int align = get_mode_size_bytes(mode);
                be_node_needs_frame_entity(env, node, mode, align);
        } else if(is_ia32_irn(node) && get_ia32_frame_ent(node) == NULL
                  && is_ia32_use_frame(node)) {
-               if (is_ia32_Load(node)) {
+               if (is_ia32_need_stackent(node) || is_ia32_Load(node)) {
                        const ir_mode *mode = get_ia32_ls_mode(node);
                        int align = get_mode_size_bytes(mode);
                        be_node_needs_frame_entity(env, node, mode, align);
-               } else if (is_ia32_vfild(node)) {
+               } else if (is_ia32_vfild(node) || is_ia32_xLoad(node)) {
                        const ir_mode *mode = get_ia32_ls_mode(node);
                        int align = 4;
                        be_node_needs_frame_entity(env, node, mode, align);
@@ -1341,7 +1475,8 @@ static void ia32_collect_frame_entity_nodes(ir_node *node, void *data)
                        if(!is_ia32_Store(node)
                                        && !is_ia32_xStore(node)
                                        && !is_ia32_xStoreSimple(node)
-                                       && !is_ia32_vfist(node)) {
+                                       && !is_ia32_vfist(node)
+                                       && !is_ia32_GetST0(node)) {
                                assert(0);
                        }
 #endif
@@ -1398,7 +1533,7 @@ static void ia32_codegen(void *self) {
        ia32_code_gen_t *cg = self;
        ir_graph        *irg = cg->irg;
 
-       ia32_gen_routine(cg->isa->out, irg, cg);
+       ia32_gen_routine(cg, cg->isa->out, irg);
 
        cur_reg_set = NULL;
 
@@ -1595,7 +1730,6 @@ static void *ia32_init(FILE *file_handle) {
 
        ia32_handle_intrinsics();
        ia32_switch_section(isa->out, NO_SECTION);
-       fprintf(isa->out, "\t.intel_syntax\n");
 
        /* needed for the debug support */
        ia32_switch_section(isa->out, SECTION_TEXT);
@@ -1830,18 +1964,18 @@ static const be_execution_unit_t ***ia32_get_allowed_execution_units(const void
                &ia32_execution_units_BRANCH[IA32_EXECUNIT_TP_BRANCH_BRANCH2],
                NULL,
        };
-       static const be_execution_unit_t *_allowed_units_ALU[] = {
-               &ia32_execution_units_ALU[IA32_EXECUNIT_TP_ALU_ALU1],
-               &ia32_execution_units_ALU[IA32_EXECUNIT_TP_ALU_ALU2],
-               &ia32_execution_units_ALU[IA32_EXECUNIT_TP_ALU_ALU3],
-               &ia32_execution_units_ALU[IA32_EXECUNIT_TP_ALU_ALU4],
+       static const be_execution_unit_t *_allowed_units_GP[] = {
+               &ia32_execution_units_GP[IA32_EXECUNIT_TP_GP_GP_EAX],
+               &ia32_execution_units_GP[IA32_EXECUNIT_TP_GP_GP_EBX],
+               &ia32_execution_units_GP[IA32_EXECUNIT_TP_GP_GP_ECX],
+               &ia32_execution_units_GP[IA32_EXECUNIT_TP_GP_GP_EDX],
+               &ia32_execution_units_GP[IA32_EXECUNIT_TP_GP_GP_ESI],
+               &ia32_execution_units_GP[IA32_EXECUNIT_TP_GP_GP_EDI],
+               &ia32_execution_units_GP[IA32_EXECUNIT_TP_GP_GP_EBP],
                NULL,
        };
        static const be_execution_unit_t *_allowed_units_DUMMY[] = {
-               &ia32_execution_units_DUMMY[IA32_EXECUNIT_TP_DUMMY_DUMMY1],
-               &ia32_execution_units_DUMMY[IA32_EXECUNIT_TP_DUMMY_DUMMY2],
-               &ia32_execution_units_DUMMY[IA32_EXECUNIT_TP_DUMMY_DUMMY3],
-               &ia32_execution_units_DUMMY[IA32_EXECUNIT_TP_DUMMY_DUMMY4],
+               &be_machine_execution_units_DUMMY[0],
                NULL,
        };
        static const be_execution_unit_t **_units_callret[] = {
@@ -1849,7 +1983,7 @@ static const be_execution_unit_t ***ia32_get_allowed_execution_units(const void
                NULL
        };
        static const be_execution_unit_t **_units_other[] = {
-               _allowed_units_ALU,
+               _allowed_units_GP,
                NULL
        };
        static const be_execution_unit_t **_units_dummy[] = {
@@ -1890,7 +2024,7 @@ static const be_machine_t *ia32_get_machine(const void *self) {
 /**
  * Return irp irgs in the desired order.
  */
-static ir_graph **ia32_get_irg_list(const void *self, ir_graph **irg_list) {
+static ir_graph **ia32_get_irg_list(const void *self, ir_graph ***irg_list) {
        return NULL;
 }
 
@@ -1970,7 +2104,6 @@ static const backend_params *ia32_get_libfirm_params(void) {
        p.if_conv_info = &ifconv;
        return &p;
 }
-#ifdef WITH_LIBCORE
 
 /* instruction set architectures. */
 static const lc_opt_enum_int_items_t arch_items[] = {
@@ -2037,7 +2170,6 @@ static const lc_opt_table_entry_t ia32_options[] = {
        LC_OPT_ENT_ENUM_INT("gasmode",   "set the GAS compatibility mode", &gas_var),
        { NULL }
 };
-#endif /* WITH_LIBCORE */
 
 const arch_isa_if_t ia32_isa_if = {
        ia32_init,