-/**
- * This is the main ia32 firm backend driver.
- * @author Christian Wuerdig
- * $Id$
+/*
+ * Copyright (C) 1995-2007 University of Karlsruhe. All rights reserved.
+ *
+ * This file is part of libFirm.
+ *
+ * This file may be distributed and/or modified under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation and appearing in the file LICENSE.GPL included in the
+ * packaging of this file.
+ *
+ * Licensees holding valid libFirm Professional Edition licenses may use
+ * this file in accordance with the libFirm Commercial License Agreement
+ * provided with the Software.
+ *
+ * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
+ * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE.
*/
+/**
+ * @file
+ * @brief This is the main ia32 firm backend driver.
+ * @author Christian Wuerdig
+ * @version $Id$
+ */
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
-#ifdef HAVE_MALLOC_H
-#include <malloc.h>
-#endif
-
-#ifdef HAVE_ALLOCA_H
-#include <alloca.h>
-#endif
-
-#ifdef WITH_LIBCORE
#include <libcore/lc_opts.h>
#include <libcore/lc_opts_enum.h>
-#endif /* WITH_LIBCORE */
#include <math.h>
#include "irgmod.h"
#include "irgopt.h"
#include "irbitset.h"
+#include "irgopt.h"
#include "pdeq.h"
+#include "pset.h"
#include "debug.h"
+#include "error.h"
+#include "xmalloc.h"
-#include "../beabi.h" /* the general register allocator interface */
+#include "../beabi.h"
+#include "../beirg_t.h"
#include "../benode_t.h"
#include "../belower.h"
#include "../besched_t.h"
#include "../beirgmod.h"
#include "../be_dbgout.h"
#include "../beblocksched.h"
+#include "../bemachine.h"
+#include "../beilpsched.h"
+#include "../bespillslots.h"
+#include "../bemodule.h"
+#include "../begnuas.h"
+#include "../bestate.h"
+
#include "bearch_ia32_t.h"
-#include "ia32_new_nodes.h" /* ia32 nodes interface */
-#include "gen_ia32_regalloc_if.h" /* the generated interface (register type and class defenitions) */
-#include "ia32_gen_decls.h" /* interface declaration emitter */
+#include "ia32_new_nodes.h"
+#include "gen_ia32_regalloc_if.h"
+#include "gen_ia32_machine.h"
#include "ia32_transform.h"
#include "ia32_emitter.h"
#include "ia32_map_regs.h"
#include "ia32_dbg_stat.h"
#include "ia32_finish.h"
#include "ia32_util.h"
+#include "ia32_fpu.h"
-#define DEBUG_MODULE "firm.be.ia32.isa"
+DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
/* TODO: ugly */
static set *cur_reg_set = NULL;
+ir_mode *mode_fpcw = NULL;
+
+typedef ir_node *(*create_const_node_func) (dbg_info *dbg, ir_graph *irg, ir_node *block);
+
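+/**
+ * Lazily creates a per-irg unique constant-like node (NoReg, Unknown, fpu
+ * truncate mode): the first call constructs the node in the start block via
+ * func, pins its register, caches it in *place and keeps it alive through a
+ * dependency on the End node; later calls return the cached node.
+ */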
+static INLINE ir_node *create_const(ia32_code_gen_t *cg, ir_node **place,
+ create_const_node_func func,
+ const arch_register_t* reg)
+{
+ ir_node *block, *res;
+
+ if(*place != NULL)
+ return *place;
+
+ block = get_irg_start_block(cg->irg);
+ res = func(NULL, cg->irg, block);
+ arch_set_irn_register(cg->arch_env, res, reg);
+ *place = res;
+
+ add_irn_dep(get_irg_end(cg->irg), res);
+ /* add_irn_dep(get_irg_start(cg->irg), res); */
+
+ return res;
+}
+
/* Creates the unique per irg GP NoReg node. */
ir_node *ia32_new_NoReg_gp(ia32_code_gen_t *cg) {
- return be_abi_get_callee_save_irn(cg->birg->abi, &ia32_gp_regs[REG_GP_NOREG]);
+ return create_const(cg, &cg->noreg_gp, new_rd_ia32_NoReg_GP,
+ &ia32_gp_regs[REG_GP_NOREG]);
+}
+
+ir_node *ia32_new_NoReg_vfp(ia32_code_gen_t *cg) {
+ return create_const(cg, &cg->noreg_vfp, new_rd_ia32_NoReg_VFP,
+ &ia32_vfp_regs[REG_VFP_NOREG]);
+}
+
+ir_node *ia32_new_NoReg_xmm(ia32_code_gen_t *cg) {
+ return create_const(cg, &cg->noreg_xmm, new_rd_ia32_NoReg_XMM,
+ &ia32_xmm_regs[REG_XMM_NOREG]);
}
/* Creates the unique per irg FP NoReg node. */
ir_node *ia32_new_NoReg_fp(ia32_code_gen_t *cg) {
- return be_abi_get_callee_save_irn(cg->birg->abi,
- USE_SSE2(cg) ? &ia32_xmm_regs[REG_XMM_NOREG] : &ia32_vfp_regs[REG_VFP_NOREG]);
+ return USE_SSE2(cg) ? ia32_new_NoReg_xmm(cg) : ia32_new_NoReg_vfp(cg);
+}
+
+ir_node *ia32_new_Unknown_gp(ia32_code_gen_t *cg) {
+ return create_const(cg, &cg->unknown_gp, new_rd_ia32_Unknown_GP,
+ &ia32_gp_regs[REG_GP_UKNWN]);
+}
+
+ir_node *ia32_new_Unknown_vfp(ia32_code_gen_t *cg) {
+ return create_const(cg, &cg->unknown_vfp, new_rd_ia32_Unknown_VFP,
+ &ia32_vfp_regs[REG_VFP_UKNWN]);
+}
+
+ir_node *ia32_new_Unknown_xmm(ia32_code_gen_t *cg) {
+ return create_const(cg, &cg->unknown_xmm, new_rd_ia32_Unknown_XMM,
+ &ia32_xmm_regs[REG_XMM_UKNWN]);
+}
+
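+/* Creates the unique per irg FPU truncate-mode constant (a ChangeCW node;
+ * presumably the control word loaded when a float must be converted to an
+ * integer with truncation semantics). */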
+ir_node *ia32_new_Fpu_truncate(ia32_code_gen_t *cg) {
+ return create_const(cg, &cg->fpu_trunc_mode, new_rd_ia32_ChangeCW,
+ &ia32_fp_cw_regs[REG_FPCW]);
}
+
/**
 * Returns gp_noreg or fp_noreg, depending on the input requirements.
*/
ir_node *ia32_get_admissible_noreg(ia32_code_gen_t *cg, ir_node *irn, int pos) {
- arch_register_req_t req;
- const arch_register_req_t *p_req;
+ const arch_register_req_t *req;
- p_req = arch_get_register_req(cg->arch_env, &req, irn, pos);
- assert(p_req && "Missing register requirements");
- if (p_req->cls == &ia32_reg_classes[CLASS_ia32_gp])
+ req = arch_get_register_req(cg->arch_env, irn, pos);
+ assert(req != NULL && "Missing register requirements");
+ if (req->cls == &ia32_reg_classes[CLASS_ia32_gp])
return ia32_new_NoReg_gp(cg);
- else
- return ia32_new_NoReg_fp(cg);
+
+ return ia32_new_NoReg_fp(cg);
}
/**************************************************
 * If the node returns a tuple (mode_T) then the Projs
 * will be asked for this information.
*/
-static const arch_register_req_t *ia32_get_irn_reg_req(const void *self, arch_register_req_t *req, const ir_node *irn, int pos) {
- const ia32_irn_ops_t *ops = self;
- const ia32_register_req_t *irn_req;
- long node_pos = pos == -1 ? 0 : pos;
- ir_mode *mode = is_Block(irn) ? NULL : get_irn_mode(irn);
- FIRM_DBG_REGISTER(firm_dbg_module_t *mod, DEBUG_MODULE);
-
- if (is_Block(irn) || mode == mode_M || mode == mode_X) {
- DBG((mod, LEVEL_1, "ignoring Block, mode_M, mode_X node %+F\n", irn));
- return NULL;
+static const arch_register_req_t *ia32_get_irn_reg_req(const void *self,
+ const ir_node *node,
+ int pos) {
+ long node_pos = pos == -1 ? 0 : pos;
+ ir_mode *mode = is_Block(node) ? NULL : get_irn_mode(node);
+
+ if (is_Block(node) || mode == mode_X) {
+ return arch_no_register_req;
}
if (mode == mode_T && pos < 0) {
- DBG((mod, LEVEL_1, "ignoring request OUT requirements for node %+F\n", irn));
- return NULL;
+ return arch_no_register_req;
}
- DBG((mod, LEVEL_1, "get requirements at pos %d for %+F ... ", pos, irn));
+ if (is_Proj(node)) {
+ if(mode == mode_M)
+ return arch_no_register_req;
- if (is_Proj(irn)) {
if(pos >= 0) {
- DBG((mod, LEVEL_1, "ignoring request IN requirements for node %+F\n", irn));
- return NULL;
+ return arch_no_register_req;
}
- node_pos = (pos == -1) ? get_Proj_proj(irn) : pos;
- irn = skip_Proj(irn);
-
- DB((mod, LEVEL_1, "skipping Proj, going to %+F at pos %d ... ", irn, node_pos));
+ node_pos = (pos == -1) ? get_Proj_proj(node) : pos;
+ node = skip_Proj_const(node);
}
- if (is_ia32_irn(irn)) {
- irn_req = (pos >= 0) ? get_ia32_in_req(irn, pos) : get_ia32_out_req(irn, node_pos);
- if (irn_req == NULL) {
- /* no requirements */
- return NULL;
- }
-
- DB((mod, LEVEL_1, "returning reqs for %+F at pos %d\n", irn, pos));
-
- memcpy(req, &(irn_req->req), sizeof(*req));
+ if (is_ia32_irn(node)) {
+ const arch_register_req_t *req;
+ if(pos >= 0)
+ req = get_ia32_in_req(node, pos);
+ else
+ req = get_ia32_out_req(node, node_pos);
- if (arch_register_req_is(&(irn_req->req), should_be_same)) {
- assert(irn_req->same_pos >= 0 && "should be same constraint for in -> out NYI");
- req->other_same = get_irn_n(irn, irn_req->same_pos);
- }
+ assert(req != NULL);
- if (arch_register_req_is(&(irn_req->req), should_be_different)) {
- assert(irn_req->different_pos >= 0 && "should be different constraint for in -> out NYI");
- req->other_different = get_irn_n(irn, irn_req->different_pos);
- }
- }
- else {
- /* treat Unknowns like Const with default requirements */
- if (is_Unknown(irn)) {
- DB((mod, LEVEL_1, "returning UKNWN reqs for %+F\n", irn));
- if (mode_is_float(mode)) {
- if (USE_SSE2(ops->cg))
- memcpy(req, &(ia32_default_req_ia32_xmm_xmm_UKNWN), sizeof(*req));
- else
- memcpy(req, &(ia32_default_req_ia32_vfp_vfp_UKNWN), sizeof(*req));
- }
- else if (mode_is_int(mode) || mode_is_reference(mode))
- memcpy(req, &(ia32_default_req_ia32_gp_gp_UKNWN), sizeof(*req));
- else if (mode == mode_T || mode == mode_M) {
- DBG((mod, LEVEL_1, "ignoring Unknown node %+F\n", irn));
- return NULL;
- }
- else
- assert(0 && "unsupported Unknown-Mode");
- }
- else {
- DB((mod, LEVEL_1, "returning NULL for %+F (not ia32)\n", irn));
- req = NULL;
- }
+ return req;
}
- return req;
+ /* unknowns should be transformed already */
+ assert(!is_Unknown(node));
+
+ return arch_no_register_req;
}
static void ia32_set_irn_reg(const void *self, ir_node *irn, const arch_register_t *reg) {
int pos = 0;
- const ia32_irn_ops_t *ops = self;
if (get_irn_mode(irn) == mode_X) {
return;
}
- DBG((ops->cg->mod, LEVEL_1, "ia32 assigned register %s to node %+F\n", reg->name, irn));
-
if (is_Proj(irn)) {
pos = get_Proj_proj(irn);
irn = skip_Proj(irn);
slots = get_ia32_slots(irn);
slots[pos] = reg;
- }
- else {
+ } else {
ia32_set_firm_reg(irn, reg, cur_reg_set);
}
}
}
pos = get_Proj_proj(irn);
- irn = skip_Proj(irn);
+ irn = skip_Proj_const(irn);
}
if (is_ia32_irn(irn)) {
const arch_register_t **slots;
slots = get_ia32_slots(irn);
reg = slots[pos];
- }
- else {
+ } else {
reg = ia32_get_firm_reg(irn, cur_reg_set);
}
static arch_irn_class_t ia32_classify(const void *self, const ir_node *irn) {
arch_irn_class_t classification = arch_irn_class_normal;
- irn = skip_Proj(irn);
+ irn = skip_Proj_const(irn);
if (is_cfop(irn))
classification |= arch_irn_class_branch;
if (is_ia32_Ld(irn))
classification |= arch_irn_class_load;
- if (is_ia32_St(irn) || is_ia32_Store8Bit(irn))
+ if (is_ia32_St(irn))
classification |= arch_irn_class_store;
- if (is_ia32_got_reload(irn))
+ if (is_ia32_need_stackent(irn))
classification |= arch_irn_class_reload;
return classification;
}
static arch_irn_flags_t ia32_get_flags(const void *self, const ir_node *irn) {
- arch_irn_flags_t flags;
- ir_node *pred = is_Proj(irn) && mode_is_datab(get_irn_mode(irn)) ? get_Proj_pred(irn) : NULL;
+ arch_irn_flags_t flags = arch_irn_flags_none;
if (is_Unknown(irn))
- flags = arch_irn_flags_ignore;
- else {
- /* pred is only set, if we have a Proj */
- flags = pred && is_ia32_irn(pred) ? get_ia32_out_flags(pred, get_Proj_proj(irn)) : arch_irn_flags_none;
+ return arch_irn_flags_ignore;
- irn = skip_Proj(irn);
- if (is_ia32_irn(irn))
- flags |= get_ia32_flags(irn);
+ if(is_Proj(irn) && mode_is_datab(get_irn_mode(irn))) {
+ ir_node *pred = get_Proj_pred(irn);
+
+ if(is_ia32_irn(pred)) {
+ flags = get_ia32_out_flags(pred, get_Proj_proj(irn));
+ }
+
+ irn = pred;
+ }
+
+ if (is_ia32_irn(irn)) {
+ flags |= get_ia32_flags(irn);
}
return flags;
ir_graph *irg; /**< The associated graph. */
} ia32_abi_env_t;
-static entity *ia32_get_frame_entity(const void *self, const ir_node *irn) {
+static ir_entity *ia32_get_frame_entity(const void *self, const ir_node *irn) {
return is_ia32_irn(irn) ? get_ia32_frame_ent(irn) : NULL;
}
-static void ia32_set_frame_entity(const void *self, ir_node *irn, entity *ent) {
+static void ia32_set_frame_entity(const void *self, ir_node *irn, ir_entity *ent) {
set_ia32_frame_ent(irn, ent);
}
static void ia32_set_frame_offset(const void *self, ir_node *irn, int bias) {
- char buf[64];
const ia32_irn_ops_t *ops = self;
if (get_ia32_frame_ent(irn)) {
- ia32_am_flavour_t am_flav = get_ia32_am_flavour(irn);
+ ia32_am_flavour_t am_flav;
- if(is_ia32_Pop(irn)) {
+ if (is_ia32_Pop(irn)) {
int omit_fp = be_abi_omit_fp(ops->cg->birg->abi);
if (omit_fp) {
/* Pop nodes modify the stack pointer before calculating the destination
}
}
- DBG((ops->cg->mod, LEVEL_1, "stack biased %+F with %d\n", irn, bias));
-
- snprintf(buf, sizeof(buf), "%d", bias);
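+	/* merge the stack bias into the node's address mode offset and mark the
+	 * node as carrying an offset (ia32_O) */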
+ am_flav = get_ia32_am_flavour(irn);
+ am_flav |= ia32_O;
+ set_ia32_am_flavour(irn, am_flav);
- if (get_ia32_op_type(irn) == ia32_Normal) {
- set_ia32_cnst(irn, buf);
- } else {
- add_ia32_am_offs(irn, buf);
- am_flav |= ia32_O;
- set_ia32_am_flavour(irn, am_flav);
- }
+ add_ia32_am_offs_int(irn, bias);
}
}
long proj = get_Proj_proj(irn);
ir_node *pred = get_Proj_pred(irn);
- if (proj == pn_ia32_Push_stack && is_ia32_Push(pred))
+ if (is_ia32_Push(pred) && proj == pn_ia32_Push_stack)
return 4;
- if (proj == pn_ia32_Pop_stack && is_ia32_Pop(pred))
+ if (is_ia32_Pop(pred) && proj == pn_ia32_Pop_stack)
return -4;
}
static const arch_register_t *ia32_abi_prologue(void *self, ir_node **mem, pmap *reg_map)
{
ia32_abi_env_t *env = self;
+ const ia32_isa_t *isa = (ia32_isa_t *)env->isa;
+ ia32_code_gen_t *cg = isa->cg;
if (! env->flags.try_omit_fp) {
ir_node *bl = get_irg_start_block(env->irg);
ir_node *curr_sp = be_abi_reg_map_get(reg_map, env->isa->sp);
ir_node *curr_bp = be_abi_reg_map_get(reg_map, env->isa->bp);
- ir_node *noreg = be_abi_reg_map_get(reg_map, &ia32_gp_regs[REG_GP_NOREG]);
+ ir_node *noreg = ia32_new_NoReg_gp(cg);
ir_node *push;
+ /* ALL nodes representing bp must be set to ignore. */
+ be_node_set_flags(get_Proj_pred(curr_bp), BE_OUT_POS(get_Proj_proj(curr_bp)), arch_irn_flags_ignore);
+
/* push ebp */
push = new_rd_ia32_Push(NULL, env->irg, bl, noreg, noreg, curr_bp, curr_sp, *mem);
curr_sp = new_r_Proj(env->irg, bl, push, get_irn_mode(curr_sp), pn_ia32_Push_stack);
/* simply remove the stack frame here */
curr_sp = be_new_IncSP(env->isa->sp, env->irg, bl, curr_sp, BE_STACK_FRAME_SIZE_SHRINK);
add_irn_dep(curr_sp, *mem);
- }
- else {
+ } else {
const ia32_isa_t *isa = (ia32_isa_t *)env->isa;
+ ia32_code_gen_t *cg = isa->cg;
ir_mode *mode_bp = env->isa->bp->reg_class->mode;
/* gcc always emits a leave at the end of a routine */
set_ia32_flags(leave, arch_irn_flags_ignore);
curr_bp = new_r_Proj(current_ir_graph, bl, leave, mode_bp, pn_ia32_Leave_frame);
curr_sp = new_r_Proj(current_ir_graph, bl, leave, get_irn_mode(curr_sp), pn_ia32_Leave_stack);
- *mem = new_r_Proj(current_ir_graph, bl, leave, mode_M, pn_ia32_Leave_M);
- }
- else {
- ir_node *noreg = be_abi_reg_map_get(reg_map, &ia32_gp_regs[REG_GP_NOREG]);
+ } else {
+ ir_node *noreg = ia32_new_NoReg_gp(cg);
ir_node *pop;
/* copy ebp to esp */
set_ia32_flags(pop, arch_irn_flags_ignore);
curr_bp = new_r_Proj(current_ir_graph, bl, pop, mode_bp, pn_ia32_Pop_res);
curr_sp = new_r_Proj(current_ir_graph, bl, pop, get_irn_mode(curr_sp), pn_ia32_Pop_stack);
- *mem = new_r_Proj(current_ir_graph, bl, pop, mode_M, pn_ia32_Pop_M);
+
+ *mem = new_r_Proj(current_ir_graph, bl, pop, mode_M, pn_ia32_Pop_M);
}
arch_set_irn_register(env->aenv, curr_sp, env->isa->sp);
arch_set_irn_register(env->aenv, curr_bp, env->isa->bp);
ia32_abi_env_t *env = self;
- if ( !between_type) {
- entity *old_bp_ent;
- entity *ret_addr_ent;
- entity *omit_fp_ret_addr_ent;
+ if (! between_type) {
+ ir_entity *old_bp_ent;
+ ir_entity *ret_addr_ent;
+ ir_entity *omit_fp_ret_addr_ent;
- ir_type *old_bp_type = new_type_primitive(IDENT("bp"), mode_P);
- ir_type *ret_addr_type = new_type_primitive(IDENT("return_addr"), mode_P);
+ ir_type *old_bp_type = new_type_primitive(IDENT("bp"), mode_Iu);
+ ir_type *ret_addr_type = new_type_primitive(IDENT("return_addr"), mode_Iu);
between_type = new_type_struct(IDENT("ia32_between_type"));
old_bp_ent = new_entity(between_type, IDENT("old_bp"), old_bp_type);
ret_addr_ent = new_entity(between_type, IDENT("ret_addr"), ret_addr_type);
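+		/* frame layout sketch: the old base pointer sits at offset 0, the
+		 * return address directly above it */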
- set_entity_offset_bytes(old_bp_ent, 0);
- set_entity_offset_bytes(ret_addr_ent, get_type_size_bytes(old_bp_type));
+ set_entity_offset(old_bp_ent, 0);
+ set_entity_offset(ret_addr_ent, get_type_size_bytes(old_bp_type));
set_type_size_bytes(between_type, get_type_size_bytes(old_bp_type) + get_type_size_bytes(ret_addr_type));
set_type_state(between_type, layout_fixed);
omit_fp_between_type = new_type_struct(IDENT("ia32_between_type_omit_fp"));
omit_fp_ret_addr_ent = new_entity(omit_fp_between_type, IDENT("ret_addr"), ret_addr_type);
- set_entity_offset_bytes(omit_fp_ret_addr_ent, 0);
+ set_entity_offset(omit_fp_ret_addr_ent, 0);
set_type_size_bytes(omit_fp_between_type, get_type_size_bytes(ret_addr_type));
set_type_state(omit_fp_between_type, layout_fixed);
}
const ia32_irn_ops_t *ops = self;
if (is_Proj(irn))
- return 0;
+ return 0;
+ if (!is_ia32_irn(irn))
+ return 0;
assert(is_ia32_irn(irn));
static arch_inverse_t *ia32_get_inverse(const void *self, const ir_node *irn, int i, arch_inverse_t *inverse, struct obstack *obst) {
ir_graph *irg;
ir_mode *mode;
+ ir_mode *irn_mode;
ir_node *block, *noreg, *nomem;
- int pnc;
+ dbg_info *dbg;
/* we cannot invert non-ia32 irns */
if (! is_ia32_irn(irn))
if (get_ia32_op_type(irn) != ia32_Normal)
return NULL;
- irg = get_irn_irg(irn);
- block = get_nodes_block(irn);
- mode = get_ia32_res_mode(irn);
- noreg = get_irn_n(irn, 0);
- nomem = new_r_NoMem(irg);
+ irg = get_irn_irg(irn);
+ block = get_nodes_block(irn);
+ mode = get_irn_mode(irn);
+ irn_mode = get_irn_mode(irn);
+ noreg = get_irn_n(irn, 0);
+ nomem = new_r_NoMem(irg);
+ dbg = get_irn_dbg_info(irn);
/* initialize structure */
inverse->nodes = obstack_alloc(obst, 2 * sizeof(inverse->nodes[0]));
inverse->costs = 0;
- inverse->n = 2;
+ inverse->n = 1;
switch (get_ia32_irn_opcode(irn)) {
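+	/* Example (constant value hypothetical): for res = Add(x, Const 7) the
+	 * operand x can be recomputed as res + (-7); the cases below construct
+	 * such inverse operations and estimate their costs. */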
case iro_ia32_Add:
if (get_ia32_immop_type(irn) == ia32_ImmConst) {
/* we have an add with a const here */
/* inverse == add with negated const */
- inverse->nodes[0] = new_rd_ia32_Add(NULL, irg, block, noreg, noreg, get_irn_n(irn, i), noreg, nomem);
- pnc = pn_ia32_Add_res;
+ inverse->nodes[0] = new_rd_ia32_Add(dbg, irg, block, noreg, noreg, get_irn_n(irn, i), noreg, nomem);
inverse->costs += 1;
copy_ia32_Immop_attr(inverse->nodes[0], (ir_node *)irn);
set_ia32_Immop_tarval(inverse->nodes[0], tarval_neg(get_ia32_Immop_tarval(irn)));
else if (get_ia32_immop_type(irn) == ia32_ImmSymConst) {
/* we have an add with a symconst here */
/* inverse == sub with const */
- inverse->nodes[0] = new_rd_ia32_Sub(NULL, irg, block, noreg, noreg, get_irn_n(irn, i), noreg, nomem);
- pnc = pn_ia32_Sub_res;
+ inverse->nodes[0] = new_rd_ia32_Sub(dbg, irg, block, noreg, noreg, get_irn_n(irn, i), noreg, nomem);
inverse->costs += 2;
copy_ia32_Immop_attr(inverse->nodes[0], (ir_node *)irn);
}
else {
/* normal add: inverse == sub */
- ir_node *proj = ia32_get_res_proj(irn);
- assert(proj);
-
- inverse->nodes[0] = new_rd_ia32_Sub(NULL, irg, block, noreg, noreg, proj, get_irn_n(irn, i ^ 1), nomem);
- pnc = pn_ia32_Sub_res;
+ inverse->nodes[0] = new_rd_ia32_Sub(dbg, irg, block, noreg, noreg, (ir_node*) irn, get_irn_n(irn, i ^ 1), nomem);
inverse->costs += 2;
}
break;
if (get_ia32_immop_type(irn) != ia32_ImmNone) {
/* we have a sub with a const/symconst here */
/* inverse == add with this const */
- inverse->nodes[0] = new_rd_ia32_Add(NULL, irg, block, noreg, noreg, get_irn_n(irn, i), noreg, nomem);
- pnc = pn_ia32_Add_res;
+ inverse->nodes[0] = new_rd_ia32_Add(dbg, irg, block, noreg, noreg, get_irn_n(irn, i), noreg, nomem);
inverse->costs += (get_ia32_immop_type(irn) == ia32_ImmSymConst) ? 5 : 1;
copy_ia32_Immop_attr(inverse->nodes[0], (ir_node *)irn);
}
else {
/* normal sub */
- ir_node *proj = ia32_get_res_proj(irn);
- assert(proj);
-
if (i == 2) {
- inverse->nodes[0] = new_rd_ia32_Add(NULL, irg, block, noreg, noreg, proj, get_irn_n(irn, 3), nomem);
+ inverse->nodes[0] = new_rd_ia32_Add(dbg, irg, block, noreg, noreg, (ir_node*) irn, get_irn_n(irn, 3), nomem);
}
else {
- inverse->nodes[0] = new_rd_ia32_Sub(NULL, irg, block, noreg, noreg, get_irn_n(irn, 2), proj, nomem);
+ inverse->nodes[0] = new_rd_ia32_Sub(dbg, irg, block, noreg, noreg, get_irn_n(irn, 2), (ir_node*) irn, nomem);
}
- pnc = pn_ia32_Sub_res;
inverse->costs += 1;
}
break;
- case iro_ia32_Eor:
+ case iro_ia32_Xor:
if (get_ia32_immop_type(irn) != ia32_ImmNone) {
/* xor with const: inverse = xor */
- inverse->nodes[0] = new_rd_ia32_Eor(NULL, irg, block, noreg, noreg, get_irn_n(irn, i), noreg, nomem);
- pnc = pn_ia32_Eor_res;
+ inverse->nodes[0] = new_rd_ia32_Xor(dbg, irg, block, noreg, noreg, get_irn_n(irn, i), noreg, nomem);
inverse->costs += (get_ia32_immop_type(irn) == ia32_ImmSymConst) ? 5 : 1;
copy_ia32_Immop_attr(inverse->nodes[0], (ir_node *)irn);
}
else {
/* normal xor */
- inverse->nodes[0] = new_rd_ia32_Eor(NULL, irg, block, noreg, noreg, (ir_node *)irn, get_irn_n(irn, i), nomem);
- pnc = pn_ia32_Eor_res;
+ inverse->nodes[0] = new_rd_ia32_Xor(dbg, irg, block, noreg, noreg, (ir_node *) irn, get_irn_n(irn, i), nomem);
inverse->costs += 1;
}
break;
case iro_ia32_Not: {
- ir_node *proj = ia32_get_res_proj(irn);
- assert(proj);
-
- inverse->nodes[0] = new_rd_ia32_Not(NULL, irg, block, noreg, noreg, proj, nomem);
- pnc = pn_ia32_Not_res;
+ inverse->nodes[0] = new_rd_ia32_Not(dbg, irg, block, noreg, noreg, (ir_node*) irn, nomem);
inverse->costs += 1;
break;
}
- case iro_ia32_Minus: {
- ir_node *proj = ia32_get_res_proj(irn);
- assert(proj);
-
- inverse->nodes[0] = new_rd_ia32_Minus(NULL, irg, block, noreg, noreg, proj, nomem);
- pnc = pn_ia32_Minus_res;
+ case iro_ia32_Neg: {
+ inverse->nodes[0] = new_rd_ia32_Neg(dbg, irg, block, noreg, noreg, (ir_node*) irn, nomem);
inverse->costs += 1;
break;
}
return NULL;
}
- set_ia32_res_mode(inverse->nodes[0], mode);
- inverse->nodes[1] = new_r_Proj(irg, block, inverse->nodes[0], mode, pnc);
-
return inverse;
}
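+/**
+ * Returns the mode that values of the given mode are spilled with: mode_D
+ * for all float modes, mode_Iu for everything else.
+ */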
+static ir_mode *get_spill_mode_mode(const ir_mode *mode)
+{
+ if(mode_is_float(mode))
+ return mode_D;
+
+ return mode_Iu;
+}
+
+/**
+ * Get the mode that should be used for spilling the given value node
+ */
+static ir_mode *get_spill_mode(const ir_node *node)
+{
+ ir_mode *mode = get_irn_mode(node);
+ return get_spill_mode_mode(mode);
+}
+
+/**
+ * Checks whether an address mode reload for a node of mode @p mode is
+ * compatible with a spill slot of mode @p spillmode
+ */
+static int ia32_is_spillmode_compatible(const ir_mode *mode, const ir_mode *spillmode)
+{
+ if(mode_is_float(mode)) {
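+		/* a float reload must match the spill mode exactly, e.g. a 32 bit
+		 * float cannot be loaded back from a 64 bit double spill slot */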
+ return mode == spillmode;
+ } else {
+ return 1;
+ }
+}
+
/**
 * Check if irn can load its operand at position i from memory (source address mode).
 * @param self Pointer to irn ops itself
 * @return Non-zero if the operand can be loaded
*/
static int ia32_possible_memory_operand(const void *self, const ir_node *irn, unsigned int i) {
+ ir_node *op = get_irn_n(irn, i);
+ const ir_mode *mode = get_irn_mode(op);
+ const ir_mode *spillmode = get_spill_mode(op);
+
if (! is_ia32_irn(irn) || /* must be an ia32 irn */
get_irn_arity(irn) != 5 || /* must be a binary operation */
get_ia32_op_type(irn) != ia32_Normal || /* must not already be an address mode irn */
! (get_ia32_am_support(irn) & ia32_am_Source) || /* must be capable of source address mode */
+ ! ia32_is_spillmode_compatible(mode, spillmode) ||
(i != 2 && i != 3) || /* a "real" operand position must be requested */
(i == 2 && ! is_ia32_commutative(irn)) || /* if first operand requested irn must be commutative */
is_ia32_use_frame(irn)) /* must not already use frame */
set_ia32_am_flavour(irn, ia32_B);
set_ia32_ls_mode(irn, get_irn_mode(get_irn_n(irn, i)));
set_ia32_use_frame(irn);
- set_ia32_got_reload(irn);
+ set_ia32_need_stackent(irn);
set_irn_n(irn, 0, get_irg_frame(get_irn_irg(irn)));
- set_irn_n(irn, 4, spill);
-
- /*
- Input at position one is index register, which is NoReg.
- We would need cg object to get a real noreg, but we cannot
- access it from here.
- */
set_irn_n(irn, 3, ia32_get_admissible_noreg(cg, irn, 3));
+ set_irn_n(irn, 4, spill);
//FIXME DBG_OPT_AM_S(reload, irn);
}
ia32_abi_get_between_type,
ia32_abi_dont_save_regs,
ia32_abi_prologue,
- ia32_abi_epilogue,
+ ia32_abi_epilogue
};
/* fill register allocator interface */
* |___/
**************************************************/
-static void ia32_kill_convs(ia32_code_gen_t *cg) {
- ir_node *irn;
-
- /* BEWARE: the Projs are inserted in the set */
- foreach_nodeset(cg->kill_conv, irn) {
- ir_node *in = get_irn_n(get_Proj_pred(irn), 2);
- edges_reroute(irn, in, cg->birg->irg);
- }
-}
-
-/**
- * Transform the Thread Local Store base.
- */
-static void transform_tls(ir_graph *irg) {
- ir_node *irn = get_irg_tls(irg);
-
- if (irn) {
- dbg_info *dbg = get_irn_dbg_info(irn);
- ir_node *blk = get_nodes_block(irn);
- ir_node *newn;
- newn = new_rd_ia32_LdTls(dbg, irg, blk, get_irn_mode(irn));
-
- exchange(irn, newn);
- }
-}
-
/**
* Transforms the standard firm graph into
* an ia32 firm graph
*/
static void ia32_prepare_graph(void *self) {
ia32_code_gen_t *cg = self;
- dom_front_info_t *dom;
- DEBUG_ONLY(firm_dbg_module_t *old_mod = cg->mod;)
- FIRM_DBG_REGISTER(cg->mod, "firm.be.ia32.transform");
-
- /* 1st: transform constants and psi condition trees */
+ /* transform psi condition trees */
ia32_pre_transform_phase(cg);
- /* 2nd: transform all remaining nodes */
- ia32_register_transformers();
- dom = be_compute_dominance_frontiers(cg->irg);
-
- cg->kill_conv = new_nodeset(5);
- transform_tls(cg->irg);
- irg_walk_blkwise_graph(cg->irg, NULL, ia32_transform_node, cg);
- ia32_kill_convs(cg);
- del_nodeset(cg->kill_conv);
+ /* transform all remaining nodes */
+ ia32_transform_graph(cg);
+ //add_fpu_edges(cg->birg);
- be_free_dominance_frontiers(dom);
+ // Matze: disabled for now, because after transformation the start block has
+ // no self-loop anymore, so it might be merged with its successor block. That
+ // would bring several nodes into the start block which sometimes get
+ // scheduled before the initial IncSP/Barrier.
+ local_optimize_graph(cg->irg);
if (cg->dump)
be_dump(cg->irg, "-transformed", dump_ir_block_graph_sched);
- /* 3rd: optimize address mode */
- FIRM_DBG_REGISTER(cg->mod, "firm.be.ia32.am");
- ia32_optimize_addressmode(cg);
+ /* optimize address mode */
+ ia32_optimize_graph(cg);
if (cg->dump)
be_dump(cg->irg, "-am", dump_ir_block_graph_sched);
- DEBUG_ONLY(cg->mod = old_mod;)
+ /* do code placement to optimize the position of constants */
+ place_code(cg->irg);
+
+ if (cg->dump)
+ be_dump(cg->irg, "-place", dump_ir_block_graph_sched);
}
/**
after removing the Load from the schedule.
*/
irg_walk_graph(cg->irg, NULL, remove_unused_loads_walker, already_visited);
+
+ /* setup fpu rounding modes */
+ ia32_setup_fpu_mode(cg);
}
/**
- * Transforms a be node into a Load.
+ * Transforms a be_Reload into an ia32 Load.
*/
-static void transform_to_Load(ia32_transform_env_t *env) {
- ir_node *irn = env->irn;
- entity *ent = be_get_frame_entity(irn);
- ir_mode *mode = env->mode;
- ir_node *noreg = ia32_new_NoReg_gp(env->cg);
- ir_node *nomem = new_rd_NoMem(env->irg);
+static void transform_to_Load(ia32_code_gen_t *cg, ir_node *node) {
+ ir_graph *irg = get_irn_irg(node);
+ dbg_info *dbg = get_irn_dbg_info(node);
+ ir_node *block = get_nodes_block(node);
+ ir_entity *ent = be_get_frame_entity(node);
+ ir_mode *mode = get_irn_mode(node);
+ ir_mode *spillmode = get_spill_mode(node);
+ ir_node *noreg = ia32_new_NoReg_gp(cg);
ir_node *sched_point = NULL;
- ir_node *ptr = get_irn_n(irn, 0);
- ir_node *mem = be_is_Reload(irn) ? get_irn_n(irn, 1) : nomem;
+ ir_node *ptr = get_irg_frame(irg);
+ ir_node *mem = get_irn_n(node, be_pos_Reload_mem);
ir_node *new_op, *proj;
const arch_register_t *reg;
- if (sched_is_scheduled(irn)) {
- sched_point = sched_prev(irn);
+ if (sched_is_scheduled(node)) {
+ sched_point = sched_prev(node);
}
- if (mode_is_float(mode)) {
- if (USE_SSE2(env->cg))
- new_op = new_rd_ia32_xLoad(env->dbg, env->irg, env->block, ptr, noreg, mem);
+ if (mode_is_float(spillmode)) {
+ if (USE_SSE2(cg))
+ new_op = new_rd_ia32_xLoad(dbg, irg, block, ptr, noreg, mem);
else
- new_op = new_rd_ia32_vfld(env->dbg, env->irg, env->block, ptr, noreg, mem);
+ new_op = new_rd_ia32_vfld(dbg, irg, block, ptr, noreg, mem);
+ }
+ else if (get_mode_size_bits(spillmode) == 128) {
+ // Reload 128 bit SSE registers
+ new_op = new_rd_ia32_xxLoad(dbg, irg, block, ptr, noreg, mem);
}
else
- new_op = new_rd_ia32_Load(env->dbg, env->irg, env->block, ptr, noreg, mem);
+ new_op = new_rd_ia32_Load(dbg, irg, block, ptr, noreg, mem);
set_ia32_am_support(new_op, ia32_am_Source);
set_ia32_op_type(new_op, ia32_AddrModeS);
set_ia32_am_flavour(new_op, ia32_B);
- set_ia32_ls_mode(new_op, mode);
+ set_ia32_ls_mode(new_op, spillmode);
set_ia32_frame_ent(new_op, ent);
set_ia32_use_frame(new_op);
- DBG_OPT_RELOAD2LD(irn, new_op);
+ DBG_OPT_RELOAD2LD(node, new_op);
- proj = new_rd_Proj(env->dbg, env->irg, env->block, new_op, mode, pn_ia32_Load_res);
+ proj = new_rd_Proj(dbg, irg, block, new_op, mode, pn_ia32_Load_res);
if (sched_point) {
sched_add_after(sched_point, new_op);
sched_add_after(new_op, proj);
- sched_remove(irn);
+ sched_remove(node);
}
/* copy the register from the old node to the new Load */
- reg = arch_get_irn_register(env->cg->arch_env, irn);
- arch_set_irn_register(env->cg->arch_env, new_op, reg);
+ reg = arch_get_irn_register(cg->arch_env, node);
+ arch_set_irn_register(cg->arch_env, new_op, reg);
- SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env->cg, irn));
+ SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(cg, node));
- exchange(irn, proj);
+ exchange(node, proj);
}
/**
- * Transforms a be node into a Store.
+ * Transforms a be_Spill node into an ia32 Store.
*/
-static void transform_to_Store(ia32_transform_env_t *env) {
- ir_node *irn = env->irn;
- entity *ent = be_get_frame_entity(irn);
- ir_mode *mode = env->mode;
- ir_node *noreg = ia32_new_NoReg_gp(env->cg);
- ir_node *nomem = new_rd_NoMem(env->irg);
- ir_node *ptr = get_irn_n(irn, 0);
- ir_node *val = get_irn_n(irn, 1);
- ir_node *new_op, *proj;
+static void transform_to_Store(ia32_code_gen_t *cg, ir_node *node) {
+ ir_graph *irg = get_irn_irg(node);
+ dbg_info *dbg = get_irn_dbg_info(node);
+ ir_node *block = get_nodes_block(node);
+ ir_entity *ent = be_get_frame_entity(node);
+ const ir_node *spillval = get_irn_n(node, be_pos_Spill_val);
+ ir_mode *mode = get_spill_mode(spillval);
+ ir_node *noreg = ia32_new_NoReg_gp(cg);
+ ir_node *nomem = new_rd_NoMem(irg);
+ ir_node *ptr = get_irg_frame(irg);
+ ir_node *val = get_irn_n(node, be_pos_Spill_val);
+ ir_node *store;
ir_node *sched_point = NULL;
- if (sched_is_scheduled(irn)) {
- sched_point = sched_prev(irn);
+ if (sched_is_scheduled(node)) {
+ sched_point = sched_prev(node);
+ }
+
+ /* No need to spill unknown values... */
+ if(is_ia32_Unknown_GP(val) ||
+ is_ia32_Unknown_VFP(val) ||
+ is_ia32_Unknown_XMM(val)) {
+ store = nomem;
+ if(sched_point)
+ sched_remove(node);
+
+ exchange(node, store);
+ return;
}
if (mode_is_float(mode)) {
- if (USE_SSE2(env->cg))
- new_op = new_rd_ia32_xStore(env->dbg, env->irg, env->block, ptr, noreg, val, nomem);
+ if (USE_SSE2(cg))
+ store = new_rd_ia32_xStore(dbg, irg, block, ptr, noreg, val, nomem);
else
- new_op = new_rd_ia32_vfst(env->dbg, env->irg, env->block, ptr, noreg, val, nomem);
- }
- else if (get_mode_size_bits(mode) == 8) {
- new_op = new_rd_ia32_Store8Bit(env->dbg, env->irg, env->block, ptr, noreg, val, nomem);
- }
- else {
- new_op = new_rd_ia32_Store(env->dbg, env->irg, env->block, ptr, noreg, val, nomem);
+ store = new_rd_ia32_vfst(dbg, irg, block, ptr, noreg, val, nomem);
+ } else if (get_mode_size_bits(mode) == 128) {
+ // Spill 128 bit SSE registers
+ store = new_rd_ia32_xxStore(dbg, irg, block, ptr, noreg, val, nomem);
+ } else if (get_mode_size_bits(mode) == 8) {
+ store = new_rd_ia32_Store8Bit(dbg, irg, block, ptr, noreg, val, nomem);
+ } else {
+ store = new_rd_ia32_Store(dbg, irg, block, ptr, noreg, val, nomem);
}
- set_ia32_am_support(new_op, ia32_am_Dest);
- set_ia32_op_type(new_op, ia32_AddrModeD);
- set_ia32_am_flavour(new_op, ia32_B);
- set_ia32_ls_mode(new_op, mode);
- set_ia32_frame_ent(new_op, ent);
- set_ia32_use_frame(new_op);
-
- DBG_OPT_SPILL2ST(irn, new_op);
-
- proj = new_rd_Proj(env->dbg, env->irg, env->block, new_op, mode_M, pn_ia32_Store_M);
+ set_ia32_am_support(store, ia32_am_Dest);
+ set_ia32_op_type(store, ia32_AddrModeD);
+ set_ia32_am_flavour(store, ia32_B);
+ set_ia32_ls_mode(store, mode);
+ set_ia32_frame_ent(store, ent);
+ set_ia32_use_frame(store);
+ SET_IA32_ORIG_NODE(store, ia32_get_old_node_name(cg, node));
+ DBG_OPT_SPILL2ST(node, store);
if (sched_point) {
- sched_add_after(sched_point, new_op);
- sched_remove(irn);
+ sched_add_after(sched_point, store);
+ sched_remove(node);
}
- SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env->cg, irn));
-
- exchange(irn, proj);
+ exchange(node, store);
}
-static ir_node *create_push(ia32_transform_env_t *env, ir_node *schedpoint, ir_node *sp, ir_node *mem, entity *ent) {
- ir_node *noreg = ia32_new_NoReg_gp(env->cg);
- ir_node *frame = get_irg_frame(env->irg);
+static ir_node *create_push(ia32_code_gen_t *cg, ir_node *node, ir_node *schedpoint, ir_node *sp, ir_node *mem, ir_entity *ent) {
+ ir_graph *irg = get_irn_irg(node);
+ dbg_info *dbg = get_irn_dbg_info(node);
+ ir_node *block = get_nodes_block(node);
+ ir_node *noreg = ia32_new_NoReg_gp(cg);
+ ir_node *frame = get_irg_frame(irg);
- ir_node *push = new_rd_ia32_Push(env->dbg, env->irg, env->block, frame, noreg, noreg, sp, mem);
+ ir_node *push = new_rd_ia32_Push(dbg, irg, block, frame, noreg, noreg, sp, mem);
set_ia32_frame_ent(push, ent);
set_ia32_use_frame(push);
return push;
}
-static ir_node *create_pop(ia32_transform_env_t *env, ir_node *schedpoint, ir_node *sp, entity *ent) {
- ir_node *noreg = ia32_new_NoReg_gp(env->cg);
- ir_node *frame = get_irg_frame(env->irg);
+static ir_node *create_pop(ia32_code_gen_t *cg, ir_node *node, ir_node *schedpoint, ir_node *sp, ir_entity *ent) {
+ ir_graph *irg = get_irn_irg(node);
+ dbg_info *dbg = get_irn_dbg_info(node);
+ ir_node *block = get_nodes_block(node);
+ ir_node *noreg = ia32_new_NoReg_gp(cg);
+ ir_node *frame = get_irg_frame(irg);
- ir_node *pop = new_rd_ia32_Pop(env->dbg, env->irg, env->block, frame, noreg, sp, new_NoMem());
+ ir_node *pop = new_rd_ia32_Pop(dbg, irg, block, frame, noreg, sp, new_NoMem());
set_ia32_frame_ent(pop, ent);
set_ia32_use_frame(pop);
set_ia32_op_type(pop, ia32_AddrModeD);
- set_ia32_am_flavour(pop, ia32_B);
+ set_ia32_am_flavour(pop, ia32_am_OB);
set_ia32_ls_mode(pop, mode_Is);
sched_add_before(schedpoint, pop);
return pop;
}
-static ir_node* create_spproj(ia32_transform_env_t *env, ir_node *pred, int pos, ir_node *schedpoint) {
+static ir_node* create_spproj(ia32_code_gen_t *cg, ir_node *node, ir_node *pred, int pos, ir_node *schedpoint) {
+ ir_graph *irg = get_irn_irg(node);
+ dbg_info *dbg = get_irn_dbg_info(node);
+ ir_node *block = get_nodes_block(node);
ir_mode *spmode = mode_Iu;
const arch_register_t *spreg = &ia32_gp_regs[REG_ESP];
ir_node *sp;
- sp = new_rd_Proj(env->dbg, env->irg, env->block, pred, spmode, pos);
- arch_set_irn_register(env->cg->arch_env, sp, spreg);
+ sp = new_rd_Proj(dbg, irg, block, pred, spmode, pos);
+ arch_set_irn_register(cg->arch_env, sp, spreg);
sched_add_before(schedpoint, sp);
return sp;
* push/pop into/from memory cascades. This is possible without using
* any registers.
*/
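+/*
+ * Sketch of the emitted cascade (entities and offsets hypothetical): a
+ * MemPerm with two entries becomes roughly
+ *     push dword [frame+in0]
+ *     push dword [frame+in1]
+ *     pop  dword [frame+out1]
+ *     pop  dword [frame+out0]
+ * so every input slot is read before any output slot is written; 64 bit
+ * entities get a second push/pop pair at offset +4.
+ */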
-static void transform_MemPerm(ia32_transform_env_t *env) {
- ir_node *node = env->irn;
+static void transform_MemPerm(ia32_code_gen_t *cg, ir_node *node) {
+ ir_graph *irg = get_irn_irg(node);
+ ir_node *block = get_nodes_block(node);
+ ir_node *in[1];
+ ir_node *keep;
int i, arity;
- ir_node *sp = be_abi_get_ignore_irn(env->cg->birg->abi, &ia32_gp_regs[REG_ESP]);
+ ir_node *sp = be_abi_get_ignore_irn(cg->birg->abi, &ia32_gp_regs[REG_ESP]);
const ir_edge_t *edge;
const ir_edge_t *next;
ir_node **pops;
// create pushes
for(i = 0; i < arity; ++i) {
- entity *ent = be_get_MemPerm_in_entity(node, i);
- ir_type *enttype = get_entity_type(ent);
+ ir_entity *inent = be_get_MemPerm_in_entity(node, i);
+ ir_entity *outent = be_get_MemPerm_out_entity(node, i);
+ ir_type *enttype = get_entity_type(inent);
int entbits = get_type_size_bits(enttype);
+ int entbits2 = get_type_size_bits(get_entity_type(outent));
ir_node *mem = get_irn_n(node, i + 1);
ir_node *push;
+ /* work around cases where entities have different sizes */
+ if(entbits2 < entbits)
+ entbits = entbits2;
assert( (entbits == 32 || entbits == 64) && "spillslot on x86 should be 32 or 64 bit");
- push = create_push(env, node, sp, mem, ent);
- sp = create_spproj(env, push, 0, node);
+ push = create_push(cg, node, node, sp, mem, inent);
+ sp = create_spproj(cg, node, push, pn_ia32_Push_stack, node);
if(entbits == 64) {
// add another push after the first one
- push = create_push(env, node, sp, mem, ent);
+ push = create_push(cg, node, node, sp, mem, inent);
add_ia32_am_offs_int(push, 4);
- sp = create_spproj(env, push, 0, node);
+ sp = create_spproj(cg, node, push, pn_ia32_Push_stack, node);
}
set_irn_n(node, i, new_Bad());
// create pops
for(i = arity - 1; i >= 0; --i) {
- entity *ent = be_get_MemPerm_out_entity(node, i);
- ir_type *enttype = get_entity_type(ent);
+ ir_entity *inent = be_get_MemPerm_in_entity(node, i);
+ ir_entity *outent = be_get_MemPerm_out_entity(node, i);
+ ir_type *enttype = get_entity_type(outent);
int entbits = get_type_size_bits(enttype);
-
+ int entbits2 = get_type_size_bits(get_entity_type(inent));
ir_node *pop;
+ /* work around cases where entities have different sizes */
+ if(entbits2 < entbits)
+ entbits = entbits2;
assert( (entbits == 32 || entbits == 64) && "spillslot on x86 should be 32 or 64 bit");
- pop = create_pop(env, node, sp, ent);
+ pop = create_pop(cg, node, node, sp, outent);
+ sp = create_spproj(cg, node, pop, pn_ia32_Pop_stack, node);
if(entbits == 64) {
- // add another pop after the first one
- sp = create_spproj(env, pop, 1, node);
- pop = create_pop(env, node, sp, ent);
add_ia32_am_offs_int(pop, 4);
+
+ // add another pop after the first one
+ pop = create_pop(cg, node, node, sp, outent);
+ sp = create_spproj(cg, node, pop, pn_ia32_Pop_stack, node);
}
- sp = create_spproj(env, pop, 1, node);
pops[i] = pop;
}
+ in[0] = sp;
+ keep = be_new_Keep(&ia32_reg_classes[CLASS_ia32_gp], irg, block, 1, in);
+ sched_add_before(node, keep);
+
// exchange memprojs
foreach_out_edge_safe(node, edge, next) {
ir_node *proj = get_edge_src_irn(edge);
sched_remove(node);
}
-/**
- * Fix the mode of Spill/Reload
- */
-static ir_mode *fix_spill_mode(ia32_code_gen_t *cg, ir_mode *mode)
-{
- if (mode_is_float(mode)) {
- if (USE_SSE2(cg))
- mode = mode_D;
- else
- mode = mode_E;
- }
- else
- mode = mode_Is;
- return mode;
-}
-
/**
* Block-Walker: Calls the transform functions Spill and Reload.
*/
static void ia32_after_ra_walker(ir_node *block, void *env) {
ir_node *node, *prev;
ia32_code_gen_t *cg = env;
- ia32_transform_env_t tenv;
-
- tenv.block = block;
- tenv.irg = current_ir_graph;
- tenv.cg = cg;
- DEBUG_ONLY(tenv.mod = cg->mod;)
/* beware: the schedule is changed here */
for (node = sched_last(block); !sched_is_begin(node); node = prev) {
prev = sched_prev(node);
+
if (be_is_Reload(node)) {
- /* we always reload the whole register */
- tenv.dbg = get_irn_dbg_info(node);
- tenv.irn = node;
- tenv.mode = fix_spill_mode(cg, get_irn_mode(node));
- transform_to_Load(&tenv);
- }
- else if (be_is_Spill(node)) {
- ir_node *spillval = get_irn_n(node, be_pos_Spill_val);
- /* we always spill the whole register */
- tenv.dbg = get_irn_dbg_info(node);
- tenv.irn = node;
- tenv.mode = fix_spill_mode(cg, get_irn_mode(spillval));
- transform_to_Store(&tenv);
+ transform_to_Load(cg, node);
+ } else if (be_is_Spill(node)) {
+ transform_to_Store(cg, node);
+ } else if(be_is_MemPerm(node)) {
+ transform_MemPerm(cg, node);
}
- else if(be_is_MemPerm(node)) {
- tenv.dbg = get_irn_dbg_info(node);
- tenv.irn = node;
- transform_MemPerm(&tenv);
+ }
+}
+
+/**
+ * Collects nodes that need frame entities assigned.
+ */
+static void ia32_collect_frame_entity_nodes(ir_node *node, void *data)
+{
+ be_fec_env_t *env = data;
+
+ if (be_is_Reload(node) && be_get_frame_entity(node) == NULL) {
+ const ir_mode *mode = get_spill_mode_mode(get_irn_mode(node));
+ int align = get_mode_size_bytes(mode);
+ be_node_needs_frame_entity(env, node, mode, align);
+ } else if(is_ia32_irn(node) && get_ia32_frame_ent(node) == NULL
+ && is_ia32_use_frame(node)) {
+ if (is_ia32_need_stackent(node) || is_ia32_Load(node)) {
+ const ir_mode *mode = get_ia32_ls_mode(node);
+ int align = get_mode_size_bytes(mode);
+ be_node_needs_frame_entity(env, node, mode, align);
+ } else if (is_ia32_vfild(node) || is_ia32_xLoad(node)) {
+ const ir_mode *mode = get_ia32_ls_mode(node);
+ int align = 4;
+ be_node_needs_frame_entity(env, node, mode, align);
+ } else if(is_ia32_FldCW(node)) {
+ const ir_mode *mode = ia32_reg_classes[CLASS_ia32_fp_cw].mode;
+ int align = 4;
+ be_node_needs_frame_entity(env, node, mode, align);
+ } else if (is_ia32_SetST0(node)) {
+ const ir_mode *mode = get_ia32_ls_mode(node);
+ int align = 4;
+ be_node_needs_frame_entity(env, node, mode, align);
+ } else {
+#ifndef NDEBUG
+ if(!is_ia32_St(node) && !is_ia32_xStoreSimple(node)
+ && !is_ia32_vfist(node)
+ && !is_ia32_GetST0(node)
+ && !is_ia32_FnstCW(node)) {
+ assert(0);
+ }
+#endif
}
}
}
/**
* We transform Spill and Reload here. This needs to be done before
 * stack biasing, otherwise we would miss the corrected offset for these nodes.
- *
- * If x87 instruction should be emitted, run the x87 simulator and patch
- * the virtual instructions. This must obviously be done after register allocation.
*/
static void ia32_after_ra(void *self) {
ia32_code_gen_t *cg = self;
ir_graph *irg = cg->irg;
+ be_fec_env_t *fec_env = be_new_frame_entity_coalescer(cg->birg);
+
+ /* create and coalesce frame entities */
+ irg_walk_graph(irg, NULL, ia32_collect_frame_entity_nodes, fec_env);
+ be_assign_entities(fec_env);
+ be_free_frame_entity_coalescer(fec_env);
irg_block_walk_graph(irg, NULL, ia32_after_ra_walker, cg);
}
/**
- * Last touchups for the graph before emit
+ * Last touchups for the graph before emit: x87 simulation to replace the
+ * virtual x87 instructions with real ones, block schedule creation and
+ * peephole optimisations.
*/
static void ia32_finish(void *self) {
ia32_code_gen_t *cg = self;
ir_graph *irg = cg->irg;
- //be_remove_empty_blocks(irg);
- cg->blk_sched = be_create_block_schedule(irg, cg->birg->execfreqs);
-
- //cg->blk_sched = sched_create_block_schedule(cg->irg, cg->birg->execfreqs);
-
/* if we do x87 code generation, rewrite all the virtual instructions and registers */
if (cg->used_fp == fp_x87 || cg->force_sim) {
- x87_simulate_graph(cg->arch_env, irg, cg->blk_sched);
+ x87_simulate_graph(cg->arch_env, cg->birg);
}
+ /* create the block schedule; this also removes empty blocks, which might
+ * produce critical edges */
+ cg->blk_sched = be_create_block_schedule(irg, cg->birg->exec_freq);
+
+ /* do peephole optimisations */
ia32_peephole_optimization(irg, cg);
}
ia32_code_gen_t *cg = self;
ir_graph *irg = cg->irg;
- ia32_gen_routine(cg->isa->out, irg, cg);
+ ia32_gen_routine(cg, irg);
cur_reg_set = NULL;
free(cg);
}
-static void *ia32_cg_init(const be_irg_t *birg);
+static void *ia32_cg_init(be_irg_t *birg);
static const arch_code_generator_if_t ia32_code_gen_if = {
ia32_cg_init,
NULL, /* before abi introduce hook */
ia32_prepare_graph,
+ NULL, /* spill */
ia32_before_sched, /* before scheduling hook */
ia32_before_ra, /* before register allocation hook */
ia32_after_ra, /* after register allocation hook */
/**
 * Initializes an IA32 code generator.
*/
-static void *ia32_cg_init(const be_irg_t *birg) {
+static void *ia32_cg_init(be_irg_t *birg) {
ia32_isa_t *isa = (ia32_isa_t *)birg->main_env->arch_env->isa;
ia32_code_gen_t *cg = xcalloc(1, sizeof(*cg));
cg->isa = isa;
cg->birg = birg;
cg->blk_sched = NULL;
- cg->fp_to_gp = NULL;
- cg->gp_to_fp = NULL;
cg->fp_kind = isa->fp_kind;
cg->used_fp = fp_none;
cg->dump = (birg->main_env->options->dump_flags & DUMP_BE) ? 1 : 0;
- FIRM_DBG_REGISTER(cg->mod, "firm.be.ia32.cg");
-
/* copy optimizations from isa for easier access */
cg->opt = isa->opt;
cg->arch = isa->arch;
isa->cg = cg;
#ifndef NDEBUG
- if (isa->name_obst_size) {
- //printf("freed %d bytes from name obst\n", isa->name_obst_size);
- isa->name_obst_size = 0;
+ if (isa->name_obst) {
obstack_free(isa->name_obst, NULL);
obstack_init(isa->name_obst);
}
}
}
+const arch_isa_if_t ia32_isa_if;
/**
* The template that generates a new ISA object.
-1, /* stack direction */
NULL, /* main environment */
},
+ { NULL, }, /* emitter environment */
NULL, /* 16bit register names */
NULL, /* 8bit register names */
NULL, /* types */
IA32_OPT_LEA | /* optimize for LEAs default: on */
IA32_OPT_PLACECNST | /* place constants immediately before instructions, default: on */
IA32_OPT_IMMOPS | /* operations can use immediates, default: on */
- IA32_OPT_EXTBB | /* use extended basic block scheduling, default: on */
IA32_OPT_PUSHARGS), /* create pushes for function argument passing, default: on */
arch_pentium_4, /* instruction architecture */
arch_pentium_4, /* optimize for architecture */
fp_sse2, /* use sse2 unit */
NULL, /* current code generator */
- NULL, /* output file */
#ifndef NDEBUG
NULL, /* name obstack */
0 /* name obst size */
if (inited)
return NULL;
+ inited = 1;
set_tarval_output_modes();
isa = xmalloc(sizeof(*isa));
memcpy(isa, &ia32_isa_template, sizeof(*isa));
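+	/* the FPU control word is modelled as its own 16 bit integer mode */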
+ if(mode_fpcw == NULL) {
+ mode_fpcw = new_ir_mode("Fpcw", irms_int_number, 16, 0, irma_none, 0);
+ }
+
ia32_register_init(isa);
ia32_create_opcodes();
+ ia32_register_copy_attr_func();
if ((ARCH_INTEL(isa->arch) && isa->arch < arch_pentium_4) ||
(ARCH_AMD(isa->arch) && isa->arch < arch_athlon))
isa->opt &= ~IA32_OPT_INCDEC;
}
+ be_emit_init_env(&isa->emit, file_handle);
isa->regs_16bit = pmap_create();
isa->regs_8bit = pmap_create();
isa->types = pmap_create();
isa->tv_ent = pmap_create();
- isa->out = file_handle;
+ isa->cpu = ia32_init_machine_description();
ia32_build_16bit_reg_map(isa->regs_16bit);
ia32_build_8bit_reg_map(isa->regs_8bit);
- /* patch register names of x87 registers */
- if (USE_x87(isa)) {
- ia32_st_regs[0].name = "st";
- ia32_st_regs[1].name = "st(1)";
- ia32_st_regs[2].name = "st(2)";
- ia32_st_regs[3].name = "st(3)";
- ia32_st_regs[4].name = "st(4)";
- ia32_st_regs[5].name = "st(5)";
- ia32_st_regs[6].name = "st(6)";
- ia32_st_regs[7].name = "st(7)";
- }
-
#ifndef NDEBUG
isa->name_obst = xmalloc(sizeof(*isa->name_obst));
obstack_init(isa->name_obst);
- isa->name_obst_size = 0;
#endif /* NDEBUG */
ia32_handle_intrinsics();
- ia32_switch_section(isa->out, NO_SECTION);
- fprintf(isa->out, "\t.intel_syntax\n");
/* needed for the debug support */
- ia32_switch_section(isa->out, SECTION_TEXT);
- fprintf(isa->out, ".Ltext0:\n");
+ be_gas_emit_switch_section(&isa->emit, GAS_SECTION_TEXT);
+ be_emit_cstring(&isa->emit, ".Ltext0:\n");
+ be_emit_write_line(&isa->emit);
- inited = 1;
+ /* we mark referenced global entities, so we can only emit those which
+ * are actually referenced. (Note: you mustn't use the type visited flag
+ * elsewhere in the backend)
+ */
+ inc_master_type_visited();
return isa;
}
ia32_isa_t *isa = self;
/* now emit all global declarations */
- ia32_gen_decls(isa->out, isa->arch_isa.main_env);
+ be_gas_emit_decls(&isa->emit, isa->arch_isa.main_env, 1);
pmap_destroy(isa->regs_16bit);
pmap_destroy(isa->regs_8bit);
pmap_destroy(isa->types);
#ifndef NDEBUG
- //printf("name obst size = %d bytes\n", isa->name_obst_size);
obstack_free(isa->name_obst, NULL);
#endif /* NDEBUG */
+ be_emit_destroy_env(&isa->emit);
+
free(self);
}
* - the general purpose registers
* - the SSE floating point register set
* - the virtual floating point registers
+ * - the SSE vector register set
*/
static int ia32_get_n_reg_class(const void *self) {
- return 3;
+ return N_CLASSES;
}
/**
* Return the register class for index i.
*/
-static const arch_register_class_t *ia32_get_reg_class(const void *self, int i) {
- assert(i >= 0 && i < 3 && "Invalid ia32 register class requested.");
- if (i == 0)
- return &ia32_reg_classes[CLASS_ia32_gp];
- else if (i == 1)
- return &ia32_reg_classes[CLASS_ia32_xmm];
- else
- return &ia32_reg_classes[CLASS_ia32_vfp];
+static const arch_register_class_t *ia32_get_reg_class(const void *self, int i)
+{
+ assert(i >= 0 && i < N_CLASSES);
+ return &ia32_reg_classes[i];
}
/**
}
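+/**
+ * Returns 1 for ia32 nodes that must appear in a schedule, 0 for the pseudo
+ * constants (NoReg/Unknown/ChangeCW), and -1 to leave non-ia32 nodes to the
+ * default handling.
+ */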
int ia32_to_appear_in_schedule(void *block_env, const ir_node *irn) {
- return is_ia32_irn(irn) ? 1 : -1;
+ if(!is_ia32_irn(irn))
+ return -1;
+
+ if(is_ia32_NoReg_GP(irn) || is_ia32_NoReg_VFP(irn) || is_ia32_NoReg_XMM(irn)
+ || is_ia32_Unknown_GP(irn) || is_ia32_Unknown_XMM(irn)
+ || is_ia32_Unknown_VFP(irn) || is_ia32_ChangeCW(irn))
+ return 0;
+
+ return 1;
}
/**
return &ia32_sched_selector;
}
+static const ilp_sched_selector_t *ia32_get_ilp_sched_selector(const void *self) {
+ return NULL;
+}
+
/**
* Returns the necessary byte alignment for storing a register of given class.
*/
return bytes;
}
+static const be_execution_unit_t ***ia32_get_allowed_execution_units(const void *self, const ir_node *irn) {
+ static const be_execution_unit_t *_allowed_units_BRANCH[] = {
+ &ia32_execution_units_BRANCH[IA32_EXECUNIT_TP_BRANCH_BRANCH1],
+ &ia32_execution_units_BRANCH[IA32_EXECUNIT_TP_BRANCH_BRANCH2],
+ NULL,
+ };
+ static const be_execution_unit_t *_allowed_units_GP[] = {
+ &ia32_execution_units_GP[IA32_EXECUNIT_TP_GP_GP_EAX],
+ &ia32_execution_units_GP[IA32_EXECUNIT_TP_GP_GP_EBX],
+ &ia32_execution_units_GP[IA32_EXECUNIT_TP_GP_GP_ECX],
+ &ia32_execution_units_GP[IA32_EXECUNIT_TP_GP_GP_EDX],
+ &ia32_execution_units_GP[IA32_EXECUNIT_TP_GP_GP_ESI],
+ &ia32_execution_units_GP[IA32_EXECUNIT_TP_GP_GP_EDI],
+ &ia32_execution_units_GP[IA32_EXECUNIT_TP_GP_GP_EBP],
+ NULL,
+ };
+ static const be_execution_unit_t *_allowed_units_DUMMY[] = {
+ &be_machine_execution_units_DUMMY[0],
+ NULL,
+ };
+ static const be_execution_unit_t **_units_callret[] = {
+ _allowed_units_BRANCH,
+ NULL
+ };
+ static const be_execution_unit_t **_units_other[] = {
+ _allowed_units_GP,
+ NULL
+ };
+ static const be_execution_unit_t **_units_dummy[] = {
+ _allowed_units_DUMMY,
+ NULL
+ };
+ const be_execution_unit_t ***ret;
+
+ if (is_ia32_irn(irn)) {
+ ret = get_ia32_exec_units(irn);
+ }
+ else if (is_be_node(irn)) {
+ if (be_is_Call(irn) || be_is_Return(irn)) {
+ ret = _units_callret;
+ }
+ else if (be_is_Barrier(irn)) {
+ ret = _units_dummy;
+ }
+ else {
+ ret = _units_other;
+ }
+ }
+ else {
+ ret = _units_dummy;
+ }
+
+ return ret;
+}
+
+/**
+ * Return the abstract ia32 machine.
+ */
+static const be_machine_t *ia32_get_machine(const void *self) {
+ const ia32_isa_t *isa = self;
+ return isa->cpu;
+}
+
+/**
+ * Return irp irgs in the desired order.
+ */
+static ir_graph **ia32_get_irg_list(const void *self, ir_graph ***irg_list) {
+ return NULL;
+}
+
/**
- * Allows or disallows the creation of a Psi for the given Cond selector.
+ * Allows or disallows the creation of Psi nodes for the given Phi nodes.
* @return 1 if allowed, 0 otherwise
*/
-static int ia32_is_psi_allowed(ir_node *sel, ir_node *false_res, ir_node *true_res) {
- ir_node *cmp, *cmp_a;
+static int ia32_is_psi_allowed(ir_node *sel, ir_node *phi_list, int i, int j)
+{
+ ir_node *cmp, *cmp_a, *phi;
ir_mode *mode;
+/* we don't want long long and floating point Psis */
+#define IS_BAD_PSI_MODE(mode) (mode_is_float(mode) || get_mode_size_bits(mode) > 32)
+
if (get_irn_mode(sel) != mode_b)
return 0;
cmp_a = get_Cmp_left(cmp);
mode = get_irn_mode(cmp_a);
- /* we don't want long long an floating point Psi */
- return ! mode_is_float(mode) && get_mode_size_bits(mode) <= 32;
+ if (IS_BAD_PSI_MODE(mode))
+ return 0;
+
+ /* check the Phi nodes */
+ for (phi = phi_list; phi; phi = get_irn_link(phi)) {
+ ir_node *pred_i = get_irn_n(phi, i);
+ ir_node *pred_j = get_irn_n(phi, j);
+ ir_mode *mode_i = get_irn_mode(pred_i);
+ ir_mode *mode_j = get_irn_mode(pred_j);
+
+ if (IS_BAD_PSI_MODE(mode_i) || IS_BAD_PSI_MODE(mode_j))
+ return 0;
+ }
+
+#undef IS_BAD_PSI_MODE
+
+ return 1;
}
static ia32_intrinsic_env_t intrinsic_env = {
p.if_conv_info = &ifconv;
return &p;
}
-#ifdef WITH_LIBCORE
/* instruction set architectures. */
static const lc_opt_enum_int_items_t arch_items[] = {
};
static const lc_opt_enum_int_items_t gas_items[] = {
- { "linux", ASM_LINUX_GAS },
- { "mingw", ASM_MINGW_GAS },
+ { "normal", GAS_FLAVOUR_NORMAL },
+ { "mingw", GAS_FLAVOUR_MINGW },
{ NULL, 0 }
};
static lc_opt_enum_int_var_t gas_var = {
- (int *)&asm_flavour, gas_items
+ (int*) &be_gas_flavour, gas_items
};
static const lc_opt_table_entry_t ia32_options[] = {
LC_OPT_ENT_NEGBIT("nolea", "do not optimize for LEAs", &ia32_isa_template.opt, IA32_OPT_LEA),
LC_OPT_ENT_NEGBIT("noplacecnst", "do not place constants", &ia32_isa_template.opt, IA32_OPT_PLACECNST),
LC_OPT_ENT_NEGBIT("noimmop", "no operations with immediates", &ia32_isa_template.opt, IA32_OPT_IMMOPS),
- LC_OPT_ENT_NEGBIT("noextbb", "do not use extended basic block scheduling", &ia32_isa_template.opt, IA32_OPT_EXTBB),
LC_OPT_ENT_NEGBIT("nopushargs", "do not create pushs for function arguments", &ia32_isa_template.opt, IA32_OPT_PUSHARGS),
LC_OPT_ENT_ENUM_INT("gasmode", "set the GAS compatibility mode", &gas_var),
{ NULL }
};
-/**
- * Register command line options for the ia32 backend.
- *
- * Options so far:
- *
- * ia32-arch=arch create instruction for arch
- * ia32-opt=arch optimize for run on arch
- * ia32-fpunit=unit select floating point unit (x87 or SSE2)
- * ia32-incdec optimize for inc/dec
- * ia32-noaddrmode do not use address mode
- * ia32-nolea do not optimize for LEAs
- * ia32-noplacecnst do not place constants,
- * ia32-noimmop no operations with immediates
- * ia32-noextbb do not use extended basic block scheduling
- * ia32-nopushargs do not create pushs for function argument passing
- * ia32-gasmode set the GAS compatibility mode
- */
-static void ia32_register_options(lc_opt_entry_t *ent)
-{
- lc_opt_entry_t *be_grp_ia32 = lc_opt_get_grp(ent, "ia32");
- lc_opt_add_table(be_grp_ia32, ia32_options);
-}
-#endif /* WITH_LIBCORE */
-
const arch_isa_if_t ia32_isa_if = {
ia32_init,
ia32_done,
ia32_get_irn_handler,
ia32_get_code_generator_if,
ia32_get_list_sched_selector,
+ ia32_get_ilp_sched_selector,
ia32_get_reg_class_alignment,
ia32_get_libfirm_params,
-#ifdef WITH_LIBCORE
- ia32_register_options
-#endif
+ ia32_get_allowed_execution_units,
+ ia32_get_machine,
+ ia32_get_irg_list,
};
+
+void ia32_init_emitter(void);
+void ia32_init_finish(void);
+void ia32_init_optimize(void);
+void ia32_init_transform(void);
+void ia32_init_x87(void);
+
+void be_init_arch_ia32(void)
+{
+ lc_opt_entry_t *be_grp = lc_opt_get_grp(firm_opt_get_root(), "be");
+ lc_opt_entry_t *ia32_grp = lc_opt_get_grp(be_grp, "ia32");
+
+ lc_opt_add_table(ia32_grp, ia32_options);
+ be_register_isa_if("ia32", &ia32_isa_if);
+
+ FIRM_DBG_REGISTER(dbg, "firm.be.ia32.cg");
+
+ ia32_init_emitter();
+ ia32_init_finish();
+ ia32_init_optimize();
+ ia32_init_transform();
+ ia32_init_x87();
+}
+
+BE_REGISTER_MODULE_CONSTRUCTOR(be_init_arch_ia32);