{
ir_node *block, *res;
ir_node *startnode;
+ ir_node *in[1];
if(*place != NULL)
return *place;
*place = res;
startnode = get_irg_start(cg->irg);
+ /* make sure we get scheduled very early... */
+ add_irn_dep(startnode, res);
+	/* schedule the node if the program has already been scheduled */
if(sched_is_scheduled(startnode)) {
sched_add_before(startnode, res);
}
+	/* keep the node so it isn't accidentally removed when unused ... */
+ in[0] = res;
+ be_new_Keep(arch_register_get_class(reg), cg->irg, block, 1, in);
+
return res;
}
inverse->costs += 1;
}
break;
- case iro_ia32_Eor:
+ case iro_ia32_Xor:
if (get_ia32_immop_type(irn) != ia32_ImmNone) {
/* xor with const: inverse = xor */
- inverse->nodes[0] = new_rd_ia32_Eor(dbg, irg, block, noreg, noreg, get_irn_n(irn, i), noreg, nomem);
+ inverse->nodes[0] = new_rd_ia32_Xor(dbg, irg, block, noreg, noreg, get_irn_n(irn, i), noreg, nomem);
inverse->costs += (get_ia32_immop_type(irn) == ia32_ImmSymConst) ? 5 : 1;
copy_ia32_Immop_attr(inverse->nodes[0], (ir_node *)irn);
}
else {
/* normal xor */
- inverse->nodes[0] = new_rd_ia32_Eor(dbg, irg, block, noreg, noreg, (ir_node *) irn, get_irn_n(irn, i), nomem);
+ inverse->nodes[0] = new_rd_ia32_Xor(dbg, irg, block, noreg, noreg, (ir_node *) irn, get_irn_n(irn, i), nomem);
inverse->costs += 1;
}
break;
inverse->costs += 1;
break;
}
- case iro_ia32_Minus: {
- inverse->nodes[0] = new_rd_ia32_Minus(dbg, irg, block, noreg, noreg, (ir_node*) irn, nomem);
+ case iro_ia32_Neg: {
+ inverse->nodes[0] = new_rd_ia32_Neg(dbg, irg, block, noreg, noreg, (ir_node*) irn, nomem);
inverse->costs += 1;
break;
}
return inverse;
}
+/**
+ * Returns the mode a value of the given mode is spilled with:
+ * mode_D for float modes, mode_Iu for everything else.
+ */
+static ir_mode *get_spill_mode_mode(const ir_mode *mode)
+{
+	if(mode_is_float(mode))
+		return mode_D;
+
+	return mode_Iu;
+}
+
/**
 * Get the mode that should be used for spilling value node
 */
-static ir_mode *get_spill_mode(ia32_code_gen_t *cg, const ir_node *node)
+static ir_mode *get_spill_mode(const ir_node *node)
{
	ir_mode *mode = get_irn_mode(node);
-	if (mode_is_float(mode)) {
-#if 0
-	// super exact spilling...
-	if (USE_SSE2(cg))
-		return mode_D;
-	else
-		return mode_E;
-#else
-	return mode_D;
-#endif
-	}
-	else
-		return mode_Is;
-
-	assert(0);
-	return mode;
+	/* the spill mode now depends only on the value's mode (not on the
+	 * code generator), so delegate to the shared helper */
+	return get_spill_mode_mode(mode);
}
/**
* @return Non-Zero if operand can be loaded
*/
static int ia32_possible_memory_operand(const void *self, const ir_node *irn, unsigned int i) {
- const ia32_irn_ops_t *ops = self;
- ia32_code_gen_t *cg = ops->cg;
ir_node *op = get_irn_n(irn, i);
const ir_mode *mode = get_irn_mode(op);
- const ir_mode *spillmode = get_spill_mode(cg, op);
+ const ir_mode *spillmode = get_spill_mode(op);
if (! is_ia32_irn(irn) || /* must be an ia32 irn */
get_irn_arity(irn) != 5 || /* must be a binary operation */
* |___/
**************************************************/
-/**
- * Transform the Thread Local Store base.
- */
-static void transform_tls(ir_graph *irg) {
- ir_node *irn = get_irg_tls(irg);
-
- if (irn) {
- dbg_info *dbg = get_irn_dbg_info(irn);
- ir_node *blk = get_nodes_block(irn);
- ir_node *newn;
- newn = new_rd_ia32_LdTls(dbg, irg, blk, get_irn_mode(irn));
-
- exchange(irn, newn);
- set_irg_tls(irg, newn);
- }
-}
-
/**
* Transforms the standard firm graph into
* an ia32 firm graph
FIRM_DBG_REGISTER(cg->mod, "firm.be.ia32.transform");
- /* 1st: transform constants and psi condition trees */
+ /* 1st: transform psi condition trees */
ia32_pre_transform_phase(cg);
/* 2nd: transform all remaining nodes */
- transform_tls(cg->irg);
ia32_transform_graph(cg);
// Matze: disabled for now. Because after transformation start block has no
- // self-loop anymore so it will probably melt with its successor block.
- //
- // This will bring several nodes to the startblock and we still can't
- // handle spill before the initial IncSP nicely
+ // self-loop anymore so it might be merged with its successor block. This
+ // will bring several nodes to the startblock which sometimes get scheduled
+ // before the initial IncSP/Barrier
//local_optimize_graph(cg->irg);
if (cg->dump)
ir_node *block = get_nodes_block(node);
ir_entity *ent = be_get_frame_entity(node);
ir_mode *mode = get_irn_mode(node);
- ir_mode *spillmode = get_spill_mode(cg, node);
+ ir_mode *spillmode = get_spill_mode(node);
ir_node *noreg = ia32_new_NoReg_gp(cg);
ir_node *sched_point = NULL;
ir_node *ptr = get_irg_frame(irg);
ir_node *block = get_nodes_block(node);
ir_entity *ent = be_get_frame_entity(node);
const ir_node *spillval = get_irn_n(node, be_pos_Spill_val);
- ir_mode *mode = get_spill_mode(cg, spillval);
+ ir_mode *mode = get_spill_mode(spillval);
ir_node *noreg = ia32_new_NoReg_gp(cg);
ir_node *nomem = new_rd_NoMem(irg);
ir_node *ptr = get_irg_frame(irg);
be_fec_env_t *env = data;
if (be_is_Reload(node) && be_get_frame_entity(node) == NULL) {
- const ir_mode *mode = get_irn_mode(node);
+ const ir_mode *mode = get_spill_mode_mode(get_irn_mode(node));
int align = get_mode_size_bytes(mode);
be_node_needs_frame_entity(env, node, mode, align);
} else if(is_ia32_irn(node) && get_ia32_frame_ent(node) == NULL
&& is_ia32_use_frame(node)) {
- if (is_ia32_Load(node)) {
+ if (is_ia32_got_reload(node) || is_ia32_Load(node)) {
const ir_mode *mode = get_ia32_ls_mode(node);
int align = get_mode_size_bytes(mode);
be_node_needs_frame_entity(env, node, mode, align);
ia32_code_gen_t *cg = self;
ir_graph *irg = cg->irg;
- ia32_gen_routine(cg->isa->out, irg, cg);
+ ia32_gen_routine(cg, cg->isa->out, irg);
cur_reg_set = NULL;
ia32_handle_intrinsics();
ia32_switch_section(isa->out, NO_SECTION);
- fprintf(isa->out, "\t.intel_syntax\n");
/* needed for the debug support */
ia32_switch_section(isa->out, SECTION_TEXT);