if (is_ia32_St(irn))
classification |= arch_irn_class_store;
- if (is_ia32_need_stackent(irn))
+ if (is_ia32_is_reload(irn))
classification |= arch_irn_class_reload;
+ if (is_ia32_is_spill(irn))
+ classification |= arch_irn_class_spill;
+
+ if (is_ia32_is_remat(irn))
+ classification |= arch_irn_class_remat;
+
return classification;
}
/**
* Generate the routine prologue.
*
- * @param self The callback object.
- * @param mem A pointer to the mem node. Update this if you define new memory.
- * @param reg_map A map mapping all callee_save/ignore/parameter registers to their defining nodes.
+ * @param self The callback object.
+ * @param mem A pointer to the mem node. Update this if you define new memory.
+ * @param reg_map A map mapping all callee_save/ignore/parameter registers to their defining nodes.
+ * @param stack_bias Points to the current stack bias, can be modified if needed.
*
- * @return The register which shall be used as a stack frame base.
+ * @return The register which shall be used as a stack frame base.
*
* All nodes which define registers in @p reg_map must keep @p reg_map current.
*/
-static const arch_register_t *ia32_abi_prologue(void *self, ir_node **mem, pmap *reg_map)
+static const arch_register_t *ia32_abi_prologue(void *self, ir_node **mem, pmap *reg_map, int *stack_bias)
{
ia32_abi_env_t *env = self;
ia32_code_gen_t *cg = ia32_current_cg;
arch_set_irn_register(arch_env, curr_sp, arch_env->sp);
set_ia32_flags(push, arch_irn_flags_ignore);
+ /* this modifies the stack bias, because we pushed 32 bits */
+ *stack_bias -= 4;
+
/* move esp to ebp */
curr_bp = be_new_Copy(arch_env->bp->reg_class, irg, bl, curr_sp);
be_set_constr_single_reg(curr_bp, BE_OUT_POS(0), arch_env->bp);
ir_node *leave;
/* leave */
- leave = new_rd_ia32_Leave(NULL, irg, bl, curr_sp, curr_bp);
+ leave = new_rd_ia32_Leave(NULL, irg, bl, curr_bp);
set_ia32_flags(leave, arch_irn_flags_ignore);
curr_bp = new_r_Proj(irg, bl, leave, mode_bp, pn_ia32_Leave_frame);
curr_sp = new_r_Proj(irg, bl, leave, get_irn_mode(curr_sp), pn_ia32_Leave_stack);
static ir_type *ia32_abi_get_between_type(void *self)
{
#define IDENT(s) new_id_from_chars(s, sizeof(s)-1)
- static ir_type *between_type = NULL;
- (void) self;
+ static ir_type *omit_fp_between_type = NULL;
+ static ir_type *between_type = NULL;
+
+ ia32_abi_env_t *env = self;
if (! between_type) {
+ ir_entity *old_bp_ent;
ir_entity *ret_addr_ent;
- ir_type *ret_addr_type;
+ ir_entity *omit_fp_ret_addr_ent;
- ret_addr_type = new_type_primitive(IDENT("return_addr"), mode_Iu);
- between_type = new_type_struct(IDENT("ia32_between_type"));
- ret_addr_ent = new_entity(between_type, IDENT("ret_addr"), ret_addr_type);
+ ir_type *old_bp_type = new_type_primitive(IDENT("bp"), mode_Iu);
+ ir_type *ret_addr_type = new_type_primitive(IDENT("return_addr"), mode_Iu);
- set_entity_offset(ret_addr_ent, 0);
- set_type_size_bytes(between_type, get_type_size_bytes(ret_addr_type));
+ between_type = new_type_struct(IDENT("ia32_between_type"));
+ old_bp_ent = new_entity(between_type, IDENT("old_bp"), old_bp_type);
+ ret_addr_ent = new_entity(between_type, IDENT("ret_addr"), ret_addr_type);
+
+ set_entity_offset(old_bp_ent, 0);
+ set_entity_offset(ret_addr_ent, get_type_size_bytes(old_bp_type));
+ set_type_size_bytes(between_type, get_type_size_bytes(old_bp_type) + get_type_size_bytes(ret_addr_type));
set_type_state(between_type, layout_fixed);
+
+ omit_fp_between_type = new_type_struct(IDENT("ia32_between_type_omit_fp"));
+ omit_fp_ret_addr_ent = new_entity(omit_fp_between_type, IDENT("ret_addr"), ret_addr_type);
+
+ set_entity_offset(omit_fp_ret_addr_ent, 0);
+ set_type_size_bytes(omit_fp_between_type, get_type_size_bytes(ret_addr_type));
+ set_type_state(omit_fp_between_type, layout_fixed);
}
- return between_type;
+ return env->flags.try_omit_fp ? omit_fp_between_type : between_type;
#undef IDENT
}
static void ia32_before_abi(void *self) {
lower_mode_b_config_t lower_mode_b_config = {
mode_Iu, /* lowered mode */
- mode_Bu, /* prefered mode for set */
+ mode_Bu, /* preferred mode for set */
0, /* don't lower direct compares */
};
ia32_code_gen_t *cg = self;
#ifdef FIRM_GRGEN_BE
case TRANSFORMER_PBQP:
- // disable CSE, because of two-step node-construction
- set_opt_cse(0);
-
+ case TRANSFORMER_RAND:
/* transform nodes into assembler instructions by PBQP magic */
ia32_transform_graph_by_pbqp(cg);
-
- set_opt_cse(1);
break;
#endif
load_res = new_rd_Proj(dbgi, irg, block, load, mode_Iu, pn_ia32_Load_res);
ia32_copy_am_attrs(load, node);
+ if (is_ia32_is_reload(node))
+ set_ia32_is_reload(load);
set_irn_n(node, n_ia32_mem, new_NoMem());
switch (get_ia32_am_arity(node)) {
/**
* Called before the register allocator.
- * Calculate a block schedule here. We need it for the x87
- * simulator and the emitter.
*/
static void ia32_before_ra(void *self) {
ia32_code_gen_t *cg = self;
set_ia32_ls_mode(new_op, spillmode);
set_ia32_frame_ent(new_op, ent);
set_ia32_use_frame(new_op);
+ set_ia32_is_reload(new_op);
DBG_OPT_RELOAD2LD(node, new_op);
set_ia32_ls_mode(store, mode);
set_ia32_frame_ent(store, ent);
set_ia32_use_frame(store);
+ set_ia32_is_spill(store);
SET_IA32_ORIG_NODE(store, ia32_get_old_node_name(cg, node));
DBG_OPT_SPILL2ST(node, store);
&ia32_gp_regs[REG_ESP], /* stack pointer register */
&ia32_gp_regs[REG_EBP], /* base pointer register */
-1, /* stack direction */
- 4, /* power of two stack alignment, 2^4 == 16 */
+ 2, /* power of two stack alignment, 2^2 == 4 */
NULL, /* main environment */
7, /* costs for a spill instruction */
5, /* costs for a reload instruction */
return NULL;
}
+static void ia32_mark_remat(const void *self, ir_node *node) {
+ (void) self;
+ if (is_ia32_irn(node)) {
+ set_ia32_is_remat(node);
+ }
+}
+
/**
* Allows or disallows the creation of Psi nodes for the given Phi nodes.
* @return 1 if allowed, 0 otherwise
{ "default", TRANSFORMER_DEFAULT },
#ifdef FIRM_GRGEN_BE
{ "pbqp", TRANSFORMER_PBQP },
+ { "random", TRANSFORMER_RAND },
#endif
{ NULL, 0 }
};
ia32_get_allowed_execution_units,
ia32_get_machine,
ia32_get_irg_list,
+ ia32_mark_remat,
ia32_parse_asm_constraint,
ia32_is_valid_clobber
};