#include "ia32_new_nodes.h"
#include "gen_ia32_regalloc_if.h"
#include "gen_ia32_machine.h"
+#include "ia32_common_transform.h"
#include "ia32_transform.h"
#include "ia32_emitter.h"
#include "ia32_map_regs.h"
/**
* Generate the routine prologue.
*
- * @param self The callback object.
- * @param mem A pointer to the mem node. Update this if you define new memory.
- * @param reg_map A map mapping all callee_save/ignore/parameter registers to their defining nodes.
+ * @param self The callback object.
+ * @param mem A pointer to the mem node. Update this if you define new memory.
+ * @param reg_map A map mapping all callee_save/ignore/parameter registers to their defining nodes.
+ * @param stack_bias Points to the current stack bias, can be modified if needed.
*
- * @return The register which shall be used as a stack frame base.
+ * @return The register which shall be used as a stack frame base.
*
* All nodes which define registers in @p reg_map must keep @p reg_map current.
*/
-static const arch_register_t *ia32_abi_prologue(void *self, ir_node **mem, pmap *reg_map)
+static const arch_register_t *ia32_abi_prologue(void *self, ir_node **mem, pmap *reg_map, int *stack_bias)
{
ia32_abi_env_t *env = self;
ia32_code_gen_t *cg = ia32_current_cg;
arch_set_irn_register(arch_env, curr_sp, arch_env->sp);
set_ia32_flags(push, arch_irn_flags_ignore);
+ /* this modifies the stack bias, because we pushed 32bit */
+ *stack_bias -= 4;
+
/* move esp to ebp */
curr_bp = be_new_Copy(arch_env->bp->reg_class, irg, bl, curr_sp);
be_set_constr_single_reg(curr_bp, BE_OUT_POS(0), arch_env->bp);
if (env->flags.try_omit_fp) {
/* simply remove the stack frame here */
curr_sp = be_new_IncSP(arch_env->sp, irg, bl, curr_sp, BE_STACK_FRAME_SIZE_SHRINK, 0);
- add_irn_dep(curr_sp, *mem);
} else {
ir_mode *mode_bp = arch_env->bp->reg_class->mode;
ir_node *leave;
/* leave */
- leave = new_rd_ia32_Leave(NULL, irg, bl, curr_sp, curr_bp);
+ leave = new_rd_ia32_Leave(NULL, irg, bl, curr_bp);
set_ia32_flags(leave, arch_irn_flags_ignore);
curr_bp = new_r_Proj(irg, bl, leave, mode_bp, pn_ia32_Leave_frame);
curr_sp = new_r_Proj(irg, bl, leave, get_irn_mode(curr_sp), pn_ia32_Leave_stack);
(we assume they are in cache), other memory operations cost 20
cycles.
*/
- if(is_ia32_use_frame(irn) ||
- (is_ia32_NoReg_GP(get_irn_n(irn, 0)) &&
- is_ia32_NoReg_GP(get_irn_n(irn, 1)))) {
+ if (is_ia32_use_frame(irn) || (
+ is_ia32_NoReg_GP(get_irn_n(irn, n_ia32_base)) &&
+ is_ia32_NoReg_GP(get_irn_n(irn, n_ia32_index))
+ )) {
cost += 5;
} else {
cost += 20;
static void ia32_perform_memory_operand(ir_node *irn, ir_node *spill,
unsigned int i)
{
+ ir_mode *load_mode;
+ ir_mode *dest_op_mode;
+
ia32_code_gen_t *cg = ia32_current_cg;
assert(ia32_possible_memory_operand(irn, i) && "Cannot perform memory operand change");
}
set_ia32_op_type(irn, ia32_AddrModeS);
- set_ia32_ls_mode(irn, get_irn_mode(get_irn_n(irn, i)));
+
+ load_mode = get_irn_mode(get_irn_n(irn, i));
+ dest_op_mode = get_ia32_ls_mode(irn);
+ if (get_mode_size_bits(load_mode) <= get_mode_size_bits(dest_op_mode)) {
+ set_ia32_ls_mode(irn, load_mode);
+ }
set_ia32_use_frame(irn);
set_ia32_need_stackent(irn);
set_irn_n(irn, n_ia32_base, get_irg_frame(get_irn_irg(irn)));
set_irn_n(irn, n_ia32_binary_right, ia32_get_admissible_noreg(cg, irn, n_ia32_binary_right));
set_irn_n(irn, n_ia32_mem, spill);
+ set_ia32_is_reload(irn);
/* immediates are only allowed on the right side */
if (i == n_ia32_binary_left && is_ia32_Immediate(get_irn_n(irn, n_ia32_binary_left))) {
}
}
+/** The transformer used for code selection (chosen via the "transformer" option). */
+transformer_t be_transformer = TRANSFORMER_DEFAULT;
+
/**
* Transforms the standard firm graph into
* an ia32 firm graph
if (cg->dump)
be_dump(cg->irg, "-pre_transform", dump_ir_block_graph_sched);
-#ifdef FIRM_GRGEN_BE
- // disable CSE, because of two-step node-construction
- set_opt_cse(0);
+ switch (be_transformer) {
+ case TRANSFORMER_DEFAULT:
+ /* transform remaining nodes into assembler instructions */
+ ia32_transform_graph(cg);
+ break;
- /* transform nodes into assembler instructions by PBQP magic */
- ia32_transform_graph_by_pbqp(cg);
+#ifdef FIRM_GRGEN_BE
+ case TRANSFORMER_PBQP:
+ case TRANSFORMER_RAND:
+ // disable CSE, because of two-step node-construction
+ set_opt_cse(0);
- if (cg->dump)
- be_dump(cg->irg, "-after_pbqp_transform", dump_ir_block_graph_sched);
- set_opt_cse(1);
-#else
+ /* transform nodes into assembler instructions by PBQP magic */
+ ia32_transform_graph_by_pbqp(cg);
- /* transform remaining nodes into assembler instructions */
- ia32_transform_graph(cg);
+ set_opt_cse(1);
+ break;
#endif
+ default: panic("invalid transformer");
+ }
+
/* do local optimizations (mainly CSE) */
optimize_graph_df(cg->irg);
load_res = new_rd_Proj(dbgi, irg, block, load, mode_Iu, pn_ia32_Load_res);
ia32_copy_am_attrs(load, node);
+ if (is_ia32_is_reload(node))
+ set_ia32_is_reload(load);
set_irn_n(node, n_ia32_mem, new_NoMem());
switch (get_ia32_am_arity(node)) {
/**
* Called before the register allocator.
- * Calculate a block schedule here. We need it for the x87
- * simulator and the emitter.
*/
static void ia32_before_ra(void *self) {
ia32_code_gen_t *cg = self;
set_ia32_ls_mode(new_op, spillmode);
set_ia32_frame_ent(new_op, ent);
set_ia32_use_frame(new_op);
+ set_ia32_is_reload(new_op);
DBG_OPT_RELOAD2LD(node, new_op);
set_ia32_ls_mode(store, mode);
set_ia32_frame_ent(store, ent);
set_ia32_use_frame(store);
+ set_ia32_is_spill(store);
SET_IA32_ORIG_NODE(store, ia32_get_old_node_name(cg, node));
DBG_OPT_SPILL2ST(node, store);
if (is_ia32_need_stackent(node) || is_ia32_Load(node)) {
const ir_mode *mode = get_ia32_ls_mode(node);
const ia32_attr_t *attr = get_ia32_attr_const(node);
- int align = get_mode_size_bytes(mode);
+ int align;
+
+ if (is_ia32_is_reload(node)) {
+ mode = get_spill_mode_mode(mode);
+ }
if(attr->data.need_64bit_stackent) {
mode = mode_Ls;
if(attr->data.need_32bit_stackent) {
mode = mode_Is;
}
+ align = get_mode_size_bytes(mode);
be_node_needs_frame_entity(env, node, mode, align);
} else if (is_ia32_vfild(node) || is_ia32_xLoad(node)
|| is_ia32_vfld(node)) {
&ia32_gp_regs[REG_ESP], /* stack pointer register */
&ia32_gp_regs[REG_EBP], /* base pointer register */
-1, /* stack direction */
- 4, /* power of two stack alignment, 2^4 == 16 */
+ 2, /* power of two stack alignment, 2^2 == 4 */
NULL, /* main environment */
7, /* costs for a spill instruction */
5, /* costs for a reload instruction */
ir_mode *mode;
unsigned cc;
int n, i, regnum;
+ int pop_amount = 0;
be_abi_call_flags_t call_flags = be_abi_call_get_flags(abi);
+
(void) self;
/* set abi flags for calls */
call_flags.bits.store_args_sequential = 0;
/* call_flags.bits.try_omit_fp not changed: can handle both settings */
call_flags.bits.fp_free = 0; /* the frame pointer is fixed in IA32 */
- call_flags.bits.call_has_imm = 1; /* IA32 calls can have immediate address */
+ call_flags.bits.call_has_imm = 0; /* No call immediates, we handle this by ourselves */
/* set parameter passing style */
be_abi_call_set_flags(abi, call_flags, &ia32_abi_callbacks);
cc = cc_cdecl_set;
} else {
cc = get_method_calling_convention(method_type);
- if (get_method_additional_properties(method_type) & mtp_property_private
- && (ia32_cg_config.optimize_cc)) {
+ if (get_method_additional_properties(method_type) & mtp_property_private &&
+ ia32_cg_config.optimize_cc) {
/* set the calling conventions to register parameter */
cc = (cc & ~cc_bits) | cc_reg_param;
}
/* we have to pop the shadow parameter ourself for compound calls */
if( (get_method_calling_convention(method_type) & cc_compound_ret)
&& !(cc & cc_reg_param)) {
- be_abi_call_set_pop(abi, get_mode_size_bytes(mode_P_data));
+ pop_amount += get_mode_size_bytes(mode_P_data);
}
n = get_method_n_params(method_type);
/* Micro optimisation: if the mode is shorter than 4 bytes, load 4 bytes.
* movl has a shorter opcode than mov[sz][bw]l */
ir_mode *load_mode = mode;
- if (mode != NULL && get_mode_size_bytes(mode) < 4) load_mode = mode_Iu;
+
+ if (mode != NULL) {
+ unsigned size = get_mode_size_bytes(mode);
+
+ if (cc & cc_callee_clear_stk) {
+ pop_amount += (size + 3U) & ~3U;
+ }
+
+ if (size < 4) load_mode = mode_Iu;
+ }
+
be_abi_call_param_stack(abi, i, load_mode, 4, 0, 0);
}
}
+ be_abi_call_set_pop(abi, pop_amount);
+
/* set return registers */
n = get_method_n_ress(method_type);
return NULL;
}
+/**
+ * Backend-interface callback: marks @p node as rematerialized.
+ *
+ * Only ia32 nodes carry the flag, so non-ia32 nodes are silently ignored.
+ *
+ * @param self  The callback object (unused).
+ * @param node  The node that was rematerialized.
+ */
+static void ia32_mark_remat(const void *self, ir_node *node) {
+	(void) self;
+	if (is_ia32_irn(node)) {
+		set_ia32_is_remat(node);
+	}
+}
+
/**
* Allows or disallows the creation of Psi nodes for the given Phi nodes.
* @return 1 if allowed, 0 otherwise
(int*) &be_gas_flavour, gas_items
};
+/** Possible values for the "transformer" command-line option. */
+static const lc_opt_enum_int_items_t transformer_items[] = {
+	{ "default", TRANSFORMER_DEFAULT },
+#ifdef FIRM_GRGEN_BE
+	{ "pbqp", TRANSFORMER_PBQP },
+	{ "random", TRANSFORMER_RAND },
+#endif
+	{ NULL, 0 }
+};
+
+/** Binds the "transformer" option to the be_transformer variable. */
+static lc_opt_enum_int_var_t transformer_var = {
+	(int*)&be_transformer, transformer_items
+};
+
static const lc_opt_table_entry_t ia32_options[] = {
LC_OPT_ENT_ENUM_INT("gasmode", "set the GAS compatibility mode", &gas_var),
+ LC_OPT_ENT_ENUM_INT("transformer", "the transformer used for code selection", &transformer_var),
LC_OPT_ENT_INT("stackalign", "set power of two stack alignment for calls",
&ia32_isa_template.arch_env.stack_alignment),
LC_OPT_LAST
ia32_get_allowed_execution_units,
ia32_get_machine,
ia32_get_irg_list,
+ ia32_mark_remat,
ia32_parse_asm_constraint,
ia32_is_valid_clobber
};