+typedef struct {
+ be_abi_call_flags_bits_t flags;
+ const arch_isa_t *isa;
+ const arch_env_t *aenv;
+ ir_graph *irg;
+} ia32_abi_env_t;
+
+static void *ia32_abi_init(const be_abi_call_t *call, const arch_env_t *aenv, ir_graph *irg)
+{
+ ia32_abi_env_t *env = xmalloc(sizeof(env[0]));
+ be_abi_call_flags_t fl = be_abi_call_get_flags(call);
+ env->flags = fl.bits;
+ env->irg = irg;
+ env->aenv = aenv;
+ env->isa = aenv->isa;
+ return env;
+}
+
+/**
+ * Put all registers which are saved by the prologue/epilogue in a set.
+ *
+ * @param self The callback object.
+ * @param s The result set.
+ */
+static void ia32_abi_dont_save_regs(void *self, pset *s)
+{
+ ia32_abi_env_t *env = self;
+ if (env->flags.try_omit_fp)
+ pset_insert_ptr(s, env->isa->bp);
+}
+
+/**
+ * Generate the routine prologue.
+ *
+ * @param self The callback object.
+ * @param mem A pointer to the mem node. Update this if you define new memory.
+ * @param reg_map A map mapping all callee_save/ignore/parameter registers to their defining nodes.
+ *
+ * @return The register which shall be used as a stack frame base.
+ *
+ * All nodes which define registers in @p reg_map must keep @p reg_map current.
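+ *
+ * For the non-omit-fp case this materializes the classic ia32 prologue,
+ * roughly "push %ebp; mov %esp, %ebp", as Push/Copy/CopyKeep backend nodes
+ * and returns the frame pointer as frame base; otherwise the stack pointer
+ * is returned and nothing is emitted.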
+ */
+static const arch_register_t *ia32_abi_prologue(void *self, ir_node **mem, pmap *reg_map)
+{
+ ia32_abi_env_t *env = self;
+
+ if (!env->flags.try_omit_fp) {
+ ir_node *bl = get_irg_start_block(env->irg);
+ ir_node *curr_sp = be_abi_reg_map_get(reg_map, env->isa->sp);
+ ir_node *curr_bp = be_abi_reg_map_get(reg_map, env->isa->bp);
+ ir_node *push;
+
+ /* push ebp */
+ push = new_rd_ia32_Push(NULL, env->irg, bl, curr_sp, curr_bp, *mem);
+ curr_sp = new_r_Proj(env->irg, bl, push, get_irn_mode(curr_sp), pn_ia32_Push_stack);
+ *mem = new_r_Proj(env->irg, bl, push, mode_M, pn_ia32_Push_M);
+
+ /* the stack result of the Push must be assigned the SP register */
+ arch_set_irn_register(env->aenv, curr_sp, env->isa->sp);
+ set_ia32_flags(push, arch_irn_flags_ignore);
+
+ /* move esp to ebp */
+ curr_bp = be_new_Copy(env->isa->bp->reg_class, env->irg, bl, curr_sp);
+ be_set_constr_single_reg(curr_bp, BE_OUT_POS(0), env->isa->bp);
+ arch_set_irn_register(env->aenv, curr_bp, env->isa->bp);
+ be_node_set_flags(curr_bp, BE_OUT_POS(0), arch_irn_flags_ignore);
+
+ /* beware: the copy must be done before any other sp use */
+ curr_sp = be_new_CopyKeep_single(env->isa->sp->reg_class, env->irg, bl, curr_sp, curr_bp, get_irn_mode(curr_sp));
+ be_set_constr_single_reg(curr_sp, BE_OUT_POS(0), env->isa->sp);
+ arch_set_irn_register(env->aenv, curr_sp, env->isa->sp);
+ be_node_set_flags(curr_sp, BE_OUT_POS(0), arch_irn_flags_ignore);
+
+ be_abi_reg_map_set(reg_map, env->isa->sp, curr_sp);
+ be_abi_reg_map_set(reg_map, env->isa->bp, curr_bp);
+
+ return env->isa->bp;
+ }
+
+ return env->isa->sp;
+}
+
+/**
+ * Generate the routine epilogue.
+ * @param self The callback object.
+ * @param bl The block for the epilogue
+ * @param mem A pointer to the mem node. Update this if you define new memory.
+ * @param reg_map A map mapping all callee_save/ignore/parameter registers to their defining nodes.
+ *
+ * All nodes which define registers in @p reg_map must keep @p reg_map current.
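+ *
+ * If the frame pointer was omitted, the frame is simply removed with an IncSP;
+ * otherwise either a single "leave" or the "mov %ebp, %esp; pop %ebp" sequence
+ * restores the caller's stack and base pointer.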
+ */
+static void ia32_abi_epilogue(void *self, ir_node *bl, ir_node **mem, pmap *reg_map)
+{
+ ia32_abi_env_t *env = self;
+ ir_node *curr_sp = be_abi_reg_map_get(reg_map, env->isa->sp);
+ ir_node *curr_bp = be_abi_reg_map_get(reg_map, env->isa->bp);
+
+ if (env->flags.try_omit_fp) {
+ /* simply remove the stack frame here */
+ curr_sp = be_new_IncSP(env->isa->sp, env->irg, bl, curr_sp, *mem, BE_STACK_FRAME_SIZE, be_stack_dir_shrink);
+ }
+ else {
+ const ia32_isa_t *isa = (const ia32_isa_t *)env->isa;
+ ir_mode *mode_bp = env->isa->bp->reg_class->mode;
+
+ /* gcc always emits a leave at the end of a routine; the "1 ||" below
+ currently forces the leave variant unconditionally */
+ if (1 || ARCH_AMD(isa->opt_arch)) {
+ ir_node *leave;
+
+ /* leave */
+ leave = new_rd_ia32_Leave(NULL, env->irg, bl, curr_sp, *mem);
+ set_ia32_flags(leave, arch_irn_flags_ignore);
+ curr_bp = new_r_Proj(current_ir_graph, bl, leave, mode_bp, pn_ia32_Leave_frame);
+ curr_sp = new_r_Proj(current_ir_graph, bl, leave, get_irn_mode(curr_sp), pn_ia32_Leave_stack);
+ *mem = new_r_Proj(current_ir_graph, bl, leave, mode_M, pn_ia32_Leave_M);
+ }
+ else {
+ ir_node *pop;
+
+ /* copy ebp to esp */
+ curr_sp = be_new_SetSP(env->isa->sp, env->irg, bl, curr_sp, curr_bp, *mem);
+
+ /* pop ebp */
+ pop = new_rd_ia32_Pop(NULL, env->irg, bl, curr_sp, *mem);
+ set_ia32_flags(pop, arch_irn_flags_ignore);
+ curr_bp = new_r_Proj(current_ir_graph, bl, pop, mode_bp, pn_ia32_Pop_res);
+ curr_sp = new_r_Proj(current_ir_graph, bl, pop, get_irn_mode(curr_sp), pn_ia32_Pop_stack);
+ *mem = new_r_Proj(current_ir_graph, bl, pop, mode_M, pn_ia32_Pop_M);
+ }
+ arch_set_irn_register(env->aenv, curr_sp, env->isa->sp);
+ arch_set_irn_register(env->aenv, curr_bp, env->isa->bp);
+ }
+
+ be_abi_reg_map_set(reg_map, env->isa->sp, curr_sp);
+ be_abi_reg_map_set(reg_map, env->isa->bp, curr_bp);
+}
+
+/**
+ * Produces the type which sits between the stack args and the locals on the stack.
+ * It will contain the return address and space to store the old base pointer.
+ * @return The Firm type modeling the ABI between type.
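+ *
+ * With a frame pointer the between type holds the old base pointer at offset 0
+ * and the return address directly above it; when the frame pointer is omitted
+ * it only holds the return address.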
+ */
+static ir_type *ia32_abi_get_between_type(void *self)
+{
+#define IDENT(s) new_id_from_chars(s, sizeof(s)-1)
+ static ir_type *omit_fp_between_type = NULL;
+ static ir_type *between_type = NULL;
+
+ ia32_abi_env_t *env = self;
+
+ if (!between_type) {
+ entity *old_bp_ent;
+ entity *ret_addr_ent;
+ entity *omit_fp_ret_addr_ent;
+
+ ir_type *old_bp_type = new_type_primitive(IDENT("bp"), mode_P);
+ ir_type *ret_addr_type = new_type_primitive(IDENT("return_addr"), mode_P);
+
+ between_type = new_type_struct(IDENT("ia32_between_type"));
+ old_bp_ent = new_entity(between_type, IDENT("old_bp"), old_bp_type);
+ ret_addr_ent = new_entity(between_type, IDENT("ret_addr"), ret_addr_type);
+
+ set_entity_offset_bytes(old_bp_ent, 0);
+ set_entity_offset_bytes(ret_addr_ent, get_type_size_bytes(old_bp_type));
+ set_type_size_bytes(between_type, get_type_size_bytes(old_bp_type) + get_type_size_bytes(ret_addr_type));
+ set_type_state(between_type, layout_fixed);
+
+ omit_fp_between_type = new_type_struct(IDENT("ia32_between_type_omit_fp"));
+ omit_fp_ret_addr_ent = new_entity(omit_fp_between_type, IDENT("ret_addr"), ret_addr_type);
+
+ set_entity_offset_bytes(omit_fp_ret_addr_ent, 0);
+ set_type_size_bytes(omit_fp_between_type, get_type_size_bytes(ret_addr_type));
+ set_type_state(omit_fp_between_type, layout_fixed);
+ }
+
+ return env->flags.try_omit_fp ? omit_fp_between_type : between_type;
+#undef IDENT
+}
+
+/**
+ * Get the estimated cycle count for @p irn.
+ *
+ * @param self The this pointer.
+ * @param irn The node.
+ *
+ * @return The estimated cycle count for this operation
+ */
+static int ia32_get_op_estimated_cost(const void *self, const ir_node *irn)
+{
+ int cost;
+ ia32_op_type_t op_tp;
+ const ia32_irn_ops_t *ops = self;
+
+ if (is_Proj(irn))
+ return 0;
+
+ assert(is_ia32_irn(irn));
+
+ cost = get_ia32_latency(irn);
+ op_tp = get_ia32_op_type(irn);
+
+ if (is_ia32_CopyB(irn)) {
+ cost = 250;
+ if (ARCH_INTEL(ops->cg->arch))
+ cost += 150;
+ }
+ else if (is_ia32_CopyB_i(irn)) {
+ int size = get_tarval_long(get_ia32_Immop_tarval(irn));
+ cost = 20 + (int)ceil((4.0 / 3.0) * size);
+ if (ARCH_INTEL(ops->cg->arch))
+ cost += 150;
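+ /* e.g. copying 64 bytes costs 20 + ceil(64 * 4/3) = 106 cycles, 256 on Intel */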
+ }
+ /* in case of address mode operations add additional cycles */
+ else if (op_tp == ia32_AddrModeD || op_tp == ia32_AddrModeS) {
+ /*
+ In case of stack access add 5 cycles (we assume stack is in cache),
+ other memory operations cost 20 cycles.
+ */
+ cost += is_ia32_use_frame(irn) ? 5 : 20;
+ }
+
+ return cost;
+}
+
+/**
+ * Returns the inverse operation of @p irn, recalculating the argument at position @p i.
+ *
+ * @param self The this pointer.
+ * @param irn The original operation
+ * @param i Index of the argument we want the inverse operation to yield
+ * @param inverse struct to be filled with the resulting inverse op
+ * @param obst The obstack to use for allocation of the returned nodes array
+ * @return The inverse operation or NULL if the operation is not invertible
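+ *
+ * Example: for an Add with an immediate constant the inverse recomputing the
+ * other operand is an Add with the negated constant; for a plain Add it is a
+ * Sub of the other operand from the Add's result.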
+ */
+static arch_inverse_t *ia32_get_inverse(const void *self, const ir_node *irn, int i, arch_inverse_t *inverse, struct obstack *obst) {
+ ir_graph *irg;
+ ir_mode *mode;
+ ir_node *block, *noreg, *nomem;
+ int pnc;
+
+ /* we cannot invert non-ia32 irns */
+ if (! is_ia32_irn(irn))
+ return NULL;
+
+ /* operand must always be a real operand (not base, index or mem) */
+ if (i != 2 && i != 3)
+ return NULL;
+
+ /* we don't invert address mode operations */
+ if (get_ia32_op_type(irn) != ia32_Normal)
+ return NULL;
+
+ irg = get_irn_irg(irn);
+ block = get_nodes_block(irn);
+ mode = get_ia32_res_mode(irn);
+ noreg = get_irn_n(irn, 0);
+ nomem = new_r_NoMem(irg);
+
+ /* initialize structure */
+ inverse->nodes = obstack_alloc(obst, 2 * sizeof(inverse->nodes[0]));
+ inverse->costs = 0;
+ inverse->n = 2;
+
+ switch (get_ia32_irn_opcode(irn)) {
+ case iro_ia32_Add:
+ if (get_ia32_immop_type(irn) == ia32_ImmConst) {
+ /* we have an add with a const here */
+ /* inverse == add with negated const */
+ inverse->nodes[0] = new_rd_ia32_Add(NULL, irg, block, noreg, noreg, get_irn_n(irn, i), noreg, nomem);
+ pnc = pn_ia32_Add_res;
+ inverse->costs += 1;
+ copy_ia32_Immop_attr(inverse->nodes[0], (ir_node *)irn);
+ set_ia32_Immop_tarval(inverse->nodes[0], tarval_neg(get_ia32_Immop_tarval(irn)));
+ set_ia32_commutative(inverse->nodes[0]);
+ }
+ else if (get_ia32_immop_type(irn) == ia32_ImmSymConst) {
+ /* we have an add with a symconst here */
+ /* inverse == sub with const */
+ inverse->nodes[0] = new_rd_ia32_Sub(NULL, irg, block, noreg, noreg, get_irn_n(irn, i), noreg, nomem);
+ pnc = pn_ia32_Sub_res;
+ inverse->costs += 2;
+ copy_ia32_Immop_attr(inverse->nodes[0], (ir_node *)irn);
+ }
+ else {
+ /* normal add: inverse == sub */
+ ir_node *proj = ia32_get_res_proj(irn);
+ assert(proj);
+
+ inverse->nodes[0] = new_rd_ia32_Sub(NULL, irg, block, noreg, noreg, proj, get_irn_n(irn, i ^ 1), nomem);
+ pnc = pn_ia32_Sub_res;
+ inverse->costs += 2;
+ }
+ break;
+ case iro_ia32_Sub:
+ if (get_ia32_immop_type(irn) != ia32_ImmNone) {
+ /* we have a sub with a const/symconst here */
+ /* inverse == add with this const */
+ inverse->nodes[0] = new_rd_ia32_Add(NULL, irg, block, noreg, noreg, get_irn_n(irn, i), noreg, nomem);
+ pnc = pn_ia32_Add_res;
+ inverse->costs += (get_ia32_immop_type(irn) == ia32_ImmSymConst) ? 5 : 1;
+ copy_ia32_Immop_attr(inverse->nodes[0], (ir_node *)irn);
+ }
+ else {
+ /* normal sub */
+ ir_node *proj = ia32_get_res_proj(irn);
+ assert(proj);
+
+ if (i == 2) {
+ inverse->nodes[0] = new_rd_ia32_Add(NULL, irg, block, noreg, noreg, proj, get_irn_n(irn, 3), nomem);
+ }
+ else {
+ inverse->nodes[0] = new_rd_ia32_Sub(NULL, irg, block, noreg, noreg, get_irn_n(irn, 2), proj, nomem);
+ }
+ pnc = pn_ia32_Sub_res;
+ inverse->costs += 1;
+ }
+ break;
+ case iro_ia32_Eor:
+ if (get_ia32_immop_type(irn) != ia32_ImmNone) {
+ /* xor with const: inverse = xor */
+ inverse->nodes[0] = new_rd_ia32_Eor(NULL, irg, block, noreg, noreg, get_irn_n(irn, i), noreg, nomem);
+ pnc = pn_ia32_Eor_res;
+ inverse->costs += (get_ia32_immop_type(irn) == ia32_ImmSymConst) ? 5 : 1;
+ copy_ia32_Immop_attr(inverse->nodes[0], (ir_node *)irn);
+ }
+ else {
+ /* normal xor */
+ inverse->nodes[0] = new_rd_ia32_Eor(NULL, irg, block, noreg, noreg, (ir_node *)irn, get_irn_n(irn, i), nomem);
+ pnc = pn_ia32_Eor_res;
+ inverse->costs += 1;
+ }
+ break;
+ case iro_ia32_Not: {
+ ir_node *proj = ia32_get_res_proj(irn);
+ assert(proj);
+
+ inverse->nodes[0] = new_rd_ia32_Not(NULL, irg, block, noreg, noreg, proj, nomem);
+ pnc = pn_ia32_Not_res;
+ inverse->costs += 1;
+ break;
+ }
+ case iro_ia32_Minus: {
+ ir_node *proj = ia32_get_res_proj(irn);
+ assert(proj);
+
+ inverse->nodes[0] = new_rd_ia32_Minus(NULL, irg, block, noreg, noreg, proj, nomem);
+ pnc = pn_ia32_Minus_res;
+ inverse->costs += 1;
+ break;
+ }
+ default:
+ /* inverse operation not supported */
+ return NULL;
+ }
+
+ set_ia32_res_mode(inverse->nodes[0], mode);
+ inverse->nodes[1] = new_r_Proj(irg, block, inverse->nodes[0], mode, pnc);
+
+ return inverse;
+}
+
+/**
+ * Check if irn can load its operand at position i from memory (source address mode).
+ * @param self Pointer to the irn ops itself
+ * @param irn The irn to be checked
+ * @param i The operand's position
+ * @return Non-zero if the operand can be loaded from memory
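+ *
+ * ia32 binary operations have the inputs 0 = base, 1 = index, 2 = left operand,
+ * 3 = right operand and 4 = memory, hence the arity and position checks below.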
+ */
+static int ia32_possible_memory_operand(const void *self, const ir_node *irn, unsigned int i) {
+ if (! is_ia32_irn(irn) || /* must be an ia32 irn */
+ get_irn_arity(irn) != 5 || /* must be a binary operation */
+ get_ia32_op_type(irn) != ia32_Normal || /* must not already be an address mode irn */
+ ! (get_ia32_am_support(irn) & ia32_am_Source) || /* must be capable of source address mode */
+ (i != 2 && i != 3) || /* a "real" operand position must be requested */
+ (i == 2 && ! is_ia32_commutative(irn)) || /* the left operand can only be folded if the irn is commutative */
+ is_ia32_use_frame(irn)) /* must not already use the frame */
+ return 0;
+
+ return 1;
+}
+
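+/**
+ * Fold a Reload into @p irn as a source address mode operand: the Reload's
+ * frame entity and memory become the frame entity and memory input of @p irn,
+ * and the register operand at position @p i is replaced by NoReg since its
+ * value is now read from the frame.
+ *
+ * @param self Pointer to the irn ops itself
+ * @param irn The node to be transformed
+ * @param reload The Reload feeding operand @p i
+ * @param i The operand's position
+ */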
+static void ia32_perform_memory_operand(const void *self, ir_node *irn, ir_node *reload, unsigned int i) {
+ assert(ia32_possible_memory_operand(self, irn, i) && "Cannot perform memory operand change");
+ assert(get_nodes_block(reload) == get_nodes_block(irn) && "Reload must be in same block as irn.");
+
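+ /* bail out if the Reload result has more than one user; we only fold it if it feeds this node alone */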
+ if (get_irn_n_edges(reload) > 1)
+ return;
+
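+ /* make sure the operand to be folded ends up at position 3 (the right operand) */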
+ if (i == 2) {
+ ir_node *tmp = get_irn_n(irn, 3);
+ set_irn_n(irn, 3, get_irn_n(irn, 2));
+ set_irn_n(irn, 2, tmp);
+ }
+
+ set_ia32_am_support(irn, ia32_am_Source);
+ set_ia32_op_type(irn, ia32_AddrModeS);
+ set_ia32_am_flavour(irn, ia32_B);
+ set_ia32_ls_mode(irn, get_irn_mode(reload));
+ set_ia32_frame_ent(irn, be_get_frame_entity(reload));
+ set_ia32_use_frame(irn);
+ set_ia32_got_reload(irn);
+
+ set_irn_n(irn, 0, be_get_Reload_frame(reload));
+ set_irn_n(irn, 4, be_get_Reload_mem(reload));
+
+ /*
+ The operand that is now loaded from memory has to be replaced by NoReg.
+ Input at position one is the index register, which is NoReg, so reuse it
+ here; we would need the cg object to obtain a fresh NoReg, but we cannot
+ access it from here.
+ */
+ set_irn_n(irn, 3, get_irn_n(irn, 1));
+
+ DBG_OPT_AM_S(reload, irn);
+}
+
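+/* ABI callbacks of the ia32 backend; the plain free() releases the ia32_abi_env_t allocated in ia32_abi_init */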
+static const be_abi_callbacks_t ia32_abi_callbacks = {
+ ia32_abi_init,
+ free,
+ ia32_abi_get_between_type,
+ ia32_abi_dont_save_regs,
+ ia32_abi_prologue,
+ ia32_abi_epilogue,
+};
+