-/**
- * Environment passed through the ia32 address-mode optimization:
- * the current code generator plus the precomputed heights (dependency)
- * information of the irg.
- */
-typedef struct {
- ia32_code_gen_t *cg; /**< current ia32 code generator */
- heights_t *h; /**< heights info, used for in-block dependency queries */
-} ia32_am_opt_env_t;
-
-/**
- * Returns non-zero if irn is an ia32 node flagged as commutative,
- * 0 otherwise (non-ia32 nodes are never considered commutative).
- */
-static int node_is_ia32_comm(const ir_node *irn) {
- return is_ia32_irn(irn) ? is_ia32_commutative(irn) : 0;
-}
-
-/**
- * Counts the out-edges (i.e. the number of users) of irn.
- * Requires the edges information of the irg to be up to date.
- */
-static int ia32_get_irn_n_edges(const ir_node *irn) {
- const ir_edge_t *edge;
- int cnt = 0;
-
- foreach_out_edge(irn, edge) {
-  cnt++;
- }
-
- return cnt;
-}
-
-/**
- * Applies the given check-function directly to pred.
- *
- * NOTE(review): despite the name and the historical comment, this function
- * no longer checks for a Proj or inspects pred's predecessor -- it simply
- * forwards to is_op_func. See pred_is_specific_nodeblock for the variant
- * that does look through Projs.
- *
- * @param pred The node to be checked
- * @param is_op_func The check-function
- * @return The result of is_op_func(pred)
- */
-static int pred_is_specific_node(const ir_node *pred, is_op_func_t *is_op_func) {
- return is_op_func(pred);
-}
-
-/**
- * Determines if pred is a Proj, if is_op_func returns true for its
- * predecessor and if that predecessor lies in block bl.
- *
- * @param bl The block
- * @param pred The node to be checked
- * @param is_op_func The check-function
- * @return 1 if all conditions are fulfilled, 0 otherwise
- */
-static int pred_is_specific_nodeblock(const ir_node *bl, const ir_node *pred,
- int (*is_op_func)(const ir_node *n))
-{
- if (is_Proj(pred)) {
-  /* look through the Proj to the node actually producing the value */
-  pred = get_Proj_pred(pred);
-  if ((bl == get_nodes_block(pred)) && is_op_func(pred)) {
-   return 1;
-  }
- }
-
- return 0;
-}
-
-/**
- * Checks if irn is a candidate for address calculation. We avoid transforming
- * adds to leas if they have a load as pred, because then we can use AM mode
- * for the add later.
- *
- * irn is a candidate if:
- * - none of its operands is a Load within the same block, OR
- * - all such Loads have more than one user.
- *
- * Without AGGRESSIVE_AM the check is skipped entirely and every irn
- * is accepted.
- *
- * @param irn The irn to check (the block is taken from irn itself)
- * @return 1 if irn is a candidate, 0 otherwise
- */
-static int is_addr_candidate(const ir_node *irn) {
-#ifndef AGGRESSIVE_AM
- const ir_node *block = get_nodes_block(irn);
- ir_node *left, *right;
- int n;
-
- /* inputs 2 and 3 appear to be the left/right data operands of an ia32
-  * binop (same convention as in is_am_candidate) -- TODO confirm against
-  * the ia32 node input layout */
- left = get_irn_n(irn, 2);
- right = get_irn_n(irn, 3);
-
- if (pred_is_specific_nodeblock(block, left, is_ia32_Ld)) {
- n = ia32_get_irn_n_edges(left);
- /* load with only one user: don't create LEA */
- if(n == 1)
- return 0;
- }
-
- if (pred_is_specific_nodeblock(block, right, is_ia32_Ld)) {
- n = ia32_get_irn_n_edges(right);
- /* same rule for the right operand */
- if(n == 1)
- return 0;
- }
-#endif
-
- return 1;
-}
-
-/**
- * Checks if irn is a candidate for address mode.
- *
- * address mode (AM):
- * - at least one operand has to be a Load within the same block AND
- * - the load must not have other users than the irn AND
- * - the irn must not have a frame entity set
- *
- * @param cg The ia32 code generator
- *           (NOTE(review): unused in this function body -- verify callers)
- * @param h The height information of the irg
- * @param block The block the Loads must/must not be in
- * @param irn The irn to check
- * @return IA32_AM_CAND_NONE if irn is no candidate, IA32_AM_CAND_LEFT if
- *         the left load can be used, IA32_AM_CAND_RIGHT for the right one,
- *         or both flags combined
- */
-static ia32_am_cand_t is_am_candidate(ia32_code_gen_t *cg, heights_t *h, const ir_node *block, ir_node *irn) {
- ir_node *in, *load, *other, *left, *right;
- int is_cand = 0, cand;
- int arity;
-
- /* loads, stores and fpu transfer nodes cannot fold another load */
- if (is_ia32_Ld(irn) || is_ia32_St(irn) || is_ia32_Store8Bit(irn) || is_ia32_vfild(irn) || is_ia32_vfist(irn) ||
- is_ia32_GetST0(irn) || is_ia32_SetST0(irn) || is_ia32_xStoreSimple(irn))
- return 0;
-
- /* input 2 is the left data operand; arity 5 means binary op with the
-  * right operand at input 3, arity 4 means unary op -- TODO confirm
-  * against the ia32 node input layout */
- left = get_irn_n(irn, 2);
- arity = get_irn_arity(irn);
- assert(arity == 5 || arity == 4);
- if(arity == 5) {
- /* binary op */
- right = get_irn_n(irn, 3);
- } else {
- /* unary op */
- right = left;
- }
-
- /* first check the left operand */
- in = left;
-
- if (pred_is_specific_nodeblock(block, in, is_ia32_Ld)) {
-#ifndef AGGRESSIVE_AM
- int n;
- n = ia32_get_irn_n_edges(in);
- is_cand = (n == 1) ? 1 : is_cand; /* load with more than one user: no AM */
-#else
- is_cand = 1;
-#endif
-
- load = get_Proj_pred(in);
- other = right;
-
- /* 8bit Loads are not supported (for binary ops),
-  * they cannot be used with every register */
- if (get_irn_arity(irn) != 4 && get_mode_size_bits(get_ia32_ls_mode(load)) < 16) {
- assert(get_irn_arity(irn) == 5);
- is_cand = 0;
- }
-
- /* If there is a data dependency of other irn from load: cannot use AM */
- if (is_cand && get_nodes_block(other) == block) {
- other = skip_Proj(other);
- is_cand = heights_reachable_in_block(h, other, load) ? 0 : is_cand;
- /* this could happen in loops */
- is_cand = heights_reachable_in_block(h, load, irn) ? 0 : is_cand;
- }
- }
-
- /* record the left result, then check the right operand the same way */
- cand = is_cand ? IA32_AM_CAND_LEFT : IA32_AM_CAND_NONE;
- in = right;
- is_cand = 0;
-
- if (pred_is_specific_nodeblock(block, in, is_ia32_Ld)) {
-#ifndef AGGRESSIVE_AM
- int n;
- n = ia32_get_irn_n_edges(in);
- is_cand = (n == 1) ? 1 : is_cand; /* load with more than one user: no AM */
-#else
- is_cand = 1;
-#endif
-
- load = get_Proj_pred(in);
- other = left;
-
- /* 8bit Loads are not supported, they cannot be used with every register */
- if (get_mode_size_bits(get_ia32_ls_mode(load)) < 16)
- is_cand = 0;
-
- /* If there is a data dependency of other irn from load: cannot use load */
- if (is_cand && get_nodes_block(other) == block) {
- other = skip_Proj(other);
- is_cand = heights_reachable_in_block(h, other, load) ? 0 : is_cand;
- /* this could happen in loops */
- is_cand = heights_reachable_in_block(h, load, irn) ? 0 : is_cand;
- }
- }
-
- cand = is_cand ? (cand | IA32_AM_CAND_RIGHT) : cand;
-
- /* if the irn has a frame entity: we do not use address mode */
- return get_ia32_frame_ent(irn) ? IA32_AM_CAND_NONE : cand;
-}
-
-/**
- * Compares the address computation of a load and a store: base and index
- * inputs, frame entity, address-mode symconst (and its sign), offset and
- * load/store mode. Returns 1 if all of them are equal, 0 otherwise.
- */
-static int load_store_addr_is_equal(const ir_node *load, const ir_node *store,
- const ir_node *addr_b, const ir_node *addr_i)
-{
- /* inputs 0 and 1 are presumably the base and index address operands of
-  * the load -- TODO confirm against the ia32 node input layout */
- if(get_irn_n(load, 0) != addr_b)
- return 0;
- if(get_irn_n(load, 1) != addr_i)
- return 0;
-
- if(get_ia32_frame_ent(load) != get_ia32_frame_ent(store))
- return 0;
-
- /* same symconst, same sign, same constant offset, same access mode */
- if(get_ia32_am_sc(load) != get_ia32_am_sc(store))
- return 0;
- if(is_ia32_am_sc_sign(load) != is_ia32_am_sc_sign(store))
- return 0;
- if(get_ia32_am_offs_int(load) != get_ia32_am_offs_int(store))
- return 0;
- if(get_ia32_ls_mode(load) != get_ia32_ls_mode(store))
- return 0;
-
- return 1;
-}
-
-/**
- * Bitmask describing which attributes of a LEA can be assimilated
- * into a new LEA (see do_new_lea).
- */
-typedef enum _ia32_take_lea_attr {
- IA32_LEA_ATTR_NONE = 0, /**< take nothing: keep the LEA as operand */
- IA32_LEA_ATTR_BASE = (1 << 0), /**< take the base register */
- IA32_LEA_ATTR_INDEX = (1 << 1), /**< take the index register */
- IA32_LEA_ATTR_OFFS = (1 << 2), /**< take the constant offset */
- IA32_LEA_ATTR_SCALE = (1 << 3), /**< take the scale factor */
- IA32_LEA_ATTR_AMSC = (1 << 4), /**< take the address-mode symconst */
- IA32_LEA_ATTR_FENT = (1 << 5) /**< take the frame entity */
-} ia32_take_lea_attr;
-
-/**
- * Decides if we have to keep the LEA operand or if we can assimilate it.
- *
- * @param irn The node (e.g. an Add) that is being turned into a new LEA
- * @param base The base operand of the new LEA
- * @param index The index operand of the new LEA
- * @param lea The existing LEA appearing as operand
- * @param have_am_sc Non-zero if irn already carries an address-mode symconst
- * @param cg The ia32 code generator (used for the NoReg check)
- * @return IA32_LEA_ATTR_NONE if the LEA must be kept as a plain operand,
- *         otherwise a bitmask of ia32_take_lea_attr flags naming the LEA
- *         attributes that can be folded into the new LEA
- */
-static int do_new_lea(ir_node *irn, ir_node *base, ir_node *index, ir_node *lea,
- int have_am_sc, ia32_code_gen_t *cg)
-{
- ir_entity *irn_ent = get_ia32_frame_ent(irn);
- ir_entity *lea_ent = get_ia32_frame_ent(lea);
- int ret_val = 0;
- int is_noreg_base = be_is_NoReg(cg, base);
- int is_noreg_index = be_is_NoReg(cg, index);
- ia32_am_flavour_t am_flav = get_ia32_am_flavour(lea);
-
- /* If the Add and the LEA both have a different frame entity set: keep */
- if (irn_ent && lea_ent && (irn_ent != lea_ent))
- return IA32_LEA_ATTR_NONE;
- else if (! irn_ent && lea_ent)
- ret_val |= IA32_LEA_ATTR_FENT;
-
- /* If the Add and the LEA both have already an address mode symconst: keep */
- if (have_am_sc && get_ia32_am_sc(lea))
- return IA32_LEA_ATTR_NONE;
- else if (get_ia32_am_sc(lea))
- ret_val |= IA32_LEA_ATTR_AMSC;
-
- /* Check the different base-index combinations */
-
- if (! is_noreg_base && ! is_noreg_index) {
- /* Assimilate if base is the lea and the LEA is just a Base + Offset calculation */
- if ((base == lea) && ! (am_flav & ia32_I ? 1 : 0)) {
- if (am_flav & ia32_O)
- ret_val |= IA32_LEA_ATTR_OFFS;
-
- ret_val |= IA32_LEA_ATTR_BASE;
- }
- else
- return IA32_LEA_ATTR_NONE;
- }
- else if (! is_noreg_base && is_noreg_index) {
- /* Base is set but index not */
- if (base == lea) {
- /* Base points to LEA: assimilate everything */
- if (am_flav & ia32_O)
- ret_val |= IA32_LEA_ATTR_OFFS;
- if (am_flav & ia32_S)
- ret_val |= IA32_LEA_ATTR_SCALE;
- if (am_flav & ia32_I)
- ret_val |= IA32_LEA_ATTR_INDEX;
-
- ret_val |= IA32_LEA_ATTR_BASE;
- }
- else if (am_flav & ia32_B ? 0 : 1) {
- /* Base is not the LEA but the LEA is an index only calculation: assimilate */
- if (am_flav & ia32_O)
- ret_val |= IA32_LEA_ATTR_OFFS;
- if (am_flav & ia32_S)
- ret_val |= IA32_LEA_ATTR_SCALE;
-
- ret_val |= IA32_LEA_ATTR_INDEX;
- }
- else
- return IA32_LEA_ATTR_NONE;
- }
- else if (is_noreg_base && ! is_noreg_index) {
- /* Index is set but not base */
- if (index == lea) {
- /* Index points to LEA: assimilate everything */
- if (am_flav & ia32_O)
- ret_val |= IA32_LEA_ATTR_OFFS;
- if (am_flav & ia32_S)
- ret_val |= IA32_LEA_ATTR_SCALE;
- if (am_flav & ia32_B)
- ret_val |= IA32_LEA_ATTR_BASE;
-
- ret_val |= IA32_LEA_ATTR_INDEX;
- }
- else if (am_flav & ia32_I ? 0 : 1) {
- /* Index is not the LEA but the LEA is a base only calculation: assimilate */
- if (am_flav & ia32_O)
- ret_val |= IA32_LEA_ATTR_OFFS;
- if (am_flav & ia32_S)
- ret_val |= IA32_LEA_ATTR_SCALE;
-
- ret_val |= IA32_LEA_ATTR_BASE;
- }
- else
- return IA32_LEA_ATTR_NONE;
- }
- else {
- /* both operands NoReg cannot happen for a valid LEA operand */
- assert(0 && "There must have been set base or index");
- }
-
- return ret_val;
-}
-
-/**
- * Adds res before irn into the schedule, but only if irn itself is
- * already scheduled (otherwise res is left unscheduled as well).
- *
- * @param irn The schedule point
- * @param res The node to be scheduled
- */
-static INLINE void try_add_to_sched(ir_node *irn, ir_node *res) {
- if (sched_is_scheduled(irn))
- sched_add_before(irn, res);
-}
-