/*
- * Copyright (C) 1995-2007 University of Karlsruhe. All right reserved.
+ * Copyright (C) 1995-2008 University of Karlsruhe. All right reserved.
*
* This file is part of libFirm.
*
/**
* @file
* @brief This file contains functions for matching firm graphs for
- * nodes that can be used as addressmode for x86 commands
+ * nodes that can be used as address mode for x86 instructions
* @author Matthias Braun
* @version $Id$
*/
-#ifdef HAVE_CONFIG_H
#include "config.h"
-#endif
#include "ia32_address_mode.h"
#include "ia32_transform.h"
#include "iredges_t.h"
#include "irgwalk.h"
-#include "../benode_t.h"
+#include "../benode.h"
+#include "../belive.h"
#define AGGRESSIVE_AM
/* Consts are typically immediates */
if (!tarval_is_long(get_Const_tarval(node))) {
#ifdef DEBUG_libfirm
- ir_fprintf(stderr, "Optimisation warning tarval of %+F(%+F) is not "
- "a long.\n", node, current_ir_graph);
+ ir_fprintf(stderr,
+ "Optimisation warning tarval of %+F(%+F) is not a long.\n",
+ node, current_ir_graph);
#endif
return 0;
}
/* the first SymConst of a DAG can be fold into an immediate */
#ifndef SUPPORT_NEGATIVE_SYMCONSTS
/* unfortunately the assembler/linker doesn't support -symconst */
- if(negate)
+ if (negate)
return 0;
#endif
- if(get_SymConst_kind(node) != symconst_addr_ent)
+ if (get_SymConst_kind(node) != symconst_addr_ent)
return 0;
(*symconsts)++;
- if(*symconsts > 1)
+ if (*symconsts > 1)
return 0;
return 1;
case iro_Add:
case iro_Sub:
- /* Add's and Sub's are typically supported as long as both operands are immediates */
- if(bitset_is_set(non_address_mode_nodes, get_irn_idx(node)))
+ /* Add's and Sub's are typically supported as long as both operands are
+ * immediates */
+ if (ia32_is_non_address_mode_node(node))
return 0;
left = get_binop_left(node);
right = get_binop_right(node);
- if(!do_is_immediate(left, symconsts, negate))
+ if (!do_is_immediate(left, symconsts, negate))
return 0;
- if(!do_is_immediate(right, symconsts, is_Sub(node) ? !negate : negate))
+ if (!do_is_immediate(right, symconsts, is_Sub(node) ? !negate : negate))
return 0;
return 1;
*
* @return non-zero if the DAG represents an immediate, 0 else
*/
-static int is_immediate_simple(const ir_node *node) {
+#if 0
+static int is_immediate_simple(const ir_node *node)
+{
int symconsts = 0;
return do_is_immediate(node, &symconsts, 0);
}
+#endif
/**
* Check if a DAG starting with root node can be folded into an address mode
case iro_SymConst:
/* place the entity into the symconst */
if (addr->symconst_ent != NULL) {
- panic("Internal error: more than 1 symconst in address "
- "calculation");
+ panic("Internal error: more than 1 symconst in address calculation");
}
addr->symconst_ent = get_SymConst_entity(node);
#ifndef SUPPORT_NEGATIVE_SYMCONSTS
addr->symconst_sign = negate;
break;
case iro_Add:
- assert(!bitset_is_set(non_address_mode_nodes, get_irn_idx(node)));
+ assert(!ia32_is_non_address_mode_node(node));
left = get_Add_left(node);
right = get_Add_right(node);
eat_immediate(addr, left, negate);
eat_immediate(addr, right, negate);
break;
case iro_Sub:
- assert(!bitset_is_set(non_address_mode_nodes, get_irn_idx(node)));
+ assert(!ia32_is_non_address_mode_node(node));
left = get_Sub_left(node);
right = get_Sub_right(node);
eat_immediate(addr, left, negate);
*
* @param addr the address mode data so far
* @param node the node
- * @param force if set, ignore the marking of node as a non-address-mode node
+ * @param flags the flags
*
* @return the folded node
*/
-static ir_node *eat_immediates(ia32_address_t *addr, ir_node *node, int force)
+static ir_node *eat_immediates(ia32_address_t *addr, ir_node *node,
+ ia32_create_am_flags_t flags)
{
- if(!force && bitset_is_set(non_address_mode_nodes, get_irn_idx(node)))
+ if (!(flags & ia32_create_am_force) &&
+ ia32_is_non_address_mode_node(node) &&
+ (!(flags & ia32_create_am_double_use) || get_irn_n_edges(node) > 2))
return node;
- if(is_Add(node)) {
+ if (is_Add(node)) {
ir_node *left = get_Add_left(node);
ir_node *right = get_Add_right(node);
- if(is_immediate(addr, left, 0)) {
+ if (is_immediate(addr, left, 0)) {
eat_immediate(addr, left, 0);
return eat_immediates(addr, right, 0);
}
- if(is_immediate(addr, right, 0)) {
+ if (is_immediate(addr, right, 0)) {
eat_immediate(addr, right, 0);
return eat_immediates(addr, left, 0);
}
- } else if(is_Sub(node)) {
+ } else if (is_Sub(node)) {
ir_node *left = get_Sub_left(node);
ir_node *right = get_Sub_right(node);
- if(is_immediate(addr, right, 1)) {
+ if (is_immediate(addr, right, 1)) {
eat_immediate(addr, right, 1);
return eat_immediates(addr, left, 0);
}
*/
static int eat_shl(ia32_address_t *addr, ir_node *node)
{
- ir_node *right = get_Shl_right(node);
- tarval *tv;
+ ir_node *shifted_val;
long val;
- /* we can only eat a shl if we don't have a scale or index set yet */
- if(addr->scale != 0 || addr->index != NULL)
- return 0;
+ if (is_Shl(node)) {
+ ir_node *right = get_Shl_right(node);
+ tarval *tv;
- /* we can use shl with 1, 2 or 3 shift */
- if(!is_Const(right))
- return 0;
- tv = get_Const_tarval(right);
- if(!tarval_is_long(tv))
- return 0;
- val = get_tarval_long(tv);
- if(val < 0 || val > 3)
+ /* we can use shl with 1, 2 or 3 shift */
+ if (!is_Const(right))
+ return 0;
+ tv = get_Const_tarval(right);
+ if (!tarval_is_long(tv))
+ return 0;
+
+ val = get_tarval_long(tv);
+ if (val < 0 || val > 3)
+ return 0;
+ if (val == 0) {
+ ir_fprintf(stderr, "Optimisation warning: unoptimized Shl(,0) found\n");
+ }
+
+ shifted_val = get_Shl_left(node);
+ } else if (is_Add(node)) {
+ /* might be an add x, x */
+ ir_node *left = get_Add_left(node);
+ ir_node *right = get_Add_right(node);
+
+ if (left != right)
+ return 0;
+ if (is_Const(left))
+ return 0;
+
+ val = 1;
+ shifted_val = left;
+ } else {
return 0;
- if(val == 0) {
- ir_fprintf(stderr, "Optimisation warning: unoptimized Shl(,0) found\n");
}
- if(bitset_is_set(non_address_mode_nodes, get_irn_idx(node)))
+
+ /* we can only eat a shl if we don't have a scale or index set yet */
+ if (addr->scale != 0 || addr->index != NULL)
+ return 0;
+ if (ia32_is_non_address_mode_node(node))
return 0;
#ifndef AGGRESSIVE_AM
- if(get_irn_n_edges(node) > 1)
+ if (get_irn_n_edges(node) > 1)
return 0;
#endif
addr->scale = val;
- addr->index = eat_immediates(addr, get_Shl_left(node), 0);
+ addr->index = shifted_val;
return 1;
}
-/**
- * Returns non-zero if a value of a given mode can be stored in GP registers.
- */
-static INLINE int mode_needs_gp_reg(ir_mode *mode) {
- if(mode == mode_fpcw)
- return 0;
- if(get_mode_size_bits(mode) > 32)
- return 0;
- return mode_is_int(mode) || mode_is_reference(mode) || mode == mode_b;
-}
-
-/**
- * Check, if a given node is a Down-Conv, ie. a integer Conv
- * from a mode with a mode with more bits to a mode with lesser bits.
- * Moreover, we return only true if the node has not more than 1 user.
- *
- * @param node the node
- * @return non-zero if node is a Down-Conv
- */
-static int is_downconv(const ir_node *node)
-{
- ir_mode *src_mode;
- ir_mode *dest_mode;
-
- if(!is_Conv(node))
- return 0;
-
- /* we only want to skip the conv when we're the only user
- * (not optimal but for now...)
- */
- if(get_irn_n_edges(node) > 1)
- return 0;
-
- src_mode = get_irn_mode(get_Conv_op(node));
- dest_mode = get_irn_mode(node);
- return mode_needs_gp_reg(src_mode)
- && mode_needs_gp_reg(dest_mode)
- && get_mode_size_bits(dest_mode) < get_mode_size_bits(src_mode);
-}
-
-/**
- * Skip all Down-Conv's on a given node and return the resulting node.
- */
-static ir_node *skip_downconv(ir_node *node)
-{
- while(is_downconv(node))
- node = get_Conv_op(node);
-
- return node;
-}
-
/* Create an address mode for a given node. */
-void ia32_create_address_mode(ia32_address_t *addr, ir_node *node, int force)
+void ia32_create_address_mode(ia32_address_t *addr, ir_node *node, ia32_create_am_flags_t flags)
{
int res = 0;
ir_node *eat_imms;
- if(is_immediate(addr, node, 0)) {
+ if (is_immediate(addr, node, 0)) {
eat_immediate(addr, node, 0);
return;
}
#ifndef AGGRESSIVE_AM
- if(!force && get_irn_n_edges(node) > 1) {
+ if (!(flags & ia32_create_am_force) && get_irn_n_edges(node) > 1) {
addr->base = node;
return;
}
#endif
- if(!force && bitset_is_set(non_address_mode_nodes, get_irn_idx(node))) {
+ if (!(flags & ia32_create_am_force) &&
+ ia32_is_non_address_mode_node(node) &&
+ (!(flags & ia32_create_am_double_use) || get_irn_n_edges(node) > 2)) {
addr->base = node;
return;
}
- eat_imms = eat_immediates(addr, node, force);
- if(eat_imms != node) {
- if(force) {
- eat_imms = skip_downconv(eat_imms);
+ eat_imms = eat_immediates(addr, node, flags);
+ if (eat_imms != node) {
+ if (flags & ia32_create_am_force) {
+ eat_imms = ia32_skip_downconv(eat_imms);
}
res = 1;
node = eat_imms;
#ifndef AGGRESSIVE_AM
- if(get_irn_n_edges(node) > 1) {
+ if (get_irn_n_edges(node) > 1) {
addr->base = node;
return;
}
#endif
- if(bitset_is_set(non_address_mode_nodes, get_irn_idx(node))) {
+ if (ia32_is_non_address_mode_node(node)) {
addr->base = node;
return;
}
}
/* starting point Add, Sub or Shl, FrameAddr */
- if(is_Shl(node)) {
- if(eat_shl(addr, node))
+ if (is_Shl(node)) {
+ /* We don't want to eat add x, x as shl here, so only test for real Shl
+ * instructions, because we want the former as Lea x, x, not Shl x, 1 */
+ if (eat_shl(addr, node))
return;
- } else if(is_immediate(addr, node, 0)) {
+ } else if (is_immediate(addr, node, 0)) {
eat_immediate(addr, node, 0);
return;
- } else if(be_is_FrameAddr(node)) {
+ } else if (be_is_FrameAddr(node)) {
assert(addr->base == NULL);
assert(addr->frame_entity == NULL);
addr->base = be_get_FrameAddr_frame(node);
addr->use_frame = 1;
addr->frame_entity = be_get_FrameAddr_entity(node);
return;
- } else if(is_Add(node)) {
+ } else if (is_Add(node)) {
ir_node *left = get_Add_left(node);
ir_node *right = get_Add_right(node);
- if(force) {
- left = skip_downconv(left);
- right = skip_downconv(right);
+ if (flags & ia32_create_am_force) {
+ left = ia32_skip_downconv(left);
+ right = ia32_skip_downconv(right);
}
- assert(force || !is_immediate(addr, left, 0));
- assert(force || !is_immediate(addr, right, 0));
+ assert(flags & ia32_create_am_force || !is_immediate(addr, left, 0));
+ assert(flags & ia32_create_am_force || !is_immediate(addr, right, 0));
- if(is_Shl(left) && eat_shl(addr, left)) {
+ if (eat_shl(addr, left)) {
left = NULL;
- } else if(is_Shl(right) && eat_shl(addr, right)) {
+ } else if (eat_shl(addr, right)) {
right = NULL;
}
- if(left != NULL && be_is_FrameAddr(left)
- && !bitset_is_set(non_address_mode_nodes, get_irn_idx(left))) {
+ if (left != NULL &&
+ be_is_FrameAddr(left) &&
+ !ia32_is_non_address_mode_node(left)) {
assert(addr->base == NULL);
assert(addr->frame_entity == NULL);
addr->base = be_get_FrameAddr_frame(left);
addr->use_frame = 1;
addr->frame_entity = be_get_FrameAddr_entity(left);
left = NULL;
- } else if(right != NULL && be_is_FrameAddr(right)
- && !bitset_is_set(non_address_mode_nodes, get_irn_idx(right))) {
+ } else if (right != NULL &&
+ be_is_FrameAddr(right) &&
+ !ia32_is_non_address_mode_node(right)) {
assert(addr->base == NULL);
assert(addr->frame_entity == NULL);
addr->base = be_get_FrameAddr_frame(right);
right = NULL;
}
- if(left != NULL) {
- if(addr->base != NULL) {
+ if (left != NULL) {
+ if (addr->base != NULL) {
assert(addr->index == NULL && addr->scale == 0);
assert(right == NULL);
addr->index = left;
addr->base = left;
}
}
- if(right != NULL) {
- if(addr->base == NULL) {
+ if (right != NULL) {
+ if (addr->base == NULL) {
addr->base = right;
} else {
assert(addr->index == NULL && addr->scale == 0);
addr->base = node;
}
+void ia32_mark_non_am(ir_node *node)
+{
+ bitset_set(non_address_mode_nodes, get_irn_idx(node));
+}
+
+int ia32_is_non_address_mode_node(ir_node const *node)
+{
+ return bitset_is_set(non_address_mode_nodes, get_irn_idx(node));
+}
+
+static int value_last_used_here(be_lv_t *lv, ir_node *here, ir_node *value)
+{
+ ir_node *block = get_nodes_block(here);
+ const ir_edge_t *edge;
+
+ /* If the value is live at the end of the block, it certainly does not die here */
+ if (be_is_live_end(lv, block, value)) return 0;
+
+ /* if multiple nodes in this block use the value, then we cannot decide
+ * whether the value will die here (because there is no schedule yet).
+ * Assume it does not die in this case. */
+ foreach_out_edge(value, edge) {
+ ir_node *user = get_edge_src_irn(edge);
+ if (user != here && get_nodes_block(user) == block) {
+ return 0;
+ }
+ }
+
+ return 1;
+}
/**
* Walker: mark those nodes that cannot be part of an address mode because
- * there value must be access through an register
+ * their value must be accessed through a register
*/
static void mark_non_address_nodes(ir_node *node, void *env)
{
- int i, arity;
- ir_node *ptr;
- ir_node *mem;
+ be_lv_t *lv = env;
+ int arity;
+ int i;
ir_node *val;
ir_node *left;
ir_node *right;
- (void) env;
+ ir_mode *mode;
- switch(get_irn_opcode(node)) {
- case iro_Load:
- ptr = get_Load_ptr(node);
- mem = get_Load_mem(node);
+ mode = get_irn_mode(node);
+ if (!mode_is_int(mode) && !mode_is_reference(mode) && mode != mode_b)
+ return;
- bitset_set(non_address_mode_nodes, get_irn_idx(mem));
+ switch (get_irn_opcode(node)) {
+ case iro_Load:
+ /* Nothing to do. Especially do not mark the pointer, because we want to
+ * turn it into AM. */
break;
case iro_Store:
+ /* Do not mark the pointer, because we want to turn it into AM. */
val = get_Store_value(node);
- ptr = get_Store_ptr(node);
- mem = get_Store_mem(node);
-
- bitset_set(non_address_mode_nodes, get_irn_idx(val));
- bitset_set(non_address_mode_nodes, get_irn_idx(mem));
+ ia32_mark_non_am(val);
break;
+ case iro_Shl:
case iro_Add:
- left = get_Add_left(node);
- right = get_Add_right(node);
- /* if we can do source address mode then we will never fold the add
- * into address mode */
- if(!mode_is_float(get_irn_mode(node)) && (is_immediate_simple(right) ||
- (!use_source_address_mode(get_nodes_block(node), left, right)
- && !use_source_address_mode(get_nodes_block(node), right, left))))
- {
- break;
+ /* only 1 user: AM folding is always beneficial */
+ if (get_irn_n_edges(node) <= 1)
+ break;
+
+ /* for adds and shls with multiple users we use this heuristic:
+ * we do not fold them into address mode if their operands don't live
+ * out of the block, because in this case we will reduce register
+ * pressure. Otherwise we fold them in aggressively in the hope, that
+ * the node itself doesn't exist anymore and we were able to save the
+ * register for the result */
+ left = get_binop_left(node);
+ right = get_binop_right(node);
+
+ /* Fold AM if either of the two operands does not die here. This duplicates
+ * an addition and has the same register pressure for the case that only
+ * one operand dies, but is faster (on Pentium 4).
+ * && instead of || only folds AM if both operands do not die here */
+ if (!value_last_used_here(lv, node, left) ||
+ !value_last_used_here(lv, node, right)) {
+ return;
}
- bitset_set(non_address_mode_nodes, get_irn_idx(node));
- /* fallthrough */
+
+ /* At least one of left and right is not used by anyone else, so it is
+ * beneficial for the register pressure (if both are unused otherwise,
+ * else neutral) and ALU use to not fold AM. */
+ ia32_mark_non_am(node);
+ break;
default:
arity = get_irn_arity(node);
- for(i = 0; i < arity; ++i) {
+ for (i = 0; i < arity; ++i) {
ir_node *in = get_irn_n(node, i);
- bitset_set(non_address_mode_nodes, get_irn_idx(in));
+ ia32_mark_non_am(in);
}
break;
}
}
-void calculate_non_address_mode_nodes(ir_graph *irg)
+void ia32_calculate_non_address_mode_nodes(be_irg_t *birg)
{
+ ir_graph *irg = be_get_birg_irg(birg);
+ be_lv_t *lv = be_assure_liveness(birg);
+
non_address_mode_nodes = bitset_malloc(get_irg_last_idx(irg));
- irg_walk_graph(irg, NULL, mark_non_address_nodes, NULL);
+ irg_walk_graph(irg, NULL, mark_non_address_nodes, lv);
}
-void free_non_address_mode_nodes(void)
+void ia32_free_non_address_mode_nodes(void)
{
bitset_free(non_address_mode_nodes);
}