/* gas/ld don't support negative symconsts :-( */
#undef SUPPORT_NEGATIVE_SYMCONSTS
+static be_lv_t *lv;
static bitset_t *non_address_mode_nodes;
/**
*
* @return non-zero if the DAG represents an immediate, 0 else
*/
+#if 0
static int is_immediate_simple(const ir_node *node) {
int symconsts = 0;
return do_is_immediate(node, &symconsts, 0);
}
+#endif
/**
* Check if a DAG starting with root node can be folded into an address mode
*/
static int eat_shl(ia32_address_t *addr, ir_node *node)
{
- ir_node *right = get_Shl_right(node);
- tarval *tv;
+ ir_node *shifted_val;
long val;
- /* we can only eat a shl if we don't have a scale or index set yet */
- if(addr->scale != 0 || addr->index != NULL)
- return 0;
+ if(is_Shl(node)) {
+ ir_node *right = get_Shl_right(node);
+ tarval *tv;
- /* we can use shl with 1, 2 or 3 shift */
- if(!is_Const(right))
- return 0;
- tv = get_Const_tarval(right);
- if(!tarval_is_long(tv))
- return 0;
- val = get_tarval_long(tv);
- if(val < 0 || val > 3)
+ /* we can use shl with 1, 2 or 3 shift */
+ if(!is_Const(right))
+ return 0;
+ tv = get_Const_tarval(right);
+ if(!tarval_is_long(tv))
+ return 0;
+
+ val = get_tarval_long(tv);
+ if(val < 0 || val > 3)
+ return 0;
+ if(val == 0) {
+ ir_fprintf(stderr, "Optimisation warning: unoptimized Shl(,0) "
+ "found\n");
+ }
+
+ shifted_val = get_Shl_left(node);
+ } else if(is_Add(node)) {
+ /* might be an add x, x */
+ ir_node *left = get_Add_left(node);
+ ir_node *right = get_Add_right(node);
+
+ if(left != right)
+ return 0;
+ if(is_Const(left))
+ return 0;
+
+ val = 1;
+ shifted_val = left;
+ } else {
return 0;
- if(val == 0) {
- ir_fprintf(stderr, "Optimisation warning: unoptimized Shl(,0) found\n");
}
+
+ /* we can only eat a shl if we don't have a scale or index set yet */
+ if(addr->scale != 0 || addr->index != NULL)
+ return 0;
if(bitset_is_set(non_address_mode_nodes, get_irn_idx(node)))
return 0;
#endif
addr->scale = val;
- addr->index = eat_immediates(addr, get_Shl_left(node), 0);
+ addr->index = shifted_val;
return 1;
}
/* starting point Add, Sub or Shl, FrameAddr */
if(is_Shl(node)) {
+ /* We don't want to eat add x, x as shl here, so only test for real Shl
+ * instructions, because we want the former as Lea x, x, not Shl x, 1 */
if(eat_shl(addr, node))
return;
} else if(is_immediate(addr, node, 0)) {
assert(force || !is_immediate(addr, left, 0));
assert(force || !is_immediate(addr, right, 0));
- if(is_Shl(left) && eat_shl(addr, left)) {
+ if(eat_shl(addr, left)) {
left = NULL;
- } else if(is_Shl(right) && eat_shl(addr, right)) {
+ } else if(eat_shl(addr, right)) {
right = NULL;
}
if(left != NULL && be_is_FrameAddr(left)
addr->base = node;
}
+void ia32_mark_non_am(ir_node *node)
+{
+ bitset_set(non_address_mode_nodes, get_irn_idx(node));
+}
/**
* Walker: mark those nodes that cannot be part of an address mode because
ir_node *ptr;
ir_node *mem;
ir_node *val;
+ ir_node *block;
ir_node *left;
ir_node *right;
+ ir_mode *mode;
+ const ir_edge_t *edge;
(void) env;
+ mode = get_irn_mode(node);
+ if(!mode_is_int(mode) && !mode_is_reference(mode) && mode != mode_b)
+ return;
+
switch(get_irn_opcode(node)) {
case iro_Load:
ptr = get_Load_ptr(node);
bitset_set(non_address_mode_nodes, get_irn_idx(mem));
break;
+ case iro_Shl:
case iro_Add:
- left = get_Add_left(node);
- right = get_Add_right(node);
- /* if we can do source address mode then we will never fold the add
- * into address mode */
- if(!mode_is_float(get_irn_mode(node)) && (is_immediate_simple(right) ||
- (!ia32_use_source_address_mode(get_nodes_block(node), left, right)
- && !ia32_use_source_address_mode(get_nodes_block(node), right, left))))
- {
- break;
+ /* only 1 user: AM folding is always beneficial */
+ if(get_irn_n_edges(node) <= 1)
+ break;
+
+ /* for adds and shls with multiple users we use this heuristic:
+ * we do not fold them into address mode if their operands don't live
+ * out of the block, because in this case we will reduce register
+ * pressure. Otherwise we fold them in aggressively in the hope that
+ * the node itself doesn't exist anymore and we were able to save the
+ * register for the result */
+ block = get_nodes_block(node);
+ left = get_binop_left(node);
+ right = get_binop_right(node);
+
+ /* live end: we won't save a register by AM folding */
+ if(be_is_live_end(lv, block, left) || be_is_live_end(lv, block, right))
+ return;
+
+ /* if multiple nodes in this block use left/right values, then we
+ * can't really decide whether the values will die after node.
+ * We use aggressive mode then, since it's usually just multiple address
+ * calculations. */
+ foreach_out_edge(left, edge) {
+ ir_node *user = get_edge_src_irn(edge);
+ if(user != node && get_nodes_block(user) == block)
+ return;
+ }
+ foreach_out_edge(right, edge) {
+ ir_node *user = get_edge_src_irn(edge);
+ if(user != node && get_nodes_block(user) == block)
+ return;
}
+
+ /* no one else in this block is using left/right so we'll reduce register
+ * pressure if we don't fold the node */
bitset_set(non_address_mode_nodes, get_irn_idx(node));
- /* fallthrough */
+ break;
default:
arity = get_irn_arity(node);
}
}
-void calculate_non_address_mode_nodes(ir_graph *irg)
+void ia32_calculate_non_address_mode_nodes(be_irg_t *birg)
{
+ ir_graph *irg = be_get_birg_irg(birg);
+
+ lv = be_assure_liveness(birg);
non_address_mode_nodes = bitset_malloc(get_irg_last_idx(irg));
irg_walk_graph(irg, NULL, mark_non_address_nodes, NULL);
}
-void free_non_address_mode_nodes(void)
+void ia32_free_non_address_mode_nodes(void)
{
bitset_free(non_address_mode_nodes);
}