return 1;
}
-/**
- * Map a Shl (a_l, a_h, count)
- */
-static int map_Shl(ir_node *call, void *ctx)
-{
- ir_graph *irg = current_ir_graph;
- dbg_info *dbg = get_irn_dbg_info(call);
- ir_node *block = get_nodes_block(call);
- ir_node **params = get_Call_param_arr(call);
- ir_type *method = get_Call_type(call);
- ir_node *a_l = params[BINOP_Left_Low];
- ir_node *a_h = params[BINOP_Left_High];
- ir_node *cnt = params[BINOP_Right_Low];
- ir_mode *l_mode = get_type_mode(get_method_res_type(method, 0));
- ir_mode *h_mode = get_type_mode(get_method_res_type(method, 1));
- ir_mode *c_mode;
- ir_node *l_res, *h_res, *irn, *cond, *upper, *n_block, *l1, *l2, *h1, *h2, *in[2];
- (void) ctx;
-
- if (is_Const(cnt)) {
- /* the shift count is a const, create better code */
- ir_tarval *tv = get_Const_tarval(cnt);
-
- if (tarval_cmp(tv, new_tarval_from_long(32, l_mode))
- & (ir_relation_greater_equal)) {
- /* simplest case: shift only the lower bits. Note that there is no
- need to reduce the constant here, this is done by the hardware. */
- ir_node *conv = new_rd_Conv(dbg, block, a_l, h_mode);
- h_res = new_rd_Shl(dbg, block, conv, cnt, h_mode);
- l_res = new_rd_Const(dbg, irg, get_mode_null(l_mode));
-
- } else {
- /* h_res = SHLD a_h, a_l, cnt */
- h_res = new_bd_ia32_l_ShlD(dbg, block, a_h, a_l, cnt, h_mode);
-
- /* l_res = SHL a_l, cnt */
- l_res = new_bd_ia32_l_ShlDep(dbg, block, a_l, cnt, h_res, l_mode);
- }
-
- resolve_call(call, l_res, h_res, irg, block);
- return 1;
- }
-
- part_block(call);
- upper = get_nodes_block(call);
-
- /* h_res = SHLD a_h, a_l, cnt */
- h1 = new_bd_ia32_l_ShlD(dbg, upper, a_h, a_l, cnt, h_mode);
-
- /* l_res = SHL a_l, cnt */
- l1 = new_bd_ia32_l_ShlDep(dbg, upper, a_l, cnt, h1, l_mode);
-
- c_mode = get_irn_mode(cnt);
- irn = new_r_Const_long(irg, c_mode, 32);
- irn = new_rd_And(dbg, upper, cnt, irn, c_mode);
- irn = new_rd_Cmp(dbg, upper, irn, new_r_Const(irg, get_mode_null(c_mode)), ir_relation_equal);
- cond = new_rd_Cond(dbg, upper, irn);
-
- in[0] = new_r_Proj(cond, mode_X, pn_Cond_true);
- in[1] = new_r_Proj(cond, mode_X, pn_Cond_false);
-
- /* the block for cnt >= 32 */
- n_block = new_rd_Block(dbg, irg, 1, &in[1]);
- h2 = new_rd_Conv(dbg, n_block, l1, h_mode);
- l2 = new_r_Const(irg, get_mode_null(l_mode));
- in[1] = new_r_Jmp(n_block);
-
- set_irn_in(block, 2, in);
-
- in[0] = l1;
- in[1] = l2;
- l_res = new_r_Phi(block, 2, in, l_mode);
- set_Block_phis(block, l_res);
-
- in[0] = h1;
- in[1] = h2;
- h_res = new_r_Phi(block, 2, in, h_mode);
- set_Phi_next(l_res, h_res);
- set_Phi_next(h_res, NULL);
-
- /* move it down */
- set_nodes_block(call, block);
- for (irn = (ir_node*)get_irn_link(call); irn != NULL;
- irn = (ir_node*)get_irn_link(irn)) {
- set_nodes_block(irn, block);
- }
-
- resolve_call(call, l_res, h_res, irg, block);
- return 1;
-}
-
-/**
- * Map a Shr (a_l, a_h, count)
- */
-static int map_Shr(ir_node *call, void *ctx)
-{
- ir_graph *irg = current_ir_graph;
- dbg_info *dbg = get_irn_dbg_info(call);
- ir_node *block = get_nodes_block(call);
- ir_node **params = get_Call_param_arr(call);
- ir_type *method = get_Call_type(call);
- ir_node *a_l = params[BINOP_Left_Low];
- ir_node *a_h = params[BINOP_Left_High];
- ir_node *cnt = params[BINOP_Right_Low];
- ir_mode *l_mode = get_type_mode(get_method_res_type(method, 0));
- ir_mode *h_mode = get_type_mode(get_method_res_type(method, 1));
- ir_mode *c_mode;
- ir_node *l_res, *h_res, *irn, *cond, *upper, *n_block, *l1, *l2, *h1, *h2, *in[2];
- (void) ctx;
-
- if (is_Const(cnt)) {
- /* the shift count is a const, create better code */
- ir_tarval *tv = get_Const_tarval(cnt);
-
- if (tarval_cmp(tv, new_tarval_from_long(32, l_mode)) & (ir_relation_greater_equal)) {
- /* simplest case: shift only the higher bits. Note that there is no
- need to reduce the constant here, this is done by the hardware. */
- ir_node *conv = new_rd_Conv(dbg, block, a_h, l_mode);
- h_res = new_rd_Const(dbg, irg, get_mode_null(h_mode));
- l_res = new_rd_Shr(dbg, block, conv, cnt, l_mode);
- } else {
- /* l_res = SHRD a_h:a_l, cnt */
- l_res = new_bd_ia32_l_ShrD(dbg, block, a_l, a_h, cnt, l_mode);
-
- /* h_res = SHR a_h, cnt */
- h_res = new_bd_ia32_l_ShrDep(dbg, block, a_h, cnt, l_res, h_mode);
- }
- resolve_call(call, l_res, h_res, irg, block);
- return 1;
- }
-
- part_block(call);
- upper = get_nodes_block(call);
-
- /* l_res = SHRD a_h:a_l, cnt */
- l1 = new_bd_ia32_l_ShrD(dbg, upper, a_l, a_h, cnt, l_mode);
-
- /* h_res = SHR a_h, cnt */
- h1 = new_bd_ia32_l_ShrDep(dbg, upper, a_h, cnt, l1, h_mode);
-
- c_mode = get_irn_mode(cnt);
- irn = new_r_Const_long(irg, c_mode, 32);
- irn = new_rd_And(dbg, upper, cnt, irn, c_mode);
- irn = new_rd_Cmp(dbg, upper, irn, new_r_Const(irg, get_mode_null(c_mode)), ir_relation_equal);
- cond = new_rd_Cond(dbg, upper, irn);
-
- in[0] = new_r_Proj(cond, mode_X, pn_Cond_true);
- in[1] = new_r_Proj(cond, mode_X, pn_Cond_false);
-
- /* the block for cnt >= 32 */
- n_block = new_rd_Block(dbg, irg, 1, &in[1]);
- l2 = new_rd_Conv(dbg, n_block, h1, l_mode);
- h2 = new_r_Const(irg, get_mode_null(h_mode));
- in[1] = new_r_Jmp(n_block);
-
- set_irn_in(block, 2, in);
-
- in[0] = l1;
- in[1] = l2;
- l_res = new_r_Phi(block, 2, in, l_mode);
- set_Block_phis(block, l_res);
-
- in[0] = h1;
- in[1] = h2;
- h_res = new_r_Phi(block, 2, in, h_mode);
- set_Phi_next(l_res, h_res);
- set_Phi_next(h_res, NULL);
-
- /* move it down */
- set_nodes_block(call, block);
- for (irn = (ir_node*)get_irn_link(call); irn != NULL;
- irn = (ir_node*)get_irn_link(irn)) {
- set_nodes_block(irn, block);
- }
-
- resolve_call(call, l_res, h_res, irg, block);
- return 1;
-}
-
-/**
- * Map a Shrs (a_l, a_h, count)
- */
-static int map_Shrs(ir_node *call, void *ctx)
-{
- ir_graph *irg = current_ir_graph;
- dbg_info *dbg = get_irn_dbg_info(call);
- ir_node *block = get_nodes_block(call);
- ir_node **params = get_Call_param_arr(call);
- ir_type *method = get_Call_type(call);
- ir_node *a_l = params[BINOP_Left_Low];
- ir_node *a_h = params[BINOP_Left_High];
- ir_node *cnt = params[BINOP_Right_Low];
- ir_mode *l_mode = get_type_mode(get_method_res_type(method, 0));
- ir_mode *h_mode = get_type_mode(get_method_res_type(method, 1));
- ir_mode *c_mode;
- ir_node *l_res, *h_res, *irn, *cond, *upper, *n_block, *l1, *l2, *h1, *h2, *in[2];
- (void) ctx;
-
- if (is_Const(cnt)) {
- /* the shift count is a const, create better code */
- ir_tarval *tv = get_Const_tarval(cnt);
-
- if (tarval_cmp(tv, new_tarval_from_long(32, l_mode)) & (ir_relation_greater_equal)) {
- /* simplest case: shift only the higher bits. Note that there is no
- need to reduce the constant here, this is done by the hardware. */
- ir_node *conv = new_rd_Conv(dbg, block, a_h, l_mode);
- ir_mode *c_mode = get_irn_mode(cnt);
-
- h_res = new_rd_Shrs(dbg, block, a_h, new_r_Const_long(irg, c_mode, 31), h_mode);
- l_res = new_rd_Shrs(dbg, block, conv, cnt, l_mode);
- } else {
- /* l_res = SHRD a_h:a_l, cnt */
- l_res = new_bd_ia32_l_ShrD(dbg, block, a_l, a_h, cnt, l_mode);
-
- /* h_res = SAR a_h, cnt */
- h_res = new_bd_ia32_l_SarDep(dbg, block, a_h, cnt, l_res, h_mode);
- }
- resolve_call(call, l_res, h_res, irg, block);
- return 1;
- }
-
- part_block(call);
- upper = get_nodes_block(call);
-
- /* l_res = SHRD a_h:a_l, cnt */
- l1 = new_bd_ia32_l_ShrD(dbg, upper, a_l, a_h, cnt, l_mode);
-
- /* h_res = SAR a_h, cnt */
- h1 = new_bd_ia32_l_SarDep(dbg, upper, a_h, cnt, l1, h_mode);
-
- c_mode = get_irn_mode(cnt);
- irn = new_r_Const_long(irg, c_mode, 32);
- irn = new_rd_And(dbg, upper, cnt, irn, c_mode);
- irn = new_rd_Cmp(dbg, upper, irn, new_r_Const(irg, get_mode_null(c_mode)), ir_relation_equal);
- cond = new_rd_Cond(dbg, upper, irn);
-
- in[0] = new_r_Proj(cond, mode_X, pn_Cond_true);
- in[1] = new_r_Proj(cond, mode_X, pn_Cond_false);
-
- /* the block for cnt >= 32 */
- n_block = new_rd_Block(dbg, irg, 1, &in[1]);
- l2 = new_rd_Conv(dbg, n_block, h1, l_mode);
- h2 = new_rd_Shrs(dbg, n_block, a_h, new_r_Const_long(irg, c_mode, 31), h_mode);
- in[1] = new_r_Jmp(n_block);
-
- set_irn_in(block, 2, in);
-
- in[0] = l1;
- in[1] = l2;
- l_res = new_r_Phi(block, 2, in, l_mode);
- set_Block_phis(block, l_res);
-
- in[0] = h1;
- in[1] = h2;
- h_res = new_r_Phi(block, 2, in, h_mode);
- set_Phi_next(l_res, h_res);
- set_Phi_next(h_res, NULL);
-
- /* move it down */
- set_nodes_block(call, block);
- for (irn = (ir_node*)get_irn_link(call); irn != NULL;
- irn = (ir_node*)get_irn_link(irn)) {
- set_nodes_block(irn, block);
- }
-
- resolve_call(call, l_res, h_res, irg, block);
- return 1;
-}
-
/**
* Checks where node high is a sign extension of low.
*/
ent = &i_ents[iro_Sub];
mapper = map_Sub;
break;
- case iro_Shl:
- ent = &i_ents[iro_Shl];
- mapper = map_Shl;
- break;
- case iro_Shr:
- ent = &i_ents[iro_Shr];
- mapper = map_Shr;
- break;
- case iro_Shrs:
- ent = &i_ents[iro_Shrs];
- mapper = map_Shrs;
- break;
case iro_Mul:
ent = &i_ents[iro_Mul];
mapper = map_Mul;
#include "irprintf.h"
#include "debug.h"
#include "irdom.h"
+#include "iropt.h"
#include "error.h"
#include "array_t.h"
#include "heights.h"
match_commutative | match_mode_neutral | match_am | match_immediate);
}
+/**
+ * test whether 2 values result in 'x' and '32-x' when interpreted as a shift
+ * value.
+ */
+static bool is_complementary_shifts(ir_node *value1, ir_node *value2)
+{
+ if (is_Const(value1) && is_Const(value2)) {
+ ir_tarval *tv1 = get_Const_tarval(value1);
+ ir_tarval *tv2 = get_Const_tarval(value2);
+ if (tarval_is_long(tv1) && tarval_is_long(tv2)) {
+ long v1 = get_tarval_long(tv1);
+ long v2 = get_tarval_long(tv2);
+ return v1 < v2 && v2 == 32-v1;
+ }
+ }
+ return false;
+}
+typedef ir_node* (*new_shiftd_func)(dbg_info *dbgi, ir_node *block,
+ ir_node *high, ir_node *low,
+ ir_node *count);
+
+/**
+ * Transforms a l_ShlD/l_ShrD into a ShlD/ShrD. Those nodes have 3 data inputs:
+ * op1 - target to be shifted
+ * op2 - contains bits to be shifted into target
+ * op3 - shift count
+ * Only op3 can be an immediate.
+ */
+static ir_node *gen_64bit_shifts(dbg_info *dbgi, ir_node *block,
+ ir_node *high, ir_node *low, ir_node *count,
+ new_shiftd_func func)
+{
+ ir_node *new_block = be_transform_node(block);
+ ir_node *new_high = be_transform_node(high);
+ ir_node *new_low = be_transform_node(low);
+ ir_node *new_count;
+ ir_node *new_node;
+
+ /* the shift amount can be any mode that is bigger than 5 bits, since all
+ * other bits are ignored anyway */
+ while (is_Conv(count) &&
+ get_irn_n_edges(count) == 1 &&
+ mode_is_int(get_irn_mode(count))) {
+ assert(get_mode_size_bits(get_irn_mode(count)) >= 5);
+ count = get_Conv_op(count);
+ }
+ new_count = create_immediate_or_transform(count, 0);
+
+ new_node = func(dbgi, new_block, new_high, new_low, new_count);
+ return new_node;
+}
+
+static ir_node *match_64bit_shift(ir_node *node)
+{
+ ir_node *op1 = get_Or_left(node);
+ ir_node *op2 = get_Or_right(node);
+
+ if (is_Shr(op1)) {
+ ir_node *tmp = op1;
+ op1 = op2;
+ op2 = tmp;
+ }
+
+ /* match ShlD operation */
+ if (is_Shl(op1) && is_Shr(op2)) {
+ ir_node *shl_right = get_Shl_right(op1);
+ ir_node *shl_left = get_Shl_left(op1);
+ ir_node *shr_right = get_Shr_right(op2);
+ ir_node *shr_left = get_Shr_left(op2);
+ /* constant ShlD operation */
+ if (is_complementary_shifts(shl_right, shr_right)) {
+ dbg_info *dbgi = get_irn_dbg_info(node);
+ ir_node *block = get_nodes_block(node);
+ return gen_64bit_shifts(dbgi, block, shl_left, shr_left, shl_right,
+ new_bd_ia32_ShlD);
+ }
+ /* constant ShrD operation */
+ if (is_complementary_shifts(shr_right, shl_right)) {
+ dbg_info *dbgi = get_irn_dbg_info(node);
+ ir_node *block = get_nodes_block(node);
+ return gen_64bit_shifts(dbgi, block, shr_left, shl_left, shr_right,
+ new_bd_ia32_ShrD);
+ }
+ /* lower_dw produces the following for ShlD:
+ * Or(Shr(Shr(high,1),Not(c)),Shl(low,c)) */
+ if (is_Shr(shr_left) && is_Not(shr_right)
+ && is_Const_1(get_Shr_right(shr_left))
+ && get_Not_op(shr_right) == shl_right) {
+ dbg_info *dbgi = get_irn_dbg_info(node);
+ ir_node *block = get_nodes_block(node);
+ ir_node *val_h = get_Shr_left(shr_left);
+ return gen_64bit_shifts(dbgi, block, shl_left, val_h, shl_right,
+ new_bd_ia32_ShlD);
+ }
+ /* lower_dw produces the following for ShrD:
+ * Or(Shl(Shl(high,1),Not(c)), Shr(low,c)) */
+ if (is_Shl(shl_left) && is_Not(shl_right)
+ && is_Const_1(get_Shl_right(shl_left))
+ && get_Not_op(shl_right) == shr_right) {
+ dbg_info *dbgi = get_irn_dbg_info(node);
+ ir_node *block = get_nodes_block(node);
+ ir_node *val_h = get_Shl_left(shl_left);
+ return gen_64bit_shifts(dbgi, block, shr_left, val_h, shr_right,
+ new_bd_ia32_ShrD);
+ }
+ }
+
+ return NULL;
+}
/**
* Creates an ia32 Or.
{
ir_node *op1 = get_Or_left(node);
ir_node *op2 = get_Or_right(node);
+ ir_node *res;
+
+ res = match_64bit_shift(node);
+ if (res != NULL)
+ return res;
assert (! mode_is_float(get_irn_mode(node)));
return gen_binop(node, op1, op2, new_bd_ia32_Or, match_commutative
return new_node;
}
-static ir_node *gen_ia32_l_ShlDep(ir_node *node)
-{
- ir_node *left = get_irn_n(node, n_ia32_l_ShlDep_val);
- ir_node *right = get_irn_n(node, n_ia32_l_ShlDep_count);
-
- return gen_shift_binop(node, left, right, new_bd_ia32_Shl,
- match_immediate | match_mode_neutral);
-}
-
-static ir_node *gen_ia32_l_ShrDep(ir_node *node)
-{
- ir_node *left = get_irn_n(node, n_ia32_l_ShrDep_val);
- ir_node *right = get_irn_n(node, n_ia32_l_ShrDep_count);
- return gen_shift_binop(node, left, right, new_bd_ia32_Shr,
- match_immediate);
-}
-
-static ir_node *gen_ia32_l_SarDep(ir_node *node)
-{
- ir_node *left = get_irn_n(node, n_ia32_l_SarDep_val);
- ir_node *right = get_irn_n(node, n_ia32_l_SarDep_count);
- return gen_shift_binop(node, left, right, new_bd_ia32_Sar,
- match_immediate);
-}
-
static ir_node *gen_ia32_l_Add(ir_node *node)
{
ir_node *left = get_irn_n(node, n_ia32_l_Add_left);
match_am | match_immediate | match_mode_neutral);
}
-/**
- * Transforms a l_ShlD/l_ShrD into a ShlD/ShrD. Those nodes have 3 data inputs:
- * op1 - target to be shifted
- * op2 - contains bits to be shifted into target
- * op3 - shift count
- * Only op3 can be an immediate.
- */
-static ir_node *gen_lowered_64bit_shifts(ir_node *node, ir_node *high,
- ir_node *low, ir_node *count)
-{
- ir_node *block = get_nodes_block(node);
- ir_node *new_block = be_transform_node(block);
- dbg_info *dbgi = get_irn_dbg_info(node);
- ir_node *new_high = be_transform_node(high);
- ir_node *new_low = be_transform_node(low);
- ir_node *new_count;
- ir_node *new_node;
-
- /* the shift amount can be any mode that is bigger than 5 bits, since all
- * other bits are ignored anyway */
- while (is_Conv(count) &&
- get_irn_n_edges(count) == 1 &&
- mode_is_int(get_irn_mode(count))) {
- assert(get_mode_size_bits(get_irn_mode(count)) >= 5);
- count = get_Conv_op(count);
- }
- new_count = create_immediate_or_transform(count, 0);
-
- if (is_ia32_l_ShlD(node)) {
- new_node = new_bd_ia32_ShlD(dbgi, new_block, new_high, new_low,
- new_count);
- } else {
- new_node = new_bd_ia32_ShrD(dbgi, new_block, new_high, new_low,
- new_count);
- }
- SET_IA32_ORIG_NODE(new_node, node);
-
- return new_node;
-}
-
-static ir_node *gen_ia32_l_ShlD(ir_node *node)
-{
- ir_node *high = get_irn_n(node, n_ia32_l_ShlD_val_high);
- ir_node *low = get_irn_n(node, n_ia32_l_ShlD_val_low);
- ir_node *count = get_irn_n(node, n_ia32_l_ShlD_count);
- return gen_lowered_64bit_shifts(node, high, low, count);
-}
-
-static ir_node *gen_ia32_l_ShrD(ir_node *node)
-{
- ir_node *high = get_irn_n(node, n_ia32_l_ShrD_val_high);
- ir_node *low = get_irn_n(node, n_ia32_l_ShrD_val_low);
- ir_node *count = get_irn_n(node, n_ia32_l_ShrD_count);
- return gen_lowered_64bit_shifts(node, high, low, count);
-}
-
static ir_node *gen_ia32_l_LLtoFloat(ir_node *node)
{
ir_node *src_block = get_nodes_block(node);
be_set_transform_function(op_ia32_l_IMul, gen_ia32_l_IMul);
be_set_transform_function(op_ia32_l_LLtoFloat, gen_ia32_l_LLtoFloat);
be_set_transform_function(op_ia32_l_Mul, gen_ia32_l_Mul);
- be_set_transform_function(op_ia32_l_SarDep, gen_ia32_l_SarDep);
be_set_transform_function(op_ia32_l_Sbb, gen_ia32_l_Sbb);
- be_set_transform_function(op_ia32_l_ShlDep, gen_ia32_l_ShlDep);
- be_set_transform_function(op_ia32_l_ShlD, gen_ia32_l_ShlD);
- be_set_transform_function(op_ia32_l_ShrDep, gen_ia32_l_ShrDep);
- be_set_transform_function(op_ia32_l_ShrD, gen_ia32_l_ShrD);
be_set_transform_function(op_ia32_l_Sub, gen_ia32_l_Sub);
be_set_transform_function(op_ia32_GetEIP, be_duplicate_node);
be_set_transform_function(op_ia32_Minus64Bit, be_duplicate_node);
static pmap *lowered_type;
/** The types for the binop and unop intrinsics. */
-static ir_type *binop_tp_u, *binop_tp_s, *unop_tp_u, *unop_tp_s, *shiftop_tp_u, *shiftop_tp_s, *tp_s, *tp_u;
+static ir_type *binop_tp_u, *binop_tp_s, *unop_tp_u, *unop_tp_s, *tp_s, *tp_u;
/** the debug handle */
DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
ir_tarval *tv_mode_bits; /**< a tarval containing the number of bits in the lowered modes */
pdeq *waitq; /**< a wait queue of all nodes that must be handled later */
ir_node **lowered_phis; /**< list of lowered phis */
- pmap *proj_2_block; /**< a map from ProjX to its destination blocks */
ir_mode *high_signed; /**< doubleword signed type */
ir_mode *high_unsigned; /**< doubleword unsigned type */
ir_mode *low_signed; /**< word signed type */
static void add_block_cf_input_nr(ir_node *block, int nr, ir_node *cf)
{
int i, arity = get_irn_arity(block);
- ir_node **in, *phi;
+ ir_node **in;
+ const ir_edge_t *edge;
assert(nr < arity);
set_irn_in(block, i + 1, in);
- for (phi = get_Block_phis(block); phi != NULL; phi = get_Phi_next(phi)) {
+ foreach_out_edge(block, edge) {
+ ir_node *phi = get_edge_src_irn(edge);
+ if (!is_Phi(phi))
+ continue;
+
for (i = 0; i < arity; ++i)
in[i] = get_irn_n(phi, i);
in[i] = in[nr];
{
ir_mode *mode = get_irn_op_mode(node);
lower64_entry_t *link;
- int i;
if (mode == env->high_signed || mode == env->high_unsigned) {
unsigned idx = get_irn_idx(node);
}
return;
}
-
- if (is_Proj(node)) {
- /* link all Proj nodes to its predecessor:
- Note that Tuple Proj's and its Projs are linked either. */
- ir_node *pred = get_Proj_pred(node);
-
- set_irn_link(node, get_irn_link(pred));
- set_irn_link(pred, node);
- } else if (is_Phi(node)) {
- /* link all Phi nodes to its block */
- ir_node *block = get_nodes_block(node);
- add_Block_phi(block, node);
- } else if (is_Block(node)) {
- /* fill the Proj -> Block map */
- for (i = get_Block_n_cfgpreds(node) - 1; i >= 0; --i) {
- ir_node *pred = get_Block_cfgpred(node, i);
-
- if (is_Proj(pred))
- pmap_insert(env->proj_2_block, pred, node);
- }
- }
}
lower64_entry_t *get_node_entry(ir_node *node)
ir_node *block = get_nodes_block(node);
ir_cons_flags volatility = get_Load_volatility(node) == volatility_is_volatile
? cons_volatile : cons_none;
+ const ir_edge_t *edge;
+ const ir_edge_t *next;
if (env->params->little_endian) {
low = adr;
proj = new_r_Proj(low, mode_M, pn_Load_M);
high = new_rd_Load(dbg, block, proj, high, mode, volatility);
- ir_set_dw_lowered(node, low, high);
+ foreach_out_edge_safe(node, edge, next) {
+ ir_node *proj = get_edge_src_irn(edge);
+ if (!is_Proj(proj))
+ continue;
- for (proj = (ir_node*)get_irn_link(node); proj;
- proj = (ir_node*)get_irn_link(proj)) {
switch (get_Proj_proj(proj)) {
case pn_Load_M: /* Memory result. */
/* put it to the second one */
const lower64_entry_t *entry = get_node_entry(value);
ir_cons_flags volatility = get_Store_volatility(node) == volatility_is_volatile
? cons_volatile : cons_none;
+ const ir_edge_t *edge;
+ const ir_edge_t *next;
(void) mode;
assert(entry);
proj = new_r_Proj(low, mode_M, pn_Store_M);
high = new_rd_Store(dbg, block, proj, high, entry->high_word, volatility);
- ir_set_dw_lowered(node, low, high);
+ foreach_out_edge_safe(node, edge, next) {
+ ir_node *proj = get_edge_src_irn(edge);
+ if (!is_Proj(proj))
+ continue;
- for (proj = (ir_node*)get_irn_link(node); proj;
- proj = (ir_node*)get_irn_link(proj)) {
switch (get_Proj_proj(proj)) {
case pn_Store_M: /* Memory result. */
/* put it to the second one */
resproj = new_r_Proj(call, mode_T, pn_Call_T_result);
set_irn_pinned(call, get_irn_pinned(node));
- for (proj = (ir_node*)get_irn_link(node); proj;
- proj = (ir_node*)get_irn_link(proj)) {
+ foreach_out_edge_safe(node, edge, next) {
+ ir_node *proj = get_edge_src_irn(edge);
+ if (!is_Proj(proj))
+ continue;
+
switch (get_Proj_proj(proj)) {
case pn_Div_M: /* Memory result. */
/* reroute to the call */
resproj = new_r_Proj(call, mode_T, pn_Call_T_result);
set_irn_pinned(call, get_irn_pinned(node));
- for (proj = (ir_node*)get_irn_link(node); proj;
- proj = (ir_node*)get_irn_link(proj)) {
+ foreach_out_edge_safe(node, edge, next) {
+ ir_node *proj = get_edge_src_irn(edge);
+ if (!is_Proj(proj))
+ continue;
+
switch (get_Proj_proj(proj)) {
case pn_Mod_M: /* Memory result. */
/* reroute to the call */
}
}
-/**
- * Translate a Shiftop.
- *
- * Create an intrinsic Call.
- */
-static void lower_Shiftop(ir_node *node, ir_mode *mode)
+static ir_node *create_conv(ir_node *block, ir_node *node, ir_mode *dest_mode)
{
- ir_node *block = get_nodes_block(node);
- ir_node *left = get_binop_left(node);
- const lower64_entry_t *left_entry = get_node_entry(left);
- ir_node *right = get_binop_right(node);
- ir_node *in[3] = {
- left_entry->low_word, left_entry->high_word,
- /* it should be safe to conv to low_unsigned */
- new_r_Conv(block, right, env->low_unsigned)
- };
- dbg_info *dbgi = get_irn_dbg_info(node);
- ir_graph *irg = get_irn_irg(block);
- ir_type *mtp
- = mode_is_signed(mode) ? shiftop_tp_s : shiftop_tp_u;
- ir_node *addr
- = get_intrinsic_address(mtp, get_irn_op(node), mode, mode);
- ir_node *call
- = new_rd_Call(dbgi, block, get_irg_no_mem(irg), addr, 3, in, mtp);
- ir_node *resproj = new_r_Proj(call, mode_T, pn_Call_T_result);
- ir_node *res_low = new_r_Proj(resproj, env->low_unsigned, 0);
- ir_node *res_high = new_r_Proj(resproj, mode, 1);
-
- set_irn_pinned(call, get_irn_pinned(node));
- ir_set_dw_lowered(node, res_low, res_high);
+ if (get_irn_mode(node) == dest_mode)
+ return node;
+ return new_r_Conv(block, node, dest_mode);
}
/**
- * Translate a Shr and handle special cases.
+ * Moves node and all predecessors of node from from_bl to to_bl.
+ * Does not move predecessors of Phi nodes (or block nodes).
*/
-static void lower_Shr(ir_node *node, ir_mode *mode)
+static void move(ir_node *node, ir_node *from_bl, ir_node *to_bl)
{
- ir_graph *irg = get_irn_irg(node);
- ir_node *right = get_Shr_right(node);
-
- if (get_mode_arithmetic(mode) == irma_twos_complement && is_Const(right)) {
- ir_tarval *tv = get_Const_tarval(right);
-
- if (tarval_is_long(tv) &&
- get_tarval_long(tv) >= (long)get_mode_size_bits(mode)) {
- ir_node *block = get_nodes_block(node);
- ir_node *left = get_Shr_left(node);
- ir_mode *low_unsigned = env->low_unsigned;
- long shf_cnt = get_tarval_long(tv) - get_mode_size_bits(mode);
- const lower64_entry_t *left_entry = get_node_entry(left);
- ir_node *res_low;
- ir_node *res_high;
-
- left = left_entry->high_word;
-
- /* convert high word into low_unsigned mode if necessary */
- if (get_irn_mode(left) != low_unsigned)
- left = new_r_Conv(block, left, low_unsigned);
-
- if (shf_cnt > 0) {
- ir_node *c = new_r_Const_long(irg, low_unsigned, shf_cnt);
- res_low = new_r_Shr(block, left, c, low_unsigned);
- } else {
- res_low = left;
- }
- res_high = new_r_Const(irg, get_mode_null(mode));
- ir_set_dw_lowered(node, res_low, res_high);
+ int i, arity;
+
+ /* move this node */
+ set_nodes_block(node, to_bl);
+
+ /* move its Projs */
+ if (get_irn_mode(node) == mode_T) {
+ const ir_edge_t *edge;
+ foreach_out_edge(node, edge) {
+ ir_node *proj = get_edge_src_irn(edge);
+ if (!is_Proj(proj))
+ continue;
+ move(proj, from_bl, to_bl);
+ }
+ }
- return;
+ /* We must not move predecessors of Phi nodes, even if they are in
+ * from_bl. (because these are values from an earlier loop iteration
+ * which are not predecessors of node here)
+ */
+ if (is_Phi(node))
+ return;
+
+ /* recursion ... */
+ arity = get_irn_arity(node);
+ for (i = 0; i < arity; i++) {
+ ir_node *pred = get_irn_n(node, i);
+ ir_mode *pred_mode = get_irn_mode(pred);
+ if (get_nodes_block(pred) == from_bl)
+ move(pred, from_bl, to_bl);
+ if (pred_mode == env->high_signed || pred_mode == env->high_unsigned) {
+ ir_node *pred_low = get_lowered_low(pred);
+ ir_node *pred_high = get_lowered_high(pred);
+ if (get_nodes_block(pred_low) == from_bl)
+ move(pred_low, from_bl, to_bl);
+ if (pred_high != NULL && get_nodes_block(pred_high) == from_bl)
+ move(pred_high, from_bl, to_bl);
}
}
- lower_Shiftop(node, mode);
}
/**
- * Translate a Shl and handle special cases.
+ * We need a custom version of part_block_edges because during transformation
+ * not all data-dependencies are explicit yet if a lowered node's users are not
+ * lowered yet.
+ * We can fix this by modifying move to look for such implicit dependencies.
+ * Additionally we have to keep the proj_2_block map updated
*/
-static void lower_Shl(ir_node *node, ir_mode *mode)
+static ir_node *part_block_dw(ir_node *node)
{
- ir_graph *irg = get_irn_irg(node);
- ir_node *right = get_Shl_right(node);
-
- if (get_mode_arithmetic(mode) == irma_twos_complement && is_Const(right)) {
- ir_tarval *tv = get_Const_tarval(right);
-
- if (tarval_is_long(tv)) {
- long value = get_tarval_long(tv);
- if (value >= (long)get_mode_size_bits(mode)) {
- /* simple case: shift above the lower word */
- ir_mode *mode_l;
- ir_node *block = get_nodes_block(node);
- ir_node *left = get_Shl_left(node);
- ir_node *c;
- long shf_cnt = get_tarval_long(tv) - get_mode_size_bits(mode);
- const lower64_entry_t *left_entry = get_node_entry(left);
- ir_node *res_low;
- ir_node *res_high;
-
- left = left_entry->low_word;
- left = new_r_Conv(block, left, mode);
-
- mode_l = env->low_unsigned;
- if (shf_cnt > 0) {
- c = new_r_Const_long(irg, mode_l, shf_cnt);
- res_high = new_r_Shl(block, left, c, mode);
- } else {
- res_high = left;
- }
- res_low = new_r_Const(irg, get_mode_null(mode_l));
- ir_set_dw_lowered(node, res_low, res_high);
+ ir_graph *irg = get_irn_irg(node);
+ ir_node *old_block = get_nodes_block(node);
+ int n_cfgpreds = get_Block_n_cfgpreds(old_block);
+ ir_node **cfgpreds = get_Block_cfgpred_arr(old_block);
+ ir_node *new_block = new_r_Block(irg, n_cfgpreds, cfgpreds);
+ const ir_edge_t *edge;
+ const ir_edge_t *next;
- return;
- }
- if (value == 1) {
- /* left << 1 == left + left */
- ir_node *left = get_binop_left(node);
- const lower64_entry_t *left_entry = get_node_entry(left);
- ir_node *in[4] = {
- left_entry->low_word, left_entry->high_word,
- left_entry->low_word, left_entry->high_word,
- };
- dbg_info *dbgi = get_irn_dbg_info(node);
- ir_node *block = get_nodes_block(node);
- ir_graph *irg = get_irn_irg(block);
- ir_type *mtp
- = mode_is_signed(mode) ? binop_tp_s : binop_tp_u;
- ir_node *addr
- = get_intrinsic_address(mtp, op_Add, mode, mode);
- ir_node *call
- = new_rd_Call(dbgi, block, get_irg_no_mem(irg), addr, 4, in, mtp);
- ir_node *resproj = new_r_Proj(call, mode_T, pn_Call_T_result);
- ir_node *res_low = new_r_Proj(resproj, env->low_unsigned, 0);
- ir_node *res_high = new_r_Proj(resproj, mode, 1);
- set_irn_pinned(call, get_irn_pinned(node));
- ir_set_dw_lowered(node, res_low, res_high);
-
- return;
- }
- }
+ /* old_block has no predecessors anymore for now */
+ set_irn_in(old_block, 0, NULL);
+
+ /* move node and its predecessors to new_block */
+ move(node, old_block, new_block);
+
+ /* move Phi nodes to new_block */
+ foreach_out_edge_safe(old_block, edge, next) {
+ ir_node *phi = get_edge_src_irn(edge);
+ if (!is_Phi(phi))
+ continue;
+ set_nodes_block(phi, new_block);
}
- lower_Shiftop(node, mode);
+ return old_block;
}
-/**
- * Translate a Shrs and handle special cases.
- */
-static void lower_Shrs(ir_node *node, ir_mode *mode)
+typedef ir_node* (*new_rd_shr_func)(dbg_info *dbgi, ir_node *block,
+ ir_node *left, ir_node *right,
+ ir_mode *mode);
+
+static void lower_shr_helper(ir_node *node, ir_mode *mode,
+ new_rd_shr_func new_rd_shrs)
{
- ir_graph *irg = get_irn_irg(node);
- ir_node *right = get_Shrs_right(node);
-
- if (get_mode_arithmetic(mode) == irma_twos_complement && is_Const(right)) {
- ir_tarval *tv = get_Const_tarval(right);
-
- if (tarval_is_long(tv) &&
- get_tarval_long(tv) >= (long)get_mode_size_bits(mode)) {
- ir_node *block = get_nodes_block(node);
- ir_node *left = get_Shrs_left(node);
- ir_mode *low_unsigned = env->low_unsigned;
- long shf_cnt = get_tarval_long(tv) - get_mode_size_bits(mode);
- const lower64_entry_t *left_entry = get_node_entry(left);
- ir_node *left_unsigned = left;
- ir_node *res_low;
- ir_node *res_high;
- ir_node *c;
-
- left = left_entry->high_word;
-
- /* convert high word into low_unsigned mode if necessary */
- if (get_irn_mode(left_unsigned) != low_unsigned)
- left_unsigned = new_r_Conv(block, left, low_unsigned);
-
- if (shf_cnt > 0) {
- c = new_r_Const_long(irg, low_unsigned, shf_cnt);
- res_low = new_r_Shrs(block, left_unsigned, c, low_unsigned);
- } else {
- res_low = left_unsigned;
- }
+ ir_node *right = get_binop_right(node);
+ ir_node *left = get_binop_left(node);
+ ir_mode *shr_mode = get_irn_mode(node);
+ unsigned modulo_shift = get_mode_modulo_shift(shr_mode);
+ ir_mode *low_unsigned = env->low_unsigned;
+ unsigned modulo_shift2 = get_mode_modulo_shift(mode);
+ ir_graph *irg = get_irn_irg(node);
+ ir_node *left_low = get_lowered_low(left);
+ ir_node *left_high = get_lowered_high(left);
+ dbg_info *dbgi = get_irn_dbg_info(node);
+ ir_node *lower_block;
+ ir_node *block;
+ ir_node *cnst;
+ ir_node *and;
+ ir_node *cmp;
+ ir_node *cond;
+ ir_node *proj_true;
+ ir_node *proj_false;
+ ir_node *phi_low;
+ ir_node *phi_high;
+ ir_node *lower_in[2];
+ ir_node *phi_low_in[2];
+ ir_node *phi_high_in[2];
+
+ /* this version is optimized for modulo shift architectures
+ * (and can't handle anything else) */
+ if (modulo_shift != get_mode_size_bits(shr_mode)
+ || modulo_shift2<<1 != modulo_shift) {
+ panic("Shr lowering only implemented for modulo shift shr operations");
+ }
+ if (!is_po2(modulo_shift) || !is_po2(modulo_shift2)) {
+ panic("Shr lowering only implemented for power-of-2 modes");
+ }
+ /* without two's complement the -x instead of (bit_width-x) trick won't work */
+ if (get_mode_arithmetic(shr_mode) != irma_twos_complement) {
+ panic("Shr lowering only implemented for two-complement modes");
+ }
+
+ /* if the right operand is a 64bit value, we're only interested in the
+ * lower word */
+ if (get_irn_mode(right) == env->high_unsigned) {
+ right = get_lowered_low(right);
+ } else {
+ /* shift should never have signed mode on the right */
+ assert(get_irn_mode(right) != env->high_signed);
+ ir_node *block = get_nodes_block(node);
+ right = create_conv(block, right, low_unsigned);
+ }
- c = new_r_Const(irg, get_mode_all_one(low_unsigned));
- res_high = new_r_Shrs(block, left, c, mode);
- ir_set_dw_lowered(node, res_low, res_high);
- return;
+ lower_block = part_block_dw(node);
+ env->flags |= CF_CHANGED;
+ block = get_nodes_block(node);
+
+ /* add a Cmp to test if highest bit is set <=> whether we shift more
+ * than half the word width */
+ cnst = new_r_Const_long(irg, low_unsigned, modulo_shift2);
+ and = new_r_And(block, right, cnst, low_unsigned);
+ cnst = new_r_Const(irg, get_mode_null(low_unsigned));
+ cmp = new_rd_Cmp(dbgi, block, and, cnst, ir_relation_equal);
+ cond = new_rd_Cond(dbgi, block, cmp);
+ proj_true = new_r_Proj(cond, mode_X, pn_Cond_true);
+ proj_false = new_r_Proj(cond, mode_X, pn_Cond_false);
+
+ /* the true block => shift_width < 1word */
+ {
+ /* In theory the low value (for 64bit shifts) is:
+ * Or(High << (32-x)), Low >> x)
+ * In practice High << 32-x will fail when x is zero (since we have
+ * modulo shift and 32 will be 0). So instead we use:
+ * Or(High<<1<<~x, Low >> x)
+ */
+ ir_node *in[1] = { proj_true };
+ ir_node *block_true = new_r_Block(irg, ARRAY_SIZE(in), in);
+ ir_node *res_high = new_rd_shrs(dbgi, block_true, left_high,
+ right, mode);
+ ir_node *shift_low = new_rd_Shr(dbgi, block_true, left_low, right,
+ low_unsigned);
+ ir_node *not_shiftval = new_rd_Not(dbgi, block_true, right,
+ low_unsigned);
+ ir_node *conv = create_conv(block_true, left_high,
+ low_unsigned);
+ ir_node *one = new_r_Const(irg, get_mode_one(low_unsigned));
+ ir_node *carry0 = new_rd_Shl(dbgi, block_true, conv, one,
+ low_unsigned);
+ ir_node *carry1 = new_rd_Shl(dbgi, block_true, carry0,
+ not_shiftval, low_unsigned);
+ ir_node *res_low = new_rd_Or(dbgi, block_true, shift_low, carry1,
+ low_unsigned);
+ lower_in[0] = new_r_Jmp(block_true);
+ phi_low_in[0] = res_low;
+ phi_high_in[0] = res_high;
+ }
+
+ /* false block => shift_width > 1word */
+ {
+ ir_node *in[1] = { proj_false };
+ ir_node *block_false = new_r_Block(irg, ARRAY_SIZE(in), in);
+ ir_node *conv = create_conv(block_false, left_high, low_unsigned);
+ ir_node *res_low = new_rd_shrs(dbgi, block_false, conv, right,
+ low_unsigned);
+ int cnsti = modulo_shift2-1;
+ ir_node *cnst = new_r_Const_long(irg, low_unsigned, cnsti);
+ ir_node *res_high;
+ if (new_rd_shrs == new_rd_Shrs) {
+ res_high = new_rd_shrs(dbgi, block_false, left_high, cnst, mode);
+ } else {
+ res_high = new_r_Const(irg, get_mode_null(mode));
}
+ lower_in[1] = new_r_Jmp(block_false);
+ phi_low_in[1] = res_low;
+ phi_high_in[1] = res_high;
+ }
+
+ /* patch lower block */
+ set_irn_in(lower_block, ARRAY_SIZE(lower_in), lower_in);
+ phi_low = new_r_Phi(lower_block, ARRAY_SIZE(phi_low_in), phi_low_in,
+ low_unsigned);
+ phi_high = new_r_Phi(lower_block, ARRAY_SIZE(phi_high_in), phi_high_in,
+ mode);
+ ir_set_dw_lowered(node, phi_low, phi_high);
+}
+
+/**
+ * Lower a double-word Shr (logical shift right) by delegating to the
+ * shared shr/shrs lowering helper, instantiated with the plain Shr
+ * node constructor.
+ */
+static void lower_Shr(ir_node *node, ir_mode *mode)
+{
+	lower_shr_helper(node, mode, new_rd_Shr);
+}
+
+/**
+ * Lower a double-word Shrs (arithmetic shift right) by delegating to the
+ * shared shr/shrs lowering helper, instantiated with the Shrs node
+ * constructor (the helper selects sign-extension behavior from it).
+ */
+static void lower_Shrs(ir_node *node, ir_mode *mode)
+{
+	lower_shr_helper(node, mode, new_rd_Shrs);
+}
+
+/**
+ * Lower a double-word Shl.  A runtime Cmp tests whether the shift amount
+ * is below the low-word width; the result is computed in two separate
+ * blocks (small shift / large shift) and merged with Phi nodes in the
+ * lower part of the original block.  Mirrors lower_shr_helper, but for
+ * the left-shift direction.
+ */
+static void lower_Shl(ir_node *node, ir_mode *mode)
+{
+	ir_node  *right         = get_binop_right(node);
+	ir_node  *left          = get_binop_left(node);
+	ir_mode  *shr_mode      = get_irn_mode(node);
+	unsigned  modulo_shift  = get_mode_modulo_shift(shr_mode);
+	ir_mode  *low_unsigned  = env->low_unsigned;
+	unsigned  modulo_shift2 = get_mode_modulo_shift(mode);
+	ir_graph *irg           = get_irn_irg(node);
+	ir_node  *left_low      = get_lowered_low(left);
+	ir_node  *left_high     = get_lowered_high(left);
+	dbg_info *dbgi          = get_irn_dbg_info(node);
+	ir_node  *lower_block   = get_nodes_block(node);
+	ir_node  *block;
+	ir_node  *cnst;
+	ir_node  *and;
+	ir_node  *cmp;
+	ir_node  *cond;
+	ir_node  *proj_true;
+	ir_node  *proj_false;
+	ir_node  *phi_low;
+	ir_node  *phi_high;
+	ir_node  *lower_in[2];
+	ir_node  *phi_low_in[2];
+	ir_node  *phi_high_in[2];
+
+	/* this version is optimized for modulo shift architectures
+	 * (and can't handle anything else) */
+	if (modulo_shift != get_mode_size_bits(shr_mode)
+			|| modulo_shift2<<1 != modulo_shift) {
+		panic("Shl lowering only implemented for modulo shift shl operations");
+	}
+	if (!is_po2(modulo_shift) || !is_po2(modulo_shift2)) {
+		panic("Shl lowering only implemented for power-of-2 modes");
+	}
+	/* without 2-complement the -x instead of (bit_width-x) trick won't work */
+	if (get_mode_arithmetic(shr_mode) != irma_twos_complement) {
+		panic("Shl lowering only implemented for two-complement modes");
+	}
+
+	/* if the right operand is a 64bit value, we're only interested in the
+	 * lower word */
+	if (get_irn_mode(right) == env->high_unsigned) {
+		right = get_lowered_low(right);
+	} else {
+		/* shift should never have signed mode on the right */
+		assert(get_irn_mode(right) != env->high_signed);
+		right = create_conv(lower_block, right, low_unsigned);
 	}
-	lower_Shiftop(node, mode);
+
+	part_block_dw(node);
+	env->flags |= CF_CHANGED;
+	block = get_nodes_block(node);
+
+	/* add a Cmp to test whether the relevant bit is set <=> whether we
+	 * shift more than half the word width */
+	cnst       = new_r_Const_long(irg, low_unsigned, modulo_shift2);
+	and        = new_r_And(block, right, cnst, low_unsigned);
+	cnst       = new_r_Const(irg, get_mode_null(low_unsigned));
+	cmp        = new_rd_Cmp(dbgi, block, and, cnst, ir_relation_equal);
+	cond       = new_rd_Cond(dbgi, block, cmp);
+	proj_true  = new_r_Proj(cond, mode_X, pn_Cond_true);
+	proj_false = new_r_Proj(cond, mode_X, pn_Cond_false);
+
+	/* the true block => shift_width < 1word */
+	{
+		/* In theory the high value (for 64bit shifts) is:
+		 *   Or(High << x, Low >> (32-x))
+		 * In practice Low >> (32-x) fails when x is zero (modulo shift
+		 * turns 32 into 0), so we use:
+		 *   Or(High << x, Low>>1>>~x)
+		 */
+		ir_node *in[1]        = { proj_true };
+		ir_node *block_true   = new_r_Block(irg, ARRAY_SIZE(in), in);
+
+		ir_node *res_low      = new_rd_Shl(dbgi, block_true, left_low,
+		                                   right, low_unsigned);
+		ir_node *shift_high   = new_rd_Shl(dbgi, block_true, left_high, right,
+		                                   mode);
+		ir_node *not_shiftval = new_rd_Not(dbgi, block_true, right,
+		                                   low_unsigned);
+		ir_node *conv         = create_conv(block_true, left_low, mode);
+		ir_node *one          = new_r_Const(irg, get_mode_one(mode));
+		ir_node *carry0       = new_rd_Shr(dbgi, block_true, conv, one, mode);
+		ir_node *carry1       = new_rd_Shr(dbgi, block_true, carry0,
+		                                   not_shiftval, mode);
+		ir_node *res_high     = new_rd_Or(dbgi, block_true, shift_high, carry1,
+		                                  mode);
+		lower_in[0]           = new_r_Jmp(block_true);
+		phi_low_in[0]         = res_low;
+		phi_high_in[0]        = res_high;
+	}
+
+	/* false block => shift_width >= 1word: the low word becomes zero and
+	 * the (wrapped) shift of the low word lands in the high word */
+	{
+		ir_node *in[1]       = { proj_false };
+		ir_node *block_false = new_r_Block(irg, ARRAY_SIZE(in), in);
+		ir_node *res_low     = new_r_Const(irg, get_mode_null(low_unsigned));
+		ir_node *conv        = create_conv(block_false, left_low, mode);
+		ir_node *res_high    = new_rd_Shl(dbgi, block_false, conv, right, mode);
+		lower_in[1]          = new_r_Jmp(block_false);
+		phi_low_in[1]        = res_low;
+		phi_high_in[1]       = res_high;
+	}
+
+	/* patch lower block: merge both result pairs with Phis */
+	set_irn_in(lower_block, ARRAY_SIZE(lower_in), lower_in);
+	phi_low  = new_r_Phi(lower_block, ARRAY_SIZE(phi_low_in), phi_low_in,
+	                     low_unsigned);
+	phi_high = new_r_Phi(lower_block, ARRAY_SIZE(phi_high_in), phi_high_in,
+	                     mode);
+	ir_set_dw_lowered(node, phi_low, phi_high);
 }
/**
}
}
+/**
+ * Return the destination node of a control-flow op, looked up via its
+ * out-edges.  Asserts that exactly one out-edge exists (a control-flow
+ * Proj/Jmp targets exactly one successor).
+ */
+static ir_node *get_cfop_destination(const ir_node *cfop)
+{
+	const ir_edge_t *first = get_irn_out_edge_first(cfop);
+	/* we should only have 1 destination */
+	assert(get_irn_n_edges(cfop) == 1);
+	return get_edge_src_irn(first);
+}
+
/**
* Translate a Cond.
*/
ir_relation relation;
ir_graph *irg;
dbg_info *dbg;
+ const ir_edge_t *edge;
+ const ir_edge_t *next;
(void) mode;
rentry = get_node_entry(right);
/* all right, build the code */
- for (proj = (ir_node*)get_irn_link(node); proj;
- proj = (ir_node*)get_irn_link(proj)) {
- long proj_nr = get_Proj_proj(proj);
+ foreach_out_edge_safe(node, edge, next) {
+ ir_node *proj = get_edge_src_irn(edge);
+ long proj_nr;
+ if (!is_Proj(proj))
+ continue;
+ proj_nr = get_Proj_proj(proj);
if (proj_nr == pn_Cond_true) {
assert(projT == NULL && "more than one Proj(true)");
if (relation == ir_relation_equal) {
/* simple case:a == b <==> a_h == b_h && a_l == b_l */
- pmap_entry *entry = pmap_find(env->proj_2_block, projF);
-
- assert(entry);
- dst_blk = (ir_node*)entry->value;
+ dst_blk = get_cfop_destination(projF);
irn = new_rd_Cmp(dbg, block, lentry->high_word, rentry->high_word,
ir_relation_equal);
exchange(projT, proj);
} else if (relation == ir_relation_less_greater) {
/* simple case:a != b <==> a_h != b_h || a_l != b_l */
- pmap_entry *entry = pmap_find(env->proj_2_block, projT);
-
- assert(entry);
- dst_blk = (ir_node*)entry->value;
+ dst_blk = get_cfop_destination(projT);
irn = new_rd_Cmp(dbg, block, lentry->high_word, rentry->high_word,
ir_relation_less_greater);
/* a rel b <==> a_h REL b_h || (a_h == b_h && a_l rel b_l) */
ir_node *dstT, *dstF, *newbl_eq, *newbl_l;
ir_node *projEqF;
- pmap_entry *entry;
-
- entry = pmap_find(env->proj_2_block, projT);
- assert(entry);
- dstT = (ir_node*)entry->value;
- entry = pmap_find(env->proj_2_block, projF);
- assert(entry);
- dstF = (ir_node*)entry->value;
+ dstT = get_cfop_destination(projT);
+ dstF = get_cfop_destination(projF);
irn = new_rd_Cmp(dbg, block, lentry->high_word, rentry->high_word,
relation & ~ir_relation_equal);
ir_node *irn, *call, *in[2];
ir_mode *imode = get_irn_mode(op);
ir_type *mtp = get_conv_type(imode, omode);
+ ir_node *res;
irn = get_intrinsic_address(mtp, get_irn_op(node), imode, omode);
in[0] = entry->low_word;
call = new_rd_Call(dbg, block, get_irg_no_mem(irg), irn, 2, in, mtp);
set_irn_pinned(call, get_irn_pinned(node));
irn = new_r_Proj(call, mode_T, pn_Call_T_result);
+ res = new_r_Proj(irn, omode, 0);
- exchange(node, new_r_Proj(irn, omode, 0));
+ exchange(node, res);
}
}
ir_graph *irg = get_irn_irg(node);
ir_entity *ent = get_irg_entity(irg);
ir_type *tp = get_entity_type(ent);
+ ir_node *args;
long *new_projs;
size_t i, j, n_params;
- int rem;
- ir_node *proj, *args;
+ const ir_edge_t *edge;
+ const ir_edge_t *next;
(void) mode;
if (!mtp_must_be_lowered(tp))
tp = lower_mtp(tp);
set_entity_type(ent, tp);
- /* switch off optimization for new Proj nodes or they might be CSE'ed
- with not patched one's */
- rem = get_optimize();
- set_optimize(0);
+ args = NULL;
+ foreach_out_edge(node, edge) {
+ ir_node *proj = get_edge_src_irn(edge);
+ if (!is_Proj(proj))
+ continue;
+ if (get_Proj_proj(proj) == pn_Start_T_args) {
+ args = proj;
+ break;
+ }
+ }
+ if (args == NULL)
+ return;
/* fix all Proj's and create new ones */
- args = get_irg_args(irg);
- for (proj = (ir_node*)get_irn_link(node); proj;
- proj = (ir_node*)get_irn_link(proj)) {
- ir_node *pred = get_Proj_pred(proj);
- long proj_nr;
- ir_mode *mode;
- ir_mode *mode_l;
+ foreach_out_edge_safe(args, edge, next) {
+ ir_node *proj = get_edge_src_irn(edge);
+ ir_mode *mode = get_irn_mode(proj);
+ ir_mode *mode_l = env->low_unsigned;
+ ir_node *pred;
+ long proj_nr;
ir_mode *mode_h;
ir_node *res_low;
ir_node *res_high;
dbg_info *dbg;
- /* do not visit this node again */
- mark_irn_visited(proj);
-
- if (pred != args)
+ if (!is_Proj(proj))
continue;
-
+ pred = get_Proj_pred(proj);
proj_nr = get_Proj_proj(proj);
- set_Proj_proj(proj, new_projs[proj_nr]);
- mode = get_irn_mode(proj);
- mode_l = env->low_unsigned;
if (mode == env->high_signed) {
mode_h = env->low_signed;
} else if (mode == env->high_unsigned) {
mode_h = env->low_unsigned;
} else {
+ long new_pn = new_projs[proj_nr];
+ if (new_pn != proj_nr) {
+ ir_node *new_proj = new_r_Proj(pred, mode, new_pn);
+ exchange(proj, new_proj);
+ }
continue;
}
dbg = get_irn_dbg_info(proj);
if (env->params->little_endian) {
- res_low = new_rd_Proj(dbg, args, mode_l, new_projs[proj_nr]);
- res_high = new_rd_Proj(dbg, args, mode_h, new_projs[proj_nr] + 1);
+ res_low = new_rd_Proj(dbg, pred, mode_l, new_projs[proj_nr]);
+ res_high = new_rd_Proj(dbg, pred, mode_h, new_projs[proj_nr] + 1);
} else {
- res_high = new_rd_Proj(dbg, args, mode_h, new_projs[proj_nr]);
- res_low = new_rd_Proj(dbg, args, mode_l, new_projs[proj_nr] + 1);
+ res_high = new_rd_Proj(dbg, pred, mode_h, new_projs[proj_nr]);
+ res_low = new_rd_Proj(dbg, pred, mode_l, new_projs[proj_nr] + 1);
}
ir_set_dw_lowered(proj, res_low, res_high);
}
- set_optimize(rem);
}
/**
static void lower_Call(ir_node *node, ir_mode *mode)
{
ir_type *tp = get_Call_type(node);
- ir_node **in, *proj, *results;
+ ir_node **in;
size_t n_params, n_res;
bool need_lower = false;
size_t i, j;
size_t p;
long *res_numbers = NULL;
+ ir_node *resproj;
+ const ir_edge_t *edge;
+ const ir_edge_t *next;
(void) mode;
n_params = get_method_n_params(tp);
set_irn_in(node, j, in);
- /* fix the results */
- results = NULL;
- for (proj = (ir_node*)get_irn_link(node); proj;
- proj = (ir_node*)get_irn_link(proj)) {
- long proj_nr = get_Proj_proj(proj);
-
- if (proj_nr == pn_Call_T_result && get_Proj_pred(proj) == node) {
- /* found the result proj */
- results = proj;
+ /* find results T */
+ resproj = NULL;
+ foreach_out_edge(node, edge) {
+ ir_node *proj = get_edge_src_irn(edge);
+ if (!is_Proj(proj))
+ continue;
+ if (get_Proj_proj(proj) == pn_Call_T_result) {
+ resproj = proj;
break;
}
}
+ if (resproj == NULL)
+ return;
- if (results != NULL) { /* there are results */
- int rem = get_optimize();
-
- /* switch off optimization for new Proj nodes or they might be CSE'ed
- with not patched one's */
- set_optimize(0);
- for (proj = (ir_node*)get_irn_link(results); proj; proj = (ir_node*)get_irn_link(proj)) {
- if (get_Proj_pred(proj) == results) {
- long proj_nr = get_Proj_proj(proj);
- ir_mode *proj_mode = get_irn_mode(proj);
- ir_mode *mode_l;
- ir_mode *mode_h;
- ir_node *res_low;
- ir_node *res_high;
- dbg_info *dbg;
-
- /* found a result */
- mark_irn_visited(proj);
-
- set_Proj_proj(proj, res_numbers[proj_nr]);
-
- mode_l = env->low_unsigned;
- if (proj_mode == env->high_signed) {
- mode_h = env->low_signed;
- } else if (proj_mode == env->high_unsigned) {
- mode_h = env->low_unsigned;
- } else {
- continue;
- }
+ /* fix the results */
+ foreach_out_edge_safe(resproj, edge, next) {
+ ir_node *proj = get_edge_src_irn(edge);
+ ir_mode *proj_mode = get_irn_mode(proj);
+ ir_mode *mode_l = env->low_unsigned;
+ ir_node *pred;
+ long proj_nr;
+ ir_mode *mode_h;
+ ir_node *res_low;
+ ir_node *res_high;
+ dbg_info *dbg;
- dbg = get_irn_dbg_info(proj);
- if (env->params->little_endian) {
- res_low = new_rd_Proj(dbg, results, mode_l,
- res_numbers[proj_nr]);
- res_high = new_rd_Proj(dbg, results, mode_h,
- res_numbers[proj_nr] + 1);
- } else {
- res_high = new_rd_Proj(dbg, results, mode_h,
- res_numbers[proj_nr]);
- res_low = new_rd_Proj(dbg, results, mode_l,
- res_numbers[proj_nr] + 1);
- }
- ir_set_dw_lowered(proj, res_low, res_high);
+ if (!is_Proj(proj))
+ continue;
+ pred = get_Proj_pred(proj);
+ proj_nr = get_Proj_proj(proj);
+
+ if (proj_mode == env->high_signed) {
+ mode_h = env->low_signed;
+ } else if (proj_mode == env->high_unsigned) {
+ mode_h = env->low_unsigned;
+ } else {
+ long new_nr = res_numbers[proj_nr];
+ if (proj_nr != new_nr) {
+ ir_node *new_proj = new_r_Proj(pred, proj_mode, new_nr);
+ exchange(proj, new_proj);
}
+ continue;
}
- set_optimize(rem);
+
+ dbg = get_irn_dbg_info(proj);
+ if (env->params->little_endian) {
+ res_low = new_rd_Proj(dbg, pred, mode_l, res_numbers[proj_nr]);
+ res_high = new_rd_Proj(dbg, pred, mode_h, res_numbers[proj_nr] + 1);
+ } else {
+ res_high = new_rd_Proj(dbg, pred, mode_h, res_numbers[proj_nr]);
+ res_low = new_rd_Proj(dbg, pred, mode_l, res_numbers[proj_nr] + 1);
+ }
+ ir_set_dw_lowered(proj, res_low, res_high);
}
}
/* remember that we need to fixup the predecessors later */
ARR_APP1(ir_node*, env->lowered_phis, phi);
-
- /* Don't forget to link the new Phi nodes into the block.
- * Beware that some Phis might be optimized away. */
- if (is_Phi(phi_l))
- add_Block_phi(block, phi_l);
- if (is_Phi(phi_h))
- add_Block_phi(block, phi_h);
}
static void fixup_phi(ir_node *phi)
ir_asm_constraint *input_constraints = get_ASM_input_constraints(asmn);
unsigned n_64bit_outs = 0;
int i;
- ir_node *n;
(void)mode;
ir_asm_constraint *new_outputs
= ALLOCAN(ir_asm_constraint, n_outs+n_64bit_outs);
ir_node *new_asm;
+ const ir_edge_t *edge;
+ const ir_edge_t *next;
for (i = 0; i < n_outs; ++i) {
const ir_asm_constraint *constraint = &output_constraints[i];
new_n_outs, new_outputs, n_clobber, clobbers,
asm_text);
- for (n = asmn;;) {
- long pn;
- ir_mode *proj_mode;
- n = (ir_node*)get_irn_link(n);
- if (n == NULL)
- break;
- proj_mode = get_irn_mode(n);
- pn = get_Proj_proj(n);
+ foreach_out_edge_safe(asmn, edge, next) {
+ ir_node *proj = get_edge_src_irn(edge);
+ ir_mode *proj_mode = get_irn_mode(proj);
+ long pn;
+
+ if (!is_Proj(proj))
+ continue;
+ pn = get_Proj_proj(proj);
+
if (pn < n_outs)
pn = proj_map[pn];
else
= proj_mode == high_signed ? env->low_signed : env->low_unsigned;
ir_node *np_low = new_r_Proj(new_asm, env->low_unsigned, pn);
ir_node *np_high = new_r_Proj(new_asm, high_mode, pn+1);
- ir_set_dw_lowered(n, np_low, np_high);
+ ir_set_dw_lowered(proj, np_low, np_high);
} else {
ir_node *np = new_r_Proj(new_asm, proj_mode, pn);
- exchange(n, np);
+ exchange(proj, np);
}
}
}
}
}
+/**
+ * Walker callback: initialize the per-node links used by the lowering
+ * phase.  mode_T nodes get themselves as link while all others get NULL
+ * (NOTE(review): the self-link presumably acts as a sentinel
+ * distinguishing tuple nodes from unprocessed ones - confirm against the
+ * users of get_irn_link in this pass).  Block Phi lists and Phi-next
+ * chains are reset as well.
+ */
+static void clear_node_and_phi_links(ir_node *node, void *data)
+{
+	(void) data;
+	if (get_irn_mode(node) == mode_T) {
+		set_irn_link(node, node);
+	} else {
+		set_irn_link(node, NULL);
+	}
+	if (is_Block(node))
+		set_Block_phis(node, NULL);
+	else if (is_Phi(node))
+		set_Phi_next(node, NULL);
+}
+
static void lower_irg(ir_graph *irg)
{
ir_entity *ent;
obstack_init(&env->obst);
+ /* just here for debugging */
+ current_ir_graph = irg;
+ edges_assure(irg);
+
n_idx = get_irg_last_idx(irg);
n_idx = n_idx + (n_idx >> 2); /* add 25% */
env->n_entries = n_idx;
env->irg = irg;
env->l_mtp = NULL;
env->flags = 0;
- env->proj_2_block = pmap_create();
env->value_param_tp = NULL;
ent = get_irg_entity(irg);
/* first step: link all nodes and allocate data */
ir_reserve_resources(irg, IR_RESOURCE_PHI_LIST | IR_RESOURCE_IRN_LINK);
- irg_walk_graph(irg, firm_clear_node_and_phi_links,
- prepare_links_and_handle_rotl, env);
+ visit_all_identities(irg, clear_node_and_phi_links, NULL);
+ irg_walk_graph(irg, NULL, prepare_links_and_handle_rotl, env);
if (env->flags & MUST_BE_LOWERED) {
size_t i;
ir_free_resources(irg, IR_RESOURCE_PHI_LIST | IR_RESOURCE_IRN_LINK);
- pmap_destroy(env->proj_2_block);
DEL_ARR_F(env->entries);
obstack_free(&env->obst, NULL);
}
set_method_res_type(binop_tp_s, 1, tp_u);
}
}
- if (! shiftop_tp_u) {
- shiftop_tp_u = new_type_method(3, 2);
- set_method_param_type(shiftop_tp_u, 0, tp_u);
- set_method_param_type(shiftop_tp_u, 1, tp_u);
- set_method_param_type(shiftop_tp_u, 2, tp_u);
- set_method_res_type(shiftop_tp_u, 0, tp_u);
- set_method_res_type(shiftop_tp_u, 1, tp_u);
- }
- if (! shiftop_tp_s) {
- shiftop_tp_s = new_type_method(3, 2);
- set_method_param_type(shiftop_tp_s, 0, tp_u);
- set_method_param_type(shiftop_tp_s, 1, tp_s);
- set_method_param_type(shiftop_tp_s, 2, tp_u);
- set_method_res_type(shiftop_tp_s, 0, tp_u);
- set_method_res_type(shiftop_tp_s, 1, tp_s);
- }
if (! unop_tp_u) {
unop_tp_u = new_type_method(2, 2);
set_method_param_type(unop_tp_u, 0, tp_u);