+ sched_add_before(node, test);
+ copy_mark(node, test);
+ be_peephole_exchange(node, test);
+}
+
+/**
+ * Peephole optimization for Test instructions.
+ * - Remove the Test if an appropriate flag was produced which is still live.
+ * - Change a Test(x, c) to a Test8Bit if 0 <= c < 256 (opcode is 3 bytes shorter).
+ */
+static void peephole_ia32_Test(ir_node *node)
+{
+ ir_node *left = get_irn_n(node, n_ia32_Test_left);
+ ir_node *right = get_irn_n(node, n_ia32_Test_right);
+
+ assert(n_ia32_Test_left == n_ia32_Test8Bit_left
+ && n_ia32_Test_right == n_ia32_Test8Bit_right);
+
+ if (left == right) { /* we need a test for 0 */
+ ir_node *block = get_nodes_block(node);
+ int pn = pn_ia32_res;
+ ir_node *flags_proj;
+ ir_mode *flags_mode;
+ ir_node *schedpoint;
+ const ir_edge_t *edge;
+
+ if (get_nodes_block(left) != block)
+ return;
+
+ if (is_Proj(left)) {
+ pn = get_Proj_proj(left);
+ left = get_Proj_pred(left);
+ }
+
+ /* walk schedule up and abort when we find left or some other node
+ * destroys the flags */
+ schedpoint = node;
+ for (;;) {
+ schedpoint = sched_prev(schedpoint);
+ if (schedpoint == left)
+ break;
+ if (arch_irn_is(schedpoint, modify_flags))
+ return;
+ if (schedpoint == block)
+ panic("couldn't find left");
+ }
+
+ /* make sure only Lg/Eq tests are used */
+ foreach_out_edge(node, edge) {
+ ir_node *user = get_edge_src_irn(edge);
+ int pnc = get_ia32_condcode(user);
+
+ if (pnc != pn_Cmp_Eq && pnc != pn_Cmp_Lg) {
+ return;
+ }
+ }
+
+ switch (produces_test_flag(left, pn)) {
+ case produces_flag_zero:
+ break;
+
+ case produces_flag_carry:
+ foreach_out_edge(node, edge) {
+ ir_node *user = get_edge_src_irn(edge);
+ int pnc = get_ia32_condcode(user);
+
+ switch (pnc) {
+ case pn_Cmp_Eq: pnc = ia32_pn_Cmp_not_carry; break;
+ case pn_Cmp_Lg: pnc = ia32_pn_Cmp_carry; break;
+ default: panic("unexpected pn");
+ }
+ set_ia32_condcode(user, pnc);
+ }
+ break;
+
+ default:
+ return;
+ }
+
+ if (get_irn_mode(left) != mode_T) {
+ set_irn_mode(left, mode_T);
+
+ /* If there are other users, reroute them to result proj */
+ if (get_irn_n_edges(left) != 2) {
+ ir_node *res = new_r_Proj(left, mode_Iu, pn_ia32_res);
+
+ edges_reroute(left, res, current_ir_graph);
+ /* Reattach the result proj to left */
+ set_Proj_pred(res, left);
+ }
+ }
+
+ flags_mode = ia32_reg_classes[CLASS_ia32_flags].mode;
+ flags_proj = new_r_Proj(left, flags_mode, pn_ia32_flags);
+ arch_set_irn_register(flags_proj, &ia32_flags_regs[REG_EFLAGS]);
+
+ assert(get_irn_mode(node) != mode_T);
+
+ be_peephole_exchange(node, flags_proj);
+ } else if (is_ia32_Immediate(right)) {
+ ia32_immediate_attr_t const *const imm = get_ia32_immediate_attr_const(right);
+ unsigned offset;
+
+ /* A test with a symconst is rather strange, but better safe than sorry */
+ if (imm->symconst != NULL)
+ return;
+
+ offset = imm->offset;
+ if (get_ia32_op_type(node) == ia32_AddrModeS) {
+ ia32_attr_t *const attr = get_irn_generic_attr(node);
+
+ if ((offset & 0xFFFFFF00) == 0) {
+ /* attr->am_offs += 0; */
+ } else if ((offset & 0xFFFF00FF) == 0) {
+ ir_node *imm = ia32_create_Immediate(NULL, 0, offset >> 8);
+ set_irn_n(node, n_ia32_Test_right, imm);
+ attr->am_offs += 1;
+ } else if ((offset & 0xFF00FFFF) == 0) {
+ ir_node *imm = ia32_create_Immediate(NULL, 0, offset >> 16);
+ set_irn_n(node, n_ia32_Test_right, imm);
+ attr->am_offs += 2;
+ } else if ((offset & 0x00FFFFFF) == 0) {
+ ir_node *imm = ia32_create_Immediate(NULL, 0, offset >> 24);
+ set_irn_n(node, n_ia32_Test_right, imm);
+ attr->am_offs += 3;
+ } else {
+ return;
+ }
+ } else if (offset < 256) {
+ arch_register_t const* const reg = arch_get_irn_register(left);