+/**
+ * Transform Builtin trap: emit the ia32 UD2 node (invalid opcode),
+ * threading the builtin's memory dependency through it.
+ */
+static ir_node *gen_trap(ir_node *node) {
+ dbg_info *dbgi = get_irn_dbg_info(node);
+ ir_node *new_block = be_transform_node(get_nodes_block(node));
+ ir_node *new_mem = be_transform_node(get_Builtin_mem(node));
+
+ return new_bd_ia32_UD2(dbgi, new_block, new_mem);
+}
+
+/**
+ * Transform Builtin debugbreak: emit the ia32 Breakpoint node,
+ * threading the builtin's memory dependency through it.
+ */
+static ir_node *gen_debugbreak(ir_node *node) {
+ dbg_info *dbgi = get_irn_dbg_info(node);
+ ir_node *new_block = be_transform_node(get_nodes_block(node));
+ ir_node *new_mem = be_transform_node(get_Builtin_mem(node));
+
+ return new_bd_ia32_Breakpoint(dbgi, new_block, new_mem);
+}
+
+/**
+ * Transform Builtin return_address.
+ *
+ * Loads the return address of the frame selected by the first (constant)
+ * builtin parameter; for a level > 0 the frame chain is climbed first
+ * via an ia32 ClimbFrame node.
+ */
+static ir_node *gen_return_address(ir_node *node) {
+ ir_node *param = get_Builtin_param(node, 0);
+ ir_node *frame = get_Builtin_param(node, 1);
+ dbg_info *dbgi = get_irn_dbg_info(node);
+ tarval *tv = get_Const_tarval(param);
+ unsigned long value = get_tarval_long(tv);
+
+ ir_node *block = be_transform_node(get_nodes_block(node));
+ ir_node *ptr = be_transform_node(frame);
+ ir_node *load;
+
+ if (value > 0) {
+ ir_node *cnt = new_bd_ia32_ProduceVal(dbgi, block);
+ ir_node *res = new_bd_ia32_ProduceVal(dbgi, block);
+ ptr = new_bd_ia32_ClimbFrame(dbgi, block, ptr, cnt, res, value);
+ }
+
+ /* load the return address from this frame */
+ load = new_bd_ia32_Load(dbgi, block, ptr, noreg_GP, nomem);
+
+ set_irn_pinned(load, get_irn_pinned(node));
+ set_ia32_op_type(load, ia32_AddrModeS);
+ set_ia32_ls_mode(load, mode_Iu);
+
+ /* address the return-address slot relative to the frame entity */
+ set_ia32_am_offs_int(load, 0);
+ set_ia32_use_frame(load);
+ set_ia32_frame_ent(load, ia32_get_return_address_entity());
+
+ if (get_irn_pinned(node) == op_pin_state_floats) {
+ /* a floating load may be rematerialized: the result proj numbers of
+ * all Load variants must agree for that to be safe */
+ assert(pn_ia32_xLoad_res == pn_ia32_vfld_res
+ && pn_ia32_vfld_res == pn_ia32_Load_res
+ && pn_ia32_Load_res == pn_ia32_res);
+ arch_irn_add_flags(load, arch_irn_flags_rematerializable);
+ }
+
+ SET_IA32_ORIG_NODE(load, node);
+ return new_r_Proj(current_ir_graph, block, load, mode_Iu, pn_ia32_Load_res);
+}
+
+/**
+ * Transform Builtin frame_address.
+ *
+ * Loads the frame address of the frame selected by the first (constant)
+ * builtin parameter; for a level > 0 the frame chain is climbed first
+ * via an ia32 ClimbFrame node.
+ */
+static ir_node *gen_frame_address(ir_node *node) {
+ ir_node *param = get_Builtin_param(node, 0);
+ ir_node *frame = get_Builtin_param(node, 1);
+ dbg_info *dbgi = get_irn_dbg_info(node);
+ tarval *tv = get_Const_tarval(param);
+ unsigned long value = get_tarval_long(tv);
+
+ ir_node *block = be_transform_node(get_nodes_block(node));
+ ir_node *ptr = be_transform_node(frame);
+ ir_node *load;
+ ir_entity *ent;
+
+ if (value > 0) {
+ ir_node *cnt = new_bd_ia32_ProduceVal(dbgi, block);
+ ir_node *res = new_bd_ia32_ProduceVal(dbgi, block);
+ ptr = new_bd_ia32_ClimbFrame(dbgi, block, ptr, cnt, res, value);
+ }
+
+ /* load the frame address from this frame */
+ load = new_bd_ia32_Load(dbgi, block, ptr, noreg_GP, nomem);
+
+ set_irn_pinned(load, get_irn_pinned(node));
+ set_ia32_op_type(load, ia32_AddrModeS);
+ set_ia32_ls_mode(load, mode_Iu);
+
+ ent = ia32_get_frame_address_entity();
+ if (ent != NULL) {
+ set_ia32_am_offs_int(load, 0);
+ set_ia32_use_frame(load);
+ set_ia32_frame_ent(load, ent);
+ } else {
+ /* will fail anyway, but gcc does this: */
+ set_ia32_am_offs_int(load, 0);
+ }
+
+ if (get_irn_pinned(node) == op_pin_state_floats) {
+ /* a floating load may be rematerialized: the result proj numbers of
+ * all Load variants must agree for that to be safe */
+ assert(pn_ia32_xLoad_res == pn_ia32_vfld_res
+ && pn_ia32_vfld_res == pn_ia32_Load_res
+ && pn_ia32_Load_res == pn_ia32_res);
+ arch_irn_add_flags(load, arch_irn_flags_rematerializable);
+ }
+
+ SET_IA32_ORIG_NODE(load, node);
+ return new_r_Proj(current_ir_graph, block, load, mode_Iu, pn_ia32_Load_res);
+}
+
+/**
+ * Transform Builtin prefetch.
+ *
+ * Selects between SSE and 3DNow! prefetch instructions depending on the
+ * configured CPU features; without any prefetch support the builtin is
+ * dropped and only its memory dependency is routed through.
+ */
+static ir_node *gen_prefetch(ir_node *node) {
+ dbg_info *dbgi;
+ ir_node *ptr, *block, *mem, *base, *index;
+ ir_node *param, *new_node;
+ long rw, locality;
+ tarval *tv;
+ ia32_address_t addr;
+
+ if (!ia32_cg_config.use_sse_prefetch && !ia32_cg_config.use_3dnow_prefetch) {
+ /* no prefetch at all, route memory */
+ return be_transform_node(get_Builtin_mem(node));
+ }
+
+ /* second builtin parameter: read/write flag (constant) */
+ param = get_Builtin_param(node, 1);
+ tv = get_Const_tarval(param);
+ rw = get_tarval_long(tv);
+
+ /* construct load address */
+ memset(&addr, 0, sizeof(addr));
+ ptr = get_Builtin_param(node, 0);
+ ia32_create_address_mode(&addr, ptr, 0);
+ base = addr.base;
+ index = addr.index;
+
+ if (base == NULL) {
+ base = noreg_GP;
+ } else {
+ base = be_transform_node(base);
+ }
+
+ if (index == NULL) {
+ index = noreg_GP;
+ } else {
+ index = be_transform_node(index);
+ }
+
+ dbgi = get_irn_dbg_info(node);
+ block = be_transform_node(get_nodes_block(node));
+ mem = be_transform_node(get_Builtin_mem(node));
+
+ if (rw == 1 && ia32_cg_config.use_3dnow_prefetch) {
+ /* we have 3DNow!, this was already checked above */
+ new_node = new_bd_ia32_PrefetchW(dbgi, block, base, index, mem);
+ } else if (ia32_cg_config.use_sse_prefetch) {
+ /* note: rw == 1 is IGNORED in that case */
+ param = get_Builtin_param(node, 2);
+ tv = get_Const_tarval(param);
+ locality = get_tarval_long(tv);
+
+ /* SSE style prefetch: pick the hint by temporal locality (0 = none) */
+ switch (locality) {
+ case 0:
+ new_node = new_bd_ia32_PrefetchNTA(dbgi, block, base, index, mem);
+ break;
+ case 1:
+ new_node = new_bd_ia32_Prefetch2(dbgi, block, base, index, mem);
+ break;
+ case 2:
+ new_node = new_bd_ia32_Prefetch1(dbgi, block, base, index, mem);
+ break;
+ default:
+ new_node = new_bd_ia32_Prefetch0(dbgi, block, base, index, mem);
+ break;
+ }
+ } else {
+ assert(ia32_cg_config.use_3dnow_prefetch);
+ /* 3DNow! style prefetch */
+ new_node = new_bd_ia32_Prefetch(dbgi, block, base, index, mem);
+ }
+
+ set_irn_pinned(new_node, get_irn_pinned(node));
+ set_ia32_op_type(new_node, ia32_AddrModeS);
+ set_ia32_ls_mode(new_node, mode_Bu);
+ set_address(new_node, &addr);
+
+ SET_IA32_ORIG_NODE(new_node, node);
+
+ be_dep_on_frame(new_node);
+ return new_r_Proj(current_ir_graph, block, new_node, mode_M, pn_ia32_Prefetch_M);
+}
+
+/**
+ * Transform a bsf-like builtin: build a unary ia32 node via @p func,
+ * allowing the single operand to be folded into an address mode.
+ *
+ * @param node  the Builtin node (first parameter is the operand)
+ * @param func  constructor for the ia32 node to create
+ */
+static ir_node *gen_unop_AM(ir_node *node, construct_binop_dest_func *func)
+{
+ ir_node *param = get_Builtin_param(node, 0);
+ dbg_info *dbgi = get_irn_dbg_info(node);
+
+ ir_node *block = get_nodes_block(node);
+ ir_node *new_block = be_transform_node(block);
+
+ ia32_address_mode_t am;
+ ia32_address_t *addr = &am.addr;
+ ir_node *cnt;
+
+ match_arguments(&am, block, NULL, param, NULL, match_am);
+
+ cnt = func(dbgi, new_block, addr->base, addr->index, addr->mem, am.new_op2);
+ set_am_attributes(cnt, &am);
+ set_ia32_ls_mode(cnt, get_irn_mode(param));
+
+ SET_IA32_ORIG_NODE(cnt, node);
+ return fix_mem_proj(cnt, &am);
+}
+
+/**
+ * Transform builtin ffs (find first set: 1 + index of the lowest set bit,
+ * or 0 when the input is 0).
+ *
+ * Computed as (bsf(x) | -(x == 0)) + 1: for x == 0 the or forces all bits
+ * to one, so the final increment wraps the result to 0.
+ */
+static ir_node *gen_ffs(ir_node *node)
+{
+ ir_node *bsf = gen_unop_AM(node, new_bd_ia32_Bsf);
+ ir_node *real = skip_Proj(bsf);
+ dbg_info *dbgi = get_irn_dbg_info(real);
+ ir_node *block = get_nodes_block(real);
+ ir_node *flag, *set, *conv, *neg, *or;
+
+ /* bsf x; we also need the flags output, so force the node to mode_T */
+ if (get_irn_mode(real) != mode_T) {
+ set_irn_mode(real, mode_T);
+ bsf = new_r_Proj(current_ir_graph, block, real, mode_Iu, pn_ia32_res);
+ }
+
+ flag = new_r_Proj(current_ir_graph, block, real, mode_b, pn_ia32_flags);
+
+ /* sete: produces 1 iff the bsf input was zero (Z flag set) */
+ set = new_bd_ia32_Set(dbgi, block, flag, pn_Cmp_Eq, 0);
+ SET_IA32_ORIG_NODE(set, node);
+
+ /* conv to 32bit */
+ conv = new_bd_ia32_Conv_I2I8Bit(dbgi, block, noreg_GP, noreg_GP, nomem, set, mode_Bu);
+ SET_IA32_ORIG_NODE(conv, node);
+
+ /* neg: 0, or all-ones for a zero input */
+ neg = new_bd_ia32_Neg(dbgi, block, conv);
+
+ /* or */
+ or = new_bd_ia32_Or(dbgi, block, noreg_GP, noreg_GP, nomem, bsf, neg);
+ set_ia32_commutative(or);
+
+ /* add 1 */
+ return new_bd_ia32_Add(dbgi, block, noreg_GP, noreg_GP, nomem, or, ia32_create_Immediate(NULL, 0, 1));
+}
+
+/**
+ * Transform builtin clz (count leading zeros).
+ *
+ * bsr yields the index of the highest set bit; since that index lies in
+ * [0, 31], clz(x) == 31 - bsr(x) == bsr(x) ^ 31 for non-zero x.
+ */
+static ir_node *gen_clz(ir_node *node)
+{
+ ir_node *bsr_proj = gen_unop_AM(node, new_bd_ia32_Bsr);
+ ir_node *bsr_node = skip_Proj(bsr_proj);
+ dbg_info *dbgi = get_irn_dbg_info(bsr_node);
+ ir_node *block = get_nodes_block(bsr_node);
+
+ return new_bd_ia32_Xor(dbgi, block, noreg_GP, noreg_GP, nomem, bsr_proj,
+ ia32_create_Immediate(NULL, 0, 31));
+}
+
+/**
+ * Transform builtin ctz (count trailing zeros).
+ *
+ * bsf directly delivers the index of the lowest set bit.
+ */
+static ir_node *gen_ctz(ir_node *node)
+{
+ return gen_unop_AM(node, new_bd_ia32_Bsf);
+}
+
+/**
+ * Transform builtin parity.
+ *
+ * Compares the operand against 0 and materializes the parity flag with setp.
+ * NOTE(review): the x86 parity flag only reflects the least significant
+ * result byte — confirm this matches the intended semantics for operands
+ * wider than 8 bit.
+ */
+static ir_node *gen_parity(ir_node *node)
+{
+ ir_node *param = get_Builtin_param(node, 0);
+ dbg_info *dbgi = get_irn_dbg_info(node);
+
+ ir_node *block = get_nodes_block(node);
+
+ ir_node *new_block = be_transform_node(block);
+ ir_node *imm, *cmp, *new_node;
+
+ ia32_address_mode_t am;
+ ia32_address_t *addr = &am.addr;
+
+
+ /* cmp param, 0 */
+ match_arguments(&am, block, NULL, param, NULL, match_am);
+ imm = ia32_create_Immediate(NULL, 0, 0);
+ cmp = new_bd_ia32_Cmp(dbgi, new_block, addr->base, addr->index,
+ addr->mem, imm, am.new_op2, am.ins_permuted, 0);
+ set_am_attributes(cmp, &am);
+ set_ia32_ls_mode(cmp, mode_Iu);
+
+ SET_IA32_ORIG_NODE(cmp, node);
+
+ cmp = fix_mem_proj(cmp, &am);
+
+ /* setp */
+ new_node = new_bd_ia32_Set(dbgi, new_block, cmp, ia32_pn_Cmp_parity, 0);
+ SET_IA32_ORIG_NODE(new_node, node);
+
+ /* conv to 32bit */
+ new_node = new_bd_ia32_Conv_I2I8Bit(dbgi, new_block, noreg_GP, noreg_GP,
+ nomem, new_node, mode_Bu);
+ SET_IA32_ORIG_NODE(new_node, node);
+ return new_node;
+}
+
+/**
+ * Transform builtin popcount (number of set bits).
+ *
+ * Uses the popcnt instruction when available; otherwise emits the classic
+ * parallel bit-count, summing adjacent 1/2/4/8/16 bit fields step by step.
+ * The shift stages must be RIGHT shifts (Shr) — using Shl here would
+ * compute x << n instead of the x >> n the algorithm requires.
+ */
+static ir_node *gen_popcount(ir_node *node) {
+ ir_node *param = get_Builtin_param(node, 0);
+ dbg_info *dbgi = get_irn_dbg_info(node);
+
+ ir_node *block = get_nodes_block(node);
+ ir_node *new_block = be_transform_node(block);
+
+ ir_node *new_param;
+ ir_node *imm, *simm, *m1, *s1, *s2, *s3, *s4, *s5, *m2, *m3, *m4, *m5, *m6, *m7, *m8, *m9, *m10, *m11, *m12, *m13;
+
+ /* check for SSE4.2 or SSE4a and use the popcnt instruction */
+ if (ia32_cg_config.use_popcnt) {
+ ia32_address_mode_t am;
+ ia32_address_t *addr = &am.addr;
+ ir_node *cnt;
+
+ match_arguments(&am, block, NULL, param, NULL, match_am | match_16bit_am);
+
+ cnt = new_bd_ia32_Popcnt(dbgi, new_block, addr->base, addr->index, addr->mem, am.new_op2);
+ set_am_attributes(cnt, &am);
+ set_ia32_ls_mode(cnt, get_irn_mode(param));
+
+ SET_IA32_ORIG_NODE(cnt, node);
+ return fix_mem_proj(cnt, &am);
+ }
+
+ new_param = be_transform_node(param);
+
+ /* do the standard popcount algo */
+
+ /* m1 = x & 0x55555555 */
+ imm = ia32_create_Immediate(NULL, 0, 0x55555555);
+ m1 = new_bd_ia32_And(dbgi, new_block, noreg_GP, noreg_GP, nomem, new_param, imm);
+
+ /* s1 = x >> 1 */
+ simm = ia32_create_Immediate(NULL, 0, 1);
+ s1 = new_bd_ia32_Shr(dbgi, new_block, new_param, simm);
+
+ /* m2 = s1 & 0x55555555 */
+ m2 = new_bd_ia32_And(dbgi, new_block, noreg_GP, noreg_GP, nomem, s1, imm);
+
+ /* m3 = m1 + m2 */
+ m3 = new_bd_ia32_Lea(dbgi, new_block, m2, m1);
+
+ /* m4 = m3 & 0x33333333 */
+ imm = ia32_create_Immediate(NULL, 0, 0x33333333);
+ m4 = new_bd_ia32_And(dbgi, new_block, noreg_GP, noreg_GP, nomem, m3, imm);
+
+ /* s2 = m3 >> 2 */
+ simm = ia32_create_Immediate(NULL, 0, 2);
+ s2 = new_bd_ia32_Shr(dbgi, new_block, m3, simm);
+
+ /* m5 = s2 & 0x33333333 */
+ m5 = new_bd_ia32_And(dbgi, new_block, noreg_GP, noreg_GP, nomem, s2, imm);
+
+ /* m6 = m4 + m5 */
+ m6 = new_bd_ia32_Lea(dbgi, new_block, m4, m5);
+
+ /* m7 = m6 & 0x0F0F0F0F */
+ imm = ia32_create_Immediate(NULL, 0, 0x0F0F0F0F);
+ m7 = new_bd_ia32_And(dbgi, new_block, noreg_GP, noreg_GP, nomem, m6, imm);
+
+ /* s3 = m6 >> 4 */
+ simm = ia32_create_Immediate(NULL, 0, 4);
+ s3 = new_bd_ia32_Shr(dbgi, new_block, m6, simm);
+
+ /* m8 = s3 & 0x0F0F0F0F */
+ m8 = new_bd_ia32_And(dbgi, new_block, noreg_GP, noreg_GP, nomem, s3, imm);
+
+ /* m9 = m7 + m8 */
+ m9 = new_bd_ia32_Lea(dbgi, new_block, m7, m8);
+
+ /* m10 = m9 & 0x00FF00FF */
+ imm = ia32_create_Immediate(NULL, 0, 0x00FF00FF);
+ m10 = new_bd_ia32_And(dbgi, new_block, noreg_GP, noreg_GP, nomem, m9, imm);
+
+ /* s4 = m9 >> 8 */
+ simm = ia32_create_Immediate(NULL, 0, 8);
+ s4 = new_bd_ia32_Shr(dbgi, new_block, m9, simm);
+
+ /* m11 = s4 & 0x00FF00FF */
+ m11 = new_bd_ia32_And(dbgi, new_block, noreg_GP, noreg_GP, nomem, s4, imm);
+
+ /* m12 = m10 + m11 */
+ m12 = new_bd_ia32_Lea(dbgi, new_block, m10, m11);
+
+ /* m13 = m12 & 0x0000FFFF */
+ imm = ia32_create_Immediate(NULL, 0, 0x0000FFFF);
+ m13 = new_bd_ia32_And(dbgi, new_block, noreg_GP, noreg_GP, nomem, m12, imm);
+
+ /* s5 = m12 >> 16 */
+ simm = ia32_create_Immediate(NULL, 0, 16);
+ s5 = new_bd_ia32_Shr(dbgi, new_block, m12, simm);
+
+ /* res = m13 + s5 */
+ return new_bd_ia32_Lea(dbgi, new_block, m13, s5);
+}
+
+/**
+ * Transform builtin byte swap.
+ *
+ * Uses the bswap instruction on i486+; otherwise builds the 32bit swap
+ * from shifts and masks:
+ *   (x << 24) | ((x << 8) & 0x00FF0000) | ((x >> 8) & 0x0000FF00) | (x >> 24)
+ * Note the masks: (x << 8) moves byte 1 into byte 2 (mask 0xFF0000) and
+ * (x >> 8) moves byte 2 into byte 1 (mask 0xFF00) — swapping them would
+ * keep the wrong bytes.
+ */
+static ir_node *gen_bswap(ir_node *node) {
+ ir_node *param = be_transform_node(get_Builtin_param(node, 0));
+ dbg_info *dbgi = get_irn_dbg_info(node);
+
+ ir_node *block = get_nodes_block(node);
+ ir_node *new_block = be_transform_node(block);
+ ir_mode *mode = get_irn_mode(param);
+ unsigned size = get_mode_size_bits(mode);
+ ir_node *m1, *m2, *m3, *m4, *s1, *s2, *s3, *s4;
+
+ switch (size) {
+ case 32:
+ if (ia32_cg_config.use_i486) {
+ /* swap available */
+ return new_bd_ia32_Bswap(dbgi, new_block, param);
+ }
+ s1 = new_bd_ia32_Shl(dbgi, new_block, param, ia32_create_Immediate(NULL, 0, 24));
+ s2 = new_bd_ia32_Shl(dbgi, new_block, param, ia32_create_Immediate(NULL, 0, 8));
+
+ /* byte 1 -> byte 2 */
+ m1 = new_bd_ia32_And(dbgi, new_block, noreg_GP, noreg_GP, nomem, s2, ia32_create_Immediate(NULL, 0, 0xFF0000));
+ m2 = new_bd_ia32_Lea(dbgi, new_block, s1, m1);
+
+ s3 = new_bd_ia32_Shr(dbgi, new_block, param, ia32_create_Immediate(NULL, 0, 8));
+
+ /* byte 2 -> byte 1 */
+ m3 = new_bd_ia32_And(dbgi, new_block, noreg_GP, noreg_GP, nomem, s3, ia32_create_Immediate(NULL, 0, 0xFF00));
+ m4 = new_bd_ia32_Lea(dbgi, new_block, m2, m3);
+
+ s4 = new_bd_ia32_Shr(dbgi, new_block, param, ia32_create_Immediate(NULL, 0, 24));
+ return new_bd_ia32_Lea(dbgi, new_block, m4, s4);
+
+ case 16:
+ /* swap16 always available */
+ return new_bd_ia32_Bswap16(dbgi, new_block, param);
+
+ default:
+ panic("Invalid bswap size (%d)", size);
+ }
+}
+
+/**
+ * Transform builtin outport: write a value to an I/O port.
+ */
+static ir_node *gen_outport(ir_node *node) {
+ ir_node *new_port = create_immediate_or_transform(get_Builtin_param(node, 0), 0);
+ ir_node *value_op = get_Builtin_param(node, 1);
+ ir_node *new_value = be_transform_node(value_op);
+ ir_node *new_block = be_transform_node(get_nodes_block(node));
+ ir_node *new_mem = be_transform_node(get_Builtin_mem(node));
+ dbg_info *dbgi = get_irn_dbg_info(node);
+ ir_node *out_node;
+
+ out_node = new_bd_ia32_Outport(dbgi, new_block, new_port, new_value, new_mem);
+ /* the access width is the mode of the written value */
+ set_ia32_ls_mode(out_node, get_irn_mode(value_op));
+ return out_node;
+}
+
+/**
+ * Transform builtin inport: read a value from an I/O port.
+ *
+ * The access width is taken from the builtin's first result type.
+ */
+static ir_node *gen_inport(ir_node *node) {
+ ir_type *tp = get_Builtin_type(node);
+ ir_type *rstp = get_method_res_type(tp, 0);
+ ir_mode *mode = get_type_mode(rstp);
+ ir_node *port = create_immediate_or_transform(get_Builtin_param(node, 0), 0);
+ ir_node *block = be_transform_node(get_nodes_block(node));
+ ir_node *mem = be_transform_node(get_Builtin_mem(node));
+ dbg_info *dbgi = get_irn_dbg_info(node);
+
+ ir_node *res = new_bd_ia32_Inport(dbgi, block, port, mem);
+ set_ia32_ls_mode(res, mode);
+
+ /* check for missing Result Proj */
+ return res;
+}
+
+/**
+ * Transform a builtin inner trampoline.
+ *
+ * Writes a 10 byte trampoline to *ptr:
+ *   B9 <env:4>   mov ecx, <env>
+ *   E9 <rel:4>   jmp rel <callee>
+ * The stores are chained through addr.mem/addr.offset in instruction-byte
+ * order. Returns a Tuple of (last store, trampoline address).
+ */
+static ir_node *gen_inner_trampoline(ir_node *node) {
+ ir_node *ptr = get_Builtin_param(node, 0);
+ ir_node *callee = get_Builtin_param(node, 1);
+ ir_node *env = be_transform_node(get_Builtin_param(node, 2));
+ ir_node *mem = get_Builtin_mem(node);
+ ir_node *block = get_nodes_block(node);
+ ir_node *new_block = be_transform_node(block);
+ ir_node *val;
+ ir_node *store;
+ ir_node *rel;
+ ir_node *trampoline;
+ ir_node *in[2];
+ dbg_info *dbgi = get_irn_dbg_info(node);
+ ia32_address_t addr;
+
+ /* construct store address */
+ memset(&addr, 0, sizeof(addr));
+ ia32_create_address_mode(&addr, ptr, 0);
+
+ if (addr.base == NULL) {
+ addr.base = noreg_GP;
+ } else {
+ addr.base = be_transform_node(addr.base);
+ }
+
+ if (addr.index == NULL) {
+ addr.index = noreg_GP;
+ } else {
+ addr.index = be_transform_node(addr.index);
+ }
+ addr.mem = be_transform_node(mem);
+
+ /* mov ecx, <env>: first the 0xB9 opcode byte ... */
+ val = ia32_create_Immediate(NULL, 0, 0xB9);
+ store = new_bd_ia32_Store8Bit(dbgi, new_block, addr.base,
+ addr.index, addr.mem, val);
+ set_irn_pinned(store, get_irn_pinned(node));
+ set_ia32_op_type(store, ia32_AddrModeD);
+ set_ia32_ls_mode(store, mode_Bu);
+ set_address(store, &addr);
+ addr.mem = store;
+ addr.offset += 1;
+
+ /* ... then the 4 byte immediate operand <env> */
+ store = new_bd_ia32_Store(dbgi, new_block, addr.base,
+ addr.index, addr.mem, env);
+ set_irn_pinned(store, get_irn_pinned(node));
+ set_ia32_op_type(store, ia32_AddrModeD);
+ set_ia32_ls_mode(store, mode_Iu);
+ set_address(store, &addr);
+ addr.mem = store;
+ addr.offset += 4;
+
+ /* jmp rel <callee>: the 0xE9 opcode byte */
+ val = ia32_create_Immediate(NULL, 0, 0xE9);
+ store = new_bd_ia32_Store8Bit(dbgi, new_block, addr.base,
+ addr.index, addr.mem, val);
+ set_irn_pinned(store, get_irn_pinned(node));
+ set_ia32_op_type(store, ia32_AddrModeD);
+ set_ia32_ls_mode(store, mode_Bu);
+ set_address(store, &addr);
+ addr.mem = store;
+ addr.offset += 1;
+
+ trampoline = be_transform_node(ptr);
+
+ /* the callee is typically an immediate; the -10 makes the displacement
+ * relative to the end of the 10 byte trampoline */
+ if (is_SymConst(callee)) {
+ rel = new_bd_ia32_Const(dbgi, new_block, get_SymConst_entity(callee), 0, -10);
+ } else {
+ rel = new_bd_ia32_Lea(dbgi, new_block, be_transform_node(callee), ia32_create_Immediate(NULL, 0, -10));
+ }
+ rel = new_bd_ia32_Sub(dbgi, new_block, noreg_GP, noreg_GP, nomem, rel, trampoline);
+
+ /* store the 4 byte jmp displacement */
+ store = new_bd_ia32_Store(dbgi, new_block, addr.base,
+ addr.index, addr.mem, rel);
+ set_irn_pinned(store, get_irn_pinned(node));
+ set_ia32_op_type(store, ia32_AddrModeD);
+ set_ia32_ls_mode(store, mode_Iu);
+ set_address(store, &addr);
+
+ in[0] = store;
+ in[1] = trampoline;
+
+ return new_r_Tuple(current_ir_graph, new_block, 2, in);
+}
+
+/**
+ * Transform a Builtin node: dispatch to the kind-specific transformer.
+ * Panics for builtin kinds not implemented on ia32.
+ */
+static ir_node *gen_Builtin(ir_node *node) {
+ ir_builtin_kind kind = get_Builtin_kind(node);
+
+ switch (kind) {
+ case ir_bk_trap:
+ return gen_trap(node);
+ case ir_bk_debugbreak:
+ return gen_debugbreak(node);
+ case ir_bk_return_address:
+ return gen_return_address(node);
+ case ir_bk_frame_addess:
+ return gen_frame_address(node);
+ case ir_bk_prefetch:
+ return gen_prefetch(node);
+ case ir_bk_ffs:
+ return gen_ffs(node);
+ case ir_bk_clz:
+ return gen_clz(node);
+ case ir_bk_ctz:
+ return gen_ctz(node);
+ case ir_bk_parity:
+ return gen_parity(node);
+ case ir_bk_popcount:
+ return gen_popcount(node);
+ case ir_bk_bswap:
+ return gen_bswap(node);
+ case ir_bk_outport:
+ return gen_outport(node);
+ case ir_bk_inport:
+ return gen_inport(node);
+ case ir_bk_inner_trampoline:
+ return gen_inner_trampoline(node);
+ }
+ panic("Builtin %s not implemented in IA32", get_builtin_kind_name(kind));
+}
+
+/**
+ * Transform a Proj(Builtin) node: map the Builtin result/memory Projs onto
+ * the outputs of the already transformed ia32 node.
+ */
+static ir_node *gen_Proj_Builtin(ir_node *proj) {
+ ir_node *node = get_Proj_pred(proj);
+ ir_node *new_node = be_transform_node(node);
+ ir_builtin_kind kind = get_Builtin_kind(node);
+
+ switch (kind) {
+ /* kinds whose transformer already returns the single result value */
+ case ir_bk_return_address:
+ case ir_bk_frame_addess:
+ case ir_bk_ffs:
+ case ir_bk_clz:
+ case ir_bk_ctz:
+ case ir_bk_parity:
+ case ir_bk_popcount:
+ case ir_bk_bswap:
+ assert(get_Proj_proj(proj) == pn_Builtin_1_result);
+ return new_node;
+ /* kinds whose transformer returns the memory value */
+ case ir_bk_trap:
+ case ir_bk_debugbreak:
+ case ir_bk_prefetch:
+ case ir_bk_outport:
+ assert(get_Proj_proj(proj) == pn_Builtin_M);
+ return new_node;
+ case ir_bk_inport:
+ if (get_Proj_proj(proj) == pn_Builtin_1_result) {
+ return new_r_Proj(current_ir_graph, get_nodes_block(new_node),
+ new_node, get_irn_mode(proj), pn_ia32_Inport_res);
+ } else {
+ assert(get_Proj_proj(proj) == pn_Builtin_M);
+ return new_r_Proj(current_ir_graph, get_nodes_block(new_node),
+ new_node, mode_M, pn_ia32_Inport_M);
+ }
+ case ir_bk_inner_trampoline:
+ /* gen_inner_trampoline returns a Tuple (memory, trampoline address) */
+ if (get_Proj_proj(proj) == pn_Builtin_1_result) {
+ return get_Tuple_pred(new_node, 1);
+ } else {
+ assert(get_Proj_proj(proj) == pn_Builtin_M);
+ return get_Tuple_pred(new_node, 0);
+ }
+ }
+ panic("Builtin %s not implemented in IA32", get_builtin_kind_name(kind));
+}
+