2 * Copyright (C) 1995-2010 University of Karlsruhe. All right reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief code selection (transform FIRM into SPARC FIRM)
29 #include "irgraph_t.h"
41 #include "../benode.h"
43 #include "../beutil.h"
44 #include "../betranshlp.h"
45 #include "bearch_sparc_t.h"
47 #include "sparc_nodes_attr.h"
48 #include "sparc_transform.h"
49 #include "sparc_new_nodes.h"
50 #include "gen_sparc_new_nodes.h"
52 #include "gen_sparc_regalloc_if.h"
56 DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
58 static sparc_code_gen_t *env_cg;
60 static ir_node *gen_SymConst(ir_node *node);
/* True iff values of this mode live in general-purpose (integer) registers:
 * plain integers and references/pointers. */
63 static inline int mode_needs_gp_reg(ir_mode *mode)
65 	return mode_is_int(mode) || mode_is_reference(mode);
69  * Create an And that will zero out upper bits.
71  * @param dbgi      debug info
72  * @param block     the basic block
73  * @param op        the original node
74  * @param src_bits  number of lower bits that will remain
/* 8 bit: single AND with 0xFF; 16 bit: shift-left 16 then logical
 * shift-right 16.  Any other width panics. */
76 static ir_node *gen_zero_extension(dbg_info *dbgi, ir_node *block, ir_node *op,
80 		return new_bd_sparc_And_imm(dbgi, block, op, 0xFF);
81 	} else if (src_bits == 16) {
82 		ir_node *lshift = new_bd_sparc_ShiftLL_imm(dbgi, block, op, 16);
83 		ir_node *rshift = new_bd_sparc_ShiftLR_imm(dbgi, block, lshift, 16);
86 		panic("zero extension only supported for 8 and 16 bits");
91  * Generate code for a sign extension.
/* Shift the value left so the sign bit of the src_bits-wide value lands in
 * bit 31, then arithmetic-shift it back; replicates the sign bit upward. */
93 static ir_node *gen_sign_extension(dbg_info *dbgi, ir_node *block, ir_node *op,
96 	int shift_width = 32 - src_bits;
97 	ir_node *lshift_node = new_bd_sparc_ShiftLL_imm(dbgi, block, op, shift_width);
98 	ir_node *rshift_node = new_bd_sparc_ShiftRA_imm(dbgi, block, lshift_node, shift_width);
103  * returns true if it is assured, that the upper bits of a node are "clean"
104  * which means for a 16 or 8 bit value, that the upper bits in the register
105  * are 0 for unsigned and a copy of the last significant bit for signed
/* NOTE(review): the visible body ignores transformed_node entirely — this
 * appears to be a conservative stub; confirm against the full file. */
108 static bool upper_bits_clean(ir_node *transformed_node, ir_mode *mode)
110 	(void) transformed_node;
/* Extend op (whose meaningful width is given by orig_mode) to 32 bit,
 * choosing sign- or zero-extension by the signedness of orig_mode. */
116 static ir_node *gen_extension(dbg_info *dbgi, ir_node *block, ir_node *op,
119 	int bits = get_mode_size_bits(orig_mode);
123 	if (mode_is_signed(orig_mode)) {
124 		return gen_sign_extension(dbgi, block, op, bits);
126 		return gen_zero_extension(dbgi, block, op, bits);
132  * Creates a possible DAG for a constant.
/* Values outside the signed 13-bit immediate range [-4096, 4095] need a
 * HiImm/LoImm pair (SETHI + OR style); small values fit one Mov_imm. */
134 static ir_node *create_const_graph_value(dbg_info *dbgi, ir_node *block,
139 	// we need to load hi & lo separately
140 	if (value < -4096 || value > 4095) {
141 		ir_node *hi = new_bd_sparc_HiImm(dbgi, block, (int) value);
142 		result = new_bd_sparc_LoImm(dbgi, block, hi, value);
145 		result = new_bd_sparc_Mov_imm(dbgi, block, (int) value);
146 		be_dep_on_frame(result);
154  * Create a DAG constructing a given Const.
156  * @param irn  a Firm const
/* Reference-mode tarvals are converted to mode_Iu first (legal on 32-bit
 * SPARC V8, asserted below), then handed to create_const_graph_value(). */
158 static ir_node *create_const_graph(ir_node *irn, ir_node *block)
160 	tarval  *tv    = get_Const_tarval(irn);
161 	ir_mode *mode  = get_tarval_mode(tv);
162 	dbg_info *dbgi = get_irn_dbg_info(irn);
166 	if (mode_is_reference(mode)) {
167 		/* SPARC V8 is 32bit, so we can safely convert a reference tarval into Iu */
168 		assert(get_mode_size_bits(mode) == get_mode_size_bits(mode_Iu));
169 		tv = tarval_convert_to(tv, mode_Iu);
172 	value = get_tarval_long(tv);
173 	return create_const_graph_value(dbgi, block, value);
177  * create a DAG to load fp constant. sparc only supports loading from global memory
/* Unimplemented: always panics. */
179 static ir_node *create_fp_const_graph(ir_node *irn, ir_node *block)
183 	panic("FP constants not implemented");
/* Matcher flags for gen_helper_binop: operands may be swapped to use an
 * immediate form (COMMUTATIVE); Downconvs on operands may be skipped
 * (SIZE_NEUTRAL). */
189 	MATCH_COMMUTATIVE  = 1 << 0,
190 	MATCH_SIZE_NEUTRAL = 1 << 1,
/* Constructor signatures for the three binop node flavors: reg/reg,
 * fp (carries a mode), and reg/13-bit-immediate. */
193 typedef ir_node* (*new_binop_reg_func) (dbg_info *dbgi, ir_node *block, ir_node *op1, ir_node *op2);
194 typedef ir_node* (*new_binop_fp_func) (dbg_info *dbgi, ir_node *block, ir_node *op1, ir_node *op2, ir_mode *mode);
195 typedef ir_node* (*new_binop_imm_func) (dbg_info *dbgi, ir_node *block, ir_node *op1, int simm13);
198  * checks if a node's value can be encoded as a immediate
/* True iff node is a Const whose value fits SPARC's signed 13-bit
 * immediate field [-4096, 4095]. */
201 static bool is_imm_encodeable(const ir_node *node)
205 	//assert(mode_is_float_vector(get_irn_mode(node)));
210 	val = get_tarval_long(get_Const_tarval(node));
212 	return !(val < -4096 || val > 4095);
216  * helper function for binop operations
218  * @param new_binop_reg_func register generation function ptr
219  * @param new_binop_imm_func immediate generation function ptr
/* Transforms a generic Firm binop: prefers the immediate form when the
 * right (or, for commutative ops, left) operand is an encodeable Const,
 * otherwise emits the reg/reg form on both transformed operands. */
221 static ir_node *gen_helper_binop(ir_node *node, match_flags_t flags,
222 				new_binop_reg_func new_reg, new_binop_imm_func new_imm)
224 	ir_node  *block   = be_transform_node(get_nodes_block(node));
225 	ir_node  *op1     = get_binop_left(node);
227 	ir_node  *op2     = get_binop_right(node);
229 	dbg_info *dbgi    = get_irn_dbg_info(node);
232 	if (flags & MATCH_SIZE_NEUTRAL) {
/* NOTE(review): arm_skip_downconv is an ARM-backend helper being called
 * from the SPARC backend — presumably copied from the ARM transformer.
 * Confirm this resolves/links, or that a sparc_skip_downconv exists. */
233 		op1 = arm_skip_downconv(op1);
234 		op2 = arm_skip_downconv(op2);
236 		assert(get_mode_size_bits(get_irn_mode(node)) == 32);
239 	if (is_imm_encodeable(op2)) {
240 		ir_node *new_op1 = be_transform_node(op1);
241 		return new_imm(dbgi, block, new_op1, get_tarval_long(get_Const_tarval(op2)));
244 	new_op2 = be_transform_node(op2);
246 	if ((flags & MATCH_COMMUTATIVE) && is_imm_encodeable(op1)) {
247 		return new_imm(dbgi, block, new_op2, get_tarval_long(get_Const_tarval(op1)) );
250 	new_op1 = be_transform_node(op1);
252 	return new_reg(dbgi, block, new_op1, new_op2);
256  * helper function for FP binop operations
/* FP variant: no immediate form exists, so both operands are always
 * transformed and the reg/reg constructor (which takes the result mode)
 * is used. */
258 static ir_node *gen_helper_binfpop(ir_node *node, new_binop_fp_func new_reg)
260 	ir_node  *block   = be_transform_node(get_nodes_block(node));
261 	ir_node  *op1     = get_binop_left(node);
263 	ir_node  *op2     = get_binop_right(node);
265 	dbg_info *dbgi    = get_irn_dbg_info(node);
267 	new_op2 = be_transform_node(op2);
268 	new_op1 = be_transform_node(op1);
269 	return new_reg(dbgi, block, new_op1, new_op2, get_irn_mode(node));
273  * Creates an sparc Add.
275  * @param node   FIRM node
276  * @return the created sparc Add node
/* Integer only; FP addition is not implemented yet and panics. */
278 static ir_node *gen_Add(ir_node *node)
280 	ir_mode  *mode  = get_irn_mode(node);
281 	ir_node  *block = be_transform_node(get_nodes_block(node));
282 	dbg_info *dbgi  = get_irn_dbg_info(node);
287 	if (mode_is_float(mode))
288 		panic("FP not implemented yet");
290 	return gen_helper_binop(node, MATCH_COMMUTATIVE | MATCH_SIZE_NEUTRAL, new_bd_sparc_Add_reg, new_bd_sparc_Add_imm);
295  * Creates an sparc Sub.
297  * @param node       FIRM node
298  * @return the created sparc Sub node
/* Like gen_Add but without MATCH_COMMUTATIVE (subtraction is not). */
300 static ir_node *gen_Sub(ir_node *node)
302 	ir_mode  *mode  = get_irn_mode(node);
303 	ir_node  *block = be_transform_node(get_nodes_block(node));
304 	dbg_info *dbgi  = get_irn_dbg_info(node);
309 	if (mode_is_float(mode))
310 		panic("FP not implemented yet");
312 	return gen_helper_binop(node, MATCH_SIZE_NEUTRAL, new_bd_sparc_Sub_reg, new_bd_sparc_Sub_imm);
319  * @param node    the ir Load node
320  * @return the created sparc Load node
/* Transforms a Firm Load into a sparc Load; the original pinned state is
 * carried over so the scheduler treats it the same way.  FP loads panic. */
322 static ir_node *gen_Load(ir_node *node)
324 	ir_mode  *mode     = get_Load_mode(node);
325 	ir_node  *block    = be_transform_node(get_nodes_block(node));
326 	ir_node  *ptr      = get_Load_ptr(node);
327 	ir_node  *new_ptr  = be_transform_node(ptr);
328 	ir_node  *mem      = get_Load_mem(node);
329 	ir_node  *new_mem  = be_transform_node(mem);
330 	dbg_info *dbgi     = get_irn_dbg_info(node);
331 	ir_node  *new_load = NULL;
333 	if (mode_is_float(mode))
334 		panic("SPARC: no fp implementation yet");
336 	new_load = new_bd_sparc_Load(dbgi, block, new_ptr, new_mem, mode, NULL, 0, 0, false);
337 	set_irn_pinned(new_load, get_irn_pinned(node));
345  * Transforms a Store.
347  * @param node    the ir Store node
348  * @return the created sparc Store node
/* The store's access mode is taken from the stored value's mode.
 * FP stores panic (no fp implementation yet). */
350 static ir_node *gen_Store(ir_node *node)
352 	ir_node  *block    = be_transform_node(get_nodes_block(node));
353 	ir_node  *ptr      = get_Store_ptr(node);
354 	ir_node  *new_ptr  = be_transform_node(ptr);
355 	ir_node  *mem      = get_Store_mem(node);
356 	ir_node  *new_mem  = be_transform_node(mem);
357 	ir_node  *val      = get_Store_value(node);
358 	ir_node  *new_val  = be_transform_node(val);
359 	ir_mode  *mode     = get_irn_mode(val);
360 	dbg_info *dbgi     = get_irn_dbg_info(node);
361 	ir_node  *new_store = NULL;
363 	if (mode_is_float(mode))
364 		panic("SPARC: no fp implementation yet");
366 	new_store = new_bd_sparc_Store(dbgi, block, new_ptr, new_val, new_mem, mode, NULL, 0, 0, false);
372  * Creates an sparc Mul.
373  * returns the lower 32bits of the 64bit multiply result
375  * @return the created sparc Mul node
/* Integer path marks the node as flag-modifying and projects the low-word
 * result out of the 64-bit product.  FP path delegates to fMul. */
377 static ir_node *gen_Mul(ir_node *node) {
378 	ir_mode  *mode  = get_irn_mode(node);
379 	dbg_info *dbgi  = get_irn_dbg_info(node);
382 	ir_node *proj_res_low;
384 	if (mode_is_float(mode)) {
385 		mul = gen_helper_binfpop(node, new_bd_sparc_fMul);
389 	assert(mode_is_data(mode));
390 	mul = gen_helper_binop(node, MATCH_COMMUTATIVE | MATCH_SIZE_NEUTRAL, new_bd_sparc_Mul_reg, new_bd_sparc_Mul_imm);
391 	arch_irn_add_flags(mul, arch_irn_flags_modify_flags);
393 	proj_res_low = new_rd_Proj(dbgi, mul, mode_Iu, pn_sparc_Mul_low);
398  * Creates an sparc Mulh.
399  * Mulh returns the upper 32bits of a mul instruction
401  * @return the created sparc Mulh node
403 static ir_node *gen_Mulh(ir_node *node) {
404 	ir_mode  *mode  = get_irn_mode(node);
405 	dbg_info *dbgi  = get_irn_dbg_info(node);
408 	ir_node *proj_res_hi;
410 	if (mode_is_float(mode))
411 		panic("FP not supported yet");
414 	assert(mode_is_data(mode));
415 	mul = gen_helper_binop(node, MATCH_COMMUTATIVE | MATCH_SIZE_NEUTRAL, new_bd_sparc_Mulh_reg, new_bd_sparc_Mulh_imm);
416 	//arch_irn_add_flags(mul, arch_irn_flags_modify_flags);
/* NOTE(review): the proj constant is pn_sparc_Mulh_low although this node
 * is documented to deliver the UPPER 32 bits — either the sparc_Mulh node
 * only has a "low" output that already holds the high word, or this is a
 * wrong proj number.  Verify against gen_sparc_new_nodes.h. */
417 	proj_res_hi = new_rd_Proj(dbgi, mul, mode_Iu, pn_sparc_Mulh_low);
422  * Creates an sparc Div.
424  * @return the created sparc Div node
/* Integer division only; callers get the quotient via gen_Proj_Div. */
426 static ir_node *gen_Div(ir_node *node) {
428 	ir_mode  *mode  = get_irn_mode(node);
432 	if (mode_is_float(mode))
433 		panic("FP not supported yet");
435 	//assert(mode_is_data(mode));
436 	div = gen_helper_binop(node, MATCH_SIZE_NEUTRAL, new_bd_sparc_Div_reg, new_bd_sparc_Div_imm);
442  * transform abs node:
/* Branch-free integer abs via the classic mask trick:
 *   sra  = op >> 31            (all-ones if negative, zero otherwise)
 *   xor  = op ^ sra            (conditional one's complement)
 *   sub  = xor - sra           (… + 1 when negative => two's complement)
 * The extra Mov before the shift presumably keeps op live in a separate
 * register — TODO confirm why it is needed. */
450 static ir_node *gen_Abs(ir_node *node) {
451 	ir_node  *block  = be_transform_node(get_nodes_block(node));
452 	ir_mode  *mode   = get_irn_mode(node);
453 	dbg_info *dbgi   = get_irn_dbg_info(node);
454 	ir_node  *op     = get_Abs_op(node);
456 	ir_node  *mov, *sra, *xor, *sub, *new_op;
458 	if (mode_is_float(mode))
459 		panic("FP not supported yet");
461 	new_op = be_transform_node(op);
463 	mov = new_bd_sparc_Mov_reg(dbgi, block, new_op);
464 	sra = new_bd_sparc_ShiftRA_imm(dbgi, block, mov, 31);
465 	xor = new_bd_sparc_Xor_reg(dbgi, block, new_op, sra);
466 	sub = new_bd_sparc_Sub_reg(dbgi, block, sra, xor);
472  * Transforms a Not node.
474  * @return the created ARM Not node
/* NOTE(review): the doc comment above says "ARM" — stale copy from the ARM
 * backend; this creates a sparc Not. */
476 static ir_node *gen_Not(ir_node *node)
478 	ir_node  *block   = be_transform_node(get_nodes_block(node));
479 	ir_node  *op      = get_Not_op(node);
480 	ir_node  *new_op  = be_transform_node(op);
481 	dbg_info *dbgi    = get_irn_dbg_info(node);
483 	return new_bd_sparc_Not(dbgi, block, new_op);
/* Bitwise AND: commutative, immediate form used when possible. */
486 static ir_node *gen_And(ir_node *node)
488 	ir_mode  *mode  = get_irn_mode(node);
489 	ir_node  *block = be_transform_node(get_nodes_block(node));
490 	dbg_info *dbgi  = get_irn_dbg_info(node);
495 	if (mode_is_float(mode))
496 		panic("FP not implemented yet");
498 	return gen_helper_binop(node, MATCH_COMMUTATIVE, new_bd_sparc_And_reg, new_bd_sparc_And_imm);
/* Bitwise OR: same pattern as gen_And. */
501 static ir_node *gen_Or(ir_node *node)
503 	ir_mode  *mode  = get_irn_mode(node);
504 	ir_node  *block = be_transform_node(get_nodes_block(node));
505 	dbg_info *dbgi  = get_irn_dbg_info(node);
510 	if (mode_is_float(mode))
511 		panic("FP not implemented yet");
513 	return gen_helper_binop(node, MATCH_COMMUTATIVE, new_bd_sparc_Or_reg, new_bd_sparc_Or_imm);
/* Bitwise XOR: same pattern as gen_And. */
516 static ir_node *gen_Xor(ir_node *node)
518 	ir_mode  *mode  = get_irn_mode(node);
519 	ir_node  *block = be_transform_node(get_nodes_block(node));
520 	dbg_info *dbgi  = get_irn_dbg_info(node);
525 	if (mode_is_float(mode))
526 		panic("FP not implemented yet");
528 	return gen_helper_binop(node, MATCH_COMMUTATIVE, new_bd_sparc_Xor_reg, new_bd_sparc_Xor_imm);
/* Shift left (SLL). */
531 static ir_node *gen_Shl(ir_node *node)
533 	return gen_helper_binop(node, MATCH_SIZE_NEUTRAL, new_bd_sparc_ShiftLL_reg, new_bd_sparc_ShiftLL_imm);
/* Logical shift right (SRL). */
536 static ir_node *gen_Shr(ir_node *node)
538 	return gen_helper_binop(node, MATCH_SIZE_NEUTRAL, new_bd_sparc_ShiftLR_reg, new_bd_sparc_ShiftLR_imm);
/* Arithmetic shift right (SRA) — handles Firm's Shrs. */
541 static ir_node *gen_Shra(ir_node *node)
543 	return gen_helper_binop(node, MATCH_SIZE_NEUTRAL, new_bd_sparc_ShiftRA_reg, new_bd_sparc_ShiftRA_imm);
546 /****** TRANSFORM GENERAL BACKEND NODES ********/
549  * Transforms a Minus node.
/* Integer negation only; FP negation panics. */
552 static ir_node *gen_Minus(ir_node *node)
554 	ir_node  *block  = be_transform_node(get_nodes_block(node));
555 	ir_node  *op     = get_Minus_op(node);
556 	ir_node  *new_op = be_transform_node(op);
557 	dbg_info *dbgi   = get_irn_dbg_info(node);
558 	ir_mode  *mode   = get_irn_mode(node);
560 	if (mode_is_float(mode)) {
561 		panic("FP not implemented yet");
564 	assert(mode_is_data(mode));
565 	return new_bd_sparc_Minus(dbgi, block, new_op);
569  * Transforms a Const node.
571  * @param node    the ir Const node
572  * @return The transformed sparc node.
/* Dispatches to the FP constant path (which currently panics) or to the
 * integer constant DAG builder. */
574 static ir_node *gen_Const(ir_node *node)
576 	ir_node  *block = be_transform_node(get_nodes_block(node));
577 	ir_mode  *mode  = get_irn_mode(node);
578 	dbg_info *dbg   = get_irn_dbg_info(node);
582 	if (mode_is_float(mode)) {
583 		return create_fp_const_graph(node, block);
586 	return create_const_graph(node, block);
591  * @param node the ir AddSP node
592  * @return transformed sparc SAVE node
/* Transforms a be_AddSP (grow stack) — both old SP and size are
 * transformed, no real memory dependency (NoMem). */
594 static ir_node *gen_be_AddSP(ir_node *node)
596 	ir_node  *block  = be_transform_node(get_nodes_block(node));
597 	ir_node  *sz     = get_irn_n(node, be_pos_AddSP_size);
598 	ir_node  *new_sz = be_transform_node(sz);
599 	ir_node  *sp     = get_irn_n(node, be_pos_AddSP_old_sp);
600 	ir_node  *new_sp = be_transform_node(sp);
601 	dbg_info *dbgi   = get_irn_dbg_info(node);
602 	ir_node  *nomem  = new_NoMem();
605 	/* SPARC stack grows in reverse direction */
606 	new_op = new_bd_sparc_AddSP(dbgi, block, new_sp, new_sz, nomem);
614  * @param node the ir SubSP node
615  * @return transformed sparc SAVE node
/* Mirror of gen_be_AddSP for shrinking the stack. */
617 static ir_node *gen_be_SubSP(ir_node *node)
619 	ir_node  *block  = be_transform_node(get_nodes_block(node));
620 	ir_node  *sz     = get_irn_n(node, be_pos_SubSP_size);
621 	ir_node  *new_sz = be_transform_node(sz);
622 	ir_node  *sp     = get_irn_n(node, be_pos_SubSP_old_sp);
623 	ir_node  *new_sp = be_transform_node(sp);
624 	dbg_info *dbgi   = get_irn_dbg_info(node);
625 	ir_node  *nomem  = new_NoMem();
628 	/* SPARC stack grows in reverse direction */
629 	new_op = new_bd_sparc_SubSP(dbgi, block, new_sp, new_sz, nomem);
634  * transform FrameAddr
/* Rebuilds a frame-entity address as a sparc FrameAddr on the transformed
 * frame pointer. */
636 static ir_node *gen_be_FrameAddr(ir_node *node)
638 	ir_node   *block  = be_transform_node(get_nodes_block(node));
639 	ir_entity *ent    = be_get_frame_entity(node);
640 	ir_node   *fp     = be_get_FrameAddr_frame(node);
641 	ir_node   *new_fp = be_transform_node(fp);
642 	dbg_info  *dbgi   = get_irn_dbg_info(node);
644 	new_node = new_bd_sparc_FrameAddr(dbgi, block, new_fp, ent);
649  * Transform a be_Copy.
/* Duplicates the Copy and normalizes gp-register modes to mode_Iu.
 * NOTE(review): set_irn_mode is applied to `node` (the original), not to
 * `result` (the duplicate) — looks like it should target result; verify. */
651 static ir_node *gen_be_Copy(ir_node *node)
653 	ir_node *result = be_duplicate_node(node);
654 	ir_mode *mode   = get_irn_mode(result);
656 	if (mode_needs_gp_reg(mode)) {
657 		set_irn_mode(node, mode_Iu);
/* Duplicate the backend Call node; calls clobber the condition flags, so
 * mark the copy as flag-modifying. */
666 static ir_node *gen_be_Call(ir_node *node)
668 	ir_node *res = be_duplicate_node(node);
669 	arch_irn_add_flags(res, arch_irn_flags_modify_flags);
674  * Transforms a Switch.
/* Two passes over the Cond's out-Projs: the first finds the min/max case
 * numbers, the second renumbers every Proj downward by `translation` so
 * the jump table starts at 0.  The selector is then biased by the same
 * amount (Sub against a constant) before feeding the SwitchJmp. */
677 static ir_node *gen_SwitchJmp(ir_node *node)
679 	ir_node  *block        = be_transform_node(get_nodes_block(node));
680 	ir_node  *selector     = get_Cond_selector(node);
681 	dbg_info *dbgi         = get_irn_dbg_info(node);
682 	ir_node *new_op = be_transform_node(selector);
683 	ir_node *const_graph;
687 	const ir_edge_t *edge;
694 	foreach_out_edge(node, edge) {
695 		proj = get_edge_src_irn(edge);
696 		assert(is_Proj(proj) && "Only proj allowed at SwitchJmp");
698 		pn = get_Proj_proj(proj);
700 		min = pn<min ? pn : min;
701 		max = pn>max ? pn : max;
705 	n_projs = max - translation + 1;
707 	foreach_out_edge(node, edge) {
708 		proj = get_edge_src_irn(edge);
709 		assert(is_Proj(proj) && "Only proj allowed at SwitchJmp");
711 		pn = get_Proj_proj(proj) - translation;
712 		set_Proj_proj(proj, pn);
715 	const_graph = create_const_graph_value(dbgi, block, translation);
716 	sub = new_bd_sparc_Sub_reg(dbgi, block, new_op, const_graph);
717 	return new_bd_sparc_SwitchJmp(dbgi, block, sub, n_projs, get_Cond_default_proj(node) - translation);
721  * Transform Cond nodes
/* Non-boolean selector => switch lowering; otherwise the selector must be
 * a Proj of a flags-producing node (e.g. Cmp) and becomes a Branch keyed
 * on that Proj's number. */
723 static ir_node *gen_Cond(ir_node *node)
725 	ir_node  *selector = get_Cond_selector(node);
726 	ir_mode  *mode     = get_irn_mode(selector);
732 	if (mode != mode_b) {
733 		return gen_SwitchJmp(node);
736 	// regular if/else jumps
737 	assert(is_Proj(selector));
739 	block     = be_transform_node(get_nodes_block(node));
740 	dbgi      = get_irn_dbg_info(node);
741 	flag_node = be_transform_node(get_Proj_pred(selector));
742 	return new_bd_sparc_Branch(dbgi, block, flag_node, get_Proj_proj(selector));
/* Transform a Cmp.  Only 32-bit integer compares are supported; a compare
 * against constant 0 is strength-reduced to a Tst (the bool argument
 * presumably flags the "operand was on the left" / inverted case — TODO
 * confirm against the Tst constructor).  Both operands of a general
 * compare are (re-)extended to clean upper bits first. */
748 static ir_node *gen_Cmp(ir_node *node)
750 	ir_node  *block    = be_transform_node(get_nodes_block(node));
751 	ir_node  *op1      = get_Cmp_left(node);
752 	ir_node  *op2      = get_Cmp_right(node);
753 	ir_mode  *cmp_mode = get_irn_mode(op1);
754 	dbg_info *dbgi     = get_irn_dbg_info(node);
759 	if (mode_is_float(cmp_mode)) {
760 		panic("FloatCmp not implemented");
764 	if (get_mode_size_bits(cmp_mode) != 32) {
765 		panic("CmpMode != 32bit not supported yet");
769 	assert(get_irn_mode(op2) == cmp_mode);
770 	is_unsigned = !mode_is_signed(cmp_mode);
772 	/* compare with 0 can be done with Tst */
774 	if (is_Const(op2) && tarval_is_null(get_Const_tarval(op2))) {
775 		new_op1 = be_transform_node(op1);
776 		return new_bd_sparc_Tst(dbgi, block, new_op1, false,
780 	if (is_Const(op1) && tarval_is_null(get_Const_tarval(op1))) {
781 		new_op2 = be_transform_node(op2);
782 		return new_bd_sparc_Tst(dbgi, block, new_op2, true,
787 	/* integer compare */
788 	new_op1 = be_transform_node(op1);
789 	new_op1 = gen_extension(dbgi, block, new_op1, cmp_mode);
790 	new_op2 = be_transform_node(op2);
791 	new_op2 = gen_extension(dbgi, block, new_op2, cmp_mode);
792 	return new_bd_sparc_Cmp_reg(dbgi, block, new_op1, new_op2, false, is_unsigned);
796  * Transforms a SymConst node.
/* Materializes an entity address; be_dep_on_frame keeps it scheduled
 * after the frame setup. */
798 static ir_node *gen_SymConst(ir_node *node)
800 	ir_node   *block  = be_transform_node(get_nodes_block(node));
801 	ir_entity *entity = get_SymConst_entity(node);
802 	dbg_info  *dbgi   = get_irn_dbg_info(node);
805 	new_node = new_bd_sparc_SymConst(dbgi, block, entity);
806 	be_dep_on_frame(new_node);
811  * Transforms a Conv node.
/* Cases: identical modes => pass-through; float<->float picks single/double
 * conversion by width; float<->int picks FpSToInt/FpDToInt (or the reverse)
 * with quad precision panicking; pure gp conversions keep the value if the
 * widths match or widening is requested, and only emit explicit sign/zero
 * extension when the upper bits are not already known clean. */
814 static ir_node *gen_Conv(ir_node *node)
816 	ir_node  *block    = be_transform_node(get_nodes_block(node));
817 	ir_node  *op       = get_Conv_op(node);
818 	ir_node  *new_op   = be_transform_node(op);
819 	ir_mode  *src_mode = get_irn_mode(op);
820 	ir_mode  *dst_mode = get_irn_mode(node);
821 	dbg_info *dbg      = get_irn_dbg_info(node);
823 	int src_bits = get_mode_size_bits(src_mode);
824 	int dst_bits = get_mode_size_bits(dst_mode);
826 	if (src_mode == dst_mode)
829 	if (mode_is_float(src_mode) || mode_is_float(dst_mode)) {
830 		assert((src_bits <= 64 && dst_bits <= 64) && "quad FP not implemented");
832 		if (mode_is_float(src_mode)) {
833 			if (mode_is_float(dst_mode)) {
834 				// float -> float conv
835 				if (src_bits > dst_bits) {
836 					return new_bd_sparc_FpDToFpS(dbg, block, new_op, dst_mode);
838 					return new_bd_sparc_FpSToFpD(dbg, block, new_op, dst_mode);
844 					return new_bd_sparc_FpSToInt(dbg, block, new_op, dst_mode);
846 					return new_bd_sparc_FpDToInt(dbg, block, new_op, dst_mode);
848 					panic("quad FP not implemented");
855 				return new_bd_sparc_IntToFpS(dbg, block, new_op, src_mode);
857 				return new_bd_sparc_IntToFpD(dbg, block, new_op, src_mode);
859 				panic("quad FP not implemented");
862 	} else { /* complete in gp registers */
866 		if (src_bits == dst_bits) {
867 			/* kill unneccessary conv */
871 		if (src_bits < dst_bits) {
879 		if (upper_bits_clean(new_op, min_mode)) {
883 		if (mode_is_signed(min_mode)) {
884 			return gen_sign_extension(dbg, block, new_op, min_bits);
886 			return gen_zero_extension(dbg, block, new_op, min_bits);
/* Transform an Unknown: gp-register modes simply become the constant 0;
 * FP and anything else panic. */
891 static ir_node *gen_Unknown(ir_node *node)
893 	ir_node  *block     = get_nodes_block(node);
894 	ir_node  *new_block = be_transform_node(block);
895 	dbg_info *dbgi      = get_irn_dbg_info(node);
897 	/* just produce a 0 */
898 	ir_mode *mode = get_irn_mode(node);
899 	if (mode_is_float(mode)) {
900 		panic("FP not implemented");
901 		be_dep_on_frame(node);
903 	} else if (mode_needs_gp_reg(mode)) {
904 		return create_const_graph_value(dbgi, new_block, 0);
907 	panic("Unexpected Unknown mode");
911  * Transform some Phi nodes
/* Phis may close loops, so the new Phi is built with the OLD (untransformed)
 * predecessors; be_enqueue_preds() schedules them for later fix-up.  gp-mode
 * Phis get the gp class register requirement, everything else none. */
913 static ir_node *gen_Phi(ir_node *node)
915 	const arch_register_req_t *req;
916 	ir_node  *block = be_transform_node(get_nodes_block(node));
917 	ir_graph *irg   = current_ir_graph;
918 	dbg_info *dbgi  = get_irn_dbg_info(node);
919 	ir_mode  *mode  = get_irn_mode(node);
922 	if (mode_needs_gp_reg(mode)) {
923 		/* we shouldn't have any 64bit stuff around anymore */
924 		assert(get_mode_size_bits(mode) <= 32);
925 		/* all integer operations are on 32bit registers now */
927 		req  = sparc_reg_classes[CLASS_sparc_gp].class_req;
929 		req = arch_no_register_req;
932 	/* phi nodes allow loops, so we use the old arguments for now
933 	 * and fix this later */
934 	phi = new_ir_node(dbgi, irg, block, op_Phi, mode, get_irn_arity(node), get_irn_in(node) + 1);
935 	copy_node_attr(irg, node, phi);
936 	be_duplicate_deps(node, phi);
937 	arch_set_out_register_req(phi, 0, req);
938 	be_enqueue_preds(node);
944  * Transform a Proj from a Load.
/* Renumbers the Firm Load projs (res/M) onto the sparc Load's proj
 * numbering; FP loads are unimplemented. */
946 static ir_node *gen_Proj_Load(ir_node *node)
948 	ir_node  *load     = get_Proj_pred(node);
949 	ir_node  *new_load = be_transform_node(load);
950 	dbg_info *dbgi     = get_irn_dbg_info(node);
951 	long     proj      = get_Proj_proj(node);
953 	/* renumber the proj */
954 	switch (get_sparc_irn_opcode(new_load)) {
956 		/* handle all gp loads equal: they have the same proj numbers. */
957 		if (proj == pn_Load_res) {
958 			return new_rd_Proj(dbgi, new_load, mode_Iu, pn_sparc_Load_res);
959 		} else if (proj == pn_Load_M) {
960 			return new_rd_Proj(dbgi, new_load, mode_M, pn_sparc_Load_M);
964 	case iro_sparc_fpaLoad:
965 		panic("FP not implemented yet");
969 	panic("Unsupported Proj from Load");
972 	return be_duplicate_node(node);
976  * Transform the Projs of a be_AddSP.
/* NOTE(review): the projs below use pn_sparc_SubSP_* constants although
 * gen_be_AddSP creates a sparc_AddSP node (and vice versa in
 * gen_Proj_be_SubSP).  Possibly intentional because the SPARC stack grows
 * downward and the nodes are cross-implemented — verify against
 * gen_sparc_new_nodes.h before touching. */
978 static ir_node *gen_Proj_be_AddSP(ir_node *node)
980 	ir_node  *pred     = get_Proj_pred(node);
981 	ir_node  *new_pred = be_transform_node(pred);
982 	dbg_info *dbgi     = get_irn_dbg_info(node);
983 	long     proj      = get_Proj_proj(node);
985 	if (proj == pn_be_AddSP_sp) {
986 		ir_node *res = new_rd_Proj(dbgi, new_pred, mode_Iu,
987 		                           pn_sparc_SubSP_stack);
988 		arch_set_irn_register(res, &sparc_gp_regs[REG_SP]);
990 	} else if (proj == pn_be_AddSP_res) {
991 		return new_rd_Proj(dbgi, new_pred, mode_Iu, pn_sparc_SubSP_stack);
992 	} else if (proj == pn_be_AddSP_M) {
993 		return new_rd_Proj(dbgi, new_pred, mode_M, pn_sparc_SubSP_M);
996 	panic("Unsupported Proj from AddSP");
1000  * Transform the Projs of a be_SubSP.
/* Mirror of gen_Proj_be_AddSP; see the cross-naming note above. */
1002 static ir_node *gen_Proj_be_SubSP(ir_node *node)
1004 	ir_node  *pred     = get_Proj_pred(node);
1005 	ir_node  *new_pred = be_transform_node(pred);
1006 	dbg_info *dbgi     = get_irn_dbg_info(node);
1007 	long     proj      = get_Proj_proj(node);
1009 	if (proj == pn_be_SubSP_sp) {
1010 		ir_node *res = new_rd_Proj(dbgi, new_pred, mode_Iu,
1011 		                           pn_sparc_AddSP_stack);
1012 		arch_set_irn_register(res, &sparc_gp_regs[REG_SP]);
1014 	} else if (proj == pn_be_SubSP_M) {
1015 		return new_rd_Proj(dbgi, new_pred, mode_M, pn_sparc_AddSP_M);
1018 	panic("Unsupported Proj from SubSP");
1022  * Transform the Projs from a Cmp.
/* Not implemented yet — always panics. */
1024 static ir_node *gen_Proj_Cmp(ir_node *node)
1027 	panic("not implemented");
1031  * transform Projs from a Div
/* Only the result proj of an integer Div is handled; anything else
 * (memory proj, non-Div predecessor) panics. */
1033 static ir_node *gen_Proj_Div(ir_node *node)
1035 	ir_node  *pred     = get_Proj_pred(node);
1036 	ir_node  *new_pred = be_transform_node(pred);
1037 	dbg_info *dbgi     = get_irn_dbg_info(node);
1038 	ir_mode  *mode     = get_irn_mode(node);
1039 	long     proj      = get_Proj_proj(node);
1043 	if (is_sparc_Div(new_pred)) {
1044 		return new_rd_Proj(dbgi, new_pred, mode, pn_sparc_Div_res);
1050 	panic("Unsupported Proj from Div");
1055  * Transform a Proj node.
/* Central Proj dispatcher: routes to the per-predecessor handlers above,
 * turns Start's initial-exec ProjX into a plain Jmp, and for anything else
 * rebuilds the Proj on the transformed predecessor (gp modes normalized
 * to mode_Iu, keeping the original node number). */
1057 static ir_node *gen_Proj(ir_node *node)
1059 	ir_graph *irg  = current_ir_graph;
1060 	dbg_info *dbgi = get_irn_dbg_info(node);
1061 	ir_node  *pred = get_Proj_pred(node);
1062 	long     proj  = get_Proj_proj(node);
1067 	if (is_Store(pred)) {
1068 		if (proj == pn_Store_M) {
1069 			return be_transform_node(pred);
1071 			panic("Unsupported Proj from Store");
1073 	} else if (is_Load(pred)) {
1074 		return gen_Proj_Load(node);
1075 	} else if (be_is_SubSP(pred)) {
1076 		//panic("gen_Proj not implemented for SubSP");
1077 		return gen_Proj_be_SubSP(node);
1078 	} else if (be_is_AddSP(pred)) {
1079 		//panic("gen_Proj not implemented for AddSP");
1080 		return gen_Proj_be_AddSP(node);
1081 	} else if (is_Cmp(pred)) {
1082 		//panic("gen_Proj not implemented for Cmp");
1083 		return gen_Proj_Cmp(node);
1084 	} else if (is_Div(pred)) {
1085 		return gen_Proj_Div(node);
1086 	} else if (is_Start(pred)) {
1088 		if (proj == pn_Start_X_initial_exec) {
1089 			ir_node *block = get_nodes_block(pred);
1092 			// we exchange the ProjX with a jump
1093 			block = be_transform_node(block);
1094 			jump  = new_rd_Jmp(dbgi, block);
1098 		if (node == get_irg_anchor(irg, anchor_tls)) {
1099 			return gen_Proj_tls(node);
1103 		ir_node *new_pred = be_transform_node(pred);
1104 		ir_mode *mode     = get_irn_mode(node);
1105 		if (mode_needs_gp_reg(mode)) {
1106 			ir_node *new_proj = new_r_Proj(new_pred, mode_Iu, get_Proj_proj(node));
/* NOTE(review): direct field write into ir_node internals (node_nr) —
 * presumably for stable debug numbering; an accessor would be cleaner. */
1107 			new_proj->node_nr = node->node_nr;
1112 	return be_duplicate_node(node);
/* Transform an unconditional Jmp into its sparc equivalent in the
 * transformed block. */
1119 static ir_node *gen_Jmp(ir_node *node)
1121 	ir_node  *block     = get_nodes_block(node);
1122 	ir_node  *new_block = be_transform_node(block);
1123 	dbg_info *dbgi      = get_irn_dbg_info(node);
1125 	return new_bd_sparc_Jmp(dbgi, new_block);
1129  * configure transformation callbacks
/* Registers one transform function per Firm opcode with the generic
 * backend transformation driver.  Note op_Eor maps to gen_Xor and
 * op_Shrs to gen_Shra; sparc_Save is passed through unchanged. */
1131 void sparc_register_transformers(void)
1133 	be_start_transform_setup();
1135 	be_set_transform_function(op_Abs,          gen_Abs);
1136 	be_set_transform_function(op_Add,          gen_Add);
1137 	be_set_transform_function(op_And,          gen_And);
1138 	be_set_transform_function(op_be_AddSP,     gen_be_AddSP);
1139 	be_set_transform_function(op_be_Call,      gen_be_Call);
1140 	be_set_transform_function(op_be_Copy,      gen_be_Copy);
1141 	be_set_transform_function(op_be_FrameAddr, gen_be_FrameAddr);
1142 	be_set_transform_function(op_be_SubSP,     gen_be_SubSP);
1143 	be_set_transform_function(op_Cmp,          gen_Cmp);
1144 	be_set_transform_function(op_Cond,         gen_Cond);
1145 	be_set_transform_function(op_Const,        gen_Const);
1146 	be_set_transform_function(op_Conv,         gen_Conv);
1147 	be_set_transform_function(op_Div,          gen_Div);
1148 	be_set_transform_function(op_Eor,          gen_Xor);
1149 	be_set_transform_function(op_Jmp,          gen_Jmp);
1150 	be_set_transform_function(op_Load,         gen_Load);
1151 	be_set_transform_function(op_Minus,        gen_Minus);
1152 	be_set_transform_function(op_Mul,          gen_Mul);
1153 	be_set_transform_function(op_Mulh,         gen_Mulh);
1154 	be_set_transform_function(op_Not,          gen_Not);
1155 	be_set_transform_function(op_Or,           gen_Or);
1156 	be_set_transform_function(op_Phi,          gen_Phi);
1157 	be_set_transform_function(op_Proj,         gen_Proj);
1158 	be_set_transform_function(op_Shl,          gen_Shl);
1159 	be_set_transform_function(op_Shr,          gen_Shr);
1160 	be_set_transform_function(op_Shrs,         gen_Shra);
1161 	be_set_transform_function(op_Store,        gen_Store);
1162 	be_set_transform_function(op_Sub,          gen_Sub);
1163 	be_set_transform_function(op_SymConst,     gen_SymConst);
1164 	be_set_transform_function(op_Unknown,      gen_Unknown);
1166 	be_set_transform_function(op_sparc_Save,   be_duplicate_node);
1170  * Transform a Firm graph into a SPARC graph.
/* Entry point: installs the transformer table, then runs the generic
 * graph-transformation driver over the code generator's irg. */
1172 void sparc_transform_graph(sparc_code_gen_t *cg)
1174 	sparc_register_transformers();
1176 	be_transform_graph(cg->irg, NULL);
1179 void sparc_init_transform(void)
1181 FIRM_DBG_REGISTER(dbg, "firm.be.sparc.transform");