2 * Copyright (C) 1995-2010 University of Karlsruhe. All rights reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief code selection (transform FIRM into SPARC FIRM)
29 #include "irgraph_t.h"
41 #include "../benode.h"
43 #include "../beutil.h"
44 #include "../betranshlp.h"
45 #include "bearch_sparc_t.h"
47 #include "sparc_nodes_attr.h"
48 #include "sparc_transform.h"
49 #include "sparc_new_nodes.h"
50 #include "gen_sparc_new_nodes.h"
52 #include "gen_sparc_regalloc_if.h"
/* Debug module handle for this transformation pass. */
56 DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
/* Code-generator environment; set up before the graph is transformed. */
58 static sparc_code_gen_t *env_cg;
/* Forward declaration: gen_SymConst is referenced before its definition below. */
60 static ir_node *gen_SymConst(ir_node *node);
/* Returns non-zero iff values of @p mode are kept in general-purpose
 * (integer) registers, i.e. integer and reference/pointer modes. */
63 static inline int mode_needs_gp_reg(ir_mode *mode)
65 return mode_is_int(mode) || mode_is_reference(mode);
69 * Create an And that will zero out upper bits.
71 * @param dbgi debug info
72 * @param block the basic block
73 * @param op the original node
74 * @param src_bits number of lower bits that will remain
76 static ir_node *gen_zero_extension(dbg_info *dbgi, ir_node *block, ir_node *op,
/* 8-bit case: a single And with 0xFF clears bits 8..31. */
80 return new_bd_sparc_And_imm(dbgi, block, op, 0xFF);
81 } else if (src_bits == 16) {
/* 16-bit case: shift left then logical-shift right by 16 to clear the
 * upper half-word (no 16-bit And immediate available). */
82 ir_node *lshift = new_bd_sparc_ShiftLL_imm(dbgi, block, op, 16);
83 ir_node *rshift = new_bd_sparc_ShiftLR_imm(dbgi, block, lshift, 16);
86 panic("zero extension only supported for 8 and 16 bits");
91 * Generate code for a sign extension.
/* Shift left by (32 - src_bits), then arithmetic-shift right by the same
 * amount so the sign bit of the src_bits-wide value fills the upper bits. */
93 static ir_node *gen_sign_extension(dbg_info *dbgi, ir_node *block, ir_node *op,
96 int shift_width = 32 - src_bits;
97 ir_node *lshift_node = new_bd_sparc_ShiftLL_imm(dbgi, block, op, shift_width);
98 ir_node *rshift_node = new_bd_sparc_ShiftRA_imm(dbgi, block, lshift_node, shift_width);
103 * returns true if it is assured, that the upper bits of a node are "clean"
104 * which means for a 16 or 8 bit value, that the upper bits in the register
105 * are 0 for unsigned and a copy of the most significant bit for signed
108 static bool upper_bits_clean(ir_node *transformed_node, ir_mode *mode)
110 (void) transformed_node;
/* Extend @p op (already transformed) from @p orig_mode to full register
 * width, choosing sign- or zero-extension based on the mode's signedness. */
116 static ir_node *gen_extension(dbg_info *dbgi, ir_node *block, ir_node *op,
119 int bits = get_mode_size_bits(orig_mode);
123 if (mode_is_signed(orig_mode)) {
124 return gen_sign_extension(dbgi, block, op, bits);
126 return gen_zero_extension(dbgi, block, op, bits);
132 * Creates a possible DAG for a constant.
134 static ir_node *create_const_graph_value(dbg_info *dbgi, ir_node *block,
139 // we need to load hi & lo separately
/* Values outside the signed 13-bit immediate range [-4096, 4095] need a
 * hi/lo instruction pair (sethi + or style); small values fit one mov. */
140 if (value < -4096 || value > 4095) {
141 ir_node *hi = new_bd_sparc_HiImm(dbgi, block, (int) value);
142 result = new_bd_sparc_LoImm(dbgi, block, hi, value);
145 result = new_bd_sparc_Mov_imm(dbgi, block, (int) value);
146 be_dep_on_frame(result);
154 * Create a DAG constructing a given Const.
156 * @param irn a Firm const
158 static ir_node *create_const_graph(ir_node *irn, ir_node *block)
160 tarval *tv = get_Const_tarval(irn);
161 ir_mode *mode = get_tarval_mode(tv);
162 dbg_info *dbgi = get_irn_dbg_info(irn);
166 if (mode_is_reference(mode)) {
167 /* SPARC V8 is 32bit, so we can safely convert a reference tarval into Iu */
168 assert(get_mode_size_bits(mode) == get_mode_size_bits(mode_Iu));
169 tv = tarval_convert_to(tv, mode_Iu);
172 value = get_tarval_long(tv);
173 return create_const_graph_value(dbgi, block, value);
177 * create a DAG to load fp constant. sparc only supports loading from global memory
/* Not implemented yet: always panics. */
179 static ir_node *create_fp_const_graph(ir_node *irn, ir_node *block)
182 panic("FP constants not implemented");
/* Flags steering gen_helper_binop: commutativity allows swapping operands
 * to use an immediate form; size-neutral ops may skip down-conversions. */
188 MATCH_COMMUTATIVE = 1 << 0,
189 MATCH_SIZE_NEUTRAL = 1 << 1,
/* Constructor signatures for register/register, FP and register/immediate
 * variants of a binary sparc instruction. */
192 typedef ir_node* (*new_binop_reg_func) (dbg_info *dbgi, ir_node *block, ir_node *op1, ir_node *op2);
193 typedef ir_node* (*new_binop_fp_func) (dbg_info *dbgi, ir_node *block, ir_node *op1, ir_node *op2, ir_mode *mode);
194 typedef ir_node* (*new_binop_imm_func) (dbg_info *dbgi, ir_node *block, ir_node *op1, int simm13);
197 * checks if a node's value can be encoded as a immediate
/* True iff @p node is a Const whose value fits the signed 13-bit
 * immediate field (simm13) of SPARC arithmetic instructions. */
200 static bool is_imm_encodeable(const ir_node *node)
204 //assert(mode_is_float_vector(get_irn_mode(node)));
209 val = get_tarval_long(get_Const_tarval(node));
211 return !(val < -4096 || val > 4095);
215 * helper function for binop operations
217 * @param new_binop_reg_func register generation function ptr
218 * @param new_binop_imm_func immediate generation function ptr
220 static ir_node *gen_helper_binop(ir_node *node, match_flags_t flags,
221 new_binop_reg_func new_reg, new_binop_imm_func new_imm)
223 ir_node *block = be_transform_node(get_nodes_block(node));
224 ir_node *op1 = get_binop_left(node);
226 ir_node *op2 = get_binop_right(node);
228 dbg_info *dbgi = get_irn_dbg_info(node);
231 if (flags & MATCH_SIZE_NEUTRAL) {
/* NOTE(review): arm_skip_downconv looks copy-pasted from the ARM
 * backend — verify a sparc-specific skip-downconv is intended here. */
232 op1 = arm_skip_downconv(op1);
233 op2 = arm_skip_downconv(op2);
235 assert(get_mode_size_bits(get_irn_mode(node)) == 32);
/* Prefer the immediate form when the right operand encodes as simm13. */
238 if (is_imm_encodeable(op2)) {
239 ir_node *new_op1 = be_transform_node(op1);
240 return new_imm(dbgi, block, new_op1, get_tarval_long(get_Const_tarval(op2)));
243 new_op2 = be_transform_node(op2);
/* For commutative ops, also try the left operand as immediate. */
245 if ((flags & MATCH_COMMUTATIVE) && is_imm_encodeable(op1)) {
246 return new_imm(dbgi, block, new_op2, get_tarval_long(get_Const_tarval(op1)) );
249 new_op1 = be_transform_node(op1);
/* Fall back to the register/register form. */
251 return new_reg(dbgi, block, new_op1, new_op2);
255 * helper function for FP binop operations
/* FP variant: no immediate forms, both operands go through registers. */
257 static ir_node *gen_helper_binfpop(ir_node *node, new_binop_fp_func new_reg)
259 ir_node *block = be_transform_node(get_nodes_block(node));
260 ir_node *op1 = get_binop_left(node);
262 ir_node *op2 = get_binop_right(node);
264 dbg_info *dbgi = get_irn_dbg_info(node);
266 new_op2 = be_transform_node(op2);
267 new_op1 = be_transform_node(op1);
268 return new_reg(dbgi, block, new_op1, new_op2, get_irn_mode(node));
272 * Creates an sparc Add.
274 * @param node FIRM node
275 * @return the created sparc Add node
277 static ir_node *gen_Add(ir_node *node)
279 ir_mode *mode = get_irn_mode(node);
280 ir_node *block = be_transform_node(get_nodes_block(node));
281 dbg_info *dbgi = get_irn_dbg_info(node);
/* No FP add yet. */
286 if (mode_is_float(mode))
287 panic("FP not implemented yet");
/* Add is commutative, so either operand may become the immediate. */
289 return gen_helper_binop(node, MATCH_COMMUTATIVE | MATCH_SIZE_NEUTRAL, new_bd_sparc_Add_reg, new_bd_sparc_Add_imm);
294 * Creates an sparc Sub.
296 * @param node FIRM node
297 * @return the created sparc Sub node
299 static ir_node *gen_Sub(ir_node *node)
301 ir_mode *mode = get_irn_mode(node);
302 ir_node *block = be_transform_node(get_nodes_block(node));
303 dbg_info *dbgi = get_irn_dbg_info(node);
308 if (mode_is_float(mode))
309 panic("FP not implemented yet");
/* Sub is not commutative: only the right operand may be an immediate. */
311 return gen_helper_binop(node, MATCH_SIZE_NEUTRAL, new_bd_sparc_Sub_reg, new_bd_sparc_Sub_imm);
318 * @param node the ir Load node
319 * @return the created sparc Load node
321 static ir_node *gen_Load(ir_node *node)
323 ir_mode *mode = get_Load_mode(node);
324 ir_node *block = be_transform_node(get_nodes_block(node));
325 ir_node *ptr = get_Load_ptr(node);
326 ir_node *new_ptr = be_transform_node(ptr);
327 ir_node *mem = get_Load_mem(node);
328 ir_node *new_mem = be_transform_node(mem);
329 dbg_info *dbgi = get_irn_dbg_info(node);
330 ir_node *new_load = NULL;
332 if (mode_is_float(mode))
333 panic("SPARC: no fp implementation yet");
335 new_load = new_bd_sparc_Load(dbgi, block, new_ptr, new_mem, mode, NULL, 0, 0, false);
/* Preserve the original pinned state (loads may be moved if unpinned). */
336 set_irn_pinned(new_load, get_irn_pinned(node));
344 * Transforms a Store.
346 * @param node the ir Store node
347 * @return the created sparc Store node
349 static ir_node *gen_Store(ir_node *node)
351 ir_node *block = be_transform_node(get_nodes_block(node));
352 ir_node *ptr = get_Store_ptr(node);
353 ir_node *new_ptr = be_transform_node(ptr);
354 ir_node *mem = get_Store_mem(node);
355 ir_node *new_mem = be_transform_node(mem);
356 ir_node *val = get_Store_value(node);
357 ir_node *new_val = be_transform_node(val);
/* The store width is taken from the value's mode, not the node's. */
358 ir_mode *mode = get_irn_mode(val);
359 dbg_info *dbgi = get_irn_dbg_info(node);
360 ir_node *new_store = NULL;
362 if (mode_is_float(mode))
363 panic("SPARC: no fp implementation yet");
365 new_store = new_bd_sparc_Store(dbgi, block, new_ptr, new_val, new_mem, mode, NULL, 0, 0, false);
371 * Creates an sparc Mul.
372 * returns the lower 32bits of the 64bit multiply result
374 * @return the created sparc Mul node
376 static ir_node *gen_Mul(ir_node *node) {
377 ir_mode *mode = get_irn_mode(node);
378 dbg_info *dbgi = get_irn_dbg_info(node);
381 ir_node *proj_res_low;
383 if (mode_is_float(mode)) {
384 mul = gen_helper_binfpop(node, new_bd_sparc_fMul);
388 assert(mode_is_data(mode));
389 mul = gen_helper_binop(node, MATCH_COMMUTATIVE | MATCH_SIZE_NEUTRAL, new_bd_sparc_Mul_reg, new_bd_sparc_Mul_imm);
/* Integer multiply writes the Y register / condition flags. */
390 arch_irn_add_flags(mul, arch_irn_flags_modify_flags);
392 proj_res_low = new_rd_Proj(dbgi, mul, mode_Iu, pn_sparc_Mul_low);
397 * Creates an sparc Mulh.
398 * Mulh returns the upper 32bits of a mul instruction
400 * @return the created sparc Mulh node
402 static ir_node *gen_Mulh(ir_node *node) {
403 ir_mode *mode = get_irn_mode(node);
404 dbg_info *dbgi = get_irn_dbg_info(node);
407 ir_node *proj_res_hi;
409 if (mode_is_float(mode))
410 panic("FP not supported yet");
413 assert(mode_is_data(mode));
414 mul = gen_helper_binop(node, MATCH_COMMUTATIVE | MATCH_SIZE_NEUTRAL, new_bd_sparc_Mulh_reg, new_bd_sparc_Mulh_imm);
415 //arch_irn_add_flags(mul, arch_irn_flags_modify_flags);
/* NOTE(review): the high-word result is projected with pn_sparc_Mulh_low —
 * verify against gen_sparc_new_nodes that this proj number really selects
 * the upper 32 bits of the Mulh node. */
416 proj_res_hi = new_rd_Proj(dbgi, mul, mode_Iu, pn_sparc_Mulh_low);
421 * Creates an sparc Div.
423 * @return the created sparc Div node
425 static ir_node *gen_Div(ir_node *node) {
427 ir_mode *mode = get_irn_mode(node);
431 if (mode_is_float(mode))
432 panic("FP not supported yet");
434 //assert(mode_is_data(mode));
435 div = gen_helper_binop(node, MATCH_SIZE_NEUTRAL, new_bd_sparc_Div_reg, new_bd_sparc_Div_imm);
441 * transform abs node:
/* Branch-free abs: sra spreads the sign bit into a 0/-1 mask, xor
 * conditionally complements, and the final sub completes the negation
 * (classic (x ^ mask) - mask sequence). */
449 static ir_node *gen_Abs(ir_node *node) {
450 ir_node *block = be_transform_node(get_nodes_block(node));
451 ir_mode *mode = get_irn_mode(node);
452 dbg_info *dbgi = get_irn_dbg_info(node);
453 ir_node *op = get_Abs_op(node);
455 ir_node *mov, *sra, *xor, *sub, *new_op;
457 if (mode_is_float(mode))
458 panic("FP not supported yet");
460 new_op = be_transform_node(op);
462 mov = new_bd_sparc_Mov_reg(dbgi, block, new_op);
463 sra = new_bd_sparc_ShiftRA_imm(dbgi, block, mov, 31);
464 xor = new_bd_sparc_Xor_reg(dbgi, block, new_op, sra);
465 sub = new_bd_sparc_Sub_reg(dbgi, block, sra, xor);
471 * Transforms a Not node.
473 * @return the created sparc Not node
475 static ir_node *gen_Not(ir_node *node)
477 ir_node *block = be_transform_node(get_nodes_block(node));
478 ir_node *op = get_Not_op(node);
479 ir_node *new_op = be_transform_node(op);
480 dbg_info *dbgi = get_irn_dbg_info(node);
482 return new_bd_sparc_Not(dbgi, block, new_op);
/* Transform a Firm And into a sparc And (reg or simm13 immediate form). */
485 static ir_node *gen_And(ir_node *node)
487 ir_mode *mode = get_irn_mode(node);
488 ir_node *block = be_transform_node(get_nodes_block(node));
489 dbg_info *dbgi = get_irn_dbg_info(node);
494 if (mode_is_float(mode))
495 panic("FP not implemented yet");
497 return gen_helper_binop(node, MATCH_COMMUTATIVE, new_bd_sparc_And_reg, new_bd_sparc_And_imm);
/* Transform a Firm Or into a sparc Or. */
500 static ir_node *gen_Or(ir_node *node)
502 ir_mode *mode = get_irn_mode(node);
503 ir_node *block = be_transform_node(get_nodes_block(node));
504 dbg_info *dbgi = get_irn_dbg_info(node);
509 if (mode_is_float(mode))
510 panic("FP not implemented yet");
512 return gen_helper_binop(node, MATCH_COMMUTATIVE, new_bd_sparc_Or_reg, new_bd_sparc_Or_imm);
/* Transform a Firm Eor (xor) into a sparc Xor. */
515 static ir_node *gen_Xor(ir_node *node)
517 ir_mode *mode = get_irn_mode(node);
518 ir_node *block = be_transform_node(get_nodes_block(node));
519 dbg_info *dbgi = get_irn_dbg_info(node);
524 if (mode_is_float(mode))
525 panic("FP not implemented yet");
527 return gen_helper_binop(node, MATCH_COMMUTATIVE, new_bd_sparc_Xor_reg, new_bd_sparc_Xor_imm);
/* Shift left (sll). */
530 static ir_node *gen_Shl(ir_node *node)
532 return gen_helper_binop(node, MATCH_SIZE_NEUTRAL, new_bd_sparc_ShiftLL_reg, new_bd_sparc_ShiftLL_imm);
/* Logical shift right (srl). */
535 static ir_node *gen_Shr(ir_node *node)
537 return gen_helper_binop(node, MATCH_SIZE_NEUTRAL, new_bd_sparc_ShiftLR_reg, new_bd_sparc_ShiftLR_imm);
/* Arithmetic shift right (sra); mapped from Firm's Shrs. */
540 static ir_node *gen_Shra(ir_node *node)
542 return gen_helper_binop(node, MATCH_SIZE_NEUTRAL, new_bd_sparc_ShiftRA_reg, new_bd_sparc_ShiftRA_imm);
545 /****** TRANSFORM GENERAL BACKEND NODES ********/
548 * Transforms a Minus node.
551 static ir_node *gen_Minus(ir_node *node)
553 ir_node *block = be_transform_node(get_nodes_block(node));
554 ir_node *op = get_Minus_op(node);
555 ir_node *new_op = be_transform_node(op);
556 dbg_info *dbgi = get_irn_dbg_info(node);
557 ir_mode *mode = get_irn_mode(node);
559 if (mode_is_float(mode)) {
560 panic("FP not implemented yet");
563 assert(mode_is_data(mode));
564 return new_bd_sparc_Minus(dbgi, block, new_op);
568 * Transforms a Const node.
570 * @param node the ir Const node
571 * @return The transformed sparc node.
573 static ir_node *gen_Const(ir_node *node)
575 ir_node *block = be_transform_node(get_nodes_block(node));
576 ir_mode *mode = get_irn_mode(node);
577 dbg_info *dbg = get_irn_dbg_info(node);
/* FP constants go through memory; integer/reference ones become mov or
 * hi/lo immediate sequences (see create_const_graph_value above). */
581 if (mode_is_float(mode)) {
582 return create_fp_const_graph(node, block);
585 return create_const_graph(node, block);
590 * @param node the ir AddSP node
591 * @return transformed sparc SAVE node
593 static ir_node *gen_be_AddSP(ir_node *node)
595 ir_node *block = be_transform_node(get_nodes_block(node));
596 ir_node *sz = get_irn_n(node, be_pos_AddSP_size);
597 ir_node *new_sz = be_transform_node(sz);
598 ir_node *sp = get_irn_n(node, be_pos_AddSP_old_sp);
599 ir_node *new_sp = be_transform_node(sp);
600 dbg_info *dbgi = get_irn_dbg_info(node);
601 ir_node *nomem = new_NoMem();
604 /* SPARC stack grows in reverse direction */
605 new_op = new_bd_sparc_AddSP(dbgi, block, new_sp, new_sz, nomem);
613 * @param node the ir SubSP node
614 * @return transformed sparc SAVE node
616 static ir_node *gen_be_SubSP(ir_node *node)
618 ir_node *block = be_transform_node(get_nodes_block(node));
619 ir_node *sz = get_irn_n(node, be_pos_SubSP_size);
620 ir_node *new_sz = be_transform_node(sz);
621 ir_node *sp = get_irn_n(node, be_pos_SubSP_old_sp);
622 ir_node *new_sp = be_transform_node(sp);
623 dbg_info *dbgi = get_irn_dbg_info(node);
624 ir_node *nomem = new_NoMem();
627 /* SPARC stack grows in reverse direction */
628 new_op = new_bd_sparc_SubSP(dbgi, block, new_sp, new_sz, nomem);
633 * transform FrameAddr
/* Compute the address of a frame entity relative to the frame pointer. */
635 static ir_node *gen_be_FrameAddr(ir_node *node)
637 ir_node *block = be_transform_node(get_nodes_block(node));
638 ir_entity *ent = be_get_frame_entity(node);
639 ir_node *fp = be_get_FrameAddr_frame(node);
640 ir_node *new_fp = be_transform_node(fp);
641 dbg_info *dbgi = get_irn_dbg_info(node);
643 new_node = new_bd_sparc_FrameAddr(dbgi, block, new_fp, ent);
648 * Transform a be_Copy.
650 static ir_node *gen_be_Copy(ir_node *node)
652 ir_node *result = be_duplicate_node(node);
653 ir_mode *mode = get_irn_mode(result);
/* Normalize all gp-register copies to mode_Iu (everything is 32 bit). */
655 if (mode_needs_gp_reg(mode)) {
656 set_irn_mode(node, mode_Iu);
/* Duplicate the call node; calls clobber the condition flags. */
665 static ir_node *gen_be_Call(ir_node *node)
667 ir_node *res = be_duplicate_node(node);
668 arch_irn_add_flags(res, arch_irn_flags_modify_flags);
673 * Transforms a Switch.
676 static ir_node *gen_SwitchJmp(ir_node *node)
678 ir_node *block = be_transform_node(get_nodes_block(node));
679 ir_node *selector = get_Cond_selector(node);
680 dbg_info *dbgi = get_irn_dbg_info(node);
681 ir_node *new_op = be_transform_node(selector);
682 ir_node *const_graph;
686 const ir_edge_t *edge;
/* First pass: find the minimum and maximum case proj numbers. */
693 foreach_out_edge(node, edge) {
694 proj = get_edge_src_irn(edge);
695 assert(is_Proj(proj) && "Only proj allowed at SwitchJmp");
697 pn = get_Proj_proj(proj);
699 min = pn<min ? pn : min;
700 max = pn>max ? pn : max;
704 n_projs = max - translation + 1;
/* Second pass: rebase all proj numbers so the table starts at 0. */
706 foreach_out_edge(node, edge) {
707 proj = get_edge_src_irn(edge);
708 assert(is_Proj(proj) && "Only proj allowed at SwitchJmp");
710 pn = get_Proj_proj(proj) - translation;
711 set_Proj_proj(proj, pn);
/* Subtract the translation offset from the selector before indexing. */
714 const_graph = create_const_graph_value(dbgi, block, translation);
715 sub = new_bd_sparc_Sub_reg(dbgi, block, new_op, const_graph);
716 return new_bd_sparc_SwitchJmp(dbgi, block, sub, n_projs, get_Cond_default_proj(node) - translation);
720 * Transform Cond nodes
722 static ir_node *gen_Cond(ir_node *node)
724 ir_node *selector = get_Cond_selector(node);
725 ir_mode *mode = get_irn_mode(selector);
/* Non-boolean selector means a switch statement. */
731 if (mode != mode_b) {
732 return gen_SwitchJmp(node);
735 // regular if/else jumps
736 assert(is_Proj(selector));
738 block = be_transform_node(get_nodes_block(node));
739 dbgi = get_irn_dbg_info(node);
740 flag_node = be_transform_node(get_Proj_pred(selector));
741 return new_bd_sparc_Branch(dbgi, block, flag_node, get_Proj_proj(selector));
/* Transform a Cmp node into a sparc Tst (compare against 0) or Cmp. */
747 static ir_node *gen_Cmp(ir_node *node)
749 ir_node *block = be_transform_node(get_nodes_block(node));
750 ir_node *op1 = get_Cmp_left(node);
751 ir_node *op2 = get_Cmp_right(node);
752 ir_mode *cmp_mode = get_irn_mode(op1);
753 dbg_info *dbgi = get_irn_dbg_info(node);
758 if (mode_is_float(cmp_mode)) {
759 panic("FloatCmp not implemented");
763 if (get_mode_size_bits(cmp_mode) != 32) {
764 panic("CmpMode != 32bit not supported yet");
768 assert(get_irn_mode(op2) == cmp_mode);
769 is_unsigned = !mode_is_signed(cmp_mode);
771 /* compare with 0 can be done with Tst */
773 if (is_Const(op2) && tarval_is_null(get_Const_tarval(op2))) {
774 new_op1 = be_transform_node(op1);
775 return new_bd_sparc_Tst(dbgi, block, new_op1, false,
/* Mirrored case: 0 on the left — the Tst flag marks the swap. */
779 if (is_Const(op1) && tarval_is_null(get_Const_tarval(op1))) {
780 new_op2 = be_transform_node(op2);
781 return new_bd_sparc_Tst(dbgi, block, new_op2, true,
786 /* integer compare */
787 new_op1 = be_transform_node(op1);
788 new_op1 = gen_extension(dbgi, block, new_op1, cmp_mode);
789 new_op2 = be_transform_node(op2);
790 new_op2 = gen_extension(dbgi, block, new_op2, cmp_mode);
791 return new_bd_sparc_Cmp_reg(dbgi, block, new_op1, new_op2, false, is_unsigned);
795 * Transforms a SymConst node.
797 static ir_node *gen_SymConst(ir_node *node)
799 ir_node *block = be_transform_node(get_nodes_block(node));
800 ir_entity *entity = get_SymConst_entity(node);
801 dbg_info *dbgi = get_irn_dbg_info(node);
804 new_node = new_bd_sparc_SymConst(dbgi, block, entity);
805 be_dep_on_frame(new_node);
810 * Transforms a Conv node.
813 static ir_node *gen_Conv(ir_node *node)
815 ir_node *block = be_transform_node(get_nodes_block(node));
816 ir_node *op = get_Conv_op(node);
817 ir_node *new_op = be_transform_node(op);
818 ir_mode *src_mode = get_irn_mode(op);
819 ir_mode *dst_mode = get_irn_mode(node);
820 dbg_info *dbg = get_irn_dbg_info(node);
822 int src_bits = get_mode_size_bits(src_mode);
823 int dst_bits = get_mode_size_bits(dst_mode);
/* Identical modes: nothing to convert. */
825 if (src_mode == dst_mode)
828 if (mode_is_float(src_mode) || mode_is_float(dst_mode)) {
829 assert((src_bits <= 64 && dst_bits <= 64) && "quad FP not implemented");
831 if (mode_is_float(src_mode)) {
832 if (mode_is_float(dst_mode)) {
833 // float -> float conv
834 if (src_bits > dst_bits) {
835 return new_bd_sparc_FpDToFpS(dbg, block, new_op, dst_mode);
837 return new_bd_sparc_FpSToFpD(dbg, block, new_op, dst_mode);
/* float -> int conversions, dispatched on source width. */
843 return new_bd_sparc_FpSToInt(dbg, block, new_op, dst_mode);
845 return new_bd_sparc_FpDToInt(dbg, block, new_op, dst_mode);
847 panic("quad FP not implemented");
/* int -> float conversions, dispatched on destination width. */
854 return new_bd_sparc_IntToFpS(dbg, block, new_op, src_mode);
856 return new_bd_sparc_IntToFpD(dbg, block, new_op, src_mode);
858 panic("quad FP not implemented");
861 } else { /* complete in gp registers */
865 if (src_bits == dst_bits) {
866 /* kill unnecessary conv */
870 if (src_bits < dst_bits) {
/* Upscaling: skip the extension when the upper bits are already clean. */
878 if (upper_bits_clean(new_op, min_mode)) {
882 if (mode_is_signed(min_mode)) {
883 return gen_sign_extension(dbg, block, new_op, min_bits);
885 return gen_zero_extension(dbg, block, new_op, min_bits);
/* Transform an Unknown node: materialize a 0 in a gp register. */
890 static ir_node *gen_Unknown(ir_node *node)
892 ir_node *block = get_nodes_block(node);
893 ir_node *new_block = be_transform_node(block);
894 dbg_info *dbgi = get_irn_dbg_info(node);
896 /* just produce a 0 */
897 ir_mode *mode = get_irn_mode(node);
898 if (mode_is_float(mode)) {
899 panic("FP not implemented");
/* NOTE(review): this statement follows a panic() and appears unreachable —
 * verify against the full source whether lines are missing here. */
900 be_dep_on_frame(node);
902 } else if (mode_needs_gp_reg(mode)) {
903 return create_const_graph_value(dbgi, new_block, 0);
906 panic("Unexpected Unknown mode");
910 * Transform some Phi nodes
912 static ir_node *gen_Phi(ir_node *node)
914 const arch_register_req_t *req;
915 ir_node *block = be_transform_node(get_nodes_block(node));
916 ir_graph *irg = current_ir_graph;
917 dbg_info *dbgi = get_irn_dbg_info(node);
918 ir_mode *mode = get_irn_mode(node);
921 if (mode_needs_gp_reg(mode)) {
922 /* we shouldn't have any 64bit stuff around anymore */
923 assert(get_mode_size_bits(mode) <= 32);
924 /* all integer operations are on 32bit registers now */
926 req = sparc_reg_classes[CLASS_sparc_gp].class_req;
928 req = arch_no_register_req;
931 /* phi nodes allow loops, so we use the old arguments for now
932 * and fix this later */
933 phi = new_ir_node(dbgi, irg, block, op_Phi, mode, get_irn_arity(node), get_irn_in(node) + 1);
934 copy_node_attr(irg, node, phi);
935 be_duplicate_deps(node, phi);
936 arch_set_out_register_req(phi, 0, req);
/* Predecessors are fixed up later by the generic transform driver. */
937 be_enqueue_preds(node);
943 * Transform a Proj from a Load.
945 static ir_node *gen_Proj_Load(ir_node *node)
947 ir_node *load = get_Proj_pred(node);
948 ir_node *new_load = be_transform_node(load);
949 dbg_info *dbgi = get_irn_dbg_info(node);
950 long proj = get_Proj_proj(node);
952 /* renumber the proj */
953 switch (get_sparc_irn_opcode(new_load)) {
955 /* handle all gp loads equal: they have the same proj numbers. */
956 if (proj == pn_Load_res) {
957 return new_rd_Proj(dbgi, new_load, mode_Iu, pn_sparc_Load_res);
958 } else if (proj == pn_Load_M) {
959 return new_rd_Proj(dbgi, new_load, mode_M, pn_sparc_Load_M);
963 case iro_sparc_fpaLoad:
964 panic("FP not implemented yet");
968 panic("Unsupported Proj from Load");
971 return be_duplicate_node(node);
975 * Transform the Projs of a be_AddSP.
977 static ir_node *gen_Proj_be_AddSP(ir_node *node)
979 ir_node *pred = get_Proj_pred(node);
980 ir_node *new_pred = be_transform_node(pred);
981 dbg_info *dbgi = get_irn_dbg_info(node);
982 long proj = get_Proj_proj(node);
/* NOTE(review): gen_be_AddSP builds a sparc AddSP node, yet these projs
 * use pn_sparc_SubSP_* constants (and gen_Proj_be_SubSP the AddSP ones).
 * Possibly intentional because the SPARC stack grows downward — confirm
 * against gen_sparc_new_nodes. */
984 if (proj == pn_be_AddSP_sp) {
985 ir_node *res = new_rd_Proj(dbgi, new_pred, mode_Iu,
986 pn_sparc_SubSP_stack);
987 arch_set_irn_register(res, &sparc_gp_regs[REG_SP]);
989 } else if (proj == pn_be_AddSP_res) {
990 return new_rd_Proj(dbgi, new_pred, mode_Iu, pn_sparc_SubSP_stack);
991 } else if (proj == pn_be_AddSP_M) {
992 return new_rd_Proj(dbgi, new_pred, mode_M, pn_sparc_SubSP_M);
995 panic("Unsupported Proj from AddSP");
999 * Transform the Projs of a be_SubSP.
1001 static ir_node *gen_Proj_be_SubSP(ir_node *node)
1003 ir_node *pred = get_Proj_pred(node);
1004 ir_node *new_pred = be_transform_node(pred);
1005 dbg_info *dbgi = get_irn_dbg_info(node);
1006 long proj = get_Proj_proj(node);
1008 if (proj == pn_be_SubSP_sp) {
1009 ir_node *res = new_rd_Proj(dbgi, new_pred, mode_Iu,
1010 pn_sparc_AddSP_stack);
1011 arch_set_irn_register(res, &sparc_gp_regs[REG_SP]);
1013 } else if (proj == pn_be_SubSP_M) {
1014 return new_rd_Proj(dbgi, new_pred, mode_M, pn_sparc_AddSP_M);
1017 panic("Unsupported Proj from SubSP");
1021 * Transform the Projs from a Cmp.
1023 static ir_node *gen_Proj_Cmp(ir_node *node)
1026 panic("not implemented");
1030 * transform Projs from a Div
1032 static ir_node *gen_Proj_Div(ir_node *node)
1034 ir_node *pred = get_Proj_pred(node);
1035 ir_node *new_pred = be_transform_node(pred);
1036 dbg_info *dbgi = get_irn_dbg_info(node);
1037 ir_mode *mode = get_irn_mode(node);
1038 long proj = get_Proj_proj(node);
1042 if (is_sparc_Div(new_pred)) {
1043 return new_rd_Proj(dbgi, new_pred, mode, pn_sparc_Div_res);
1049 panic("Unsupported Proj from Div");
1054 * Transform a Proj node.
/* Central dispatcher: routes the Proj to the specialized handler matching
 * its predecessor's opcode; falls back to re-projecting / duplicating. */
1056 static ir_node *gen_Proj(ir_node *node)
1058 ir_graph *irg = current_ir_graph;
1059 dbg_info *dbgi = get_irn_dbg_info(node);
1060 ir_node *pred = get_Proj_pred(node);
1061 long proj = get_Proj_proj(node);
1066 if (is_Store(pred)) {
1067 if (proj == pn_Store_M) {
1068 return be_transform_node(pred);
1070 panic("Unsupported Proj from Store");
1072 } else if (is_Load(pred)) {
1073 return gen_Proj_Load(node);
1074 } else if (be_is_SubSP(pred)) {
1075 //panic("gen_Proj not implemented for SubSP");
1076 return gen_Proj_be_SubSP(node);
1077 } else if (be_is_AddSP(pred)) {
1078 //panic("gen_Proj not implemented for AddSP");
1079 return gen_Proj_be_AddSP(node);
1080 } else if (is_Cmp(pred)) {
1081 //panic("gen_Proj not implemented for Cmp");
1082 return gen_Proj_Cmp(node);
1083 } else if (is_Div(pred)) {
1084 return gen_Proj_Div(node);
1085 } else if (is_Start(pred)) {
1087 if (proj == pn_Start_X_initial_exec) {
1088 ir_node *block = get_nodes_block(pred);
1091 // we exchange the ProjX with a jump
1092 block = be_transform_node(block);
1093 jump = new_rd_Jmp(dbgi, block);
1097 if (node == get_irg_anchor(irg, anchor_tls)) {
1098 return gen_Proj_tls(node);
/* Generic fall-through: re-create the Proj on the transformed pred,
 * normalizing gp-register values to mode_Iu. */
1102 ir_node *new_pred = be_transform_node(pred);
1103 ir_mode *mode = get_irn_mode(node);
1104 if (mode_needs_gp_reg(mode)) {
1105 ir_node *new_proj = new_r_Proj(new_pred, mode_Iu, get_Proj_proj(node));
/* Keep the original node number for debugging/dump readability. */
1106 new_proj->node_nr = node->node_nr;
1111 return be_duplicate_node(node);
/* Transform an unconditional Jmp. */
1118 static ir_node *gen_Jmp(ir_node *node)
1120 ir_node *block = get_nodes_block(node);
1121 ir_node *new_block = be_transform_node(block);
1122 dbg_info *dbgi = get_irn_dbg_info(node);
1124 return new_bd_sparc_Jmp(dbgi, new_block);
1128 * the BAD transformer.
/* Placeholder transformer for opcodes the backend cannot handle yet. */
1130 static ir_node *bad_transform(ir_node *irn)
1132 panic("SPARC backend: Not implemented: %+F", irn);
1136 * Set a node emitter. Make it a bit more type safe.
1138 static void set_transformer(ir_op *op, be_transform_func sparc_transform_func)
1140 op->ops.generic = (op_func)sparc_transform_func;
1144 * configure transformation callbacks
/* Clears all generic op functions, then installs one transformer per Firm
 * opcode. Opcodes mapped to bad_transform abort when encountered. */
1146 void sparc_register_transformers(void)
1148 clear_irp_opcodes_generic_func();
1149 set_transformer(op_Add, gen_Add);
1150 set_transformer(op_Store, gen_Store);
1151 set_transformer(op_Const, gen_Const);
1152 set_transformer(op_Load, gen_Load);
1153 set_transformer(op_Sub, gen_Sub);
1155 set_transformer(op_be_AddSP, gen_be_AddSP);
1156 set_transformer(op_be_SubSP, gen_be_SubSP);
1157 set_transformer(op_be_Copy, gen_be_Copy);
1158 set_transformer(op_be_Call, gen_be_Call);
1159 set_transformer(op_be_FrameAddr, gen_be_FrameAddr);
1161 set_transformer(op_Cond, gen_Cond);
1162 set_transformer(op_Cmp, gen_Cmp);
1164 set_transformer(op_SymConst, gen_SymConst);
1166 set_transformer(op_Phi, gen_Phi);
1167 set_transformer(op_Proj, gen_Proj);
1169 set_transformer(op_Conv, gen_Conv);
1170 set_transformer(op_Jmp, gen_Jmp);
1172 set_transformer(op_Mul, gen_Mul);
1173 set_transformer(op_Mulh, gen_Mulh);
1174 set_transformer(op_Div, gen_Div);
1175 set_transformer(op_Abs, gen_Abs);
1176 set_transformer(op_Shl, gen_Shl);
1177 set_transformer(op_Shr, gen_Shr);
/* Firm's Shrs (arithmetic shift right) maps to the sra-based gen_Shra. */
1178 set_transformer(op_Shrs, gen_Shra);
1180 set_transformer(op_Minus, gen_Minus);
1181 set_transformer(op_Not, gen_Not);
1182 set_transformer(op_And, gen_And);
1183 set_transformer(op_Or, gen_Or);
/* Firm calls xor "Eor". */
1184 set_transformer(op_Eor, gen_Xor);
1186 set_transformer(op_Unknown, gen_Unknown);
1191 set_transformer(op_CopyB, gen_CopyB);
1192 set_transformer(op_Quot, gen_Quot);
1193 set_transformer(op_Rotl, gen_Rotl);
/* Everything below is not supported by this backend yet. */
1196 set_transformer(op_ASM, bad_transform);
1197 set_transformer(op_Builtin, bad_transform);
1198 set_transformer(op_CallBegin, bad_transform);
1199 set_transformer(op_Cast, bad_transform);
1200 set_transformer(op_Confirm, bad_transform);
1201 set_transformer(op_DivMod, bad_transform);
1202 set_transformer(op_EndExcept, bad_transform);
1203 set_transformer(op_EndReg, bad_transform);
1204 set_transformer(op_Filter, bad_transform);
1205 set_transformer(op_Free, bad_transform);
1206 set_transformer(op_Id, bad_transform);
1207 set_transformer(op_InstOf, bad_transform);
1209 set_transformer(op_Mux, bad_transform);
1210 set_transformer(op_Raise, bad_transform);
1211 set_transformer(op_Sel, bad_transform);
1212 set_transformer(op_Tuple, bad_transform);
1217 * Pre-transform all unknown nodes.
1219 static void sparc_pretransform_node(void)
1221 sparc_code_gen_t *cg = env_cg;
1223 //cg->unknown_gp = be_pre_transform_node(cg->unknown_gp);
1224 //cg->unknown_fpa = be_pre_transform_node(cg->unknown_fpa);
1228 * Transform a Firm graph into a SPARC graph.
/* Entry point of the pass: installs the per-opcode callbacks, then runs
 * the generic backend transform driver over the graph. */
1230 void sparc_transform_graph(sparc_code_gen_t *cg)
1232 sparc_register_transformers();
1234 be_transform_graph(cg->irg, sparc_pretransform_node);
1237 void sparc_init_transform(void)
1239 FIRM_DBG_REGISTER(dbg, "firm.be.sparc.transform");