 * Copyright (C) 1995-2010 University of Karlsruhe. All rights reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief code selection (transform FIRM into SPARC FIRM)
29 #include "irgraph_t.h"
41 #include "../benode.h"
43 #include "../beutil.h"
44 #include "../betranshlp.h"
45 #include "bearch_sparc_t.h"
47 #include "sparc_nodes_attr.h"
48 #include "sparc_transform.h"
49 #include "sparc_new_nodes.h"
50 #include "gen_sparc_new_nodes.h"
52 #include "gen_sparc_regalloc_if.h"
56 DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
58 static sparc_code_gen_t *env_cg;
60 static inline int mode_needs_gp_reg(ir_mode *mode)
62 return mode_is_int(mode) || mode_is_reference(mode);
66 * Create an And that will zero out upper bits.
68 * @param dbgi debug info
69 * @param block the basic block
70 * @param op the original node
71 * @param src_bits number of lower bits that will remain
73 static ir_node *gen_zero_extension(dbg_info *dbgi, ir_node *block, ir_node *op,
77 return new_bd_sparc_And_imm(dbgi, block, op, 0xFF);
78 } else if (src_bits == 16) {
79 ir_node *lshift = new_bd_sparc_ShiftLL_imm(dbgi, block, op, 16);
80 ir_node *rshift = new_bd_sparc_ShiftLR_imm(dbgi, block, lshift, 16);
83 panic("zero extension only supported for 8 and 16 bits");
88 * Generate code for a sign extension.
90 static ir_node *gen_sign_extension(dbg_info *dbgi, ir_node *block, ir_node *op,
93 int shift_width = 32 - src_bits;
94 ir_node *lshift_node = new_bd_sparc_ShiftLL_imm(dbgi, block, op, shift_width);
95 ir_node *rshift_node = new_bd_sparc_ShiftRA_imm(dbgi, block, lshift_node, shift_width);
100 * returns true if it is assured, that the upper bits of a node are "clean"
101 * which means for a 16 or 8 bit value, that the upper bits in the register
102 * are 0 for unsigned and a copy of the last significant bit for signed
105 static bool upper_bits_clean(ir_node *transformed_node, ir_mode *mode)
107 (void) transformed_node;
113 static ir_node *gen_extension(dbg_info *dbgi, ir_node *block, ir_node *op,
116 int bits = get_mode_size_bits(orig_mode);
120 if (mode_is_signed(orig_mode)) {
121 return gen_sign_extension(dbgi, block, op, bits);
123 return gen_zero_extension(dbgi, block, op, bits);
129 * Creates a possible DAG for a constant.
131 static ir_node *create_const_graph_value(dbg_info *dbgi, ir_node *block,
136 // we need to load hi & lo separately
137 if (value < -4096 || value > 4095) {
138 ir_node *hi = new_bd_sparc_HiImm(dbgi, block, (int) value);
139 result = new_bd_sparc_LoImm(dbgi, block, hi, value);
142 result = new_bd_sparc_Mov_imm(dbgi, block, (int) value);
143 be_dep_on_frame(result);
151 * Create a DAG constructing a given Const.
153 * @param irn a Firm const
155 static ir_node *create_const_graph(ir_node *irn, ir_node *block)
157 tarval *tv = get_Const_tarval(irn);
158 ir_mode *mode = get_tarval_mode(tv);
159 dbg_info *dbgi = get_irn_dbg_info(irn);
163 if (mode_is_reference(mode)) {
164 /* SPARC V8 is 32bit, so we can safely convert a reference tarval into Iu */
165 assert(get_mode_size_bits(mode) == get_mode_size_bits(mode_Iu));
166 tv = tarval_convert_to(tv, mode_Iu);
169 value = get_tarval_long(tv);
170 return create_const_graph_value(dbgi, block, value);
176 MATCH_COMMUTATIVE = 1 << 0,
177 MATCH_SIZE_NEUTRAL = 1 << 1,
180 typedef ir_node* (*new_binop_reg_func) (dbg_info *dbgi, ir_node *block, ir_node *op1, ir_node *op2);
181 typedef ir_node* (*new_binop_imm_func) (dbg_info *dbgi, ir_node *block, ir_node *op1, int simm13);
184 * checks if a node's value can be encoded as a immediate
187 static bool is_imm_encodeable(const ir_node *node)
194 val = get_tarval_long(get_Const_tarval(node));
196 return !(val < -4096 || val > 4095);
200 * helper function for binop operations
202 * @param new_binop_reg_func register generation function ptr
203 * @param new_binop_imm_func immediate generation function ptr
205 static ir_node *gen_helper_binop(ir_node *node, match_flags_t flags,
206 new_binop_reg_func new_reg, new_binop_imm_func new_imm)
208 ir_node *block = be_transform_node(get_nodes_block(node));
209 ir_node *op1 = get_binop_left(node);
211 ir_node *op2 = get_binop_right(node);
213 dbg_info *dbgi = get_irn_dbg_info(node);
216 if (flags & MATCH_SIZE_NEUTRAL) {
217 op1 = arm_skip_downconv(op1);
218 op2 = arm_skip_downconv(op2);
220 assert(get_mode_size_bits(get_irn_mode(node)) == 32);
223 if (is_imm_encodeable(op2)) {
224 ir_node *new_op1 = be_transform_node(op1);
225 return new_imm(dbgi, block, new_op1, get_tarval_long(get_Const_tarval(op2)));
228 new_op2 = be_transform_node(op2);
230 if ((flags & MATCH_COMMUTATIVE) && is_imm_encodeable(op1)) {
231 return new_imm(dbgi, block, new_op2, get_tarval_long(get_Const_tarval(op1)) );
234 new_op1 = be_transform_node(op1);
236 return new_reg(dbgi, block, new_op1, new_op2);
240 * Creates an sparc Add.
242 * @param node FIRM node
243 * @return the created sparc Add node
245 static ir_node *gen_Add(ir_node *node)
247 ir_mode *mode = get_irn_mode(node);
248 ir_node *block = be_transform_node(get_nodes_block(node));
249 dbg_info *dbgi = get_irn_dbg_info(node);
254 if (mode_is_float(mode))
255 panic("FP not implemented yet");
257 return gen_helper_binop(node, MATCH_COMMUTATIVE | MATCH_SIZE_NEUTRAL, new_bd_sparc_Add_reg, new_bd_sparc_Add_imm);
262 * Creates an sparc Sub.
264 * @param node FIRM node
265 * @return the created sparc Sub node
267 static ir_node *gen_Sub(ir_node *node)
269 ir_mode *mode = get_irn_mode(node);
270 ir_node *block = be_transform_node(get_nodes_block(node));
271 dbg_info *dbgi = get_irn_dbg_info(node);
276 if (mode_is_float(mode))
277 panic("FP not implemented yet");
279 return gen_helper_binop(node, MATCH_SIZE_NEUTRAL, new_bd_sparc_Sub_reg, new_bd_sparc_Sub_imm);
286 * @param node the ir Load node
287 * @return the created sparc Load node
289 static ir_node *gen_Load(ir_node *node)
291 ir_mode *mode = get_Load_mode(node);
292 ir_node *block = be_transform_node(get_nodes_block(node));
293 ir_node *ptr = get_Load_ptr(node);
294 ir_node *new_ptr = be_transform_node(ptr);
295 ir_node *mem = get_Load_mem(node);
296 ir_node *new_mem = be_transform_node(mem);
297 dbg_info *dbgi = get_irn_dbg_info(node);
298 ir_node *new_load = NULL;
300 if (mode_is_float(mode))
301 panic("SPARC: no fp implementation yet");
303 new_load = new_bd_sparc_Load(dbgi, block, new_ptr, new_mem, mode, NULL, 0, 0, false);
304 set_irn_pinned(new_load, get_irn_pinned(node));
312 * Transforms a Store.
314 * @param node the ir Store node
315 * @return the created sparc Store node
317 static ir_node *gen_Store(ir_node *node)
319 ir_node *block = be_transform_node(get_nodes_block(node));
320 ir_node *ptr = get_Store_ptr(node);
321 ir_node *new_ptr = be_transform_node(ptr);
322 ir_node *mem = get_Store_mem(node);
323 ir_node *new_mem = be_transform_node(mem);
324 ir_node *val = get_Store_value(node);
325 ir_node *new_val = be_transform_node(val);
326 ir_mode *mode = get_irn_mode(val);
327 dbg_info *dbgi = get_irn_dbg_info(node);
328 ir_node *new_store = NULL;
330 if (mode_is_float(mode))
331 panic("SPARC: no fp implementation yet");
333 new_store = new_bd_sparc_Store(dbgi, block, new_ptr, new_val, new_mem, mode, NULL, 0, 0, false);
339 * Creates an sparc Mul.
340 * returns the lower 32bits of the 64bit multiply result
342 * @return the created sparc Mul node
344 static ir_node *gen_Mul(ir_node *node) {
345 ir_mode *mode = get_irn_mode(node);
346 dbg_info *dbgi = get_irn_dbg_info(node);
349 ir_node *proj_res_low;
351 if (mode_is_float(mode))
352 panic("FP not supported yet");
355 assert(mode_is_data(mode));
356 mul = gen_helper_binop(node, MATCH_COMMUTATIVE | MATCH_SIZE_NEUTRAL, new_bd_sparc_UMul_reg, new_bd_sparc_UMul_imm);
357 arch_irn_add_flags(mul, arch_irn_flags_modify_flags);
359 proj_res_low = new_rd_Proj(dbgi, mul, mode_Iu, pn_sparc_UMul_low);
364 * Creates an sparc Mulh.
365 * Mulh returns the upper 32bits of a mul instruction
367 * @return the created sparc Mulh node
369 static ir_node *gen_Mulh(ir_node *node) {
370 ir_mode *mode = get_irn_mode(node);
371 dbg_info *dbgi = get_irn_dbg_info(node);
374 ir_node *proj_res_hi;
376 if (mode_is_float(mode))
377 panic("FP not supported yet");
380 assert(mode_is_data(mode));
381 mul = gen_helper_binop(node, MATCH_COMMUTATIVE | MATCH_SIZE_NEUTRAL, new_bd_sparc_UMul_reg, new_bd_sparc_UMul_imm);
382 arch_irn_add_flags(mul, arch_irn_flags_modify_flags);
384 proj_res_hi = new_rd_Proj(dbgi, mul, mode_Iu, pn_sparc_UMul_low); // TODO: this actually should be pn_sparc_UMul_high !
385 //arch_set_irn_register(proj_res_hi, &sparc_flags_regs[REG_Y]);
390 * Creates an sparc Div.
392 * @return the created sparc Div node
394 static ir_node *gen_Div(ir_node *node) {
396 ir_mode *mode = get_irn_mode(node);
400 if (mode_is_float(mode))
401 panic("FP not supported yet");
403 //assert(mode_is_data(mode));
404 div = gen_helper_binop(node, MATCH_SIZE_NEUTRAL, new_bd_sparc_UDiv_reg, new_bd_sparc_UDiv_imm);
410 * transform abs node:
418 static ir_node *gen_Abs(ir_node *node) {
419 ir_node *block = be_transform_node(get_nodes_block(node));
420 ir_mode *mode = get_irn_mode(node);
421 dbg_info *dbgi = get_irn_dbg_info(node);
422 ir_node *op = get_Abs_op(node);
424 ir_node *mov, *sra, *xor, *sub, *new_op;
426 if (mode_is_float(mode))
427 panic("FP not supported yet");
429 new_op = be_transform_node(op);
431 mov = new_bd_sparc_Mov_reg(dbgi, block, new_op);
432 sra = new_bd_sparc_ShiftRA_imm(dbgi, block, mov, 31);
433 xor = new_bd_sparc_Xor_reg(dbgi, block, new_op, sra);
434 sub = new_bd_sparc_Sub_reg(dbgi, block, sra, xor);
440 * Transforms a Not node.
442 * @return the created ARM Not node
444 static ir_node *gen_Not(ir_node *node)
446 ir_node *block = be_transform_node(get_nodes_block(node));
447 ir_node *op = get_Not_op(node);
448 ir_node *new_op = be_transform_node(op);
449 dbg_info *dbgi = get_irn_dbg_info(node);
451 return new_bd_sparc_Not(dbgi, block, new_op);
454 static ir_node *gen_And(ir_node *node)
456 ir_mode *mode = get_irn_mode(node);
457 ir_node *block = be_transform_node(get_nodes_block(node));
458 dbg_info *dbgi = get_irn_dbg_info(node);
463 if (mode_is_float(mode))
464 panic("FP not implemented yet");
466 return gen_helper_binop(node, MATCH_COMMUTATIVE, new_bd_sparc_And_reg, new_bd_sparc_And_imm);
469 static ir_node *gen_Or(ir_node *node)
471 ir_mode *mode = get_irn_mode(node);
472 ir_node *block = be_transform_node(get_nodes_block(node));
473 dbg_info *dbgi = get_irn_dbg_info(node);
478 if (mode_is_float(mode))
479 panic("FP not implemented yet");
481 return gen_helper_binop(node, MATCH_COMMUTATIVE, new_bd_sparc_Or_reg, new_bd_sparc_Or_imm);
484 static ir_node *gen_Shl(ir_node *node)
486 return gen_helper_binop(node, MATCH_SIZE_NEUTRAL, new_bd_sparc_ShiftLL_reg, new_bd_sparc_ShiftLL_imm);
489 static ir_node *gen_Shr(ir_node *node)
491 return gen_helper_binop(node, MATCH_SIZE_NEUTRAL, new_bd_sparc_ShiftLR_reg, new_bd_sparc_ShiftLR_imm);
494 static ir_node *gen_Shra(ir_node *node)
496 return gen_helper_binop(node, MATCH_SIZE_NEUTRAL, new_bd_sparc_ShiftRA_reg, new_bd_sparc_ShiftRA_imm);
499 /****** TRANSFORM GENERAL BACKEND NODES ********/
502 * Transforms a Minus node.
505 static ir_node *gen_Minus(ir_node *node)
507 ir_node *block = be_transform_node(get_nodes_block(node));
508 ir_node *op = get_Minus_op(node);
509 ir_node *new_op = be_transform_node(op);
510 dbg_info *dbgi = get_irn_dbg_info(node);
511 ir_mode *mode = get_irn_mode(node);
513 if (mode_is_float(mode)) {
514 panic("FP not implemented yet");
517 assert(mode_is_data(mode));
518 return new_bd_sparc_Minus(dbgi, block, new_op);
522 * Transforms a Const node.
524 * @param node the ir Store node
525 * @return The transformed sparc node.
527 static ir_node *gen_Const(ir_node *node)
529 ir_node *block = be_transform_node(get_nodes_block(node));
530 ir_mode *mode = get_irn_mode(node);
531 dbg_info *dbg = get_irn_dbg_info(node);
535 if (mode_is_float(mode)) {
536 panic("FP not supported yet");
538 return create_const_graph(node, block);
543 * @param node the ir AddSP node
544 * @return transformed sparc SAVE node
546 static ir_node *gen_be_AddSP(ir_node *node)
548 ir_node *block = be_transform_node(get_nodes_block(node));
549 ir_node *sz = get_irn_n(node, be_pos_AddSP_size);
550 ir_node *new_sz = be_transform_node(sz);
551 ir_node *sp = get_irn_n(node, be_pos_AddSP_old_sp);
552 ir_node *new_sp = be_transform_node(sp);
553 dbg_info *dbgi = get_irn_dbg_info(node);
554 ir_node *nomem = new_NoMem();
557 /* SPARC stack grows in reverse direction */
558 new_op = new_bd_sparc_AddSP(dbgi, block, new_sp, new_sz, nomem);
566 * @param node the ir SubSP node
567 * @return transformed sparc SAVE node
569 static ir_node *gen_be_SubSP(ir_node *node)
571 ir_node *block = be_transform_node(get_nodes_block(node));
572 ir_node *sz = get_irn_n(node, be_pos_SubSP_size);
573 ir_node *new_sz = be_transform_node(sz);
574 ir_node *sp = get_irn_n(node, be_pos_SubSP_old_sp);
575 ir_node *new_sp = be_transform_node(sp);
576 dbg_info *dbgi = get_irn_dbg_info(node);
577 ir_node *nomem = new_NoMem();
580 /* SPARC stack grows in reverse direction */
581 new_op = new_bd_sparc_SubSP(dbgi, block, new_sp, new_sz, nomem);
586 * transform FrameAddr
588 static ir_node *gen_be_FrameAddr(ir_node *node)
590 ir_node *block = be_transform_node(get_nodes_block(node));
591 ir_entity *ent = be_get_frame_entity(node);
592 ir_node *fp = be_get_FrameAddr_frame(node);
593 ir_node *new_fp = be_transform_node(fp);
594 dbg_info *dbgi = get_irn_dbg_info(node);
596 new_node = new_bd_sparc_FrameAddr(dbgi, block, new_fp, ent);
601 * Transform a be_Copy.
603 static ir_node *gen_be_Copy(ir_node *node)
605 ir_node *result = be_duplicate_node(node);
606 ir_mode *mode = get_irn_mode(result);
608 if (mode_needs_gp_reg(mode)) {
609 set_irn_mode(node, mode_Iu);
618 static ir_node *gen_be_Call(ir_node *node)
620 ir_node *res = be_duplicate_node(node);
621 arch_irn_add_flags(res, arch_irn_flags_modify_flags);
626 * Transforms a Switch.
629 static ir_node *gen_SwitchJmp(ir_node *node)
631 ir_node *block = be_transform_node(get_nodes_block(node));
632 ir_node *selector = get_Cond_selector(node);
633 dbg_info *dbgi = get_irn_dbg_info(node);
634 ir_node *new_op = be_transform_node(selector);
635 ir_node *const_graph;
639 const ir_edge_t *edge;
646 foreach_out_edge(node, edge) {
647 proj = get_edge_src_irn(edge);
648 assert(is_Proj(proj) && "Only proj allowed at SwitchJmp");
650 pn = get_Proj_proj(proj);
652 min = pn<min ? pn : min;
653 max = pn>max ? pn : max;
657 n_projs = max - translation + 1;
659 foreach_out_edge(node, edge) {
660 proj = get_edge_src_irn(edge);
661 assert(is_Proj(proj) && "Only proj allowed at SwitchJmp");
663 pn = get_Proj_proj(proj) - translation;
664 set_Proj_proj(proj, pn);
667 const_graph = create_const_graph_value(dbgi, block, translation);
668 sub = new_bd_sparc_Sub_reg(dbgi, block, new_op, const_graph);
669 return new_bd_sparc_SwitchJmp(dbgi, block, sub, n_projs, get_Cond_default_proj(node) - translation);
673 * Transform Cond nodes
675 static ir_node *gen_Cond(ir_node *node)
677 ir_node *selector = get_Cond_selector(node);
678 ir_mode *mode = get_irn_mode(selector);
684 if (mode != mode_b) {
685 return gen_SwitchJmp(node);
688 // regular if/else jumps
689 assert(is_Proj(selector));
691 block = be_transform_node(get_nodes_block(node));
692 dbgi = get_irn_dbg_info(node);
693 flag_node = be_transform_node(get_Proj_pred(selector));
694 return new_bd_sparc_Branch(dbgi, block, flag_node, get_Proj_proj(selector));
700 static ir_node *gen_Cmp(ir_node *node)
702 ir_node *block = be_transform_node(get_nodes_block(node));
703 ir_node *op1 = get_Cmp_left(node);
704 ir_node *op2 = get_Cmp_right(node);
705 ir_mode *cmp_mode = get_irn_mode(op1);
706 dbg_info *dbgi = get_irn_dbg_info(node);
711 if (mode_is_float(cmp_mode)) {
712 panic("FloatCmp not implemented");
716 if (get_mode_size_bits(cmp_mode) != 32) {
717 panic("CmpMode != 32bit not supported yet");
721 assert(get_irn_mode(op2) == cmp_mode);
722 is_unsigned = !mode_is_signed(cmp_mode);
724 /* compare with 0 can be done with Tst */
726 if (is_Const(op2) && tarval_is_null(get_Const_tarval(op2))) {
727 new_op1 = be_transform_node(op1);
728 return new_bd_sparc_Tst(dbgi, block, new_op1, false,
732 if (is_Const(op1) && tarval_is_null(get_Const_tarval(op1))) {
733 new_op2 = be_transform_node(op2);
734 return new_bd_sparc_Tst(dbgi, block, new_op2, true,
739 /* integer compare */
740 new_op1 = be_transform_node(op1);
741 new_op1 = gen_extension(dbgi, block, new_op1, cmp_mode);
742 new_op2 = be_transform_node(op2);
743 new_op2 = gen_extension(dbgi, block, new_op2, cmp_mode);
744 return new_bd_sparc_Cmp_reg(dbgi, block, new_op1, new_op2, false, is_unsigned);
748 * Transforms a SymConst node.
750 static ir_node *gen_SymConst(ir_node *node)
752 ir_node *block = be_transform_node(get_nodes_block(node));
753 ir_entity *entity = get_SymConst_entity(node);
754 dbg_info *dbgi = get_irn_dbg_info(node);
757 new_node = new_bd_sparc_SymConst(dbgi, block, entity);
758 be_dep_on_frame(new_node);
763 * Transforms a Conv node.
766 static ir_node *gen_Conv(ir_node *node)
768 ir_node *block = be_transform_node(get_nodes_block(node));
769 ir_node *op = get_Conv_op(node);
770 ir_node *new_op = be_transform_node(op);
771 ir_mode *src_mode = get_irn_mode(op);
772 ir_mode *dst_mode = get_irn_mode(node);
773 dbg_info *dbg = get_irn_dbg_info(node);
775 if (src_mode == dst_mode)
778 if (mode_is_float(src_mode) || mode_is_float(dst_mode)) {
779 panic("FP not implemented");
780 } else { /* complete in gp registers */
781 int src_bits = get_mode_size_bits(src_mode);
782 int dst_bits = get_mode_size_bits(dst_mode);
786 if (src_bits == dst_bits) {
787 /* kill unneccessary conv */
791 if (src_bits < dst_bits) {
799 if (upper_bits_clean(new_op, min_mode)) {
803 if (mode_is_signed(min_mode)) {
804 return gen_sign_extension(dbg, block, new_op, min_bits);
806 return gen_zero_extension(dbg, block, new_op, min_bits);
811 static ir_node *gen_Unknown(ir_node *node)
813 ir_node *block = get_nodes_block(node);
814 ir_node *new_block = be_transform_node(block);
815 dbg_info *dbgi = get_irn_dbg_info(node);
817 /* just produce a 0 */
818 ir_mode *mode = get_irn_mode(node);
819 if (mode_is_float(mode)) {
820 panic("FP not implemented");
821 be_dep_on_frame(node);
823 } else if (mode_needs_gp_reg(mode)) {
824 return create_const_graph_value(dbgi, new_block, 0);
827 panic("Unexpected Unknown mode");
831 * Transform some Phi nodes
833 static ir_node *gen_Phi(ir_node *node)
835 const arch_register_req_t *req;
836 ir_node *block = be_transform_node(get_nodes_block(node));
837 ir_graph *irg = current_ir_graph;
838 dbg_info *dbgi = get_irn_dbg_info(node);
839 ir_mode *mode = get_irn_mode(node);
842 if (mode_needs_gp_reg(mode)) {
843 /* we shouldn't have any 64bit stuff around anymore */
844 assert(get_mode_size_bits(mode) <= 32);
845 /* all integer operations are on 32bit registers now */
847 req = sparc_reg_classes[CLASS_sparc_gp].class_req;
849 req = arch_no_register_req;
852 /* phi nodes allow loops, so we use the old arguments for now
853 * and fix this later */
854 phi = new_ir_node(dbgi, irg, block, op_Phi, mode, get_irn_arity(node), get_irn_in(node) + 1);
855 copy_node_attr(irg, node, phi);
856 be_duplicate_deps(node, phi);
857 arch_set_out_register_req(phi, 0, req);
858 be_enqueue_preds(node);
864 * Transform a Proj from a Load.
866 static ir_node *gen_Proj_Load(ir_node *node)
868 ir_node *load = get_Proj_pred(node);
869 ir_node *new_load = be_transform_node(load);
870 dbg_info *dbgi = get_irn_dbg_info(node);
871 long proj = get_Proj_proj(node);
873 /* renumber the proj */
874 switch (get_sparc_irn_opcode(new_load)) {
876 /* handle all gp loads equal: they have the same proj numbers. */
877 if (proj == pn_Load_res) {
878 return new_rd_Proj(dbgi, new_load, mode_Iu, pn_sparc_Load_res);
879 } else if (proj == pn_Load_M) {
880 return new_rd_Proj(dbgi, new_load, mode_M, pn_sparc_Load_M);
884 case iro_sparc_fpaLoad:
885 panic("FP not implemented yet");
889 panic("Unsupported Proj from Load");
892 return be_duplicate_node(node);
896 * Transform the Projs of a be_AddSP.
898 static ir_node *gen_Proj_be_AddSP(ir_node *node)
900 ir_node *pred = get_Proj_pred(node);
901 ir_node *new_pred = be_transform_node(pred);
902 dbg_info *dbgi = get_irn_dbg_info(node);
903 long proj = get_Proj_proj(node);
905 if (proj == pn_be_AddSP_sp) {
906 ir_node *res = new_rd_Proj(dbgi, new_pred, mode_Iu,
907 pn_sparc_SubSP_stack);
908 arch_set_irn_register(res, &sparc_gp_regs[REG_SP]);
910 } else if (proj == pn_be_AddSP_res) {
911 return new_rd_Proj(dbgi, new_pred, mode_Iu, pn_sparc_SubSP_stack);
912 } else if (proj == pn_be_AddSP_M) {
913 return new_rd_Proj(dbgi, new_pred, mode_M, pn_sparc_SubSP_M);
916 panic("Unsupported Proj from AddSP");
920 * Transform the Projs of a be_SubSP.
922 static ir_node *gen_Proj_be_SubSP(ir_node *node)
924 ir_node *pred = get_Proj_pred(node);
925 ir_node *new_pred = be_transform_node(pred);
926 dbg_info *dbgi = get_irn_dbg_info(node);
927 long proj = get_Proj_proj(node);
929 if (proj == pn_be_SubSP_sp) {
930 ir_node *res = new_rd_Proj(dbgi, new_pred, mode_Iu,
931 pn_sparc_AddSP_stack);
932 arch_set_irn_register(res, &sparc_gp_regs[REG_SP]);
934 } else if (proj == pn_be_SubSP_M) {
935 return new_rd_Proj(dbgi, new_pred, mode_M, pn_sparc_AddSP_M);
938 panic("Unsupported Proj from SubSP");
942 * Transform the Projs from a Cmp.
944 static ir_node *gen_Proj_Cmp(ir_node *node)
947 panic("not implemented");
951 static ir_node *gen_Proj_Div(ir_node *node)
953 ir_node *pred = get_Proj_pred(node);
954 ir_node *new_pred = be_transform_node(pred);
955 dbg_info *dbgi = get_irn_dbg_info(node);
956 ir_mode *mode = get_irn_mode(node);
957 long proj = get_Proj_proj(node);
961 if (is_sparc_UDiv(new_pred)) {
962 return new_rd_Proj(dbgi, new_pred, mode, pn_sparc_UDiv_res);
968 panic("Unsupported Proj from Div");
973 * Transform a Proj node.
975 static ir_node *gen_Proj(ir_node *node)
977 ir_graph *irg = current_ir_graph;
978 dbg_info *dbgi = get_irn_dbg_info(node);
979 ir_node *pred = get_Proj_pred(node);
980 long proj = get_Proj_proj(node);
985 if (is_Store(pred)) {
986 if (proj == pn_Store_M) {
987 return be_transform_node(pred);
989 panic("Unsupported Proj from Store");
991 } else if (is_Load(pred)) {
992 return gen_Proj_Load(node);
993 } else if (be_is_SubSP(pred)) {
994 //panic("gen_Proj not implemented for SubSP");
995 return gen_Proj_be_SubSP(node);
996 } else if (be_is_AddSP(pred)) {
997 //panic("gen_Proj not implemented for AddSP");
998 return gen_Proj_be_AddSP(node);
999 } else if (is_Cmp(pred)) {
1000 //panic("gen_Proj not implemented for Cmp");
1001 return gen_Proj_Cmp(node);
1002 } else if (is_Div(pred)) {
1003 return gen_Proj_Div(node);
1004 } else if (is_Start(pred)) {
1006 if (proj == pn_Start_X_initial_exec) {
1007 ir_node *block = get_nodes_block(pred);
1010 // we exchange the ProjX with a jump
1011 block = be_transform_node(block);
1012 jump = new_rd_Jmp(dbgi, block);
1016 if (node == get_irg_anchor(irg, anchor_tls)) {
1017 return gen_Proj_tls(node);
1021 ir_node *new_pred = be_transform_node(pred);
1022 ir_mode *mode = get_irn_mode(node);
1023 if (mode_needs_gp_reg(mode)) {
1024 ir_node *new_proj = new_r_Proj(new_pred, mode_Iu, get_Proj_proj(node));
1025 new_proj->node_nr = node->node_nr;
1030 return be_duplicate_node(node);
1037 static ir_node *gen_Jmp(ir_node *node)
1039 ir_node *block = get_nodes_block(node);
1040 ir_node *new_block = be_transform_node(block);
1041 dbg_info *dbgi = get_irn_dbg_info(node);
1043 return new_bd_sparc_Jmp(dbgi, new_block);
1047 * configure transformation callbacks
1049 void sparc_register_transformers(void)
1051 be_start_transform_setup();
1053 be_set_transform_function(op_Add, gen_Add);
1054 be_set_transform_function(op_Store, gen_Store);
1055 be_set_transform_function(op_Const, gen_Const);
1056 be_set_transform_function(op_Load, gen_Load);
1057 be_set_transform_function(op_Sub, gen_Sub);
1059 be_set_transform_function(op_be_AddSP, gen_be_AddSP);
1060 be_set_transform_function(op_be_SubSP, gen_be_SubSP);
1061 be_set_transform_function(op_be_Copy, gen_be_Copy);
1062 be_set_transform_function(op_be_Call, gen_be_Call);
1063 be_set_transform_function(op_be_FrameAddr, gen_be_FrameAddr);
1065 be_set_transform_function(op_Cond, gen_Cond);
1066 be_set_transform_function(op_Cmp, gen_Cmp);
1068 be_set_transform_function(op_SymConst, gen_SymConst);
1070 be_set_transform_function(op_Phi, gen_Phi);
1071 be_set_transform_function(op_Proj, gen_Proj);
1073 be_set_transform_function(op_Conv, gen_Conv);
1074 be_set_transform_function(op_Jmp, gen_Jmp);
1076 be_set_transform_function(op_Mul, gen_Mul);
1077 be_set_transform_function(op_Mulh, gen_Mulh);
1078 be_set_transform_function(op_Div, gen_Div);
1079 be_set_transform_function(op_Abs, gen_Abs);
1080 be_set_transform_function(op_Shl, gen_Shl);
1081 be_set_transform_function(op_Shr, gen_Shr);
1082 be_set_transform_function(op_Shrs, gen_Shra);
1084 be_set_transform_function(op_Minus, gen_Minus);
1085 be_set_transform_function(op_Not, gen_Not);
1086 be_set_transform_function(op_And, gen_And);
1087 be_set_transform_function(op_Or, gen_Or);
1089 be_set_transform_function(op_Unknown, gen_Unknown);
1094 be_set_transform_function(op_CopyB, gen_CopyB);
1095 be_set_transform_function(op_Eor, gen_Eor);
1096 be_set_transform_function(op_Quot, gen_Quot);
1097 be_set_transform_function(op_Rotl, gen_Rotl);
1102 * Transform a Firm graph into a SPARC graph.
1104 void sparc_transform_graph(sparc_code_gen_t *cg)
1106 sparc_register_transformers();
1108 be_transform_graph(cg->irg, NULL);
1111 void sparc_init_transform(void)
1113 FIRM_DBG_REGISTER(dbg, "firm.be.sparc.transform");