2 * Copyright (C) 1995-2010 University of Karlsruhe. All right reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief code selection (transform FIRM into SPARC FIRM)
29 #include "irgraph_t.h"
41 #include "../benode.h"
43 #include "../beutil.h"
44 #include "../betranshlp.h"
45 #include "bearch_sparc_t.h"
47 #include "sparc_nodes_attr.h"
48 #include "sparc_transform.h"
49 #include "sparc_new_nodes.h"
50 #include "gen_sparc_new_nodes.h"
52 #include "gen_sparc_regalloc_if.h"
/* Module-level state of the SPARC transform phase. */
/* debug module handle, only present in debug builds */
56 DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
/* current code-generator environment -- NOTE(review): assignment not visible in this chunk */
58 static sparc_code_gen_t *env_cg;
/* forward declaration: gen_SymConst is referenced before its definition below */
60 static ir_node *gen_SymConst(ir_node *node);
/* Returns nonzero iff values of @p mode live in general-purpose (integer)
 * registers, i.e. the mode is an integer or a reference/pointer mode. */
63 static inline int mode_needs_gp_reg(ir_mode *mode)
65 return mode_is_int(mode) || mode_is_reference(mode);
69 * Create an And that will zero out upper bits.
71 * @param dbgi debug info
72 * @param block the basic block
73 * @param op the original node
74 * @param src_bits number of lower bits that will remain
76 static ir_node *gen_zero_extension(dbg_info *dbgi, ir_node *block, ir_node *op,
/* 8-bit case: mask the low byte directly */
80 return new_bd_sparc_And_imm(dbgi, block, op, 0xFF);
81 } else if (src_bits == 16) {
/* 16-bit case: shift left then logical-shift right by 16 to clear the top half */
82 ir_node *lshift = new_bd_sparc_ShiftLL_imm(dbgi, block, op, 16);
83 ir_node *rshift = new_bd_sparc_ShiftLR_imm(dbgi, block, lshift, 16);
/* NOTE(review): the `return rshift;` line is missing from this extraction */
86 panic("zero extension only supported for 8 and 16 bits");
91 * Generate code for a sign extension.
/* Shift left by (32 - src_bits), then arithmetic-shift right by the same
 * amount so the sign bit of the src_bits-wide value fills the upper bits. */
93 static ir_node *gen_sign_extension(dbg_info *dbgi, ir_node *block, ir_node *op,
96 int shift_width = 32 - src_bits;
97 ir_node *lshift_node = new_bd_sparc_ShiftLL_imm(dbgi, block, op, shift_width);
98 ir_node *rshift_node = new_bd_sparc_ShiftRA_imm(dbgi, block, lshift_node, shift_width);
103 * returns true if it is assured, that the upper bits of a node are "clean"
104 * which means for a 16 or 8 bit value, that the upper bits in the register
105 * are 0 for unsigned and a copy of the last significant bit for signed
/* NOTE(review): body is conservative -- the parameters are unused and the
 * (missing) return presumably answers pessimistically; confirm in full source. */
108 static bool upper_bits_clean(ir_node *transformed_node, ir_mode *mode)
110 (void) transformed_node;
/* Extend @p op (already transformed) from its original mode's width to a
 * full register: sign-extension for signed modes, zero-extension otherwise. */
116 static ir_node *gen_extension(dbg_info *dbgi, ir_node *block, ir_node *op,
119 int bits = get_mode_size_bits(orig_mode);
/* NOTE(review): the full-width (bits == 32) early-out is not visible here */
123 if (mode_is_signed(orig_mode)) {
124 return gen_sign_extension(dbgi, block, op, bits);
126 return gen_zero_extension(dbgi, block, op, bits);
132 * Creates a possible DAG for a constant.
134 static ir_node *create_const_graph_value(dbg_info *dbgi, ir_node *block,
139 // we need to load hi & lo separately
/* values outside the 13-bit signed immediate range [-4096, 4095] need a
 * HiImm/LoImm (sethi + or) pair */
140 if (value < -4096 || value > 4095) {
141 ir_node *hi = new_bd_sparc_HiImm(dbgi, block, (int) value);
142 result = new_bd_sparc_LoImm(dbgi, block, hi, value);
/* small values fit into a single mov-immediate */
145 result = new_bd_sparc_Mov_imm(dbgi, block, (int) value);
146 be_dep_on_frame(result);
154 * Create a DAG constructing a given Const.
156 * @param irn a Firm const
158 static ir_node *create_const_graph(ir_node *irn, ir_node *block)
160 tarval *tv = get_Const_tarval(irn);
161 ir_mode *mode = get_tarval_mode(tv);
162 dbg_info *dbgi = get_irn_dbg_info(irn);
166 if (mode_is_reference(mode)) {
167 /* SPARC V8 is 32bit, so we can safely convert a reference tarval into Iu */
168 assert(get_mode_size_bits(mode) == get_mode_size_bits(mode_Iu));
169 tv = tarval_convert_to(tv, mode_Iu);
/* extract the numeric value and delegate to the generic constant builder */
172 value = get_tarval_long(tv);
173 return create_const_graph_value(dbgi, block, value);
177 * create a DAG to load fp constant. sparc only supports loading from global memory
/* Currently unimplemented: always panics. */
179 static ir_node *create_fp_const_graph(ir_node *irn, ir_node *block)
183 panic("FP constants not implemented");
/* Matching flags for gen_helper_binop:
 * MATCH_COMMUTATIVE  - operands may be swapped to use an immediate form
 * MATCH_SIZE_NEUTRAL - operation is insensitive to upper-bit garbage */
189 MATCH_COMMUTATIVE = 1 << 0,
190 MATCH_SIZE_NEUTRAL = 1 << 1,
/* constructor signatures for register/register, FP, and register/immediate binops */
193 typedef ir_node* (*new_binop_reg_func) (dbg_info *dbgi, ir_node *block, ir_node *op1, ir_node *op2);
194 typedef ir_node* (*new_binop_fp_func) (dbg_info *dbgi, ir_node *block, ir_node *op1, ir_node *op2, ir_mode *mode);
195 typedef ir_node* (*new_binop_imm_func) (dbg_info *dbgi, ir_node *block, ir_node *op1, int simm13);
198 * checks if a node's value can be encoded as a immediate
/* True iff @p node is a Const whose value fits the SPARC 13-bit signed
 * immediate field (simm13: [-4096, 4095]).
 * NOTE(review): the is_Const() guard is not visible in this extraction. */
201 static bool is_imm_encodeable(const ir_node *node)
205 //assert(mode_is_float_vector(get_irn_mode(node)));
210 val = get_tarval_long(get_Const_tarval(node));
212 return !(val < -4096 || val > 4095);
216 * helper function for binop operations
218 * @param new_binop_reg_func register generation function ptr
219 * @param new_binop_imm_func immediate generation function ptr
/* Transforms a generic Firm binop: prefers the immediate form when the right
 * (or, for commutative ops, the left) operand is an encodeable constant,
 * otherwise emits the register/register form. */
221 static ir_node *gen_helper_binop(ir_node *node, match_flags_t flags,
222 new_binop_reg_func new_reg, new_binop_imm_func new_imm)
224 ir_node *block = be_transform_node(get_nodes_block(node));
225 ir_node *op1 = get_binop_left(node);
227 ir_node *op2 = get_binop_right(node);
229 dbg_info *dbgi = get_irn_dbg_info(node);
/* right operand fits simm13: use op1 in a register, op2 as immediate */
231 if (is_imm_encodeable(op2)) {
232 ir_node *new_op1 = be_transform_node(op1);
233 return new_imm(dbgi, block, new_op1, get_tarval_long(get_Const_tarval(op2)));
236 new_op2 = be_transform_node(op2);
/* commutative op with encodeable left operand: swap so it becomes the immediate */
238 if ((flags & MATCH_COMMUTATIVE) && is_imm_encodeable(op1)) {
239 return new_imm(dbgi, block, new_op2, get_tarval_long(get_Const_tarval(op1)) );
/* fallback: both operands in registers */
242 new_op1 = be_transform_node(op1);
244 return new_reg(dbgi, block, new_op1, new_op2);
248 * helper function for FP binop operations
/* No immediate forms exist for FP: always transform both operands and emit
 * the register/register constructor, forwarding the node's result mode. */
250 static ir_node *gen_helper_binfpop(ir_node *node, new_binop_fp_func new_reg)
252 ir_node *block = be_transform_node(get_nodes_block(node));
253 ir_node *op1 = get_binop_left(node);
255 ir_node *op2 = get_binop_right(node);
257 dbg_info *dbgi = get_irn_dbg_info(node);
259 new_op2 = be_transform_node(op2);
260 new_op1 = be_transform_node(op1);
261 return new_reg(dbgi, block, new_op1, new_op2, get_irn_mode(node));
265 * Creates an sparc Add.
267 * @param node FIRM node
268 * @return the created sparc Add node
270 static ir_node *gen_Add(ir_node *node)
272 ir_mode *mode = get_irn_mode(node);
273 ir_node *block = be_transform_node(get_nodes_block(node));
274 dbg_info *dbgi = get_irn_dbg_info(node);
/* FP addition not yet supported by this backend */
279 if (mode_is_float(mode))
280 panic("FP not implemented yet");
/* integer add: commutative, so either operand may become the immediate */
282 return gen_helper_binop(node, MATCH_COMMUTATIVE | MATCH_SIZE_NEUTRAL, new_bd_sparc_Add_reg, new_bd_sparc_Add_imm);
287 * Creates an sparc Sub.
289 * @param node FIRM node
290 * @return the created sparc Sub node
292 static ir_node *gen_Sub(ir_node *node)
294 ir_mode *mode = get_irn_mode(node);
295 ir_node *block = be_transform_node(get_nodes_block(node));
296 dbg_info *dbgi = get_irn_dbg_info(node);
/* FP subtraction not yet supported */
301 if (mode_is_float(mode))
302 panic("FP not implemented yet");
/* note: Sub is NOT commutative, so no MATCH_COMMUTATIVE here */
304 return gen_helper_binop(node, MATCH_SIZE_NEUTRAL, new_bd_sparc_Sub_reg, new_bd_sparc_Sub_imm);
311 * @param node the ir Load node
312 * @return the created sparc Load node
314 static ir_node *gen_Load(ir_node *node)
316 ir_mode *mode = get_Load_mode(node);
317 ir_node *block = be_transform_node(get_nodes_block(node));
318 ir_node *ptr = get_Load_ptr(node);
319 ir_node *new_ptr = be_transform_node(ptr);
320 ir_node *mem = get_Load_mem(node);
321 ir_node *new_mem = be_transform_node(mem);
322 dbg_info *dbgi = get_irn_dbg_info(node);
323 ir_node *new_load = NULL;
/* FP loads not yet supported */
325 if (mode_is_float(mode))
326 panic("SPARC: no fp implementation yet");
/* build the gp load; entity/offset arguments are left empty (NULL, 0, 0, false) */
328 new_load = new_bd_sparc_Load(dbgi, block, new_ptr, new_mem, mode, NULL, 0, 0, false);
/* preserve the original node's pinned state (loads may be moved if unpinned) */
329 set_irn_pinned(new_load, get_irn_pinned(node));
337 * Transforms a Store.
339 * @param node the ir Store node
340 * @return the created sparc Store node
342 static ir_node *gen_Store(ir_node *node)
344 ir_node *block = be_transform_node(get_nodes_block(node));
345 ir_node *ptr = get_Store_ptr(node);
346 ir_node *new_ptr = be_transform_node(ptr);
347 ir_node *mem = get_Store_mem(node);
348 ir_node *new_mem = be_transform_node(mem);
349 ir_node *val = get_Store_value(node);
350 ir_node *new_val = be_transform_node(val);
/* the store width is taken from the stored value's mode */
351 ir_mode *mode = get_irn_mode(val);
352 dbg_info *dbgi = get_irn_dbg_info(node);
353 ir_node *new_store = NULL;
/* FP stores not yet supported */
355 if (mode_is_float(mode))
356 panic("SPARC: no fp implementation yet");
358 new_store = new_bd_sparc_Store(dbgi, block, new_ptr, new_val, new_mem, mode, NULL, 0, 0, false);
364 * Creates an sparc Mul.
365 * returns the lower 32bits of the 64bit multiply result
367 * @return the created sparc Mul node
369 static ir_node *gen_Mul(ir_node *node) {
370 ir_mode *mode = get_irn_mode(node);
371 dbg_info *dbgi = get_irn_dbg_info(node);
374 ir_node *proj_res_low;
376 if (mode_is_float(mode)) {
377 mul = gen_helper_binfpop(node, new_bd_sparc_fMul);
381 assert(mode_is_data(mode));
382 mul = gen_helper_binop(node, MATCH_COMMUTATIVE | MATCH_SIZE_NEUTRAL, new_bd_sparc_Mul_reg, new_bd_sparc_Mul_imm);
/* integer mul writes the Y register / condition flags, so mark it */
383 arch_irn_add_flags(mul, arch_irn_flags_modify_flags);
/* only the low 32 bits of the 64-bit product are the Mul result */
385 proj_res_low = new_rd_Proj(dbgi, mul, mode_Iu, pn_sparc_Mul_low);
390 * Creates an sparc Mulh.
391 * Mulh returns the upper 32bits of a mul instruction
393 * @return the created sparc Mulh node
395 static ir_node *gen_Mulh(ir_node *node) {
396 ir_mode *mode = get_irn_mode(node);
397 dbg_info *dbgi = get_irn_dbg_info(node);
400 ir_node *proj_res_hi;
402 if (mode_is_float(mode))
403 panic("FP not supported yet");
406 assert(mode_is_data(mode));
407 mul = gen_helper_binop(node, MATCH_COMMUTATIVE | MATCH_SIZE_NEUTRAL, new_bd_sparc_Mulh_reg, new_bd_sparc_Mulh_imm);
408 //arch_irn_add_flags(mul, arch_irn_flags_modify_flags);
/* NOTE(review): the hi-word proj is built with pn_sparc_Mulh_low -- either the
 * Mulh node exposes the high word under that proj number, or this is a bug;
 * verify against gen_sparc_new_nodes.h. */
409 proj_res_hi = new_rd_Proj(dbgi, mul, mode_Iu, pn_sparc_Mulh_low);
414 * Creates an sparc Div.
416 * @return the created sparc Div node
418 static ir_node *gen_Div(ir_node *node) {
420 ir_mode *mode = get_irn_mode(node);
422 if (mode_is_float(mode))
425 panic("FP not supported yet");
427 //assert(mode_is_data(mode));
/* division is not commutative, hence no MATCH_COMMUTATIVE */
428 div = gen_helper_binop(node, MATCH_SIZE_NEUTRAL, new_bd_sparc_Div_reg, new_bd_sparc_Div_imm);
434 * transform abs node:
/* Branchless absolute value:
 *   sra = op >> 31 (arithmetic)   -- all-ones if negative, zero otherwise
 *   xor = op ^ sra                -- conditional bitwise complement
 *   sub = xor - sra               -- adds 1 back when op was negative
 * NOTE(review): the Mov feeding the shift looks redundant; possibly present
 * for register-constraint reasons -- confirm in full source. */
442 static ir_node *gen_Abs(ir_node *node) {
443 ir_node *block = be_transform_node(get_nodes_block(node));
444 ir_mode *mode = get_irn_mode(node);
445 dbg_info *dbgi = get_irn_dbg_info(node);
446 ir_node *op = get_Abs_op(node);
448 ir_node *mov, *sra, *xor, *sub, *new_op;
450 if (mode_is_float(mode))
451 panic("FP not supported yet");
453 new_op = be_transform_node(op);
455 mov = new_bd_sparc_Mov_reg(dbgi, block, new_op);
456 sra = new_bd_sparc_ShiftRA_imm(dbgi, block, mov, 31);
457 xor = new_bd_sparc_Xor_reg(dbgi, block, new_op, sra);
458 sub = new_bd_sparc_Sub_reg(dbgi, block, sra, xor);
464 * Transforms a Not node.
466 * @return the created ARM Not node
/* NOTE(review): "ARM" in the comment above is a copy-paste leftover; this
 * creates a SPARC Not. */
468 static ir_node *gen_Not(ir_node *node)
470 ir_node *block = be_transform_node(get_nodes_block(node));
471 ir_node *op = get_Not_op(node);
472 ir_node *new_op = be_transform_node(op);
473 dbg_info *dbgi = get_irn_dbg_info(node);
475 return new_bd_sparc_Not(dbgi, block, new_op);
/* Transforms a bitwise And; commutative so either operand may be the immediate. */
478 static ir_node *gen_And(ir_node *node)
480 ir_mode *mode = get_irn_mode(node);
481 ir_node *block = be_transform_node(get_nodes_block(node));
482 dbg_info *dbgi = get_irn_dbg_info(node);
487 if (mode_is_float(mode))
488 panic("FP not implemented yet");
490 return gen_helper_binop(node, MATCH_COMMUTATIVE, new_bd_sparc_And_reg, new_bd_sparc_And_imm);
/* Transforms a bitwise Or; commutative so either operand may be the immediate. */
493 static ir_node *gen_Or(ir_node *node)
495 ir_mode *mode = get_irn_mode(node);
496 ir_node *block = be_transform_node(get_nodes_block(node));
497 dbg_info *dbgi = get_irn_dbg_info(node);
502 if (mode_is_float(mode))
503 panic("FP not implemented yet");
505 return gen_helper_binop(node, MATCH_COMMUTATIVE, new_bd_sparc_Or_reg, new_bd_sparc_Or_imm);
/* Transforms a bitwise Xor; commutative so either operand may be the immediate. */
508 static ir_node *gen_Xor(ir_node *node)
510 ir_mode *mode = get_irn_mode(node);
511 ir_node *block = be_transform_node(get_nodes_block(node));
512 dbg_info *dbgi = get_irn_dbg_info(node);
517 if (mode_is_float(mode))
518 panic("FP not implemented yet");
520 return gen_helper_binop(node, MATCH_COMMUTATIVE, new_bd_sparc_Xor_reg, new_bd_sparc_Xor_imm);
/* Transforms a left shift (Shl -> SPARC sll). */
523 static ir_node *gen_Shl(ir_node *node)
525 return gen_helper_binop(node, MATCH_SIZE_NEUTRAL, new_bd_sparc_ShiftLL_reg, new_bd_sparc_ShiftLL_imm);
/* Transforms a logical right shift (Shr -> SPARC srl). */
528 static ir_node *gen_Shr(ir_node *node)
530 return gen_helper_binop(node, MATCH_SIZE_NEUTRAL, new_bd_sparc_ShiftLR_reg, new_bd_sparc_ShiftLR_imm);
/* Transforms an arithmetic right shift (Shrs -> SPARC sra). */
533 static ir_node *gen_Shra(ir_node *node)
535 return gen_helper_binop(node, MATCH_SIZE_NEUTRAL, new_bd_sparc_ShiftRA_reg, new_bd_sparc_ShiftRA_imm);
538 /****** TRANSFORM GENERAL BACKEND NODES ********/
541 * Transforms a Minus node.
544 static ir_node *gen_Minus(ir_node *node)
546 ir_node *block = be_transform_node(get_nodes_block(node));
547 ir_node *op = get_Minus_op(node);
548 ir_node *new_op = be_transform_node(op);
549 dbg_info *dbgi = get_irn_dbg_info(node);
550 ir_mode *mode = get_irn_mode(node);
552 if (mode_is_float(mode)) {
553 panic("FP not implemented yet");
556 assert(mode_is_data(mode));
/* integer negation */
557 return new_bd_sparc_Minus(dbgi, block, new_op);
561 * Transforms a Const node.
563 * @param node the ir Const node
564 * @return The transformed sparc node.
566 static ir_node *gen_Const(ir_node *node)
568 ir_node *block = be_transform_node(get_nodes_block(node));
569 ir_mode *mode = get_irn_mode(node);
570 dbg_info *dbg = get_irn_dbg_info(node);
/* FP constants go through the (unimplemented) memory-load path */
574 if (mode_is_float(mode)) {
575 return create_fp_const_graph(node, block);
578 return create_const_graph(node, block);
583 * @param node the ir AddSP node
584 * @return transformed sparc SAVE node
586 static ir_node *gen_be_AddSP(ir_node *node)
588 ir_node *block = be_transform_node(get_nodes_block(node));
589 ir_node *sz = get_irn_n(node, be_pos_AddSP_size);
590 ir_node *new_sz = be_transform_node(sz);
591 ir_node *sp = get_irn_n(node, be_pos_AddSP_old_sp);
592 ir_node *new_sp = be_transform_node(sp);
593 dbg_info *dbgi = get_irn_dbg_info(node);
594 ir_node *nomem = new_NoMem();
597 /* SPARC stack grows in reverse direction */
598 new_op = new_bd_sparc_AddSP(dbgi, block, new_sp, new_sz, nomem);
606 * @param node the ir SubSP node
607 * @return transformed sparc SAVE node
609 static ir_node *gen_be_SubSP(ir_node *node)
611 ir_node *block = be_transform_node(get_nodes_block(node));
612 ir_node *sz = get_irn_n(node, be_pos_SubSP_size);
613 ir_node *new_sz = be_transform_node(sz);
614 ir_node *sp = get_irn_n(node, be_pos_SubSP_old_sp);
615 ir_node *new_sp = be_transform_node(sp);
616 dbg_info *dbgi = get_irn_dbg_info(node);
617 ir_node *nomem = new_NoMem();
620 /* SPARC stack grows in reverse direction */
621 new_op = new_bd_sparc_SubSP(dbgi, block, new_sp, new_sz, nomem);
626 * transform FrameAddr
/* Computes the address of a frame entity relative to the frame pointer. */
628 static ir_node *gen_be_FrameAddr(ir_node *node)
630 ir_node *block = be_transform_node(get_nodes_block(node));
631 ir_entity *ent = be_get_frame_entity(node);
632 ir_node *fp = be_get_FrameAddr_frame(node);
633 ir_node *new_fp = be_transform_node(fp);
634 dbg_info *dbgi = get_irn_dbg_info(node);
636 new_node = new_bd_sparc_FrameAddr(dbgi, block, new_fp, ent);
641 * Transform a be_Copy.
643 static ir_node *gen_be_Copy(ir_node *node)
645 ir_node *result = be_duplicate_node(node);
646 ir_mode *mode = get_irn_mode(result);
/* normalize all gp-register values to 32bit (mode_Iu)
 * NOTE(review): the mode is set on the ORIGINAL `node`, not on `result` --
 * looks suspicious; verify whether `result` was intended. */
648 if (mode_needs_gp_reg(mode)) {
649 set_irn_mode(node, mode_Iu);
/* Transform a be_Call: duplicate it and mark it as clobbering the flags. */
658 static ir_node *gen_be_Call(ir_node *node)
660 ir_node *res = be_duplicate_node(node);
661 arch_irn_add_flags(res, arch_irn_flags_modify_flags);
666 * Transforms a Switch.
669 static ir_node *gen_SwitchJmp(ir_node *node)
671 ir_node *block = be_transform_node(get_nodes_block(node));
672 ir_node *selector = get_Cond_selector(node);
673 dbg_info *dbgi = get_irn_dbg_info(node);
674 ir_node *new_op = be_transform_node(selector);
675 ir_node *const_graph;
679 const ir_edge_t *edge;
/* first pass: determine the smallest and largest case number */
686 foreach_out_edge(node, edge) {
687 proj = get_edge_src_irn(edge);
688 assert(is_Proj(proj) && "Only proj allowed at SwitchJmp");
690 pn = get_Proj_proj(proj);
692 min = pn<min ? pn : min;
693 max = pn>max ? pn : max;
/* `translation` is presumably set to `min` here (assignment not visible) */
697 n_projs = max - translation + 1;
/* second pass: renumber the case projs so they start at 0 */
699 foreach_out_edge(node, edge) {
700 proj = get_edge_src_irn(edge);
701 assert(is_Proj(proj) && "Only proj allowed at SwitchJmp");
703 pn = get_Proj_proj(proj) - translation;
704 set_Proj_proj(proj, pn);
/* bias the selector by the same amount and emit the jump-table node */
707 const_graph = create_const_graph_value(dbgi, block, translation);
708 sub = new_bd_sparc_Sub_reg(dbgi, block, new_op, const_graph);
709 return new_bd_sparc_SwitchJmp(dbgi, block, sub, n_projs, get_Cond_default_proj(node) - translation);
713 * Transform Cond nodes
715 static ir_node *gen_Cond(ir_node *node)
717 ir_node *selector = get_Cond_selector(node);
718 ir_mode *mode = get_irn_mode(selector);
/* non-boolean selector means this is a switch, not an if/else */
724 if (mode != mode_b) {
725 return gen_SwitchJmp(node);
728 // regular if/else jumps
/* the selector must be a Proj of a Cmp; the Cmp becomes the flag producer */
729 assert(is_Proj(selector));
731 block = be_transform_node(get_nodes_block(node));
732 dbgi = get_irn_dbg_info(node);
733 flag_node = be_transform_node(get_Proj_pred(selector));
734 return new_bd_sparc_Branch(dbgi, block, flag_node, get_Proj_proj(selector));
/* Transforms a Cmp into a SPARC Cmp/Tst flag-producing node. */
740 static ir_node *gen_Cmp(ir_node *node)
742 ir_node *block = be_transform_node(get_nodes_block(node));
743 ir_node *op1 = get_Cmp_left(node);
744 ir_node *op2 = get_Cmp_right(node);
745 ir_mode *cmp_mode = get_irn_mode(op1);
746 dbg_info *dbgi = get_irn_dbg_info(node);
751 if (mode_is_float(cmp_mode)) {
752 panic("FloatCmp not implemented");
/* only full-register compares are handled so far */
756 if (get_mode_size_bits(cmp_mode) != 32) {
757 panic("CmpMode != 32bit not supported yet");
761 assert(get_irn_mode(op2) == cmp_mode);
762 is_unsigned = !mode_is_signed(cmp_mode);
764 /* compare with 0 can be done with Tst */
766 if (is_Const(op2) && tarval_is_null(get_Const_tarval(op2))) {
767 new_op1 = be_transform_node(op1);
768 return new_bd_sparc_Tst(dbgi, block, new_op1, false,
/* zero on the left: Tst with the "swapped" flag set (second bool arg true) */
772 if (is_Const(op1) && tarval_is_null(get_Const_tarval(op1))) {
773 new_op2 = be_transform_node(op2);
774 return new_bd_sparc_Tst(dbgi, block, new_op2, true,
779 /* integer compare */
/* extend both operands to full register width before comparing */
780 new_op1 = be_transform_node(op1);
781 new_op1 = gen_extension(dbgi, block, new_op1, cmp_mode);
782 new_op2 = be_transform_node(op2);
783 new_op2 = gen_extension(dbgi, block, new_op2, cmp_mode);
784 return new_bd_sparc_Cmp_reg(dbgi, block, new_op1, new_op2, false, is_unsigned);
788 * Transforms a SymConst node.
/* Loads the address of an entity (global symbol). */
790 static ir_node *gen_SymConst(ir_node *node)
792 ir_node *block = be_transform_node(get_nodes_block(node));
793 ir_entity *entity = get_SymConst_entity(node);
794 dbg_info *dbgi = get_irn_dbg_info(node);
797 new_node = new_bd_sparc_SymConst(dbgi, block, entity);
/* add an artificial dependency so the node is scheduled after frame setup */
798 be_dep_on_frame(new_node);
803 * Transforms a Conv node.
806 static ir_node *gen_Conv(ir_node *node)
808 ir_node *block = be_transform_node(get_nodes_block(node));
809 ir_node *op = get_Conv_op(node);
810 ir_node *new_op = be_transform_node(op);
811 ir_mode *src_mode = get_irn_mode(op);
812 ir_mode *dst_mode = get_irn_mode(node);
813 dbg_info *dbg = get_irn_dbg_info(node);
815 int src_bits = get_mode_size_bits(src_mode);
816 int dst_bits = get_mode_size_bits(dst_mode);
/* identity conversion: nothing to do (return of new_op not visible here) */
818 if (src_mode == dst_mode)
821 if (mode_is_float(src_mode) || mode_is_float(dst_mode)) {
822 assert((src_bits <= 64 && dst_bits <= 64) && "quad FP not implemented");
824 if (mode_is_float(src_mode)) {
825 if (mode_is_float(dst_mode)) {
826 // float -> float conv
/* narrowing picks double->single, otherwise single->double */
827 if (src_bits > dst_bits) {
828 return new_bd_sparc_FpDToFpS(dbg, block, new_op, dst_mode);
830 return new_bd_sparc_FpSToFpD(dbg, block, new_op, dst_mode);
/* float -> int: dispatch on source FP width (branch conditions not visible) */
836 return new_bd_sparc_FpSToInt(dbg, block, new_op, dst_mode);
838 return new_bd_sparc_FpDToInt(dbg, block, new_op, dst_mode);
840 panic("quad FP not implemented");
/* int -> float: dispatch on destination FP width */
847 return new_bd_sparc_IntToFpS(dbg, block, new_op, src_mode);
849 return new_bd_sparc_IntToFpD(dbg, block, new_op, src_mode);
851 panic("quad FP not implemented");
854 } else { /* complete in gp registers */
858 if (src_bits == dst_bits) {
859 /* kill unneccessary conv */
/* widening: extension only needed when the narrower value's upper bits
 * are not already clean */
863 if (src_bits < dst_bits) {
871 if (upper_bits_clean(new_op, min_mode)) {
875 if (mode_is_signed(min_mode)) {
876 return gen_sign_extension(dbg, block, new_op, min_bits);
878 return gen_zero_extension(dbg, block, new_op, min_bits);
/* Transforms an Unknown node by materializing a zero constant. */
883 static ir_node *gen_Unknown(ir_node *node)
885 ir_node *block = get_nodes_block(node);
886 ir_node *new_block = be_transform_node(block);
887 dbg_info *dbgi = get_irn_dbg_info(node);
889 /* just produce a 0 */
890 ir_mode *mode = get_irn_mode(node);
891 if (mode_is_float(mode)) {
892 panic("FP not implemented");
/* NOTE(review): unreachable -- panic() above does not return */
893 be_dep_on_frame(node);
895 } else if (mode_needs_gp_reg(mode)) {
896 return create_const_graph_value(dbgi, new_block, 0);
899 panic("Unexpected Unknown mode");
903 * Transform some Phi nodes
905 static ir_node *gen_Phi(ir_node *node)
907 const arch_register_req_t *req;
908 ir_node *block = be_transform_node(get_nodes_block(node));
909 ir_graph *irg = current_ir_graph;
910 dbg_info *dbgi = get_irn_dbg_info(node);
911 ir_mode *mode = get_irn_mode(node);
914 if (mode_needs_gp_reg(mode)) {
915 /* we shouldn't have any 64bit stuff around anymore */
916 assert(get_mode_size_bits(mode) <= 32);
917 /* all integer operations are on 32bit registers now */
919 req = sparc_reg_classes[CLASS_sparc_gp].class_req;
/* non-gp phis (e.g. memory phis) get no register requirement */
921 req = arch_no_register_req;
924 /* phi nodes allow loops, so we use the old arguments for now
925 * and fix this later */
926 phi = new_ir_node(dbgi, irg, block, op_Phi, mode, get_irn_arity(node), get_irn_in(node) + 1);
927 copy_node_attr(irg, node, phi);
928 be_duplicate_deps(node, phi);
929 arch_set_out_register_req(phi, 0, req);
/* enqueue predecessors so their transforms run; phi inputs fixed up later */
930 be_enqueue_preds(node);
936 * Transform a Proj from a Load.
938 static ir_node *gen_Proj_Load(ir_node *node)
940 ir_node *load = get_Proj_pred(node);
941 ir_node *new_load = be_transform_node(load);
942 dbg_info *dbgi = get_irn_dbg_info(node);
943 long proj = get_Proj_proj(node);
945 /* renumber the proj */
946 switch (get_sparc_irn_opcode(new_load)) {
948 /* handle all gp loads equal: they have the same proj numbers. */
949 if (proj == pn_Load_res) {
950 return new_rd_Proj(dbgi, new_load, mode_Iu, pn_sparc_Load_res);
951 } else if (proj == pn_Load_M) {
952 return new_rd_Proj(dbgi, new_load, mode_M, pn_sparc_Load_M);
956 case iro_sparc_fpaLoad:
957 panic("FP not implemented yet");
961 panic("Unsupported Proj from Load");
/* fallthrough default: duplicate the proj unchanged */
964 return be_duplicate_node(node);
968 * Transform the Projs of a be_AddSP.
970 static ir_node *gen_Proj_be_AddSP(ir_node *node)
972 ir_node *pred = get_Proj_pred(node);
973 ir_node *new_pred = be_transform_node(pred);
974 dbg_info *dbgi = get_irn_dbg_info(node);
975 long proj = get_Proj_proj(node);
/* NOTE(review): the AddSP projs here reference pn_sparc_SubSP_* constants
 * (and gen_Proj_be_SubSP references pn_sparc_AddSP_*). This may be
 * intentional because the SPARC stack grows downwards, but it looks
 * swapped -- verify against gen_sparc_new_nodes.h. */
977 if (proj == pn_be_AddSP_sp) {
978 ir_node *res = new_rd_Proj(dbgi, new_pred, mode_Iu,
979 pn_sparc_SubSP_stack);
/* the new stack pointer proj is pinned to %sp */
980 arch_set_irn_register(res, &sparc_gp_regs[REG_SP]);
982 } else if (proj == pn_be_AddSP_res) {
983 return new_rd_Proj(dbgi, new_pred, mode_Iu, pn_sparc_SubSP_stack);
984 } else if (proj == pn_be_AddSP_M) {
985 return new_rd_Proj(dbgi, new_pred, mode_M, pn_sparc_SubSP_M);
988 panic("Unsupported Proj from AddSP");
992 * Transform the Projs of a be_SubSP.
994 static ir_node *gen_Proj_be_SubSP(ir_node *node)
996 ir_node *pred = get_Proj_pred(node);
997 ir_node *new_pred = be_transform_node(pred);
998 dbg_info *dbgi = get_irn_dbg_info(node);
999 long proj = get_Proj_proj(node);
/* NOTE(review): references pn_sparc_AddSP_* constants -- mirror image of
 * gen_Proj_be_AddSP; verify the constants are not swapped. */
1001 if (proj == pn_be_SubSP_sp) {
1002 ir_node *res = new_rd_Proj(dbgi, new_pred, mode_Iu,
1003 pn_sparc_AddSP_stack);
/* pin the resulting stack pointer to %sp */
1004 arch_set_irn_register(res, &sparc_gp_regs[REG_SP]);
1006 } else if (proj == pn_be_SubSP_M) {
1007 return new_rd_Proj(dbgi, new_pred, mode_M, pn_sparc_AddSP_M);
1010 panic("Unsupported Proj from SubSP");
1014 * Transform the Projs from a Cmp.
/* Not yet implemented -- Cmp projs are consumed by gen_Cond instead. */
1016 static ir_node *gen_Proj_Cmp(ir_node *node)
1019 panic("not implemented");
1023 * transform Projs from a Div
1025 static ir_node *gen_Proj_Div(ir_node *node)
1027 ir_node *pred = get_Proj_pred(node);
1028 ir_node *new_pred = be_transform_node(pred);
1029 dbg_info *dbgi = get_irn_dbg_info(node);
1030 ir_mode *mode = get_irn_mode(node);
1031 long proj = get_Proj_proj(node);
/* only the result proj of an actual sparc Div is handled
 * (the proj-number check is not visible in this extraction) */
1035 if (is_sparc_Div(new_pred)) {
1036 return new_rd_Proj(dbgi, new_pred, mode, pn_sparc_Div_res);
1042 panic("Unsupported Proj from Div");
1047 * Transform a Proj node.
/* Central Proj dispatcher: routes each Proj to a specialized handler based
 * on the opcode of its predecessor. */
1049 static ir_node *gen_Proj(ir_node *node)
1051 ir_graph *irg = current_ir_graph;
1052 dbg_info *dbgi = get_irn_dbg_info(node);
1053 ir_node *pred = get_Proj_pred(node);
1054 long proj = get_Proj_proj(node);
1059 if (is_Store(pred)) {
/* a Store has only a memory result: the transformed store IS the M proj */
1060 if (proj == pn_Store_M) {
1061 return be_transform_node(pred);
1063 panic("Unsupported Proj from Store");
1065 } else if (is_Load(pred)) {
1066 return gen_Proj_Load(node);
1067 } else if (be_is_SubSP(pred)) {
1068 //panic("gen_Proj not implemented for SubSP");
1069 return gen_Proj_be_SubSP(node);
1070 } else if (be_is_AddSP(pred)) {
1071 //panic("gen_Proj not implemented for AddSP");
1072 return gen_Proj_be_AddSP(node);
1073 } else if (is_Cmp(pred)) {
1074 //panic("gen_Proj not implemented for Cmp");
1075 return gen_Proj_Cmp(node);
1076 } else if (is_Div(pred)) {
1077 return gen_Proj_Div(node);
1078 } else if (is_Start(pred)) {
/* the initial-exec ProjX becomes a plain jump out of the start block */
1080 if (proj == pn_Start_X_initial_exec) {
1081 ir_node *block = get_nodes_block(pred);
1084 // we exchange the ProjX with a jump
1085 block = be_transform_node(block);
1086 jump = new_rd_Jmp(dbgi, block);
1090 if (node == get_irg_anchor(irg, anchor_tls)) {
1091 return gen_Proj_tls(node);
/* generic fallback: retype gp-register projs to 32bit (mode_Iu) */
1095 ir_node *new_pred = be_transform_node(pred);
1096 ir_mode *mode = get_irn_mode(node);
1097 if (mode_needs_gp_reg(mode)) {
1098 ir_node *new_proj = new_r_Proj(new_pred, mode_Iu, get_Proj_proj(node));
/* keep the original node number, presumably for debugging output */
1099 new_proj->node_nr = node->node_nr;
1104 return be_duplicate_node(node);
/* Transforms an unconditional Jmp. */
1111 static ir_node *gen_Jmp(ir_node *node)
1113 ir_node *block = get_nodes_block(node);
1114 ir_node *new_block = be_transform_node(block);
1115 dbg_info *dbgi = get_irn_dbg_info(node);
1117 return new_bd_sparc_Jmp(dbgi, new_block);
1121 * configure transformation callbacks
/* Registers one transform function per Firm opcode with the generic
 * backend transformation driver. */
1123 void sparc_register_transformers(void)
1125 be_start_transform_setup();
1127 be_set_transform_function(op_Abs, gen_Abs);
1128 be_set_transform_function(op_Add, gen_Add);
1129 be_set_transform_function(op_And, gen_And);
1130 be_set_transform_function(op_be_AddSP, gen_be_AddSP);
1131 be_set_transform_function(op_be_Call, gen_be_Call);
1132 be_set_transform_function(op_be_Copy, gen_be_Copy);
1133 be_set_transform_function(op_be_FrameAddr, gen_be_FrameAddr);
1134 be_set_transform_function(op_be_SubSP, gen_be_SubSP);
1135 be_set_transform_function(op_Cmp, gen_Cmp);
1136 be_set_transform_function(op_Cond, gen_Cond);
1137 be_set_transform_function(op_Const, gen_Const);
1138 be_set_transform_function(op_Conv, gen_Conv);
1139 be_set_transform_function(op_Div, gen_Div);
1140 be_set_transform_function(op_Eor, gen_Xor);
1141 be_set_transform_function(op_Jmp, gen_Jmp);
1142 be_set_transform_function(op_Load, gen_Load);
1143 be_set_transform_function(op_Minus, gen_Minus);
1144 be_set_transform_function(op_Mul, gen_Mul);
1145 be_set_transform_function(op_Mulh, gen_Mulh);
1146 be_set_transform_function(op_Not, gen_Not);
1147 be_set_transform_function(op_Or, gen_Or);
1148 be_set_transform_function(op_Phi, gen_Phi);
1149 be_set_transform_function(op_Proj, gen_Proj);
1150 be_set_transform_function(op_Shl, gen_Shl);
1151 be_set_transform_function(op_Shr, gen_Shr);
1152 be_set_transform_function(op_Shrs, gen_Shra);
1153 be_set_transform_function(op_Store, gen_Store);
1154 be_set_transform_function(op_Sub, gen_Sub);
1155 be_set_transform_function(op_SymConst, gen_SymConst);
1156 be_set_transform_function(op_Unknown, gen_Unknown);
/* already-lowered backend nodes are simply duplicated */
1158 be_set_transform_function(op_sparc_Save, be_duplicate_node);
1162 * Transform a Firm graph into a SPARC graph.
1164 void sparc_transform_graph(sparc_code_gen_t *cg)
1166 sparc_register_transformers();
1168 be_transform_graph(cg->irg, NULL);
/* One-time initialization: registers this module's debug channel. */
1171 void sparc_init_transform(void)
1173 FIRM_DBG_REGISTER(dbg, "firm.be.sparc.transform");