2 * Copyright (C) 1995-2010 University of Karlsruhe. All rights reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief code selection (transform FIRM into SPARC FIRM)
29 #include "irgraph_t.h"
41 #include "../benode.h"
43 #include "../beutil.h"
44 #include "../betranshlp.h"
45 #include "bearch_sparc_t.h"
47 #include "sparc_nodes_attr.h"
48 #include "sparc_transform.h"
49 #include "sparc_new_nodes.h"
50 #include "gen_sparc_new_nodes.h"
52 #include "gen_sparc_regalloc_if.h"
/* Debug module handle; only compiled in when debugging is enabled. */
56 DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
/* Current code-generator environment; presumably assigned before the
 * transformation walk starts — the assignment is not visible in this chunk. */
58 static sparc_code_gen_t *env_cg;
60 static inline int mode_needs_gp_reg(ir_mode *mode)
62 return mode_is_int(mode) || mode_is_reference(mode);
66 * Creates a possible DAG for a constant.
68 static ir_node *create_const_graph_value(dbg_info *dbgi, ir_node *block,
73 // we need to load hi & lo separately
74 if (value < -4096 || value > 4095) {
75 ir_node *hi = new_bd_sparc_HiImm(dbgi, block, (int) value);
76 result = new_bd_sparc_LoImm(dbgi, block, hi, value);
79 result = new_bd_sparc_Mov_imm(dbgi, block, (int) value);
80 be_dep_on_frame(result);
88 * Create a DAG constructing a given Const.
90 * @param irn a Firm const
92 static ir_node *create_const_graph(ir_node *irn, ir_node *block)
94 tarval *tv = get_Const_tarval(irn);
95 ir_mode *mode = get_tarval_mode(tv);
96 dbg_info *dbgi = get_irn_dbg_info(irn);
100 if (mode_is_reference(mode)) {
101 /* SPARC V8 is 32bit, so we can safely convert a reference tarval into Iu */
102 assert(get_mode_size_bits(mode) == get_mode_size_bits(mode_Iu));
103 tv = tarval_convert_to(tv, mode_Iu);
106 value = get_tarval_long(tv);
107 return create_const_graph_value(dbgi, block, value);
113 MATCH_COMMUTATIVE = 1 << 0,
114 MATCH_SIZE_NEUTRAL = 1 << 1,
117 typedef ir_node* (*new_binop_reg_func) (dbg_info *dbgi, ir_node *block, ir_node *op1, ir_node *op2);
118 typedef ir_node* (*new_binop_imm_func) (dbg_info *dbgi, ir_node *block, ir_node *op1, int simm13);
121 * checks if a node's value can be encoded as a immediate
124 static bool is_imm_encodeable(const ir_node *node)
131 val = get_tarval_long(get_Const_tarval(node));
133 return !(val < -4096 || val > 4095);
137 * helper function for binop operations
139 * @param new_binop_reg_func register generation function ptr
140 * @param new_binop_imm_func immediate generation function ptr
142 static ir_node *gen_helper_binop(ir_node *node, match_flags_t flags,
143 new_binop_reg_func new_reg, new_binop_imm_func new_imm)
145 ir_node *block = be_transform_node(get_nodes_block(node));
146 ir_node *op1 = get_binop_left(node);
148 ir_node *op2 = get_binop_right(node);
150 dbg_info *dbgi = get_irn_dbg_info(node);
153 if (flags & MATCH_SIZE_NEUTRAL) {
154 op1 = arm_skip_downconv(op1);
155 op2 = arm_skip_downconv(op2);
157 assert(get_mode_size_bits(get_irn_mode(node)) == 32);
160 if (is_imm_encodeable(op2)) {
161 ir_node *new_op1 = be_transform_node(op1);
162 return new_imm(dbgi, block, new_op1, get_tarval_long(get_Const_tarval(op2)));
165 new_op2 = be_transform_node(op2);
167 if ((flags & MATCH_COMMUTATIVE) && is_imm_encodeable(op1)) {
168 return new_imm(dbgi, block, new_op2, get_tarval_long(get_Const_tarval(op1)) );
171 new_op1 = be_transform_node(op1);
173 return new_reg(dbgi, block, new_op1, new_op2);
177 * Creates an sparc Add.
179 * @param node FIRM node
180 * @return the created sparc Add node
182 static ir_node *gen_Add(ir_node *node)
184 ir_mode *mode = get_irn_mode(node);
185 ir_node *block = be_transform_node(get_nodes_block(node));
186 dbg_info *dbgi = get_irn_dbg_info(node);
191 if (mode_is_float(mode))
192 panic("FP not implemented yet");
194 return gen_helper_binop(node, MATCH_COMMUTATIVE | MATCH_SIZE_NEUTRAL, new_bd_sparc_Add_reg, new_bd_sparc_Add_imm);
199 * Creates an sparc Sub.
201 * @param node FIRM node
202 * @return the created sparc Sub node
204 static ir_node *gen_Sub(ir_node *node)
206 ir_mode *mode = get_irn_mode(node);
207 ir_node *block = be_transform_node(get_nodes_block(node));
208 dbg_info *dbgi = get_irn_dbg_info(node);
213 if (mode_is_float(mode))
214 panic("FP not implemented yet");
216 return gen_helper_binop(node, MATCH_SIZE_NEUTRAL, new_bd_sparc_Sub_reg, new_bd_sparc_Sub_imm);
223 * @param node the ir Load node
224 * @return the created sparc Load node
226 static ir_node *gen_Load(ir_node *node)
228 ir_mode *mode = get_Load_mode(node);
229 ir_node *block = be_transform_node(get_nodes_block(node));
230 ir_node *ptr = get_Load_ptr(node);
231 ir_node *new_ptr = be_transform_node(ptr);
232 ir_node *mem = get_Load_mem(node);
233 ir_node *new_mem = be_transform_node(mem);
234 dbg_info *dbgi = get_irn_dbg_info(node);
235 ir_node *new_load = NULL;
237 if (mode_is_float(mode))
238 panic("SPARC: no fp implementation yet");
240 new_load = new_bd_sparc_Load(dbgi, block, new_ptr, new_mem, mode, NULL, 0, 0, false);
241 set_irn_pinned(new_load, get_irn_pinned(node));
249 * Transforms a Store.
251 * @param node the ir Store node
252 * @return the created sparc Store node
254 static ir_node *gen_Store(ir_node *node)
256 ir_node *block = be_transform_node(get_nodes_block(node));
257 ir_node *ptr = get_Store_ptr(node);
258 ir_node *new_ptr = be_transform_node(ptr);
259 ir_node *mem = get_Store_mem(node);
260 ir_node *new_mem = be_transform_node(mem);
261 ir_node *val = get_Store_value(node);
262 ir_node *new_val = be_transform_node(val);
263 ir_mode *mode = get_irn_mode(val);
264 dbg_info *dbgi = get_irn_dbg_info(node);
265 ir_node *new_store = NULL;
267 if (mode_is_float(mode))
268 panic("SPARC: no fp implementation yet");
270 new_store = new_bd_sparc_Store(dbgi, block, new_ptr, new_val, new_mem, mode, NULL, 0, 0, false);
276 * Creates an sparc Mul.
278 * @return the created sparc Mul node
280 static ir_node *gen_Mul(ir_node *node)
282 ir_mode *mode = get_irn_mode(node);
283 dbg_info *dbgi = get_irn_dbg_info(node);
286 ir_node *proj_res_low;
288 if (mode_is_float(mode))
289 panic("FP not supported yet");
292 assert(mode_is_data(mode));
293 mul = gen_helper_binop(node, MATCH_COMMUTATIVE | MATCH_SIZE_NEUTRAL, new_bd_sparc_UMul_reg, new_bd_sparc_UMul_imm);
295 proj_res_low = new_rd_Proj(dbgi, mul, mode_Iu, pn_sparc_UMul_low);
298 //return gen_helper_binop(node, MATCH_COMMUTATIVE | MATCH_SIZE_NEUTRAL, new_bd_sparc_Mul_reg, new_bd_sparc_Mul_imm);
302 * Creates an sparc Div.
304 * @return the created sparc Div node
306 static ir_node *gen_Div(ir_node *node)
308 ir_mode *mode = get_irn_mode(node);
309 //dbg_info *dbgi = get_irn_dbg_info(node);
311 //ir_node *proj_res_low;
313 if (mode_is_float(mode))
314 panic("FP not supported yet");
316 return gen_helper_binop(node, MATCH_SIZE_NEUTRAL, new_bd_sparc_UDiv_reg, new_bd_sparc_UDiv_imm);
318 //proj_res = new_rd_Proj(dbgi, mul, mode_Iu, pn_sparc_UDiv_res);
321 //return gen_helper_binop(node, MATCH_COMMUTATIVE | MATCH_SIZE_NEUTRAL, new_bd_sparc_Mul_reg, new_bd_sparc_Mul_imm);
325 * transform abs node:
333 static ir_node *gen_Abs(ir_node *node)
335 ir_node *block = be_transform_node(get_nodes_block(node));
336 ir_mode *mode = get_irn_mode(node);
337 dbg_info *dbgi = get_irn_dbg_info(node);
338 ir_node *op = get_Abs_op(node);
340 ir_node *mov, *sra, *xor, *sub, *new_op;
342 if (mode_is_float(mode))
343 panic("FP not supported yet");
345 new_op = be_transform_node(op);
347 mov = new_bd_sparc_Mov_reg(dbgi, block, new_op);
348 sra = new_bd_sparc_ShiftRA_imm(dbgi, block, mov, 31);
349 xor = new_bd_sparc_Xor_reg(dbgi, block, new_op, sra);
350 sub = new_bd_sparc_Sub_reg(dbgi, block, sra, xor);
356 * Transforms a Not node.
358 * @return the created ARM Not node
360 static ir_node *gen_Not(ir_node *node)
362 ir_node *block = be_transform_node(get_nodes_block(node));
363 ir_node *op = get_Not_op(node);
364 ir_node *new_op = be_transform_node(op);
365 dbg_info *dbgi = get_irn_dbg_info(node);
367 return new_bd_sparc_Not(dbgi, block, new_op);
370 static ir_node *gen_Shl(ir_node *node)
372 return gen_helper_binop(node, MATCH_SIZE_NEUTRAL, new_bd_sparc_ShiftLL_reg, new_bd_sparc_ShiftLL_imm);
375 static ir_node *gen_Shr(ir_node *node)
377 return gen_helper_binop(node, MATCH_SIZE_NEUTRAL, new_bd_sparc_ShiftLR_reg, new_bd_sparc_ShiftLR_imm);
380 /****** TRANSFORM GENERAL BACKEND NODES ********/
383 * Transforms a Minus node.
386 static ir_node *gen_Minus(ir_node *node)
388 ir_node *block = be_transform_node(get_nodes_block(node));
389 ir_node *op = get_Minus_op(node);
390 ir_node *new_op = be_transform_node(op);
391 dbg_info *dbgi = get_irn_dbg_info(node);
392 ir_mode *mode = get_irn_mode(node);
394 if (mode_is_float(mode)) {
395 panic("FP not implemented yet");
398 assert(mode_is_data(mode));
399 return new_bd_sparc_Minus(dbgi, block, new_op);
403 * Transforms a Const node.
405 * @param node the ir Store node
406 * @return The transformed sparc node.
408 static ir_node *gen_Const(ir_node *node)
410 ir_node *block = be_transform_node(get_nodes_block(node));
411 ir_mode *mode = get_irn_mode(node);
412 dbg_info *dbg = get_irn_dbg_info(node);
416 if (mode_is_float(mode)) {
417 panic("FP not supported yet");
419 return create_const_graph(node, block);
424 * @param node the ir AddSP node
425 * @return transformed sparc SAVE node
427 static ir_node *gen_be_AddSP(ir_node *node)
429 ir_node *block = be_transform_node(get_nodes_block(node));
430 ir_node *sz = get_irn_n(node, be_pos_AddSP_size);
431 ir_node *new_sz = be_transform_node(sz);
432 ir_node *sp = get_irn_n(node, be_pos_AddSP_old_sp);
433 ir_node *new_sp = be_transform_node(sp);
434 dbg_info *dbgi = get_irn_dbg_info(node);
435 ir_node *nomem = new_NoMem();
438 /* SPARC stack grows in reverse direction */
439 new_op = new_bd_sparc_AddSP(dbgi, block, new_sp, new_sz, nomem);
447 * @param node the ir SubSP node
448 * @return transformed sparc SAVE node
450 static ir_node *gen_be_SubSP(ir_node *node)
452 ir_node *block = be_transform_node(get_nodes_block(node));
453 ir_node *sz = get_irn_n(node, be_pos_SubSP_size);
454 ir_node *new_sz = be_transform_node(sz);
455 ir_node *sp = get_irn_n(node, be_pos_SubSP_old_sp);
456 ir_node *new_sp = be_transform_node(sp);
457 dbg_info *dbgi = get_irn_dbg_info(node);
458 ir_node *nomem = new_NoMem();
461 /* SPARC stack grows in reverse direction */
462 new_op = new_bd_sparc_SubSP(dbgi, block, new_sp, new_sz, nomem);
467 * transform FrameAddr
469 static ir_node *gen_be_FrameAddr(ir_node *node)
471 ir_node *block = be_transform_node(get_nodes_block(node));
472 ir_entity *ent = be_get_frame_entity(node);
473 ir_node *fp = be_get_FrameAddr_frame(node);
474 ir_node *new_fp = be_transform_node(fp);
475 dbg_info *dbgi = get_irn_dbg_info(node);
477 new_node = new_bd_sparc_FrameAddr(dbgi, block, new_fp, ent);
482 * Transform a be_Copy.
484 static ir_node *gen_be_Copy(ir_node *node)
486 ir_node *result = be_duplicate_node(node);
487 ir_mode *mode = get_irn_mode(result);
489 if (mode_needs_gp_reg(mode)) {
490 set_irn_mode(node, mode_Iu);
499 static ir_node *gen_be_Call(ir_node *node)
501 ir_node *res = be_duplicate_node(node);
502 arch_irn_add_flags(res, arch_irn_flags_modify_flags);
507 * Transforms a Switch.
510 static ir_node *gen_SwitchJmp(ir_node *node)
512 ir_node *block = be_transform_node(get_nodes_block(node));
513 ir_node *selector = get_Cond_selector(node);
514 dbg_info *dbgi = get_irn_dbg_info(node);
515 ir_node *new_op = be_transform_node(selector);
516 ir_node *const_graph;
520 const ir_edge_t *edge;
527 foreach_out_edge(node, edge) {
528 proj = get_edge_src_irn(edge);
529 assert(is_Proj(proj) && "Only proj allowed at SwitchJmp");
531 pn = get_Proj_proj(proj);
533 min = pn<min ? pn : min;
534 max = pn>max ? pn : max;
538 n_projs = max - translation + 1;
540 foreach_out_edge(node, edge) {
541 proj = get_edge_src_irn(edge);
542 assert(is_Proj(proj) && "Only proj allowed at SwitchJmp");
544 pn = get_Proj_proj(proj) - translation;
545 set_Proj_proj(proj, pn);
548 const_graph = create_const_graph_value(dbgi, block, translation);
549 sub = new_bd_sparc_Sub_reg(dbgi, block, new_op, const_graph);
550 return new_bd_sparc_SwitchJmp(dbgi, block, sub, n_projs, get_Cond_default_proj(node) - translation);
554 * Transform Cond nodes
556 static ir_node *gen_Cond(ir_node *node)
558 ir_node *selector = get_Cond_selector(node);
559 ir_mode *mode = get_irn_mode(selector);
565 if (mode != mode_b) {
566 return gen_SwitchJmp(node);
569 // regular if/else jumps
570 assert(is_Proj(selector));
572 block = be_transform_node(get_nodes_block(node));
573 dbgi = get_irn_dbg_info(node);
574 flag_node = be_transform_node(get_Proj_pred(selector));
575 return new_bd_sparc_Branch(dbgi, block, flag_node, get_Proj_proj(selector));
581 static ir_node *gen_Cmp(ir_node *node)
583 ir_node *block = be_transform_node(get_nodes_block(node));
584 ir_node *op1 = get_Cmp_left(node);
585 ir_node *op2 = get_Cmp_right(node);
586 ir_mode *cmp_mode = get_irn_mode(op1);
587 dbg_info *dbgi = get_irn_dbg_info(node);
592 if (mode_is_float(cmp_mode)) {
593 panic("FloatCmp not implemented");
596 if (get_mode_size_bits(cmp_mode) != 32) {
597 panic("CmpMode != 32bit not supported yet");
600 assert(get_irn_mode(op2) == cmp_mode);
601 is_unsigned = !mode_is_signed(cmp_mode);
603 /* compare with 0 can be done with Tst */
605 if (is_Const(op2) && tarval_is_null(get_Const_tarval(op2))) {
606 new_op1 = be_transform_node(op1);
607 return new_bd_sparc_Tst(dbgi, block, new_op1, false,
611 if (is_Const(op1) && tarval_is_null(get_Const_tarval(op1))) {
612 new_op2 = be_transform_node(op2);
613 return new_bd_sparc_Tst(dbgi, block, new_op2, true,
618 /* integer compare */
619 new_op1 = be_transform_node(op1);
620 //new_op1 = gen_extension(dbgi, block, new_op1, cmp_mode);
621 new_op2 = be_transform_node(op2);
622 //new_op2 = gen_extension(dbgi, block, new_op2, cmp_mode);
623 return new_bd_sparc_Cmp_reg(dbgi, block, new_op1, new_op2, false, is_unsigned);
627 * Transforms a SymConst node.
629 static ir_node *gen_SymConst(ir_node *node)
631 ir_node *block = be_transform_node(get_nodes_block(node));
632 ir_entity *entity = get_SymConst_entity(node);
633 dbg_info *dbgi = get_irn_dbg_info(node);
636 new_node = new_bd_sparc_SymConst(dbgi, block, entity);
637 be_dep_on_frame(new_node);
642 * Create an And that will zero out upper bits.
644 * @param dbgi debug info
645 * @param block the basic block
646 * @param op the original node
647 * @param src_bits number of lower bits that will remain
649 static ir_node *gen_zero_extension(dbg_info *dbgi, ir_node *block, ir_node *op,
653 return new_bd_sparc_And_imm(dbgi, block, op, 0xFF);
654 } else if (src_bits == 16) {
655 ir_node *lshift = new_bd_sparc_ShiftLL_imm(dbgi, block, op, 16);
656 ir_node *rshift = new_bd_sparc_ShiftLR_imm(dbgi, block, lshift, 16);
659 panic("zero extension only supported for 8 and 16 bits");
664 * Generate code for a sign extension.
666 static ir_node *gen_sign_extension(dbg_info *dbgi, ir_node *block, ir_node *op,
669 int shift_width = 32 - src_bits;
670 ir_node *lshift_node = new_bd_sparc_ShiftLL_imm(dbgi, block, op, shift_width);
671 ir_node *rshift_node = new_bd_sparc_ShiftRA_imm(dbgi, block, lshift_node, shift_width);
676 * returns true if it is assured, that the upper bits of a node are "clean"
677 * which means for a 16 or 8 bit value, that the upper bits in the register
678 * are 0 for unsigned and a copy of the last significant bit for signed
681 static bool upper_bits_clean(ir_node *transformed_node, ir_mode *mode)
/* NOTE(review): the rest of this function's body is not visible in this
 * chunk; presumably it conservatively returns false (so gen_Conv always
 * emits an extension) — confirm against the full source. */
683 (void) transformed_node;
690 * Transforms a Conv node.
693 static ir_node *gen_Conv(ir_node *node)
695 ir_node *block = be_transform_node(get_nodes_block(node));
696 ir_node *op = get_Conv_op(node);
697 ir_node *new_op = be_transform_node(op);
698 ir_mode *src_mode = get_irn_mode(op);
699 ir_mode *dst_mode = get_irn_mode(node);
700 dbg_info *dbg = get_irn_dbg_info(node);
/* same-mode conversions are no-ops (return statement not visible here) */
702 if (src_mode == dst_mode)
705 if (mode_is_float(src_mode) || mode_is_float(dst_mode)) {
706 panic("FP not implemented");
707 } else { /* complete in gp registers */
708 int src_bits = get_mode_size_bits(src_mode);
709 int dst_bits = get_mode_size_bits(dst_mode);
/* equal-width int<->int conversions need no code */
713 if (src_bits == dst_bits) {
714 /* kill unneccessary conv */
/* widening: the value is already correct in the 32bit register */
718 if (src_bits < dst_bits) {
/* narrowing: extend from the smaller mode (min_mode/min_bits are
 * computed on lines not visible in this chunk — TODO confirm) */
726 if (upper_bits_clean(new_op, min_mode)) {
730 if (mode_is_signed(min_mode)) {
731 return gen_sign_extension(dbg, block, new_op, min_bits);
733 return gen_zero_extension(dbg, block, new_op, min_bits);
/* Transforms an Unknown node: GP-register modes are materialized as 0. */
738 static ir_node *gen_Unknown(ir_node *node)
740 ir_node *block = get_nodes_block(node);
741 ir_node *new_block = be_transform_node(block);
742 dbg_info *dbgi = get_irn_dbg_info(node);
744 /* just produce a 0 */
745 ir_mode *mode = get_irn_mode(node);
746 if (mode_is_float(mode)) {
747 panic("FP not implemented");
/* NOTE(review): this call is unreachable after the panic above — likely
 * leftover from an earlier FP stub; the return it belonged to is not
 * visible in this chunk. */
748 be_dep_on_frame(node);
750 } else if (mode_needs_gp_reg(mode)) {
751 return create_const_graph_value(dbgi, new_block, 0);
754 panic("Unexpected Unknown mode");
758 * Transform some Phi nodes
760 static ir_node *gen_Phi(ir_node *node)
762 const arch_register_req_t *req;
763 ir_node *block = be_transform_node(get_nodes_block(node));
764 ir_graph *irg = current_ir_graph;
765 dbg_info *dbgi = get_irn_dbg_info(node);
766 ir_mode *mode = get_irn_mode(node);
769 if (mode_needs_gp_reg(mode)) {
770 /* we shouldn't have any 64bit stuff around anymore */
771 assert(get_mode_size_bits(mode) <= 32);
772 /* all integer operations are on 32bit registers now */
774 req = sparc_reg_classes[CLASS_sparc_gp].class_req;
776 req = arch_no_register_req;
779 /* phi nodes allow loops, so we use the old arguments for now
780 * and fix this later */
781 phi = new_ir_node(dbgi, irg, block, op_Phi, mode, get_irn_arity(node), get_irn_in(node) + 1);
782 copy_node_attr(irg, node, phi);
783 be_duplicate_deps(node, phi);
784 arch_set_out_register_req(phi, 0, req);
785 be_enqueue_preds(node);
791 * Transform a Proj from a Load.
793 static ir_node *gen_Proj_Load(ir_node *node)
795 ir_node *load = get_Proj_pred(node);
796 ir_node *new_load = be_transform_node(load);
797 dbg_info *dbgi = get_irn_dbg_info(node);
798 long proj = get_Proj_proj(node);
800 /* renumber the proj */
801 switch (get_sparc_irn_opcode(new_load)) {
803 /* handle all gp loads equal: they have the same proj numbers. */
804 if (proj == pn_Load_res) {
805 return new_rd_Proj(dbgi, new_load, mode_Iu, pn_sparc_Load_res);
806 } else if (proj == pn_Load_M) {
807 return new_rd_Proj(dbgi, new_load, mode_M, pn_sparc_Load_M);
811 case iro_sparc_fpaLoad:
812 panic("FP not implemented yet");
816 panic("Unsupported Proj from Load");
819 return be_duplicate_node(node);
823 * Transform the Projs of a be_AddSP.
825 static ir_node *gen_Proj_be_AddSP(ir_node *node)
827 ir_node *pred = get_Proj_pred(node);
828 ir_node *new_pred = be_transform_node(pred);
829 dbg_info *dbgi = get_irn_dbg_info(node);
830 long proj = get_Proj_proj(node);
832 if (proj == pn_be_AddSP_sp) {
833 // TODO: check for correct pn_sparc_* flags
834 ir_node *res = new_rd_Proj(dbgi, new_pred, mode_Iu,
835 pn_sparc_SubSP_stack);
836 arch_set_irn_register(res, &sparc_gp_regs[REG_SP]);
838 } else if (proj == pn_be_AddSP_res) {
839 // TODO: check for correct pn_sparc_* flags
840 return new_rd_Proj(dbgi, new_pred, mode_Iu, pn_sparc_SubSP_stack);
841 } else if (proj == pn_be_AddSP_M) {
842 return new_rd_Proj(dbgi, new_pred, mode_M, pn_sparc_SubSP_M);
845 panic("Unsupported Proj from AddSP");
849 * Transform the Projs of a be_SubSP.
851 static ir_node *gen_Proj_be_SubSP(ir_node *node)
853 ir_node *pred = get_Proj_pred(node);
854 ir_node *new_pred = be_transform_node(pred);
855 dbg_info *dbgi = get_irn_dbg_info(node);
856 long proj = get_Proj_proj(node);
858 if (proj == pn_be_SubSP_sp) {
859 ir_node *res = new_rd_Proj(dbgi, new_pred, mode_Iu,
860 pn_sparc_AddSP_stack);
861 arch_set_irn_register(res, &sparc_gp_regs[REG_SP]);
863 } else if (proj == pn_be_SubSP_M) {
864 return new_rd_Proj(dbgi, new_pred, mode_M, pn_sparc_AddSP_M);
867 panic("Unsupported Proj from SubSP");
871 * Transform the Projs from a Cmp.
873 static ir_node *gen_Proj_Cmp(ir_node *node)
876 panic("not implemented");
881 * Transform a Proj node.
883 static ir_node *gen_Proj(ir_node *node)
885 ir_graph *irg = current_ir_graph;
886 dbg_info *dbgi = get_irn_dbg_info(node);
887 ir_node *pred = get_Proj_pred(node);
888 long proj = get_Proj_proj(node);
/* dispatch on the kind of the Proj's predecessor */
893 if (is_Store(pred)) {
894 if (proj == pn_Store_M) {
895 return be_transform_node(pred);
897 panic("Unsupported Proj from Store");
899 } else if (is_Load(pred)) {
900 return gen_Proj_Load(node);
901 } else if (be_is_SubSP(pred)) {
902 //panic("gen_Proj not implemented for SubSP");
903 return gen_Proj_be_SubSP(node);
904 } else if (be_is_AddSP(pred)) {
905 //panic("gen_Proj not implemented for AddSP");
906 return gen_Proj_be_AddSP(node);
907 } else if (is_Cmp(pred)) {
908 //panic("gen_Proj not implemented for Cmp");
909 return gen_Proj_Cmp(node);
910 } else if (is_Start(pred)) {
912 if (proj == pn_Start_X_initial_exec) {
913 ir_node *block = get_nodes_block(pred);
916 // we exchange the ProjX with a jump
917 block = be_transform_node(block);
918 jump = new_rd_Jmp(dbgi, block);
/* NOTE(review): the return for the jump above is not visible in this
 * chunk — presumably `return jump;` follows. */
922 if (node == get_irg_anchor(irg, anchor_tls)) {
923 return gen_Proj_tls(node);
/* fallback path for other predecessors */
927 ir_node *new_pred = be_transform_node(pred);
928 ir_mode *mode = get_irn_mode(node);
929 if (mode_needs_gp_reg(mode)) {
930 ir_node *new_proj = new_r_Proj(new_pred, mode_Iu, get_Proj_proj(node));
/* keep the old node number for debugging */
931 new_proj->node_nr = node->node_nr;
936 return be_duplicate_node(node);
943 static ir_node *gen_Jmp(ir_node *node)
945 ir_node *block = get_nodes_block(node);
946 ir_node *new_block = be_transform_node(block);
947 dbg_info *dbgi = get_irn_dbg_info(node);
949 return new_bd_sparc_Jmp(dbgi, new_block);
953 * the BAD transformer.
955 static ir_node *bad_transform(ir_node *irn)
957 panic("SPARC backend: Not implemented: %+F", irn);
961 * Set a node emitter. Make it a bit more type safe.
963 static void set_transformer(ir_op *op, be_transform_func sparc_transform_func)
965 op->ops.generic = (op_func)sparc_transform_func;
969 * configure transformation callbacks
971 void sparc_register_transformers(void)
973 clear_irp_opcodes_generic_func();
/* implemented transformers */
974 set_transformer(op_Add, gen_Add);
975 set_transformer(op_Store, gen_Store);
976 set_transformer(op_Const, gen_Const);
977 set_transformer(op_Load, gen_Load);
978 set_transformer(op_Sub, gen_Sub);
980 set_transformer(op_be_AddSP, gen_be_AddSP);
981 set_transformer(op_be_SubSP, gen_be_SubSP);
982 set_transformer(op_be_Copy, gen_be_Copy);
983 set_transformer(op_be_Call, gen_be_Call);
984 set_transformer(op_be_FrameAddr, gen_be_FrameAddr);
986 set_transformer(op_Cond, gen_Cond);
987 set_transformer(op_Cmp, gen_Cmp);
989 set_transformer(op_SymConst, gen_SymConst);
991 set_transformer(op_Phi, gen_Phi);
992 set_transformer(op_Proj, gen_Proj);
994 set_transformer(op_Conv, gen_Conv);
995 set_transformer(op_Jmp, gen_Jmp);
997 set_transformer(op_Mul, gen_Mul);
998 set_transformer(op_Div, gen_Div);
999 set_transformer(op_Abs, gen_Abs);
1000 set_transformer(op_Shl, gen_Shl);
1001 set_transformer(op_Shr, gen_Shr);
1003 set_transformer(op_Minus, gen_Minus);
1004 set_transformer(op_Not, gen_Not);
1006 set_transformer(op_Unknown, gen_Unknown);
/* NOTE(review): the following handlers (gen_And, gen_CopyB, gen_Eor,
 * gen_Or, gen_Quot, gen_Rotl, gen_Shrs) are not defined in this file as
 * visible here; original lines 1007-1009 are missing and may have held an
 * `#if 0` or comment guard — confirm before assuming these compile. Also
 * note op_Mul is registered twice (lines 997 and 1013). */
1010 set_transformer(op_And, gen_And);
1011 set_transformer(op_CopyB, gen_CopyB);
1012 set_transformer(op_Eor, gen_Eor);
1013 set_transformer(op_Mul, gen_Mul);
1014 set_transformer(op_Or, gen_Or);
1015 set_transformer(op_Quot, gen_Quot);
1016 set_transformer(op_Rotl, gen_Rotl);
1017 set_transformer(op_Shrs, gen_Shrs);
/* node types the SPARC backend explicitly does not support */
1020 set_transformer(op_ASM, bad_transform);
1021 set_transformer(op_Builtin, bad_transform);
1022 set_transformer(op_CallBegin, bad_transform);
1023 set_transformer(op_Cast, bad_transform);
1024 set_transformer(op_Confirm, bad_transform);
1025 set_transformer(op_DivMod, bad_transform);
1026 set_transformer(op_EndExcept, bad_transform);
1027 set_transformer(op_EndReg, bad_transform);
1028 set_transformer(op_Filter, bad_transform);
1029 set_transformer(op_Free, bad_transform);
1030 set_transformer(op_Id, bad_transform);
1031 set_transformer(op_InstOf, bad_transform);
1032 set_transformer(op_Mulh, bad_transform);
1033 set_transformer(op_Mux, bad_transform);
1034 set_transformer(op_Raise, bad_transform);
1035 set_transformer(op_Sel, bad_transform);
1036 set_transformer(op_Tuple, bad_transform);
1041 * Pre-transform all unknown nodes.
1043 static void sparc_pretransform_node(void)
/* currently a stub: the Unknown pre-transformation is disabled below */
1045 sparc_code_gen_t *cg = env_cg;
1047 //cg->unknown_gp = be_pre_transform_node(cg->unknown_gp);
1048 //cg->unknown_fpa = be_pre_transform_node(cg->unknown_fpa);
1052 * Transform a Firm graph into a SPARC graph.
1054 void sparc_transform_graph(sparc_code_gen_t *cg)
1056 sparc_register_transformers();
1058 be_transform_graph(cg->irg, sparc_pretransform_node);
1061 void sparc_init_transform(void)
1063 FIRM_DBG_REGISTER(dbg, "firm.be.sparc.transform");