/*
 * Copyright (C) 1995-2010 University of Karlsruhe.  All right reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License.
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 */

/**
 * @file
 * @brief   code selection (transform FIRM into SPARC FIRM)
 * @version $Id: TEMPLATE_transform.c 26673 2009-10-01 16:43:13Z matze $
 */
#include <stdbool.h>
#include <limits.h>

#include "irgraph_t.h"

#include "../benode.h"
#include "../beutil.h"
#include "../betranshlp.h"

#include "bearch_sparc_t.h"
#include "sparc_nodes_attr.h"
#include "sparc_transform.h"
#include "sparc_new_nodes.h"
#include "gen_sparc_new_nodes.h"
#include "gen_sparc_regalloc_if.h"
56 DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
58 static sparc_code_gen_t *env_cg;
60 static inline int mode_needs_gp_reg(ir_mode *mode)
62 return mode_is_int(mode) || mode_is_reference(mode);
66 * Creates a possible DAG for a constant.
68 static ir_node *create_const_graph_value(dbg_info *dbgi, ir_node *block,
73 // TODO: find a better solution for this
74 if (value < -4096 || value > 4095) {
75 panic("FIXME: immediate value exceeds max. size of simm13 (13 bits signed)");
78 result = new_bd_sparc_Mov_imm(dbgi, block, (int) value);
84 * Create a DAG constructing a given Const.
86 * @param irn a Firm const
88 static ir_node *create_const_graph(ir_node *irn, ir_node *block)
90 tarval *tv = get_Const_tarval(irn);
91 ir_mode *mode = get_tarval_mode(tv);
94 if (mode_is_reference(mode)) {
95 /* SPARC V8 is 32bit, so we can safely convert a reference tarval into Iu */
96 assert(get_mode_size_bits(mode) == get_mode_size_bits(mode_Iu));
97 tv = tarval_convert_to(tv, mode_Iu);
99 value = get_tarval_long(tv);
100 return create_const_graph_value(get_irn_dbg_info(irn), block, value);
106 MATCH_COMMUTATIVE = 1 << 0,
107 MATCH_SIZE_NEUTRAL = 1 << 1,
110 typedef ir_node* (*new_binop_reg_func) (dbg_info *dbgi, ir_node *block, ir_node *op1, ir_node *op2);
111 typedef ir_node* (*new_binop_imm_func) (dbg_info *dbgi, ir_node *block, ir_node *op1, int simm13);
114 * checks wether a node's value can be encoded as a immediate
115 * TODO: pass a result pointer to fetch the encoded immediate
118 static bool is_imm_encodeable(const ir_node *node)
125 val = get_tarval_long(get_Const_tarval(node));
127 return !(val < -4096 || val > 4095);
131 * helper function for binop operations
133 * @param new_binop_reg_func register generation function ptr
134 * @param new_binop_imm_func immediate generation function ptr
136 static ir_node *gen_helper_binop(ir_node *node, match_flags_t flags,
137 new_binop_reg_func new_reg, new_binop_imm_func new_imm)
139 ir_node *block = be_transform_node(get_nodes_block(node));
140 ir_node *op1 = get_binop_left(node);
142 ir_node *op2 = get_binop_right(node);
144 dbg_info *dbgi = get_irn_dbg_info(node);
147 if (flags & MATCH_SIZE_NEUTRAL) {
148 op1 = arm_skip_downconv(op1);
149 op2 = arm_skip_downconv(op2);
151 assert(get_mode_size_bits(get_irn_mode(node)) == 32);
154 if (is_imm_encodeable(op2)) {
155 ir_node *new_op1 = be_transform_node(op1);
156 return new_imm(dbgi, block, new_op1, get_tarval_long(get_Const_tarval(op2)));
159 new_op2 = be_transform_node(op2);
161 if ((flags & MATCH_COMMUTATIVE) && is_imm_encodeable(op1)) {
162 return new_imm(dbgi, block, new_op2, get_tarval_long(get_Const_tarval(op1)) );
165 new_op1 = be_transform_node(op1);
167 return new_reg(dbgi, block, new_op1, new_op2);
171 * Creates an sparc Add.
173 * @param node FIRM node
174 * @return the created sparc Add node
176 static ir_node *gen_Add(ir_node *node)
178 ir_mode *mode = get_irn_mode(node);
179 ir_node *block = be_transform_node(get_nodes_block(node));
180 ir_node *op1 = get_Add_left(node);
181 ir_node *op2 = get_Add_right(node);
182 dbg_info *dbgi = get_irn_dbg_info(node);
183 ir_node *new_op1 = be_transform_node(op1);
184 ir_node *new_op2 = be_transform_node(op2);
191 if (mode_is_float(mode))
192 panic("FP not implemented yet");
194 return gen_helper_binop(node, MATCH_COMMUTATIVE | MATCH_SIZE_NEUTRAL, new_bd_sparc_Add_reg, new_bd_sparc_Add_imm);
199 * Creates an sparc Sub.
201 * @param node FIRM node
202 * @return the created sparc Sub node
204 static ir_node *gen_Sub(ir_node *node)
206 ir_mode *mode = get_irn_mode(node);
207 ir_node *block = be_transform_node(get_nodes_block(node));
208 ir_node *op1 = get_Add_left(node);
209 ir_node *op2 = get_Add_right(node);
210 dbg_info *dbgi = get_irn_dbg_info(node);
211 ir_node *new_op1 = be_transform_node(op1);
212 ir_node *new_op2 = be_transform_node(op2);
219 if (mode_is_float(mode))
220 panic("FP not implemented yet");
222 return gen_helper_binop(node, MATCH_SIZE_NEUTRAL, new_bd_sparc_Sub_reg, new_bd_sparc_Sub_imm);
229 * @param node the ir Load node
230 * @return the created sparc Load node
232 static ir_node *gen_Load(ir_node *node)
234 ir_mode *mode = get_Load_mode(node);
235 ir_node *block = be_transform_node(get_nodes_block(node));
236 ir_node *ptr = get_Load_ptr(node);
237 ir_node *new_ptr = be_transform_node(ptr);
238 ir_node *mem = get_Load_mem(node);
239 ir_node *new_mem = be_transform_node(mem);
240 dbg_info *dbgi = get_irn_dbg_info(node);
241 ir_node *new_load = NULL;
243 if (mode_is_float(mode))
244 panic("SPARC: no fp implementation yet");
246 new_load = new_bd_sparc_Load(dbgi, block, new_ptr, new_mem, mode, NULL, 0, 0, false);
247 set_irn_pinned(new_load, get_irn_pinned(node));
255 * Transforms a Store.
257 * @param node the ir Store node
258 * @return the created sparc Store node
260 static ir_node *gen_Store(ir_node *node)
262 ir_node *block = be_transform_node(get_nodes_block(node));
263 ir_node *ptr = get_Store_ptr(node);
264 ir_node *new_ptr = be_transform_node(ptr);
265 ir_node *mem = get_Store_mem(node);
266 ir_node *new_mem = be_transform_node(mem);
267 ir_node *val = get_Store_value(node);
268 ir_node *new_val = be_transform_node(val);
269 ir_mode *mode = get_irn_mode(val);
270 dbg_info *dbgi = get_irn_dbg_info(node);
271 ir_node *new_store = NULL;
273 if (mode_is_float(mode))
274 panic("SPARC: no fp implementation yet");
276 new_store = new_bd_sparc_Store(dbgi, block, new_ptr, new_val, new_mem, mode, NULL, 0, 0, false);
/****** TRANSFORM GENERAL BACKEND NODES ********/
286 * Transforms a Const node.
288 * @param node the ir Store node
289 * @return The transformed sparc node.
291 static ir_node *gen_Const(ir_node *node)
293 ir_node *block = be_transform_node(get_nodes_block(node));
294 ir_mode *mode = get_irn_mode(node);
295 dbg_info *dbg = get_irn_dbg_info(node);
299 if (mode_is_float(mode)) {
300 panic("FP not supported yet");
302 return create_const_graph(node, block);
307 * @param node the ir AddSP node
308 * @return transformed sparc SAVE node
310 static ir_node *gen_be_AddSP(ir_node *node)
312 ir_node *block = be_transform_node(get_nodes_block(node));
313 ir_node *sz = get_irn_n(node, be_pos_AddSP_size);
314 ir_node *new_sz = be_transform_node(sz);
315 ir_node *sp = get_irn_n(node, be_pos_AddSP_old_sp);
316 ir_node *new_sp = be_transform_node(sp);
317 dbg_info *dbgi = get_irn_dbg_info(node);
318 ir_node *nomem = new_NoMem();
321 /* SPARC stack grows in reverse direction */
322 new_op = new_bd_sparc_AddSP(dbgi, block, new_sp, new_sz, nomem);
330 * @param node the ir SubSP node
331 * @return transformed sparc SAVE node
333 static ir_node *gen_be_SubSP(ir_node *node)
335 ir_node *block = be_transform_node(get_nodes_block(node));
336 ir_node *sz = get_irn_n(node, be_pos_SubSP_size);
337 ir_node *new_sz = be_transform_node(sz);
338 ir_node *sp = get_irn_n(node, be_pos_SubSP_old_sp);
339 ir_node *new_sp = be_transform_node(sp);
340 dbg_info *dbgi = get_irn_dbg_info(node);
341 ir_node *nomem = new_NoMem();
344 /* SPARC stack grows in reverse direction */
345 new_op = new_bd_sparc_SubSP(dbgi, block, new_sp, new_sz, nomem);
350 * transform FrameAddr
352 static ir_node *gen_be_FrameAddr(ir_node *node)
354 ir_node *block = be_transform_node(get_nodes_block(node));
355 ir_entity *ent = be_get_frame_entity(node);
356 ir_node *fp = be_get_FrameAddr_frame(node);
357 ir_node *new_fp = be_transform_node(fp);
358 dbg_info *dbgi = get_irn_dbg_info(node);
360 new_node = new_bd_sparc_FrameAddr(dbgi, block, new_fp, ent);
365 * Transform a be_Copy.
367 static ir_node *gen_be_Copy(ir_node *node)
369 ir_node *result = be_duplicate_node(node);
370 ir_mode *mode = get_irn_mode(result);
372 if (mode_needs_gp_reg(mode)) {
373 set_irn_mode(node, mode_Iu);
382 static ir_node *gen_be_Call(ir_node *node)
384 ir_node *res = be_duplicate_node(node);
385 arch_irn_add_flags(res, arch_irn_flags_modify_flags);
390 * Transforms a Switch.
393 static ir_node *gen_SwitchJmp(ir_node *node)
395 ir_node *block = be_transform_node(get_nodes_block(node));
396 ir_node *selector = get_Cond_selector(node);
397 dbg_info *dbgi = get_irn_dbg_info(node);
398 ir_node *new_op = be_transform_node(selector);
399 ir_node *const_graph;
403 const ir_edge_t *edge;
410 foreach_out_edge(node, edge) {
411 proj = get_edge_src_irn(edge);
412 assert(is_Proj(proj) && "Only proj allowed at SwitchJmp");
414 pn = get_Proj_proj(proj);
416 min = pn<min ? pn : min;
417 max = pn>max ? pn : max;
421 n_projs = max - translation + 1;
423 foreach_out_edge(node, edge) {
424 proj = get_edge_src_irn(edge);
425 assert(is_Proj(proj) && "Only proj allowed at SwitchJmp");
427 pn = get_Proj_proj(proj) - translation;
428 set_Proj_proj(proj, pn);
431 const_graph = create_const_graph_value(dbgi, block, translation);
432 sub = new_bd_sparc_Sub_reg(dbgi, block, new_op, const_graph);
433 return new_bd_sparc_SwitchJmp(dbgi, block, sub, n_projs, get_Cond_default_proj(node) - translation);
437 * Transform Cond nodes
439 static ir_node *gen_Cond(ir_node *node)
441 ir_node *selector = get_Cond_selector(node);
442 ir_mode *mode = get_irn_mode(selector);
448 if (mode != mode_b) {
449 return gen_SwitchJmp(node);
452 // regular if/else jumps
453 assert(is_Proj(selector));
455 block = be_transform_node(get_nodes_block(node));
456 dbgi = get_irn_dbg_info(node);
457 flag_node = be_transform_node(get_Proj_pred(selector));
458 return new_bd_sparc_Branch(dbgi, block, flag_node, get_Proj_proj(selector));
464 static ir_node *gen_Cmp(ir_node *node)
466 ir_node *block = be_transform_node(get_nodes_block(node));
467 ir_node *op1 = get_Cmp_left(node);
468 ir_node *op2 = get_Cmp_right(node);
469 ir_mode *cmp_mode = get_irn_mode(op1);
470 dbg_info *dbgi = get_irn_dbg_info(node);
475 if (mode_is_float(cmp_mode)) {
476 panic("FloatCmp not implemented");
479 if (get_mode_size_bits(cmp_mode) != 32) {
480 panic("CmpMode != 32bit not supported yet");
483 assert(get_irn_mode(op2) == cmp_mode);
484 is_unsigned = !mode_is_signed(cmp_mode);
486 /* compare with 0 can be done with Tst */
487 if (is_Const(op2) && tarval_is_null(get_Const_tarval(op2))) {
488 new_op1 = be_transform_node(op1);
489 return new_bd_sparc_Tst(dbgi, block, new_op1, false,
493 if (is_Const(op1) && tarval_is_null(get_Const_tarval(op1))) {
494 new_op2 = be_transform_node(op2);
495 return new_bd_sparc_Tst(dbgi, block, new_op2, true,
499 /* integer compare */
500 new_op1 = be_transform_node(op1);
501 //new_op1 = gen_extension(dbgi, block, new_op1, cmp_mode);
502 new_op2 = be_transform_node(op2);
503 //new_op2 = gen_extension(dbgi, block, new_op2, cmp_mode);
504 return new_bd_sparc_Cmp_reg(dbgi, block, new_op1, new_op2, false, is_unsigned);
508 * Transforms a SymConst node.
510 static ir_node *gen_SymConst(ir_node *node)
512 ir_node *block = be_transform_node(get_nodes_block(node));
513 ir_entity *entity = get_SymConst_entity(node);
514 dbg_info *dbgi = get_irn_dbg_info(node);
517 new_node = new_bd_sparc_SymConst(dbgi, block, entity);
518 be_dep_on_frame(new_node);
523 * Create an And that will zero out upper bits.
525 * @param dbgi debug info
526 * @param block the basic block
527 * @param op the original node
528 * @param src_bits number of lower bits that will remain
530 static ir_node *gen_zero_extension(dbg_info *dbgi, ir_node *block, ir_node *op,
534 return new_bd_sparc_And_imm(dbgi, block, op, 0xFF);
535 } else if (src_bits == 16) {
536 ir_node *lshift = new_bd_sparc_ShiftLL_imm(dbgi, block, op, 16);
537 ir_node *rshift = new_bd_sparc_ShiftLR_imm(dbgi, block, lshift, 16);
540 panic("zero extension only supported for 8 and 16 bits");
545 * Generate code for a sign extension.
547 static ir_node *gen_sign_extension(dbg_info *dbgi, ir_node *block, ir_node *op,
550 int shift_width = 32 - src_bits;
551 ir_node *lshift_node = new_bd_sparc_ShiftLL_imm(dbgi, block, op, shift_width);
552 ir_node *rshift_node = new_bd_sparc_ShiftRA_imm(dbgi, block, lshift_node, shift_width);
557 * returns true if it is assured, that the upper bits of a node are "clean"
558 * which means for a 16 or 8 bit value, that the upper bits in the register
559 * are 0 for unsigned and a copy of the last significant bit for signed
562 static bool upper_bits_clean(ir_node *transformed_node, ir_mode *mode)
564 (void) transformed_node;
571 * Transforms a Conv node.
574 static ir_node *gen_Conv(ir_node *node)
576 ir_node *block = be_transform_node(get_nodes_block(node));
577 ir_node *op = get_Conv_op(node);
578 ir_node *new_op = be_transform_node(op);
579 ir_mode *src_mode = get_irn_mode(op);
580 ir_mode *dst_mode = get_irn_mode(node);
581 dbg_info *dbg = get_irn_dbg_info(node);
583 if (src_mode == dst_mode)
586 if (mode_is_float(src_mode) || mode_is_float(dst_mode)) {
587 panic("FP not implemented");
588 } else { /* complete in gp registers */
589 int src_bits = get_mode_size_bits(src_mode);
590 int dst_bits = get_mode_size_bits(dst_mode);
594 if (src_bits == dst_bits) {
595 /* kill unneccessary conv */
599 if (src_bits < dst_bits) {
607 if (upper_bits_clean(new_op, min_mode)) {
611 if (mode_is_signed(min_mode)) {
612 return gen_sign_extension(dbg, block, new_op, min_bits);
614 return gen_zero_extension(dbg, block, new_op, min_bits);
620 * Transform some Phi nodes
622 static ir_node *gen_Phi(ir_node *node)
624 const arch_register_req_t *req;
625 ir_node *block = be_transform_node(get_nodes_block(node));
626 ir_graph *irg = current_ir_graph;
627 dbg_info *dbgi = get_irn_dbg_info(node);
628 ir_mode *mode = get_irn_mode(node);
631 if (mode_needs_gp_reg(mode)) {
632 /* we shouldn't have any 64bit stuff around anymore */
633 assert(get_mode_size_bits(mode) <= 32);
634 /* all integer operations are on 32bit registers now */
636 req = sparc_reg_classes[CLASS_sparc_gp].class_req;
638 req = arch_no_register_req;
641 /* phi nodes allow loops, so we use the old arguments for now
642 * and fix this later */
643 phi = new_ir_node(dbgi, irg, block, op_Phi, mode, get_irn_arity(node), get_irn_in(node) + 1);
644 copy_node_attr(irg, node, phi);
645 be_duplicate_deps(node, phi);
646 arch_set_out_register_req(phi, 0, req);
647 be_enqueue_preds(node);
653 * Transform a Proj from a Load.
655 static ir_node *gen_Proj_Load(ir_node *node)
657 ir_node *load = get_Proj_pred(node);
658 ir_node *new_load = be_transform_node(load);
659 dbg_info *dbgi = get_irn_dbg_info(node);
660 long proj = get_Proj_proj(node);
662 /* renumber the proj */
663 switch (get_sparc_irn_opcode(new_load)) {
665 /* handle all gp loads equal: they have the same proj numbers. */
666 if (proj == pn_Load_res) {
667 return new_rd_Proj(dbgi, new_load, mode_Iu, pn_sparc_Load_res);
668 } else if (proj == pn_Load_M) {
669 return new_rd_Proj(dbgi, new_load, mode_M, pn_sparc_Load_M);
673 case iro_sparc_fpaLoad:
674 panic("FP not implemented yet");
678 panic("Unsupported Proj from Load");
681 return be_duplicate_node(node);
685 * Transform the Projs of a be_AddSP.
687 static ir_node *gen_Proj_be_AddSP(ir_node *node)
689 ir_node *pred = get_Proj_pred(node);
690 ir_node *new_pred = be_transform_node(pred);
691 dbg_info *dbgi = get_irn_dbg_info(node);
692 long proj = get_Proj_proj(node);
694 if (proj == pn_be_AddSP_sp) {
695 // TODO: check for correct pn_sparc_* flags
696 ir_node *res = new_rd_Proj(dbgi, new_pred, mode_Iu,
697 pn_sparc_SubSP_stack);
698 arch_set_irn_register(res, &sparc_gp_regs[REG_SP]);
700 } else if (proj == pn_be_AddSP_res) {
701 // TODO: check for correct pn_sparc_* flags
702 return new_rd_Proj(dbgi, new_pred, mode_Iu, pn_sparc_SubSP_stack);
703 } else if (proj == pn_be_AddSP_M) {
704 return new_rd_Proj(dbgi, new_pred, mode_M, pn_sparc_SubSP_M);
707 panic("Unsupported Proj from AddSP");
711 * Transform the Projs of a be_SubSP.
713 static ir_node *gen_Proj_be_SubSP(ir_node *node)
715 ir_node *pred = get_Proj_pred(node);
716 ir_node *new_pred = be_transform_node(pred);
717 dbg_info *dbgi = get_irn_dbg_info(node);
718 long proj = get_Proj_proj(node);
720 if (proj == pn_be_SubSP_sp) {
721 ir_node *res = new_rd_Proj(dbgi, new_pred, mode_Iu,
722 pn_sparc_AddSP_stack);
723 arch_set_irn_register(res, &sparc_gp_regs[REG_SP]);
725 } else if (proj == pn_be_SubSP_M) {
726 return new_rd_Proj(dbgi, new_pred, mode_M, pn_sparc_AddSP_M);
729 panic("Unsupported Proj from SubSP");
733 * Transform the Projs from a Cmp.
735 static ir_node *gen_Proj_Cmp(ir_node *node)
738 panic("not implemented");
743 * Transform a Proj node.
745 static ir_node *gen_Proj(ir_node *node)
747 ir_graph *irg = current_ir_graph;
748 dbg_info *dbgi = get_irn_dbg_info(node);
749 ir_node *pred = get_Proj_pred(node);
750 long proj = get_Proj_proj(node);
755 if (is_Store(pred)) {
756 if (proj == pn_Store_M) {
757 return be_transform_node(pred);
759 panic("Unsupported Proj from Store");
761 } else if (is_Load(pred)) {
762 return gen_Proj_Load(node);
763 } else if (be_is_SubSP(pred)) {
764 //panic("gen_Proj not implemented for SubSP");
765 return gen_Proj_be_SubSP(node);
766 } else if (be_is_AddSP(pred)) {
767 //panic("gen_Proj not implemented for AddSP");
768 return gen_Proj_be_AddSP(node);
769 } else if (is_Cmp(pred)) {
770 //panic("gen_Proj not implemented for Cmp");
771 return gen_Proj_Cmp(node);
772 } else if (is_Start(pred)) {
774 if (proj == pn_Start_X_initial_exec) {
775 ir_node *block = get_nodes_block(pred);
778 // we exchange the ProjX with a jump
779 block = be_transform_node(block);
780 jump = new_rd_Jmp(dbgi, block);
784 if (node == get_irg_anchor(irg, anchor_tls)) {
785 return gen_Proj_tls(node);
789 ir_node *new_pred = be_transform_node(pred);
790 ir_mode *mode = get_irn_mode(node);
791 if (mode_needs_gp_reg(mode)) {
792 ir_node *new_proj = new_r_Proj(new_pred, mode_Iu, get_Proj_proj(node));
793 new_proj->node_nr = node->node_nr;
798 return be_duplicate_node(node);
804 static ir_node *gen_Jmp(ir_node *node)
806 ir_node *block = get_nodes_block(node);
807 ir_node *new_block = be_transform_node(block);
808 dbg_info *dbgi = get_irn_dbg_info(node);
810 return new_bd_sparc_Jmp(dbgi, new_block);
814 * the BAD transformer.
816 static ir_node *bad_transform(ir_node *irn)
818 panic("SPARC backend: Not implemented: %+F", irn);
822 * Set a node emitter. Make it a bit more type safe.
824 static void set_transformer(ir_op *op, be_transform_func sparc_transform_func)
826 op->ops.generic = (op_func)sparc_transform_func;
830 * configure transformation callbacks
832 void sparc_register_transformers(void)
834 clear_irp_opcodes_generic_func();
835 set_transformer(op_Add, gen_Add);
836 set_transformer(op_Store, gen_Store);
837 set_transformer(op_Const, gen_Const);
838 set_transformer(op_Load, gen_Load);
839 set_transformer(op_Sub, gen_Sub);
841 set_transformer(op_be_AddSP, gen_be_AddSP);
842 set_transformer(op_be_SubSP, gen_be_SubSP);
843 set_transformer(op_be_Copy, gen_be_Copy);
844 set_transformer(op_be_Call, gen_be_Call);
845 set_transformer(op_be_FrameAddr, gen_be_FrameAddr);
847 set_transformer(op_Cond, gen_Cond);
848 set_transformer(op_Cmp, gen_Cmp);
850 set_transformer(op_SymConst, gen_SymConst);
852 set_transformer(op_Phi, gen_Phi);
853 set_transformer(op_Proj, gen_Proj);
855 set_transformer(op_Conv, gen_Conv);
856 set_transformer(op_Jmp, gen_Jmp);
860 set_transformer(op_Abs, gen_Abs);
861 set_transformer(op_Add, gen_Add);
862 set_transformer(op_And, gen_And);
863 set_transformer(op_Const, gen_Const);
864 set_transformer(op_Conv, gen_Conv);
865 set_transformer(op_CopyB, gen_CopyB);
866 set_transformer(op_Eor, gen_Eor);
867 set_transformer(op_Jmp, gen_Jmp);
868 set_transformer(op_Load, gen_Load);
869 set_transformer(op_Minus, gen_Minus);
870 set_transformer(op_Mul, gen_Mul);
871 set_transformer(op_Not, gen_Not);
872 set_transformer(op_Or, gen_Or);
873 set_transformer(op_Quot, gen_Quot);
874 set_transformer(op_Rotl, gen_Rotl);
875 set_transformer(op_Shl, gen_Shl);
876 set_transformer(op_Shr, gen_Shr);
877 set_transformer(op_Shrs, gen_Shrs);
878 set_transformer(op_Store, gen_Store);
879 set_transformer(op_Sub, gen_Sub);
880 set_transformer(op_Unknown, gen_Unknown);
883 set_transformer(op_ASM, bad_transform);
884 set_transformer(op_Builtin, bad_transform);
885 set_transformer(op_CallBegin, bad_transform);
886 set_transformer(op_Cast, bad_transform);
887 set_transformer(op_Confirm, bad_transform);
888 set_transformer(op_DivMod, bad_transform);
889 set_transformer(op_EndExcept, bad_transform);
890 set_transformer(op_EndReg, bad_transform);
891 set_transformer(op_Filter, bad_transform);
892 set_transformer(op_Free, bad_transform);
893 set_transformer(op_Id, bad_transform);
894 set_transformer(op_InstOf, bad_transform);
895 set_transformer(op_Mulh, bad_transform);
896 set_transformer(op_Mux, bad_transform);
897 set_transformer(op_Raise, bad_transform);
898 set_transformer(op_Sel, bad_transform);
899 set_transformer(op_Tuple, bad_transform);
904 * Pre-transform all unknown nodes.
906 static void sparc_pretransform_node(void)
908 sparc_code_gen_t *cg = env_cg;
910 //cg->unknown_gp = be_pre_transform_node(cg->unknown_gp);
911 //cg->unknown_fpa = be_pre_transform_node(cg->unknown_fpa);
915 * Transform a Firm graph into a SPARC graph.
917 void sparc_transform_graph(sparc_code_gen_t *cg)
919 sparc_register_transformers();
921 be_transform_graph(cg->birg, sparc_pretransform_node);
924 void sparc_init_transform(void)
926 FIRM_DBG_REGISTER(dbg, "firm.be.sparc.transform");