factor out code for address of register param taken
[libfirm] / ir / be / sparc / sparc_transform.c
1 /*
2  * Copyright (C) 1995-2010 University of Karlsruhe.  All rights reserved.
3  *
4  * This file is part of libFirm.
5  *
6  * This file may be distributed and/or modified under the terms of the
7  * GNU General Public License version 2 as published by the Free Software
8  * Foundation and appearing in the file LICENSE.GPL included in the
9  * packaging of this file.
10  *
11  * Licensees holding valid libFirm Professional Edition licenses may use
12  * this file in accordance with the libFirm Commercial License
13  * Agreement provided with the Software.
14  *
15  * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16  * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
17  * PURPOSE.
18  */
19
20 /**
21  * @file
22  * @brief   code selection (transform FIRM into SPARC FIRM)
23  * @author  Hannes Rapp, Matthias Braun
24  * @version $Id$
25  */
26 #include "config.h"
27
28 #include <stdint.h>
29 #include <stdbool.h>
30
31 #include "irnode_t.h"
32 #include "irgraph_t.h"
33 #include "irmode_t.h"
34 #include "irgmod.h"
35 #include "iredges.h"
36 #include "ircons.h"
37 #include "irprintf.h"
38 #include "iroptimize.h"
39 #include "dbginfo.h"
40 #include "iropt_t.h"
41 #include "debug.h"
42 #include "error.h"
43 #include "util.h"
44
45 #include "../benode.h"
46 #include "../beirg.h"
47 #include "../beutil.h"
48 #include "../betranshlp.h"
49 #include "../beabihelper.h"
50 #include "bearch_sparc_t.h"
51
52 #include "sparc_nodes_attr.h"
53 #include "sparc_transform.h"
54 #include "sparc_new_nodes.h"
55 #include "gen_sparc_new_nodes.h"
56
57 #include "gen_sparc_regalloc_if.h"
58 #include "sparc_cconv.h"
59
60 #include <limits.h>
61
62 DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
63
64 static beabi_helper_env_t    *abihelper;
65 static const arch_register_t *sp_reg = &sparc_registers[REG_SP];
66 static const arch_register_t *fp_reg = &sparc_registers[REG_FRAME_POINTER];
67 static calling_convention_t  *current_cconv = NULL;
68 static ir_mode               *mode_gp;
69 static ir_mode               *mode_flags;
70 static ir_mode               *mode_fp;
71 static ir_mode               *mode_fp2;
72 //static ir_mode               *mode_fp4;
73 static pmap                  *node_to_stack;
74
75 static const arch_register_t *const caller_saves[] = {
76         &sparc_registers[REG_G1],
77         &sparc_registers[REG_G2],
78         &sparc_registers[REG_G3],
79         &sparc_registers[REG_G4],
80         &sparc_registers[REG_O0],
81         &sparc_registers[REG_O1],
82         &sparc_registers[REG_O2],
83         &sparc_registers[REG_O3],
84         &sparc_registers[REG_O4],
85         &sparc_registers[REG_O5],
86
87         &sparc_registers[REG_F0],
88         &sparc_registers[REG_F1],
89         &sparc_registers[REG_F2],
90         &sparc_registers[REG_F3],
91         &sparc_registers[REG_F4],
92         &sparc_registers[REG_F5],
93         &sparc_registers[REG_F6],
94         &sparc_registers[REG_F7],
95         &sparc_registers[REG_F8],
96         &sparc_registers[REG_F9],
97         &sparc_registers[REG_F10],
98         &sparc_registers[REG_F11],
99         &sparc_registers[REG_F12],
100         &sparc_registers[REG_F13],
101         &sparc_registers[REG_F14],
102         &sparc_registers[REG_F15],
103         &sparc_registers[REG_F16],
104         &sparc_registers[REG_F17],
105         &sparc_registers[REG_F18],
106         &sparc_registers[REG_F19],
107         &sparc_registers[REG_F20],
108         &sparc_registers[REG_F21],
109         &sparc_registers[REG_F22],
110         &sparc_registers[REG_F23],
111         &sparc_registers[REG_F24],
112         &sparc_registers[REG_F25],
113         &sparc_registers[REG_F26],
114         &sparc_registers[REG_F27],
115         &sparc_registers[REG_F28],
116         &sparc_registers[REG_F29],
117         &sparc_registers[REG_F30],
118         &sparc_registers[REG_F31],
119 };
120
121 static const arch_register_t *const omit_fp_callee_saves[] = {
122         &sparc_registers[REG_L0],
123         &sparc_registers[REG_L1],
124         &sparc_registers[REG_L2],
125         &sparc_registers[REG_L3],
126         &sparc_registers[REG_L4],
127         &sparc_registers[REG_L5],
128         &sparc_registers[REG_L6],
129         &sparc_registers[REG_L7],
130         &sparc_registers[REG_I0],
131         &sparc_registers[REG_I1],
132         &sparc_registers[REG_I2],
133         &sparc_registers[REG_I3],
134         &sparc_registers[REG_I4],
135         &sparc_registers[REG_I5],
136 };
137
138 static inline bool mode_needs_gp_reg(ir_mode *mode)
139 {
140         if (mode_is_int(mode) || mode_is_reference(mode)) {
141                 /* we should only see 32bit code */
142                 assert(get_mode_size_bits(mode) <= 32);
143                 return true;
144         }
145         return false;
146 }
147
148 /**
149  * Create a node sequence that zeroes out the upper bits (zero extension).
150  *
151  * @param dbgi      debug info
152  * @param block     the basic block
153  * @param op        the original node
154  * @param src_bits  number of lower bits that will remain
155  */
156 static ir_node *gen_zero_extension(dbg_info *dbgi, ir_node *block, ir_node *op,
157                                    int src_bits)
158 {
159         if (src_bits == 8) {
160                 return new_bd_sparc_And_imm(dbgi, block, op, NULL, 0xFF);
161         } else if (src_bits == 16) {
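                /* 0xFFFF does not fit into the 13 bit signed immediate field of
                 * SPARC logical instructions, so 16 bit values are zero-extended
                 * with a shift pair instead of a single And. */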
162                 ir_node *lshift = new_bd_sparc_Sll_imm(dbgi, block, op, NULL, 16);
163                 ir_node *rshift = new_bd_sparc_Srl_imm(dbgi, block, lshift, NULL, 16);
164                 return rshift;
165         } else {
166                 panic("zero extension only supported for 8 and 16 bits");
167         }
168 }
169
170 /**
171  * Generate code for a sign extension.
172  *
173  * @param dbgi      debug info
174  * @param block     the basic block
175  * @param op        the original node
176  * @param src_bits  number of lower bits that will remain
177  */
178 static ir_node *gen_sign_extension(dbg_info *dbgi, ir_node *block, ir_node *op,
179                                    int src_bits)
180 {
181         int shift_width = 32 - src_bits;
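        /* Shift the value left so that its sign bit ends up in bit 31, then
         * shift right arithmetically to replicate that bit into the upper
         * 32 - src_bits bits. */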
182         ir_node *lshift_node = new_bd_sparc_Sll_imm(dbgi, block, op, NULL, shift_width);
183         ir_node *rshift_node = new_bd_sparc_Sra_imm(dbgi, block, lshift_node, NULL, shift_width);
184         return rshift_node;
185 }
186
187 /**
188  * Returns true if it is assured that the upper bits of a node are "clean",
189  * which means for an 8 or 16 bit value that the upper bits in the register
190  * are 0 for unsigned numbers and a copy of the sign bit (the most
191  * significant bit of the value) for signed numbers.
192  */
193 static bool upper_bits_clean(ir_node *transformed_node, ir_mode *mode)
194 {
195         (void) transformed_node;
196         (void) mode;
197         /* TODO */
198         return false;
199 }
200
201 /**
202  * Extend a value to 32 bit signed/unsigned depending on its mode.
203  *
204  * @param dbgi      debug info
205  * @param block     the basic block
206  * @param op        the original node
207  * @param orig_mode the original mode of op
208  */
209 static ir_node *gen_extension(dbg_info *dbgi, ir_node *block, ir_node *op,
210                               ir_mode *orig_mode)
211 {
212         int bits = get_mode_size_bits(orig_mode);
213         if (bits == 32)
214                 return op;
215
216         if (mode_is_signed(orig_mode)) {
217                 return gen_sign_extension(dbgi, block, op, bits);
218         } else {
219                 return gen_zero_extension(dbgi, block, op, bits);
220         }
221 }
222
223 typedef enum {
224         MATCH_NONE         = 0,
225         MATCH_COMMUTATIVE  = 1U << 0, /**< commutative operation. */
226         MATCH_MODE_NEUTRAL = 1U << 1, /**< the higher bits of the inputs don't
227                                            influence the significant lower bits
228                                            at all (for modes smaller than 32 bit) */
229 } match_flags_t;
230 ENUM_BITSET(match_flags_t)
231
232 typedef ir_node* (*new_binop_reg_func) (dbg_info *dbgi, ir_node *block, ir_node *op1, ir_node *op2);
233 typedef ir_node* (*new_binop_fp_func) (dbg_info *dbgi, ir_node *block, ir_node *op1, ir_node *op2, ir_mode *mode);
234 typedef ir_node* (*new_binop_imm_func) (dbg_info *dbgi, ir_node *block, ir_node *op1, ir_entity *entity, int32_t immediate);
235 typedef ir_node* (*new_unop_fp_func) (dbg_info *dbgi, ir_node *block, ir_node *op1, ir_mode *mode);
236
237 /**
238  * Checks if a node's value can be encoded as an immediate.
239  */
240 static bool is_imm_encodeable(const ir_node *node)
241 {
242         long value;
243         if (!is_Const(node))
244                 return false;
245
246         value = get_tarval_long(get_Const_tarval(node));
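        /* SPARC instructions take a 13 bit sign-extended immediate, i.e.
         * values in the range [-4096, 4095]. */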
247         return sparc_is_value_imm_encodeable(value);
248 }
249
250 static bool needs_extension(ir_mode *mode)
251 {
252         return get_mode_size_bits(mode) < get_mode_size_bits(mode_gp);
253 }
254
255 /**
256  * Check if a given node is a Down-Conv, i.e. an integer Conv from a mode
257  * with more bits to a mode with the same number of bits or fewer.
258  * Both source and destination mode must be modes that fit a gp register.
259  *
260  * @param node   the node
261  * @return non-zero if node is a Down-Conv
262  */
263 static bool is_downconv(const ir_node *node)
264 {
265         ir_mode *src_mode;
266         ir_mode *dest_mode;
267
268         if (!is_Conv(node))
269                 return false;
270
271         src_mode  = get_irn_mode(get_Conv_op(node));
272         dest_mode = get_irn_mode(node);
273         return
274                 mode_needs_gp_reg(src_mode)  &&
275                 mode_needs_gp_reg(dest_mode) &&
276                 get_mode_size_bits(dest_mode) <= get_mode_size_bits(src_mode);
277 }
278
279 static ir_node *sparc_skip_downconv(ir_node *node)
280 {
281         while (is_downconv(node)) {
282                 node = get_Conv_op(node);
283         }
284         return node;
285 }
286
287 /**
288  * Helper function for binary operations.
289  *
290  * @param new_reg  constructor for the register/register variant
291  * @param new_imm  constructor for the register/immediate variant
292  */
293 static ir_node *gen_helper_binop_args(ir_node *node,
294                                       ir_node *op1, ir_node *op2,
295                                       match_flags_t flags,
296                                       new_binop_reg_func new_reg,
297                                       new_binop_imm_func new_imm)
298 {
299         dbg_info *dbgi  = get_irn_dbg_info(node);
300         ir_node  *block = be_transform_node(get_nodes_block(node));
301         ir_node  *new_op1;
302         ir_node  *new_op2;
303         ir_mode  *mode1;
304         ir_mode  *mode2;
305
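        /* Strategy: prefer the immediate form when the right operand is an
         * encodeable Const; for commutative operations also try the left
         * operand, and otherwise fall back to the register/register form. */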
306         if (flags & MATCH_MODE_NEUTRAL) {
307                 op1 = sparc_skip_downconv(op1);
308                 op2 = sparc_skip_downconv(op2);
309         }
310         mode1 = get_irn_mode(op1);
311         mode2 = get_irn_mode(op2);
312         /* we shouldn't see 64bit code */
313         assert(get_mode_size_bits(mode1) <= 32);
314         assert(get_mode_size_bits(mode2) <= 32);
315
316         if (is_imm_encodeable(op2)) {
317                 int32_t  immediate = get_tarval_long(get_Const_tarval(op2));
318                 new_op1 = be_transform_node(op1);
319                 if (! (flags & MATCH_MODE_NEUTRAL) && needs_extension(mode1)) {
320                         new_op1 = gen_extension(dbgi, block, new_op1, mode1);
321                 }
322                 return new_imm(dbgi, block, new_op1, NULL, immediate);
323         }
324         new_op2 = be_transform_node(op2);
325         if (! (flags & MATCH_MODE_NEUTRAL) && needs_extension(mode2)) {
326                 new_op2 = gen_extension(dbgi, block, new_op2, mode2);
327         }
328
329         if ((flags & MATCH_COMMUTATIVE) && is_imm_encodeable(op1)) {
330                 int32_t immediate = get_tarval_long(get_Const_tarval(op1));
331                 return new_imm(dbgi, block, new_op2, NULL, immediate);
332         }
333
334         new_op1 = be_transform_node(op1);
335         if (! (flags & MATCH_MODE_NEUTRAL) && needs_extension(mode1)) {
336                 new_op1 = gen_extension(dbgi, block, new_op1, mode1);
337         }
338         return new_reg(dbgi, block, new_op1, new_op2);
339 }
340
341 static ir_node *gen_helper_binop(ir_node *node, match_flags_t flags,
342                                  new_binop_reg_func new_reg,
343                                  new_binop_imm_func new_imm)
344 {
345         ir_node *op1 = get_binop_left(node);
346         ir_node *op2 = get_binop_right(node);
347         return gen_helper_binop_args(node, op1, op2, flags, new_reg, new_imm);
348 }
349
350 /**
351  * Helper function for floating-point binary operations.
352  */
353 static ir_node *gen_helper_binfpop(ir_node *node, ir_mode *mode,
354                                    new_binop_fp_func new_func_single,
355                                    new_binop_fp_func new_func_double,
356                                    new_binop_fp_func new_func_quad)
357 {
358         ir_node  *block   = be_transform_node(get_nodes_block(node));
359         ir_node  *op1     = get_binop_left(node);
360         ir_node  *new_op1 = be_transform_node(op1);
361         ir_node  *op2     = get_binop_right(node);
362         ir_node  *new_op2 = be_transform_node(op2);
363         dbg_info *dbgi    = get_irn_dbg_info(node);
364         unsigned  bits    = get_mode_size_bits(mode);
365
366         switch (bits) {
367         case 32:
368                 return new_func_single(dbgi, block, new_op1, new_op2, mode);
369         case 64:
370                 return new_func_double(dbgi, block, new_op1, new_op2, mode);
371         case 128:
372                 return new_func_quad(dbgi, block, new_op1, new_op2, mode);
373         default:
374                 break;
375         }
376         panic("unsupported mode %+F for float op", mode);
377 }
378
379 static ir_node *gen_helper_unfpop(ir_node *node, ir_mode *mode,
380                                   new_unop_fp_func new_func_single,
381                                   new_unop_fp_func new_func_double,
382                                   new_unop_fp_func new_func_quad)
383 {
384         ir_node  *block   = be_transform_node(get_nodes_block(node));
385         ir_node  *op1     = get_binop_left(node);
386         ir_node  *new_op1 = be_transform_node(op1);
387         dbg_info *dbgi    = get_irn_dbg_info(node);
388         unsigned  bits    = get_mode_size_bits(mode);
389
390         switch (bits) {
391         case 32:
392                 return new_func_single(dbgi, block, new_op1, mode);
393         case 64:
394                 return new_func_double(dbgi, block, new_op1, mode);
395         case 128:
396                 return new_func_quad(dbgi, block, new_op1, mode);
397         default:
398                 break;
399         }
400         panic("unsupported mode %+F for float op", mode);
401 }
402
403 typedef ir_node* (*new_binopx_imm_func)(dbg_info *dbgi, ir_node *block,
404                                         ir_node *op1, ir_node *flags,
405                                         ir_entity *imm_entity, int32_t imm);
406
407 typedef ir_node* (*new_binopx_reg_func)(dbg_info *dbgi, ir_node *block,
408                                         ir_node *op1, ir_node *op2,
409                                         ir_node *flags);
410
411 static ir_node *gen_helper_binopx(ir_node *node, match_flags_t match_flags,
412                                   new_binopx_reg_func new_binopx_reg,
413                                   new_binopx_imm_func new_binopx_imm)
414 {
415         dbg_info *dbgi      = get_irn_dbg_info(node);
416         ir_node  *block     = be_transform_node(get_nodes_block(node));
417         ir_node  *op1       = get_irn_n(node, 0);
418         ir_node  *op2       = get_irn_n(node, 1);
419         ir_node  *flags     = get_irn_n(node, 2);
420         ir_node  *new_flags = be_transform_node(flags);
421         ir_node  *new_op1;
422         ir_node  *new_op2;
423
424         /* only the mode-neutral case is implemented so far */
425         assert(match_flags & MATCH_MODE_NEUTRAL);
426
427         if (is_imm_encodeable(op2)) {
428                 int32_t  immediate = get_tarval_long(get_Const_tarval(op2));
429                 new_op1 = be_transform_node(op1);
430                 return new_binopx_imm(dbgi, block, new_op1, new_flags, NULL, immediate);
431         }
432         new_op2 = be_transform_node(op2);
433         if ((match_flags & MATCH_COMMUTATIVE) && is_imm_encodeable(op1)) {
434                 int32_t immediate = get_tarval_long(get_Const_tarval(op1));
435                 return new_binopx_imm(dbgi, block, new_op2, new_flags, NULL, immediate);
436         }
437         new_op1 = be_transform_node(op1);
438         return new_binopx_reg(dbgi, block, new_op1, new_op2, new_flags);
439
440 }
441
442 static ir_node *get_g0(void)
443 {
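        /* %g0 is hardwired to zero on SPARC; it serves as a ready-made zero
         * operand and as a sink for unwanted results. */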
444         return be_prolog_get_reg_value(abihelper, &sparc_registers[REG_G0]);
445 }
446
447 typedef struct address_t {
448         ir_node   *ptr;
449         ir_node   *ptr2;
450         ir_entity *entity;
451         int32_t    offset;
452 } address_t;
453
454 /**
455  * Match a load/store address
456  */
457 static void match_address(ir_node *ptr, address_t *address, bool use_ptr2)
458 {
459         ir_node   *base   = ptr;
460         ir_node   *ptr2   = NULL;
461         int32_t    offset = 0;
462         ir_entity *entity = NULL;
463
464         if (is_Add(base)) {
465                 ir_node *add_right = get_Add_right(base);
466                 if (is_Const(add_right)) {
467                         base    = get_Add_left(base);
468                         offset += get_tarval_long(get_Const_tarval(add_right));
469                 }
470         }
471         /* Note that we don't match sub(x, Const) or chains of adds/subs
472          * because this should all be normalized by now */
473
474         /* we only use the SymConst if we are its only user, otherwise we probably
475          * won't save anything but produce multiple sethi+or combinations with
476          * just different offsets */
477         if (is_SymConst(base) && get_irn_n_edges(base) == 1) {
478                 dbg_info *dbgi      = get_irn_dbg_info(ptr);
479                 ir_node  *block     = get_nodes_block(ptr);
480                 ir_node  *new_block = be_transform_node(block);
481                 entity = get_SymConst_entity(base);
482                 base   = new_bd_sparc_SetHi(dbgi, new_block, entity, offset);
483         } else if (use_ptr2 && is_Add(base) && entity == NULL && offset == 0) {
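                /* reg+reg addressing mode: the two Add operands become base and
                 * index of the load/store. */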
484                 ptr2 = be_transform_node(get_Add_right(base));
485                 base = be_transform_node(get_Add_left(base));
486         } else {
487                 if (sparc_is_value_imm_encodeable(offset)) {
488                         base = be_transform_node(base);
489                 } else {
490                         base   = be_transform_node(ptr);
491                         offset = 0;
492                 }
493         }
494
495         address->ptr    = base;
496         address->ptr2   = ptr2;
497         address->entity = entity;
498         address->offset = offset;
499 }
500
501 /**
502  * Creates a sparc Add.
503  *
504  * @param node   FIRM node
505  * @return the created sparc Add node
506  */
507 static ir_node *gen_Add(ir_node *node)
508 {
509         ir_mode *mode = get_irn_mode(node);
510         ir_node *right;
511
512         if (mode_is_float(mode)) {
513                 return gen_helper_binfpop(node, mode, new_bd_sparc_fadd_s,
514                                           new_bd_sparc_fadd_d, new_bd_sparc_fadd_q);
515         }
516
517         /* special case: + 0x1000 can be represented as - 0x1000 */
518         right = get_Add_right(node);
519         if (is_Const(right)) {
520                 ir_node   *left = get_Add_left(node);
521                 ir_tarval *tv;
522                 uint32_t   val;
523                 /* is this simple address arithmetic? then we can let the linker do
524                  * the calculation. */
525                 if (is_SymConst(left) && get_irn_n_edges(left) == 1) {
526                         dbg_info *dbgi  = get_irn_dbg_info(node);
527                         ir_node  *block = be_transform_node(get_nodes_block(node));
528                         address_t address;
529
530                         /* the value of use_ptr2 shouldn't matter here */
531                         match_address(node, &address, false);
532                         assert(is_sparc_SetHi(address.ptr));
533                         return new_bd_sparc_Or_imm(dbgi, block, address.ptr,
534                                                    address.entity, address.offset);
535                 }
536
537                 tv  = get_Const_tarval(right);
538                 val = get_tarval_long(tv);
539                 if (val == 0x1000) {
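                        /* +0x1000 is just outside the 13 bit signed immediate range,
                         * but -0x1000 is representable, so a Sub with the negated
                         * immediate is used instead. */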
540                         dbg_info *dbgi   = get_irn_dbg_info(node);
541                         ir_node  *block  = be_transform_node(get_nodes_block(node));
542                         ir_node  *op     = get_Add_left(node);
543                         ir_node  *new_op = be_transform_node(op);
544                         return new_bd_sparc_Sub_imm(dbgi, block, new_op, NULL, -0x1000);
545                 }
546         }
547
548         return gen_helper_binop(node, MATCH_COMMUTATIVE | MATCH_MODE_NEUTRAL,
549                                 new_bd_sparc_Add_reg, new_bd_sparc_Add_imm);
550 }
551
552 static ir_node *gen_AddCC_t(ir_node *node)
553 {
554         return gen_helper_binop(node, MATCH_COMMUTATIVE | MATCH_MODE_NEUTRAL,
555                                 new_bd_sparc_AddCC_reg, new_bd_sparc_AddCC_imm);
556 }
557
558 static ir_node *gen_Proj_AddCC_t(ir_node *node)
559 {
560         long     pn       = get_Proj_proj(node);
561         ir_node *pred     = get_Proj_pred(node);
562         ir_node *new_pred = be_transform_node(pred);
563
564         switch (pn) {
565         case pn_sparc_AddCC_t_res:
566                 return new_r_Proj(new_pred, mode_gp, pn_sparc_AddCC_res);
567         case pn_sparc_AddCC_t_flags:
568                 return new_r_Proj(new_pred, mode_flags, pn_sparc_AddCC_flags);
569         default:
570                 panic("Invalid AddCC_t proj found");
571         }
572 }
573
574 static ir_node *gen_AddX_t(ir_node *node)
575 {
576         return gen_helper_binopx(node, MATCH_COMMUTATIVE | MATCH_MODE_NEUTRAL,
577                                  new_bd_sparc_AddX_reg, new_bd_sparc_AddX_imm);
578 }
579
580 /**
581  * Creates a sparc Sub.
582  *
583  * @param node       FIRM node
584  * @return the created sparc Sub node
585  */
586 static ir_node *gen_Sub(ir_node *node)
587 {
588         ir_mode *mode = get_irn_mode(node);
589
590         if (mode_is_float(mode)) {
591                 return gen_helper_binfpop(node, mode, new_bd_sparc_fsub_s,
592                                           new_bd_sparc_fsub_d, new_bd_sparc_fsub_q);
593         }
594
595         return gen_helper_binop(node, MATCH_MODE_NEUTRAL,
596                                 new_bd_sparc_Sub_reg, new_bd_sparc_Sub_imm);
597 }
598
599 static ir_node *gen_SubCC_t(ir_node *node)
600 {
601         return gen_helper_binop(node, MATCH_MODE_NEUTRAL,
602                                 new_bd_sparc_SubCC_reg, new_bd_sparc_SubCC_imm);
603 }
604
605 static ir_node *gen_Proj_SubCC_t(ir_node *node)
606 {
607         long     pn       = get_Proj_proj(node);
608         ir_node *pred     = get_Proj_pred(node);
609         ir_node *new_pred = be_transform_node(pred);
610
611         switch (pn) {
612         case pn_sparc_SubCC_t_res:
613                 return new_r_Proj(new_pred, mode_gp, pn_sparc_SubCC_res);
614         case pn_sparc_SubCC_t_flags:
615                 return new_r_Proj(new_pred, mode_flags, pn_sparc_SubCC_flags);
616         default:
617                 panic("Invalid SubCC_t proj found");
618         }
619 }
620
621 static ir_node *gen_SubX_t(ir_node *node)
622 {
623         return gen_helper_binopx(node, MATCH_MODE_NEUTRAL,
624                                  new_bd_sparc_SubX_reg, new_bd_sparc_SubX_imm);
625 }
626
627 static ir_node *create_ldf(dbg_info *dbgi, ir_node *block, ir_node *ptr,
628                            ir_node *mem, ir_mode *mode, ir_entity *entity,
629                            long offset, bool is_frame_entity)
630 {
631         unsigned bits = get_mode_size_bits(mode);
632         assert(mode_is_float(mode));
633         if (bits == 32) {
634                 return new_bd_sparc_Ldf_s(dbgi, block, ptr, mem, mode, entity,
635                                           offset, is_frame_entity);
636         } else if (bits == 64) {
637                 return new_bd_sparc_Ldf_d(dbgi, block, ptr, mem, mode, entity,
638                                           offset, is_frame_entity);
639         } else {
640                 assert(bits == 128);
641                 return new_bd_sparc_Ldf_q(dbgi, block, ptr, mem, mode, entity,
642                                           offset, is_frame_entity);
643         }
644 }
645
646 static ir_node *create_stf(dbg_info *dbgi, ir_node *block, ir_node *value,
647                            ir_node *ptr, ir_node *mem, ir_mode *mode,
648                            ir_entity *entity, long offset,
649                            bool is_frame_entity)
650 {
651         unsigned bits = get_mode_size_bits(mode);
652         assert(mode_is_float(mode));
653         if (bits == 32) {
654                 return new_bd_sparc_Stf_s(dbgi, block, value, ptr, mem, mode, entity,
655                                           offset, is_frame_entity);
656         } else if (bits == 64) {
657                 return new_bd_sparc_Stf_d(dbgi, block, value, ptr, mem, mode, entity,
658                                           offset, is_frame_entity);
659         } else {
660                 assert(bits == 128);
661                 return new_bd_sparc_Stf_q(dbgi, block, value, ptr, mem, mode, entity,
662                                           offset, is_frame_entity);
663         }
664 }
665
666 /**
667  * Transforms a Load.
668  *
669  * @param node    the ir Load node
670  * @return the created sparc Load node
671  */
672 static ir_node *gen_Load(ir_node *node)
673 {
674         dbg_info *dbgi     = get_irn_dbg_info(node);
675         ir_mode  *mode     = get_Load_mode(node);
676         ir_node  *block    = be_transform_node(get_nodes_block(node));
677         ir_node  *ptr      = get_Load_ptr(node);
678         ir_node  *mem      = get_Load_mem(node);
679         ir_node  *new_mem  = be_transform_node(mem);
680         ir_node  *new_load = NULL;
681         address_t address;
682
683         if (get_Load_unaligned(node) == align_non_aligned) {
684                 panic("sparc: transformation of unaligned Loads not implemented yet");
685         }
686
687         if (mode_is_float(mode)) {
688                 match_address(ptr, &address, false);
689                 new_load = create_ldf(dbgi, block, address.ptr, new_mem, mode,
690                                       address.entity, address.offset, false);
691         } else {
692                 match_address(ptr, &address, true);
693                 if (address.ptr2 != NULL) {
694                         assert(address.entity == NULL && address.offset == 0);
695                         new_load = new_bd_sparc_Ld_reg(dbgi, block, address.ptr,
696                                                        address.ptr2, new_mem, mode);
697                 } else {
698                         new_load = new_bd_sparc_Ld_imm(dbgi, block, address.ptr, new_mem,
699                                                        mode, address.entity, address.offset,
700                                                        false);
701                 }
702         }
703         set_irn_pinned(new_load, get_irn_pinned(node));
704
705         return new_load;
706 }
707
708 /**
709  * Transforms a Store.
710  *
711  * @param node    the ir Store node
712  * @return the created sparc Store node
713  */
714 static ir_node *gen_Store(ir_node *node)
715 {
716         ir_node  *block    = be_transform_node(get_nodes_block(node));
717         ir_node  *ptr      = get_Store_ptr(node);
718         ir_node  *mem      = get_Store_mem(node);
719         ir_node  *new_mem  = be_transform_node(mem);
720         ir_node  *val      = get_Store_value(node);
721         ir_node  *new_val  = be_transform_node(val);
722         ir_mode  *mode     = get_irn_mode(val);
723         dbg_info *dbgi     = get_irn_dbg_info(node);
724         ir_node  *new_store = NULL;
725         address_t address;
726
727         if (get_Store_unaligned(node) == align_non_aligned) {
728                 panic("sparc: transformation of unaligned Stores not implemented yet");
729         }
730
731         if (mode_is_float(mode)) {
732                 /* TODO: variants with reg+reg address mode */
733                 match_address(ptr, &address, false);
734                 new_store = create_stf(dbgi, block, new_val, address.ptr, new_mem,
735                                        mode, address.entity, address.offset, false);
736         } else {
737                 assert(get_mode_size_bits(mode) <= 32);
738                 match_address(ptr, &address, true);
739                 if (address.ptr2 != NULL) {
740                         assert(address.entity == NULL && address.offset == 0);
741                         new_store = new_bd_sparc_St_reg(dbgi, block, new_val, address.ptr,
742                                                         address.ptr2, new_mem, mode);
743                 } else {
744                         new_store = new_bd_sparc_St_imm(dbgi, block, new_val, address.ptr,
745                                                         new_mem, mode, address.entity,
746                                                         address.offset, false);
747                 }
748         }
749         set_irn_pinned(new_store, get_irn_pinned(node));
750
751         return new_store;
752 }
753
754 /**
755  * Creates a sparc Mul.
756  * Returns the lower 32 bits of the 64 bit multiply result.
757  *
758  * @return the created sparc Mul node
759  */
760 static ir_node *gen_Mul(ir_node *node)
761 {
762         ir_mode *mode = get_irn_mode(node);
763         if (mode_is_float(mode)) {
764                 return gen_helper_binfpop(node, mode, new_bd_sparc_fmul_s,
765                                           new_bd_sparc_fmul_d, new_bd_sparc_fmul_q);
766         }
767
768         return gen_helper_binop(node, MATCH_COMMUTATIVE | MATCH_MODE_NEUTRAL,
769                                 new_bd_sparc_Mul_reg, new_bd_sparc_Mul_imm);
770 }
771
772 /**
773  * Creates a sparc Mulh.
774  * Mulh returns the upper 32 bits of the 64 bit multiply result.
775  *
776  * @return the created sparc Mulh node
777  */
778 static ir_node *gen_Mulh(ir_node *node)
779 {
780         ir_mode *mode = get_irn_mode(node);
781         ir_node *mul;
782
783         if (mode_is_float(mode))
784                 panic("FP not supported yet");
785
786         mul = gen_helper_binop(node, MATCH_COMMUTATIVE, new_bd_sparc_Mulh_reg, new_bd_sparc_Mulh_imm);
787         return new_r_Proj(mul, mode_gp, pn_sparc_Mulh_low);
788 }
789
790 static ir_node *gen_sign_extension_value(ir_node *node)
791 {
792         ir_node *block     = get_nodes_block(node);
793         ir_node *new_block = be_transform_node(block);
794         ir_node *new_node  = be_transform_node(node);
795         /* TODO: we could do some shortcuts for some value types probably.
796          * (For constants or other cases where we know the sign bit in
797          *  advance) */
798         return new_bd_sparc_Sra_imm(NULL, new_block, new_node, NULL, 31);
799 }
800
801 /**
802  * Creates a sparc Div.
803  *
804  * @return the created sparc Div node
805  */
806 static ir_node *gen_Div(ir_node *node)
807 {
808         dbg_info *dbgi      = get_irn_dbg_info(node);
809         ir_node  *block     = get_nodes_block(node);
810         ir_node  *new_block = be_transform_node(block);
811         ir_mode  *mode      = get_Div_resmode(node);
812         ir_node  *left      = get_Div_left(node);
813         ir_node  *left_low  = be_transform_node(left);
814         ir_node  *right     = get_Div_right(node);
815         ir_node  *res;
816
817         if (mode_is_float(mode)) {
818                 return gen_helper_binfpop(node, mode, new_bd_sparc_fdiv_s,
819                                           new_bd_sparc_fdiv_d, new_bd_sparc_fdiv_q);
820         }
821
822         if (mode_is_signed(mode)) {
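                /* The divider consumes a 64 bit dividend split into a high and a
                 * low word: for signed division the high word is the sign
                 * extension of the operand, for unsigned division it is zero. */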
823                 ir_node *left_high = gen_sign_extension_value(left);
824
825                 if (is_imm_encodeable(right)) {
826                         int32_t immediate = get_tarval_long(get_Const_tarval(right));
827                         res = new_bd_sparc_SDiv_imm(dbgi, new_block, left_high, left_low,
828                                                     NULL, immediate);
829                 } else {
830                         ir_node *new_right = be_transform_node(right);
831                         res = new_bd_sparc_SDiv_reg(dbgi, new_block, left_high, left_low,
832                                                     new_right);
833                 }
834         } else {
835                 ir_node *left_high = get_g0();
836                 if (is_imm_encodeable(right)) {
837                         int32_t immediate = get_tarval_long(get_Const_tarval(right));
838                         res = new_bd_sparc_UDiv_imm(dbgi, new_block, left_high, left_low,
839                                                     NULL, immediate);
840                 } else {
841                         ir_node *new_right = be_transform_node(right);
842                         res = new_bd_sparc_UDiv_reg(dbgi, new_block, left_high, left_low,
843                                                     new_right);
844                 }
845         }
846
847         return res;
848 }
849
850 /**
851  * Transforms a Not node.
852  *
853  * @return the created sparc Not node
854  */
855 static ir_node *gen_Not(ir_node *node)
856 {
857         ir_node  *op     = get_Not_op(node);
858         ir_node  *zero   = get_g0();
859         dbg_info *dbgi   = get_irn_dbg_info(node);
860         ir_node  *block  = be_transform_node(get_nodes_block(node));
861         ir_node  *new_op = be_transform_node(op);
862
863         /* Note: Not(Eor()) is already normalized by firm localopts, so
864          * we don't match it for xnor here */
865
866         /* Not can be represented with xnor 0, n */
867         return new_bd_sparc_XNor_reg(dbgi, block, zero, new_op);
868 }
869
870 static ir_node *gen_helper_bitop(ir_node *node,
871                                  new_binop_reg_func new_reg,
872                                  new_binop_imm_func new_imm,
873                                  new_binop_reg_func new_not_reg,
874                                  new_binop_imm_func new_not_imm)
875 {
876         ir_node *op1 = get_binop_left(node);
877         ir_node *op2 = get_binop_right(node);
878         if (is_Not(op1)) {
879                 return gen_helper_binop_args(node, op2, get_Not_op(op1),
880                                              MATCH_MODE_NEUTRAL,
881                                              new_not_reg, new_not_imm);
882         }
883         if (is_Not(op2)) {
884                 return gen_helper_binop_args(node, op1, get_Not_op(op2),
885                                              MATCH_MODE_NEUTRAL,
886                                              new_not_reg, new_not_imm);
887         }
888         return gen_helper_binop_args(node, op1, op2,
889                                      MATCH_MODE_NEUTRAL | MATCH_COMMUTATIVE,
890                                      new_reg, new_imm);
891 }
892
893 static ir_node *gen_And(ir_node *node)
894 {
895         return gen_helper_bitop(node,
896                                 new_bd_sparc_And_reg,
897                                 new_bd_sparc_And_imm,
898                                 new_bd_sparc_AndN_reg,
899                                 new_bd_sparc_AndN_imm);
900 }
901
902 static ir_node *gen_Or(ir_node *node)
903 {
904         return gen_helper_bitop(node,
905                                 new_bd_sparc_Or_reg,
906                                 new_bd_sparc_Or_imm,
907                                 new_bd_sparc_OrN_reg,
908                                 new_bd_sparc_OrN_imm);
909 }
910
911 static ir_node *gen_Eor(ir_node *node)
912 {
913         return gen_helper_bitop(node,
914                                 new_bd_sparc_Xor_reg,
915                                 new_bd_sparc_Xor_imm,
916                                 new_bd_sparc_XNor_reg,
917                                 new_bd_sparc_XNor_imm);
918 }
919
920 static ir_node *gen_Shl(ir_node *node)
921 {
922         return gen_helper_binop(node, MATCH_NONE, new_bd_sparc_Sll_reg, new_bd_sparc_Sll_imm);
923 }
924
925 static ir_node *gen_Shr(ir_node *node)
926 {
927         return gen_helper_binop(node, MATCH_NONE, new_bd_sparc_Srl_reg, new_bd_sparc_Srl_imm);
928 }
929
930 static ir_node *gen_Shrs(ir_node *node)
931 {
932         return gen_helper_binop(node, MATCH_NONE, new_bd_sparc_Sra_reg, new_bd_sparc_Sra_imm);
933 }
934
935 /**
936  * Transforms a Minus node.
937  */
938 static ir_node *gen_Minus(ir_node *node)
939 {
940         ir_mode  *mode = get_irn_mode(node);
941         ir_node  *op;
942         ir_node  *block;
943         ir_node  *new_op;
944         ir_node  *zero;
945         dbg_info *dbgi;
946
947         if (mode_is_float(mode)) {
948                 return gen_helper_unfpop(node, mode, new_bd_sparc_fneg_s,
949                                          new_bd_sparc_fneg_d, new_bd_sparc_fneg_q);
950         }
951         block  = be_transform_node(get_nodes_block(node));
952         dbgi   = get_irn_dbg_info(node);
953         op     = get_Minus_op(node);
954         new_op = be_transform_node(op);
955         zero   = get_g0();
956         return new_bd_sparc_Sub_reg(dbgi, block, zero, new_op);
957 }
958
959 /**
960  * Create an entity for a given (floating point) tarval
961  */
962 static ir_entity *create_float_const_entity(ir_tarval *tv)
963 {
964         const arch_env_t *arch_env = be_get_irg_arch_env(current_ir_graph);
965         sparc_isa_t      *isa      = (sparc_isa_t*) arch_env;
966         ir_entity        *entity   = (ir_entity*) pmap_get(isa->constants, tv);
967         ir_initializer_t *initializer;
968         ir_mode          *mode;
969         ir_type          *type;
970         ir_type          *glob;
971
972         if (entity != NULL)
973                 return entity;
974
975         mode   = get_tarval_mode(tv);
976         type   = get_type_for_mode(mode);
977         glob   = get_glob_type();
978         entity = new_entity(glob, id_unique("C%u"), type);
979         set_entity_visibility(entity, ir_visibility_private);
980         add_entity_linkage(entity, IR_LINKAGE_CONSTANT);
981
982         initializer = create_initializer_tarval(tv);
983         set_entity_initializer(entity, initializer);
984
985         pmap_insert(isa->constants, tv, entity);
986         return entity;
987 }
988
989 static ir_node *gen_float_const(dbg_info *dbgi, ir_node *block, ir_tarval *tv)
990 {
991         ir_entity *entity = create_float_const_entity(tv);
992         ir_node   *hi     = new_bd_sparc_SetHi(dbgi, block, entity, 0);
993         ir_node   *mem    = get_irg_no_mem(current_ir_graph);
994         ir_mode   *mode   = get_tarval_mode(tv);
995         ir_node   *new_op
996                 = create_ldf(dbgi, block, hi, mem, mode, entity, 0, false);
997         ir_node   *proj   = new_r_Proj(new_op, mode, pn_sparc_Ldf_res);
998
999         set_irn_pinned(new_op, op_pin_state_floats);
1000         return proj;
1001 }
1002
1003 static ir_node *gen_Const(ir_node *node)
1004 {
1005         ir_node   *block = be_transform_node(get_nodes_block(node));
1006         ir_mode   *mode  = get_irn_mode(node);
1007         dbg_info  *dbgi  = get_irn_dbg_info(node);
1008         ir_tarval *tv    = get_Const_tarval(node);
1009         long       value;
1010
1011         if (mode_is_float(mode)) {
1012                 return gen_float_const(dbgi, block, tv);
1013         }
1014
1015         value = get_tarval_long(tv);
1016         if (value == 0) {
1017                 return get_g0();
1018         } else if (sparc_is_value_imm_encodeable(value)) {
1019                 return new_bd_sparc_Or_imm(dbgi, block, get_g0(), NULL, value);
1020         } else {
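                /* sethi materialises the upper 22 bits of the constant; if the
                 * low 10 bits are non-zero they are or-ed in afterwards. */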
1021                 ir_node *hi = new_bd_sparc_SetHi(dbgi, block, NULL, value);
1022                 if ((value & 0x3ff) != 0) {
1023                         return new_bd_sparc_Or_imm(dbgi, block, hi, NULL, value & 0x3ff);
1024                 } else {
1025                         return hi;
1026                 }
1027         }
1028 }
1029
1030 static ir_mode *get_cmp_mode(ir_node *b_value)
1031 {
1032         ir_node *op;
1033
1034         if (!is_Cmp(b_value))
1035                 panic("can't determine cond signedness (no cmp)");
1036         op = get_Cmp_left(b_value);
1037         return get_irn_mode(op);
1038 }
1039
1040 static ir_node *make_address(dbg_info *dbgi, ir_node *block, ir_entity *entity,
1041                              int32_t offset)
1042 {
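        /* Classic two-instruction sequence to build a full 32 bit address:
         * sethi %hi(entity+offset), reg  followed by  or reg, %lo(...), reg. */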
1043         ir_node *hi  = new_bd_sparc_SetHi(dbgi, block, entity, offset);
1044         ir_node *low = new_bd_sparc_Or_imm(dbgi, block, hi, entity, offset);
1045
1046         if (get_entity_owner(entity) == get_tls_type())
1047                 panic("thread local storage not supported yet in sparc backend");
1048         return low;
1049 }
1050
1051 static ir_node *gen_SwitchJmp(ir_node *node)
1052 {
1053         dbg_info        *dbgi         = get_irn_dbg_info(node);
1054         ir_node         *block        = be_transform_node(get_nodes_block(node));
1055         ir_node         *selector     = get_Cond_selector(node);
1056         ir_node         *new_selector = be_transform_node(selector);
1057         long             default_pn   = get_Cond_default_proj(node);
1058         ir_entity       *entity;
1059         ir_node         *table_address;
1060         ir_node         *idx;
1061         ir_node         *load;
1062         ir_node         *address;
1063
1064         /* switch with smaller mode not implemented yet */
1065         assert(get_mode_size_bits(get_irn_mode(selector)) == 32);
1066
1067         entity = new_entity(NULL, id_unique("TBL%u"), get_unknown_type());
1068         set_entity_visibility(entity, ir_visibility_private);
1069         add_entity_linkage(entity, IR_LINKAGE_CONSTANT);
1070
1071         /* construct base address */
1072         table_address = make_address(dbgi, block, entity, 0);
1073         /* scale index */
1074         idx = new_bd_sparc_Sll_imm(dbgi, block, new_selector, NULL, 2);
1075         /* load from jumptable */
1076         load = new_bd_sparc_Ld_reg(dbgi, block, table_address, idx,
1077                                    get_irg_no_mem(current_ir_graph),
1078                                    mode_gp);
1079         address = new_r_Proj(load, mode_gp, pn_sparc_Ld_res);
1080
1081         return new_bd_sparc_SwitchJmp(dbgi, block, address, default_pn, entity);
1082 }
1083
1084 static ir_node *gen_Cond(ir_node *node)
1085 {
1086         ir_node    *selector = get_Cond_selector(node);
1087         ir_mode    *mode     = get_irn_mode(selector);
1088         ir_mode    *cmp_mode;
1089         ir_node    *block;
1090         ir_node    *flag_node;
1091         bool        is_unsigned;
1092         ir_relation relation;
1093         dbg_info   *dbgi;
1094
1095         // switch/case jumps
1096         if (mode != mode_b) {
1097                 return gen_SwitchJmp(node);
1098         }
1099
1100         // regular if/else jumps
1101         assert(is_Cmp(selector));
1102
1103         cmp_mode = get_cmp_mode(selector);
1104
1105         block       = be_transform_node(get_nodes_block(node));
1106         dbgi        = get_irn_dbg_info(node);
1107         flag_node   = be_transform_node(selector);
1108         relation    = get_Cmp_relation(selector);
1109         is_unsigned = !mode_is_signed(cmp_mode);
1110         if (mode_is_float(cmp_mode)) {
1111                 assert(!is_unsigned);
1112                 return new_bd_sparc_fbfcc(dbgi, block, flag_node, relation);
1113         } else {
1114                 return new_bd_sparc_Bicc(dbgi, block, flag_node, relation, is_unsigned);
1115         }
1116 }
1117
1118 /**
1119  * transform Cmp
1120  */
1121 static ir_node *gen_Cmp(ir_node *node)
1122 {
1123         ir_node *op1      = get_Cmp_left(node);
1124         ir_node *op2      = get_Cmp_right(node);
1125         ir_mode *cmp_mode = get_irn_mode(op1);
1126         assert(get_irn_mode(op2) == cmp_mode);
1127
1128         if (mode_is_float(cmp_mode)) {
1129                 ir_node  *block   = be_transform_node(get_nodes_block(node));
1130                 dbg_info *dbgi    = get_irn_dbg_info(node);
1131                 ir_node  *new_op1 = be_transform_node(op1);
1132                 ir_node  *new_op2 = be_transform_node(op2);
1133                 unsigned  bits    = get_mode_size_bits(cmp_mode);
1134                 if (bits == 32) {
1135                         return new_bd_sparc_fcmp_s(dbgi, block, new_op1, new_op2, cmp_mode);
1136                 } else if (bits == 64) {
1137                         return new_bd_sparc_fcmp_d(dbgi, block, new_op1, new_op2, cmp_mode);
1138                 } else {
1139                         assert(bits == 128);
1140                         return new_bd_sparc_fcmp_q(dbgi, block, new_op1, new_op2, cmp_mode);
1141                 }
1142         }
1143
1144         /* When we compare a bitop like And, Or, ... with 0 then we can directly
1145          * use the bitopcc variant.
1146          * Currently we only do this if the Cmp is the only user of the bitop node...
1147          */
1148         if (is_Const(op2) && is_Const_null(op2) && get_irn_n_edges(op1) == 1) {
1149                 if (is_And(op1)) {
1150                         return gen_helper_bitop(op1,
1151                                                 new_bd_sparc_AndCCZero_reg,
1152                                                 new_bd_sparc_AndCCZero_imm,
1153                                                 new_bd_sparc_AndNCCZero_reg,
1154                                                 new_bd_sparc_AndNCCZero_imm);
1155                 } else if (is_Or(op1)) {
1156                         return gen_helper_bitop(op1,
1157                                                 new_bd_sparc_OrCCZero_reg,
1158                                                 new_bd_sparc_OrCCZero_imm,
1159                                                 new_bd_sparc_OrNCCZero_reg,
1160                                                 new_bd_sparc_OrNCCZero_imm);
1161                 } else if (is_Eor(op1)) {
1162                         return gen_helper_bitop(op1,
1163                                                 new_bd_sparc_XorCCZero_reg,
1164                                                 new_bd_sparc_XorCCZero_imm,
1165                                                 new_bd_sparc_XNorCCZero_reg,
1166                                                 new_bd_sparc_XNorCCZero_imm);
1167                 }
1168         }
1169
1170         /* integer compare */
1171         return gen_helper_binop_args(node, op1, op2, MATCH_NONE,
1172                                      new_bd_sparc_Cmp_reg, new_bd_sparc_Cmp_imm);
1173 }
1174
1175 /**
1176  * Transforms a SymConst node.
1177  */
1178 static ir_node *gen_SymConst(ir_node *node)
1179 {
1180         ir_entity *entity    = get_SymConst_entity(node);
1181         dbg_info  *dbgi      = get_irn_dbg_info(node);
1182         ir_node   *block     = get_nodes_block(node);
1183         ir_node   *new_block = be_transform_node(block);
1184         return make_address(dbgi, new_block, entity, 0);
1185 }
1186
1187 static ir_node *create_fftof(dbg_info *dbgi, ir_node *block, ir_node *op,
1188                              ir_mode *src_mode, ir_mode *dst_mode)
1189 {
1190         unsigned src_bits = get_mode_size_bits(src_mode);
1191         unsigned dst_bits = get_mode_size_bits(dst_mode);
1192         if (src_bits == 32) {
1193                 if (dst_bits == 64) {
1194                         return new_bd_sparc_fftof_s_d(dbgi, block, op, src_mode, dst_mode);
1195                 } else {
1196                         assert(dst_bits == 128);
1197                         return new_bd_sparc_fftof_s_q(dbgi, block, op, src_mode, dst_mode);
1198                 }
1199         } else if (src_bits == 64) {
1200                 if (dst_bits == 32) {
1201                         return new_bd_sparc_fftof_d_s(dbgi, block, op, src_mode, dst_mode);
1202                 } else {
1203                         assert(dst_bits == 128);
1204                         return new_bd_sparc_fftof_d_q(dbgi, block, op, src_mode, dst_mode);
1205                 }
1206         } else {
1207                 assert(src_bits == 128);
1208                 if (dst_bits == 32) {
1209                         return new_bd_sparc_fftof_q_s(dbgi, block, op, src_mode, dst_mode);
1210                 } else {
1211                         assert(dst_bits == 64);
1212                         return new_bd_sparc_fftof_q_d(dbgi, block, op, src_mode, dst_mode);
1213                 }
1214         }
1215 }
1216
1217 static ir_node *create_ftoi(dbg_info *dbgi, ir_node *block, ir_node *op,
1218                             ir_mode *src_mode)
1219 {
1220         ir_node  *ftoi;
1221         unsigned  bits = get_mode_size_bits(src_mode);
1222         if (bits == 32) {
1223                 ftoi = new_bd_sparc_fftoi_s(dbgi, block, op, src_mode);
1224         } else if (bits == 64) {
1225                 ftoi = new_bd_sparc_fftoi_d(dbgi, block, op, src_mode);
1226         } else {
1227                 assert(bits == 128);
1228                 ftoi = new_bd_sparc_fftoi_q(dbgi, block, op, src_mode);
1229         }
1230
1231         {
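        /* fstoi leaves its result in a floating point register and SPARC has no
         * direct fp->gp register move, so the value takes a detour through a
         * stack slot: store it as a float, reload it as an integer. */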
1232         ir_graph *irg   = get_irn_irg(block);
1233         ir_node  *sp    = get_irg_frame(irg);
1234         ir_node  *nomem = get_irg_no_mem(irg);
1235         ir_node  *stf   = create_stf(dbgi, block, ftoi, sp, nomem, src_mode,
1236                                      NULL, 0, true);
1237         ir_node  *ld    = new_bd_sparc_Ld_imm(dbgi, block, sp, stf, mode_gp,
1238                                               NULL, 0, true);
1239         ir_node  *res   = new_r_Proj(ld, mode_gp, pn_sparc_Ld_res);
1240         set_irn_pinned(stf, op_pin_state_floats);
1241         set_irn_pinned(ld, op_pin_state_floats);
1242         return res;
1243         }
1244 }
1245
1246 static ir_node *create_itof(dbg_info *dbgi, ir_node *block, ir_node *op,
1247                             ir_mode *dst_mode)
1248 {
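        /* Conversely, the int-to-float conversion instructions expect their
         * operand in a floating point register, so the integer value is first
         * stored to the stack and reloaded into the fp register file. */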
1249         ir_graph *irg   = get_irn_irg(block);
1250         ir_node  *sp    = get_irg_frame(irg);
1251         ir_node  *nomem = get_irg_no_mem(irg);
1252         ir_node  *st    = new_bd_sparc_St_imm(dbgi, block, op, sp, nomem,
1253                                               mode_gp, NULL, 0, true);
1254         ir_node  *ldf   = new_bd_sparc_Ldf_s(dbgi, block, sp, st, mode_fp,
1255                                              NULL, 0, true);
1256         ir_node  *res   = new_r_Proj(ldf, mode_fp, pn_sparc_Ldf_res);
1257         unsigned  bits  = get_mode_size_bits(dst_mode);
1258         set_irn_pinned(st, op_pin_state_floats);
1259         set_irn_pinned(ldf, op_pin_state_floats);
1260
1261         if (bits == 32) {
1262                 return new_bd_sparc_fitof_s(dbgi, block, res, dst_mode);
1263         } else if (bits == 64) {
1264                 return new_bd_sparc_fitof_d(dbgi, block, res, dst_mode);
1265         } else {
1266                 assert(bits == 128);
1267                 return new_bd_sparc_fitof_q(dbgi, block, res, dst_mode);
1268         }
1269 }
1270
1271 static ir_node *gen_Conv(ir_node *node)
1272 {
1273         ir_node  *block    = be_transform_node(get_nodes_block(node));
1274         ir_node  *op       = get_Conv_op(node);
1275         ir_mode  *src_mode = get_irn_mode(op);
1276         ir_mode  *dst_mode = get_irn_mode(node);
1277         dbg_info *dbgi     = get_irn_dbg_info(node);
1278         ir_node  *new_op;
1279
1280         int src_bits = get_mode_size_bits(src_mode);
1281         int dst_bits = get_mode_size_bits(dst_mode);
1282
1283         if (src_mode == mode_b)
1284                 panic("ConvB not lowered %+F", node);
1285
1286         new_op = be_transform_node(op);
1287         if (src_mode == dst_mode)
1288                 return new_op;
1289
1290         if (mode_is_float(src_mode) || mode_is_float(dst_mode)) {
1291                 assert((src_bits <= 64 && dst_bits <= 64) && "quad FP not implemented");
1292
1293                 if (mode_is_float(src_mode)) {
1294                         if (mode_is_float(dst_mode)) {
1295                                 /* float -> float conv */
1296                                 return create_fftof(dbgi, block, new_op, src_mode, dst_mode);
1297                         } else {
1298                                 /* float -> int conv */
1299                                 if (!mode_is_signed(dst_mode))
1300                                         panic("float to unsigned not implemented yet");
1301                                 return create_ftoi(dbgi, block, new_op, src_mode);
1302                         }
1303                 } else {
1304                         /* int -> float conv */
1305                         if (src_bits < 32) {
1306                                 new_op = gen_extension(dbgi, block, new_op, src_mode);
1307                         } else if (src_bits == 32 && !mode_is_signed(src_mode)) {
1308                                 panic("unsigned to float not lowered!");
1309                         }
1310                         return create_itof(dbgi, block, new_op, dst_mode);
1311                 }
1312         } else if (src_mode == mode_b) {
1313                 panic("ConvB not lowered %+F", node);
1314         } else { /* complete in gp registers */
1315                 int min_bits;
1316                 ir_mode *min_mode;
1317
1318                 if (src_bits == dst_bits) {
1319                         /* kill unnecessary conv */
1320                         return new_op;
1321                 }
1322
1323                 if (src_bits < dst_bits) {
1324                         min_bits = src_bits;
1325                         min_mode = src_mode;
1326                 } else {
1327                         min_bits = dst_bits;
1328                         min_mode = dst_mode;
1329                 }
1330
1331                 if (upper_bits_clean(new_op, min_mode)) {
1332                         return new_op;
1333                 }
1334
1335                 if (mode_is_signed(min_mode)) {
1336                         return gen_sign_extension(dbgi, block, new_op, min_bits);
1337                 } else {
1338                         return gen_zero_extension(dbgi, block, new_op, min_bits);
1339                 }
1340         }
1341 }
1342
1343 static ir_node *gen_Unknown(ir_node *node)
1344 {
1345         /* just produce a 0 */
1346         ir_mode *mode = get_irn_mode(node);
1347         if (mode_is_float(mode)) {
1348                 ir_node *block = be_transform_node(get_nodes_block(node));
1349                 return gen_float_const(NULL, block, get_mode_null(mode));
1350         } else if (mode_needs_gp_reg(mode)) {
1351                 return get_g0();
1352         }
1353
1354         panic("Unexpected Unknown mode");
1355 }
1356
1357 /**
1358  * Produces the type which sits between the stack args and the locals on the
1359  * stack.
1360  */
1361 static ir_type *sparc_get_between_type(void)
1362 {
1363         static ir_type *between_type  = NULL;
1364         static ir_type *between_type0 = NULL;
1365
1366         if (current_cconv->omit_fp) {
1367                 if (between_type0 == NULL) {
1368                         between_type0
1369                                 = new_type_class(new_id_from_str("sparc_between_type"));
1370                         set_type_size_bytes(between_type0, 0);
1371                 }
1372                 return between_type0;
1373         }
1374
1375         if (between_type == NULL) {
1376                 between_type = new_type_class(new_id_from_str("sparc_between_type"));
1377                 set_type_size_bytes(between_type, SPARC_MIN_STACKSIZE);
1378         }
1379
1380         return between_type;
1381 }
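
/*
 * Background on SPARC_MIN_STACKSIZE (a rough sketch; the constant itself is
 * defined elsewhere in the backend): the 32-bit SPARC calling convention
 * reserves in every frame 16 words for the register window spill area, one
 * word for the hidden aggregate-return pointer and six words of outgoing
 * argument slots, i.e. 23 words or 92 bytes.  That reserved area is what
 * separates our locals from the incoming stack arguments, which is why the
 * between type has this size; with omit_fp the offsets are accounted for
 * differently and the between type stays empty.
 */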
1382
1383 static ir_type *compute_arg_type(ir_graph *irg)
1384 {
1385         ir_entity  *entity    = get_irg_entity(irg);
1386         ir_type    *mtp       = get_entity_type(entity);
1387         size_t      n_params  = get_method_n_params(mtp);
1388         ir_entity **param_map = ALLOCANZ(ir_entity*, n_params);
1389
1390         ir_type *frame_type      = get_irg_frame_type(irg);
1391         size_t   n_frame_members = get_compound_n_members(frame_type);
1392         size_t   f;
1393         size_t   i;
1394
1395         ir_type *res = new_type_struct(id_mangle_u(get_entity_ident(entity), new_id_from_chars("arg_type", 8)));
1396
1397         /* search for existing parameter entities */
1398         for (f = n_frame_members; f > 0; ) {
1399                 ir_entity *member = get_compound_member(frame_type, --f);
1400                 size_t     num;
1401                 const reg_or_stackslot_t *param;
1402
1403                 if (!is_parameter_entity(member))
1404                         continue;
1405                 num = get_entity_parameter_number(member);
1406                 assert(num < n_params);
1407                 if (param_map[num] != NULL)
1408                         panic("multiple entities for parameter %u in %+F found", num, irg);
1409
1410                 param = &current_cconv->parameters[num];
1411                 if (param->reg0 != NULL)
1412                         continue;
1413
1414                 param_map[num] = member;
1415                 /* move to new arg_type */
1416                 set_entity_owner(member, res);
1417         }
1418
1419         for (i = 0; i < n_params; ++i) {
1420                 reg_or_stackslot_t *param = &current_cconv->parameters[i];
1421                 ir_entity          *entity;
1422
1423                 if (param->reg0 != NULL)
1424                         continue;
1425                 entity = param_map[i];
1426                 if (entity == NULL)
1427                         entity = new_parameter_entity(res, i, param->type);
1428                 param->entity = entity;
1429                 set_entity_offset(entity, param->offset);
1430         }
1431
1432         return res;
1433 }
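
/*
 * A sketch of the effect, assuming the usual SPARC convention of passing the
 * first six words of arguments in registers %o0-%o5:
 *
 *     int f(int a, int b, int c, int d, int e, int g, int h);
 *
 * a..g arrive in registers, so their parameters have reg0 set and are skipped
 * here; only h ends up with a parameter entity in the new arg_type, placed at
 * param->offset within the incoming stack argument area.
 */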
1434
1435 static void create_stacklayout(ir_graph *irg)
1436 {
1437         be_stack_layout_t *layout = be_get_irg_stack_layout(irg);
1438
1439         /* calling conventions must be decided by now */
1440         assert(current_cconv != NULL);
1441
1442         memset(layout, 0, sizeof(*layout));
1443
1444         layout->frame_type     = get_irg_frame_type(irg);
1445         layout->between_type   = sparc_get_between_type();
1446         layout->arg_type       = compute_arg_type(irg);
1447         layout->initial_offset = 0;
1448         layout->initial_bias   = 0;
1449         layout->sp_relative    = current_cconv->omit_fp;
1450
1451         assert(N_FRAME_TYPES == 3);
1452         layout->order[0] = layout->frame_type;
1453         layout->order[1] = layout->between_type;
1454         layout->order[2] = layout->arg_type;
1455 }
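
/*
 * The resulting frame picture handed to the later stack phases (a sketch;
 * concrete offsets and the stack bias are computed afterwards):
 *
 *     order[0]  frame_type    locals and spill slots of this function
 *     order[1]  between_type  ABI-reserved area of SPARC_MIN_STACKSIZE bytes
 *                             (empty when the frame pointer is omitted)
 *     order[2]  arg_type      arguments passed to this function on the stack
 */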
1456
1457 /**
1458  * transform the start node to the prolog code
1459  */
1460 static ir_node *gen_Start(ir_node *node)
1461 {
1462         ir_graph  *irg           = get_irn_irg(node);
1463         ir_entity *entity        = get_irg_entity(irg);
1464         ir_type   *function_type = get_entity_type(entity);
1465         ir_node   *block         = get_nodes_block(node);
1466         ir_node   *new_block     = be_transform_node(block);
1467         dbg_info  *dbgi          = get_irn_dbg_info(node);
1468         ir_node   *start;
1469         size_t     i;
1470
1471         /* the stack pointer is needed in the function prolog */
1472         be_prolog_add_reg(abihelper, sp_reg,
1473                         arch_register_req_type_produces_sp | arch_register_req_type_ignore);
1474         be_prolog_add_reg(abihelper, &sparc_registers[REG_G0],
1475                 arch_register_req_type_ignore);
1476         /* function parameters in registers */
1477         for (i = 0; i < get_method_n_params(function_type); ++i) {
1478                 const reg_or_stackslot_t *param = &current_cconv->parameters[i];
1479                 if (param->reg0 != NULL) {
1480                         be_prolog_add_reg(abihelper, param->reg0,
1481                                           arch_register_req_type_none);
1482                 }
1483                 if (param->reg1 != NULL) {
1484                         be_prolog_add_reg(abihelper, param->reg1,
1485                                           arch_register_req_type_none);
1486                 }
1487         }
1488         /* we need the values of the callee saves (note: with a frame pointer the
1489          * register window already preserves them, so there are none to add) */
1490         if (current_cconv->omit_fp) {
1491                 size_t n_callee_saves = ARRAY_SIZE(omit_fp_callee_saves);
1492                 size_t c;
1493                 for (c = 0; c < n_callee_saves; ++c) {
1494                         be_prolog_add_reg(abihelper, omit_fp_callee_saves[c],
1495                                           arch_register_req_type_none);
1496                 }
1497         } else {
1498                 be_prolog_add_reg(abihelper, fp_reg, arch_register_req_type_ignore);
1499         }
1500
1501         start = be_prolog_create_start(abihelper, dbgi, new_block);
1502         return start;
1503 }
1504
1505 static ir_node *get_stack_pointer_for(ir_node *node)
1506 {
1507         /* get predecessor in stack_order list */
1508         ir_node *stack_pred = be_get_stack_pred(abihelper, node);
1509         ir_node *stack;
1510
1511         if (stack_pred == NULL) {
1512                 /* first stack user in the current block. We can simply use the
1513                  * initial sp_proj for it */
1514                 ir_node *sp_proj = be_prolog_get_reg_value(abihelper, sp_reg);
1515                 return sp_proj;
1516         }
1517
1518         be_transform_node(stack_pred);
1519         stack = (ir_node*)pmap_get(node_to_stack, stack_pred);
1520         if (stack == NULL) {
1521                 return get_stack_pointer_for(stack_pred);
1522         }
1523
1524         return stack;
1525 }
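
/*
 * Example (a sketch): for two calls in the same block
 *
 *     x = g(...);        first Call, its IncSP starts from the initial %sp
 *     y = h(...);        second Call, its IncSP must chain after the first
 *
 * be_get_stack_pred() returns the first Call as stack predecessor; once it is
 * transformed, gen_Call() below has recorded its trailing IncSP in
 * node_to_stack, and that node is what this function returns.  The recursion
 * only bottoms out at the block's initial stack pointer.
 */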
1526
1527 /**
1528  * transform a Return node into epilogue code + return statement
1529  */
1530 static ir_node *gen_Return(ir_node *node)
1531 {
1532         ir_node  *block     = get_nodes_block(node);
1533         ir_node  *new_block = be_transform_node(block);
1534         dbg_info *dbgi      = get_irn_dbg_info(node);
1535         ir_node  *mem       = get_Return_mem(node);
1536         ir_node  *new_mem   = be_transform_node(mem);
1537         ir_node  *sp        = get_stack_pointer_for(node);
1538         size_t    n_res     = get_Return_n_ress(node);
1539         ir_node  *bereturn;
1540         size_t    i;
1541
1542         be_epilog_begin(abihelper);
1543         be_epilog_set_memory(abihelper, new_mem);
1544         /* connect the stack pointer with the initial stack pointer; the fix_stack
1545            phase will later serialize all stack-pointer adjusting nodes */
1546         be_epilog_add_reg(abihelper, sp_reg,
1547                         arch_register_req_type_produces_sp | arch_register_req_type_ignore,
1548                         sp);
1549
1550         /* result values */
1551         for (i = 0; i < n_res; ++i) {
1552                 ir_node                  *res_value     = get_Return_res(node, i);
1553                 ir_node                  *new_res_value = be_transform_node(res_value);
1554                 const reg_or_stackslot_t *slot          = &current_cconv->results[i];
1555                 const arch_register_t    *reg           = slot->reg0;
1556                 assert(slot->reg1 == NULL);
1557                 be_epilog_add_reg(abihelper, reg, arch_register_req_type_none,
1558                                   new_res_value);
1559         }
1560         /* callee saves */
1561         if (current_cconv->omit_fp) {
1562                 size_t n_callee_saves = ARRAY_SIZE(omit_fp_callee_saves);
1563                 for (i = 0; i < n_callee_saves; ++i) {
1564                         const arch_register_t *reg   = omit_fp_callee_saves[i];
1565                         ir_node               *value
1566                                 = be_prolog_get_reg_value(abihelper, reg);
1567                         be_epilog_add_reg(abihelper, reg, arch_register_req_type_none,
1568                                           value);
1569                 }
1570         }
1571
1572         bereturn = be_epilog_create_return(abihelper, dbgi, new_block);
1573         return bereturn;
1574 }
1575
1576 static ir_node *bitcast_int_to_float(dbg_info *dbgi, ir_node *block,
1577                                      ir_node *value0, ir_node *value1)
1578 {
1579         ir_graph *irg   = current_ir_graph;
1580         ir_node  *sp    = get_irg_frame(irg);
1581         ir_node  *nomem = get_irg_no_mem(irg);
1582         ir_node  *st    = new_bd_sparc_St_imm(dbgi, block, value0, sp, nomem,
1583                                               mode_gp, NULL, 0, true);
1584         ir_mode  *mode;
1585         ir_node  *ldf;
1586         ir_node  *mem;
1587         set_irn_pinned(st, op_pin_state_floats);
1588
1589         if (value1 != NULL) {
1590                 ir_node *st1 = new_bd_sparc_St_imm(dbgi, block, value1, sp, nomem,
1591                                                    mode_gp, NULL, 4, true);
1592                 ir_node *in[2] = { st, st1 };
1593                 ir_node *sync  = new_r_Sync(block, 2, in);
1594                 set_irn_pinned(st1, op_pin_state_floats);
1595                 mem  = sync;
1596                 mode = mode_fp2;
1597         } else {
1598                 mem  = st;
1599                 mode = mode_fp;
1600         }
1601
1602         ldf = create_ldf(dbgi, block, sp, mem, mode, NULL, 0, true);
1603         set_irn_pinned(ldf, op_pin_state_floats);
1604
1605         return new_r_Proj(ldf, mode, pn_sparc_Ldf_res);
1606 }
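
/*
 * Conceptually this is a reinterpretation through memory, roughly (a C-level
 * sketch only, using a hypothetical union instead of the real spill slot):
 *
 *     union { uint32_t w[2]; float f; double d; } u;
 *     u.w[0] = value0;       corresponds to the St at offset 0
 *     u.w[1] = value1;       corresponds to the St at offset 4 (64-bit case)
 *     read back u.f (or u.d) with a single Ldf from the same slot
 *
 * The detour through memory is necessary because 32-bit SPARC has no direct
 * move between integer and floating-point registers.
 */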
1607
1608 static void bitcast_float_to_int(dbg_info *dbgi, ir_node *block,
1609                                  ir_node *node, ir_mode *float_mode,
1610                                  ir_node **result)
1611 {
1612         ir_graph *irg   = current_ir_graph;
1613         ir_node  *stack = get_irg_frame(irg);
1614         ir_node  *nomem = get_irg_no_mem(irg);
1615         ir_node  *stf   = create_stf(dbgi, block, node, stack, nomem, float_mode,
1616                                      NULL, 0, true);
1617         int       bits  = get_mode_size_bits(float_mode);
1618         ir_node  *ld;
1619         set_irn_pinned(stf, op_pin_state_floats);
1620
1621         ld = new_bd_sparc_Ld_imm(dbgi, block, stack, stf, mode_gp, NULL, 0, true);
1622         set_irn_pinned(ld, op_pin_state_floats);
1623         result[0] = new_r_Proj(ld, mode_gp, pn_sparc_Ld_res);
1624
1625         if (bits == 64) {
1626                 ir_node *ld2 = new_bd_sparc_Ld_imm(dbgi, block, stack, stf, mode_gp,
1627                                                    NULL, 4, true);
1628                 set_irn_pinned(ld2, op_pin_state_floats);
1629                 result[1] = new_r_Proj(ld2, mode_gp, pn_sparc_Ld_res);
1630
1631                 arch_irn_add_flags(ld, (arch_irn_flags_t)sparc_arch_irn_flag_needs_64bit_spillslot);
1632                 arch_irn_add_flags(ld2, (arch_irn_flags_t)sparc_arch_irn_flag_needs_64bit_spillslot);
1633         } else {
1634                 assert(bits == 32);
1635                 result[1] = NULL;
1636         }
1637 }
1638
1639 static ir_node *gen_Call(ir_node *node)
1640 {
1641         ir_graph        *irg          = get_irn_irg(node);
1642         ir_node         *callee       = get_Call_ptr(node);
1643         ir_node         *block        = get_nodes_block(node);
1644         ir_node         *new_block    = be_transform_node(block);
1645         ir_node         *mem          = get_Call_mem(node);
1646         ir_node         *new_mem      = be_transform_node(mem);
1647         dbg_info        *dbgi         = get_irn_dbg_info(node);
1648         ir_type         *type         = get_Call_type(node);
1649         size_t           n_params     = get_Call_n_params(node);
1650         /* at most one store (sync input) per parameter */
1651         ir_node        **sync_ins     = ALLOCAN(ir_node*, n_params);
1652         struct obstack  *obst         = be_get_be_obst(irg);
1653         calling_convention_t *cconv
1654                 = sparc_decide_calling_convention(type, NULL);
1655         size_t           n_param_regs = cconv->n_param_regs;
1656         /* param-regs + mem + stackpointer + callee */
1657         unsigned         max_inputs   = 3 + n_param_regs;
1658         ir_node        **in           = ALLOCAN(ir_node*, max_inputs);
1659         const arch_register_req_t **in_req
1660                 = OALLOCNZ(obst, const arch_register_req_t*, max_inputs);
1661         int              in_arity     = 0;
1662         int              sync_arity   = 0;
1663         int              n_caller_saves = ARRAY_SIZE(caller_saves);
1665         ir_entity       *entity       = NULL;
1666         ir_node         *new_frame    = get_stack_pointer_for(node);
1667         ir_node         *incsp;
1668         int              mem_pos;
1669         ir_node         *res;
1670         size_t           p;
1671         int              i;
1672         int              o;
1673         int              out_arity;
1674
1675         assert(n_params == get_method_n_params(type));
1676
1677         /* construct arguments */
1678
1679         /* memory input */
1680         in_req[in_arity] = arch_no_register_req;
1681         mem_pos          = in_arity;
1682         ++in_arity;
1683
1684         /* stack pointer input */
1685         /* construct an IncSP -> we always have to make sure that the stack is
1686          * aligned even if we don't push arguments onto it */
1687         incsp = be_new_IncSP(sp_reg, new_block, new_frame,
1688                              cconv->param_stack_size, 1);
1689         in_req[in_arity] = sp_reg->single_req;
1690         in[in_arity]     = incsp;
1691         ++in_arity;
1692
1693         /* parameters */
1694         for (p = 0; p < n_params; ++p) {
1695                 ir_node                  *value      = get_Call_param(node, p);
1696                 ir_node                  *new_value  = be_transform_node(value);
1697                 const reg_or_stackslot_t *param      = &cconv->parameters[p];
1698                 ir_type                  *param_type = get_method_param_type(type, p);
1699                 ir_mode                  *mode       = get_type_mode(param_type);
1700                 ir_node                  *new_values[2];
1701                 ir_node                  *str;
1702
1703                 if (mode_is_float(mode) && param->reg0 != NULL) {
1704                         unsigned size_bits = get_mode_size_bits(mode);
1705                         assert(size_bits <= 64);
1706                         bitcast_float_to_int(dbgi, new_block, new_value, mode, new_values);
1707                 } else {
1708                         new_values[0] = new_value;
1709                         new_values[1] = NULL;
1710                 }
1711
1712                 /* put value into registers */
1713                 if (param->reg0 != NULL) {
1714                         in[in_arity]     = new_values[0];
1715                         in_req[in_arity] = param->reg0->single_req;
1716                         ++in_arity;
1717                         if (new_values[1] == NULL)
1718                                 continue;
1719                 }
1720                 if (param->reg1 != NULL) {
1721                         assert(new_values[1] != NULL);
1722                         in[in_arity]     = new_values[1];
1723                         in_req[in_arity] = param->reg1->single_req;
1724                         ++in_arity;
1725                         continue;
1726                 }
1727
1728                 /* if we get here, (part of) the value is passed on the stack */
1729                 if (new_values[1] != NULL) {
1730                         new_value = new_values[1];
1731                         mode      = mode_gp;
1732                 }
1733
1734                 /* store the value into the outgoing argument area */
1735                 if (mode_is_float(mode)) {
1736                         str = create_stf(dbgi, new_block, new_value, incsp, new_mem,
1737                                          mode, NULL, param->offset, true);
1738                 } else {
1739                         str = new_bd_sparc_St_imm(dbgi, new_block, new_value, incsp,
1740                                                   new_mem, mode, NULL, param->offset, true);
1741                 }
1742                 set_irn_pinned(str, op_pin_state_floats);
1743                 sync_ins[sync_arity++] = str;
1744         }
1745
1746         /* construct memory input */
1747         if (sync_arity == 0) {
1748                 in[mem_pos] = new_mem;
1749         } else if (sync_arity == 1) {
1750                 in[mem_pos] = sync_ins[0];
1751         } else {
1752                 in[mem_pos] = new_rd_Sync(NULL, new_block, sync_arity, sync_ins);
1753         }
1754
1755         if (is_SymConst(callee)) {
1756                 entity = get_SymConst_entity(callee);
1757         } else {
1758                 in[in_arity]     = be_transform_node(callee);
1759                 in_req[in_arity] = sparc_reg_classes[CLASS_sparc_gp].class_req;
1760                 ++in_arity;
1761         }
1762         assert(in_arity <= (int)max_inputs);
1763
1764         /* outputs:
1765          *  - memory
1766          *  - caller saves
1767          */
1768         out_arity = 1 + n_caller_saves;
1769
1770         /* create call node */
1771         if (entity != NULL) {
1772                 res = new_bd_sparc_Call_imm(dbgi, new_block, in_arity, in, out_arity,
1773                                             entity, 0);
1774         } else {
1775                 res = new_bd_sparc_Call_reg(dbgi, new_block, in_arity, in, out_arity);
1776         }
1777         arch_set_in_register_reqs(res, in_req);
1778
1779         /* create output register reqs */
1780         o = 0;
1781         arch_set_out_register_req(res, o++, arch_no_register_req);
1782         for (i = 0; i < n_caller_saves; ++i) {
1783                 const arch_register_t *reg = caller_saves[i];
1784                 arch_set_out_register_req(res, o++, reg->single_req);
1785         }
1786         assert(o == out_arity);
1787
1788         /* copy pinned attribute */
1789         set_irn_pinned(res, get_irn_pinned(node));
1790
1791         /* IncSP to destroy the call stackframe */
1792         incsp = be_new_IncSP(sp_reg, new_block, incsp, -cconv->param_stack_size, 0);
1793         /* if we are the last IncSP producer in a block then we have to keep
1794          * the stack value.
1795          * Note: this keeps all producers, which is more than strictly necessary */
1796         add_irn_dep(incsp, res);
1797         keep_alive(incsp);
1798
1799         pmap_insert(node_to_stack, node, incsp);
1800
1801         sparc_free_calling_convention(cconv);
1802         return res;
1803 }
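
/*
 * Input/output layout of the call node constructed above (a sketch):
 *
 *     inputs:   [0]     memory (a single store, a Sync of stores, or the
 *                       transformed Call memory)
 *               [1]     stack pointer (the IncSP reserving param_stack_size)
 *               [2...]  parameter values in their assigned registers
 *               [last]  callee address, only for register-indirect calls
 *     outputs:  [0]     memory
 *               [1...]  one value per caller-saved register
 */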
1804
1805 static ir_node *gen_Sel(ir_node *node)
1806 {
1807         dbg_info  *dbgi      = get_irn_dbg_info(node);
1808         ir_node   *block     = get_nodes_block(node);
1809         ir_node   *new_block = be_transform_node(block);
1810         ir_node   *ptr       = get_Sel_ptr(node);
1811         ir_node   *new_ptr   = be_transform_node(ptr);
1812         ir_entity *entity    = get_Sel_entity(node);
1813
1814         /* this must be the frame pointer; all other Sels must have been
1815          * lowered already */
1816         assert(is_Proj(ptr) && is_Start(get_Proj_pred(ptr)));
1817
1818         return new_bd_sparc_FrameAddr(dbgi, new_block, new_ptr, entity, 0);
1819 }
1820
1821 static const arch_register_req_t float1_req = {
1822         arch_register_req_type_normal,
1823         &sparc_reg_classes[CLASS_sparc_fp],
1824         NULL,
1825         0,
1826         0,
1827         1
1828 };
1829 static const arch_register_req_t float2_req = {
1830         arch_register_req_type_normal | arch_register_req_type_aligned,
1831         &sparc_reg_classes[CLASS_sparc_fp],
1832         NULL,
1833         0,
1834         0,
1835         2
1836 };
1837 static const arch_register_req_t float4_req = {
1838         arch_register_req_type_normal | arch_register_req_type_aligned,
1839         &sparc_reg_classes[CLASS_sparc_fp],
1840         NULL,
1841         0,
1842         0,
1843         4
1844 };
1845
1846
1847 static const arch_register_req_t *get_float_req(ir_mode *mode)
1848 {
1849         unsigned bits = get_mode_size_bits(mode);
1850
1851         assert(mode_is_float(mode));
1852         if (bits == 32) {
1853                 return &float1_req;
1854         } else if (bits == 64) {
1855                 return &float2_req;
1856         } else {
1857                 assert(bits == 128);
1858                 return &float4_req;
1859         }
1860 }
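
/*
 * The final initializer of these requirements is the register width: a
 * single-precision value occupies one %f register, a double occupies an
 * aligned even/odd pair such as %f0/%f1, and a quad would occupy four
 * consecutive registers starting at a multiple of four.  Quad support is
 * only sketched here; see the "quad FP not implemented" assertion earlier
 * in this file.
 */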
1861
1862 /**
1863  * Transform Phi nodes
1864  */
1865 static ir_node *gen_Phi(ir_node *node)
1866 {
1867         const arch_register_req_t *req;
1868         ir_node  *block = be_transform_node(get_nodes_block(node));
1869         ir_graph *irg   = current_ir_graph;
1870         dbg_info *dbgi  = get_irn_dbg_info(node);
1871         ir_mode  *mode  = get_irn_mode(node);
1872         ir_node  *phi;
1873
1874         if (mode_needs_gp_reg(mode)) {
1875                 /* we shouldn't have any 64bit stuff around anymore */
1876                 assert(get_mode_size_bits(mode) <= 32);
1877                 /* all integer operations are on 32bit registers now */
1878                 mode = mode_gp;
1879                 req  = sparc_reg_classes[CLASS_sparc_gp].class_req;
1880         } else if (mode_is_float(mode)) {
1881                 /* keep the float mode unchanged */
1882                 req  = get_float_req(mode);
1883         } else {
1884                 req = arch_no_register_req;
1885         }
1886
1887         /* phi nodes allow loops, so we use the old arguments for now
1888          * and fix this later */
1889         phi = new_ir_node(dbgi, irg, block, op_Phi, mode, get_irn_arity(node), get_irn_in(node) + 1);
1890         copy_node_attr(irg, node, phi);
1891         be_duplicate_deps(node, phi);
1892         arch_set_out_register_req(phi, 0, req);
1893         be_enqueue_preds(node);
1894         return phi;
1895 }
1896
1897 /**
1898  * Transform a Proj from a Load.
1899  */
1900 static ir_node *gen_Proj_Load(ir_node *node)
1901 {
1902         ir_node  *load     = get_Proj_pred(node);
1903         ir_node  *new_load = be_transform_node(load);
1904         dbg_info *dbgi     = get_irn_dbg_info(node);
1905         long      pn       = get_Proj_proj(node);
1906
1907         /* renumber the proj */
1908         switch (get_sparc_irn_opcode(new_load)) {
1909         case iro_sparc_Ld:
1910                 /* handle all gp loads the same way: they share proj numbers. */
1911                 if (pn == pn_Load_res) {
1912                         return new_rd_Proj(dbgi, new_load, mode_gp, pn_sparc_Ld_res);
1913                 } else if (pn == pn_Load_M) {
1914                         return new_rd_Proj(dbgi, new_load, mode_M, pn_sparc_Ld_M);
1915                 }
1916                 break;
1917         case iro_sparc_Ldf:
1918                 if (pn == pn_Load_res) {
1919                         return new_rd_Proj(dbgi, new_load, mode_fp, pn_sparc_Ldf_res);
1920                 } else if (pn == pn_Load_M) {
1921                         return new_rd_Proj(dbgi, new_load, mode_M, pn_sparc_Ldf_M);
1922                 }
1923                 break;
1924         default:
1925                 break;
1926         }
1927         panic("Unsupported Proj from Load");
1928 }
1929
1930 static ir_node *gen_Proj_Store(ir_node *node)
1931 {
1932         ir_node  *store     = get_Proj_pred(node);
1933         ir_node  *new_store = be_transform_node(store);
1934         long      pn        = get_Proj_proj(node);
1935
1936         /* renumber the proj */
1937         switch (get_sparc_irn_opcode(new_store)) {
1938         case iro_sparc_St:
1939                 if (pn == pn_Store_M) {
1940                         return new_store;
1941                 }
1942                 break;
1943         case iro_sparc_Stf:
1944                 if (pn == pn_Store_M) {
1945                         return new_store;
1946                 }
1947                 break;
1948         default:
1949                 break;
1950         }
1951         panic("Unsupported Proj from Store");
1952 }
1953
1954 /**
1955  * Transform the Projs from a Cmp.
1956  */
1957 static ir_node *gen_Proj_Cmp(ir_node *node)
1958 {
1959         (void) node;
1960         panic("not implemented");
1961 }
1962
1963 /**
1964  * transform Projs from a Div
1965  */
1966 static ir_node *gen_Proj_Div(ir_node *node)
1967 {
1968         ir_node  *pred     = get_Proj_pred(node);
1969         ir_node  *new_pred = be_transform_node(pred);
1970         long      pn       = get_Proj_proj(node);
1971
1972         assert(is_sparc_SDiv(new_pred) || is_sparc_UDiv(new_pred)
1973                || is_sparc_fdiv(new_pred));
1974         assert((int)pn_sparc_SDiv_res == (int)pn_sparc_UDiv_res);
1975         assert((int)pn_sparc_SDiv_M   == (int)pn_sparc_UDiv_M);
1976         assert((int)pn_sparc_SDiv_res == (int)pn_sparc_fdiv_res);
1977         assert((int)pn_sparc_SDiv_M   == (int)pn_sparc_fdiv_M);
1978         switch (pn) {
1979         case pn_Div_res:
1980                 return new_r_Proj(new_pred, mode_gp, pn_sparc_SDiv_res);
1981         case pn_Div_M:
1982                 return new_r_Proj(new_pred, mode_M, pn_sparc_SDiv_M);
1983         default:
1984                 break;
1985         }
1986         panic("Unsupported Proj from Div");
1987 }
1988
1989 static ir_node *get_frame_base(void)
1990 {
1991         const arch_register_t *reg = current_cconv->omit_fp ? sp_reg : fp_reg;
1992         return be_prolog_get_reg_value(abihelper, reg);
1993 }
1994
1995 static ir_node *gen_Proj_Start(ir_node *node)
1996 {
1997         ir_node *block     = get_nodes_block(node);
1998         ir_node *new_block = be_transform_node(block);
1999         long     pn        = get_Proj_proj(node);
2000         /* make sure prolog is constructed */
2001         be_transform_node(get_Proj_pred(node));
2002
2003         switch ((pn_Start) pn) {
2004         case pn_Start_X_initial_exec:
2005                 /* exchange ProjX with a jump */
2006                 return new_bd_sparc_Ba(NULL, new_block);
2007         case pn_Start_M:
2008                 return be_prolog_get_memory(abihelper);
2009         case pn_Start_T_args:
2010                 return new_r_Bad(get_irn_irg(block), mode_T);
2011         case pn_Start_P_frame_base:
2012                 return get_frame_base();
2013         }
2014         panic("Unexpected start proj: %ld", pn);
2015 }
2016
2017 static ir_node *gen_Proj_Proj_Start(ir_node *node)
2018 {
2019         long       pn          = get_Proj_proj(node);
2020         ir_node   *block       = get_nodes_block(node);
2021         ir_node   *new_block   = be_transform_node(block);
2022         ir_entity *entity      = get_irg_entity(current_ir_graph);
2023         ir_type   *method_type = get_entity_type(entity);
2024         ir_type   *param_type  = get_method_param_type(method_type, pn);
2025         const reg_or_stackslot_t *param;
2026
2027         /* Proj->Proj->Start must be a method argument */
2028         assert(get_Proj_proj(get_Proj_pred(node)) == pn_Start_T_args);
2029
2030         param = &current_cconv->parameters[pn];
2031
2032         if (param->reg0 != NULL) {
2033                 /* argument transmitted in register */
2034                 ir_mode               *mode  = get_type_mode(param_type);
2035                 const arch_register_t *reg   = param->reg0;
2036                 ir_node               *value = be_prolog_get_reg_value(abihelper, reg);
2037
2038                 if (mode_is_float(mode)) {
2039                         ir_node *value1 = NULL;
2040
2041                         if (param->reg1 != NULL) {
2042                                 value1 = be_prolog_get_reg_value(abihelper, param->reg1);
2043                         } else if (param->entity != NULL) {
2044                                 ir_node *fp  = be_prolog_get_reg_value(abihelper, fp_reg);
2045                                 ir_node *mem = be_prolog_get_memory(abihelper);
2046                                 ir_node *ld  = new_bd_sparc_Ld_imm(NULL, new_block, fp, mem,
2047                                                                    mode_gp, param->entity,
2048                                                                    0, true);
2049                                 value1 = new_r_Proj(ld, mode_gp, pn_sparc_Ld_res);
2050                         }
2051
2052                         /* convert integer value to float */
2053                         value = bitcast_int_to_float(NULL, new_block, value, value1);
2054                 }
2055                 return value;
2056         } else {
2057                 /* argument transmitted on stack */
2058                 ir_node  *mem  = be_prolog_get_memory(abihelper);
2059                 ir_mode  *mode = get_type_mode(param->type);
2060                 ir_node  *base = get_frame_base();
2061                 ir_node  *load;
2062                 ir_node  *value;
2063
2064                 if (mode_is_float(mode)) {
2065                         load  = create_ldf(NULL, new_block, base, mem, mode,
2066                                            param->entity, 0, true);
2067                         value = new_r_Proj(load, mode_fp, pn_sparc_Ldf_res);
2068                 } else {
2069                         load  = new_bd_sparc_Ld_imm(NULL, new_block, base, mem, mode,
2070                                                     param->entity, 0, true);
2071                         value = new_r_Proj(load, mode_gp, pn_sparc_Ld_res);
2072                 }
2073                 set_irn_pinned(load, op_pin_state_floats);
2074
2075                 return value;
2076         }
2077 }
2078
2079 static ir_node *gen_Proj_Call(ir_node *node)
2080 {
2081         long     pn        = get_Proj_proj(node);
2082         ir_node *call      = get_Proj_pred(node);
2083         ir_node *new_call  = be_transform_node(call);
2084
2085         switch ((pn_Call) pn) {
2086         case pn_Call_M:
2087                 return new_r_Proj(new_call, mode_M, 0);
2088         case pn_Call_X_regular:
2089         case pn_Call_X_except:
2090         case pn_Call_T_result:
2091                 break;
2092         }
2093         panic("Unexpected Call proj %ld", pn);
2094 }
2095
2096 /**
2097  * Finds the index of the output value of a mode_T node which is constrained
2098  * to a specific single register.
2099  */
2100 static int find_out_for_reg(ir_node *node, const arch_register_t *reg)
2101 {
2102         int n_outs = arch_irn_get_n_outs(node);
2103         int o;
2104
2105         for (o = 0; o < n_outs; ++o) {
2106                 const arch_register_req_t *req = arch_get_out_register_req(node, o);
2107                 if (req == reg->single_req)
2108                         return o;
2109         }
2110         return -1;
2111 }
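
/*
 * Note: comparing the requirement pointer against reg->single_req suffices
 * here because gen_Call() installs exactly these singleton requirements on
 * the call's outputs; no structural comparison is needed.
 */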
2112
2113 static ir_node *gen_Proj_Proj_Call(ir_node *node)
2114 {
2115         long                  pn            = get_Proj_proj(node);
2116         ir_node              *call          = get_Proj_pred(get_Proj_pred(node));
2117         ir_node              *new_call      = be_transform_node(call);
2118         ir_type              *function_type = get_Call_type(call);
2119         calling_convention_t *cconv
2120                 = sparc_decide_calling_convention(function_type, NULL);
2121         const reg_or_stackslot_t *res = &cconv->results[pn];
2122         const arch_register_t    *reg = res->reg0;
2123         ir_mode                  *mode;
2124         int                       regn;
2125
2126         assert(res->reg0 != NULL && res->reg1 == NULL);
2127         regn = find_out_for_reg(new_call, reg);
2128         if (regn < 0) {
2129                 panic("Internal error in calling convention for return %+F", node);
2130         }
2131         mode = res->reg0->reg_class->mode;
2132
2133         sparc_free_calling_convention(cconv);
2134
2135         return new_r_Proj(new_call, mode, regn);
2136 }
2137
2138 /**
2139  * Transform a Proj node.
2140  */
2141 static ir_node *gen_Proj(ir_node *node)
2142 {
2143         ir_node *pred = get_Proj_pred(node);
2144
2145         switch (get_irn_opcode(pred)) {
2146         case iro_Store:
2147                 return gen_Proj_Store(node);
2148         case iro_Load:
2149                 return gen_Proj_Load(node);
2150         case iro_Call:
2151                 return gen_Proj_Call(node);
2152         case iro_Cmp:
2153                 return gen_Proj_Cmp(node);
2154         case iro_Cond:
2155                 return be_duplicate_node(node);
2156         case iro_Div:
2157                 return gen_Proj_Div(node);
2158         case iro_Start:
2159                 return gen_Proj_Start(node);
2160         case iro_Proj: {
2161                 ir_node *pred_pred = get_Proj_pred(pred);
2162                 if (is_Call(pred_pred)) {
2163                         return gen_Proj_Proj_Call(node);
2164                 } else if (is_Start(pred_pred)) {
2165                         return gen_Proj_Proj_Start(node);
2166                 }
2167                 /* FALLTHROUGH */
2168         }
2169         default:
2170                 if (is_sparc_AddCC_t(pred)) {
2171                         return gen_Proj_AddCC_t(node);
2172                 } else if (is_sparc_SubCC_t(pred)) {
2173                         return gen_Proj_SubCC_t(node);
2174                 }
2175                 panic("code selection didn't expect Proj after %+F", pred);
2176         }
2177 }
2178
2179 /**
2180  * transform a Jmp
2181  */
2182 static ir_node *gen_Jmp(ir_node *node)
2183 {
2184         ir_node  *block     = get_nodes_block(node);
2185         ir_node  *new_block = be_transform_node(block);
2186         dbg_info *dbgi      = get_irn_dbg_info(node);
2187
2188         return new_bd_sparc_Ba(dbgi, new_block);
2189 }
2190
2191 /**
2192  * configure transformation callbacks
2193  */
2194 static void sparc_register_transformers(void)
2195 {
2196         be_start_transform_setup();
2197
2198         be_set_transform_function(op_Add,          gen_Add);
2199         be_set_transform_function(op_And,          gen_And);
2200         be_set_transform_function(op_Call,         gen_Call);
2201         be_set_transform_function(op_Cmp,          gen_Cmp);
2202         be_set_transform_function(op_Cond,         gen_Cond);
2203         be_set_transform_function(op_Const,        gen_Const);
2204         be_set_transform_function(op_Conv,         gen_Conv);
2205         be_set_transform_function(op_Div,          gen_Div);
2206         be_set_transform_function(op_Eor,          gen_Eor);
2207         be_set_transform_function(op_Jmp,          gen_Jmp);
2208         be_set_transform_function(op_Load,         gen_Load);
2209         be_set_transform_function(op_Minus,        gen_Minus);
2210         be_set_transform_function(op_Mul,          gen_Mul);
2211         be_set_transform_function(op_Mulh,         gen_Mulh);
2212         be_set_transform_function(op_Not,          gen_Not);
2213         be_set_transform_function(op_Or,           gen_Or);
2214         be_set_transform_function(op_Phi,          gen_Phi);
2215         be_set_transform_function(op_Proj,         gen_Proj);
2216         be_set_transform_function(op_Return,       gen_Return);
2217         be_set_transform_function(op_Sel,          gen_Sel);
2218         be_set_transform_function(op_Shl,          gen_Shl);
2219         be_set_transform_function(op_Shr,          gen_Shr);
2220         be_set_transform_function(op_Shrs,         gen_Shrs);
2221         be_set_transform_function(op_Start,        gen_Start);
2222         be_set_transform_function(op_Store,        gen_Store);
2223         be_set_transform_function(op_Sub,          gen_Sub);
2224         be_set_transform_function(op_SymConst,     gen_SymConst);
2225         be_set_transform_function(op_Unknown,      gen_Unknown);
2226
2227         be_set_transform_function(op_sparc_AddX_t, gen_AddX_t);
2228         be_set_transform_function(op_sparc_AddCC_t,gen_AddCC_t);
2229         be_set_transform_function(op_sparc_Save,   be_duplicate_node);
2230         be_set_transform_function(op_sparc_SubX_t, gen_SubX_t);
2231         be_set_transform_function(op_sparc_SubCC_t,gen_SubCC_t);
2232 }
2233
2234 /**
2235  * Transform a Firm graph into a SPARC graph.
2236  */
2237 void sparc_transform_graph(ir_graph *irg)
2238 {
2239         ir_entity *entity = get_irg_entity(irg);
2240         ir_type   *frame_type;
2241
2242         sparc_register_transformers();
2243
2244         node_to_stack = pmap_create();
2245
2246         mode_gp    = mode_Iu;
2247         mode_fp    = mode_F;
2248         mode_fp2   = mode_D;
2249         mode_flags = mode_Bu;
2250         //mode_fp4 = ?
2251
2252         abihelper = be_abihelper_prepare(irg);
2253         be_collect_stacknodes(abihelper);
2254         current_cconv
2255                 = sparc_decide_calling_convention(get_entity_type(entity), irg);
2256         create_stacklayout(irg);
2257         be_add_parameter_entity_stores(irg);
2258
2259         be_transform_graph(irg, NULL);
2260
2261         be_abihelper_finish(abihelper);
2262         sparc_free_calling_convention(current_cconv);
2263
2264         frame_type = get_irg_frame_type(irg);
2265         if (get_type_state(frame_type) == layout_undefined)
2266                 default_layout_compound_type(frame_type);
2267
2268         pmap_destroy(node_to_stack);
2269         node_to_stack = NULL;
2270
2271         be_add_missing_keeps(irg);
2272
2273         /* do code placement, to optimize the position of constants */
2274         place_code(irg);
2275 }
2276
2277 void sparc_init_transform(void)
2278 {
2279         FIRM_DBG_REGISTER(dbg, "firm.be.sparc.transform");
2280 }