/*
 * Copyright (C) 1995-2010 University of Karlsruhe.  All right reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License.
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 */

/**
 * @file
 * @brief   Peephole optimization and legalization of a sparc function
 * @author  Matthias Braun
 *
 * A note on sparc stackpointer (sp) behaviour:
 * The ABI expects SPARC_MIN_STACKSIZE bytes to be available at the
 * stackpointer. This space will be used to spill register windows,
 * and for spilling va_arg arguments (maybe we can optimize this away for
 * statically known not-va-arg-functions...)
 * This in effect means that we allocate that extra space at the function begin
 * which is easy. But this space isn't really fixed at the beginning of the
 * stackframe. Instead you should rather imagine the space as always being the
 * last-thing on the stack.
 * So when addressing anything stack-specific we have to account for this
 * area, while our compiler thinks the space is occupied at the beginning
 * of the stack frame. The code here among other things adjusts these offsets
 * accordingly.
 */
41 #include "bearch_sparc_t.h"
42 #include "gen_sparc_regalloc_if.h"
43 #include "sparc_new_nodes.h"
44 #include "sparc_transform.h"
51 #include "bepeephole.h"
54 #include "bespillslots.h"
58 static ir_heights_t *heights;
60 static void kill_unused_stacknodes(ir_node *node)
62 if (get_irn_n_edges(node) > 0)
65 if (be_is_IncSP(node)) {
68 } else if (is_Phi(node)) {
69 int arity = get_irn_arity(node);
70 ir_node **ins = ALLOCAN(ir_node*, arity);
73 memcpy(ins, get_irn_in(node), arity*sizeof(ins[0]));
76 for (i = 0; i < arity; ++i)
77 kill_unused_stacknodes(ins[i]);
81 static void introduce_epilog(ir_node *ret)
83 arch_register_t const *const sp_reg = &sparc_registers[REG_SP];
84 assert(arch_get_irn_register_req_in(ret, n_sparc_Return_sp) == sp_reg->single_req);
86 ir_node *const sp = get_irn_n(ret, n_sparc_Return_sp);
87 ir_node *const block = get_nodes_block(ret);
88 ir_graph *const irg = get_irn_irg(ret);
89 be_stack_layout_t *const layout = be_get_irg_stack_layout(irg);
90 if (!layout->sp_relative) {
91 arch_register_t const *const fp_reg = &sparc_registers[REG_FRAME_POINTER];
92 ir_node *const fp = be_get_initial_reg_value(irg, fp_reg);
93 ir_node *const new_sp = be_get_initial_reg_value(irg, sp_reg);
94 ir_node *const restore = new_bd_sparc_RestoreZero(NULL, block, new_sp, fp);
95 sched_add_before(ret, restore);
96 arch_set_irn_register(restore, sp_reg);
97 set_irn_n(ret, n_sparc_Return_sp, restore);
98 kill_unused_stacknodes(sp);
100 ir_type *const frame_type = get_irg_frame_type(irg);
101 unsigned const frame_size = get_type_size_bytes(frame_type);
102 ir_node *const incsp = be_new_IncSP(sp_reg, block, sp, -frame_size, 0);
103 set_irn_n(ret, n_sparc_Return_sp, incsp);
104 sched_add_before(ret, incsp);
108 void sparc_introduce_prolog_epilog(ir_graph *irg)
110 const arch_register_t *sp_reg = &sparc_registers[REG_SP];
111 ir_node *start = get_irg_start(irg);
112 be_stack_layout_t *layout = be_get_irg_stack_layout(irg);
113 ir_node *block = get_nodes_block(start);
114 ir_node *initial_sp = be_get_initial_reg_value(irg, sp_reg);
115 ir_node *schedpoint = start;
116 ir_type *frame_type = get_irg_frame_type(irg);
117 unsigned frame_size = get_type_size_bytes(frame_type);
119 /* introduce epilog for every return node */
121 ir_node *end_block = get_irg_end_block(irg);
122 int arity = get_irn_arity(end_block);
125 for (i = 0; i < arity; ++i) {
126 ir_node *ret = get_irn_n(end_block, i);
127 assert(is_sparc_Return(ret));
128 introduce_epilog(ret);
132 while (be_is_Keep(sched_next(schedpoint)))
133 schedpoint = sched_next(schedpoint);
135 if (!layout->sp_relative) {
136 ir_node *const save = new_bd_sparc_Save_imm(NULL, block, initial_sp, NULL, -(SPARC_MIN_STACKSIZE + frame_size));
137 arch_set_irn_register(save, sp_reg);
138 sched_add_after(schedpoint, save);
141 edges_reroute_except(initial_sp, save, save);
143 /* we still need the Save even if noone is explicitely using the
144 * value. (TODO: this isn't 100% correct yet, something at the end of
145 * the function should hold the Save, even if we use a restore
146 * which just overrides it instead of using the value)
148 if (get_irn_n_edges(save) == 0) {
149 ir_node *in[] = { save };
150 ir_node *keep = be_new_Keep(block, 1, in);
151 sched_add_after(schedpoint, keep);
154 ir_node *const incsp = be_new_IncSP(sp_reg, block, initial_sp, frame_size, 0);
155 edges_reroute_except(initial_sp, incsp, incsp);
156 sched_add_after(schedpoint, incsp);
161 * Creates a constant from an immediate value.
163 static ir_node *create_constant_from_immediate(ir_node *node, int offset)
165 dbg_info *dbgi = get_irn_dbg_info(node);
166 ir_node *block = get_nodes_block(node);
167 ir_node *high = new_bd_sparc_SetHi(dbgi, block, NULL, offset);
169 sched_add_before(node, high);
170 arch_set_irn_register(high, &sparc_registers[REG_G4]);
172 if ((offset & 0x3ff) != 0) {
173 ir_node *low = new_bd_sparc_Or_imm(dbgi, block, high, NULL, offset & 0x3ff);
175 sched_add_before(node, low);
176 arch_set_irn_register(low, &sparc_registers[REG_G4]);
184 static void finish_sparc_Save(ir_node *node)
186 sparc_attr_t *attr = get_sparc_attr(node);
187 int offset = attr->immediate_value;
189 if (! sparc_is_value_imm_encodeable(offset)) {
190 ir_node *base = get_irn_n(node, n_sparc_Save_stack);
191 dbg_info *dbgi = get_irn_dbg_info(node);
192 ir_node *block = get_nodes_block(node);
193 ir_node *constant = create_constant_from_immediate(node, offset);
194 ir_node *new_save = new_bd_sparc_Save_reg(dbgi, block, base, constant);
195 const arch_register_t *reg = arch_get_irn_register(node);
197 /* we have a Save with immediate */
198 assert(get_irn_arity(node) == 1);
200 sched_add_before(node, new_save);
201 arch_set_irn_register(new_save, reg);
202 be_peephole_exchange(node, new_save);
207 * SPARC immediates are limited. Split IncSP with bigger immediates if
210 static void finish_be_IncSP(ir_node *node)
212 int offset = be_get_IncSP_offset(node);
214 /* we might have to break the IncSP apart if the constant has become too big */
215 if (! sparc_is_value_imm_encodeable(offset) && ! sparc_is_value_imm_encodeable(-offset)) {
216 ir_node *sp = be_get_IncSP_pred(node);
217 dbg_info *dbgi = get_irn_dbg_info(node);
218 ir_node *block = get_nodes_block(node);
219 ir_node *constant = create_constant_from_immediate(node, offset);
220 ir_node *sub = new_bd_sparc_Sub_reg(dbgi, block, sp, constant);
222 sched_add_before(node, sub);
223 arch_set_irn_register(sub, &sparc_registers[REG_SP]);
224 be_peephole_exchange(node, sub);
229 * Adjust sp-relative offsets.
231 * Split into multiple instructions if offset exceeds SPARC immediate range.
233 static void finish_sparc_FrameAddr(ir_node *node)
235 sparc_attr_t *attr = get_sparc_attr(node);
236 int offset = attr->immediate_value;
238 if (! sparc_is_value_imm_encodeable(offset)) {
239 ir_node *base = get_irn_n(node, n_sparc_FrameAddr_base);
240 dbg_info *dbgi = get_irn_dbg_info(node);
241 ir_node *block = get_nodes_block(node);
242 ir_node *constant = create_constant_from_immediate(node, offset);
243 ir_node *new_frameaddr = new_bd_sparc_Add_reg(dbgi, block, base, constant);
244 const arch_register_t *reg = arch_get_irn_register(node);
246 sched_add_before(node, new_frameaddr);
247 arch_set_irn_register(new_frameaddr, reg);
248 be_peephole_exchange(node, new_frameaddr);
252 static void finish_sparc_Ld(ir_node *node)
254 sparc_attr_t *attr = get_sparc_attr(node);
255 int offset = attr->immediate_value;
256 const sparc_load_store_attr_t *load_store_attr = get_sparc_load_store_attr_const(node);
258 if (! load_store_attr->is_frame_entity)
261 if (! sparc_is_value_imm_encodeable(offset)) {
262 ir_node *ptr = get_irn_n(node, n_sparc_Ld_ptr);
263 dbg_info *dbgi = get_irn_dbg_info(node);
264 ir_node *block = get_nodes_block(node);
265 ir_node *mem = get_irn_n(node, n_sparc_Ld_mem);
266 ir_mode *load_store_mode = load_store_attr->load_store_mode;
267 ir_node *constant = create_constant_from_immediate(node, offset);
268 ir_node *new_load = new_bd_sparc_Ld_reg(dbgi, block, ptr, constant, mem, load_store_mode);
269 sparc_load_store_attr_t *new_load_attr = get_sparc_load_store_attr(new_load);
271 new_load_attr->is_frame_entity = load_store_attr->is_frame_entity;
272 new_load_attr->is_reg_reg = load_store_attr->is_reg_reg;
274 sched_add_before(node, new_load);
275 be_foreach_out(node, i) {
276 arch_set_irn_register_out(new_load, i, arch_get_irn_register_out(node, i));
278 be_peephole_exchange(node, new_load);
283 static void split_sparc_ldf(ir_node *node)
285 sparc_load_store_attr_t *attr = get_sparc_load_store_attr(node);
286 unsigned bits = get_mode_size_bits(attr->load_store_mode);
287 /* split 128bit loads into 2 64bit loads */
289 dbg_info *dbgi = get_irn_dbg_info(node);
290 ir_node *block = get_nodes_block(node);
291 ir_node *ptr = get_irn_n(node, n_sparc_Ldf_ptr);
292 ir_node *mem = get_irn_n(node, n_sparc_Ldf_mem);
294 = new_bd_sparc_Ldf_d(dbgi, block, ptr, mem, mode_D,
295 attr->base.immediate_value_entity,
296 attr->base.immediate_value + 8,
297 attr->is_frame_entity);
298 ir_node *new_mem = new_r_Proj(new_load, mode_M, pn_sparc_Ldf_M);
300 const arch_register_t *reg
301 = arch_get_irn_register_out(node, pn_sparc_Ldf_res);
302 unsigned reg_index = reg->global_index;
304 arch_set_irn_register_out(new_load, pn_sparc_Ldf_res,
305 &sparc_registers[reg_index+2]);
307 attr->load_store_mode = mode_D;
308 set_irn_n(node, n_sparc_Ldf_mem, new_mem);
309 sched_add_before(node, new_load);
313 static void finish_sparc_Ldf(ir_node *node)
315 sparc_attr_t *attr = get_sparc_attr(node);
316 int offset = attr->immediate_value;
317 const sparc_load_store_attr_t *load_store_attr = get_sparc_load_store_attr_const(node);
319 if (! load_store_attr->is_frame_entity)
322 if (! sparc_is_value_imm_encodeable(offset)) {
323 ir_node *ptr = get_irn_n(node, n_sparc_Ldf_ptr);
324 dbg_info *dbgi = get_irn_dbg_info(node);
325 ir_node *block = get_nodes_block(node);
326 ir_node *mem = get_irn_n(node, n_sparc_Ldf_mem);
327 ir_mode *load_store_mode = load_store_attr->load_store_mode;
328 ir_node *constant = create_constant_from_immediate(node, offset);
329 ir_node *new_ptr = new_bd_sparc_Add_reg(dbgi, block, ptr, constant);
330 ir_node *new_load = new_bd_sparc_Ldf_s(dbgi, block, new_ptr, mem, load_store_mode, NULL, 0, true);
331 sparc_load_store_attr_t *new_load_attr = get_sparc_load_store_attr(new_load);
333 new_load_attr->is_frame_entity = load_store_attr->is_frame_entity;
334 new_load_attr->is_reg_reg = load_store_attr->is_reg_reg;
336 sched_add_before(node, new_load);
337 be_foreach_out(node, i) {
338 arch_set_irn_register_out(new_load, i, arch_get_irn_register_out(node, i));
340 be_peephole_exchange(node, new_load);
345 static void finish_sparc_St(ir_node *node)
347 sparc_attr_t *attr = get_sparc_attr(node);
348 int offset = attr->immediate_value;
349 const sparc_load_store_attr_t *load_store_attr = get_sparc_load_store_attr_const(node);
351 if (! load_store_attr->is_frame_entity)
354 if (! sparc_is_value_imm_encodeable(offset)) {
355 ir_node *ptr = get_irn_n(node, n_sparc_St_ptr);
356 dbg_info *dbgi = get_irn_dbg_info(node);
357 ir_node *block = get_nodes_block(node);
358 ir_node *mem = get_irn_n(node, n_sparc_St_mem);
359 ir_node *value = get_irn_n(node, n_sparc_St_val);
360 ir_mode *load_store_mode = load_store_attr->load_store_mode;
361 ir_node *constant = create_constant_from_immediate(node, offset);
362 ir_node *new_load = new_bd_sparc_St_reg(dbgi, block, value, ptr, constant, mem, load_store_mode);
363 sparc_load_store_attr_t *new_load_attr = get_sparc_load_store_attr(new_load);
365 new_load_attr->is_frame_entity = load_store_attr->is_frame_entity;
366 new_load_attr->is_reg_reg = load_store_attr->is_reg_reg;
368 sched_add_before(node, new_load);
369 be_foreach_out(node, i) {
370 arch_set_irn_register_out(new_load, i, arch_get_irn_register_out(node, i));
372 be_peephole_exchange(node, new_load);
377 static void finish_sparc_Stf(ir_node *node)
379 sparc_attr_t *attr = get_sparc_attr(node);
380 int offset = attr->immediate_value;
381 const sparc_load_store_attr_t *load_store_attr = get_sparc_load_store_attr_const(node);
383 if (! load_store_attr->is_frame_entity)
386 if (! sparc_is_value_imm_encodeable(offset)) {
387 ir_node *ptr = get_irn_n(node, n_sparc_Stf_ptr);
388 dbg_info *dbgi = get_irn_dbg_info(node);
389 ir_node *block = get_nodes_block(node);
390 ir_node *mem = get_irn_n(node, n_sparc_Stf_mem);
391 ir_node *value = get_irn_n(node, n_sparc_Stf_val);
392 ir_mode *load_store_mode = load_store_attr->load_store_mode;
393 ir_node *constant = create_constant_from_immediate(node, offset);
394 ir_node *new_ptr = new_bd_sparc_Add_reg(dbgi, block, ptr, constant);
395 ir_node *new_load = new_bd_sparc_Stf_s(dbgi, block, value, new_ptr, mem, load_store_mode, NULL, 0, true);
396 sparc_load_store_attr_t *new_load_attr = get_sparc_load_store_attr(new_load);
398 new_load_attr->is_frame_entity = load_store_attr->is_frame_entity;
399 new_load_attr->is_reg_reg = load_store_attr->is_reg_reg;
401 sched_add_before(node, new_load);
402 be_foreach_out(node, i) {
403 arch_set_irn_register_out(new_load, i, arch_get_irn_register_out(node, i));
405 be_peephole_exchange(node, new_load);
410 static void peephole_be_IncSP(ir_node *node)
413 node = be_peephole_IncSP_IncSP(node);
414 if (!be_is_IncSP(node))
417 pred = be_get_IncSP_pred(node);
418 if (is_sparc_Save(pred) && be_has_only_one_user(pred)) {
419 int offset = -be_get_IncSP_offset(node);
420 sparc_attr_t *attr = get_sparc_attr(pred);
421 attr->immediate_value += offset;
422 be_peephole_exchange(node, pred);
426 static void peephole_sparc_FrameAddr(ir_node *node)
428 /* the peephole code currently doesn't allow this since it changes
429 * the register. Find out why and how to workaround this... */
431 const sparc_attr_t *attr = get_sparc_attr_const(node);
432 if (attr->immediate_value == 0) {
433 ir_node *base = get_irn_n(node, n_sparc_FrameAddr_base);
434 be_peephole_exchange(node, base);
440 /* output must not be local, or out reg. Since the destination of the restore
441 * is the rotated register-file where only the old in-registers are still
442 * visible (as out-registers) */
443 static bool is_restorezeroopt_reg(const arch_register_t *reg)
445 unsigned index = reg->global_index;
446 return (index >= REG_G0 && index <= REG_G7)
447 || (index >= REG_I0 && index <= REG_I7);
450 static void replace_with_restore_reg(ir_node *node, ir_node *replaced,
451 ir_node *op0, ir_node *op1)
453 dbg_info *dbgi = get_irn_dbg_info(node);
454 ir_node *stack_in = get_irn_n(node, n_sparc_RestoreZero_stack);
455 ir_node *fp = get_irn_n(node, n_sparc_RestoreZero_frame_pointer);
456 ir_node *block = get_nodes_block(node);
457 ir_mode *mode = get_irn_mode(node);
458 ir_node *new_node = new_bd_sparc_Restore_reg(dbgi, block, stack_in, fp,
460 ir_node *stack = new_r_Proj(new_node, mode, pn_sparc_Restore_stack);
461 ir_node *res = new_r_Proj(new_node, mode, pn_sparc_Restore_res);
462 const arch_register_t *reg = arch_get_irn_register(replaced);
463 const arch_register_t *sp = &sparc_registers[REG_SP];
464 arch_set_irn_register_out(new_node, pn_sparc_Restore_stack, sp);
465 arch_set_irn_register_out(new_node, pn_sparc_Restore_res, reg);
467 sched_add_before(node, new_node);
468 be_peephole_exchange(node, stack);
469 be_peephole_exchange(replaced, res);
472 static void replace_with_restore_imm(ir_node *node, ir_node *replaced,
473 ir_node *op, ir_entity *imm_entity,
476 dbg_info *dbgi = get_irn_dbg_info(node);
477 ir_node *stack_in = get_irn_n(node, n_sparc_RestoreZero_stack);
478 ir_node *fp = get_irn_n(node, n_sparc_RestoreZero_frame_pointer);
479 ir_node *block = get_nodes_block(node);
480 ir_mode *mode = get_irn_mode(node);
481 ir_node *new_node = new_bd_sparc_Restore_imm(dbgi, block, stack_in, fp,
482 op, imm_entity, immediate);
483 ir_node *stack = new_r_Proj(new_node, mode, pn_sparc_Restore_stack);
484 ir_node *res = new_r_Proj(new_node, mode, pn_sparc_Restore_res);
485 const arch_register_t *reg = arch_get_irn_register(replaced);
486 const arch_register_t *sp = &sparc_registers[REG_SP];
487 arch_set_irn_register_out(new_node, pn_sparc_Restore_stack, sp);
488 arch_set_irn_register_out(new_node, pn_sparc_Restore_res, reg);
490 sched_add_before(node, new_node);
491 be_peephole_exchange(node, stack);
492 be_peephole_exchange(replaced, res);
495 static void peephole_sparc_RestoreZero(ir_node *node)
497 /* restore gives us a free "add" instruction, let's try to use that to fold
498 * an instruction in. We can do the following:
500 * - Copy values (g0 + reg)
501 * - Produce constants (g0 + immediate)
502 * - Perform an add (reg + reg)
503 * - Perform a sub with immediate (reg + (-immediate))
505 * Note: In an ideal world, this would not be a peephole optimization but
506 * already performed during code selection. Since about all foldable ops are
507 * arguments of the return node. However we have a hard time doing this
508 * since we construct epilogue code only after register allocation
509 * (and therefore after code selection).
511 int n_tries = 10; /* limit our search */
513 for (ir_node *schedpoint = node;;) {
514 const arch_register_t *reg;
515 schedpoint = sched_prev(schedpoint);
516 if (sched_is_begin(schedpoint))
522 if (arch_get_irn_n_outs(schedpoint) == 0)
525 if (!mode_is_data(get_irn_mode(schedpoint)))
528 reg = arch_get_irn_register(schedpoint);
529 if (!is_restorezeroopt_reg(reg))
532 if (be_is_Copy(schedpoint) && be_can_move_down(heights, schedpoint, node)) {
533 ir_node *const op = be_get_Copy_op(schedpoint);
534 replace_with_restore_imm(node, schedpoint, op, NULL, 0);
535 } else if (is_sparc_Or(schedpoint) &&
536 arch_get_irn_flags(schedpoint) & ((arch_irn_flags_t)sparc_arch_irn_flag_immediate_form) &&
537 arch_get_irn_register_in(schedpoint, 0) == &sparc_registers[REG_G0] &&
538 be_can_move_down(heights, schedpoint, node)) {
539 /* it's a constant */
540 const sparc_attr_t *attr = get_sparc_attr_const(schedpoint);
541 ir_entity *entity = attr->immediate_value_entity;
542 int32_t immediate = attr->immediate_value;
543 ir_node *g0 = get_irn_n(schedpoint, 0);
544 replace_with_restore_imm(node, schedpoint, g0, entity, immediate);
545 } else if (is_sparc_Add(schedpoint) &&
546 be_can_move_down(heights, schedpoint, node)) {
547 if (arch_get_irn_flags(schedpoint) & ((arch_irn_flags_t)sparc_arch_irn_flag_immediate_form)) {
548 ir_node *op = get_irn_n(schedpoint, 0);
549 const sparc_attr_t *attr = get_sparc_attr_const(schedpoint);
550 ir_entity *entity = attr->immediate_value_entity;
551 int32_t imm = attr->immediate_value;
552 replace_with_restore_imm(node, schedpoint, op, entity, imm);
554 ir_node *op0 = get_irn_n(schedpoint, 0);
555 ir_node *op1 = get_irn_n(schedpoint, 1);
556 replace_with_restore_reg(node, schedpoint, op0, op1);
558 } else if (is_sparc_Sub(schedpoint) &&
559 arch_get_irn_flags(schedpoint) & ((arch_irn_flags_t)sparc_arch_irn_flag_immediate_form) &&
560 arch_get_irn_register_in(schedpoint, 0) == &sparc_registers[REG_G0] &&
561 be_can_move_down(heights, schedpoint, node)) {
562 /* it's a constant */
563 const sparc_attr_t *attr = get_sparc_attr_const(schedpoint);
564 ir_entity *entity = attr->immediate_value_entity;
565 int32_t imm = attr->immediate_value;
566 if (entity == NULL && sparc_is_value_imm_encodeable(-imm)) {
567 ir_node *g0 = get_irn_n(schedpoint, 0);
568 replace_with_restore_imm(node, schedpoint, g0, NULL, -imm);
573 /* when we're here then we performed a folding and are done */
578 static void finish_sparc_Return(ir_node *node)
580 /* Ensure that the restore is directly before the return. */
581 sched_foreach_reverse_from(sched_prev(node), restore) {
582 if (is_sparc_Restore(restore) || is_sparc_RestoreZero(restore)) {
583 sched_remove(restore);
584 sched_add_before(node, restore);
590 static void register_peephole_optimisation(ir_op *op, peephole_opt_func func)
592 assert(op->ops.generic == NULL);
593 op->ops.generic = (op_func) func;
596 static void sparc_collect_frame_entity_nodes(ir_node *node, void *data)
598 be_fec_env_t *env = (be_fec_env_t*)data;
602 const sparc_load_store_attr_t *attr;
604 if (be_is_Reload(node) && be_get_frame_entity(node) == NULL) {
605 mode = get_irn_mode(node);
606 align = get_mode_size_bytes(mode);
607 be_node_needs_frame_entity(env, node, mode, align);
611 if (!is_sparc_Ld(node) && !is_sparc_Ldf(node))
614 attr = get_sparc_load_store_attr_const(node);
615 entity = attr->base.immediate_value_entity;
616 mode = attr->load_store_mode;
619 if (!attr->is_frame_entity)
621 if (arch_get_irn_flags(node) & sparc_arch_irn_flag_needs_64bit_spillslot)
623 align = get_mode_size_bytes(mode);
624 be_node_needs_frame_entity(env, node, mode, align);
627 static void sparc_set_frame_entity(ir_node *node, ir_entity *entity)
629 if (is_be_node(node)) {
630 be_node_set_frame_entity(node, entity);
632 /* we only say be_node_needs_frame_entity on nodes with load_store
633 * attributes, so this should be fine */
634 sparc_load_store_attr_t *attr = get_sparc_load_store_attr(node);
635 assert(attr->is_frame_entity);
636 assert(attr->base.immediate_value_entity == NULL);
637 attr->base.immediate_value_entity = entity;
641 void sparc_finish_graph(ir_graph *irg)
643 be_stack_layout_t *stack_layout = be_get_irg_stack_layout(irg);
644 bool at_begin = stack_layout->sp_relative ? true : false;
645 be_fec_env_t *fec_env = be_new_frame_entity_coalescer(irg);
647 irg_walk_graph(irg, NULL, sparc_collect_frame_entity_nodes, fec_env);
648 be_assign_entities(fec_env, sparc_set_frame_entity, at_begin);
649 be_free_frame_entity_coalescer(fec_env);
650 sparc_adjust_stack_entity_offsets(irg);
652 sparc_introduce_prolog_epilog(irg);
654 /* fix stack entity offsets */
655 be_abi_fix_stack_nodes(irg);
656 sparc_fix_stack_bias(irg);
658 heights = heights_new(irg);
660 /* perform peephole optimizations */
661 ir_clear_opcodes_generic_func();
662 register_peephole_optimisation(op_be_IncSP, peephole_be_IncSP);
663 register_peephole_optimisation(op_sparc_FrameAddr, peephole_sparc_FrameAddr);
664 register_peephole_optimisation(op_sparc_RestoreZero,
665 peephole_sparc_RestoreZero);
666 register_peephole_optimisation(op_sparc_Ldf, split_sparc_ldf);
667 be_peephole_opt(irg);
669 /* perform legalizations (mostly fix nodes with too big immediates) */
670 ir_clear_opcodes_generic_func();
671 register_peephole_optimisation(op_be_IncSP, finish_be_IncSP);
672 register_peephole_optimisation(op_sparc_FrameAddr, finish_sparc_FrameAddr);
673 register_peephole_optimisation(op_sparc_Ld, finish_sparc_Ld);
674 register_peephole_optimisation(op_sparc_Ldf, finish_sparc_Ldf);
675 register_peephole_optimisation(op_sparc_Return, finish_sparc_Return);
676 register_peephole_optimisation(op_sparc_Save, finish_sparc_Save);
677 register_peephole_optimisation(op_sparc_St, finish_sparc_St);
678 register_peephole_optimisation(op_sparc_Stf, finish_sparc_Stf);
679 be_peephole_opt(irg);
681 heights_free(heights);
683 be_remove_dead_nodes_from_schedule(irg);