2 * This file is part of libFirm.
3 * Copyright (C) 2012 University of Karlsruhe.
8 * @brief Peephole optimization and legalization of a sparc function
9 * @author Matthias Braun
11 * A note on sparc stackpointer (sp) behaviour:
12 * The ABI expects SPARC_MIN_STACKSIZE bytes to be available at the
13 * stackpointer. This space will be used to spill register windows,
14 * and for spilling va_arg arguments (maybe we can optimize this away for
15 * statically known not-va-arg-functions...)
 * This in effect means that we allocate that extra space at the beginning of
 * the function, which is easy. But this space isn't really fixed at the beginning of the
18 * stackframe. Instead you should rather imagine the space as always being the
19 * last-thing on the stack.
20 * So when addressing anything stack-specific we have to account for this
21 * area, while our compiler thinks the space is occupied at the beginning
22 * of the stack frame. The code here among other things adjusts these offsets
27 #include "bearch_sparc_t.h"
28 #include "gen_sparc_regalloc_if.h"
29 #include "sparc_new_nodes.h"
30 #include "sparc_transform.h"
37 #include "bepeephole.h"
40 #include "bespillslots.h"
/* Height information of the current graph; used by be_can_move_down() in
 * peephole_sparc_RestoreZero(). (Re)computed and freed in sparc_finish_graph(). */
static ir_heights_t *heights;
/**
 * Recursively remove stack-manipulating nodes (IncSP chains and the Phis
 * feeding them) that have no users left.
 * NOTE(review): several source lines are missing from this chunk (the early
 * return body, the IncSP removal code, closing braces, and presumably the
 * declaration of `i`) — only the visible statements are annotated.
 */
static void kill_unused_stacknodes(ir_node *node)
	/* node still has users -> keep it */
	if (get_irn_n_edges(node) > 0)
	if (be_is_IncSP(node)) {
	} else if (is_Phi(node)) {
		int arity = get_irn_arity(node);
		ir_node **ins = ALLOCAN(ir_node*, arity);
		/* copy the in-array first: killing the Phi invalidates its ins */
		memcpy(ins, get_irn_in(node), arity*sizeof(ins[0]));
		/* the operands may have become unused as well -> recurse */
		for (i = 0; i < arity; ++i)
			kill_unused_stacknodes(ins[i]);
/**
 * Create the epilogue (stack teardown) in front of a Return node.
 * In the frame-pointer case a RestoreZero is inserted; in the sp-relative
 * case the frame is released with an IncSP.
 */
static void introduce_epilog(ir_node *ret)
	arch_register_t const *const sp_reg = &sparc_registers[REG_SP];
	assert(arch_get_irn_register_req_in(ret, n_sparc_Return_sp) == sp_reg->single_req);
	ir_node *const sp = get_irn_n(ret, n_sparc_Return_sp);
	ir_node *const block = get_nodes_block(ret);
	ir_graph *const irg = get_irn_irg(ret);
	be_stack_layout_t *const layout = be_get_irg_stack_layout(irg);
	if (!layout->sp_relative) {
		/* frame-pointer based: RestoreZero restores the caller's window/sp */
		arch_register_t const *const fp_reg = &sparc_registers[REG_FRAME_POINTER];
		ir_node *const fp = be_get_initial_reg_value(irg, fp_reg);
		ir_node *const new_sp = be_get_initial_reg_value(irg, sp_reg);
		ir_node *const restore = new_bd_sparc_RestoreZero(NULL, block, new_sp, fp);
		sched_add_before(ret, restore);
		arch_set_irn_register(restore, sp_reg);
		set_irn_n(ret, n_sparc_Return_sp, restore);
		/* the old sp chain may have become unused -> clean it up */
		kill_unused_stacknodes(sp);
		/* sp-relative: release the frame again via IncSP.
		 * NOTE(review): the `} else {` line between the two branches is not
		 * visible in this chunk — presumably this is the else branch. */
		ir_type *const frame_type = get_irg_frame_type(irg);
		unsigned const frame_size = get_type_size_bytes(frame_type);
		ir_node *const incsp = be_new_IncSP(sp_reg, block, sp, -frame_size, 0);
		set_irn_n(ret, n_sparc_Return_sp, incsp);
		sched_add_before(ret, incsp);
/**
 * Introduce the prologue (Save or IncSP after Start) and an epilogue in
 * front of every Return node of the graph.
 * NOTE(review): several lines (braces, `int i;`, an `} else {`) are missing
 * from this chunk.
 */
void sparc_introduce_prolog_epilog(ir_graph *irg)
	const arch_register_t *sp_reg = &sparc_registers[REG_SP];
	ir_node *start = get_irg_start(irg);
	be_stack_layout_t *layout = be_get_irg_stack_layout(irg);
	ir_node *block = get_nodes_block(start);
	ir_node *initial_sp = be_get_initial_reg_value(irg, sp_reg);
	ir_node *schedpoint = start;
	ir_type *frame_type = get_irg_frame_type(irg);
	unsigned frame_size = get_type_size_bytes(frame_type);
	/* introduce epilog for every return node */
	ir_node *end_block = get_irg_end_block(irg);
	int arity = get_irn_arity(end_block);
	for (i = 0; i < arity; ++i) {
		ir_node *ret = get_irn_n(end_block, i);
		assert(is_sparc_Return(ret));
		introduce_epilog(ret);
	/* place the prologue behind Start and any Keeps scheduled right after it */
	while (be_is_Keep(sched_next(schedpoint)))
		schedpoint = sched_next(schedpoint);
	if (!layout->sp_relative) {
		/* fp-based frame: a single Save sets up the register window and
		 * allocates frame + the ABI-required SPARC_MIN_STACKSIZE area */
		ir_node *const save = new_bd_sparc_Save_imm(NULL, block, initial_sp, NULL, -(SPARC_MIN_STACKSIZE + frame_size));
		arch_set_irn_register(save, sp_reg);
		sched_add_after(schedpoint, save);
		/* make every user of the initial sp use the Save result instead */
		edges_reroute_except(initial_sp, save, save);
		/* we still need the Save even if no one is explicitly using the
		 * value. (TODO: this isn't 100% correct yet, something at the end of
		 * the function should hold the Save, even if we use a restore
		 * which just overrides it instead of using the value) */
		if (get_irn_n_edges(save) == 0) {
			ir_node *in[] = { save };
			ir_node *keep = be_new_Keep(block, 1, in);
			sched_add_after(schedpoint, keep);
		/* sp-relative frame: just adjust the stack pointer.
		 * NOTE(review): the `} else {` line is not visible in this chunk —
		 * presumably this is the else branch of the if above. */
		ir_node *const incsp = be_new_IncSP(sp_reg, block, initial_sp, frame_size, 0);
		edges_reroute_except(initial_sp, incsp, incsp);
		sched_add_after(schedpoint, incsp);
/**
 * Creates a constant from an immediate value.
 *
 * Materializes @p offset in %g4 via SetHi (upper 22 bits) plus, when the
 * low 10 bits are non-zero, an additional Or_imm. Both instructions are
 * scheduled before @p node.
 * NOTE(review): the return statements are not visible in this chunk —
 * presumably `high` resp. `low` is returned.
 */
static ir_node *create_constant_from_immediate(ir_node *node, int offset)
	dbg_info *dbgi = get_irn_dbg_info(node);
	ir_node *block = get_nodes_block(node);
	ir_node *high = new_bd_sparc_SetHi(dbgi, block, NULL, offset);
	sched_add_before(node, high);
	arch_set_irn_register(high, &sparc_registers[REG_G4]);
	/* low 10 bits set -> complete the constant with an Or */
	if ((offset & 0x3ff) != 0) {
		ir_node *low = new_bd_sparc_Or_imm(dbgi, block, high, NULL, offset & 0x3ff);
		sched_add_before(node, low);
		arch_set_irn_register(low, &sparc_registers[REG_G4]);
/**
 * Legalize a Save whose immediate is not encodeable: materialize the offset
 * in a register and switch to the register form of Save.
 */
static void finish_sparc_Save(ir_node *node)
	sparc_attr_t *attr = get_sparc_attr(node);
	int offset = attr->immediate_value;
	if (! sparc_is_value_imm_encodeable(offset)) {
		ir_node *base = get_irn_n(node, n_sparc_Save_stack);
		dbg_info *dbgi = get_irn_dbg_info(node);
		ir_node *block = get_nodes_block(node);
		ir_node *constant = create_constant_from_immediate(node, offset);
		ir_node *new_save = new_bd_sparc_Save_reg(dbgi, block, base, constant);
		const arch_register_t *reg = arch_get_irn_register(node);
		/* we have a Save with immediate */
		assert(get_irn_arity(node) == 1);
		sched_add_before(node, new_save);
		arch_set_irn_register(new_save, reg);
		be_peephole_exchange(node, new_save);
/*
 * SPARC immediates are limited. Split IncSP with bigger immediates if
 * necessary (comment truncated in this chunk).
 */
static void finish_be_IncSP(ir_node *node)
	int offset = be_get_IncSP_offset(node);
	/* we might have to break the IncSP apart if the constant has become too big */
	if (! sparc_is_value_imm_encodeable(offset) && ! sparc_is_value_imm_encodeable(-offset)) {
		ir_node *sp = be_get_IncSP_pred(node);
		dbg_info *dbgi = get_irn_dbg_info(node);
		ir_node *block = get_nodes_block(node);
		/* materialize the offset and replace the IncSP by Sub(sp, constant) */
		ir_node *constant = create_constant_from_immediate(node, offset);
		ir_node *sub = new_bd_sparc_Sub_reg(dbgi, block, sp, constant);
		sched_add_before(node, sub);
		arch_set_irn_register(sub, &sparc_registers[REG_SP]);
		be_peephole_exchange(node, sub);
/*
 * Adjust sp-relative offsets.
 *
 * Split into multiple instructions if offset exceeds SPARC immediate range.
 */
static void finish_sparc_FrameAddr(ir_node *node)
	sparc_attr_t *attr = get_sparc_attr(node);
	int offset = attr->immediate_value;
	if (! sparc_is_value_imm_encodeable(offset)) {
		/* offset too big for the immediate form: materialize it in a
		 * register and compute the address with Add(base, constant) */
		ir_node *base = get_irn_n(node, n_sparc_FrameAddr_base);
		dbg_info *dbgi = get_irn_dbg_info(node);
		ir_node *block = get_nodes_block(node);
		ir_node *constant = create_constant_from_immediate(node, offset);
		ir_node *new_frameaddr = new_bd_sparc_Add_reg(dbgi, block, base, constant);
		const arch_register_t *reg = arch_get_irn_register(node);
		sched_add_before(node, new_frameaddr);
		arch_set_irn_register(new_frameaddr, reg);
		be_peephole_exchange(node, new_frameaddr);
/**
 * Legalize a frame-entity Ld whose offset is not encodeable as immediate:
 * rebuild it as the reg+reg form with the offset materialized in a register.
 * NOTE(review): the early `return` after the is_frame_entity check is not
 * visible in this chunk.
 */
static void finish_sparc_Ld(ir_node *node)
	sparc_attr_t *attr = get_sparc_attr(node);
	int offset = attr->immediate_value;
	const sparc_load_store_attr_t *load_store_attr = get_sparc_load_store_attr_const(node);
	/* only frame-entity loads are handled here */
	if (! load_store_attr->is_frame_entity)
	if (! sparc_is_value_imm_encodeable(offset)) {
		ir_node *ptr = get_irn_n(node, n_sparc_Ld_ptr);
		dbg_info *dbgi = get_irn_dbg_info(node);
		ir_node *block = get_nodes_block(node);
		ir_node *mem = get_irn_n(node, n_sparc_Ld_mem);
		ir_mode *load_store_mode = load_store_attr->load_store_mode;
		ir_node *constant = create_constant_from_immediate(node, offset);
		ir_node *new_load = new_bd_sparc_Ld_reg(dbgi, block, ptr, constant, mem, load_store_mode);
		sparc_load_store_attr_t *new_load_attr = get_sparc_load_store_attr(new_load);
		new_load_attr->is_frame_entity = load_store_attr->is_frame_entity;
		new_load_attr->is_reg_reg = load_store_attr->is_reg_reg;
		sched_add_before(node, new_load);
		/* carry over the register assignment of every out value */
		be_foreach_out(node, i) {
			arch_set_irn_register_out(new_load, i, arch_get_irn_register_out(node, i));
		be_peephole_exchange(node, new_load);
/**
 * Split 128bit float loads into two 64bit loads: a new Ldf_d (at offset +8,
 * targeting the second half of the register quadruple, reg_index+2) is
 * scheduled before @p node, and @p node itself is narrowed to mode_D.
 * NOTE(review): several lines are missing from this chunk — the `bits`
 * check guarding the split and the declaration line of `new_load`.
 */
static void split_sparc_ldf(ir_node *node)
	sparc_load_store_attr_t *attr = get_sparc_load_store_attr(node);
	unsigned bits = get_mode_size_bits(attr->load_store_mode);
	/* split 128bit loads into 2 64bit loads */
	dbg_info *dbgi = get_irn_dbg_info(node);
	ir_node *block = get_nodes_block(node);
	ir_node *ptr = get_irn_n(node, n_sparc_Ldf_ptr);
	ir_node *mem = get_irn_n(node, n_sparc_Ldf_mem);
		= new_bd_sparc_Ldf_d(dbgi, block, ptr, mem, mode_D,
		                     attr->base.immediate_value_entity,
		                     attr->base.immediate_value + 8,
		                     attr->is_frame_entity);
	ir_node *new_mem = new_r_Proj(new_load, mode_M, pn_sparc_Ldf_M);
	const arch_register_t *reg
		= arch_get_irn_register_out(node, pn_sparc_Ldf_res);
	unsigned reg_index = reg->global_index;
	/* the second load targets the second register pair of the quad */
	arch_set_irn_register_out(new_load, pn_sparc_Ldf_res,
	                          &sparc_registers[reg_index+2]);
	attr->load_store_mode = mode_D;
	/* serialize the two loads through memory */
	set_irn_n(node, n_sparc_Ldf_mem, new_mem);
	sched_add_before(node, new_load);
/**
 * Legalize a frame-entity Ldf whose offset is not encodeable as immediate:
 * compute the address with an explicit Add and load from the new pointer.
 * NOTE(review): the early `return` after the is_frame_entity check is not
 * visible in this chunk.
 */
static void finish_sparc_Ldf(ir_node *node)
	sparc_attr_t *attr = get_sparc_attr(node);
	int offset = attr->immediate_value;
	const sparc_load_store_attr_t *load_store_attr = get_sparc_load_store_attr_const(node);
	/* only frame-entity loads are handled here */
	if (! load_store_attr->is_frame_entity)
	if (! sparc_is_value_imm_encodeable(offset)) {
		ir_node *ptr = get_irn_n(node, n_sparc_Ldf_ptr);
		dbg_info *dbgi = get_irn_dbg_info(node);
		ir_node *block = get_nodes_block(node);
		ir_node *mem = get_irn_n(node, n_sparc_Ldf_mem);
		ir_mode *load_store_mode = load_store_attr->load_store_mode;
		ir_node *constant = create_constant_from_immediate(node, offset);
		/* fold the big offset into an explicit address computation */
		ir_node *new_ptr = new_bd_sparc_Add_reg(dbgi, block, ptr, constant);
		ir_node *new_load = new_bd_sparc_Ldf_s(dbgi, block, new_ptr, mem, load_store_mode, NULL, 0, true);
		sparc_load_store_attr_t *new_load_attr = get_sparc_load_store_attr(new_load);
		new_load_attr->is_frame_entity = load_store_attr->is_frame_entity;
		new_load_attr->is_reg_reg = load_store_attr->is_reg_reg;
		sched_add_before(node, new_load);
		/* carry over the register assignment of every out value */
		be_foreach_out(node, i) {
			arch_set_irn_register_out(new_load, i, arch_get_irn_register_out(node, i));
		be_peephole_exchange(node, new_load);
/**
 * Legalize a frame-entity St whose offset is not encodeable as immediate:
 * rebuild it as the reg+reg form with the offset materialized in a register.
 * (Note: the local is named `new_load` although it is a store.)
 * NOTE(review): the early `return` after the is_frame_entity check is not
 * visible in this chunk.
 */
static void finish_sparc_St(ir_node *node)
	sparc_attr_t *attr = get_sparc_attr(node);
	int offset = attr->immediate_value;
	const sparc_load_store_attr_t *load_store_attr = get_sparc_load_store_attr_const(node);
	/* only frame-entity stores are handled here */
	if (! load_store_attr->is_frame_entity)
	if (! sparc_is_value_imm_encodeable(offset)) {
		ir_node *ptr = get_irn_n(node, n_sparc_St_ptr);
		dbg_info *dbgi = get_irn_dbg_info(node);
		ir_node *block = get_nodes_block(node);
		ir_node *mem = get_irn_n(node, n_sparc_St_mem);
		ir_node *value = get_irn_n(node, n_sparc_St_val);
		ir_mode *load_store_mode = load_store_attr->load_store_mode;
		ir_node *constant = create_constant_from_immediate(node, offset);
		ir_node *new_load = new_bd_sparc_St_reg(dbgi, block, value, ptr, constant, mem, load_store_mode);
		sparc_load_store_attr_t *new_load_attr = get_sparc_load_store_attr(new_load);
		new_load_attr->is_frame_entity = load_store_attr->is_frame_entity;
		new_load_attr->is_reg_reg = load_store_attr->is_reg_reg;
		sched_add_before(node, new_load);
		/* carry over the register assignment of every out value */
		be_foreach_out(node, i) {
			arch_set_irn_register_out(new_load, i, arch_get_irn_register_out(node, i));
		be_peephole_exchange(node, new_load);
/**
 * Legalize a frame-entity Stf whose offset is not encodeable as immediate:
 * compute the address with an explicit Add and store through the new
 * pointer. (Note: the local is named `new_load` although it is a store.)
 * NOTE(review): the early `return` after the is_frame_entity check is not
 * visible in this chunk.
 */
static void finish_sparc_Stf(ir_node *node)
	sparc_attr_t *attr = get_sparc_attr(node);
	int offset = attr->immediate_value;
	const sparc_load_store_attr_t *load_store_attr = get_sparc_load_store_attr_const(node);
	/* only frame-entity stores are handled here */
	if (! load_store_attr->is_frame_entity)
	if (! sparc_is_value_imm_encodeable(offset)) {
		ir_node *ptr = get_irn_n(node, n_sparc_Stf_ptr);
		dbg_info *dbgi = get_irn_dbg_info(node);
		ir_node *block = get_nodes_block(node);
		ir_node *mem = get_irn_n(node, n_sparc_Stf_mem);
		ir_node *value = get_irn_n(node, n_sparc_Stf_val);
		ir_mode *load_store_mode = load_store_attr->load_store_mode;
		ir_node *constant = create_constant_from_immediate(node, offset);
		/* fold the big offset into an explicit address computation */
		ir_node *new_ptr = new_bd_sparc_Add_reg(dbgi, block, ptr, constant);
		ir_node *new_load = new_bd_sparc_Stf_s(dbgi, block, value, new_ptr, mem, load_store_mode, NULL, 0, true);
		sparc_load_store_attr_t *new_load_attr = get_sparc_load_store_attr(new_load);
		new_load_attr->is_frame_entity = load_store_attr->is_frame_entity;
		new_load_attr->is_reg_reg = load_store_attr->is_reg_reg;
		sched_add_before(node, new_load);
		/* carry over the register assignment of every out value */
		be_foreach_out(node, i) {
			arch_set_irn_register_out(new_load, i, arch_get_irn_register_out(node, i));
		be_peephole_exchange(node, new_load);
/**
 * Peephole: merge adjacent IncSPs, then try to fold the stack adjustment
 * into a preceding Save (which can absorb it in its immediate) when that
 * Save has exactly one user.
 * NOTE(review): the declaration of `pred` and the early return after the
 * be_is_IncSP check are not visible in this chunk.
 */
static void peephole_be_IncSP(ir_node *node)
	/* merge IncSP-IncSP chains first */
	node = be_peephole_IncSP_IncSP(node);
	if (!be_is_IncSP(node))
	pred = be_get_IncSP_pred(node);
	if (is_sparc_Save(pred) && be_has_only_one_user(pred)) {
		/* absorb the IncSP into the Save's immediate */
		int offset = -be_get_IncSP_offset(node);
		sparc_attr_t *attr = get_sparc_attr(pred);
		attr->immediate_value += offset;
		be_peephole_exchange(node, pred);
/* Peephole handler for FrameAddr — apparently a deliberate no-op for now
 * (body not visible in this chunk beyond the comment below). */
static void peephole_sparc_FrameAddr(ir_node *node)
	/* the peephole code currently doesn't allow this since it changes
	 * the register. Find out why and how to workaround this... */
419 /* output must not be local, or out reg. Since the destination of the restore
420 * is the rotated register-file where only the old in-registers are still
421 * visible (as out-registers) */
422 static bool is_restorezeroopt_reg(const arch_register_t *reg)
424 unsigned index = reg->global_index;
425 return (index >= REG_G0 && index <= REG_G7)
426 || (index >= REG_I0 && index <= REG_I7);
/**
 * Replace a RestoreZero @p node by a Restore that additionally computes
 * op0 + op1 on its free adder, making @p replaced (the node that produced
 * that value) redundant. The Restore's stack Proj replaces @p node, its
 * result Proj replaces @p replaced.
 * NOTE(review): the continuation line of the new_bd_sparc_Restore_reg call
 * (presumably passing op0, op1) is not visible in this chunk.
 */
static void replace_with_restore_reg(ir_node *node, ir_node *replaced,
                                     ir_node *op0, ir_node *op1)
	dbg_info *dbgi = get_irn_dbg_info(node);
	ir_node *stack_in = get_irn_n(node, n_sparc_RestoreZero_stack);
	ir_node *fp = get_irn_n(node, n_sparc_RestoreZero_frame_pointer);
	ir_node *block = get_nodes_block(node);
	ir_mode *mode = get_irn_mode(node);
	ir_node *new_node = new_bd_sparc_Restore_reg(dbgi, block, stack_in, fp,
	ir_node *stack = new_r_Proj(new_node, mode, pn_sparc_Restore_stack);
	ir_node *res = new_r_Proj(new_node, mode, pn_sparc_Restore_res);
	/* the result takes over the register of the replaced node */
	const arch_register_t *reg = arch_get_irn_register(replaced);
	const arch_register_t *sp = &sparc_registers[REG_SP];
	arch_set_irn_register_out(new_node, pn_sparc_Restore_stack, sp);
	arch_set_irn_register_out(new_node, pn_sparc_Restore_res, reg);
	sched_add_before(node, new_node);
	be_peephole_exchange(node, stack);
	be_peephole_exchange(replaced, res);
/**
 * Replace a RestoreZero @p node by a Restore that additionally computes
 * op + immediate on its free adder, making @p replaced redundant.
 * The Restore's stack Proj replaces @p node, its result Proj replaces
 * @p replaced.
 */
static void replace_with_restore_imm(ir_node *node, ir_node *replaced,
                                     ir_node *op, ir_entity *imm_entity,
	dbg_info *dbgi = get_irn_dbg_info(node);
	ir_node *stack_in = get_irn_n(node, n_sparc_RestoreZero_stack);
	ir_node *fp = get_irn_n(node, n_sparc_RestoreZero_frame_pointer);
	ir_node *block = get_nodes_block(node);
	ir_mode *mode = get_irn_mode(node);
	ir_node *new_node = new_bd_sparc_Restore_imm(dbgi, block, stack_in, fp,
	                                             op, imm_entity, immediate);
	ir_node *stack = new_r_Proj(new_node, mode, pn_sparc_Restore_stack);
	ir_node *res = new_r_Proj(new_node, mode, pn_sparc_Restore_res);
	/* the result takes over the register of the replaced node */
	const arch_register_t *reg = arch_get_irn_register(replaced);
	const arch_register_t *sp = &sparc_registers[REG_SP];
	arch_set_irn_register_out(new_node, pn_sparc_Restore_stack, sp);
	arch_set_irn_register_out(new_node, pn_sparc_Restore_res, reg);
	sched_add_before(node, new_node);
	be_peephole_exchange(node, stack);
	be_peephole_exchange(replaced, res);
/**
 * Peephole for RestoreZero: try to fold a preceding foldable operation
 * (Copy, constant Or, Add, Sub-of-immediate) into the restore's free adder.
 * NOTE(review): this chunk is missing several lines — the `continue`s after
 * the early filter checks, an `} else {` inside the Add case, the n_tries
 * handling and closing braces.
 */
static void peephole_sparc_RestoreZero(ir_node *node)
	/* restore gives us a free "add" instruction, let's try to use that to fold
	 * an instruction in. We can do the following:
	 * - Copy values (g0 + reg)
	 * - Produce constants (g0 + immediate)
	 * - Perform an add (reg + reg)
	 * - Perform a sub with immediate (reg + (-immediate))
	 * Note: In an ideal world, this would not be a peephole optimization but
	 * already performed during code selection. Since about all foldable ops are
	 * arguments of the return node. However we have a hard time doing this
	 * since we construct epilogue code only after register allocation
	 * (and therefore after code selection).
	 */
	int n_tries = 10; /* limit our search */
	sched_foreach_reverse_before(node, schedpoint) {
		/* skip nodes without a data result */
		if (arch_get_irn_n_outs(schedpoint) == 0)
		if (!mode_is_data(get_irn_mode(schedpoint)))
		/* the result must survive the register-window rotation */
		arch_register_t const *const reg = arch_get_irn_register(schedpoint);
		if (!is_restorezeroopt_reg(reg))
		if (be_is_Copy(schedpoint) && be_can_move_down(heights, schedpoint, node)) {
			/* a Copy is g0 + reg */
			ir_node *const op = be_get_Copy_op(schedpoint);
			replace_with_restore_imm(node, schedpoint, op, NULL, 0);
		} else if (is_sparc_Or(schedpoint) &&
		           arch_get_irn_flags(schedpoint) & ((arch_irn_flags_t)sparc_arch_irn_flag_immediate_form) &&
		           arch_get_irn_register_in(schedpoint, 0) == &sparc_registers[REG_G0] &&
		           be_can_move_down(heights, schedpoint, node)) {
			/* it's a constant */
			const sparc_attr_t *attr = get_sparc_attr_const(schedpoint);
			ir_entity *entity = attr->immediate_value_entity;
			int32_t immediate = attr->immediate_value;
			ir_node *g0 = get_irn_n(schedpoint, 0);
			replace_with_restore_imm(node, schedpoint, g0, entity, immediate);
		} else if (is_sparc_Add(schedpoint) &&
		           be_can_move_down(heights, schedpoint, node)) {
			if (arch_get_irn_flags(schedpoint) & ((arch_irn_flags_t)sparc_arch_irn_flag_immediate_form)) {
				/* Add reg + immediate */
				ir_node *op = get_irn_n(schedpoint, 0);
				const sparc_attr_t *attr = get_sparc_attr_const(schedpoint);
				ir_entity *entity = attr->immediate_value_entity;
				int32_t imm = attr->immediate_value;
				replace_with_restore_imm(node, schedpoint, op, entity, imm);
				/* Add reg + reg (presumably the else branch; `} else {`
				 * is not visible in this chunk) */
				ir_node *op0 = get_irn_n(schedpoint, 0);
				ir_node *op1 = get_irn_n(schedpoint, 1);
				replace_with_restore_reg(node, schedpoint, op0, op1);
		} else if (is_sparc_Sub(schedpoint) &&
		           arch_get_irn_flags(schedpoint) & ((arch_irn_flags_t)sparc_arch_irn_flag_immediate_form) &&
		           arch_get_irn_register_in(schedpoint, 0) == &sparc_registers[REG_G0] &&
		           be_can_move_down(heights, schedpoint, node)) {
			/* it's a constant */
			const sparc_attr_t *attr = get_sparc_attr_const(schedpoint);
			ir_entity *entity = attr->immediate_value_entity;
			int32_t imm = attr->immediate_value;
			/* Sub from g0 is a negated constant -> fold as -imm if encodeable */
			if (entity == NULL && sparc_is_value_imm_encodeable(-imm)) {
				ir_node *g0 = get_irn_n(schedpoint, 0);
				replace_with_restore_imm(node, schedpoint, g0, NULL, -imm);
		/* when we're here then we performed a folding and are done */
/**
 * Finishing pass for Return nodes: move a preceding Restore/RestoreZero
 * so it is scheduled immediately before the Return.
 */
static void finish_sparc_Return(ir_node *node)
	/* Ensure that the restore is directly before the return. */
	sched_foreach_reverse_before(node, restore) {
		if (is_sparc_Restore(restore) || is_sparc_RestoreZero(restore)) {
			/* re-insert right in front of the Return */
			sched_remove(restore);
			sched_add_before(node, restore);
564 static void register_peephole_optimisation(ir_op *op, peephole_opt_func func)
566 assert(op->ops.generic == NULL);
567 op->ops.generic = (op_func) func;
/**
 * Graph-walker callback: report nodes that need a frame entity (Reloads
 * without one, frame-entity Ld/Ldf) to the frame-entity coalescer.
 * NOTE(review): declarations of `mode`/`align`/`entity` and several early
 * returns / the 64bit-spillslot handling are not visible in this chunk;
 * `entity` is assigned but unused in the visible lines.
 */
static void sparc_collect_frame_entity_nodes(ir_node *node, void *data)
	be_fec_env_t *env = (be_fec_env_t*)data;
	const sparc_load_store_attr_t *attr;
	if (be_is_Reload(node) && be_get_frame_entity(node) == NULL) {
		mode = get_irn_mode(node);
		align = get_mode_size_bytes(mode);
		be_node_needs_frame_entity(env, node, mode, align);
	/* only loads are interesting beyond this point */
	if (!is_sparc_Ld(node) && !is_sparc_Ldf(node))
	attr = get_sparc_load_store_attr_const(node);
	entity = attr->base.immediate_value_entity;
	mode = attr->load_store_mode;
	if (!attr->is_frame_entity)
	if (arch_get_irn_flags(node) & sparc_arch_irn_flag_needs_64bit_spillslot)
	align = get_mode_size_bytes(mode);
	be_node_needs_frame_entity(env, node, mode, align);
/**
 * Callback for be_assign_entities(): record the assigned frame entity,
 * either in the backend node or in the sparc load/store attributes.
 * NOTE(review): the `} else {` between the two branches is not visible in
 * this chunk.
 */
static void sparc_set_frame_entity(ir_node *node, ir_entity *entity)
	if (is_be_node(node)) {
		be_node_set_frame_entity(node, entity);
		/* we only say be_node_needs_frame_entity on nodes with load_store
		 * attributes, so this should be fine */
		sparc_load_store_attr_t *attr = get_sparc_load_store_attr(node);
		assert(attr->is_frame_entity);
		assert(attr->base.immediate_value_entity == NULL);
		attr->base.immediate_value_entity = entity;
/**
 * SPARC backend finishing phase: assign frame entities, create
 * prologue/epilogue code, fix stack offsets, then run two be_peephole
 * passes — first real peephole optimizations, then legalization of nodes
 * whose immediates became too big.
 */
void sparc_finish_graph(ir_graph *irg)
	be_stack_layout_t *stack_layout = be_get_irg_stack_layout(irg);
	bool at_begin = stack_layout->sp_relative ? true : false;
	be_fec_env_t *fec_env = be_new_frame_entity_coalescer(irg);
	/* collect spill/reload nodes and assign coalesced frame entities */
	irg_walk_graph(irg, NULL, sparc_collect_frame_entity_nodes, fec_env);
	be_assign_entities(fec_env, sparc_set_frame_entity, at_begin);
	be_free_frame_entity_coalescer(fec_env);
	sparc_adjust_stack_entity_offsets(irg);
	sparc_introduce_prolog_epilog(irg);
	/* fix stack entity offsets */
	be_abi_fix_stack_nodes(irg);
	sparc_fix_stack_bias(irg);
	/* height info is needed by be_can_move_down() in the RestoreZero pass */
	heights = heights_new(irg);
	/* perform peephole optimizations */
	ir_clear_opcodes_generic_func();
	register_peephole_optimisation(op_be_IncSP, peephole_be_IncSP);
	register_peephole_optimisation(op_sparc_FrameAddr, peephole_sparc_FrameAddr);
	register_peephole_optimisation(op_sparc_RestoreZero,
	                               peephole_sparc_RestoreZero);
	register_peephole_optimisation(op_sparc_Ldf, split_sparc_ldf);
	be_peephole_opt(irg);
	/* perform legalizations (mostly fix nodes with too big immediates) */
	ir_clear_opcodes_generic_func();
	register_peephole_optimisation(op_be_IncSP, finish_be_IncSP);
	register_peephole_optimisation(op_sparc_FrameAddr, finish_sparc_FrameAddr);
	register_peephole_optimisation(op_sparc_Ld, finish_sparc_Ld);
	register_peephole_optimisation(op_sparc_Ldf, finish_sparc_Ldf);
	register_peephole_optimisation(op_sparc_Return, finish_sparc_Return);
	register_peephole_optimisation(op_sparc_Save, finish_sparc_Save);
	register_peephole_optimisation(op_sparc_St, finish_sparc_St);
	register_peephole_optimisation(op_sparc_Stf, finish_sparc_Stf);
	be_peephole_opt(irg);
	heights_free(heights);
	be_remove_dead_nodes_from_schedule(irg);