/*
 * Copyright (C) 1995-2010 University of Karlsruhe.  All rights reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 */
/**
 * @file
 * @brief   Peephole optimization and legalization of a SPARC function
 * @author  Matthias Braun
 *
 * A note on SPARC stackpointer (sp) behaviour:
 * The ABI expects SPARC_MIN_STACKSIZE bytes to be available at the
 * stackpointer. This space is used to spill register windows and va_arg
 * arguments (maybe we can optimize this away for functions statically known
 * not to use va_arg...).
 * In effect this means that we allocate that extra space at the function
 * beginning, which is easy. But this space isn't really fixed at the
 * beginning of the stackframe; you should rather imagine it as always being
 * the last thing on the stack.
 * So when addressing anything stack-specific we have to account for this
 * area, while our compiler thinks the space is occupied at the beginning
 * of the stack frame. Among other things, the code here adjusts these
 * offsets accordingly.
 */
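/* A rough picture of the resulting layout (illustrative only; the actual
 * value of SPARC_MIN_STACKSIZE is defined elsewhere):
 *
 *     high addresses
 *     +---------------------------+  <- old %sp (= %fp after the save)
 *     | frame entities, spills    |  frame_size bytes
 *     +---------------------------+
 *     | register window save area |  SPARC_MIN_STACKSIZE bytes, always the
 *     | and va_arg spill area     |  "last thing" on the stack
 *     +---------------------------+  <- %sp
 *     low addresses
 */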
#include "config.h"

#include "bearch_sparc_t.h"
#include "gen_sparc_regalloc_if.h"
#include "sparc_new_nodes.h"
#include "sparc_transform.h"
#include "irprog.h"
#include "irgmod.h"
#include "ircons.h"
#include "irgwalk.h"
#include "heights.h"

#include "bepeephole.h"
#include "benode.h"
#include "besched.h"
#include "bespillslots.h"
#include "bestack.h"
#include "beirgmod.h"
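/* height information of the current graph; used by be_can_move_before() in
 * the RestoreZero peephole below */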
static ir_heights_t *heights;
static void kill_unused_stacknodes(ir_node *node)
{
	if (get_irn_n_edges(node) > 0)
		return;

	if (be_is_IncSP(node)) {
		sched_remove(node);
		kill_node(node);
	} else if (is_Phi(node)) {
		int       arity = get_irn_arity(node);
		ir_node **ins   = ALLOCAN(ir_node*, arity);
		int       i;
		sched_remove(node);
		memcpy(ins, get_irn_in(node), arity*sizeof(ins[0]));
		kill_node(node);

		for (i = 0; i < arity; ++i)
			kill_unused_stacknodes(ins[i]);
	}
}
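/* Builds the epilogue in front of a Return: a RestoreZero when a frame
 * pointer is used (the register window restore frees the whole frame),
 * otherwise an IncSP that releases frame_size bytes again. */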
static void introduce_epilog(ir_node *ret)
{
	const arch_register_t *sp_reg     = &sparc_registers[REG_SP];
	ir_graph              *irg        = get_irn_irg(ret);
	be_stack_layout_t     *layout     = be_get_irg_stack_layout(irg);
	ir_node               *block      = get_nodes_block(ret);
	ir_type               *frame_type = get_irg_frame_type(irg);
	unsigned               frame_size = get_type_size_bytes(frame_type);
	int                    sp_idx     = be_find_return_reg_input(ret, sp_reg);
	ir_node               *sp         = get_irn_n(ret, sp_idx);

	if (!layout->sp_relative) {
		const arch_register_t *fp_reg  = &sparc_registers[REG_FRAME_POINTER];
		ir_node               *fp      = be_get_initial_reg_value(irg, fp_reg);
		ir_node               *restore = new_bd_sparc_RestoreZero(NULL, block, fp);
		sched_add_before(ret, restore);
		arch_set_irn_register(restore, sp_reg);
		set_irn_n(ret, sp_idx, restore);

		kill_unused_stacknodes(sp);
	} else {
		ir_node *incsp = be_new_IncSP(sp_reg, block, sp, -frame_size, 0);
		set_irn_n(ret, sp_idx, incsp);
		sched_add_before(ret, incsp);
	}
}
void sparc_introduce_prolog_epilog(ir_graph *irg)
{
	const arch_register_t *sp_reg     = &sparc_registers[REG_SP];
	ir_node               *start      = get_irg_start(irg);
	be_stack_layout_t     *layout     = be_get_irg_stack_layout(irg);
	ir_node               *block      = get_nodes_block(start);
	ir_node               *initial_sp = be_get_initial_reg_value(irg, sp_reg);
	ir_node               *sp         = initial_sp;
	ir_node               *schedpoint = start;
	ir_type               *frame_type = get_irg_frame_type(irg);
	unsigned               frame_size = get_type_size_bytes(frame_type);

	/* introduce epilog for every return node */
	{
		ir_node *end_block = get_irg_end_block(irg);
		int      arity     = get_irn_arity(end_block);
		int      i;

		for (i = 0; i < arity; ++i) {
			ir_node *ret = get_irn_n(end_block, i);
			assert(is_sparc_Return(ret));
			introduce_epilog(ret);
		}
	}

	while (be_is_Keep(sched_next(schedpoint)))
		schedpoint = sched_next(schedpoint);

	if (!layout->sp_relative) {
		ir_node *save = new_bd_sparc_Save_imm(NULL, block, sp, NULL,
		                                      -SPARC_MIN_STACKSIZE-frame_size);
		arch_set_irn_register(save, sp_reg);
		sched_add_after(schedpoint, save);
		schedpoint = save;

		edges_reroute(initial_sp, save);
		set_irn_n(save, n_sparc_Save_stack, initial_sp);

		/* we still need the Save even if no one is explicitly using the
		 * value. (TODO: this isn't 100% correct yet, something at the end of
		 * the function should hold the Save, even if we use a restore
		 * which just overrides it instead of using the value) */
		if (get_irn_n_edges(save) == 0) {
			ir_node *in[] = { save };
			ir_node *keep = be_new_Keep(block, 1, in);
			sched_add_after(schedpoint, keep);
		}
	} else {
		ir_node *incsp = be_new_IncSP(sp_reg, block, sp, frame_size, 0);
		edges_reroute(initial_sp, incsp);
		be_set_IncSP_pred(incsp, sp);
		sched_add_after(schedpoint, incsp);
	}
}
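/* For a frame pointer based function the resulting code shape is roughly
 * (illustrative):
 *
 *     save %sp, -(SPARC_MIN_STACKSIZE + frame_size), %sp   ! prologue
 *     ...
 *     restore                                              ! epilogue
 *
 * For sp-relative frames a plain IncSP (an add/sub of %sp) is emitted at
 * both ends instead. */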
/**
 * Materializes an immediate value that does not fit into a simm13 into
 * register %g4: a SetHi (sethi) supplies the upper 22 bits and, if any of
 * the low 10 bits are set, an additional Or supplies the rest.
 */
static ir_node *create_constant_from_immediate(ir_node *node, int offset)
{
	dbg_info *dbgi  = get_irn_dbg_info(node);
	ir_node  *block = get_nodes_block(node);
	ir_node  *high  = new_bd_sparc_SetHi(dbgi, block, NULL, offset);

	sched_add_before(node, high);
	arch_set_irn_register(high, &sparc_registers[REG_G4]);

	if ((offset & 0x3ff) != 0) {
		ir_node *low = new_bd_sparc_Or_imm(dbgi, block, high, NULL, offset & 0x3ff);

		sched_add_before(node, low);
		arch_set_irn_register(low, &sparc_registers[REG_G4]);

		return low;
	}

	return high;
}
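/* Illustrative example: offset 0x12345678 has low bits 0x278, so it becomes
 *
 *     sethi %hi(0x12345678), %g4
 *     or    %g4, 0x278, %g4
 *
 * while an offset like 0x12345400 (low 10 bits all zero) needs only the
 * sethi. */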
static void finish_sparc_Save(ir_node *node)
{
	sparc_attr_t *attr   = get_sparc_attr(node);
	int           offset = attr->immediate_value;

	if (! sparc_is_value_imm_encodeable(offset)) {
		ir_node               *base     = get_irn_n(node, n_sparc_Save_stack);
		dbg_info              *dbgi     = get_irn_dbg_info(node);
		ir_node               *block    = get_nodes_block(node);
		ir_node               *constant = create_constant_from_immediate(node, offset);
		ir_node               *new_save = new_bd_sparc_Save_reg(dbgi, block, base, constant);
		const arch_register_t *reg      = arch_get_irn_register(node);

		/* we have a Save with immediate */
		assert(get_irn_arity(node) == 1);

		sched_add_before(node, new_save);
		arch_set_irn_register(new_save, reg);
		be_peephole_exchange(node, new_save);
	}
}
/**
 * SPARC immediates are limited. Split IncSP with bigger immediates if
 * necessary.
 */
static void finish_be_IncSP(ir_node *node)
{
	int offset = be_get_IncSP_offset(node);

	/* we might have to break the IncSP apart if the constant has become too big */
	if (! sparc_is_value_imm_encodeable(offset) && ! sparc_is_value_imm_encodeable(-offset)) {
		ir_node  *sp       = be_get_IncSP_pred(node);
		dbg_info *dbgi     = get_irn_dbg_info(node);
		ir_node  *block    = get_nodes_block(node);
		ir_node  *constant = create_constant_from_immediate(node, offset);
		ir_node  *sub      = new_bd_sparc_Sub_reg(dbgi, block, sp, constant);

		sched_add_before(node, sub);
		arch_set_irn_register(sub, &sparc_registers[REG_SP]);
		be_peephole_exchange(node, sub);
	}
}
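/* Note (reading of be_IncSP semantics, consistent with the prologue code
 * above): a positive IncSP offset expands the stack, and the SPARC stack
 * grows towards lower addresses, hence the replacement computes
 * sp - offset via Sub. */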
/**
 * Adjust sp-relative offsets.
 *
 * Split into multiple instructions if the offset exceeds the SPARC
 * immediate range.
 */
static void finish_sparc_FrameAddr(ir_node *node)
{
	sparc_attr_t *attr   = get_sparc_attr(node);
	int           offset = attr->immediate_value;

	if (! sparc_is_value_imm_encodeable(offset)) {
		ir_node               *base          = get_irn_n(node, n_sparc_FrameAddr_base);
		dbg_info              *dbgi          = get_irn_dbg_info(node);
		ir_node               *block         = get_nodes_block(node);
		ir_node               *constant      = create_constant_from_immediate(node, offset);
		ir_node               *new_frameaddr = new_bd_sparc_Add_reg(dbgi, block, base, constant);
		const arch_register_t *reg           = arch_get_irn_register(node);

		sched_add_before(node, new_frameaddr);
		arch_set_irn_register(new_frameaddr, reg);
		exchange(node, new_frameaddr);
	}
}
static void finish_sparc_Ld(ir_node *node)
{
	sparc_attr_t                  *attr            = get_sparc_attr(node);
	int                            offset          = attr->immediate_value;
	const sparc_load_store_attr_t *load_store_attr = get_sparc_load_store_attr_const(node);

	if (! load_store_attr->is_frame_entity)
		return;

	if (! sparc_is_value_imm_encodeable(offset)) {
		ir_node                 *ptr             = get_irn_n(node, n_sparc_Ld_ptr);
		dbg_info                *dbgi            = get_irn_dbg_info(node);
		ir_node                 *block           = get_nodes_block(node);
		ir_node                 *mem             = get_irn_n(node, n_sparc_Ld_mem);
		ir_mode                 *load_store_mode = load_store_attr->load_store_mode;
		ir_node                 *constant        = create_constant_from_immediate(node, offset);
		ir_node                 *new_load        = new_bd_sparc_Ld_reg(dbgi, block, ptr, constant, mem, load_store_mode);
		sparc_load_store_attr_t *new_load_attr   = get_sparc_load_store_attr(new_load);
		unsigned                 n_outs          = arch_get_irn_n_outs(node);
		unsigned                 i;

		new_load_attr->is_frame_entity = load_store_attr->is_frame_entity;
		new_load_attr->is_reg_reg      = load_store_attr->is_reg_reg;

		sched_add_before(node, new_load);
		for (i = 0; i < n_outs; i++) {
			arch_set_irn_register_out(new_load, i, arch_get_irn_register_out(node, i));
		}
		exchange(node, new_load);
	}
}
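/* Illustrative effect: a frame access like "ld [%fp + 8192], %l0" (8192 does
 * not fit into a simm13) becomes
 *
 *     sethi %hi(8192), %g4
 *     ld    [%fp + %g4], %l0
 *
 * i.e. the immediate addressing form is rewritten into the reg+reg form. */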
static void finish_sparc_Ldf(ir_node *node)
{
	sparc_attr_t                  *attr            = get_sparc_attr(node);
	int                            offset          = attr->immediate_value;
	const sparc_load_store_attr_t *load_store_attr = get_sparc_load_store_attr_const(node);

	if (! load_store_attr->is_frame_entity)
		return;

	if (! sparc_is_value_imm_encodeable(offset)) {
		ir_node                 *ptr             = get_irn_n(node, n_sparc_Ldf_ptr);
		dbg_info                *dbgi            = get_irn_dbg_info(node);
		ir_node                 *block           = get_nodes_block(node);
		ir_node                 *mem             = get_irn_n(node, n_sparc_Ldf_mem);
		ir_mode                 *load_store_mode = load_store_attr->load_store_mode;
		ir_node                 *constant        = create_constant_from_immediate(node, offset);
		ir_node                 *new_ptr         = new_bd_sparc_Add_reg(dbgi, block, ptr, constant);
		ir_node                 *new_load        = new_bd_sparc_Ldf_s(dbgi, block, new_ptr, mem, load_store_mode, NULL, 0, true);
		sparc_load_store_attr_t *new_load_attr   = get_sparc_load_store_attr(new_load);
		unsigned                 n_outs          = arch_get_irn_n_outs(node);
		unsigned                 i;

		new_load_attr->is_frame_entity = load_store_attr->is_frame_entity;
		new_load_attr->is_reg_reg      = load_store_attr->is_reg_reg;

		sched_add_before(node, new_load);
		for (i = 0; i < n_outs; i++) {
			arch_set_irn_register_out(new_load, i, arch_get_irn_register_out(node, i));
		}
		exchange(node, new_load);
	}
}
static void finish_sparc_St(ir_node *node)
{
	sparc_attr_t                  *attr            = get_sparc_attr(node);
	int                            offset          = attr->immediate_value;
	const sparc_load_store_attr_t *load_store_attr = get_sparc_load_store_attr_const(node);

	if (! load_store_attr->is_frame_entity)
		return;

	if (! sparc_is_value_imm_encodeable(offset)) {
		ir_node                 *ptr             = get_irn_n(node, n_sparc_St_ptr);
		dbg_info                *dbgi            = get_irn_dbg_info(node);
		ir_node                 *block           = get_nodes_block(node);
		ir_node                 *mem             = get_irn_n(node, n_sparc_St_mem);
		ir_node                 *value           = get_irn_n(node, n_sparc_St_val);
		ir_mode                 *load_store_mode = load_store_attr->load_store_mode;
		ir_node                 *constant        = create_constant_from_immediate(node, offset);
		ir_node                 *new_store       = new_bd_sparc_St_reg(dbgi, block, value, ptr, constant, mem, load_store_mode);
		sparc_load_store_attr_t *new_store_attr  = get_sparc_load_store_attr(new_store);
		unsigned                 n_outs          = arch_get_irn_n_outs(node);
		unsigned                 i;

		new_store_attr->is_frame_entity = load_store_attr->is_frame_entity;
		new_store_attr->is_reg_reg      = load_store_attr->is_reg_reg;

		sched_add_before(node, new_store);
		for (i = 0; i < n_outs; i++) {
			arch_set_irn_register_out(new_store, i, arch_get_irn_register_out(node, i));
		}
		exchange(node, new_store);
	}
}
static void finish_sparc_Stf(ir_node *node)
{
	sparc_attr_t                  *attr            = get_sparc_attr(node);
	int                            offset          = attr->immediate_value;
	const sparc_load_store_attr_t *load_store_attr = get_sparc_load_store_attr_const(node);

	if (! load_store_attr->is_frame_entity)
		return;

	if (! sparc_is_value_imm_encodeable(offset)) {
		ir_node                 *ptr             = get_irn_n(node, n_sparc_Stf_ptr);
		dbg_info                *dbgi            = get_irn_dbg_info(node);
		ir_node                 *block           = get_nodes_block(node);
		ir_node                 *mem             = get_irn_n(node, n_sparc_Stf_mem);
		ir_node                 *value           = get_irn_n(node, n_sparc_Stf_val);
		ir_mode                 *load_store_mode = load_store_attr->load_store_mode;
		ir_node                 *constant        = create_constant_from_immediate(node, offset);
		ir_node                 *new_ptr         = new_bd_sparc_Add_reg(dbgi, block, ptr, constant);
		ir_node                 *new_store       = new_bd_sparc_Stf_s(dbgi, block, value, new_ptr, mem, load_store_mode, NULL, 0, true);
		sparc_load_store_attr_t *new_store_attr  = get_sparc_load_store_attr(new_store);
		unsigned                 n_outs          = arch_get_irn_n_outs(node);
		unsigned                 i;

		new_store_attr->is_frame_entity = load_store_attr->is_frame_entity;
		new_store_attr->is_reg_reg      = load_store_attr->is_reg_reg;

		sched_add_before(node, new_store);
		for (i = 0; i < n_outs; i++) {
			arch_set_irn_register_out(new_store, i, arch_get_irn_register_out(node, i));
		}
		exchange(node, new_store);
	}
}
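/* Merge an IncSP that directly follows a Save (and is its only user) into
 * the Save's immediate: both merely adjust the stack pointer, so a single
 * Save suffices. */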
static void peephole_be_IncSP(ir_node *node)
{
	ir_node *pred;
	node = be_peephole_IncSP_IncSP(node);
	if (!be_is_IncSP(node))
		return;

	pred = be_get_IncSP_pred(node);
	if (is_sparc_Save(pred) && be_has_only_one_user(pred)) {
		int           offset = -be_get_IncSP_offset(node);
		sparc_attr_t *attr   = get_sparc_attr(pred);
		attr->immediate_value += offset;
		be_peephole_exchange(node, pred);
	}
}
static void peephole_sparc_FrameAddr(ir_node *node)
{
	/* the peephole code currently doesn't allow this since it changes
	 * the register. Find out why and how to work around this... */
#if 0
	const sparc_attr_t *attr = get_sparc_attr_const(node);
	if (attr->immediate_value == 0) {
		ir_node *base = get_irn_n(node, n_sparc_FrameAddr_base);
		be_peephole_exchange(node, base);
	}
#endif
	(void) node;
}
/* The result must live in a global or in register: the restore rotates the
 * register window, so of the old window only the in registers remain
 * visible afterwards (as out registers); locals and outs are gone. */
static bool is_restorezeroopt_reg(const arch_register_t *reg)
{
	unsigned index = reg->global_index;
	return (index >= REG_G0 && index <= REG_G7)
	    || (index >= REG_I0 && index <= REG_I7);
}
static void replace_with_restore_reg(ir_node *node, ir_node *replaced,
                                     ir_node *op0, ir_node *op1)
{
	dbg_info              *dbgi     = get_irn_dbg_info(node);
	ir_node               *fp       = get_irn_n(node, n_sparc_RestoreZero_frame_pointer);
	ir_node               *block    = get_nodes_block(node);
	ir_mode               *mode     = get_irn_mode(node);
	ir_node               *new_node = new_bd_sparc_Restore_reg(dbgi, block, fp, op0, op1);
	ir_node               *stack    = new_r_Proj(new_node, mode, pn_sparc_Restore_stack);
	ir_node               *res      = new_r_Proj(new_node, mode, pn_sparc_Restore_res);
	const arch_register_t *reg      = arch_get_irn_register(replaced);
	const arch_register_t *sp       = &sparc_registers[REG_SP];
	arch_set_irn_register_out(new_node, pn_sparc_Restore_stack, sp);
	arch_set_irn_register_out(new_node, pn_sparc_Restore_res, reg);

	sched_add_before(node, new_node);
	be_peephole_exchange(node, stack);
	be_peephole_exchange(replaced, res);
}
static void replace_with_restore_imm(ir_node *node, ir_node *replaced,
                                     ir_node *op, ir_entity *imm_entity,
                                     int32_t immediate)
{
	dbg_info              *dbgi     = get_irn_dbg_info(node);
	ir_node               *fp       = get_irn_n(node, n_sparc_RestoreZero_frame_pointer);
	ir_node               *block    = get_nodes_block(node);
	ir_mode               *mode     = get_irn_mode(node);
	ir_node               *new_node
		= new_bd_sparc_Restore_imm(dbgi, block, fp, op, imm_entity, immediate);
	ir_node               *stack    = new_r_Proj(new_node, mode, pn_sparc_Restore_stack);
	ir_node               *res      = new_r_Proj(new_node, mode, pn_sparc_Restore_res);
	const arch_register_t *reg      = arch_get_irn_register(replaced);
	const arch_register_t *sp       = &sparc_registers[REG_SP];
	arch_set_irn_register_out(new_node, pn_sparc_Restore_stack, sp);
	arch_set_irn_register_out(new_node, pn_sparc_Restore_res, reg);

	sched_add_before(node, new_node);
	be_peephole_exchange(node, stack);
	be_peephole_exchange(replaced, res);
}
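/* On SPARC, "restore rs1, rs2_or_imm, rd" computes rd = rs1 + rs2 in
 * addition to restoring the register window. This free add is what the
 * folding below exploits. Illustrative example:
 *
 *     add %i0, %i1, %i0
 *     restore %g0, %g0, %g0
 *
 * can be folded into a single
 *
 *     restore %i0, %i1, %o0
 *
 * (the old window's %i0 is visible as %o0 in the caller's window). */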
static void peephole_sparc_RestoreZero(ir_node *node)
{
	/* restore gives us a free "add" instruction, let's try to use that to
	 * fold an instruction in. We can do the following:
	 *
	 * - Copy values                  (g0 + reg)
	 * - Produce constants            (g0 + immediate)
	 * - Perform an add               (reg + reg)
	 * - Perform a sub with immediate (reg + (-immediate))
	 *
	 * Note: In an ideal world this would not be a peephole optimization but
	 * would already be performed during code selection, since nearly all
	 * foldable ops are arguments of the return node. However, we have a hard
	 * time doing that because we construct the epilogue code only after
	 * register allocation (and therefore after code selection).
	 */
	int      n_tries    = 10; /* limit our search */
	ir_node *schedpoint = node;

	while (sched_has_prev(schedpoint)) {
		const arch_register_t *reg;
		schedpoint = sched_prev(schedpoint);

		if (--n_tries == 0)
			break;

		if (arch_get_irn_n_outs(schedpoint) == 0)
			continue;

		if (!mode_is_data(get_irn_mode(schedpoint)))
			return;

		reg = arch_get_irn_register(schedpoint);
		if (!is_restorezeroopt_reg(reg))
			continue;

		if (be_is_Copy(schedpoint) && be_can_move_before(heights, schedpoint, node)) {
			ir_node *op = get_irn_n(schedpoint, n_be_Copy_op);
			replace_with_restore_imm(node, schedpoint, op, NULL, 0);
		} else if (is_sparc_Or(schedpoint) &&
		           arch_get_irn_flags(schedpoint) & ((arch_irn_flags_t)sparc_arch_irn_flag_immediate_form) &&
		           arch_get_irn_register_in(schedpoint, 0) == &sparc_registers[REG_G0] &&
		           be_can_move_before(heights, schedpoint, node)) {
			/* it's a constant */
			const sparc_attr_t *attr      = get_sparc_attr_const(schedpoint);
			ir_entity          *entity    = attr->immediate_value_entity;
			int32_t             immediate = attr->immediate_value;
			ir_node            *g0        = get_irn_n(schedpoint, 0);
			replace_with_restore_imm(node, schedpoint, g0, entity, immediate);
		} else if (is_sparc_Add(schedpoint) &&
		           be_can_move_before(heights, schedpoint, node)) {
			if (arch_get_irn_flags(schedpoint) & ((arch_irn_flags_t)sparc_arch_irn_flag_immediate_form)) {
				ir_node            *op     = get_irn_n(schedpoint, 0);
				const sparc_attr_t *attr   = get_sparc_attr_const(schedpoint);
				ir_entity          *entity = attr->immediate_value_entity;
				int32_t             imm    = attr->immediate_value;
				replace_with_restore_imm(node, schedpoint, op, entity, imm);
			} else {
				ir_node *op0 = get_irn_n(schedpoint, 0);
				ir_node *op1 = get_irn_n(schedpoint, 1);
				replace_with_restore_reg(node, schedpoint, op0, op1);
			}
		} else if (is_sparc_Sub(schedpoint) &&
		           arch_get_irn_flags(schedpoint) & ((arch_irn_flags_t)sparc_arch_irn_flag_immediate_form) &&
		           arch_get_irn_register_in(schedpoint, 0) == &sparc_registers[REG_G0] &&
		           be_can_move_before(heights, schedpoint, node)) {
			/* it's a constant */
			const sparc_attr_t *attr   = get_sparc_attr_const(schedpoint);
			ir_entity          *entity = attr->immediate_value_entity;
			int32_t             imm    = attr->immediate_value;
			if (entity == NULL && sparc_is_value_imm_encodeable(-imm)) {
				ir_node *g0 = get_irn_n(schedpoint, 0);
				replace_with_restore_imm(node, schedpoint, g0, NULL, -imm);
			} else {
				continue;
			}
		} else {
			continue;
		}

		/* when we're here we performed a folding and are done */
		return;
	}
}
static void finish_sparc_Return(ir_node *node)
{
	ir_node *schedpoint = node;
	ir_node *restore;
	/* make sure there is no code between the Return and the Restore; if
	 * there is, move it in front of the Restore */
	while (true) {
		if (!sched_has_prev(schedpoint))
			return;
		schedpoint = sched_prev(schedpoint);
		if (is_sparc_Restore(schedpoint) || is_sparc_RestoreZero(schedpoint))
			break;
	}
	restore = schedpoint;
	schedpoint = sched_prev(node);
	/* move all code between return and restore up */
	while (schedpoint != restore) {
		ir_node *next_schedpoint = sched_prev(schedpoint);
		sched_remove(schedpoint);
		sched_add_before(restore, schedpoint);
		schedpoint = next_schedpoint;
	}
}
static void register_peephole_optimisation(ir_op *op, peephole_opt_func func)
{
	assert(op->ops.generic == NULL);
	op->ops.generic = (op_func) func;
}
static void sparc_collect_frame_entity_nodes(ir_node *node, void *data)
{
	be_fec_env_t  *env = (be_fec_env_t*)data;
	const ir_mode *mode;
	int            align;
	ir_entity     *entity;
	const sparc_load_store_attr_t *attr;

	if (be_is_Reload(node) && be_get_frame_entity(node) == NULL) {
		mode  = get_irn_mode(node);
		align = get_mode_size_bytes(mode);
		be_node_needs_frame_entity(env, node, mode, align);
		return;
	}

	if (!is_sparc_Ld(node) && !is_sparc_Ldf(node))
		return;

	attr   = get_sparc_load_store_attr_const(node);
	entity = attr->base.immediate_value_entity;
	mode   = attr->load_store_mode;
	if (entity != NULL)
		return;
	if (!attr->is_frame_entity)
		return;
	if (arch_get_irn_flags(node) & sparc_arch_irn_flag_needs_64bit_spillslot)
		mode = mode_Lu; /* the node needs a 64bit spillslot */
	align = get_mode_size_bytes(mode);
	be_node_needs_frame_entity(env, node, mode, align);
}
static void sparc_set_frame_entity(ir_node *node, ir_entity *entity)
{
	if (is_be_node(node)) {
		be_node_set_frame_entity(node, entity);
	} else {
		/* we only request frame entities for nodes with load/store
		 * attributes, so this should be fine */
		sparc_load_store_attr_t *attr = get_sparc_load_store_attr(node);
		assert(attr->is_frame_entity);
		assert(attr->base.immediate_value_entity == NULL);
		attr->base.immediate_value_entity = entity;
	}
}
void sparc_finish(ir_graph *irg)
{
	be_stack_layout_t *stack_layout = be_get_irg_stack_layout(irg);
	bool               at_begin     = stack_layout->sp_relative;
	be_fec_env_t      *fec_env      = be_new_frame_entity_coalescer(irg);

	irg_walk_graph(irg, NULL, sparc_collect_frame_entity_nodes, fec_env);
	be_assign_entities(fec_env, sparc_set_frame_entity, at_begin);
	be_free_frame_entity_coalescer(fec_env);

	sparc_introduce_prolog_epilog(irg);

	/* fix stack entity offsets */
	be_abi_fix_stack_nodes(irg);
	sparc_fix_stack_bias(irg);

	heights = heights_new(irg);

	/* perform peephole optimizations */
	clear_irp_opcodes_generic_func();
	register_peephole_optimisation(op_be_IncSP,        peephole_be_IncSP);
	register_peephole_optimisation(op_sparc_FrameAddr, peephole_sparc_FrameAddr);
	register_peephole_optimisation(op_sparc_RestoreZero,
	                               peephole_sparc_RestoreZero);
	be_peephole_opt(irg);

	/* perform legalizations (mostly fix nodes with too big immediates) */
	clear_irp_opcodes_generic_func();
	register_peephole_optimisation(op_be_IncSP,        finish_be_IncSP);
	register_peephole_optimisation(op_sparc_FrameAddr, finish_sparc_FrameAddr);
	register_peephole_optimisation(op_sparc_Ld,        finish_sparc_Ld);
	register_peephole_optimisation(op_sparc_Ldf,       finish_sparc_Ldf);
	register_peephole_optimisation(op_sparc_Return,    finish_sparc_Return);
	register_peephole_optimisation(op_sparc_Save,      finish_sparc_Save);
	register_peephole_optimisation(op_sparc_St,        finish_sparc_St);
	register_peephole_optimisation(op_sparc_Stf,       finish_sparc_Stf);
	be_peephole_opt(irg);

	heights_free(heights);

	be_remove_dead_nodes_from_schedule(irg);
}