#include "../bemodule.h"
#include "../begnuas.h"
#include "../belistsched.h"
+#include "../bestack.h"
#include "bearch_TEMPLATE_t.h"
*/
static void TEMPLATE_finish_irg(ir_graph *irg)
{
- (void) irg;
+ /* fix stack entity offsets */
+ be_abi_fix_stack_nodes(irg);
+ be_abi_fix_stack_bias(irg);
}
/* Some stuff you need to do after scheduling but before register allocation */
}
-static void TEMPLATE_after_ra(ir_graph *irg)
-{
- (void) irg;
- /* Some stuff you need to do immediatly after register allocation */
-}
-
static void TEMPLATE_init_graph(ir_graph *irg)
{
(void) irg;
NULL, /* before_abi */
TEMPLATE_prepare_graph,
TEMPLATE_before_ra,
- TEMPLATE_after_ra,
TEMPLATE_finish_irg,
TEMPLATE_emit_routine,
TEMPLATE_register_saved_by,
#include "../belistsched.h"
#include "../beflags.h"
#include "../bespillslots.h"
+#include "../bestack.h"
#include "bearch_amd64_t.h"
dump_ir_graph(irg, "transformed");
}
-
-/**
- * Called immediatly before emit phase.
- */
-static void amd64_finish_irg(ir_graph *irg)
-{
- (void) irg;
-}
-
static void amd64_before_ra(ir_graph *irg)
{
be_sched_fix_flags(irg, &amd64_reg_classes[CLASS_amd64_flags], NULL, NULL);
}
-
static void transform_Reload(ir_node *node)
{
ir_graph *irg = get_irn_irg(node);
}
}
-static void amd64_after_ra(ir_graph *irg)
+/**
+ * Called immediately before emit phase.
+ */
+static void amd64_finish_irg(ir_graph *irg)
{
be_stack_layout_t *stack_layout = be_get_irg_stack_layout(irg);
bool at_begin = stack_layout->sp_relative ? true : false;
be_free_frame_entity_coalescer(fec_env);
irg_block_walk_graph(irg, NULL, amd64_after_ra_walker, NULL);
+
+ /* fix stack entity offsets */
+ be_abi_fix_stack_nodes(irg);
+ be_abi_fix_stack_bias(irg);
}
/**
NULL, /* before_abi */
amd64_prepare_graph,
amd64_before_ra,
- amd64_after_ra,
amd64_finish_irg,
amd64_gen_routine,
amd64_register_saved_by,
#include "../begnuas.h"
#include "../belistsched.h"
#include "../beflags.h"
+#include "../bestack.h"
#include "bearch_arm_t.h"
place_code(irg);
}
-/**
- * Called immediately before emit phase.
- */
-static void arm_finish_irg(ir_graph *irg)
+static void arm_collect_frame_entity_nodes(ir_node *node, void *data)
{
- /* do peephole optimizations and fix stack offsets */
- arm_peephole_optimization(irg);
+ be_fec_env_t *env = (be_fec_env_t*)data;
+ const ir_mode *mode;
+ int align;
+ ir_entity *entity;
+ const arm_load_store_attr_t *attr;
+
+ if (be_is_Reload(node) && be_get_frame_entity(node) == NULL) {
+ mode = get_irn_mode(node);
+ align = get_mode_size_bytes(mode);
+ be_node_needs_frame_entity(env, node, mode, align);
+ return;
+ }
+
+ switch (get_arm_irn_opcode(node)) {
+ case iro_arm_Ldf:
+ case iro_arm_Ldr:
+ break;
+ default:
+ return;
+ }
+
+ attr = get_arm_load_store_attr_const(node);
+ entity = attr->entity;
+ mode = attr->load_store_mode;
+ align = get_mode_size_bytes(mode);
+ if (entity != NULL)
+ return;
+ if (!attr->is_frame_entity)
+ return;
+ be_node_needs_frame_entity(env, node, mode, align);
}
-static void arm_before_ra(ir_graph *irg)
+static void arm_set_frame_entity(ir_node *node, ir_entity *entity)
{
- be_sched_fix_flags(irg, &arm_reg_classes[CLASS_arm_flags], NULL, NULL);
+ if (is_be_node(node)) {
+ be_node_set_frame_entity(node, entity);
+ } else {
+ arm_load_store_attr_t *attr = get_arm_load_store_attr(node);
+ attr->entity = entity;
+ }
}
static void transform_Reload(ir_node *node)
}
}
-static void arm_collect_frame_entity_nodes(ir_node *node, void *data)
-{
- be_fec_env_t *env = (be_fec_env_t*)data;
- const ir_mode *mode;
- int align;
- ir_entity *entity;
- const arm_load_store_attr_t *attr;
-
- if (be_is_Reload(node) && be_get_frame_entity(node) == NULL) {
- mode = get_irn_mode(node);
- align = get_mode_size_bytes(mode);
- be_node_needs_frame_entity(env, node, mode, align);
- return;
- }
-
- switch (get_arm_irn_opcode(node)) {
- case iro_arm_Ldf:
- case iro_arm_Ldr:
- break;
- default:
- return;
- }
-
- attr = get_arm_load_store_attr_const(node);
- entity = attr->entity;
- mode = attr->load_store_mode;
- align = get_mode_size_bytes(mode);
- if (entity != NULL)
- return;
- if (!attr->is_frame_entity)
- return;
- be_node_needs_frame_entity(env, node, mode, align);
-}
-
-static void arm_set_frame_entity(ir_node *node, ir_entity *entity)
-{
- if (is_be_node(node)) {
- be_node_set_frame_entity(node, entity);
- } else {
- arm_load_store_attr_t *attr = get_arm_load_store_attr(node);
- attr->entity = entity;
- }
-}
-
-static void arm_after_ra(ir_graph *irg)
+/**
+ * Called immediately before emit phase.
+ */
+static void arm_finish_irg(ir_graph *irg)
{
be_stack_layout_t *stack_layout = be_get_irg_stack_layout(irg);
bool at_begin = stack_layout->sp_relative ? true : false;
be_free_frame_entity_coalescer(fec_env);
irg_block_walk_graph(irg, NULL, arm_after_ra_walker, NULL);
+
+ /* fix stack entity offsets */
+ be_abi_fix_stack_nodes(irg);
+ be_abi_fix_stack_bias(irg);
+
+ /* do peephole optimizations and fix stack offsets */
+ arm_peephole_optimization(irg);
+}
+
+static void arm_before_ra(ir_graph *irg)
+{
+ be_sched_fix_flags(irg, &arm_reg_classes[CLASS_arm_flags], NULL, NULL);
}
/**
NULL, /* before_abi */
arm_prepare_graph,
arm_before_ra,
- arm_after_ra,
arm_finish_irg,
arm_gen_routine,
NULL, /* register_saved_by */
*/
void (*before_ra)(ir_graph *irg);
- /**
- * Called after register allocation.
- */
- void (*after_ra)(ir_graph *irg);
-
/**
* Called directly before done is called. This should be the last place
* where the irg is modified.
dump(DUMP_RA, irg, "ra");
- /* let the code generator prepare the graph for emitter */
- be_timer_push(T_FINISH);
- if (arch_env->impl->after_ra != NULL)
- arch_env->impl->after_ra(irg);
- be_timer_pop(T_FINISH);
-
- /* fix stack offsets */
- be_timer_push(T_ABI);
- be_abi_fix_stack_nodes(irg);
- be_remove_dead_nodes_from_schedule(irg);
- be_abi_fix_stack_bias(irg);
- be_timer_pop(T_ABI);
-
- dump(DUMP_SCHED, irg, "fix_stack_after_ra");
-
be_timer_push(T_FINISH);
if (arch_env->impl->finish != NULL)
arch_env->impl->finish(irg);
#include "../betranshlp.h"
#include "../belistsched.h"
#include "../beabihelper.h"
+#include "../bestack.h"
#include "bearch_ia32_t.h"
}
/**
- * We transform Spill and Reload here. This needs to be done before
- * stack biasing otherwise we would miss the corrected offset for these nodes.
+ * Last touchups for the graph before emit: x87 simulation to replace the
+ * virtual with real x87 instructions, creating a block schedule and peephole
+ * optimisations.
*/
-static void ia32_after_ra(ir_graph *irg)
+static void ia32_finish(ir_graph *irg)
{
+ ia32_irg_data_t *irg_data = ia32_get_irg_data(irg);
be_stack_layout_t *stack_layout = be_get_irg_stack_layout(irg);
bool at_begin = stack_layout->sp_relative ? true : false;
be_fec_env_t *fec_env = be_new_frame_entity_coalescer(irg);
irg_block_walk_graph(irg, NULL, ia32_after_ra_walker, NULL);
introduce_prolog_epilog(irg);
-}
-/**
- * Last touchups for the graph before emit: x87 simulation to replace the
- * virtual with real x87 instructions, creating a block schedule and peephole
- * optimisations.
- */
-static void ia32_finish(ir_graph *irg)
-{
- ia32_irg_data_t *irg_data = ia32_get_irg_data(irg);
+ /* fix stack entity offsets */
+ be_abi_fix_stack_nodes(irg);
+ be_abi_fix_stack_bias(irg);
+ /* fix 2-address code constraints */
ia32_finish_irg(irg);
/* we might have to rewrite x87 virtual registers */
/* do peephole optimisations */
ia32_peephole_optimization(irg);
+ be_remove_dead_nodes_from_schedule(irg);
+
/* create block schedule, this also removes empty blocks which might
* produce critical edges */
irg_data->blk_sched = be_create_block_schedule(irg);
ia32_before_abi, /* before abi introduce hook */
ia32_prepare_graph,
ia32_before_ra, /* before register allocation hook */
- ia32_after_ra, /* after register allocation hook */
ia32_finish, /* called before codegen */
ia32_emit, /* emit && done */
ia32_register_saved_by,
#include "../bemachine.h"
#include "../bemodule.h"
#include "../beirg.h"
-#include "../bespillslots.h"
#include "../begnuas.h"
#include "../belistsched.h"
#include "../beflags.h"
NULL, sparc_modifies_fp_flags);
}
-/**
- * transform reload node => load
- */
-static void transform_Reload(ir_node *node)
-{
- ir_node *block = get_nodes_block(node);
- dbg_info *dbgi = get_irn_dbg_info(node);
- ir_node *ptr = get_irn_n(node, n_be_Spill_frame);
- ir_node *mem = get_irn_n(node, n_be_Reload_mem);
- ir_mode *mode = get_irn_mode(node);
- ir_entity *entity = be_get_frame_entity(node);
- const arch_register_t *reg;
- ir_node *proj;
- ir_node *load;
-
- ir_node *sched_point = sched_prev(node);
-
- load = new_bd_sparc_Ld_imm(dbgi, block, ptr, mem, mode, entity, 0, true);
- sched_add_after(sched_point, load);
- sched_remove(node);
-
- proj = new_rd_Proj(dbgi, load, mode, pn_sparc_Ld_res);
-
- reg = arch_get_irn_register(node);
- arch_set_irn_register(proj, reg);
-
- exchange(node, proj);
-}
-
-/**
- * transform spill node => store
- */
-static void transform_Spill(ir_node *node)
-{
- ir_node *block = get_nodes_block(node);
- dbg_info *dbgi = get_irn_dbg_info(node);
- ir_node *ptr = get_irn_n(node, n_be_Spill_frame);
- ir_graph *irg = get_irn_irg(node);
- ir_node *mem = get_irg_no_mem(irg);
- ir_node *val = get_irn_n(node, n_be_Spill_val);
- ir_mode *mode = get_irn_mode(val);
- ir_entity *entity = be_get_frame_entity(node);
- ir_node *sched_point;
- ir_node *store;
-
- sched_point = sched_prev(node);
- store = new_bd_sparc_St_imm(dbgi, block, val, ptr, mem, mode, entity, 0, true);
- sched_remove(node);
- sched_add_after(sched_point, store);
-
- exchange(node, store);
-}
-
-/**
- * walker to transform be_Spill and be_Reload nodes
- */
-static void sparc_after_ra_walker(ir_node *block, void *data)
-{
- ir_node *node, *prev;
- (void) data;
-
- for (node = sched_last(block); !sched_is_begin(node); node = prev) {
- prev = sched_prev(node);
-
- if (be_is_Reload(node)) {
- transform_Reload(node);
- } else if (be_is_Spill(node)) {
- transform_Spill(node);
- }
- }
-}
-
-static void sparc_collect_frame_entity_nodes(ir_node *node, void *data)
-{
- be_fec_env_t *env = (be_fec_env_t*)data;
- const ir_mode *mode;
- int align;
- ir_entity *entity;
- const sparc_load_store_attr_t *attr;
-
- if (be_is_Reload(node) && be_get_frame_entity(node) == NULL) {
- mode = get_irn_mode(node);
- align = get_mode_size_bytes(mode);
- be_node_needs_frame_entity(env, node, mode, align);
- return;
- }
-
- if (!is_sparc_Ld(node) && !is_sparc_Ldf(node))
- return;
-
- attr = get_sparc_load_store_attr_const(node);
- entity = attr->base.immediate_value_entity;
- mode = attr->load_store_mode;
- if (entity != NULL)
- return;
- if (!attr->is_frame_entity)
- return;
- if (arch_irn_get_flags(node) & sparc_arch_irn_flag_needs_64bit_spillslot)
- mode = mode_Lu;
- align = get_mode_size_bytes(mode);
- be_node_needs_frame_entity(env, node, mode, align);
-}
-
-static void sparc_set_frame_entity(ir_node *node, ir_entity *entity)
-{
- if (is_be_node(node)) {
- be_node_set_frame_entity(node, entity);
- } else {
- /* we only say be_node_needs_frame_entity on nodes with load_store
- * attributes, so this should be fine */
- sparc_load_store_attr_t *attr = get_sparc_load_store_attr(node);
- assert(attr->is_frame_entity);
- assert(attr->base.immediate_value_entity == NULL);
- attr->base.immediate_value_entity = entity;
- }
-}
-
-static void sparc_after_ra(ir_graph *irg)
-{
- be_stack_layout_t *stack_layout = be_get_irg_stack_layout(irg);
- bool at_begin = stack_layout->sp_relative ? true : false;
- be_fec_env_t *fec_env = be_new_frame_entity_coalescer(irg);
-
- irg_walk_graph(irg, NULL, sparc_collect_frame_entity_nodes, fec_env);
- be_assign_entities(fec_env, sparc_set_frame_entity, at_begin);
- be_free_frame_entity_coalescer(fec_env);
-
- irg_block_walk_graph(irg, NULL, sparc_after_ra_walker, NULL);
-
- sparc_introduce_prolog_epilog(irg);
-}
-
static void sparc_init_graph(ir_graph *irg)
{
(void) irg;
NULL, /* before_abi */
sparc_prepare_graph,
sparc_before_ra,
- sparc_after_ra,
sparc_finish,
sparc_emit_routine,
NULL, /* register_saved_by */
#include "irprog.h"
#include "irgmod.h"
#include "ircons.h"
+#include "irgwalk.h"
#include "../bepeephole.h"
#include "../benode.h"
#include "../besched.h"
+#include "../bespillslots.h"
+#include "../bestack.h"
+#include "../beirgmod.h"
static void kill_unused_stacknodes(ir_node *node)
{
op->ops.generic = (op_func) func;
}
+/**
+ * transform reload node => load
+ */
+static void transform_Reload(ir_node *node)
+{
+ ir_node *block = get_nodes_block(node);
+ dbg_info *dbgi = get_irn_dbg_info(node);
+ ir_node *ptr = get_irn_n(node, n_be_Spill_frame);
+ ir_node *mem = get_irn_n(node, n_be_Reload_mem);
+ ir_mode *mode = get_irn_mode(node);
+ ir_entity *entity = be_get_frame_entity(node);
+ const arch_register_t *reg;
+ ir_node *proj;
+ ir_node *load;
+
+ ir_node *sched_point = sched_prev(node);
+
+ load = new_bd_sparc_Ld_imm(dbgi, block, ptr, mem, mode, entity, 0, true);
+ sched_add_after(sched_point, load);
+ sched_remove(node);
+
+ proj = new_rd_Proj(dbgi, load, mode, pn_sparc_Ld_res);
+
+ reg = arch_get_irn_register(node);
+ arch_set_irn_register(proj, reg);
+
+ exchange(node, proj);
+}
+
+/**
+ * transform spill node => store
+ */
+static void transform_Spill(ir_node *node)
+{
+ ir_node *block = get_nodes_block(node);
+ dbg_info *dbgi = get_irn_dbg_info(node);
+ ir_node *ptr = get_irn_n(node, n_be_Spill_frame);
+ ir_graph *irg = get_irn_irg(node);
+ ir_node *mem = get_irg_no_mem(irg);
+ ir_node *val = get_irn_n(node, n_be_Spill_val);
+ ir_mode *mode = get_irn_mode(val);
+ ir_entity *entity = be_get_frame_entity(node);
+ ir_node *sched_point;
+ ir_node *store;
+
+ sched_point = sched_prev(node);
+ store = new_bd_sparc_St_imm(dbgi, block, val, ptr, mem, mode, entity, 0, true);
+ sched_remove(node);
+ sched_add_after(sched_point, store);
+
+ exchange(node, store);
+}
+
+/**
+ * walker to transform be_Spill and be_Reload nodes
+ */
+static void sparc_after_ra_walker(ir_node *block, void *data)
+{
+ ir_node *node, *prev;
+ (void) data;
+
+ for (node = sched_last(block); !sched_is_begin(node); node = prev) {
+ prev = sched_prev(node);
+
+ if (be_is_Reload(node)) {
+ transform_Reload(node);
+ } else if (be_is_Spill(node)) {
+ transform_Spill(node);
+ }
+ }
+}
+
+static void sparc_collect_frame_entity_nodes(ir_node *node, void *data)
+{
+ be_fec_env_t *env = (be_fec_env_t*)data;
+ const ir_mode *mode;
+ int align;
+ ir_entity *entity;
+ const sparc_load_store_attr_t *attr;
+
+ if (be_is_Reload(node) && be_get_frame_entity(node) == NULL) {
+ mode = get_irn_mode(node);
+ align = get_mode_size_bytes(mode);
+ be_node_needs_frame_entity(env, node, mode, align);
+ return;
+ }
+
+ if (!is_sparc_Ld(node) && !is_sparc_Ldf(node))
+ return;
+
+ attr = get_sparc_load_store_attr_const(node);
+ entity = attr->base.immediate_value_entity;
+ mode = attr->load_store_mode;
+ if (entity != NULL)
+ return;
+ if (!attr->is_frame_entity)
+ return;
+ if (arch_irn_get_flags(node) & sparc_arch_irn_flag_needs_64bit_spillslot)
+ mode = mode_Lu;
+ align = get_mode_size_bytes(mode);
+ be_node_needs_frame_entity(env, node, mode, align);
+}
+
+static void sparc_set_frame_entity(ir_node *node, ir_entity *entity)
+{
+ if (is_be_node(node)) {
+ be_node_set_frame_entity(node, entity);
+ } else {
+ /* we only say be_node_needs_frame_entity on nodes with load_store
+ * attributes, so this should be fine */
+ sparc_load_store_attr_t *attr = get_sparc_load_store_attr(node);
+ assert(attr->is_frame_entity);
+ assert(attr->base.immediate_value_entity == NULL);
+ attr->base.immediate_value_entity = entity;
+ }
+}
+
void sparc_finish(ir_graph *irg)
{
+ be_stack_layout_t *stack_layout = be_get_irg_stack_layout(irg);
+ bool at_begin = stack_layout->sp_relative ? true : false;
+ be_fec_env_t *fec_env = be_new_frame_entity_coalescer(irg);
+
+ irg_walk_graph(irg, NULL, sparc_collect_frame_entity_nodes, fec_env);
+ be_assign_entities(fec_env, sparc_set_frame_entity, at_begin);
+ be_free_frame_entity_coalescer(fec_env);
+
+ irg_block_walk_graph(irg, NULL, sparc_after_ra_walker, NULL);
+
+ sparc_introduce_prolog_epilog(irg);
+
+ /* fix stack entity offsets */
+ be_abi_fix_stack_nodes(irg);
+ be_abi_fix_stack_bias(irg);
+
/* perform peephole optimizations */
clear_irp_opcodes_generic_func();
register_peephole_optimisation(op_be_IncSP, peephole_be_IncSP);
register_peephole_optimisation(op_sparc_St, finish_sparc_LdSt);
register_peephole_optimisation(op_sparc_Stf, finish_sparc_LdSt);
be_peephole_opt(irg);
+
+ be_remove_dead_nodes_from_schedule(irg);
}