#include "bespillbelady.h"
#include "bespillmorgan.h"
+#include "bespillslots.h"
#include "belower.h"
#ifdef WITH_ILP
);
dump(BE_CH_DUMP_SPILL, irg, chordal_env.cls, "-spill", dump_ir_block_graph_sched);
- be_compute_spill_offsets(&chordal_env);
- //be_coalesce_spillslots(&chordal_env);
check_for_memory_operands(&chordal_env);
be_abi_fix_stack_nodes(bi->abi, chordal_env.lv);
bitset_free(chordal_env.ignore_colors);
}
+ be_coalesce_spillslots(&chordal_env);
+
BE_TIMER_PUSH(ra_timer.t_epilog);
dump(BE_CH_DUMP_LOWER, irg, NULL, "-spilloff", dump_ir_block_graph_sched);
JavaVMInitArgs args;
JavaVMOption *opts;
- int result = 0;
long (JNICALL * create_func)(JavaVM **, void **, void *) = find_jvm_symbol(jvm_lib, "JNI_CreateJavaVM");
if(!create_func) {
ret = create_func(&env->jvm, (void **) &env->jni, &args);
free(opts);
if(ret != JNI_OK) {
- fprintf(stderr, "JNI_CreateJavaVM returned errrocode %d\n" , ret);
+ fprintf(stderr, "JNI_CreateJavaVM returned errrocode %ld\n" , ret);
return 0;
}
snprintf(cp_param, sizeof(cp_param), "-Djava.class.path=%s", jar_file);
args[0] = cp_param;
- if(!start_vm(&env, sizeof(args) / sizeof(args[0], args), args)) {
+ if(!start_vm(&env, sizeof(args) / sizeof(args[0]), args)) {
fprintf(stderr, "Couldn't initialize java VM\n");
abort();
}
static unsigned be_node_tag = FOURCC('B', 'E', 'N', 'O');
-#if 0
-typedef enum _node_kind_t {
- node_kind_spill,
- node_kind_reload,
- node_kind_perm,
- node_kind_copy,
- node_kind_kill,
- node_kind_last
-} node_kind_t;
-#endif
-
typedef enum {
be_req_kind_old_limited,
be_req_kind_negate_old_limited,
ir_type *call_tp; /**< The call type, copied from the original Call node. */
} be_call_attr_t;
-/** The be_Spill attribute type. */
typedef struct {
- be_frame_attr_t frame_attr;
- ir_node *spill_ctx; /**< The node in whose context this spill was introduced. */
-} be_spill_attr_t;
+ be_node_attr_t node_attr;
+ entity **in_entities;
+ entity **out_entities;
+} be_memperm_attr_t;
ir_op *op_be_Spill;
ir_op *op_be_Reload;
ir_op *op_be_Perm;
+ir_op *op_be_MemPerm;
ir_op *op_be_Copy;
ir_op *op_be_Keep;
ir_op *op_be_CopyKeep;
/* Acquire all needed opcodes. */
beo_base = get_next_ir_opcodes(beo_Last - 1);
- op_be_Spill = new_ir_op(beo_base + beo_Spill, "be_Spill", op_pin_state_mem_pinned, N, oparity_unary, 0, sizeof(be_spill_attr_t), &be_node_op_ops);
- op_be_Reload = new_ir_op(beo_base + beo_Reload, "be_Reload", op_pin_state_mem_pinned, N, oparity_zero, 0, sizeof(be_frame_attr_t), &be_node_op_ops);
- op_be_Perm = new_ir_op(beo_base + beo_Perm, "be_Perm", op_pin_state_pinned, N, oparity_variable, 0, sizeof(be_node_attr_t), &be_node_op_ops);
- op_be_Copy = new_ir_op(beo_base + beo_Copy, "be_Copy", op_pin_state_floats, N, oparity_unary, 0, sizeof(be_node_attr_t), &be_node_op_ops);
- op_be_Keep = new_ir_op(beo_base + beo_Keep, "be_Keep", op_pin_state_pinned, K, oparity_variable, 0, sizeof(be_node_attr_t), &be_node_op_ops);
- op_be_CopyKeep = new_ir_op(beo_base + beo_CopyKeep, "be_CopyKeep", op_pin_state_pinned, K, oparity_variable, 0, sizeof(be_node_attr_t), &be_node_op_ops);
- op_be_Call = new_ir_op(beo_base + beo_Call, "be_Call", op_pin_state_pinned, N, oparity_variable, 0, sizeof(be_call_attr_t), &be_node_op_ops);
- op_be_Return = new_ir_op(beo_base + beo_Return, "be_Return", op_pin_state_pinned, X, oparity_variable, 0, sizeof(be_return_attr_t), &be_node_op_ops);
- op_be_AddSP = new_ir_op(beo_base + beo_AddSP, "be_AddSP", op_pin_state_pinned, N, oparity_unary, 0, sizeof(be_node_attr_t), &be_node_op_ops);
- op_be_SetSP = new_ir_op(beo_base + beo_SetSP, "be_SetSP", op_pin_state_pinned, N, oparity_binary, 0, sizeof(be_stack_attr_t), &be_node_op_ops);
- op_be_IncSP = new_ir_op(beo_base + beo_IncSP, "be_IncSP", op_pin_state_pinned, N, oparity_binary, 0, sizeof(be_stack_attr_t), &be_node_op_ops);
- op_be_RegParams = new_ir_op(beo_base + beo_RegParams, "be_RegParams", op_pin_state_pinned, N, oparity_zero, 0, sizeof(be_node_attr_t), &be_node_op_ops);
- op_be_StackParam = new_ir_op(beo_base + beo_StackParam, "be_StackParam", op_pin_state_pinned, N, oparity_unary, 0, sizeof(be_frame_attr_t), &be_node_op_ops);
- op_be_FrameAddr = new_ir_op(beo_base + beo_FrameAddr, "be_FrameAddr", op_pin_state_pinned, N, oparity_unary, 0, sizeof(be_frame_attr_t), &be_node_op_ops);
- op_be_FrameLoad = new_ir_op(beo_base + beo_FrameLoad, "be_FrameLoad", op_pin_state_pinned, N, oparity_any, 0, sizeof(be_frame_attr_t), &be_node_op_ops);
- op_be_FrameStore = new_ir_op(beo_base + beo_FrameStore, "be_FrameStore", op_pin_state_pinned, N, oparity_any, 0, sizeof(be_frame_attr_t), &be_node_op_ops);
- op_be_Barrier = new_ir_op(beo_base + beo_Barrier, "be_Barrier", op_pin_state_pinned, N, oparity_any, 0, sizeof(be_node_attr_t), &be_node_op_ops);
+ op_be_Spill = new_ir_op(beo_base + beo_Spill, "be_Spill", op_pin_state_mem_pinned, N, oparity_unary, 0, sizeof(be_frame_attr_t), &be_node_op_ops);
+ op_be_Reload = new_ir_op(beo_base + beo_Reload, "be_Reload", op_pin_state_mem_pinned, N, oparity_zero, 0, sizeof(be_frame_attr_t), &be_node_op_ops);
+ op_be_Perm = new_ir_op(beo_base + beo_Perm, "be_Perm", op_pin_state_pinned, N, oparity_variable, 0, sizeof(be_node_attr_t), &be_node_op_ops);
+ op_be_MemPerm = new_ir_op(beo_base + beo_MemPerm, "be_MemPerm", op_pin_state_mem_pinned, N, oparity_variable, 0, sizeof(be_memperm_attr_t), &be_node_op_ops);
+ op_be_Copy = new_ir_op(beo_base + beo_Copy, "be_Copy", op_pin_state_floats, N, oparity_unary, 0, sizeof(be_node_attr_t), &be_node_op_ops);
+ op_be_Keep = new_ir_op(beo_base + beo_Keep, "be_Keep", op_pin_state_pinned, K, oparity_variable, 0, sizeof(be_node_attr_t), &be_node_op_ops);
+ op_be_CopyKeep = new_ir_op(beo_base + beo_CopyKeep, "be_CopyKeep", op_pin_state_pinned, K, oparity_variable, 0, sizeof(be_node_attr_t), &be_node_op_ops);
+ op_be_Call = new_ir_op(beo_base + beo_Call, "be_Call", op_pin_state_pinned, N, oparity_variable, 0, sizeof(be_call_attr_t), &be_node_op_ops);
+ op_be_Return = new_ir_op(beo_base + beo_Return, "be_Return", op_pin_state_pinned, X, oparity_variable, 0, sizeof(be_return_attr_t), &be_node_op_ops);
+ op_be_AddSP = new_ir_op(beo_base + beo_AddSP, "be_AddSP", op_pin_state_pinned, N, oparity_unary, 0, sizeof(be_node_attr_t), &be_node_op_ops);
+ op_be_SetSP = new_ir_op(beo_base + beo_SetSP, "be_SetSP", op_pin_state_pinned, N, oparity_binary, 0, sizeof(be_stack_attr_t), &be_node_op_ops);
+ op_be_IncSP = new_ir_op(beo_base + beo_IncSP, "be_IncSP", op_pin_state_pinned, N, oparity_binary, 0, sizeof(be_stack_attr_t), &be_node_op_ops);
+ op_be_RegParams = new_ir_op(beo_base + beo_RegParams, "be_RegParams", op_pin_state_pinned, N, oparity_zero, 0, sizeof(be_node_attr_t), &be_node_op_ops);
+ op_be_StackParam = new_ir_op(beo_base + beo_StackParam, "be_StackParam", op_pin_state_pinned, N, oparity_unary, 0, sizeof(be_frame_attr_t), &be_node_op_ops);
+ op_be_FrameAddr = new_ir_op(beo_base + beo_FrameAddr, "be_FrameAddr", op_pin_state_pinned, N, oparity_unary, 0, sizeof(be_frame_attr_t), &be_node_op_ops);
+ op_be_FrameLoad = new_ir_op(beo_base + beo_FrameLoad, "be_FrameLoad", op_pin_state_pinned, N, oparity_any, 0, sizeof(be_frame_attr_t), &be_node_op_ops);
+ op_be_FrameStore = new_ir_op(beo_base + beo_FrameStore, "be_FrameStore", op_pin_state_pinned, N, oparity_any, 0, sizeof(be_frame_attr_t), &be_node_op_ops);
+ op_be_Barrier = new_ir_op(beo_base + beo_Barrier, "be_Barrier", op_pin_state_pinned, N, oparity_any, 0, sizeof(be_node_attr_t), &be_node_op_ops);
set_op_tag(op_be_Spill, &be_node_tag);
set_op_tag(op_be_Reload, &be_node_tag);
set_op_tag(op_be_Perm, &be_node_tag);
+ set_op_tag(op_be_MemPerm, &be_node_tag);
set_op_tag(op_be_Copy, &be_node_tag);
set_op_tag(op_be_Keep, &be_node_tag);
set_op_tag(op_be_CopyKeep, &be_node_tag);
}
-ir_node *be_new_Spill(const arch_register_class_t *cls, const arch_register_class_t *cls_frame, ir_graph *irg, ir_node *bl, ir_node *frame, ir_node *to_spill, ir_node *ctx)
+ir_node *be_new_Spill(const arch_register_class_t *cls, const arch_register_class_t *cls_frame, ir_graph *irg, ir_node *bl, ir_node *frame, ir_node *to_spill)
{
- be_spill_attr_t *a;
+ be_frame_attr_t *a;
ir_node *in[2];
ir_node *res;
in[1] = to_spill;
res = new_ir_node(NULL, irg, bl, op_be_Spill, mode_M, 2, in);
a = init_node_attr(res, 2);
- a->frame_attr.ent = NULL;
- a->frame_attr.offset = 0;
- a->spill_ctx = ctx;
+ a->ent = NULL;
+ a->offset = 0;
be_node_set_reg_class(res, 0, cls_frame);
be_node_set_reg_class(res, 1, cls);
return irn;
}
+ir_node *be_new_MemPerm(const arch_env_t *arch_env, ir_graph *irg, ir_node *bl, int n, ir_node *in[])
+{
+ int i;
+ ir_node *frame = get_irg_frame(irg);
+ const arch_register_class_t *cls_frame = arch_get_irn_reg_class(arch_env, frame, -1);
+ ir_node *irn = new_ir_node(NULL, irg, bl, op_be_MemPerm, mode_T, n, in);
+ be_memperm_attr_t *attr;
+
+ init_node_attr(irn, n);
+ for(i = 0; i < n; ++i) {
+ be_node_set_reg_class(irn, i, cls_frame);
+ be_node_set_reg_class(irn, OUT_POS(i), cls_frame);
+ }
+
+ attr = get_irn_attr(irn);
+
+ attr->in_entities = obstack_alloc(irg->obst, n*sizeof(attr->in_entities[0]));
+ memset(attr->in_entities, 0, n*sizeof(attr->in_entities[0]));
+ attr->out_entities = obstack_alloc(irg->obst, n*sizeof(attr->out_entities[0]));
+ memset(attr->out_entities, 0, n*sizeof(attr->out_entities[0]));
+
+ return irn;
+}
+
+
ir_node *be_new_Copy(const arch_register_class_t *cls, ir_graph *irg, ir_node *bl, ir_node *op)
{
ir_node *in[1];
int be_is_Copy (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_Copy ; }
int be_is_CopyKeep (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_CopyKeep ; }
int be_is_Perm (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_Perm ; }
+int be_is_MemPerm (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_MemPerm ; }
int be_is_Keep (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_Keep ; }
int be_is_Call (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_Call ; }
int be_is_Return (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_Return ; }
return NULL;
}
+void be_set_frame_entity(const ir_node *irn, entity* ent)
+{
+ be_frame_attr_t *a;
+
+ assert(be_has_frame_entity(irn));
+
+ a = get_irn_attr(irn);
+ a->ent = ent;
+}
+
+void be_set_MemPerm_in_entity(const ir_node *irn, int n, entity *ent)
+{
+ be_memperm_attr_t *attr = get_irn_attr(irn);
+
+ assert(be_is_MemPerm(irn));
+ assert(n < get_irn_arity(irn));
+
+ attr->in_entities[n] = ent;
+}
+
+entity* be_get_MemPerm_in_entity(const ir_node* irn, int n)
+{
+ be_memperm_attr_t *attr = get_irn_attr(irn);
+
+ assert(be_is_MemPerm(irn));
+ assert(n < get_irn_arity(irn));
+
+ return attr->in_entities[n];
+}
+
+void be_set_MemPerm_out_entity(const ir_node *irn, int n, entity *ent)
+{
+ be_memperm_attr_t *attr = get_irn_attr(irn);
+
+ assert(be_is_MemPerm(irn));
+ assert(n < get_irn_arity(irn));
+
+ attr->out_entities[n] = ent;
+}
+
+entity* be_get_MemPerm_out_entity(const ir_node* irn, int n)
+{
+ be_memperm_attr_t *attr = get_irn_attr(irn);
+
+ assert(be_is_MemPerm(irn));
+ assert(n < get_irn_arity(irn));
+
+ return attr->out_entities[n];
+}
+
static void be_limited(void *data, bitset_t *bs)
{
be_req_t *req = data;
return a->dir;
}
-void be_set_Spill_entity(ir_node *irn, entity *ent)
-{
- be_spill_attr_t *a = get_irn_attr(irn);
- assert(be_is_Spill(irn));
- a->frame_attr.ent = ent;
-}
-
-void be_set_Spill_context(ir_node *irn, ir_node *ctx)
-{
- be_spill_attr_t *a = get_irn_attr(irn);
- assert(be_is_Spill(irn));
- a->spill_ctx = ctx;
-}
-
-static ir_node *find_a_spill_walker(ir_node *irn, unsigned visited_nr)
-{
- unsigned nr = get_irn_visited(irn);
-
- set_irn_visited(irn, visited_nr);
-
- if(is_Phi(irn)) {
- int i, n;
- if(nr < visited_nr) {
- for(i = 0, n = get_irn_arity(irn); i < n; ++i) {
- ir_node *n = find_a_spill_walker(get_irn_n(irn, i), visited_nr);
- if(n != NULL)
- return n;
- }
- }
- }
-
- else if(be_get_irn_opcode(irn) == beo_Spill)
- return irn;
-
- return NULL;
-}
-
-ir_node *be_get_Spill_context(const ir_node *irn) {
- const be_spill_attr_t *a = get_irn_attr(irn);
- assert(be_is_Spill(irn));
- return a->spill_ctx;
-}
-
-/**
- * Finds a spill for a reload.
- * If the reload is directly using the spill, this is simple,
- * else we perform DFS from the reload (over all PhiMs) and return
- * the first spill node we find.
- */
-static INLINE ir_node *find_a_spill(const ir_node *irn)
-{
- ir_graph *irg = get_irn_irg(irn);
- unsigned visited_nr = get_irg_visited(irg) + 1;
-
- assert(be_is_Reload(irn));
- set_irg_visited(irg, visited_nr);
- return find_a_spill_walker(be_get_Reload_mem(irn), visited_nr);
-}
-
-entity *be_get_spill_entity(const ir_node *irn)
-{
- switch(be_get_irn_opcode(irn)) {
- case beo_Reload:
- {
- ir_node *spill = find_a_spill(irn);
- return be_get_spill_entity(spill);
- }
- case beo_Spill:
- {
- be_spill_attr_t *a = get_irn_attr(irn);
- return a->frame_attr.ent;
- }
- default:
- assert(0 && "Must give spill/reload node");
- break;
- }
-
- return NULL;
-}
-
-static void link_reload_walker(ir_node *irn, void *data)
-{
- ir_node **root = (ir_node **) data;
- if(be_is_Reload(irn)) {
- set_irn_link(irn, *root);
- *root = irn;
- }
-}
-
-void be_copy_entities_to_reloads(ir_graph *irg)
-{
- ir_node *irn = NULL;
- irg_walk_graph(irg, link_reload_walker, NULL, (void *) &irn);
-
- while(irn) {
- be_frame_attr_t *a = get_irn_attr(irn);
- entity *ent = be_get_spill_entity(irn);
- a->ent = ent;
- irn = get_irn_link(irn);
- }
-}
-
-ir_node *be_spill(const arch_env_t *arch_env, ir_node *irn, ir_node *ctx)
+ir_node *be_spill(const arch_env_t *arch_env, ir_node *irn)
{
ir_node *bl = get_nodes_block(irn);
ir_graph *irg = get_irn_irg(bl);
ir_node *frame = get_irg_frame(irg);
- ir_node *insert = bl;
ir_node *spill;
const arch_register_class_t *cls = arch_get_irn_reg_class(arch_env, irn, -1);
const arch_register_class_t *cls_frame = arch_get_irn_reg_class(arch_env, frame, -1);
- spill = be_new_Spill(cls, cls_frame, irg, bl, frame, irn, ctx);
- return spill;
-
-#if 0
- /*
- * search the right insertion point. a spill of a phi cannot be put
- * directly after the phi, if there are some phis behind the one which
- * is spilled. Also, a spill of a Proj must be after all Projs of the
- * same tuple node.
- *
- * Here's one special case:
- * If the spill is in the start block, the spill must be after the frame
- * pointer is set up. This is done by setting insert to the end of the block
- * which is its default initialization (see above).
- */
-
- insert = sched_next(irn);
- if(insert != bl && bl == get_irg_start_block(irg) && sched_get_time_step(frame) >= sched_get_time_step(insert))
- insert = sched_next(frame);
-
- while((is_Phi(insert) || is_Proj(insert)) && !sched_is_end(insert))
- insert = sched_next(insert);
-
- sched_add_before(insert, spill);
+ spill = be_new_Spill(cls, cls_frame, irg, bl, frame, irn);
return spill;
-#endif
}
ir_node *be_reload(const arch_env_t *arch_env, const arch_register_class_t *cls, ir_node *insert, ir_mode *mode, ir_node *spill)
h->regs = pmap_create();
}
-
/*
_ _ _ ____ _
| \ | | ___ __| | ___ | _ \ _ _ _ __ ___ _ __ (_)_ __ __ _
}
switch(be_get_irn_opcode(irn)) {
- case beo_Spill:
- {
- be_spill_attr_t *a = (be_spill_attr_t *) at;
- ir_fprintf(f, "spill context: %+F\n", a->spill_ctx);
- }
- break;
-
case beo_IncSP:
{
be_stack_attr_t *a = (be_stack_attr_t *) at;
extern ir_op *op_be_Spill;
extern ir_op *op_be_Reload;
extern ir_op *op_be_Perm;
+extern ir_op *op_be_MemPerm;
extern ir_op *op_be_Copy;
extern ir_op *op_be_Keep;
extern ir_op *op_be_CopyKeep;
beo_Spill,
beo_Reload,
beo_Perm,
+ beo_MemPerm,
beo_Copy,
beo_Keep,
beo_CopyKeep,
/**
* Make a new Spill node.
*/
-ir_node *be_new_Spill(const arch_register_class_t *cls, const arch_register_class_t *cls_frame, ir_graph *irg, ir_node *bl, ir_node *frame, ir_node *node_to_spill, ir_node *ctx);
+ir_node *be_new_Spill(const arch_register_class_t *cls, const arch_register_class_t *cls_frame, ir_graph *irg, ir_node *bl, ir_node *frame, ir_node *node_to_spill);
/**
* Position numbers for the be_Reload inputs.
* Make a new Perm node.
*/
ir_node *be_new_Perm(const arch_register_class_t *cls, ir_graph *irg, ir_node *bl, int arity, ir_node *in[]);
+ir_node *be_new_MemPerm(const arch_env_t *arch_env, ir_graph *irg, ir_node *bl, int n, ir_node *in[]);
ir_node *be_new_Keep(const arch_register_class_t *cls, ir_graph *irg, ir_node *bl, int arity, ir_node *in[]);
ir_node *be_new_FrameLoad(const arch_register_class_t *cls_frame, const arch_register_class_t *cls_data,
* @param spill_ctx The context in which the spill is introduced (This is mostly == irn up to the case of Phis).
* @return The new spill node.
*/
-ir_node *be_spill(const arch_env_t *arch_env, ir_node *irn, ir_node *spill_ctx);
+ir_node *be_spill(const arch_env_t *arch_env, ir_node *irn);
/**
* Make a reload and insert it into the schedule.
int be_is_Reload(const ir_node *irn);
int be_is_Copy(const ir_node *irn);
int be_is_Perm(const ir_node *irn);
+int be_is_MemPerm(const ir_node *irn);
int be_is_Keep(const ir_node *irn);
int be_is_CopyKeep(const ir_node *irn);
int be_is_Call(const ir_node *irn);
*/
entity *be_get_frame_entity(const ir_node *irn);
-void be_set_Spill_entity(ir_node *irn, entity *ent);
-entity *be_get_spill_entity(const ir_node *irn);
-
-void be_set_Spill_context(ir_node *irn, ir_node *ctx);
-ir_node *be_get_Spill_context(const ir_node *irn);
-
+void be_set_frame_entity(const ir_node *irn, entity* ent);
ir_node* be_get_Reload_mem(const ir_node *irn);
ir_node* be_get_Reload_frame(const ir_node* irn);
-/**
- * Set the entities of a Reload to the ones of the Spill it is pointing to.
- * @param irg The graph.
- */
-void be_copy_entities_to_reloads(ir_graph *irg);
+void be_set_MemPerm_in_entity(const ir_node *irn, int n, entity* ent);
+entity *be_get_MemPerm_in_entity(const ir_node *irn, int n);
+
+void be_set_MemPerm_out_entity(const ir_node *irn, int n, entity* ent);
+entity *be_get_MemPerm_out_entity(const ir_node *irn, int n);
/**
* Impose a register constraint on a backend node.
/* all ordinary nodes must be spilled */
DBG((raenv->dbg, LEVEL_2, " spilling %+F\n", irn));
- spill = be_spill(raenv->aenv, irn, ctx);
+ spill = be_spill(raenv->aenv, irn);
/* remember the spill */
pset_insert_ptr(spills, spill);
-/**
- * Author: Daniel Grund, Sebastian Hack
+/*
+ * Author: Daniel Grund, Sebastian Hack, Matthias Braun
* Date: 29.09.2005
* Copyright: (c) Universitaet Karlsruhe
* Licence: This file protected by GPL - GNU GENERAL PUBLIC LICENSE.
#include "irgwalk.h"
#include "array.h"
#include "pdeq.h"
+#include "unionfind.h"
+#include "execfreq.h"
#include "belive_t.h"
#include "besched_t.h"
#include "bespill.h"
+#include "belive_t.h"
#include "benode_t.h"
#include "bechordal_t.h"
+#include "bejavacoal.h"
/* This enables re-computation of values. Current state: Unfinished and buggy. */
#undef BUGGY_REMAT
typedef struct _reloader_t reloader_t;
-typedef struct _spill_info_t spill_info_t;
struct _reloader_t {
reloader_t *next;
ir_node *reloader;
};
-struct _spill_info_t {
+typedef struct _spill_info_t {
ir_node *spilled_node;
reloader_t *reloaders;
-};
-typedef struct _spill_ctx_t {
- ir_node *spilled; /**< The spilled node. */
- ir_node *user; /**< The node this spill is for. */
- ir_node *spill; /**< The spill itself. */
-} spill_ctx_t;
+ ir_node *spill;
+} spill_info_t;
struct _spill_env_t {
const arch_register_class_t *cls;
const be_chordal_env_t *chordal_env;
struct obstack obst;
- set *spill_ctxs;
set *spills; /**< all spill_info_t's, which must be placed */
pset *mem_phis; /**< set of all special spilled phis. allocated and freed separately */
- ir_node **copies; /**< set of copies placed because of phi spills */
+
DEBUG_ONLY(firm_dbg_module_t *dbg;)
};
-/* associated Phi -> Spill*/
-typedef struct _phi_spill_assoc_t {
- ir_node *phi;
- ir_node *spill;
-} phi_spill_assoc_t;
-
-/**
- * Compare two Phi->Spill associations.
- */
-static int cmp_phi_spill_assoc(const void *a, const void *b, size_t n) {
- const phi_spill_assoc_t *p1 = a;
- const phi_spill_assoc_t *p2 = b;
- return p1->phi != p2->phi;
-}
-
-/**
- * compare two spill contexts.
- */
-static int cmp_spillctx(const void *a, const void *b, size_t n) {
- const spill_ctx_t *p = a;
- const spill_ctx_t *q = b;
- return p->user != q->user || p->spilled != q->spilled;
-}
-
/**
* Compare two spill infos.
*/
return xx->spilled_node != yy->spilled_node;
}
+/**
+ * Returns spill info for a specific value (the value that is to be spilled)
+ */
+static spill_info_t *get_spillinfo(const spill_env_t *env, ir_node *value) {
+ spill_info_t info, *res;
+ int hash = HASH_PTR(value);
+
+ info.spilled_node = value;
+ res = set_find(env->spills, &info, sizeof(info), hash);
+
+ if (res == NULL) {
+ info.reloaders = NULL;
+ info.spill = NULL;
+ res = set_insert(env->spills, &info, sizeof(info), hash);
+ }
+
+ return res;
+}
+
DEBUG_ONLY(
/* Sets the debug module of a spill environment. */
void be_set_spill_env_dbg_module(spill_env_t *env, firm_dbg_module_t *dbg) {
/* Creates a new spill environment. */
spill_env_t *be_new_spill_env(const be_chordal_env_t *chordal_env) {
spill_env_t *env = xmalloc(sizeof(env[0]));
- env->spill_ctxs = new_set(cmp_spillctx, 1024);
env->spills = new_set(cmp_spillinfo, 1024);
env->cls = chordal_env->cls;
env->chordal_env = chordal_env;
env->mem_phis = pset_new_ptr_default();
- env->copies = NEW_ARR_F(ir_node*, 0);
obstack_init(&env->obst);
return env;
}
/* Deletes a spill environment. */
void be_delete_spill_env(spill_env_t *env) {
- del_set(env->spill_ctxs);
del_set(env->spills);
del_pset(env->mem_phis);
- DEL_ARR_F(env->copies);
obstack_free(&env->obst, NULL);
free(env);
}
-/**
- * Returns a spill context. If the context did not exists, create one.
- *
- * @param sc the set containing all spill contexts
- * @param to_spill the node that should be spilled
- * @param ctx_irn an user of the spilled node
- *
- * @return a spill context.
- */
-static spill_ctx_t *be_get_spill_ctx(set *sc, ir_node *to_spill, ir_node *ctx_irn) {
- spill_ctx_t templ;
-
- templ.spilled = to_spill;
- templ.user = ctx_irn;
- templ.spill = NULL;
-
- return set_insert(sc, &templ, sizeof(templ), HASH_COMBINE(HASH_PTR(to_spill), HASH_PTR(ctx_irn)));
-}
-
/**
* Schedules a node after an instruction. (That is the place after all projs and phis
* that are scheduled after the instruction)
*
* @return a be_Spill node
*/
-static ir_node *be_spill_irn(spill_env_t *senv, ir_node *irn, ir_node *ctx_irn) {
- spill_ctx_t *ctx;
- const be_main_env_t *env = senv->chordal_env->birg->main_env;
- DBG((senv->dbg, LEVEL_1, "%+F in ctx %+F\n", irn, ctx_irn));
+static void spill_irn(spill_env_t *env, spill_info_t *spillinfo) {
+ const be_main_env_t *mainenv = env->chordal_env->birg->main_env;
+ ir_node *to_spill = spillinfo->spilled_node;
- // Has the value already been spilled?
- ctx = be_get_spill_ctx(senv->spill_ctxs, irn, ctx_irn);
- if(ctx->spill)
- return ctx->spill;
+ DBG((env->dbg, LEVEL_1, "%+F\n", to_spill));
/* Trying to spill an already spilled value, no need for a new spill
* node then, we can simply connect to the same one for this reload
*/
- if(be_is_Reload(irn)) {
- return get_irn_n(irn, be_pos_Reload_mem);
+ if(be_is_Reload(to_spill)) {
+ spillinfo->spill = get_irn_n(to_spill, be_pos_Reload_mem);
+ return;
}
- ctx->spill = be_spill(env->arch_env, irn, ctx_irn);
- sched_add_after_insn(irn, ctx->spill);
-
- return ctx->spill;
+ spillinfo->spill = be_spill(mainenv->arch_env, to_spill);
+ sched_add_after_insn(to_spill, spillinfo->spill);
}
+static void spill_node(spill_env_t *env, spill_info_t *spillinfo);
+
/**
- * Removes all copies introduced for phi-spills
+ * If the first usage of a Phi result would be out of memory
+ * there is no sense in allocating a register for it.
+ * Thus we spill it and all its operands to the same spill slot.
+ * Therefore the phi/dataB becomes a phi/Memory
+ *
+ * @param senv the spill environment
+ * @param phi the Phi node that should be spilled
+ * @param ctx_irn an user of the spilled node
*/
-static void remove_copies(spill_env_t *env) {
+static void spill_phi(spill_env_t *env, spill_info_t *spillinfo) {
+ ir_node *phi = spillinfo->spilled_node;
int i;
+ int arity = get_irn_arity(phi);
+ ir_node *block = get_nodes_block(phi);
+ ir_node **ins;
- for(i = 0; i < ARR_LEN(env->copies); ++i) {
- ir_node *node = env->copies[i];
- ir_node *src;
- const ir_edge_t *edge, *ne;
+ assert(is_Phi(phi));
- assert(be_is_Copy(node));
+ /* build a new PhiM */
+ ins = alloca(sizeof(ir_node*) * arity);
+ for(i = 0; i < arity; ++i) {
+ ins[i] = get_irg_bad(env->chordal_env->irg);
+ }
+ spillinfo->spill = new_r_Phi(env->chordal_env->irg, block, arity, ins, mode_M);
- src = be_get_Copy_op(node);
- foreach_out_edge_safe(node, edge, ne) {
- ir_node *user = get_edge_src_irn(edge);
- int user_pos = get_edge_src_pos(edge);
+ for(i = 0; i < arity; ++i) {
+ ir_node *arg = get_irn_n(phi, i);
+ spill_info_t *arg_info = get_spillinfo(env, arg);
- set_irn_n(user, user_pos, src);
- }
+ spill_node(env, arg_info);
+
+ set_irn_n(spillinfo->spill, i, arg_info->spill);
}
+}
+
+/**
+ * Spill a node.
+ *
+ * @param senv the spill environment
+ * @param to_spill the node that should be spilled
+ */
+static void spill_node(spill_env_t *env, spill_info_t *spillinfo) {
+ ir_node *to_spill;
+
+ // the node should be tagged for spilling already...
+ if(spillinfo->spill != NULL)
+ return;
- ARR_SETLEN(ir_node*, env->copies, 0);
+ to_spill = spillinfo->spilled_node;
+ if (is_Phi(to_spill) && pset_find_ptr(env->mem_phis, spillinfo->spilled_node)) {
+ spill_phi(env, spillinfo);
+ } else {
+ spill_irn(env, spillinfo);
+ }
}
static INLINE ir_node *skip_projs(ir_node *node) {
return node;
}
+#if 0
/**
* Searchs the schedule backwards until we reach the first use or def of a
* value or a phi.
// simply return first node if no def or use found
return sched_first(block);
}
-
-/**
- * If the first usage of a Phi result would be out of memory
- * there is no sense in allocating a register for it.
- * Thus we spill it and all its operands to the same spill slot.
- * Therefore the phi/dataB becomes a phi/Memory
- *
- * @param senv the spill environment
- * @param phi the Phi node that should be spilled
- * @param ctx_irn an user of the spilled node
- *
- * @return a be_Spill node
- */
-static ir_node *spill_phi(spill_env_t *senv, ir_node *phi, ir_node *ctx_irn, set *already_visited_phis, bitset_t *bs) {
- int i;
- int arity = get_irn_arity(phi);
- ir_graph *irg = senv->chordal_env->irg;
- ir_node *bl = get_nodes_block(phi);
- ir_node **ins, *phi_spill;
- phi_spill_assoc_t key;
- spill_ctx_t *ctx;
-
- assert(is_Phi(phi));
- DBG((senv->dbg, LEVEL_1, "%+F in ctx %+F\n", phi, ctx_irn));
-
- /* build a new PhiM */
- NEW_ARR_A(ir_node *, ins, arity);
- for (i = 0; i < arity; ++i) {
- ins[i] = new_r_Bad(irg);
- }
- phi_spill = new_r_Phi(senv->chordal_env->irg, bl, arity, ins, mode_M);
- key.phi = phi;
- key.spill = phi_spill;
- set_insert(already_visited_phis, &key, sizeof(key), HASH_PTR(phi));
- bitset_set(bs, get_irn_idx(phi));
-
- /* search an existing spill for this context */
- ctx = be_get_spill_ctx(senv->spill_ctxs, phi, ctx_irn);
-
- /* if not found spill the phi */
- if (! ctx->spill) {
- /* collect all arguments of the phi */
- for (i = 0; i < arity; ++i) {
- ir_node *arg = get_irn_n(phi, i);
- ir_node *sub_res;
- phi_spill_assoc_t *entry;
-
- if(is_Phi(arg) && pset_find_ptr(senv->mem_phis, arg)) {
- // looping edge?
- if(arg == phi) {
- sub_res = phi_spill;
- } else if (! bitset_is_set(bs, get_irn_idx(arg))) {
- sub_res = spill_phi(senv, arg, ctx_irn, already_visited_phis, bs);
- } else {
- /* we already visited the argument phi: get it's spill */
- key.phi = arg;
- key.spill = NULL;
- entry = set_find(already_visited_phis, &key, sizeof(key), HASH_PTR(arg));
- assert(entry && "argument phi already visited, but no spill found?!?");
- sub_res = entry->spill;
- assert(sub_res && "spill missing?!?");
- }
- } else {
- sub_res = be_spill_irn(senv, arg, ctx_irn);
- }
-
- set_irn_n(phi_spill, i, sub_res);
- }
-
- ctx->spill = phi_spill;
- }
- return ctx->spill;
-}
-
-/**
- * Spill a node.
- *
- * @param senv the spill environment
- * @param to_spill the node that should be spilled
- *
- * @return a be_Spill node
- */
-static ir_node *be_spill_node(spill_env_t *senv, ir_node *to_spill) {
- ir_graph *irg = get_irn_irg(to_spill);
- ir_node *res;
-
- if (pset_find_ptr(senv->mem_phis, to_spill)) {
- set *already_visited_phis = new_set(cmp_phi_spill_assoc, 10);
- bitset_t *bs = bitset_alloca(get_irg_last_idx(irg));
- res = spill_phi(senv, to_spill, to_spill, already_visited_phis, bs);
- del_set(already_visited_phis);
- } else {
- res = be_spill_irn(senv, to_spill, to_spill);
- }
-
- return res;
-}
+#endif
#ifdef BUGGY_REMAT
return res;
}
-static void place_copies_for_phi(spill_env_t *env, ir_node* node) {
- int i, arity;
-
- assert(is_Phi(node));
-
- /* We have to place copy nodes in the predecessor blocks to temporarily
- * produce new values that get separate spill slots
- */
- for(i = 0, arity = get_irn_arity(node); i < arity; ++i) {
- ir_node *pred_block, *arg, *copy, *insert_point;
-
- /* Don't do anything for looping edges (there's no need
- * and placing copies here breaks stuff as it suddenly
- * generates new living values through the whole loop)
- */
- arg = get_irn_n(node, i);
- if(arg == node)
- continue;
-
- pred_block = get_Block_cfgpred_block(get_nodes_block(node), i);
- copy = be_new_Copy(env->cls, get_irn_irg(arg), pred_block, arg);
-
- ARR_APP1(ir_node*, env->copies, copy);
- insert_point = find_last_use_def(env, pred_block, arg);
- sched_add_before(insert_point, copy);
-
- set_irn_n(node, i, copy);
- }
-}
-
-void be_place_copies(spill_env_t *env) {
- ir_node *node;
-
- foreach_pset(env->mem_phis, node) {
- place_copies_for_phi(env, node);
- }
-}
-
void be_spill_phi(spill_env_t *env, ir_node *node) {
- spill_ctx_t *spill_ctx;
+ int i, arity;
assert(is_Phi(node));
pset_insert_ptr(env->mem_phis, node);
- // remove spill context for this phi (if there was one)
- spill_ctx = be_get_spill_ctx(env->spill_ctxs, node, node);
- if(spill_ctx != NULL) {
- spill_ctx->spill = NULL;
+ // create spillinfos for the phi arguments
+ get_spillinfo(env, node);
+ for(i = 0, arity = get_irn_arity(node); i < arity; ++i) {
+ ir_node *arg = get_irn_n(node, i);
+ get_spillinfo(env, arg);
}
}
void be_insert_spills_reloads(spill_env_t *env) {
const arch_env_t *arch_env = env->chordal_env->birg->main_env->arch_env;
- //ir_node *node;
spill_info_t *si;
-#if 0
- // Matze: This should be pointless as beladies fix_block_borders
- // should result in the same
- DBG((env->dbg, LEVEL_1, "Reloads for mem-phis:\n"));
- foreach_pset(env->mem_phis, node) {
- const ir_edge_t *e;
-
- assert(is_Phi(node));
-
- /* Add reloads for mem_phis */
- /* BETTER: These reloads (1) should only be inserted, if they are really needed */
- DBG((env->dbg, LEVEL_1, " Mem-phi %+F\n", node));
- foreach_out_edge(node, e) {
- ir_node *user = e->src;
- if (is_Phi(user) && !pset_find_ptr(env->mem_phis, user)) {
- ir_node *use_bl = get_nodes_block(user);
- DBG((env->dbg, LEVEL_1, " non-mem-phi user %+F\n", user));
- be_add_reload_on_edge(env, node, use_bl, e->pos); /* (1) */
- }
- }
- }
-#endif
-
/* process each spilled node */
DBG((env->dbg, LEVEL_1, "Insert spills and reloads:\n"));
for(si = set_first(env->spills); si; si = set_next(env->spills)) {
if (check_remat_conditions(env, si->spilled_node, rld->reloader)) {
new_val = do_remat(env, si->spilled_node, rld->reloader);
} else {
- /* the spill for this reloader */
- ir_node *spill = be_spill_node(env, si->spilled_node);
+ /* make sure we have a spill */
+ spill_node(env, si);
/* do a reload */
- new_val = be_reload(arch_env, env->cls, rld->reloader, mode, spill);
+ new_val = be_reload(arch_env, env->cls, rld->reloader, mode, si->spill);
}
DBG((env->dbg, LEVEL_1, " %+F of %+F before %+F\n", new_val, si->spilled_node, rld->reloader));
pset_insert_ptr(values, new_val);
}
- /* introduce copies, rewire the uses */
- assert(pset_count(values) > 0 && "???");
- pset_insert_ptr(values, si->spilled_node);
- be_ssa_constr_set_ignore(env->chordal_env->dom_front, env->chordal_env->lv, values, env->mem_phis);
+ if(pset_count(values) > 0) {
+ /* introduce copies, rewire the uses */
+ pset_insert_ptr(values, si->spilled_node);
+ be_ssa_constr_set_ignore(env->chordal_env->dom_front, env->chordal_env->lv, values, env->mem_phis);
+ }
del_pset(values);
}
- remove_copies(env);
-
// reloads are placed now, but we might reuse the spill environment for further spilling decisions
del_set(env->spills);
env->spills = new_set(cmp_spillinfo, 1024);
}
void be_add_reload(spill_env_t *env, ir_node *to_spill, ir_node *before) {
- spill_info_t templ, *res;
+ spill_info_t *info;
reloader_t *rel;
assert(sched_is_scheduled(before));
assert(arch_irn_consider_in_reg_alloc(env->chordal_env->birg->main_env->arch_env, env->cls, to_spill));
- templ.spilled_node = to_spill;
- templ.reloaders = NULL;
- res = set_insert(env->spills, &templ, sizeof(templ), HASH_PTR(to_spill));
+ info = get_spillinfo(env, to_spill);
+
+ if(is_Phi(to_spill)) {
+ int i, arity;
+ // create spillinfos for the phi arguments
+ for(i = 0, arity = get_irn_arity(to_spill); i < arity; ++i) {
+ ir_node *arg = get_irn_n(to_spill, i);
+ get_spillinfo(env, arg);
+ }
+ }
rel = obstack_alloc(&env->obst, sizeof(rel[0]));
rel->reloader = before;
- rel->next = res->reloaders;
- res->reloaders = rel;
-
+ rel->next = info->reloaders;
+ info->reloaders = rel;
be_liveness_add_missing(env->chordal_env->lv);
}
// add the reload before the (cond-)jump
be_add_reload(env, to_spill, last);
}
-
-/****************************************
-
- SPILL SLOT MANAGEMENT AND OPTS
-
-****************************************/
-
-typedef struct _spill_slot_t {
- unsigned size;
- unsigned align;
- pset *members;
- ir_mode *largest_mode; /* the mode of all members with largest size */
-} spill_slot_t;
-
-typedef struct _ss_env_t {
- struct obstack ob;
- be_chordal_env_t *cenv;
- pmap *slots; /* maps spill_contexts to spill_slots */
- pmap *types; /* maps modes to types */
- DEBUG_ONLY(firm_dbg_module_t *dbg;)
-} ss_env_t;
-
-
-/**
- * Walker: compute the spill slots
- */
-static void compute_spill_slots_walker(ir_node *spill, void *env) {
- ss_env_t *ssenv = env;
- arch_env_t *arch_env = ssenv->cenv->birg->main_env->arch_env;
- ir_node *ctx;
- pmap_entry *entry;
- spill_slot_t *ss;
- const arch_register_class_t *cls;
-
- if (! be_is_Spill(spill))
- return;
-
- cls = arch_get_irn_reg_class(arch_env, spill, be_pos_Spill_val);
-
- if (cls != ssenv->cenv->cls)
- return;
-
- /* check, if this spill is for a context already known */
- ctx = be_get_Spill_context(spill);
- entry = pmap_find(ssenv->slots, ctx);
-
- if (! entry) {
- ir_mode *largest_mode = arch_register_class_mode(cls);
-
- /* this is a new spill context */
- ss = obstack_alloc(&ssenv->ob, sizeof(*ss));
- ss->members = pset_new_ptr(8);
- ss->largest_mode = largest_mode;
- ss->size = get_mode_size_bytes(ss->largest_mode);
- ss->align = arch_isa_get_reg_class_alignment(arch_env->isa, cls);
- pmap_insert(ssenv->slots, ctx, ss);
- } else {
- /* values with the same spill_ctx must go into the same spill slot */
- ss = entry->value;
-
-#ifndef NDEBUG
- /* ugly mega assert :-) */
- {
- ir_node *irn;
- struct _arch_env_t *arch_env = ssenv->cenv->birg->main_env->arch_env;
- const arch_register_class_t *cls = arch_get_irn_reg_class(arch_env, spill, be_pos_Spill_val);
- int size = get_mode_size_bytes(arch_register_class_mode(cls));
- assert((int) ss->size == size && "Different sizes for the same spill slot are not allowed.");
- for (irn = pset_first(ss->members); irn; irn = pset_next(ss->members)) {
- /* use values_interfere here, because it uses the dominance check,
- which does work for values in memory */
- assert(!values_interfere(ssenv->cenv->lv, spill, irn) && "Spills for the same spill slot must not interfere!");
- }
- }
-#endif /* NDEBUG */
- }
-
- pset_insert_ptr(ss->members, spill);
-}
-
-/**
- * qsort compare function, sort spill slots by size.
- */
-static int ss_sorter(const void *v1, const void *v2) {
- const spill_slot_t **ss1 = (const spill_slot_t **)v1;
- const spill_slot_t **ss2 = (const spill_slot_t **)v2;
- return ((int) (*ss2)->size) - ((int) (*ss1)->size);
-}
-
-
-/**
- * This function should optimize the spill slots.
- * - Coalescing of multiple slots
- * - Ordering the slots
- *
- * Input slots are in @p ssenv->slots
- * @p size The count of initial spill slots in @p ssenv->slots
- * This also is the size of the preallocated array @p ass
- *
- * @return An array of spill slots @p ass in specific order
- **/
-static void optimize_slots(ss_env_t *ssenv, int size, spill_slot_t *ass[]) {
- int i, o, used_slots;
- pmap_entry *entr;
-
- i=0;
- pmap_foreach(ssenv->slots, entr)
- ass[i++] = entr->value;
-
- /* Sort the array to minimize fragmentation and cache footprint.
- Large slots come first */
- qsort(ass, size, sizeof(ass[0]), ss_sorter);
-
- /* For each spill slot:
- - assign a new offset to this slot
- - xor find another slot to coalesce with */
- used_slots = 0;
- for (i=0; i<size; ++i) {
- /* for each spill slot */
- ir_node *n1;
- int tgt_slot = -1;
-
- DBG((ssenv->dbg, LEVEL_1, "Spill slot %d members:\n", i));
- for(n1 = pset_first(ass[i]->members); n1; n1 = pset_next(ass[i]->members))
- DBG((ssenv->dbg, LEVEL_1, " %+F\n", n1));
-
-
- for (o=0; o < used_slots && tgt_slot == -1; ++o) { /* for each offset-assigned spill slot */
- /* check inter-slot-pairs for interference */
- ir_node *n2;
- for(n1 = pset_first(ass[i]->members); n1; n1 = pset_next(ass[i]->members))
- for(n2 = pset_first(ass[o]->members); n2; n2 = pset_next(ass[o]->members))
- if(values_interfere(ssenv->cenv->lv, n1, n2)) {
- pset_break(ass[i]->members);
- pset_break(ass[o]->members);
- DBG((ssenv->dbg, LEVEL_1, " Interf %+F -- %+F\n", n1, n2));
- goto interf_detected;
- }
-
- /* if we are here, there is no interference between ass[i] and ass[o] */
- tgt_slot = o;
-
-interf_detected: /*nothing*/ ;
- }
-
- /* now the members of ass[i] join the members of ass[tgt_slot] */
-
- /* do we need a new slot? */
- if (tgt_slot == -1) {
- tgt_slot = used_slots;
- used_slots++;
-
- /* init slot */
- if (tgt_slot != i) {
- ass[tgt_slot]->size = ass[i]->size;
- del_pset(ass[tgt_slot]->members);
- ass[tgt_slot]->members = pset_new_ptr(8);
- }
- }
-
- /* copy the members to the target pset */
- /* NOTE: If src and tgt pset are the same, inserting while iterating is not allowed */
- if (tgt_slot != i)
- for(n1 = pset_first(ass[i]->members); n1; n1 = pset_next(ass[i]->members))
- pset_insert_ptr(ass[tgt_slot]->members, n1);
- }
-}
-
-#define ALIGN_SPILL_AREA 16
-#define pset_foreach(pset, elm) for(elm=pset_first(pset); elm; elm=pset_next(pset))
-
-/**
- * Returns a spill type for a mode. Keep them in a map to reduce
- * the number of types.
- *
- * @param types a map containing all created types
- * @param ss the spill slot
- *
- * Note that type types should are identical for every mode.
- * This rule might break if two different register classes return the same
- * mode but different alignments.
- */
-static ir_type *get_spill_type(pmap *types, spill_slot_t *ss) {
- pmap_entry *e = pmap_find(types, ss->largest_mode);
- ir_type *res;
-
- if (! e) {
- char buf[64];
- snprintf(buf, sizeof(buf), "spill_slot_type_%s", get_mode_name(ss->largest_mode));
- buf[sizeof(buf) - 1] = '\0';
- res = new_type_primitive(new_id_from_str(buf), ss->largest_mode);
- set_type_alignment_bytes(res, ss->align);
- pmap_insert(types, ss->largest_mode, res);
- } else {
- res = e->value;
- assert(get_type_alignment_bytes(res) == (int)ss->align);
- }
-
- return res;
-}
-
-/**
- * Create spill slot entities on the frame type.
- *
- * @param ssenv the spill environment
- * @param n number of spill slots
- * @param ss array of spill slots
- */
-static void assign_entities(ss_env_t *ssenv, int n_slots, spill_slot_t *ss[]) {
- int i, offset, frame_align;
- ir_type *frame;
-
- /* do not align the frame if no spill slots are needed */
- if (n_slots <= 0)
- return;
-
- frame = get_irg_frame_type(ssenv->cenv->irg);
-
- /* aligning by increasing frame size */
- offset = get_type_size_bytes(frame);
- offset = round_up2(offset, ALIGN_SPILL_AREA);
-
- /* create entities and assign offsets according to size and alignment*/
- for (i = 0; i < n_slots; ++i) {
- char buf[64];
- ident *name;
- entity *spill_ent;
- ir_node *irn;
-
- /* build entity */
- snprintf(buf, sizeof(buf), "spill_slot_%d", i);
- buf[sizeof(buf) - 1] = '\0';
- name = new_id_from_str(buf);
-
- spill_ent = new_entity(frame, name, get_spill_type(ssenv->types, ss[i]));
-
- /* align */
- offset = round_up2(offset, ss[i]->align);
- /* set */
- set_entity_offset_bytes(spill_ent, offset);
- /* next possible offset */
- offset += round_up2(ss[i]->size, ss[i]->align);
-
- pset_foreach(ss[i]->members, irn)
- be_set_Spill_entity(irn, spill_ent);
- }
-
-
- /* set final size of stack frame */
- frame_align = get_type_alignment_bytes(frame);
- set_type_size_bytes(frame, round_up2(offset, frame_align));
-}
-
-void be_compute_spill_offsets(be_chordal_env_t *cenv) {
- ss_env_t ssenv;
- spill_slot_t **ss;
- int ss_size;
- pmap_entry *pme;
-
- obstack_init(&ssenv.ob);
- ssenv.cenv = cenv;
- ssenv.slots = pmap_create();
- ssenv.types = pmap_create();
- FIRM_DBG_REGISTER(ssenv.dbg, "firm.be.spillslots");
-
- /* Get initial spill slots */
- irg_walk_graph(cenv->irg, NULL, compute_spill_slots_walker, &ssenv);
-
- /* Build an empty array for optimized spill slots */
- ss_size = pmap_count(ssenv.slots);
- ss = obstack_alloc(&ssenv.ob, ss_size * sizeof(*ss));
- optimize_slots(&ssenv, ss_size, ss);
-
- /* Integrate slots into the stack frame entity */
- assign_entities(&ssenv, ss_size, ss);
-
- /* Clean up */
- pmap_foreach(ssenv.slots, pme)
- del_pset(((spill_slot_t *)pme->value)->members);
- pmap_destroy(ssenv.slots);
- pmap_destroy(ssenv.types);
- obstack_free(&ssenv.ob, NULL);
-
- be_copy_entities_to_reloads(cenv->irg);
-}
-/**
+/*
* Author: Daniel Grund, Sebastian Hack
* Date: 29.09.2005
* Copyright: (c) Universitaet Karlsruhe
* Licence: This file protected by GPL - GNU GENERAL PUBLIC LICENSE.
*/
-
#ifndef BESPILL_H_
#define BESPILL_H_
*/
void be_spill_phi(spill_env_t *env, ir_node *node);
-/**
- * Places the necessary copies for the spilled phis in the graph
- * This has to be done once before be_insert_spill_reloads, after
- * all phis to spill have been marked with be_spill_phi.
- */
-void be_place_copies(spill_env_t *env);
-
-/**
- * Computes the spill offsets for all spill nodes in the irg
- */
-void be_compute_spill_offsets(be_chordal_env_t *cenv);
-
/**
* Sets the debug module of a spill environment.
*/
static void belady(ir_node *blk, void *env);
/*
- * Computes set of live-ins for each block with multiple predecessors and
- * places copies in the predecessors when phis get spilled
+ * Computes the set of live-ins for each block with multiple predecessors
+ * and notifies the spill algorithm which phis need to be spilled
*/
-static void place_copy_walker(ir_node *block, void *data) {
+static void spill_phi_walker(ir_node *block, void *data) {
belady_env_t *env = data;
block_info_t *block_info;
ir_node *first, *irn;
be_clear_links(chordal_env->irg);
/* Decide which phi nodes will be spilled and place copies for them into the graph */
- irg_block_walk_graph(chordal_env->irg, place_copy_walker, NULL, &env);
- be_place_copies(env.senv);
+ irg_block_walk_graph(chordal_env->irg, spill_phi_walker, NULL, &env);
/* Fix high register pressure with belady algorithm */
irg_block_walk_graph(chordal_env->irg, NULL, belady, &env);
/* belady was block-local, fix the global flow by adding reloads on the edges */
return outer_spills_needed;
}
-void be_spill_morgan(const be_chordal_env_t *chordal_env) {
+void be_spill_morgan(be_chordal_env_t *chordal_env) {
morgan_env_t env;
FIRM_DBG_REGISTER(dbg, "ir.be.spillmorgan");
/* construct control flow loop tree */
construct_cf_backedges(chordal_env->irg);
+ //dump_looptree(0, get_irg_loop(env.irg));
+ //dump_execfreqs(env.irg);
+
/* construct loop out edges and livethrough_unused sets for loops and blocks */
irg_block_walk_graph(chordal_env->irg, NULL, construct_loop_edges, &env);
construct_loop_livethrough_unused(&env, get_irg_loop(env.irg));
*/
reduce_register_pressure_in_loop(&env, get_irg_loop(env.irg), 0);
- /* Place copies for spilled phis */
- be_place_copies(env.senv);
/* Insert real spill/reload nodes and fix usages */
be_insert_spills_reloads(env.senv);
#include "be_t.h"
#include "bechordal.h"
-void be_spill_morgan(const be_chordal_env_t *env);
+void be_spill_morgan(be_chordal_env_t *env);
#endif
}
/** insert a spill at an arbitrary position */
-ir_node *be_spill2(const arch_env_t *arch_env, ir_node *irn, ir_node *insert, ir_node *ctx)
+ir_node *be_spill2(const arch_env_t *arch_env, ir_node *irn, ir_node *insert)
{
ir_node *bl = is_Block(insert)?insert:get_nodes_block(insert);
ir_graph *irg = get_irn_irg(bl);
const arch_register_class_t *cls = arch_get_irn_reg_class(arch_env, irn, -1);
const arch_register_class_t *cls_frame = arch_get_irn_reg_class(arch_env, frame, -1);
- spill = be_new_Spill(cls, cls_frame, irg, bl, frame, irn, ctx);
+ spill = be_new_Spill(cls, cls_frame, irg, bl, frame, irn);
/*
* search the right insertion point. a spill of a phi cannot be put
DBG((si->dbg, LEVEL_3, "\t inserting spill for value %+F after %+F\n", irn, before));
- spill = be_spill2(arch_env, irn, before, irn);
+ spill = be_spill2(arch_env, irn, before);
defs = set_insert_def(si->values, value);
assert(defs);
DBG((si->dbg, LEVEL_2, "\t inserting mem copy for value %+F after %+F\n", value, insert_pos));
- spill = be_spill2(arch_env, is_Block(insert_pos)?value:insert_pos, insert_pos, value);
+ spill = be_spill2(arch_env, is_Block(insert_pos)?value:insert_pos, insert_pos);
return spill;
}
/* set spill context to phi class if it has one ;) */
+#if 0
+ // Matze: not needed anymore
cls = get_phi_class(irn);
if(cls)
be_set_Spill_context(irn, cls);
else
be_set_Spill_context(irn, irn);
+#endif
}
* 1) Simplest case (phi with a non-phi arg):
* A single copy is inserted.
*
- * 2) Phi chain (phi (with phi-arg)* with non=phi arg):
+ * 2) Phi chain (phi (with phi-arg)* with non-phi arg):
* Several copies are placed, each after returning from recursion.
*
* 3) Phi-loop:
transform_to_Load(&tenv);
}
else if (be_is_Spill(node)) {
+ ir_node *spillval = get_irn_n(node, be_pos_Spill_val);
/* we always spill the whole register */
tenv.dbg = get_irn_dbg_info(node);
tenv.irn = node;
- tenv.mode = fix_spill_mode(cg, get_irn_mode(be_get_Spill_context(node)));
+ tenv.mode = fix_spill_mode(cg, get_irn_mode(spillval));
transform_to_Store(&tenv);
}
}