bera.h bechordalspill.c beasm_dump_globals.c beasm_asm_gnu.c \
sp_matrix.c becopyoptmain.c becopyopt.c becopyheur.c \
becopyilp.c becopystat.c bearch_firm.c bearch.c bechordal_draw.c \
- bechordal_draw.h lpp_rzcpx.c
+ bechordal_draw.h beirgmod.c beirgmod.h benode.c benode.h lpp_rzcpx.c
include $(topdir)/MakeRules
int i;
for(i = env->handlers_tos - 1; i >= 0; --i) {
- const arch_irn_ops_t *ops = env->handlers[i]->get_irn_ops(irn);
+ const arch_irn_handler_t *handler = env->handlers[i];
+ const arch_irn_ops_t *ops = handler->get_irn_ops(handler, irn);
+
if(ops)
return ops;
}
return fallback_irn_ops;
}
+/* Query the register requirements for operand pos of irn by dispatching
+ * to the irn_ops of the handler responsible for that node. The caller
+ * provides req as scratch storage; the ops may fill and return it. */
+const arch_register_req_t *arch_get_register_req(const arch_env_t *env,
+		arch_register_req_t *req, const ir_node *irn, int pos)
+{
+	const arch_irn_ops_t *ops = get_irn_ops(env, irn);
+	return ops->get_irn_reg_req(ops, req, irn, pos);
+}
+
int arch_get_allocatable_regs(const arch_env_t *env, const ir_node *irn,
int pos, const arch_register_class_t *cls, bitset_t *bs)
{
+ arch_register_req_t local_req;
const arch_irn_ops_t *ops = get_irn_ops(env, irn);
- const arch_register_req_t *req = ops->get_irn_reg_req(irn, pos);
+ const arch_register_req_t *req = ops->get_irn_reg_req(ops, &local_req, irn, pos);
switch(req->type) {
case arch_register_req_type_normal:
int arch_is_register_operand(const arch_env_t *env,
const ir_node *irn, int pos)
{
+ arch_register_req_t local_req;
const arch_irn_ops_t *ops = get_irn_ops(env, irn);
- const arch_register_req_t *req = ops->get_irn_reg_req(irn, pos);
+ const arch_register_req_t *req = ops->get_irn_reg_req(ops, &local_req, irn, pos);
return req != NULL;
}
const arch_register_class_t *
arch_get_irn_reg_class(const arch_env_t *env, const ir_node *irn, int pos)
{
+ arch_register_req_t local_req;
const arch_irn_ops_t *ops = get_irn_ops(env, irn);
- const arch_register_req_t *req = ops->get_irn_reg_req(irn, pos);
+ const arch_register_req_t *req = ops->get_irn_reg_req(ops, &local_req, irn, pos);
return req ? req->cls : NULL;
}
{
const arch_irn_ops_t *ops = get_irn_ops(env, irn);
assert(idx >= 0);
- return ops->get_irn_reg(irn, idx);
+ return ops->get_irn_reg(ops, irn, idx);
}
extern void arch_set_irn_register(const arch_env_t *env,
{
const arch_irn_ops_t *ops = get_irn_ops(env, irn);
assert(idx >= 0);
- ops->set_irn_reg(irn, idx, reg);
+ ops->set_irn_reg(ops, irn, idx, reg);
}
#include "firm_config.h"
-#include "irop_t.h"
-#include "irnode_t.h"
-#include "irmode_t.h"
+#include "irnode.h"
+#include "irmode.h"
#include "hashptr.h"
#include "fourcc.h"
#include "list.h"
#include "ident.h"
-#include "bearch.h"
-
struct _bitset_t;
typedef struct _arch_register_class_t arch_register_class_t;
typedef struct _arch_enum_member_t arch_enum_member_t;
typedef struct _arch_isa_if_t arch_isa_if_t;
typedef struct _arch_env_t arch_env_t;
+typedef struct _arch_irn_ops_t arch_irn_ops_t;
+typedef struct _arch_irn_handler_t arch_irn_handler_t;
typedef enum _arch_register_type_t {
arch_register_type_none = 0,
* Different types of register allocation requirements.
*/
typedef enum _arch_register_req_type_t {
- arch_register_req_type_normal, /** All registers in the class
+ arch_register_req_type_none = 0, /** No register requirement. */
+
+ arch_register_req_type_normal = 1, /** All registers in the class
are allowed. */
- arch_register_req_type_limited, /** Only a real subset of
+ arch_register_req_type_limited = 2, /** Only a real subset of
the class is allowed. */
- arch_register_req_type_equal, /** The register must equal
+ arch_register_req_type_equal = 4, /** The register must equal
another one at the node. */
- arch_register_req_type_unequal, /** The register must be unequal
+ arch_register_req_type_unequal = 8, /** The register must be unequal
to some other at the node. */
- arch_register_req_type_pair /** The register is part of a
+ arch_register_req_type_pair = 16 /** The register is part of a
register pair. */
} arch_register_req_type_t;
+/*
+ * True if the requirement carries a real constraint (limited, equal,
+ * unequal or pair), i.e. anything beyond none/normal. The original
+ * expression was broken twice: `!=` binds tighter than `&`, so the
+ * whole mask collapsed to `type & 1`, and the mask value
+ * (pair + limited - 1 == 17) did not even cover the limited bit.
+ */
+#define arch_register_req_is_constr(x) \
+	(((x)->type & (arch_register_req_type_limited | arch_register_req_type_equal \
+		| arch_register_req_type_unequal | arch_register_req_type_pair)) != 0)
+
/**
* Expresses requirements to register allocation for an operand.
*/
arch_irn_class_normal,
arch_irn_class_spill,
arch_irn_class_reload,
- arch_irn_class_copy
+ arch_irn_class_copy,
+ arch_irn_class_perm
} arch_irn_class_t;
/*
* instruction has n input and m output operands.
*/
+#define _BEARCH_TRANSFORM_INDEX(cmp, index) ((index) cmp 0 ? -((index) + 1) : (index))
+
/**
* Make an in position from an index.
* @param index The index.
* @return The position representing the index as an in operand.
*/
-#define arch_pos_make_in(index) (index)
+#define arch_pos_make_in(index) _BEARCH_TRANSFORM_INDEX(<, index)
/**
* Make an out position from an index.
* @param index The index.
* @return The position representing the index as an out operand.
*/
-#define arch_pos_make_out(index) (-((index) + 1))
+#define arch_pos_make_out(index) _BEARCH_TRANSFORM_INDEX(>=, index)
/**
* Check, if a position denotes an input operand.
* @param pos The position.
* @return The index of the position.
*/
-#define arch_pos_get_index(pos) ((pos) < 0 ? -(pos) - 1 : (pos))
+#define arch_pos_get_index(pos) _BEARCH_TRANSFORM_INDEX(<, pos)
-typedef struct _arch_irn_ops_t {
+struct _arch_irn_ops_t {
/**
* Get the register requirements for a given operand.
+ * @param self The self pointer.
* @param irn The node.
* @param pos The operand's position.
- * @return The register requirements for the selected operand,
- * or NULL, if the operand is no register.
+ * @return The register requirements for the selected operand.
+ * The pointer returned is never NULL.
*/
- const arch_register_req_t *(*get_irn_reg_req)(const ir_node *irn, int pos);
+ const arch_register_req_t *(*get_irn_reg_req)(const arch_irn_ops_t *self,
+ arch_register_req_t *req,
+ const ir_node *irn, int pos);
/**
* Get the number of operands of a node.
* output (a number < 0).
* @return The number of operands for either in, or output.
*/
- int (*get_n_operands)(const ir_node *irn, int in_out);
+ int (*get_n_operands)(const arch_irn_ops_t *self, const ir_node *irn, int in_out);
/**
* Set the register for an output operand.
* @note If the operand is not a register operand,
* the call is ignored.
*/
- void (*set_irn_reg)(ir_node *irn, int idx, const arch_register_t *reg);
+ void (*set_irn_reg)(const arch_irn_ops_t *self, ir_node *irn,
+ int idx, const arch_register_t *reg);
/**
* Get the register allocated for an output operand.
* @c arch_register_invalid, if no register has yet been
* allocated for this node.
*/
- const arch_register_t *(*get_irn_reg)(const ir_node *irn, int idx);
+ const arch_register_t *(*get_irn_reg)(const arch_irn_ops_t *self,
+ const ir_node *irn, int idx);
/**
* Classify the node.
* @param irn The node.
* @return A classification.
*/
- arch_irn_class_t (*classify)(const ir_node *irn);
+ arch_irn_class_t (*classify)(const arch_irn_ops_t *self, const ir_node *irn);
+
+};
-} arch_irn_ops_t;
+/**
+ * Get the register requirements for a node.
+ * @param env The architecture environment.
+ * @param req A pointer to a requirements structure, where the data can
+ * be put into.
+ * @param irn The node.
+ * @param pos The position of the operand you're interested in.
+ * @return A pointer to the register requirements which may <b>not</b>
+ * neccessarily be equal to @p req. If NULL is returned, the
+ * operand was no register operand.
+ */
+extern const arch_register_req_t *
+arch_get_register_req(const arch_env_t *env, arch_register_req_t *req,
+ const ir_node *irn, int pos);
/**
* Check if an operand is a register operand.
/**
* Somebody who can be asked about nodes.
*/
-typedef struct _arch_irn_handler_t {
+struct _arch_irn_handler_t {
/**
* Get the operations of an irn.
+ * @param self The handler from which the method is invoked.
* @param irn Some node.
* @return Operations for that irn.
*/
- const arch_irn_ops_t *(*get_irn_ops)(const ir_node *irn);
+ const arch_irn_ops_t *(*get_irn_ops)(const arch_irn_handler_t *handler,
+ const ir_node *irn);
-} arch_irn_handler_t;
+};
/**
* Architecture interface.
#endif
#include "bitset.h"
+#include "obst.h"
+
#include "bearch.h"
#include "irreflect.h"
}
static const arch_register_req_t *
-firm_get_irn_reg_req(const ir_node *irn, int pos)
+firm_get_irn_reg_req(const arch_irn_ops_t *self,
+ arch_register_req_t *req, const ir_node *irn, int pos)
{
- return mode_is_datab(get_irn_mode(irn)) ? &firm_std_reg_req : NULL;
+ if(mode_is_datab(get_irn_mode(irn)))
+ memcpy(req, &firm_std_reg_req, sizeof(*req));
+ else
+ req = NULL;
+
+ return req;
}
-static int firm_get_n_operands(const ir_node *irn, int in_out)
+static int firm_get_n_operands(const arch_irn_ops_t *self, const ir_node *irn, int in_out)
{
int sig = rflct_get_signature(irn);
return rflct_get_args_count(get_irn_opcode(irn), sig, in_out >= 0);
return set_insert(reg_set, &templ, sizeof(templ), hash);
}
-static void firm_set_irn_reg(ir_node *irn, int pos, const arch_register_t *reg)
+static void firm_set_irn_reg(const arch_irn_ops_t *self, ir_node *irn,
+ int pos, const arch_register_t *reg)
{
struct irn_reg_assoc *assoc = get_irn_reg_assoc(irn, pos);
assoc->reg = reg;
}
-static const arch_register_t *firm_get_irn_reg(const ir_node *irn, int pos)
+static const arch_register_t *firm_get_irn_reg(const arch_irn_ops_t *self,
+ const ir_node *irn, int pos)
{
struct irn_reg_assoc *assoc = get_irn_reg_assoc(irn, pos);
return assoc->reg;
}
-static arch_irn_class_t firm_classify(const ir_node *irn)
+static arch_irn_class_t firm_classify(const arch_irn_ops_t *self, const ir_node *irn)
{
return arch_irn_class_normal;
}
firm_get_reg_class
};
-static const arch_irn_ops_t *firm_get_irn_ops(const ir_node *irn)
+static const arch_irn_ops_t *firm_get_irn_ops(const arch_irn_handler_t *self,
+ const ir_node *irn)
{
return &irn_ops;
}
}
const draw_chordal_opts_t draw_chordal_def_opts = {
- 10, 10, 30, 8
+ 10, 10, 30, 8, 10, 10
};
typedef struct _draw_chordal_env_t {
struct block_dims *dims = pmap_get(env->block_dims, bl);
ir_node *sub;
rect_t *rect = &dims->subtree_box;
- int h_space = 0;
+ int h_space = 0, v_space = 0;
memset(rect, 0, sizeof(*rect));
rect->x = x;
rect->h = max(rect->h, bl_dim->subtree_box.h);
h_space = opts->h_gap;
+ v_space = opts->v_gap;
}
rect->w = max(rect->w, dims->box.w + opts->h_gap);
dims->box.x = x + doz(rect->w, dims->box.w) / 2;
- dims->box.y = rect->h + opts->v_gap;
+ dims->box.y = rect->h + v_space;
- rect->h += dims->box.h + opts->v_gap;
+ rect->h += dims->box.h + v_space;
}
static void set_y(const draw_chordal_env_t *env, ir_node *bl, int up)
env->plotter->vtab->set_color(env->plotter, &color);
env->plotter->vtab->line(env->plotter,
- dims->box.x + x, dims->box.y + dims->box.h,
- dom_dims->box.x + x, dom_dims->box.y);
+ dims->box.x + x,
+ dims->box.y + dims->box.h,
+ dom_dims->box.x + x,
+ dom_dims->box.y);
}
}
}
#endif
}
-static void draw(draw_chordal_env_t *env, const rect_t *bbox)
+static void draw(draw_chordal_env_t *env, const rect_t *start_box)
{
plotter_t *p = env->plotter;
+ rect_t bbox;
- p->vtab->begin(p, bbox);
+ bbox.x = bbox.y = 0;
+ bbox.w = start_box->w + 2 * env->opts->x_margin;
+ bbox.h = start_box->h + 2 * env->opts->y_margin;
+
+ p->vtab->begin(p, &bbox);
irg_block_walk_graph(env->chordal_env->irg, draw_block, NULL, env);
p->vtab->finish(p);
}
obstack_init(&env.obst);
irg_block_walk_graph(chordal_env->irg, block_dims_walker, NULL, &env);
- layout(&env, start_block, 0);
- set_y(&env, start_block, 0);
+ layout(&env, start_block, opts->x_margin);
+ set_y(&env, start_block, opts->y_margin);
start_dims = pmap_get(env.block_dims, start_block);
draw(&env, &start_dims->subtree_box);
int h_inter_gap;
int v_gap;
int v_inter_gap;
+ int x_margin;
+ int y_margin;
};
extern const draw_chordal_opts_t draw_chordal_def_opts;
#include "bitset.h"
#include "list.h"
+#include "obst.h"
#include "pset.h"
#include "pmap.h"
#include "set.h"
#include <malloc.h>
#endif
+#include "irprog.h"
+
#include "lpp.h"
#include "xmalloc.h"
#include "becopyopt.h"
#include "config.h"
#endif
+#include "irprog.h"
+
#include "xmalloc.h"
#include "bechordal_t.h"
#include "becopyopt.h"
--- /dev/null
+
+#include <stdlib.h>
+
+#include "hashptr.h"
+#include "pdeq.h"
+#include "pset.h"
+#include "pmap.h"
+
+#include "irflag_t.h"
+#include "ircons_t.h"
+#include "irnode_t.h"
+#include "irmode_t.h"
+#include "irdom_t.h"
+#include "iredges_t.h"
+
+#include "besched_t.h"
+#include "beirgmod.h"
+
+struct _dom_front_info_t {
+ pmap *df_map;
+};
+
+/**
+ * Dominance tree walker computing the local part of the dominance
+ * frontiers: a block bl is in DF(pred) for every CFG predecessor pred
+ * of bl that is not bl's immediate dominator.
+ * @param bl   The block currently visited.
+ * @param data The dom_front_info_t being filled (its df_map).
+ */
+static void compute_df_local(ir_node *bl, void *data)
+{
+	pmap *df_map = ((dom_front_info_t *) data)->df_map;
+	ir_node *idom = get_Block_idom(bl);
+	int i, n;
+
+	for(i = 0, n = get_irn_arity(bl); i < n; ++i) {
+
+		/* The predecessor block */
+		ir_node *pred = get_nodes_block(get_irn_n(bl, i));
+
+		/* The dominance frontier set of the predecessor. */
+		pset *df = pmap_get(df_map, pred);
+
+		/*
+		 * Create the dominance frontier set of the predecessor
+		 * if it didn't exist.
+		 */
+		if(!df) {
+			df = pset_new_ptr(16);
+			pmap_insert(df_map, pred, df);
+		}
+
+
+		/* NOTE(review): the "&& bl" guard looks redundant, the walker
+		 * never passes a NULL block here -- confirm and drop. */
+		if(pred != idom && bl)
+			pset_insert_ptr(df, bl);
+	}
+}
+
+/**
+ * Post-order dominance tree walker computing the "up" part of the
+ * dominance frontiers: DF(bl) inherits every w from DF(y) of each
+ * dominance-tree child y of bl, unless bl strictly dominates w.
+ * (DF_local/DF_up split as in Cytron et al. / Appel.)
+ * @param bl   The block currently visited (children already done).
+ * @param data The dom_front_info_t being filled.
+ */
+static void compute_df_up(ir_node *bl, void *data)
+{
+	pmap *df_map = ((dom_front_info_t *) data)->df_map;
+	pset *df_bl = pmap_get(df_map, bl);
+	ir_node *y;
+
+	for(y = get_Block_dominated_first(bl); y; y = get_Block_dominated_next(y)) {
+		ir_node *w;
+		pset *df = pmap_get(df_map, y);
+
+		/* A child may have an empty (unallocated) frontier. */
+		if(!df)
+			continue;
+
+		/* Create DF(bl) lazily, like the local pass does. */
+		if(!df_bl) {
+			df_bl = pset_new_ptr(16);
+			pmap_insert(df_map, bl, df_bl);
+		}
+
+		/*
+		 * Two fixes against the original: the loop condition tested
+		 * the set pointer df (never NULL -> endless loop) instead of
+		 * the iterator w, and the elements were inserted back into
+		 * DF(y) instead of into DF(bl).
+		 */
+		for(w = pset_first(df); w; w = pset_next(df))
+			if(!block_dominates(bl, w) || bl == w)
+				pset_insert_ptr(df_bl, w);
+	}
+}
+
+/**
+ * Compute the dominance frontiers of all blocks in irg.
+ * Runs the DF_local pass (pre-order) and the DF_up pass (post-order)
+ * over the dominance tree.
+ * @param irg The graph; dominance info must already be computed.
+ * @return A freshly malloc'ed info record; free with
+ *         be_free_dominance_frontiers().
+ */
+dom_front_info_t *be_compute_dominance_frontiers(ir_graph *irg)
+{
+	dom_front_info_t *info = malloc(sizeof(*info));
+
+	info->df_map = pmap_create();
+
+	/* Perhaps both can we interwined */
+	dom_tree_walk_irg(irg, compute_df_local, NULL, info);
+	dom_tree_walk_irg(irg, NULL, compute_df_up, info);
+	return info;
+}
+
+/**
+ * Release all memory held by a dominance frontier record:
+ * every per-block pset, the map itself and the record.
+ * @param info Record obtained from be_compute_dominance_frontiers().
+ */
+void be_free_dominance_frontiers(dom_front_info_t *info)
+{
+	pmap_entry *ent;
+
+	for(ent = pmap_first(info->df_map); ent; ent = pmap_next(info->df_map))
+		del_pset(ent->value);
+
+	pmap_destroy(info->df_map);
+	free(info);
+}
+
+/**
+ * Look up the dominance frontier set of a block.
+ * @return The pset of frontier blocks, or NULL if the block's
+ *         frontier is empty (no set was ever allocated for it).
+ */
+pset *be_get_dominance_frontier(dom_front_info_t *info, ir_node *block)
+{
+	return pmap_get(info->df_map, block);
+}
+
+/**
+ * Algorithm to place the Phi-Functions.
+ * @see Appel, Modern Compiler Implementation in Java, 2nd ed., p. 399ff
+ *
+ * This function takes an original node and a set of already placed
+ * copies of that node called @p copies. It places phi nodes at the
+ * iterated dominance frontiers of these copies and puts these phi nodes
+ * in the @p copies set, since they are another form of copies of the
+ * original value.
+ *
+ * The rename phase (see below) is responsible for fixing up the usages
+ * of the original node.
+ *
+ * @param orig The original node.
+ * @param copies A set containing nodes representing a copy of the
+ * original node. Each node must be inserted into the block's schedule.
+ * @param copy_blocks A set in which the blocks are recorded which
+ * contain a copy. This is just for efficiency in later phases (see
+ * rename).
+ */
+static void place_phi_functions(ir_node *orig, pset *copies,
+		pset *copy_blocks, dom_front_info_t *df_info)
+{
+	int i;
+	ir_node *orig_block = get_nodes_block(orig);
+	ir_graph *irg = get_irn_irg(orig);
+	ir_mode *mode = get_irn_mode(orig);
+	pdeq *worklist = new_pdeq();
+	pset *phi_blocks = pset_new_ptr(8);
+	ir_node **ins = NULL;
+	void *it;
+
+	/*
+	 * Allocate an array for all blocks where the copies and the original
+	 * value were defined.
+	 */
+	int n_orig_blocks = pset_count(copy_blocks) + 1;
+	ir_node **orig_blocks = malloc(n_orig_blocks * sizeof(orig_blocks[0]));
+
+	/*
+	 * Fill the array of definition blocks.
+	 */
+	orig_blocks[0] = get_nodes_block(orig);
+
+	/*
+	 * Fill the worklist queue and the rest of the orig blocks array.
+	 * NOTE(review): orig itself is never put on the worklist, so the
+	 * dominance frontier of its block is not iterated -- confirm this
+	 * cannot miss join points between the original and a copy.
+	 */
+	for(it = pset_first(copies), i = 1; it; it = pset_next(copies)) {
+		ir_node *copy_block = get_nodes_block(it);
+
+		assert(block_dominates(orig_block, copy_block)
+				&& "The block of the copy must be dominated by the block of the value");
+
+		pdeq_putr(worklist, it);
+		orig_blocks[i++] = copy_block;
+	}
+
+	while(!pdeq_empty(worklist)) {
+		ir_node *irn = pdeq_getl(worklist);
+		ir_node *bl = get_nodes_block(irn);
+		ir_node *y;
+		pset *df = be_get_dominance_frontier(df_info, bl);
+
+		for(y = pset_first(df); y; y = pset_next(df)) {
+
+			if(!pset_find_ptr(phi_blocks, y)) {
+				ir_node *phi;
+				int insert = 1;
+
+				/*
+				 * The phi's arity is the number of control flow
+				 * predecessors of the block it is placed in (y),
+				 * not of bl as the original code had it.
+				 */
+				int n_preds = get_irn_arity(y);
+
+				/*
+				 * Set the orig node as the only operand of the
+				 * phi node. (Fixed: the fill loop wrote ins[0]
+				 * repeatedly, leaving the rest uninitialized.)
+				 */
+				ins = realloc(ins, n_preds * sizeof(ins[0]));
+				for(i = 0; i < n_preds; ++i)
+					ins[i] = orig;
+
+				/* Insert phi node */
+				phi = new_r_Phi(irg, y, n_preds, ins, mode);
+
+				/*
+				 * The phi node itself is also a copy of the original
+				 * value. So put it in the copies set also, so that
+				 * the rename phase can treat them right.
+				 */
+				pset_insert_ptr(copies, phi);
+				pset_insert_ptr(copy_blocks, y);
+
+				/* Insert the phi node into the schedule */
+				sched_add_before(sched_first(y), phi);
+
+				/* Insert the phi node in the phi blocks set. */
+				pset_insert_ptr(phi_blocks, y);
+
+				/*
+				 * If orig or a copy of it were not defined in y,
+				 * add y to the worklist.
+				 */
+				for(i = 0; i < n_orig_blocks; ++i)
+					if(orig_blocks[i] == y) {
+						insert = 0;
+						break;
+					}
+
+				if(insert)
+					pdeq_putr(worklist, y);
+
+			}
+		}
+	}
+
+	del_pset(phi_blocks);
+	del_pdeq(worklist);
+
+	free(orig_blocks);
+
+	if(ins)
+		free(ins);
+}
+
+/**
+ * Find the copy of the given original node whose value is 'active'
+ * at a usage.
+ *
+ * The usage is given as a node and a position. Initially, the given operand
+ * points to a node for which copies were introduced. We have to find
+ * the valid copy for this usage. This is done by travering the
+ * dominance tree upwards. If the usage is a phi function, we start
+ * traversing from the predecessor block which corresponds to the phi
+ * usage.
+ *
+ * @param usage The node which uses the original node.
+ * @param pos The number of the argument which corresponds to the
+ * original node.
+ * @param copy_blocks A set containing all basic block in which copies
+ * of the original node are located.
+ * @param copies A set containing all node which are copies from the
+ * original node.
+ * @return The valid copy for usage.
+ */
+static ir_node *search_def(ir_node *usage, int pos, pset *copies, pset *copy_blocks)
+{
+	ir_node *curr_bl;
+
+	curr_bl = get_nodes_block(usage);
+
+	/*
+	 * If the usage is in a phi node, search the copy in the
+	 * predecessor denoted by pos.
+	 */
+	if(is_Phi(usage))
+		curr_bl = get_nodes_block(get_irn_n(curr_bl, pos));
+
+	/*
+	 * Traverse the dominance tree upwards from the
+	 * predecessor block of the usage.
+	 */
+	while(curr_bl != NULL) {
+
+		/*
+		 * If this block contains a copy, search the block
+		 * instruction by instruction.
+		 */
+		if(pset_find_ptr(copy_blocks, curr_bl)) {
+			ir_node *irn;
+
+			/* Look at each instruction from last to first. */
+			sched_foreach_reverse(curr_bl, irn) {
+
+				/* Take the first copy we find. */
+				if(pset_find_ptr(copies, irn))
+					return irn;
+			}
+
+			/* NOTE(review): reaching this point means copy_blocks and
+			 * copies are inconsistent (block marked, no copy found). */
+			assert(0 && "We must have encountered a copy");
+		}
+
+		/* If were not done yet, look in the immediate dominator */
+		curr_bl = get_Block_idom(curr_bl);
+	}
+
+	return NULL;
+}
+
+
+/**
+ * Rewire every user of orig to the copy whose definition actually
+ * reaches the use (rename phase of the copy insertion).
+ * @param orig        The original node whose uses are rewritten.
+ * @param copies      Set of all copies (including inserted phis).
+ * @param copy_blocks Set of all blocks containing a copy.
+ */
+static void fix_usages(ir_node *orig, pset *copies, pset *copy_blocks)
+{
+	int i;
+	int n_outs = 0;
+	const ir_edge_t *edge;
+	const ir_edge_t **outs;
+
+	/* Count the number of outs. */
+	foreach_out_edge(orig, edge)
+		n_outs++;
+
+	/*
+	 * Put all outs into an array.
+	 * This is neccessary, since the outs would be modified while
+	 * interating on them what could bring the outs module in trouble.
+	 */
+	outs = malloc(n_outs * sizeof(outs[0]));
+
+	/* Fixed: i was used uninitialized here in the original code. */
+	i = 0;
+	foreach_out_edge(orig, edge)
+		outs[i++] = edge;
+
+	/*
+	 * Search the valid def for each out and set it.
+	 */
+	for(i = 0; i < n_outs; ++i) {
+		ir_node *def;
+		ir_node *irn = get_edge_src_irn(outs[i]);
+		int pos = get_edge_src_pos(outs[i]);
+
+		def = search_def(irn, pos, copies, copy_blocks);
+		set_irn_n(irn, pos, def);
+	}
+
+	free(outs);
+}
+
+/**
+ * Introduce already created copies of a value into the SSA graph:
+ * place phi nodes at the iterated dominance frontiers of the copy
+ * blocks and reroute all uses of orig to the reaching definition.
+ * @param info       Precomputed dominance frontier info.
+ * @param orig       The original node.
+ * @param n          Number of copies.
+ * @param copy_nodes The copy nodes (must already be scheduled).
+ */
+void be_introduce_copies(dom_front_info_t *info, ir_node *orig, int n, ir_node *copy_nodes[])
+{
+	pset *copies = pset_new_ptr(2 * n);
+	pset *copy_blocks = pset_new_ptr(2 * n);
+	int save_optimize = get_optimize();
+	int i;
+
+	/* Fill the sets. */
+	for(i = 0; i < n; ++i) {
+		pset_insert_ptr(copies, copy_nodes[i]);
+		pset_insert_ptr(copy_blocks, get_nodes_block(copy_nodes[i]));
+	}
+
+	/*
+	 * Disable optimization so that the phi functions do not
+	 * disappear.
+	 */
+	set_optimize(0);
+
+	/*
+	 * Place the phi functions and reroute the usages.
+	 */
+	place_phi_functions(orig, copies, copy_blocks, info);
+	fix_usages(orig, copies, copy_blocks);
+
+	/* reset the optimizations */
+	set_optimize(save_optimize);
+
+	del_pset(copies);
+	del_pset(copy_blocks);
+}
--- /dev/null
+
+/**
+ * IRG modifications for be routines.
+ * @date 4.5.2005
+ *
+ * Copyright (C) 2005 Universitaet Karlsruhe.
+ * Released under the GPL.
+ */
+
+#ifndef _BEIRGMOD_H
+#define _BEIRGMOD_H
+
+typedef struct _dom_front_info_t dom_front_info_t;
+
+dom_front_info_t *be_compute_dominance_frontiers(ir_graph *irg);
+pset *be_get_dominance_frontier(dom_front_info_t *info, ir_node *block);
+void be_free_dominance_frontiers(dom_front_info_t *info);
+
+void be_introduce_copies(dom_front_info_t *info, ir_node *orig,
+ int n, ir_node *copy_nodes[]);
+
+#endif
#include "bitset.h"
#include "irprog.h"
+#include "irgopt.h"
#include "irgraph.h"
#include "irdump.h"
#include "phiclass.h"
#include "becopyoptmain.h"
#include "becopystat.h"
#include "bearch_firm.h"
+#include "benode_t.h"
#include "beasm_dump_globals.h"
#include "beasm_asm_gnu.h"
static void be_init_arch_env(arch_env_t *env)
{
- arch_env_init(env, &firm_isa);
- arch_env_add_irn_handler(env, &firm_irn_handler);
+ const arch_isa_if_t *isa = &firm_isa;
+ be_node_factory_t *nf;
+
+ nf = be_new_node_factory(isa);
+ arch_env_init(env, isa);
env->isa->init();
+
+ arch_env_add_irn_handler(env, &firm_irn_handler);
+ arch_env_add_irn_handler(env, be_node_get_irn_handler(nf));
}
static void be_main_loop(void)
int j, m;
ir_graph *irg = get_irp_irg(i);
+ remove_critical_cf_edges(irg);
+
localize_consts(irg);
#ifdef DUMP_LOCALIZED
dump_consts_local(0);
--- /dev/null
+/**
+ * @file benode.c
+ * @date 17.05.2005
+ * @author Sebastian Hack
+ *
+ * Backend node support.
+ *
+ * Copyright (C) 2005 Universitaet Karlsruhe
+ * Released under the GPL
+ */
+
+#include <stdlib.h>
+
+#include "obst.h"
+#include "set.h"
+#include "pmap.h"
+#include "offset.h"
+
+#include "irop_t.h"
+#include "irmode_t.h"
+#include "irnode_t.h"
+
+#include "benode_t.h"
+
+/* Factory producing the backend-specific Spill/Reload/Copy/Perm ops,
+ * one ir_op per (node kind, register class) pair. */
+struct _be_node_factory_t {
+	const arch_isa_if_t *isa;
+
+	struct obstack obst;        /* storage for per-op position arrays */
+	set *ops;                   /* be_op_t records, keyed by (kind, cls) */
+	pmap *irn_op_map;           /* ir_op -> be_op_t */
+	pmap *reg_req_map;          /* NOTE(review): never created/used in
+	                               this file -- confirm it is needed */
+
+	arch_irn_handler_t handler; /* handler answering for factory ops */
+	arch_irn_ops_t irn_ops;     /* irn_ops shared by all factory ops */
+};
+
+/* The kinds of backend nodes the factory can create. */
+typedef enum _node_kind_t {
+	node_kind_spill,
+	node_kind_reload,
+	node_kind_perm,
+	node_kind_copy,
+	node_kind_last
+} node_kind_t;
+
+/* One backend op: its kind, register class, the generated ir_op and
+ * the list of register-relevant operand positions. */
+typedef struct {
+	node_kind_t kind;
+	const arch_register_class_t *cls;
+	ir_op *op;
+	int n_pos;
+	int *pos;
+} be_op_t;
+
+/* Operand position templates (encoded via arch_pos_make_in/out). */
+static int templ_pos_Spill[] = {
+	arch_pos_make_in(0)
+};
+
+static int templ_pos_Reload[] = {
+	arch_pos_make_out(0)
+};
+
+static int templ_pos_Copy[] = {
+	arch_pos_make_in(0),
+	arch_pos_make_out(0)
+};
+
+#define ARRSIZE(x) (sizeof(x) / sizeof(x[0]))
+
+/* set comparison callback for the ops set: two be_op_t entries are
+ * equal iff both their kind and their register class match.
+ * Returns 0 on equality, non-zero otherwise (set convention). */
+static int cmp_op_map(const void *a, const void *b, size_t size)
+{
+	const be_op_t *lhs = a;
+	const be_op_t *rhs = b;
+
+	return lhs->kind != rhs->kind || lhs->cls != rhs->cls;
+}
+
+/* Look up the be_op_t for a (class, kind) pair in the factory's op
+ * set; set_insert presumably creates a fresh (zeroed-from-template)
+ * entry on first use -- this is how ops get registered. */
+static be_op_t *get_op(const be_node_factory_t *fact,
+		const arch_register_class_t *cls, node_kind_t kind)
+{
+	be_op_t templ;
+
+	templ.kind = kind;
+	templ.cls = cls;
+
+	return set_insert(fact->ops, &templ, sizeof(templ),
+			HASH_PTR(cls) + 7 * kind);
+}
+
+/**
+ * Create a Spill node for the given register class.
+ * The node takes the value to spill as its single operand and has
+ * mode M (it represents the memory effect of the store).
+ */
+ir_node *new_Spill(const be_node_factory_t *factory,
+		const arch_register_class_t *cls,
+		ir_graph *irg, ir_node *bl, ir_node *node_to_spill)
+{
+	ir_node *in[1];
+	ir_op *op = get_op(factory, cls, node_kind_spill)->op;
+
+	assert(op && "Spill opcode must be present for this register class");
+	in[0] = node_to_spill;
+
+	return new_ir_node(NULL, irg, bl, op, mode_M, 1, in);
+}
+
+/**
+ * Create a Reload node for the given register class.
+ * Takes a Spill node as operand and recovers the mode of the
+ * originally spilled value from the Spill's operand.
+ */
+ir_node *new_Reload(const be_node_factory_t *factory,
+		const arch_register_class_t *cls,
+		ir_graph *irg, ir_node *bl, ir_node *spill_node)
+{
+	ir_mode *mode;
+	ir_node *in[1];
+	ir_op *op = get_op(factory, cls, node_kind_reload)->op;
+
+	assert(op && "Reload opcode must be present for this register class");
+	assert(is_Spill(factory, spill_node) && "Operand of Reload must be a Spill");
+	in[0] = spill_node;
+	/* The reloaded value has the mode of what was originally spilled. */
+	mode = get_irn_mode(get_irn_n(spill_node, 0));
+
+	return new_ir_node(NULL, irg, bl, op, mode, 1, in);
+}
+
+/**
+ * Create a Perm node permuting `arity` values of one register class.
+ * The node has mode T; results are accessed via Proj nodes.
+ */
+ir_node *new_Perm(const be_node_factory_t *factory,
+		const arch_register_class_t *cls,
+		ir_graph *irg, ir_node *bl, int arity, ir_node **in)
+{
+	ir_op *op = get_op(factory, cls, node_kind_perm)->op;
+
+	return new_ir_node(NULL, irg, bl, op, mode_T, arity, in);
+}
+
+/**
+ * Create a Copy node for the given register class.
+ * The copy forwards its single operand's value and mode.
+ */
+ir_node *new_Copy(const be_node_factory_t *factory,
+		const arch_register_class_t *cls,
+		ir_graph *irg, ir_node *bl, ir_node *in)
+{
+	ir_node *ins[1];
+	/* Fixed: the original fetched the Perm opcode here, so every
+	 * "Copy" was actually created with the Perm ir_op. */
+	ir_op *op = get_op(factory, cls, node_kind_copy)->op;
+
+	ins[0] = in;
+
+	return new_ir_node(NULL, irg, bl, op, get_irn_mode(in), 1, ins);
+}
+
+/**
+ * If the node is a proj, reset the node to the proj's target and return
+ * the proj number.
+ * @param node The address of a node pointer.
+ * @param def A default value.
+ * @return If *node is a Proj, *node is set to the Proj's target and
+ * the Proj number is returned. Else *node remains the same and @p def
+ * is returned.
+ */
+static int redir_proj(const ir_node **node, int def)
+{
+	const ir_node *irn = *node;
+
+	if(!is_Proj(irn))
+		return def;
+
+	/* Follow the Proj to its predecessor and report the proj number. */
+	*node = get_Proj_pred(irn);
+	return get_Proj_proj(irn);
+}
+
+/**
+ * irn_ops: register requirements of a backend node's operand.
+ * Fills *req and returns it when pos is one of the op's recorded
+ * positions; returns NULL otherwise or for non-factory ops.
+ */
+static const arch_register_req_t *
+be_node_get_irn_reg_req(const arch_irn_ops_t *_self,
+		arch_register_req_t *req, const ir_node *irn, int pos)
+{
+	be_op_t *bo;
+	const be_node_factory_t *factory =
+		container_of(_self, const be_node_factory_t, irn_ops);
+
+	/*
+	 * Only a Proj redirects the query to an out position of its
+	 * predecessor. The original code converted pos to an out
+	 * position unconditionally, which broke all queries for input
+	 * operands of non-Proj nodes.
+	 */
+	if(is_Proj(irn)) {
+		pos = arch_pos_make_out(get_Proj_proj(irn));
+		irn = get_Proj_pred(irn);
+	}
+
+	bo = pmap_get(factory->irn_op_map, get_irn_op(irn));
+
+	if(bo) {
+		int i;
+
+		req->type = arch_register_req_type_normal;
+		req->cls = bo->cls;
+
+		/* Fixed: the original loop incremented pos instead of i. */
+		for(i = 0; i < bo->n_pos; ++i)
+			if(pos == bo->pos[i])
+				return req;
+	}
+
+	return NULL;
+}
+
+/**
+ * irn_ops: count the in (in_out >= 0) or out (in_out < 0) operand
+ * positions of a backend node.
+ * NOTE(review): (pos ^ in_out) > 0 counts positions whose sign matches
+ * in_out, but excludes the position where pos == in_out (xor == 0),
+ * e.g. input index 0 when in_out == 0 -- looks like an off-by-one,
+ * confirm the intended encoding. Also bo is dereferenced without a
+ * NULL check; presumably only called for factory ops.
+ */
+static int
+be_node_get_n_operands(const arch_irn_ops_t *_self, const ir_node *irn, int in_out)
+{
+	be_op_t *bo;
+	int i, res = 0;
+	const be_node_factory_t *factory =
+		container_of(_self, const be_node_factory_t, irn_ops);
+
+	redir_proj(&irn, 0);
+	bo = pmap_get(factory->irn_op_map, get_irn_op(irn));
+
+	for(i = 0; i < bo->n_pos; ++i)
+		res += (bo->pos[i] ^ in_out) > 0;
+
+	return res;
+}
+
+/**
+ * irn_ops: record the register allocated for output idx.
+ * The registers are stored by overlaying the node's attribute area
+ * with an array of register pointers.
+ * NOTE(review): requires the ir_op to have been created with enough
+ * attribute bytes for idx+1 pointers -- Spill ops are created with 0
+ * attribute bytes in be_new_node_factory; verify.
+ */
+void
+be_node_set_irn_reg(const arch_irn_ops_t *_self, ir_node *irn,
+		int idx, const arch_register_t *reg)
+{
+	const arch_register_t **regs;
+
+	idx = redir_proj((const ir_node **) &irn, idx);
+	regs = (const arch_register_t **) &irn->attr;
+	regs[idx] = reg;
+}
+
+/**
+ * irn_ops: fetch the register recorded for output idx.
+ * Mirrors be_node_set_irn_reg: reads the register pointer array
+ * overlaid on the node's attribute area.
+ */
+const arch_register_t *
+be_node_get_irn_reg(const arch_irn_ops_t *_self, const ir_node *irn, int idx)
+{
+	const arch_register_t **regs;
+
+	idx = redir_proj(&irn, idx);
+	regs = (const arch_register_t **) &irn->attr;
+	return regs[idx];
+}
+
+/**
+ * irn_ops: classify a backend node.
+ * NOTE(review): this looks like an unfinished stub -- bo is looked up
+ * but never used, and 0 is returned unconditionally instead of a
+ * classification derived from bo->kind (spill/reload/copy/perm).
+ */
+arch_irn_class_t be_node_classify(const arch_irn_ops_t *_self, const ir_node *irn)
+{
+	const be_node_factory_t *factory =
+		container_of(_self, const be_node_factory_t, irn_ops);
+
+	be_op_t *bo;
+	int idx;
+
+	idx = redir_proj(&irn, 0);
+	bo = pmap_get(factory->irn_op_map, get_irn_op(irn));
+
+	return 0;
+}
+
+/**
+ * Handler callback: answer with the factory's irn_ops for nodes whose
+ * op (after skipping a Proj) was created by this factory, NULL for all
+ * other nodes so the next handler in the chain is asked.
+ */
+static const arch_irn_ops_t *
+be_node_get_irn_ops(const arch_irn_handler_t *_self, const ir_node *irn)
+{
+	be_op_t *bo;
+	const be_node_factory_t *factory =
+		container_of(_self, const be_node_factory_t, handler);
+
+	redir_proj(&irn, 0);
+	bo = pmap_get(factory->irn_op_map, get_irn_op(irn));
+
+	return bo ? &factory->irn_ops : NULL;
+}
+
+/* Accessor for the factory's irn handler (to be registered with the
+ * arch environment). */
+const arch_irn_handler_t *be_node_get_irn_handler(const be_node_factory_t *f)
+{
+	return &f->handler;
+}
+
+/**
+ * Check whether a node is a Spill node created by this factory.
+ * Fixed: pmap_get returns NULL for ops the factory did not create
+ * (i.e. any ordinary firm node); the original dereferenced that NULL.
+ */
+int is_Spill(const be_node_factory_t *f, const ir_node *irn)
+{
+	be_op_t *bo = pmap_get(f->irn_op_map, get_irn_op(irn));
+	return bo != NULL && bo->kind == node_kind_spill;
+}
+
+/**
+ * Create a backend node factory for the given ISA: for every register
+ * class, generate Spill/Reload/Copy/Perm ir_ops and record their
+ * register-relevant operand positions.
+ * NOTE(review): reg_req_map (declared in the struct) is never created
+ * here, and malloc is unchecked -- confirm both are intended.
+ * NOTE(review): the Spill op is created with 0 attribute bytes although
+ * be_node_set_irn_reg writes register pointers into the attribute
+ * area -- verify Spill results never get a register assigned.
+ * NOTE(review): the Perm pos table steps j by 2, so only even indices
+ * are passed to arch_pos_make_in/out -- confirm this matches the
+ * intended (in i, out i) pairs for i = 0 .. n_regs-1.
+ */
+be_node_factory_t *be_new_node_factory(const arch_isa_if_t *isa)
+{
+	be_node_factory_t *factory;
+	char buf[256];
+	int i, j, n;
+
+	factory = malloc(sizeof(*factory));
+	factory->ops = new_set(cmp_op_map, 64);
+	factory->irn_op_map = pmap_create();
+	obstack_init(&factory->obst);
+
+	factory->handler.get_irn_ops = be_node_get_irn_ops;
+
+	factory->irn_ops.get_irn_reg_req = be_node_get_irn_reg_req;
+	factory->irn_ops.get_n_operands = be_node_get_n_operands;
+	factory->irn_ops.set_irn_reg = be_node_set_irn_reg;
+	factory->irn_ops.get_irn_reg = be_node_get_irn_reg;
+	factory->irn_ops.classify = be_node_classify;
+
+	/* One op of each kind per register class. */
+	for(i = 0, n = isa->get_n_reg_class(); i < n; ++i) {
+		const arch_register_class_t *cls = isa->get_reg_class(i);
+		be_op_t *ent;
+
+		ent = get_op(factory, cls, node_kind_spill);
+		snprintf(buf, sizeof(buf), "Spill_%s", cls->name);
+		ent->op = new_ir_op(get_next_ir_opcode(), buf, op_pin_state_pinned,
+				0, 0, oparity_unary, 0);
+		ent->n_pos = ARRSIZE(templ_pos_Spill);
+		ent->pos = templ_pos_Spill;
+		pmap_insert(factory->irn_op_map, ent->op, ent);
+
+		ent = get_op(factory, cls, node_kind_reload);
+		snprintf(buf, sizeof(buf), "Reload_%s", cls->name);
+		ent->op = new_ir_op(get_next_ir_opcode(), buf, op_pin_state_pinned, 0, 0,
+				oparity_unary, sizeof(const arch_register_t *));
+		ent->n_pos = ARRSIZE(templ_pos_Reload);
+		ent->pos = templ_pos_Reload;
+		pmap_insert(factory->irn_op_map, ent->op, ent);
+
+		ent = get_op(factory, cls, node_kind_copy);
+		snprintf(buf, sizeof(buf), "Copy_%s", cls->name);
+		ent->op = new_ir_op(get_next_ir_opcode(), buf, op_pin_state_pinned, 0, 0,
+				oparity_unary, sizeof(const arch_register_t *));
+		ent->n_pos = ARRSIZE(templ_pos_Copy);
+		ent->pos = templ_pos_Copy;
+		pmap_insert(factory->irn_op_map, ent->op, ent);
+
+		ent = get_op(factory, cls, node_kind_perm);
+		snprintf(buf, sizeof(buf), "Perm_%s", cls->name);
+		ent->op = new_ir_op(get_next_ir_opcode(), buf, op_pin_state_pinned, 0, 0,
+				oparity_variable, sizeof(const arch_register_t) * cls->n_regs);
+		ent->n_pos = 2 * cls->n_regs;
+		ent->pos = obstack_alloc(&factory->obst, sizeof(ent->pos[0]) * ent->n_pos);
+		for(j = 0; j < ent->n_pos; j += 2) {
+			ent->pos[j] = arch_pos_make_in(j);
+			ent->pos[j + 1] = arch_pos_make_out(j);
+		}
+		pmap_insert(factory->irn_op_map, ent->op, ent);
+
+	}
+
+	return factory;
+}
+
+#if 0
+ir_node *be_spill(const be_node_factory_t *factory,
+ const arch_env_t *env, ir_node *node_to_spill)
+{
+ ir_node *res;
+ if(is_Reload(node_to_spill))
+ res = get_irn_n(node_to_spill, 0);
+ else {
+ ir_node *bl = get_nodes_block(node_to_spill);
+ ir_graph *irg = get_irn_irg(bl);
+
+ res = new_Spill(factory, cls, irg, bl, node_to_spill);
+ }
+
+ return res;
+}
+
+ir_node *be_reload(const be_node_factory_t *factory,
+ const arch_env_t *env, ir_node *spill)
+{
+}
+#endif
--- /dev/null
+/**
+ * @file benode_t.h
+ * @date 17.05.2005
+ * @author Sebastian Hack
+ *
+ * Backend node support.
+ *
+ * Copyright (C) 2005 Universitaet Karlsruhe
+ * Released under the GPL
+ */
+
+#ifndef _BENODE_T_H
+#define _BENODE_T_H
+
+#include "irmode.h"
+#include "irnode.h"
+
+#include "bearch.h"
+
+typedef struct _be_node_factory_t be_node_factory_t;
+
+be_node_factory_t *be_new_node_factory(const arch_isa_if_t *isa);
+
+const arch_irn_handler_t *be_node_get_irn_handler(const be_node_factory_t *f);
+
+ir_node *new_Spill(const be_node_factory_t *factory,
+ const arch_register_class_t *cls,
+ ir_graph *irg, ir_node *bl, ir_node *node_to_spill);
+
+ir_node *new_Reload(const be_node_factory_t *factory,
+ const arch_register_class_t *cls,
+ ir_graph *irg, ir_node *bl, ir_node *spill_node);
+
+ir_node *new_Perm(const be_node_factory_t *factory,
+ const arch_register_class_t *cls,
+ ir_graph *irg, ir_node *bl, int arity, ir_node **in);
+
+ir_node *new_Copy(const be_node_factory_t *factory,
+ const arch_register_class_t *cls,
+ ir_graph *irg, ir_node *block, ir_node *in);
+
+ir_node *be_spill(const be_node_factory_t *factory, const arch_env_t *env, ir_node *irn);
+ir_node *be_reload(const be_node_factory_t *factory, const arch_env_t *env, ir_node *irn);
+
+int is_Spill(const be_node_factory_t *f, const ir_node *irn);
+
+ir_node *get_Reload_Spill(ir_node *reload);
+
+void insert_perm(const be_node_factory_t *factory,
+ const arch_register_class_t *reg_class,
+ ir_node *in_front_of);
+
+#endif /* _BENODE_T_H */
+#include <stdlib.h>
+
#include "impl.h"
#include "irprintf.h"
#include "irgwalk.h"
#include "irnode.h"
+#include "debug.h"
#include "besched_t.h"
#include "besched.h"
FIRM_IMPL1(sched_prev, ir_node *, const ir_node *)
FIRM_IMPL1(sched_first, ir_node *, const ir_node *)
FIRM_IMPL1(sched_last, ir_node *, const ir_node *)
+FIRM_IMPL2(sched_add_after, ir_node *, ir_node *, ir_node *)
+FIRM_IMPL2(sched_add_before, ir_node *, ir_node *, ir_node *)
size_t sched_irn_data_offset = 0;
void be_sched_init(void)
{
sched_irn_data_offset = register_additional_node_data(sizeof(sched_info_t));
+	/* Register the scheduler's debug module. */
+	firm_dbg_register("be.sched");
}
void be_sched_test(void)
obstack_free(&obst, NULL);
}
+
+/**
+ * Reassign the time steps of a whole schedule.
+ * Walks the block's schedule in order and hands out fresh time steps,
+ * SCHED_INITIAL_GRANULARITY apart, restoring headroom for insertions.
+ * @param block The block whose schedule is renumbered.
+ */
+void sched_renumber(const ir_node *block)
+{
+  ir_node *irn;
+  sched_timestep_t ts = 0;
+
+  sched_foreach(block, irn) {
+    get_irn_sched_info(irn)->time_step = ts;
+    ts += SCHED_INITIAL_GRANULARITY;
+  }
+}
+
+
+/**
+ * Verify the schedule of one block.
+ * Checks (a) that every data operand of a node which lives in the same
+ * block is scheduled before the node, and (b) that the stored time
+ * steps are strictly increasing along the schedule.
+ * @param block The block whose schedule to verify.
+ * @return 1 if the schedule is proper, 0 if not.
+ */
+int sched_verify(const ir_node *block)
+{
+  firm_dbg_module_t *dbg_sched = firm_dbg_register("be.sched");
+  int res = 1;
+  const ir_node *irn;
+  int i, n;
+  int *save_time_step;
+
+  /* Count the number of nodes in the schedule. */
+  n = 0;
+  sched_foreach(block, irn)
+    n++;
+
+  /* An empty schedule is trivially valid (also avoids malloc(0)). */
+  if(n == 0)
+    return 1;
+
+  save_time_step = malloc(n * sizeof(save_time_step[0]));
+
+  /* BUG FIX: the buffer was used without checking the allocation. */
+  if(!save_time_step) {
+    DBG((dbg_sched, LEVEL_DEFAULT,
+        "out of memory, schedule of block not verified\n"));
+    return res;
+  }
+
+  /* Save the time steps and temporarily replace them by ordinals. */
+  i = 0;
+  sched_foreach(block, irn) {
+    sched_info_t *info = get_irn_sched_info(irn);
+    save_time_step[i] = info->time_step;
+    info->time_step = i;
+
+    i += 1;
+  }
+
+  /*
+   * Check if each relevant operand of a node is scheduled before
+   * the node itself.
+   */
+  sched_foreach(block, irn) {
+    /* Own names to avoid shadowing the outer i/n. */
+    int pos, arity;
+    int step = sched_get_time_step(irn);
+
+    for(pos = 0, arity = get_irn_arity(irn); pos < arity; pos++) {
+      ir_node *op = get_irn_n(irn, pos);
+
+      if(mode_is_datab(get_irn_mode(op))
+          && get_nodes_block(op) == block
+          && sched_get_time_step(op) > step) {
+
+        DBG((dbg_sched, LEVEL_DEFAULT,
+            "%n is operand of %n but scheduled after\n", op, irn));
+        res = 0;
+      }
+    }
+  }
+
+  /*
+   * Check, if the time steps are strictly increasing.
+   * BUG FIX: compare directly instead of testing the difference
+   * against 0 — the subtraction can overflow (UB) for far-apart
+   * time steps converted from the unsigned sched_timestep_t.
+   */
+  for(i = 1; i < n; ++i) {
+    if(save_time_step[i] <= save_time_step[i - 1]) {
+      DBG((dbg_sched, LEVEL_DEFAULT,
+          "position %d: time step shrinks (from %d to %d)\n",
+          i, save_time_step[i - 1], save_time_step[i]));
+      res = 0;
+    }
+  }
+
+  /* Restore the old time steps */
+  i = 0;
+  sched_foreach(block, irn) {
+    sched_info_t *info = get_irn_sched_info(irn);
+    info->time_step = save_time_step[i++];
+  }
+
+  free(save_time_step);
+  return res;
+}
+
+/* Block-walker callback: fold the verdict of one block into the
+ * accumulated result pointed to by `data`. */
+static void sched_verify_walker(ir_node *block, void *data)
+{
+  int *ok = data;
+  *ok &= sched_verify(block);
+}
+
+/**
+ * Verify the schedules in all blocks of the graph.
+ * @param irg The program graph.
+ * @return 1 if all block schedules were right, 0 if not.
+ */
+int sched_verify_irg(ir_graph *irg)
+{
+  int all_ok = 1;
+
+  irg_block_walk_graph(irg, sched_verify_walker, NULL, &all_ok);
+  return all_ok;
+}
#ifndef _BESCHED_T_H
#define _BESCHED_T_H
+#define SCHED_INITIAL_GRANULARITY (1 << 14)
+
#include "list.h"
#include "irnode_t.h"
#include "irgraph_t.h"
#include "besched.h"
+/** Time step ordering the nodes inside a block's schedule. */
+typedef unsigned int sched_timestep_t;
+
extern size_t sched_irn_data_offset;
typedef struct _sched_info_t {
struct list_head list;
- int time_step;
+ sched_timestep_t time_step; /**< The time step of this node in its schedule. */
} sched_info_t;
#define _sched_entry(list_head) (list_entry(list_head, sched_info_t, list))
return !list_empty(&info->list) ? get_sched_info_irn(_sched_entry(info->list.prev)) : NULL;
}
+/**
+ * Reassign the time steps in the schedule.
+ * @param block The schedule to update.
+ */
+void sched_renumber(const ir_node *block);
+
+/**
+ * Give a freshly inserted node a time step between the time steps of
+ * its schedule-list neighbours. Must be called after the node was
+ * linked into the schedule list.
+ * @param irn The newly scheduled node.
+ */
+static INLINE void _sched_set_time_stamp(ir_node *irn)
+{
+  sched_info_t *inf = get_irn_sched_info(irn);
+  sched_timestep_t before_ts = _sched_entry(inf->list.prev)->time_step;
+  sched_timestep_t after_ts = _sched_entry(inf->list.next)->time_step;
+
+  /*
+   * If we are the last, we can give us a big time step,
+   * else we have to compute our time step from our
+   * neighbours.
+   */
+  if(after_ts == 0)
+    inf->time_step = before_ts + SCHED_INITIAL_GRANULARITY;
+  else {
+    sched_timestep_t ts = (before_ts + after_ts) / 2;
+
+    /*
+     * If the resolution went out, we have to renumber
+     * this block.
+     */
+    if(ts == before_ts || ts == after_ts)
+      sched_renumber(get_nodes_block(irn));
+    else
+      /* BUG FIX: the computed midpoint was never stored before,
+       * leaving the node with a stale/zero time step. */
+      inf->time_step = ts;
+  }
+}
+
/**
 * Add a node to a schedule, inserting it directly before another node.
 * @param before The already scheduled node in front of which to insert.
 * @param irn The node to add.
 * @return The given node.
 */
-static INLINE ir_node *_sched_add(ir_node *block, ir_node *irn)
+static INLINE ir_node *_sched_add_before(ir_node *before, ir_node *irn)
{
- assert(is_Block(block) && "Need a block here");
- list_add_tail(&get_irn_sched_info(irn)->list, &get_irn_sched_info(block)->list);
+ list_add_tail(&get_irn_sched_info(irn)->list, &get_irn_sched_info(before)->list);
+ /* Give the new node a time step between its neighbours. */
+ _sched_set_time_stamp(irn);
return irn;
}
-#define sched_get_time_step(irn) _sched_get_time_step(irn)
-#define sched_has_succ(irn) _sched_has_succ(irn)
-#define sched_has_prev(irn) _sched_has_prev(irn)
-#define sched_succ(irn) _sched_succ(irn)
-#define sched_prev(irn) _sched_prev(irn)
-#define sched_first(irn) _sched_first(irn)
-#define sched_last(irn) _sched_last(irn)
-#define sched_add(block,irn) _sched_add(block,irn)
+/**
+ * Add a node to a schedule, inserting it directly after another node.
+ * @param after The already scheduled node behind which to insert.
+ * @param irn The node to add.
+ * @return The given node.
+ */
+static INLINE ir_node *_sched_add_after(ir_node *after, ir_node *irn)
+{
+ list_add(&get_irn_sched_info(irn)->list, &get_irn_sched_info(after)->list);
+ /* Give the new node a time step between its neighbours. */
+ _sched_set_time_stamp(irn);
+ return irn;
+}
+/**
+ * Verify a schedule.
+ * @param block The block whose schedule to verify.
+ * @return 1, if the schedule is proper, 0 if not.
+ */
+extern int sched_verify(const ir_node *block);
+
+/**
+ * Verify the schedules in all blocks of the irg.
+ * @param irg The program graph.
+ * @return 1, if all schedules were right, 0 if not.
+ */
+extern int sched_verify_irg(ir_graph *irg);
+
+#define sched_get_time_step(irn) _sched_get_time_step(irn)
+#define sched_has_succ(irn) _sched_has_succ(irn)
+#define sched_has_prev(irn) _sched_has_prev(irn)
+#define sched_succ(irn) _sched_succ(irn)
+#define sched_prev(irn) _sched_prev(irn)
+#define sched_first(irn) _sched_first(irn)
+#define sched_last(irn) _sched_last(irn)
+#define sched_add_before(before, irn) _sched_add_before(before, irn)
+#define sched_add_after(after, irn) _sched_add_after(after, irn)
#endif