* @author Christoph Mallon
* @version $Id$
*/
-#ifdef HAVE_CONFIG_H
#include "config.h"
-#endif
#include <stdlib.h>
-#include "besched_t.h"
+#include "besched.h"
#include "belistsched.h"
#include "belive_t.h"
#include "beutil.h"
#include "height.h"
#include "irtools.h"
#include "irgwalk.h"
-#include "benode_t.h"
-
+#include "benode.h"
+#include "array_t.h"
// XXX there is no one time init for schedulers
-#define NORMAL_DBG
+//#define NORMAL_DBG
+#include "irprintf.h"
+/** An instance of the normal scheduler. */
+typedef struct instance_t {
+ ir_graph* irg; /**< the IR graph of this instance */
+ struct obstack obst; /**< obstack for temporary data */
+ ir_node* curr_list; /**< current block schedule list */
+} instance_t;
static int must_be_scheduled(const ir_node* const irn)
{
}
-static const arch_env_t *cur_arch_env;
-
-
static ir_node *normal_select(void *block_env, ir_nodeset_t *ready_set,
ir_nodeset_t *live_set)
{
+ /* Select the next node to schedule: scan the precomputed block
+ * schedule list (inst->curr_list, linked through the node link
+ * fields) and return the first entry contained in ready_set,
+ * unlinking it from the list. */
+ instance_t* inst = block_env;
+ ir_node* irn;
+ ir_node* next;
+ ir_node* last = NULL;
ir_nodeset_iterator_t iter;
- ir_node* block;
- ir_node* irn;
- ir_node** sched;
- int sched_count;
- (void)block_env;
(void)live_set;
- ir_nodeset_iterator_init(&iter, ready_set);
- irn = ir_nodeset_iterator_next(&iter);
- block = get_nodes_block(irn);
- sched = get_irn_link(block);
- sched_count = ARR_LEN(sched);
- for (; sched_count-- != 0; ++sched) {
- ir_node* irn = *sched;
- if (ir_nodeset_contains(ready_set, irn) &&
- !arch_irn_class_is(cur_arch_env, irn, branch)) {
+ for (irn = inst->curr_list; irn != NULL; last = irn, irn = next) {
+ next = get_irn_link(irn);
+ if (ir_nodeset_contains(ready_set, irn)) {
#if defined NORMAL_DBG
ir_fprintf(stderr, "scheduling %+F\n", irn);
#endif
+ /* unlink irn so it is not considered again */
+ if (last == NULL)
+ inst->curr_list = next;
+ else
+ set_irn_link(last, next);
return irn;
}
}
+ /* nothing from the list is ready (e.g. control-flow ops are not on
+ * the list — see normal_init_block); fall back to any ready node */
+ ir_nodeset_iterator_init(&iter, ready_set);
+ irn = ir_nodeset_iterator_next(&iter);
return irn;
}
int cost;
} irn_cost_pair;
-
+/* qsort comparator for irn_cost_pair: order by decreasing cost; break
+ * ties by node index so the resulting order is deterministic across
+ * qsort implementations. */
static int cost_cmp(const void* a, const void* b)
{
const irn_cost_pair* const a1 = a;
const irn_cost_pair* const b1 = b;
- int ret;
- if (is_irn_forking(a1->irn)) {
- ret = 1;
- } else if (is_irn_forking(b1->irn)) {
- ret = -1;
- } else {
- ret = b1->cost - a1->cost;
- //ret = a1->cost - b1->cost;
- }
+ int ret = b1->cost - a1->cost;
+ if (ret == 0)
+ ret = (int)get_irn_idx(a1->irn) - (int)get_irn_idx(b1->irn);
#if defined NORMAL_DBG
- ir_fprintf(stderr, "%+F %s %+F\n", a1->irn, ret < 0 ? "<" : ret > 0 ? ">" : "=", b1->irn);
+ ir_fprintf(stderr, "cost %+F %s %+F\n", a1->irn, ret < 0 ? "<" : ret > 0 ? ">" : "=", b1->irn);
#endif
return ret;
}
irn_cost_pair costs[];
} flag_and_cost;
+#define get_irn_fc(irn) ((flag_and_cost*)get_irn_link(irn))
+#define set_irn_fc(irn, fc) set_irn_link(irn, fc)
+
+/** Return 1 if irn produces a register result, 0 otherwise: memory
+ * (mode_M) and control-flow (mode_X) values and outputs whose register
+ * requirement carries the 'ignore' flag do not count. */
static int count_result(const ir_node* irn)
{
const ir_mode* mode = get_irn_mode(irn);
- return
- mode != mode_M &&
- mode != mode_X &&
- !arch_irn_is(cur_arch_env, irn, ignore);
+
+ if (mode == mode_M || mode == mode_X)
+ return 0;
+
+ if (arch_get_register_req_out(irn)->type & arch_register_req_type_ignore)
+ return 0;
+
+ return 1;
}
/* TODO high cost for store trees
*/
-
-static int normal_tree_cost(ir_node* irn)
+static int normal_tree_cost(ir_node* irn, instance_t *inst)
{
- flag_and_cost* fc = get_irn_link(irn);
- ir_node* block = get_nodes_block(irn);
- int arity = get_irn_arity(irn);
+ flag_and_cost* fc;
+ int arity;
+ ir_node* last;
int n_res;
int cost;
int n_op_res = 0;
return 0;
if (is_Proj(irn)) {
- return normal_tree_cost(get_Proj_pred(irn));
+ return normal_tree_cost(get_Proj_pred(irn), inst);
}
+ arity = get_irn_arity(irn);
+ fc = get_irn_fc(irn);
+
if (fc == NULL) {
irn_cost_pair* costs;
int i;
+ ir_node* block = get_nodes_block(irn);
- fc = malloc(sizeof(*fc) + sizeof(*fc->costs) * arity);
+ fc = OALLOCF(&inst->obst, flag_and_cost, costs, arity);
fc->no_root = 0;
costs = fc->costs;
flag_and_cost* pred_fc;
ir_node* real_pred;
- cost = normal_tree_cost(pred);
+ cost = normal_tree_cost(pred, inst);
if (be_is_Barrier(pred)) cost = 1; // XXX hack: the barrier causes all users to have a reguse of #regs
- if (!arch_irn_is(cur_arch_env, pred, ignore)) {
+ if (!arch_irn_is_ignore(pred)) {
real_pred = (is_Proj(pred) ? get_Proj_pred(pred) : pred);
- pred_fc = get_irn_link(real_pred);
+ pred_fc = get_irn_fc(real_pred);
pred_fc->no_root = 1;
#if defined NORMAL_DBG
ir_fprintf(stderr, "%+F says that %+F is no root\n", irn, real_pred);
}
cost = 0;
+ last = 0;
for (i = 0; i < arity; ++i) {
- if (get_irn_mode(fc->costs[i].irn) == mode_M) continue;
- if (arch_irn_is(cur_arch_env, fc->costs[i].irn, ignore)) continue;
+ ir_node* op = fc->costs[i].irn;
+ if (op == last) continue;
+ if (get_irn_mode(op) == mode_M) continue;
+ if (arch_irn_is_ignore(op)) continue;
cost = MAX(fc->costs[i].cost + n_op_res, cost);
+ last = op;
++n_op_res;
}
n_res = count_result(irn);
+/* irg_walk callback: compute the scheduling cost (normal_tree_cost)
+ * for every node that must be scheduled; blocks are skipped. */
static void normal_cost_walker(ir_node* irn, void* env)
{
- (void)env;
+ instance_t *inst = env;
#if defined NORMAL_DBG
ir_fprintf(stderr, "cost walking node %+F\n", irn);
#endif
if (is_Block(irn)) return;
if (!must_be_scheduled(irn)) return;
- normal_tree_cost(irn);
+ normal_tree_cost(irn, inst);
}
if (is_Block(irn)) return;
if (!must_be_scheduled(irn)) return;
- is_root = be_is_Keep(irn) || !((flag_and_cost*)get_irn_link(irn))->no_root;
+ is_root = be_is_Keep(irn) || !get_irn_fc(irn)->no_root;
#if defined NORMAL_DBG
ir_fprintf(stderr, "%+F is %sroot\n", irn, is_root ? "" : "no ");
static ir_node** sched_node(ir_node** sched, ir_node* irn)
{
- ir_node* block = get_nodes_block(irn);
- flag_and_cost* fc = get_irn_link(irn);
- irn_cost_pair* irns = fc->costs;
- int arity = get_irn_arity(irn);
- int i;
-
- if (irn_visited(irn)) return sched;
- if (is_End(irn)) return sched;
+ if (irn_visited_else_mark(irn)) return sched;
+ if (is_End(irn)) return sched;
if (!is_Phi(irn) && !be_is_Keep(irn)) {
+ ir_node* block = get_nodes_block(irn);
+ int arity = get_irn_arity(irn);
+ flag_and_cost* fc = get_irn_fc(irn);
+ irn_cost_pair* irns = fc->costs;
+ int i;
+
for (i = 0; i < arity; ++i) {
ir_node* pred = irns[i].irn;
if (get_nodes_block(pred) != block) continue;
}
}
- mark_irn_visited(irn);
ARR_APP1(ir_node*, sched, irn);
return sched;
}
{
const irn_cost_pair* const a1 = a;
const irn_cost_pair* const b1 = b;
- int ret = b1->cost - a1->cost;
+ int ret;
+ if (is_irn_forking(a1->irn)) {
+ ret = 1;
+ } else if (is_irn_forking(b1->irn)) {
+ ret = -1;
+ } else {
+ ret = b1->cost - a1->cost;
+ if (ret == 0) {
+ /* place live-out nodes later */
+ ret = (count_result(a1->irn) != 0) - (count_result(b1->irn) != 0);
+ }
+ }
#if defined NORMAL_DBG
- ir_fprintf(stderr, "%+F %s %+F\n", a1->irn, ret < 0 ? "<" : ret > 0 ? ">" : "=", b1->irn);
+ ir_fprintf(stderr, "root %+F %s %+F\n", a1->irn, ret < 0 ? "<" : ret > 0 ? ">" : "=", b1->irn);
#endif
return ret;
}
static void normal_sched_block(ir_node* block, void* env)
{
+ ir_node** roots = get_irn_link(block);
heights_t* heights = env;
- ir_node** roots = get_irn_link(block);
int root_count;
irn_cost_pair* root_costs;
int i;
+/* Per-graph initialisation: allocate the scheduler instance, compute
+ * tree costs, collect roots and build the per-block schedules. The
+ * block schedules are stored in the block link fields and consumed
+ * later by normal_init_block(). */
static void *normal_init_graph(const list_sched_selector_t *vtab,
const be_irg_t *birg)
{
- ir_graph *irg = be_get_birg_irg(birg);
- heights_t *heights;
+ instance_t* inst = XMALLOC(instance_t);
+ ir_graph* irg = be_get_birg_irg(birg);
+ heights_t* heights;
(void)vtab;
- cur_arch_env = be_get_birg_arch_env(birg);
-
be_clear_links(irg);
+ obstack_init(&inst->obst);
+ inst->irg = irg;
+
heights = heights_new(irg);
+ /* the link resource stays reserved until normal_finish_graph() */
+ ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK);
- irg_walk_graph(irg, normal_cost_walker, NULL, NULL);
+ irg_walk_graph(irg, normal_cost_walker, NULL, inst);
irg_walk_graph(irg, collect_roots, NULL, NULL);
inc_irg_visited(irg);
+ /* NOTE(review): inc_irg_visited() runs before the visited resource is
+ * reserved here — confirm this ordering is intended */
+ ir_reserve_resources(irg, IR_RESOURCE_IRN_VISITED);
irg_block_walk_graph(irg, normal_sched_block, NULL, heights);
+ ir_free_resources(irg, IR_RESOURCE_IRN_VISITED);
heights_free(heights);
- return NULL;
+ return inst;
}
-
static void *normal_init_block(void *graph_env, ir_node *block)
{
- (void)graph_env;
- (void)block;
-
- return NULL;
+ /* Block initialisation: convert the block's precomputed schedule
+ * array (stored in the block link field by normal_sched_block) into
+ * a singly linked list threaded through the node link fields, so
+ * normal_select() can cheaply unlink scheduled nodes. Control-flow
+ * ops are left off the list; they are found via the ready-set
+ * fallback in normal_select(). */
+ instance_t* inst = graph_env;
+ ir_node** sched = get_irn_link(block);
+ ir_node* first = NULL;
+ int i;
+
+ /* turn into a list, so we can easily remove nodes.
+ The link field is used anyway. */
+ for (i = ARR_LEN(sched) - 1; i >= 0; --i) {
+ ir_node* irn = sched[i];
+ if (!is_cfop(irn)) {
+ set_irn_link(irn, first);
+ first = irn;
+ }
+ }
+ /* note: we can free sched here, there should be no attempt to schedule
+ a block twice */
+ DEL_ARR_F(sched);
+ /* clear the link instead of leaving a dangling pointer to the array
+ * just freed above */
+ set_irn_link(block, NULL);
+ inst->curr_list = first;
+ return inst;
}
+/** Per-graph cleanup: release the link-field resource reserved in
+ * normal_init_graph() and free all per-instance memory. */
+static void normal_finish_graph(void *env)
+{
+ instance_t *inst = env;
+
+ /* block uses the link field to store the schedule */
+ ir_free_resources(inst->irg, IR_RESOURCE_IRN_LINK);
+ obstack_free(&inst->obst, NULL);
+ xfree(inst);
+}
const list_sched_selector_t normal_selector = {
normal_init_graph,
NULL, /* exectime */
NULL, /* latency */
NULL, /* finish_block */
- NULL /* finish_graph */
+ normal_finish_graph
};