ia32: cleanup handling of 8/16bit operations
[libfirm] ir/be/beschednormal.c
index 68ae8b5..cff3a87 100644
@@ -20,7 +20,6 @@
 /**
  * @brief   Use the strong normal form theorem (though it does not hold)
  * @author  Christoph Mallon
- * @version $Id$
  */
 #include "config.h"
 
 #include "belive_t.h"
 #include "beutil.h"
 #include "heights.h"
-#include "irtools.h"
 #include "irgwalk.h"
 #include "benode.h"
 #include "bemodule.h"
+#include "util.h"
 #include "array_t.h"
 
 // XXX there is no one time init for schedulers
@@ -54,16 +53,12 @@ static int must_be_scheduled(const ir_node* const irn)
 }
 
 
-static ir_node *normal_select(void *block_env, ir_nodeset_t *ready_set,
-                              ir_nodeset_t *live_set)
+static ir_node *normal_select(void *block_env, ir_nodeset_t *ready_set)
 {
        instance_t* inst = (instance_t*)block_env;
        ir_node*    irn;
        ir_node*    next;
        ir_node*    last = NULL;
-       ir_nodeset_iterator_t iter;
-
-       (void)live_set;
 
        for (irn = inst->curr_list; irn != NULL; last = irn, irn = next) {
                next = (ir_node*)get_irn_link(irn);
@@ -79,9 +74,7 @@ static ir_node *normal_select(void *block_env, ir_nodeset_t *ready_set,
                }
        }
 
-       ir_nodeset_iterator_init(&iter, ready_set);
-       irn = ir_nodeset_iterator_next(&iter);
-       return irn;
+       return ir_nodeset_first(ready_set);
 }
 
 
@@ -123,7 +116,7 @@ static int count_result(const ir_node* irn)
        if (mode == mode_T)
                return 1;
 
-       if (arch_get_register_req_out(irn)->type & arch_register_req_type_ignore)
+       if (arch_get_irn_register_req(irn)->type & arch_register_req_type_ignore)
                return 0;
 
        return 1;
@@ -155,7 +148,6 @@ static int normal_tree_cost(ir_node* irn, instance_t *inst)
 
        if (fc == NULL) {
                irn_cost_pair* costs;
-               int            i;
                ir_node*       block = get_nodes_block(irn);
 
                fc = OALLOCF(&inst->obst, flag_and_cost, costs, arity);
@@ -164,7 +156,6 @@ static int normal_tree_cost(ir_node* irn, instance_t *inst)
 
                for (i = 0; i < arity; ++i) {
                        ir_node* pred = get_irn_n(irn, i);
-                       int cost;
 
                        if (is_Phi(irn) || get_irn_mode(pred) == mode_M || is_Block(pred)) {
                                cost = 0;
@@ -175,7 +166,6 @@ static int normal_tree_cost(ir_node* irn, instance_t *inst)
                                ir_node*       real_pred;
 
                                cost = normal_tree_cost(pred, inst);
-                               if (be_is_Barrier(pred)) cost = 1; // XXX hack: the barrier causes all users to have a reguse of #regs
                                if (!arch_irn_is_ignore(pred)) {
                                        real_pred = (is_Proj(pred) ? get_Proj_pred(pred) : pred);
                                        pred_fc = get_irn_fc(real_pred);
@@ -204,6 +194,8 @@ static int normal_tree_cost(ir_node* irn, instance_t *inst)
                mode = get_irn_mode(op);
                if (mode == mode_M)
                        continue;
+               if (arch_get_irn_flags(op) & arch_irn_flags_not_scheduled)
+                       continue;
                if (mode != mode_T && arch_irn_is_ignore(op))
                        continue;
                cost = MAX(fc->costs[i].cost + n_op_res, cost);
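
The added check leaves operands that will never appear in the schedule out of the operand count feeding the cost estimate. A stand-alone sketch of that skip-flagged-operands pattern (the types and the flag field are stand-ins for libfirm's arch_get_irn_flags() query, not the real API):

#include <stdio.h>

/* Stand-in operand descriptor for illustration only. */
typedef struct {
	int cost;          /* recursively computed subtree cost      */
	int not_scheduled; /* operand never appears in the schedule  */
} operand;

/* MAX-style cost: operand i contributes its own cost plus the number of
 * operands already counted; flagged operands are simply skipped, mirroring
 * the check added in the hunk above. */
static int tree_cost(const operand *ops, int arity)
{
	int cost     = 0;
	int n_op_res = 0;
	for (int i = 0; i != arity; ++i) {
		if (ops[i].not_scheduled)
			continue;
		if (ops[i].cost + n_op_res > cost)
			cost = ops[i].cost + n_op_res;
		++n_op_res;
	}
	return cost;
}

int main(void)
{
	operand ops[] = { { 2, 0 }, { 5, 1 }, { 1, 0 } };
	printf("cost = %d\n", tree_cost(ops, 3)); /* the flagged operand is ignored */
	return 0;
}
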
@@ -292,15 +284,19 @@ static int root_cmp(const void* a, const void* b)
        const irn_cost_pair* const a1 = (const irn_cost_pair*)a;
        const irn_cost_pair* const b1 = (const irn_cost_pair*)b;
        int ret;
-       if (is_irn_forking(a1->irn)) {
+       if (is_irn_forking(a1->irn) && !is_irn_forking(b1->irn)) {
                ret = 1;
-       } else if (is_irn_forking(b1->irn)) {
+       } else if (is_irn_forking(b1->irn) && !is_irn_forking(a1->irn)) {
                ret = -1;
        } else {
                ret = b1->cost - a1->cost;
                if (ret == 0) {
                        /* place live-out nodes later */
                        ret = (count_result(a1->irn) != 0) - (count_result(b1->irn) != 0);
+                       if (ret == 0) {
+                               /* compare node idx */
+                               ret = get_irn_idx(a1->irn) - get_irn_idx(b1->irn);
+                       }
                }
        }
 #if defined NORMAL_DBG
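
With the extra guards, root_cmp() no longer reports both nodes as "greater" when both fork control flow, and the new node-index fallback makes the comparison a total order, so sorting the roots is deterministic even when costs tie. A stand-alone sketch of that cost-then-index comparator pattern (the struct is a stand-in, not libfirm's irn_cost_pair):

#include <stdio.h>
#include <stdlib.h>

typedef struct {
	int idx;   /* unique node index, used as the final tie-breaker */
	int cost;  /* scheduling cost, higher cost sorts first         */
} cost_pair;

static int cost_cmp(const void *a, const void *b)
{
	const cost_pair *a1 = (const cost_pair*)a;
	const cost_pair *b1 = (const cost_pair*)b;
	int ret = b1->cost - a1->cost;  /* higher cost first */
	if (ret == 0)
		ret = a1->idx - b1->idx;    /* deterministic order on ties */
	return ret;
}

int main(void)
{
	cost_pair arr[] = { { 3, 2 }, { 1, 5 }, { 2, 2 } };
	qsort(arr, 3, sizeof(arr[0]), cost_cmp);
	for (int i = 0; i != 3; ++i)
		printf("idx %d cost %d\n", arr[i].idx, arr[i].cost);
	return 0;
}
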
@@ -450,7 +446,7 @@ static void sched_normal(ir_graph *irg)
        be_list_sched_graph(irg, &normal_selector);
 }
 
-BE_REGISTER_MODULE_CONSTRUCTOR(be_init_sched_normal);
+BE_REGISTER_MODULE_CONSTRUCTOR(be_init_sched_normal)
 void be_init_sched_normal(void)
 {
        be_register_scheduler("normal", sched_normal);
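
Dropping the ';' after BE_REGISTER_MODULE_CONSTRUCTOR suggests the macro now expands to a complete construct on its own, so an extra semicolon would only leave a stray empty declaration at file scope. A made-up sketch of that macro pattern (the expansion below is illustrative, not the real BE_REGISTER_MODULE_CONSTRUCTOR):

#include <stdio.h>

/* Illustrative registration macro: it already ends in a complete declaration,
 * so writing ';' after its use would add an empty file-scope declaration,
 * which pedantic C compilers warn about or reject. */
#define REGISTER_MODULE_CONSTRUCTOR(func) \
	void func(void); \
	void (*const func##_ptr)(void) = func;

REGISTER_MODULE_CONSTRUCTOR(init_example)  /* note: no trailing ';' */

void init_example(void)
{
	puts("module registered");
}

int main(void)
{
	init_example_ptr();
	return 0;
}
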