ia32: Use a more logical specification of operand sizes in the binary emitter.
[libfirm] / ir / opt / loop.c
index d8c38ce..b9d98a8 100644
@@ -291,7 +291,6 @@ static void get_loop_info(ir_node *node, void *env)
 
                /* Find the loops head/the blocks with cfpred outside of the loop */
                if (is_Block(node)) {
-                       const ir_edge_t *edge;
                        unsigned outs_n = 0;
 
                        /* Count innerloop branches */
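Note: the local `const ir_edge_t *edge;` can go because the out-edge iteration macros now declare the iterator themselves. A minimal sketch of that style, assuming the updated foreach_out_edge form from iredges.h and the headers loop.c already includes; the loop body is illustrative, not the real counting code:

static unsigned count_block_out_edges(ir_node *const block)
{
	/* The macro declares `edge` itself, so no separate
	 * `const ir_edge_t *edge;` declaration is required any more. */
	unsigned outs_n = 0;
	foreach_out_edge(block, edge) {
		ir_node *const src = get_edge_src_irn(edge);
		(void)src; /* hypothetical: inspect the successor here */
		++outs_n;
	}
	return outs_n;
}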
@@ -438,8 +437,6 @@ static void construct_ssa(ir_node *orig_block, ir_node *orig_val,
 {
        ir_graph *irg;
        ir_mode *mode;
-       const ir_edge_t *edge;
-       const ir_edge_t *next;
 
        assert(orig_block && orig_val && second_block && second_val &&
                        "no parameter of construct_ssa may be NULL");
@@ -460,7 +457,7 @@ static void construct_ssa(ir_node *orig_block, ir_node *orig_val,
        ssa_second_def       = second_val;
 
        /* Only fix the users of the first, i.e. the original node */
-       foreach_out_edge_safe(orig_val, edge, next) {
+       foreach_out_edge_safe(orig_val, edge) {
                ir_node *user = get_edge_src_irn(edge);
                int j = get_edge_src_pos(edge);
                ir_node *user_block = get_nodes_block(user);
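The safe variant now caches the next edge internally, which is why the two local edge pointers above disappear. A hedged sketch of the new calling convention; the rewiring shown is a placeholder, the real code picks the proper SSA definition for each use:

static void rewire_users_sketch(ir_node *const orig_val)
{
	/* Deletion-safe iteration: the macro remembers the next edge itself,
	 * so set_irn_n() may detach the current edge during the walk. */
	foreach_out_edge_safe(orig_val, edge) {
		ir_node *const user = get_edge_src_irn(edge);
		int      const pos  = get_edge_src_pos(edge);
		set_irn_n(user, pos, orig_val); /* placeholder rewiring */
	}
}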
@@ -494,10 +491,9 @@ static void set_unroll_copy(ir_node *n, int nr, ir_node *cp)
        unrolling_node_info *info;
        assert(nr != 0 && "0 reserved");
 
-       info = (unrolling_node_info*)ir_nodemap_get(&map, n);
+       info = ir_nodemap_get(unrolling_node_info, &map, n);
        if (! info) {
-               ir_node **arr = NEW_ARR_D(ir_node*, &obst, unroll_nr);
-               memset(arr, 0, unroll_nr * sizeof(ir_node*));
+               ir_node **const arr = NEW_ARR_DZ(ir_node*, &obst, unroll_nr);
 
                info = OALLOCZ(&obst, unrolling_node_info);
                info->copies = arr;
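Two idioms change in this hunk: the typed `ir_nodemap_get(type, map, node)` macro replaces the explicit cast, and `NEW_ARR_DZ` hands back an already zeroed obstack array, making the `memset` redundant. A sketch of the same pattern under those assumptions; `my_map`, `my_obst` and the helper name are hypothetical stand-ins:

static ir_node **get_or_create_copies(ir_nodemap *const my_map,
                                      struct obstack *const my_obst,
                                      ir_node *const n, int const n_copies)
{
	unrolling_node_info *info = ir_nodemap_get(unrolling_node_info, my_map, n);
	if (info == NULL) {
		/* NEW_ARR_DZ returns a zero-initialised obstack array. */
		ir_node **const arr = NEW_ARR_DZ(ir_node*, my_obst, n_copies);
		info         = OALLOCZ(my_obst, unrolling_node_info);
		info->copies = arr;
		ir_nodemap_insert(my_map, n, info);
	}
	return info->copies;
}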
@@ -513,7 +509,7 @@ static void set_unroll_copy(ir_node *n, int nr, ir_node *cp)
 static ir_node *get_unroll_copy(ir_node *n, int nr)
 {
        ir_node             *cp;
-       unrolling_node_info *info = (unrolling_node_info *)ir_nodemap_get(&map, n);
+       unrolling_node_info *info = ir_nodemap_get(unrolling_node_info, &map, n);
        if (! info)
                return NULL;
 
@@ -533,7 +529,7 @@ static void set_inversion_copy(ir_node *n, ir_node *cp)
 /* Getter of copy of n for inversion */
 static ir_node *get_inversion_copy(ir_node *n)
 {
-       ir_node *cp = (ir_node *)ir_nodemap_get(&map, n);
+       ir_node *cp = ir_nodemap_get(ir_node, &map, n);
        return cp;
 }
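Here the map stores the copy node directly, so the typed macro already yields an `ir_node *` and the old cast vanishes. A minimal set/get pair under the same assumption (hypothetical map parameter and helper names):

static void set_copy_sketch(ir_nodemap *const my_map, ir_node *const n, ir_node *const cp)
{
	ir_nodemap_insert(my_map, n, cp);
}

static ir_node *get_copy_sketch(ir_nodemap *const my_map, ir_node *const n)
{
	/* The stored value is the node itself, so no cast is needed. */
	return ir_nodemap_get(ir_node, my_map, n);
}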
 
@@ -599,11 +595,10 @@ static void extend_ins_by_copy(ir_node *block, int pos)
 {
        ir_node *new_in;
        ir_node *phi;
-       ir_node *pred;
        assert(is_Block(block));
 
        /* Extend block by copy of definition at pos */
-       pred = get_irn_n(block, pos);
+       ir_node *const pred = get_Block_cfgpred(block, pos);
        new_in = get_inversion_copy(pred);
        DB((dbg, LEVEL_5, "Extend block %N by %N cp of %N\n", block, new_in, pred));
        extend_irn(block, new_in, false);
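Using `get_Block_cfgpred()` instead of the generic `get_irn_n()` names the Block-specific relation explicitly. A one-line sketch of the substitution (hypothetical helper name):

static ir_node *cfgpred_at_sketch(ir_node *const block, int const pos)
{
	/* Same operand as get_irn_n(block, pos), but the accessor documents
	 * that we are asking for a Block's control-flow predecessor. */
	return get_Block_cfgpred(block, pos);
}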
@@ -629,14 +624,10 @@ static void extend_ins_by_copy(ir_node *block, int pos)
 /* Returns the number of blocks backedges. With or without alien bes. */
 static int get_backedge_n(ir_node *block, bool with_alien)
 {
-       int i;
-       int be_n = 0;
-       int arity = get_irn_arity(block);
-
-       assert(is_Block(block));
-
-       for (i = 0; i < arity; ++i) {
-               ir_node *pred = get_irn_n(block, i);
+       int       be_n  = 0;
+       int const arity = get_Block_n_cfgpreds(block);
+       for (int i = 0; i < arity; ++i) {
+               ir_node *const pred = get_Block_cfgpred(block, i);
                if (is_backedge(block, i) && (with_alien || is_in_loop(pred)))
                        ++be_n;
        }
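For readability, the rewritten function assembled from this hunk; the trailing return lies outside the shown context and is assumed to be `return be_n;`:

static int get_backedge_n(ir_node *block, bool with_alien)
{
	int       be_n  = 0;
	int const arity = get_Block_n_cfgpreds(block);
	for (int i = 0; i < arity; ++i) {
		ir_node *const pred = get_Block_cfgpred(block, i);
		if (is_backedge(block, i) && (with_alien || is_in_loop(pred)))
			++be_n;
	}
	return be_n; /* assumed: not visible in the hunk */
}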
@@ -680,12 +671,11 @@ static void copy_walk(ir_node *node, walker_condition *walk_condition,
        int arity;
        ir_node *cp;
        ir_node **cpin;
-       ir_graph *irg = current_ir_graph;
 
        /**
         * break condition and cycle resolver, creating temporary node copies
         */
-       if (get_irn_visited(node) >= get_irg_visited(irg)) {
+       if (irn_visited(node)) {
                /* Here we rely on nodestate's copy being initialized with NULL */
                DB((dbg, LEVEL_5, "copy_walk: We have already visited %N\n", node));
                if (get_inversion_copy(node) == NULL) {
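`irn_visited(node)` wraps the comparison of the node's visited counter against its graph's, so the local `irg` handle becomes unnecessary. A hedged sketch of the visit-guard idiom, assuming a walker that marks nodes itself; the helper name is hypothetical:

static void visit_once_sketch(ir_node *const node)
{
	/* Replaces the explicit get_irn_visited(node) >= get_irg_visited(irg)
	 * comparison from the old code. */
	if (irn_visited(node))
		return; /* already handled in this walk */
	mark_irn_visited(node);
	/* hypothetical: process or copy the node here */
}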
@@ -836,15 +826,14 @@ static void unmark_not_allowed_cc_blocks(void)
 
        for(i = 0; i < blocks; ++i) {
                ir_node *block = cc_blocks[i];
-               int a;
-               int arity = get_irn_arity(block);
 
                /* Head is an exception. */
                if (block == loop_head)
                        continue;
 
-               for(a = 0; a < arity; ++a) {
-                       if (! is_nodes_block_marked(get_irn_n(block, a))) {
+               int const arity = get_Block_n_cfgpreds(block);
+               for (int a = 0; a < arity; ++a) {
+                       if (!is_nodes_block_marked(get_Block_cfgpred(block, a))) {
                                set_Block_mark(block, 0);
                                --inversion_blocks_in_cc;
                                DB((dbg, LEVEL_5, "Removed %N from cc (blocks in cc %d)\n",
@@ -930,7 +919,6 @@ static void get_head_outs(ir_node *node, void *env)
  */
 static void find_condition_chain(ir_node *block)
 {
-       const    ir_edge_t *edge;
        bool     mark     = false;
        bool     has_be   = false;
        bool     jmp_only = true;