fix a few errors and warnings in the new pass code; improve some comments
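
Usage sketch for the new pass constructors (illustrative only, not part of
this change; the constructor declarations are assumed to live in lowering.h,
and a real driver would hand the pass to a pass manager instead of calling
the run_on_irg hook by hand):

	#include "lowering.h"   /* assumed home of the pass constructors */
	#include "irpass_t.h"   /* pass struct layout used by the direct call below */

	static void lower_one_graph(ir_graph *irg)
	{
		/* build a graph pass that also lowers bitfield accesses */
		ir_graph_pass_t *hl_pass = lower_highlevel_graph_pass("lower_hl", 1);

		/* normally done by a pass manager; invoke the hook directly here */
		hl_pass->run_on_irg(irg, hl_pass->context);

		/* the const code irg is handled by a separate ir_prog pass;
		 * NULL selects the default name "lower_const_code" */
		ir_prog_pass_t *cc_pass = lower_const_code_pass(NULL);
		(void)cc_pass;  /* would be queued on an ir_prog pass manager */
	}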
diff --git a/ir/lower/lower_hl.c b/ir/lower/lower_hl.c
index 4d5c336..946b724 100644
--- a/ir/lower/lower_hl.c
+++ b/ir/lower/lower_hl.c
@@ -23,9 +23,7 @@
  * @author  Boris Boesler, Goetz Lindenmaier, Michael Beck
  * @version $Id$
  */
-#ifdef HAVE_CONFIG_H
-# include "config.h"
-#endif
+#include "config.h"
 
 #include "lowering.h"
 #include "irmode_t.h"
@@ -37,6 +35,8 @@
 #include "irhooks.h"
 #include "irgmod.h"
 #include "irgwalk.h"
+#include "irtools.h"
+#include "irpass_t.h"
 
 /**
  * Lower a Sel node. Do not touch Sels accessing entities on the frame type.
@@ -52,21 +52,21 @@ static void lower_sel(ir_node *sel) {
 
        assert(is_Sel(sel));
 
+       /* Do not lower frame type/global offset table access: must be lowered by the backend. */
+       ptr = get_Sel_ptr(sel);
+       if (ptr == get_irg_frame(current_ir_graph))
+               return;
+
        ent   = get_Sel_entity(sel);
        owner = get_entity_owner(ent);
 
-       /* Do not lower frame type access: must be lowered by the backend. */
-       if (is_frame_type(owner))
-               return;
-
        /*
-        * Cannot handle value param entities here.
+        * Cannot handle value param entities or frame type entities here.
         * Must be lowered by the backend.
         */
-       if (is_value_param_type(owner))
+       if (is_value_param_type(owner) || is_frame_type(owner))
                return;
 
-       ptr  = get_Sel_ptr(sel);
        dbg  = get_irn_dbg_info(sel);
        mode = get_irn_mode(sel);
 
@@ -79,8 +79,8 @@ static void lower_sel(ir_node *sel) {
                sym.entity_p = ent;
                bl = get_nodes_block(sel);
 
-               cnst = new_rd_SymConst(dbg, irg, bl, mode, sym, symconst_addr_ent);
-               newn = new_rd_Add(dbg, irg, bl, ptr, cnst, mode);
+               cnst = new_rd_SymConst(dbg, irg, mode, sym, symconst_addr_ent);
+               newn = new_rd_Add(dbg, bl, ptr, cnst, mode);
        } else {
                /* not TLS */
 
@@ -101,11 +101,11 @@ static void lower_sel(ir_node *sel) {
                        index = get_Sel_index(sel, 0);
 
                        if (is_Array_type(owner)) {
-                               ir_node *last_size;
                                ir_type *arr_ty = owner;
-                               int dims = get_array_n_dimensions(arr_ty);
-                               int *map = alloca(sizeof(int) * dims);
-                               int i;
+                               int      dims   = get_array_n_dimensions(arr_ty);
+                               int     *map    = ALLOCAN(int, dims);
+                               ir_node *last_size;
+                               int      i;
 
                                assert(dims == get_Sel_n_indexs(sel)
                                        && "array dimension must match number of indices of Sel node");
@@ -121,7 +121,7 @@ static void lower_sel(ir_node *sel) {
 
                                /* Size of the array element */
                                tv = new_tarval_from_long(get_type_size_bytes(basetyp), mode_Int);
-                               last_size = new_rd_Const(dbg, irg, get_irg_start_block(irg), mode_Int, tv);
+                               last_size = new_rd_Const(dbg, irg, tv);
 
                                /*
                                 * We compute the offset part of dimension d_i recursively
@@ -143,12 +143,12 @@ static void lower_sel(ir_node *sel) {
 
                                        assert(irg == current_ir_graph);
                                        if (! is_Unknown(lb))
-                                               lb = new_rd_Conv(dbg, irg, bl, copy_const_value(get_irn_dbg_info(sel), lb), mode_Int);
+                                               lb = new_rd_Conv(dbg, bl, copy_const_value(get_irn_dbg_info(sel), lb), mode_Int);
                                        else
                                                lb = NULL;
 
                                        if (! is_Unknown(ub))
-                                               ub = new_rd_Conv(dbg, irg, bl, copy_const_value(get_irn_dbg_info(sel), ub), mode_Int);
+                                               ub = new_rd_Conv(dbg, bl, copy_const_value(get_irn_dbg_info(sel), ub), mode_Int);
                                        else
                                                ub = NULL;
 
@@ -161,36 +161,36 @@ static void lower_sel(ir_node *sel) {
                                                assert(ub != NULL && "upper bound has to be set in multi-dim array");
 
                                                /* Elements in one Dimension */
-                                               elms = new_rd_Sub(dbg, irg, bl, ub, lb, mode_Int);
+                                               elms = new_rd_Sub(dbg, bl, ub, lb, mode_Int);
                                        }
 
-                                       ind = new_rd_Conv(dbg, irg, bl, get_Sel_index(sel, dim), mode_Int);
+                                       ind = new_rd_Conv(dbg, bl, get_Sel_index(sel, dim), mode_Int);
 
                                        /*
                                         * Normalize index if lower bound is set, otherwise
                                         * assume lower bound == 0
                                         */
                                        if (lb != NULL)
-                                               ind = new_rd_Sub(dbg, irg, bl, ind, lb, mode_Int);
+                                               ind = new_rd_Sub(dbg, bl, ind, lb, mode_Int);
 
-                                       n = new_rd_Mul(dbg, irg, bl, ind, last_size, mode_Int);
+                                       n = new_rd_Mul(dbg, bl, ind, last_size, mode_Int);
 
                                        /*
                                         * see comment above.
                                         */
                                        if (i > 0)
-                                               last_size = new_rd_Mul(dbg, irg, bl, last_size, elms, mode_Int);
+                                               last_size = new_rd_Mul(dbg, bl, last_size, elms, mode_Int);
 
-                                       newn = new_rd_Add(dbg, irg, bl, newn, n, mode);
+                                       newn = new_rd_Add(dbg, bl, newn, n, mode);
                                }
                        } else {
                                /* no array type */
                                ir_mode *idx_mode = get_irn_mode(index);
                                tarval *tv = new_tarval_from_long(get_mode_size_bytes(basemode), idx_mode);
 
-                               newn = new_rd_Add(dbg, irg, bl, get_Sel_ptr(sel),
-                                       new_rd_Mul(dbg, irg, bl, index,
-                                       new_r_Const(irg, get_irg_start_block(irg), idx_mode, tv),
+                               newn = new_rd_Add(dbg, bl, get_Sel_ptr(sel),
+                                       new_rd_Mul(dbg, bl, index,
+                                       new_r_Const(irg, tv),
                                        idx_mode),
                                        mode);
                        }
@@ -203,16 +203,16 @@ static void lower_sel(ir_node *sel) {
 
                        /* We need an additional load when accessing methods from a dispatch table. */
                        tv   = new_tarval_from_long(get_entity_offset(ent), mode_Int);
-                       cnst = new_rd_Const(dbg, irg, get_irg_start_block(irg), mode_Int, tv);
-                       add  = new_rd_Add(dbg, irg, bl, get_Sel_ptr(sel), cnst, mode);
+                       cnst = new_rd_Const(dbg, irg, tv);
+                       add  = new_rd_Add(dbg, bl, get_Sel_ptr(sel), cnst, mode);
 #ifdef DO_CACHEOPT  /* cacheopt version */
-                       newn = new_rd_Load(dbg, irg, bl, get_Sel_mem(sel), sel, ent_mode);
+                       newn = new_rd_Load(dbg, bl, get_Sel_mem(sel), sel, ent_mode, 0);
                        cacheopt_map_addrs_register_node(newn);
                        set_Load_ptr(newn, add);
 #else /* normal code */
-                       newn = new_rd_Load(dbg, irg, bl, get_Sel_mem(sel), add, ent_mode);
+                       newn = new_rd_Load(dbg, bl, get_Sel_mem(sel), add, ent_mode, 0);
 #endif
-                       newn = new_r_Proj(irg, bl, newn, ent_mode, pn_Load_res);
+                       newn = new_r_Proj(bl, newn, ent_mode, pn_Load_res);
 
                } else if (get_entity_owner(ent) != get_glob_type()) {
                        int offset;
@@ -223,13 +223,15 @@ static void lower_sel(ir_node *sel) {
                        newn   = get_Sel_ptr(sel);
                        offset = get_entity_offset(ent);
                        if (offset != 0) {
-                               tv = new_tarval_from_long(offset, mode_Int);
-                               cnst = new_r_Const(irg, get_irg_start_block(irg), mode_Int, tv);
-                               newn = new_rd_Add(dbg, irg, bl, newn, cnst, mode);
+                               ir_mode *mode_UInt = get_reference_mode_unsigned_eq(mode);
+
+                               tv = new_tarval_from_long(offset, mode_UInt);
+                               cnst = new_r_Const(irg, tv);
+                               newn = new_rd_Add(dbg, bl, newn, cnst, mode);
                        }
                } else {
                        /* global_type */
-                       newn = new_rd_SymConst_addr_ent(NULL, current_ir_graph, mode, ent, firm_unknown_type);
+                       newn = new_rd_SymConst_addr_ent(NULL, irg, mode, ent, firm_unknown_type);
                }
        }
        /* run the hooks */
@@ -258,10 +260,7 @@ static void lower_symconst(ir_node *symc) {
                tp   = get_SymConst_type(symc);
                assert(get_type_state(tp) == layout_fixed);
                mode = get_irn_mode(symc);
-               tv   = new_tarval_from_long(get_type_size_bytes(tp), mode);
-               newn = new_r_Const(current_ir_graph,
-                                  get_irg_start_block(current_ir_graph),
-                                  get_irn_mode(symc), tv);
+               newn = new_Const_long(mode, get_type_size_bytes(tp));
                assert(newn);
                /* run the hooks */
                hook_lower(symc);
@@ -272,10 +271,7 @@ static void lower_symconst(ir_node *symc) {
                tp   = get_SymConst_type(symc);
                assert(get_type_state(tp) == layout_fixed);
                mode = get_irn_mode(symc);
-               tv   = new_tarval_from_long(get_type_alignment_bytes(tp), mode);
-               newn = new_r_Const(current_ir_graph,
-                                  get_irg_start_block(current_ir_graph),
-                                  mode, tv);
+               newn = new_Const_long(mode, get_type_alignment_bytes(tp));
                assert(newn);
                /* run the hooks */
                hook_lower(symc);
@@ -292,10 +288,7 @@ static void lower_symconst(ir_node *symc) {
                ent  = get_SymConst_entity(symc);
                assert(get_type_state(get_entity_type(ent)) == layout_fixed);
                mode = get_irn_mode(symc);
-               tv   = new_tarval_from_long(get_entity_offset(ent), mode);
-               newn = new_r_Const(current_ir_graph,
-                                  get_irg_start_block(current_ir_graph),
-                                  mode, tv);
+               newn = new_Const_long(mode, get_entity_offset(ent));
                assert(newn);
                /* run the hooks */
                hook_lower(symc);
@@ -306,17 +299,12 @@ static void lower_symconst(ir_node *symc) {
                ec   = get_SymConst_enum(symc);
                assert(get_type_state(get_enumeration_owner(ec)) == layout_fixed);
                tv   = get_enumeration_value(ec);
-               newn = new_r_Const(current_ir_graph,
-                                 get_irg_start_block(current_ir_graph),
-                                 get_irn_mode(symc), tv);
+               newn = new_Const(tv);
                assert(newn);
                /* run the hooks */
                hook_lower(symc);
                exchange(symc, newn);
                break;
-       case symconst_label:
-               /* leave */
-               break;
 
        default:
                assert(!"unknown SymConst kind");
@@ -352,7 +340,7 @@ static void lower_bitfields_loads(ir_node *proj, ir_node *load) {
        int offset, bit_offset, bits, bf_bits, old_cse;
        dbg_info *db;
 
-       if (get_irn_op(sel) != op_Sel)
+       if (!is_Sel(sel))
                return;
 
        ent     = get_Sel_entity(sel);
@@ -387,7 +375,7 @@ static void lower_bitfields_loads(ir_node *proj, ir_node *load) {
        /* abandon bitfield sel */
        ptr = get_Sel_ptr(sel);
        db  = get_irn_dbg_info(sel);
-       ptr = new_rd_Add(db, current_ir_graph, block, ptr, new_Const_long(mode_Is, offset), get_irn_mode(ptr));
+       ptr = new_rd_Add(db, block, ptr, new_Const_long(mode_Is, offset), get_irn_mode(ptr));
 
        set_Load_ptr(load, ptr);
        set_Load_mode(load, mode);
@@ -396,7 +384,7 @@ static void lower_bitfields_loads(ir_node *proj, ir_node *load) {
        /* create new proj, switch off CSE or we may get the old one back */
        old_cse = get_opt_cse();
        set_opt_cse(0);
-       res = n_proj = new_r_Proj(current_ir_graph, block, load, mode, pn_Load_res);
+       res = n_proj = new_r_Proj(block, load, mode, pn_Load_res);
        set_opt_cse(old_cse);
 
        if (mode_is_signed(mode)) { /* signed */
@@ -404,24 +392,20 @@ static void lower_bitfields_loads(ir_node *proj, ir_node *load) {
                int shift_count_down  = bits - bf_bits;
 
                if (shift_count_up) {
-                       res = new_r_Shl(current_ir_graph, block, res,
-                               new_r_Const(current_ir_graph, block, mode_Iu, new_tarval_from_long(shift_count_up, mode_Iu)), mode);
+                       res = new_r_Shl(block, res, new_Const_long(mode_Iu, shift_count_up), mode);
                }
                if (shift_count_down) {
-                       res = new_r_Shrs(current_ir_graph, block, res,
-                               new_r_Const(current_ir_graph, block, mode_Iu, new_tarval_from_long(shift_count_down, mode_Iu)), mode);
+                       res = new_r_Shrs(block, res, new_Const_long(mode_Iu, shift_count_down), mode);
                }
        } else { /* unsigned */
                int shift_count_down  = bit_offset;
                unsigned mask = ((unsigned)-1) >> (bits - bf_bits);
 
                if (shift_count_down) {
-                       res = new_r_Shr(current_ir_graph, block, res,
-                               new_r_Const(current_ir_graph, block, mode_Iu, new_tarval_from_long(shift_count_down, mode_Iu)), mode);
+                       res = new_r_Shr(block, res, new_Const_long(mode_Iu, shift_count_down), mode);
                }
                if (bits != bf_bits) {
-                       res = new_r_And(current_ir_graph, block, res,
-                               new_r_Const(current_ir_graph, block, mode, new_tarval_from_long(mask, mode)), mode);
+                       res = new_r_And(block, res, new_Const_long(mode, mask), mode);
                }
        }
 
@@ -445,7 +429,7 @@ static void lower_bitfields_stores(ir_node *store) {
        dbg_info *db;
 
        /* check bitfield access */
-       if (get_irn_op(sel) != op_Sel)
+       if (!is_Sel(sel))
                return;
 
        ent     = get_Sel_entity(sel);
@@ -485,26 +469,23 @@ static void lower_bitfields_stores(ir_node *store) {
        /* abandon bitfield sel */
        ptr = get_Sel_ptr(sel);
        db  = get_irn_dbg_info(sel);
-       ptr = new_rd_Add(db, current_ir_graph, block, ptr, new_Const_long(mode_Is, offset), get_irn_mode(ptr));
+       ptr = new_rd_Add(db, block, ptr, new_Const_long(mode_Is, offset), get_irn_mode(ptr));
 
        if (neg_mask) {
                /* there are some bits, normal case */
-               irn  = new_r_Load(current_ir_graph, block, mem, ptr, mode);
-               mem  = new_r_Proj(current_ir_graph, block, irn, mode_M, pn_Load_M);
-               irn  = new_r_Proj(current_ir_graph, block, irn, mode, pn_Load_res);
+               irn  = new_r_Load(block, mem, ptr, mode, 0);
+               mem  = new_r_Proj(block, irn, mode_M, pn_Load_M);
+               irn  = new_r_Proj(block, irn, mode, pn_Load_res);
 
-               irn = new_r_And(current_ir_graph, block, irn,
-                       new_r_Const(current_ir_graph, block, mode, new_tarval_from_long(neg_mask, mode)), mode);
+               irn = new_r_And(block, irn, new_Const_long(mode, neg_mask), mode);
 
                if (bit_offset > 0) {
-                       value = new_r_Shl(current_ir_graph, block, value,
-                               new_r_Const(current_ir_graph, block, mode_Iu, new_tarval_from_long(bit_offset, mode_Iu)), mode);
+                       value = new_r_Shl(block, value, new_Const_long(mode_Iu, bit_offset), mode);
                }
 
-               value = new_r_And(current_ir_graph, block, value,
-                       new_r_Const(current_ir_graph, block, mode, new_tarval_from_long(mask, mode)), mode);
+               value = new_r_And(block, value, new_Const_long(mode, mask), mode);
 
-               value = new_r_Or(current_ir_graph, block, value, irn, mode);
+               value = new_r_Or(block, value, irn, mode);
        }
 
        set_Store_mem(store, mem);
@@ -566,9 +547,8 @@ static void lower_bf_access(ir_node *irn, void *env) {
        {
                long proj     = get_Proj_proj(irn);
                ir_node *pred = get_Proj_pred(irn);
-               ir_op *op     = get_irn_op(pred);
 
-               if ((proj == pn_Load_res) && (op == op_Load))
+               if (proj == pn_Load_res && is_Load(pred))
                        lower_bitfields_loads(irn, pred);
                break;
        }
@@ -588,7 +568,7 @@ static void lower_bf_access(ir_node *irn, void *env) {
  */
 void lower_highlevel_graph(ir_graph *irg, int lower_bitfields) {
 
-       if(lower_bitfields) {
+       if (lower_bitfields) {
                /* First step: lower bitfield access: must be run as long as Sels still
                 * exist. */
                irg_walk_graph(irg, NULL, lower_bf_access, NULL);
@@ -596,11 +576,47 @@ void lower_highlevel_graph(ir_graph *irg, int lower_bitfields) {
 
        /* Finally: lower SymConst-Size and Sel nodes, Casts, unaligned Load/Stores. */
        irg_walk_graph(irg, NULL, lower_irnode, NULL);
-       set_irg_phase_low(irg);
-}  /* lower_highlevel */
+}  /* lower_highlevel_graph */
+
+struct pass_t {
+       ir_graph_pass_t pass;
+       int            lower_bitfields;
+};
 
+/**
+ * Wrapper for running lower_highlevel_graph() as an ir_graph pass.
+ */
+static int lower_highlevel_graph_wrapper(ir_graph *irg, void *context) {
+       struct pass_t *pass = context;
+
+       lower_highlevel_graph(irg, pass->lower_bitfields);
+       return 0;
+}  /* lower_highlevel_graph_wrapper */
+
+ir_graph_pass_t *lower_highlevel_graph_pass(const char *name, int lower_bitfields) {
+       struct pass_t *pass = XMALLOCZ(struct pass_t);
+
+       pass->pass.kind       = k_ir_graph_pass;
+       pass->pass.run_on_irg = lower_highlevel_graph_wrapper;
+       pass->pass.context    = pass;
+       pass->pass.name       = name;
+
+       INIT_LIST_HEAD(&pass->pass.list);
+
+       pass->lower_bitfields = lower_bitfields;
+
+       return &pass->pass;
+}  /* lower_highlevel_graph_pass */
+
+/*
+ * Does the same as lower_highlevel() for all nodes on the const code irg.
+ */
 void lower_const_code(void) {
        walk_const_code(NULL, lower_irnode, NULL);
+}  /* lower_const_code */
+
+ir_prog_pass_t *lower_const_code_pass(const char *name) {
+       return def_prog_pass(name ? name : "lower_const_code", lower_const_code);
 }
 
 /*