add note/error message about critical edge splitting and IJmps
diff --git a/ir/opt/opt_inline.c b/ir/opt/opt_inline.c
index f4dcb8b..f1843c7 100644
@@ -164,15 +164,10 @@ static void copy_node(ir_node *n, void *env) {
        }
        copy_node_attr(n, nn);
 
-#ifdef DEBUG_libfirm
-       {
-               int copy_node_nr = env != NULL;
-               if (copy_node_nr) {
-                       /* for easier debugging, we want to copy the node numbers too */
-                       nn->node_nr = n->node_nr;
-               }
+       if (env != NULL) {
+               /* for easier debugging, we want to copy the node numbers too */
+               nn->node_nr = n->node_nr;
        }
-#endif
 
        set_new_node(n, nn);
        hook_dead_node_elim_subst(current_ir_graph, n, nn);
@@ -221,7 +216,7 @@ static void copy_preds(ir_node *n, void *env) {
                   in array contained Bads.  Now it's possible.
                   We don't call optimize_in_place as it requires
                   that the fields in ir_graph are set properly. */
-               if (!has_Block_label(nn) &&
+               if (!has_Block_entity(nn) &&
                    get_opt_control_flow_straightening() &&
                    get_Block_n_cfgpreds(nn) == 1 &&
                    is_Jmp(get_Block_cfgpred(nn, 0))) {
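
For orientation, the condition above only straightens plain fall-through blocks: the copied block may be merged away when straightening is enabled, it has exactly one predecessor, that predecessor is a Jmp, and the block carries no entity (label) that would have to survive. A minimal sketch of that predicate, using only the accessors visible in this hunk and assuming the includes already present in opt_inline.c (can_straighten_block is a hypothetical name, not a libfirm function):

/* True if the copied block is a pure fall-through that the straightening
 * above may merge away.  Hypothetical helper, shown only to restate the
 * condition from the hunk in one place. */
static int can_straighten_block(ir_node *block)
{
        return !has_Block_entity(block)
            && get_opt_control_flow_straightening()
            && get_Block_n_cfgpreds(block) == 1
            && is_Jmp(get_Block_cfgpred(block, 0));
}
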
@@ -866,18 +861,22 @@ int inline_method(ir_node *call, ir_graph *called_graph) {
 
        mtp = get_entity_type(ent);
        ctp = get_Call_type(call);
-       if (get_method_n_params(mtp) > get_method_n_params(ctp)) {
+       n_params = get_method_n_params(mtp);
+       n_res    = get_method_n_ress(mtp);
+       if (n_params > get_method_n_params(ctp)) {
                /* this is a bad feature of C: without a prototype, we can
                 * call a function with fewer parameters than needed. Currently
                 * we don't support this, although we could use Unknown then. */
                return 0;
        }
+       if (n_res != get_method_n_ress(ctp)) {
+               return 0;
+       }
 
        /* Argh, compiling C has some bad consequences:
         * It is implementation dependent what happens in that case.
         * We support inlining, if the bitsize of the types matches AND
         * the same arithmetic is used. */
-       n_params = get_method_n_params(mtp);
        for (i = n_params - 1; i >= 0; --i) {
                ir_type *param_tp = get_method_param_type(mtp, i);
                ir_type *arg_tp   = get_method_param_type(ctp, i);
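
The body of this per-parameter loop is cut off by the hunk context; going by the comment above and the matching result-type check added in the next hunk, it presumably amounts to something like the following sketch (not the verbatim upstream code):

        for (i = n_params - 1; i >= 0; --i) {
                ir_type *param_tp = get_method_param_type(mtp, i);
                ir_type *arg_tp   = get_method_param_type(ctp, i);

                if (param_tp != arg_tp) {
                        ir_mode *param_mode = get_type_mode(param_tp);
                        ir_mode *arg_mode   = get_type_mode(arg_tp);
                        /* no mode (e.g. a compound type): we cannot just
                         * reinterpret the bits, so refuse to inline */
                        if (param_mode == NULL || arg_mode == NULL)
                                return 0;
                        if (get_mode_size_bits(param_mode) != get_mode_size_bits(arg_mode))
                                return 0;
                        if (get_mode_arithmetic(param_mode) != get_mode_arithmetic(arg_mode))
                                return 0;
                        /* otherwise we can simply "reinterpret" the bits */
                }
        }
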
@@ -895,6 +894,22 @@ int inline_method(ir_node *call, ir_graph *called_graph) {
                        /* otherwise we can simply "reinterpret" the bits */
                }
        }
+       for (i = n_res - 1; i >= 0; --i) {
+               ir_type *decl_res_tp = get_method_res_type(mtp, i);
+               ir_type *used_res_tp = get_method_res_type(ctp, i);
+
+               if (decl_res_tp != used_res_tp) {
+                       ir_mode *decl_mode = get_type_mode(decl_res_tp);
+                       ir_mode *used_mode = get_type_mode(used_res_tp);
+                       if (decl_mode == NULL || used_mode == NULL)
+                               return 0;
+                       if (get_mode_size_bits(decl_mode) != get_mode_size_bits(used_mode))
+                               return 0;
+                       if (get_mode_arithmetic(decl_mode) != get_mode_arithmetic(used_mode))
+                               return 0;
+                       /* otherwise we can "reinterpret" the bits */
+               }
+       }
 
        irg = get_irn_irg(call);
 
@@ -931,6 +946,7 @@ int inline_method(ir_node *call, ir_graph *called_graph) {
        set_irg_doms_inconsistent(irg);
        set_irg_loopinfo_inconsistent(irg);
        set_irg_callee_info_state(irg, irg_callee_info_inconsistent);
+       set_irg_entity_usage_state(irg, ir_entity_usage_not_computed);
 
        /* -- Check preconditions -- */
        assert(is_Call(call));
@@ -1119,11 +1135,18 @@ int inline_method(ir_node *call, ir_graph *called_graph) {
        /* Now the real results */
        if (n_res > 0) {
                for (j = 0; j < n_res; j++) {
+                       ir_type *res_type = get_method_res_type(ctp, j);
+                       ir_mode *res_mode = get_type_mode(res_type);
                        n_ret = 0;
                        for (i = 0; i < arity; i++) {
                                ret = get_Block_cfgpred(end_bl, i);
                                if (is_Return(ret)) {
-                                       cf_pred[n_ret] = get_Return_res(ret, j);
+                                       ir_node *res = get_Return_res(ret, j);
+                                       if (get_irn_mode(res) != res_mode) {
+                                               ir_node *block = get_nodes_block(res);
+                                               res = new_r_Conv(irg, block, res, res_mode);
+                                       }
+                                       cf_pred[n_ret] = res;
                                        n_ret++;
                                }
                        }
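
Taken on its own, the fixup added here follows a small pattern: if the value coming out of a callee Return does not already carry the mode the call site expects (the earlier checks ensured the modes at least agree in size and arithmetic), it is wrapped in a Conv created in the Return's own block. A sketch of that pattern as a stand-alone helper, assuming the includes already present in opt_inline.c (fit_result_mode is a hypothetical name):

/* Hypothetical helper restating the fixup above: adapt a result value
 * to the mode expected at the call site by inserting a Conv in the
 * block that produced the value. */
static ir_node *fit_result_mode(ir_graph *irg, ir_node *res, ir_mode *res_mode)
{
        if (get_irn_mode(res) != res_mode) {
                ir_node *block = get_nodes_block(res);
                res = new_r_Conv(irg, block, res, res_mode);
        }
        return res;
}
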