X-Git-Url: http://nsz.repo.hu/git/?a=blobdiff_plain;f=ir%2Fopt%2Fopt_inline.c;h=eb013651f9cb5838d16214bb56105a5548014613;hb=6b124543aff56817fcfe6d5b5ff181ac5c790e73;hp=7156cbe01e652771f363488ee5b5ffe3b0749ac8;hpb=6f029dc379ebe2dc5e5ccc4dcdf165f95aac7a68;p=libfirm

diff --git a/ir/opt/opt_inline.c b/ir/opt/opt_inline.c
index 7156cbe01..eb013651f 100644
--- a/ir/opt/opt_inline.c
+++ b/ir/opt/opt_inline.c
@@ -50,7 +50,7 @@
 #include "irouts.h"
 #include "irloop_t.h"
 #include "irbackedge_t.h"
-#include "opt_inline_t.h"
+#include "opt_init.h"
 #include "cgana.h"
 #include "trouts.h"
 #include "error.h"
@@ -703,7 +703,7 @@ void survive_dce_register_irn(survive_dce_t *sd, ir_node **place) {
 	if (*place != NULL) {
 		ir_node *irn = *place;
 		survive_dce_list_t *curr = pmap_get(sd->places, irn);
-		survive_dce_list_t *nw = obstack_alloc(&sd->obst, sizeof(nw[0]));
+		survive_dce_list_t *nw = OALLOC(&sd->obst, survive_dce_list_t);
 
 		nw->next = curr;
 		nw->place = place;
@@ -837,9 +837,8 @@ static int can_inline(ir_node *call, ir_graph *called_graph) {
 }
 
 enum exc_mode {
-	exc_handler = 0, /**< There is a handler. */
-	exc_to_end = 1, /**< Branches to End. */
-	exc_no_handler = 2 /**< Exception handling not represented. */
+	exc_handler, /**< There is a handler. */
+	exc_no_handler /**< Exception handling not represented. */
 };
 
 /* Inlines a method at the given call site. */
@@ -853,6 +852,7 @@ int inline_method(ir_node *call, ir_graph *called_graph) {
 	ir_node **args_in;
 	ir_node *ret, *phi;
 	int arity, n_ret, n_exc, n_res, i, n, j, rem_opt, irn_arity, n_params;
+	int n_mem_phi;
 	enum exc_mode exc_handling;
 	ir_type *called_frame, *curr_frame, *mtp, *ctp;
 	ir_entity *ent;
@@ -964,18 +964,15 @@ int inline_method(ir_node *call, ir_graph *called_graph) {
 	   for the Call node, or do we branch directly to End on an exception?
 	   exc_handling: 0 There is a handler.
-	   1 Branches to End.
 	   2 Exception handling not represented in Firm. -- */
 	{
-		ir_node *proj, *Mproj = NULL, *Xproj = NULL;
+		ir_node *Xproj = NULL;
+		ir_node *proj;
 		for (proj = get_irn_link(call); proj; proj = get_irn_link(proj)) {
 			long proj_nr = get_Proj_proj(proj);
 			if (proj_nr == pn_Call_X_except) Xproj = proj;
-			if (proj_nr == pn_Call_M_except) Mproj = proj;
 		}
-		if (Mproj) { assert(Xproj); exc_handling = exc_handler; } /* Mproj */
-		else if (Xproj) { exc_handling = exc_to_end; } /* !Mproj && Xproj */
-		else { exc_handling = exc_no_handler; } /* !Mproj && !Xproj */
+		exc_handling = Xproj != NULL ? exc_handler : exc_no_handler;
 	}
 
 	/* create the argument tuple */
@@ -1123,16 +1120,24 @@ int inline_method(ir_node *call, ir_graph *called_graph) {
 	   Add Phi node if there was more than one Return. -- */
 	turn_into_tuple(post_call, pn_Call_max);
 	/* First the Memory-Phi */
-	n_ret = 0;
+	n_mem_phi = 0;
 	for (i = 0; i < arity; i++) {
 		ret = get_Block_cfgpred(end_bl, i);
 		if (is_Return(ret)) {
-			cf_pred[n_ret] = get_Return_mem(ret);
-			n_ret++;
+			cf_pred[n_mem_phi++] = get_Return_mem(ret);
+		}
+		/* memory output for some exceptions is directly connected to End */
+		if (is_Call(ret)) {
+			cf_pred[n_mem_phi++] = new_r_Proj(get_nodes_block(ret), ret, mode_M, 3);
+		} else if (is_fragile_op(ret)) {
+			/* We rely that all cfops have the memory output at the same position. */
+			cf_pred[n_mem_phi++] = new_r_Proj(get_nodes_block(ret), ret, mode_M, 0);
+		} else if (is_Raise(ret)) {
+			cf_pred[n_mem_phi++] = new_r_Proj(get_nodes_block(ret), ret, mode_M, 1);
 		}
 	}
-	phi = new_Phi(n_ret, cf_pred, mode_M);
-	set_Tuple_pred(call, pn_Call_M_regular, phi);
+	phi = new_Phi(n_mem_phi, cf_pred, mode_M);
+	set_Tuple_pred(call, pn_Call_M, phi);
 	/* Conserve Phi-list for further inlinings -- but might be optimized */
 	if (get_nodes_block(phi) == post_bl) {
 		set_irn_link(phi, get_irn_link(post_bl));
@@ -1178,15 +1183,16 @@ int inline_method(ir_node *call, ir_graph *called_graph) {
 	set_Tuple_pred(call, pn_Call_P_value_res_base, new_Bad());
 
 	/* Finally the exception control flow.
-	   We have two (three) possible situations:
-	   First if the Call branches to an exception handler: We need to add a Phi node to
+	   We have two possible situations:
+	   First if the Call branches to an exception handler:
+	   We need to add a Phi node to
 	   collect the memory containing the exception objects. Further we need
 	   to add another block to get a correct representation of this Phi. To
 	   this block we add a Jmp that resolves into the X output of the Call
 	   when the Call is turned into a tuple.
-	   Second the Call branches to End, the exception is not handled. Just
-	   add all inlined exception branches to the End node.
-	   Third: there is no Exception edge at all. Handle as case two. */
+	   Second: There is no exception edge. Just add all inlined exception
+	   branches to the End node.
+	*/
 	if (exc_handling == exc_handler) {
 		n_exc = 0;
 		for (i = 0; i < arity; i++) {
@@ -1199,31 +1205,16 @@ int inline_method(ir_node *call, ir_graph *called_graph) {
 			}
 		}
 		if (n_exc > 0) {
-			ir_node *block = new_Block(n_exc, cf_pred);
-			set_cur_block(block);
-
-			set_Tuple_pred(call, pn_Call_X_except, new_Jmp());
-			/* The Phi for the memories with the exception objects */
-			n_exc = 0;
-			for (i = 0; i < arity; i++) {
-				ir_node *ret;
-				ret = skip_Proj(get_Block_cfgpred(end_bl, i));
-				if (is_Call(ret)) {
-					cf_pred[n_exc] = new_r_Proj(get_nodes_block(ret), ret, mode_M, 3);
-					n_exc++;
-				} else if (is_fragile_op(ret)) {
-					/* We rely that all cfops have the memory output at the same position. */
-					cf_pred[n_exc] = new_r_Proj(get_nodes_block(ret), ret, mode_M, 0);
-					n_exc++;
-				} else if (is_Raise(ret)) {
-					cf_pred[n_exc] = new_r_Proj(get_nodes_block(ret), ret, mode_M, 1);
-					n_exc++;
-				}
+			if (n_exc == 1) {
+				/* simple fix */
+				set_Tuple_pred(call, pn_Call_X_except, cf_pred[0]);
+			} else {
+				ir_node *block = new_Block(n_exc, cf_pred);
+				set_cur_block(block);
+				set_Tuple_pred(call, pn_Call_X_except, new_Jmp());
 			}
-			set_Tuple_pred(call, pn_Call_M_except, new_Phi(n_exc, cf_pred, mode_M));
 		} else {
 			set_Tuple_pred(call, pn_Call_X_except, new_Bad());
-			set_Tuple_pred(call, pn_Call_M_except, new_Bad());
 		}
 	} else {
 		ir_node *main_end_bl;
@@ -1251,7 +1242,6 @@ int inline_method(ir_node *call, ir_graph *called_graph) {
 			end_preds[main_end_bl_arity + i] = cf_pred[i];
 		set_irn_in(main_end_bl, n_exc + main_end_bl_arity, end_preds);
 		set_Tuple_pred(call, pn_Call_X_except, new_Bad());
-		set_Tuple_pred(call, pn_Call_M_except, new_Bad());
 		free(end_preds);
 	}
 	free(res_pred);
@@ -1295,12 +1285,17 @@ typedef struct _inline_env_t {
  *
  * @param call the call node
  */
-static ir_graph *get_call_called_irg(ir_node *call) {
+static ir_graph *get_call_called_irg(ir_node *call)
+{
 	ir_node *addr;
 
 	addr = get_Call_ptr(call);
 	if (is_Global(addr)) {
 		ir_entity *ent = get_Global_entity(addr);
+		/* we don't know which function gets finally bound to a weak symbol */
+		if (get_entity_linkage(ent) & IR_LINKAGE_WEAK)
+			return NULL;
+
 		return get_entity_irg(ent);
 	}
 
@@ -1318,7 +1313,7 @@ static void collect_calls(ir_node *call, void *env) {
 	if (called_irg != NULL) {
 		/* The Call node calls a locally defined method. Remember to inline. */
 		inline_env_t *ienv = env;
-		call_entry *entry = obstack_alloc(&ienv->obst, sizeof(*entry));
+		call_entry *entry = OALLOC(&ienv->obst, call_entry);
 		entry->call = call;
 		entry->callee = called_irg;
 		entry->loop_depth = 0;
@@ -1367,8 +1362,7 @@ void inline_small_irgs(ir_graph *irg, int size) {
 		ir_graph *callee = entry->callee;
 		irg_inline_property prop = get_irg_inline_property(callee);
 
-		if (prop == irg_inline_forbidden || get_irg_additional_properties(callee) & mtp_property_weak) {
-			/* do not inline forbidden / weak graphs */
+		if (prop == irg_inline_forbidden) {
 			continue;
 		}
 
@@ -1422,7 +1416,6 @@ typedef struct {
 	unsigned n_callers; /**< Number of known graphs that call this graphs. */
 	unsigned n_callers_orig; /**< for statistics */
 	unsigned got_inline:1; /**< Set, if at least one call inside this graph was inlined. */
-	unsigned local_vars:1; /**< Set, if an inlined function got the address of a local variable. */
 	unsigned recursive:1; /**< Set, if this function is self recursive. */
 } inline_irg_env;
 
@@ -1430,7 +1423,7 @@ typedef struct {
  * Allocate a new environment for inlining.
  */
 static inline_irg_env *alloc_inline_irg_env(void) {
-	inline_irg_env *env = obstack_alloc(&temp_obst, sizeof(*env));
+	inline_irg_env *env = OALLOC(&temp_obst, inline_irg_env);
 	INIT_LIST_HEAD(&env->calls);
 	env->local_weights = NULL;
 	env->n_nodes = -2; /* do not count count Start, End */
@@ -1441,7 +1434,6 @@ static inline_irg_env *alloc_inline_irg_env(void) {
 	env->n_callers = 0;
 	env->n_callers_orig = 0;
 	env->got_inline = 0;
-	env->local_vars = 0;
 	env->recursive = 0;
 	return env;
 }
@@ -1503,7 +1495,7 @@ static void collect_calls2(ir_node *call, void *ctx) {
 		x->recursive = 1;
 
 	/* link it in the list of possible inlinable entries */
-	entry = obstack_alloc(&temp_obst, sizeof(*entry));
+	entry = OALLOC(&temp_obst, call_entry);
 	entry->call = call;
 	entry->callee = callee;
 	entry->loop_depth = get_irn_loop(get_nodes_block(call))->depth;
@@ -1543,7 +1535,7 @@ inline static int is_smaller(ir_graph *callee, unsigned size) {
  */
 static call_entry *duplicate_call_entry(const call_entry *entry,
                                         ir_node *new_call, int loop_depth_delta) {
-	call_entry *nentry = obstack_alloc(&temp_obst, sizeof(*nentry));
+	call_entry *nentry = OALLOC(&temp_obst, call_entry);
 	nentry->call = new_call;
 	nentry->callee = entry->callee;
 	nentry->benefice = entry->benefice;
@@ -1648,8 +1640,7 @@ void inline_leave_functions(unsigned maxsize, unsigned leavesize,
 			callee = entry->callee;
 
 			prop = get_irg_inline_property(callee);
-			if (prop == irg_inline_forbidden || get_irg_additional_properties(callee) & mtp_property_weak) {
-				/* do not inline forbidden / weak graphs */
+			if (prop == irg_inline_forbidden) {
 				continue;
 			}
 
@@ -1703,8 +1694,7 @@ void inline_leave_functions(unsigned maxsize, unsigned leavesize,
 			callee = entry->callee;
 
 			prop = get_irg_inline_property(callee);
-			if (prop == irg_inline_forbidden || get_irg_additional_properties(callee) & mtp_property_weak) {
-				/* do not inline forbidden / weak graphs */
+			if (prop == irg_inline_forbidden) {
 				continue;
 			}
 
@@ -2007,7 +1997,7 @@ static int calc_inline_benefice(call_entry *entry, ir_graph *callee)
 		return entry->benefice = INT_MIN;
 	}
 
-	if (get_irg_additional_properties(callee) & (mtp_property_noreturn | mtp_property_weak)) {
+	if (get_irg_additional_properties(callee) & mtp_property_noreturn) {
 		DB((dbg, LEVEL_2, "In %+F Call to %+F: not inlining noreturn or weak\n",
 		    call, callee));
 		return entry->benefice = INT_MIN;
@@ -2060,7 +2050,7 @@ static int calc_inline_benefice(call_entry *entry, ir_graph *callee)
 
 	callee_env = get_irg_link(callee);
 	if (callee_env->n_callers == 1 &&
 	    callee != current_ir_graph &&
-	    get_entity_visibility(ent) == visibility_local) {
+	    !entity_is_externally_visible(ent)) {
 		weight += 700;
 	}
 
@@ -2299,8 +2289,6 @@ static void inline_into(ir_graph *irg, unsigned maxsize,
 
 		/* callee was inline. Append it's call list. */
 		env->got_inline = 1;
-		if (curr_call->local_adr)
-			env->local_vars = 1;
 		--env->n_call_nodes;
 
 		/* we just generate a bunch of new calls */
@@ -2337,7 +2325,9 @@ static void inline_into(ir_graph *irg, unsigned maxsize,
  * Heuristic inliner. Calculates a benefice value for every call and inlines
  * those calls with a value higher than the threshold.
  */
-void inline_functions(unsigned maxsize, int inline_threshold) {
+void inline_functions(unsigned maxsize, int inline_threshold,
+                      opt_ptr after_inline_opt)
+{
 	inline_irg_env *env;
 	int i, n_irgs;
 	ir_graph *rem;
@@ -2383,21 +2373,9 @@ void inline_functions(unsigned maxsize, int inline_threshold) {
 		ir_graph *irg = irgs[i];
 
 		env = get_irg_link(irg);
-		if (env->got_inline) {
+		if (env->got_inline && after_inline_opt != NULL) {
 			/* this irg got calls inlined: optimize it */
-			if (get_opt_combo()) {
-				if (env->local_vars) {
-					scalar_replacement_opt(irg);
-				}
-				combo(irg);
-			} else {
-				if (env->local_vars) {
-					if (scalar_replacement_opt(irg)) {
-						optimize_graph_df(irg);
-					}
-				}
-				optimize_cf(irg);
-			}
+			after_inline_opt(irg);
 		}
 		if (env->got_inline || (env->n_callers_orig != env->n_callers)) {
 			DB((dbg, LEVEL_1, "Nodes:%3d ->%3d, calls:%3d ->%3d, callers:%3d ->%3d, -- %s\n",
@@ -2427,6 +2405,7 @@ struct inline_functions_pass_t {
 	ir_prog_pass_t pass;
 	unsigned maxsize;
 	int inline_threshold;
+	opt_ptr after_inline_opt;
 };
 
 /**
@@ -2436,18 +2415,21 @@ static int inline_functions_wrapper(ir_prog *irp, void *context) {
 	struct inline_functions_pass_t *pass = context;
 
 	(void)irp;
-	inline_functions(pass->maxsize, pass->inline_threshold);
+	inline_functions(pass->maxsize, pass->inline_threshold,
+	                 pass->after_inline_opt);
 	return 0;
 }
 
 /* create a ir_prog pass for inline_functions */
 ir_prog_pass_t *inline_functions_pass(
-	const char *name, unsigned maxsize, int inline_threshold) {
+	const char *name, unsigned maxsize, int inline_threshold,
+	opt_ptr after_inline_opt) {
 	struct inline_functions_pass_t *pass =
 		XMALLOCZ(struct inline_functions_pass_t);
 
 	pass->maxsize = maxsize;
 	pass->inline_threshold = inline_threshold;
+	pass->after_inline_opt = after_inline_opt;
 
 	return def_prog_pass_constructor(
 		&pass->pass, name ? name : "inline_functions",
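
The hunks above change inline_functions() and inline_functions_pass() to take an after_inline_opt callback instead of hard-wiring the post-inline cleanup. A minimal sketch of how a driver might use the new parameter follows; it assumes opt_ptr is libfirm's per-graph callback type (void (*)(ir_graph *)), that the cleanup entry points used below are declared in iroptimize.h, and that the helper names as well as the size/threshold values are purely illustrative.

#include "iroptimize.h" /* assumed header declaring the inliner and cleanup passes */

/* Hypothetical after-inline callback; it mirrors the per-graph cleanup that
 * the removed in-place code performed: scalar replacement, local data-flow
 * optimization, then control-flow optimization. */
static void cleanup_after_inline(ir_graph *irg)
{
	if (scalar_replacement_opt(irg))
		optimize_graph_df(irg);
	optimize_cf(irg);
}

static void run_inlining(void)
{
	/* direct invocation; 750 and 0 are only illustrative values */
	inline_functions(750, 0, cleanup_after_inline);

	/* or wrapped as an ir_prog pass */
	ir_prog_pass_t *pass =
		inline_functions_pass("inline", 750, 0, cleanup_after_inline);
	(void)pass; /* would normally be handed to a pass manager */
}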