2 * Copyright (C) 1995-2011 University of Karlsruhe. All right reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Lowering of Calls with compound parameters and return types.
23 * @author Michael Beck
39 #include "iroptimize.h"
44 /** A type map for def_find_pointer_type: maps an element type to its cached
 *  pointer type.  Created in lower_calls_with_compounds() (pmap_create_ex) and
 *  destroyed there after lowering; filled lazily by def_find_pointer_type(). */
45 static pmap *type_map;
48 * Default implementation for finding a pointer type for a given element type.
49 * Simply creates a new one (and caches it in the global type_map).
/* NOTE(review): this excerpt is truncated — the signature's continuation line
 * (carrying the 'alignment' parameter), the function's return statement and
 * closing braces are not visible below.  Confirm against the full file. */
51 static ir_type *def_find_pointer_type(ir_type *e_type, ir_mode *mode,
54 /* Mode and alignment are always identical in all calls to def_find_pointer_type(), so
55 we simply can use a map from the element type to the pointer type. */
/* Cache lookup: reuse a previously built pointer type for this element type. */
56 ir_type *res = (ir_type*)pmap_get(type_map, e_type);
57 if (res == NULL || get_type_mode(res) != mode) {
/* Cache miss (or cached entry has the wrong mode): build a fresh pointer
 * type with the requested mode/alignment and remember it. */
58 res = new_type_pointer(e_type);
59 set_type_mode(res, mode);
60 set_type_alignment_bytes(res, alignment);
61 pmap_insert(type_map, e_type, res);
67 * Creates a new lowered type for a method type with compound
68 * arguments. The new type is associated to the old one and returned.
70 * @param lp parameter struct
71 * @param mtp the method type to lower
73 * The current implementation expects that a lowered type already
74 * includes the necessary changes ...
/* NOTE(review): many interior lines of this function are missing from this
 * excerpt (early-return on an already-lowered type, several closing braces,
 * the final return).  The comments below describe only the visible logic. */
76 static ir_type *create_modified_mtd_type(const lower_params_t *lp, ir_type *mtp)
78 ir_type *lowered, *ptr_tp;
79 ir_type **params, **results, *res_tp;
81 ir_mode *modes[MAX_REGISTER_RET_VAL];
82 size_t n_ress, n_params, nn_ress, nn_params, i;
83 add_hidden hidden_params;
/* If mtp was lowered before, presumably that cached type is returned here
 * (the check itself is not visible in this excerpt — TODO confirm). */
87 lowered = get_type_lowered(mtp);
91 n_ress = get_method_n_ress(mtp);
92 NEW_ARR_A(ir_type *, results, n_ress);
/* Worst case: every result becomes an extra (hidden) pointer parameter,
 * hence n_params + n_ress slots for the new parameter list. */
94 n_params = get_method_n_params(mtp);
95 NEW_ARR_A(ir_type *, params, n_params + n_ress);
97 NEW_ARR_A(size_t, param_map, n_params + n_ress);
/* ADD_HIDDEN_SMART degrades to "in front" for variadic functions, because
 * hidden parameters cannot be appended after a variable argument list. */
99 hidden_params = lp->hidden_params;
100 if (hidden_params == ADD_HIDDEN_SMART &&
101 get_method_variadicity(mtp) == variadicity_variadic)
102 hidden_params = ADD_HIDDEN_ALWAYS_IN_FRONT;
104 if (hidden_params == ADD_HIDDEN_ALWAYS_IN_FRONT) {
105 /* add hidden in front */
106 for (nn_ress = nn_params = i = 0; i < n_ress; ++i) {
107 res_tp = get_method_res_type(mtp, i);
109 if (is_compound_type(res_tp)) {
112 if (lp->flags & LF_SMALL_CMP_IN_REGS)
113 n_regs = lp->ret_compound_in_regs(res_tp, modes);
116 /* this compound will be returned solely in registers */
117 panic("Returning compounds in registers not yet implemented");
120 /* this compound will be allocated on callers stack and its
121 address will be transmitted as a hidden parameter. */
122 ptr_tp = lp->find_pointer_type(res_tp, get_modeP_data(), lp->def_ptr_alignment);
123 params[nn_params] = ptr_tp;
/* param_map entries >= n_params mark hidden parameters derived from result i. */
124 param_map[nn_params] = n_params + i;
127 if (lp->flags & LF_RETURN_HIDDEN)
128 results[nn_ress++] = ptr_tp;
/* Non-compound results are kept unchanged. */
132 results[nn_ress++] = res_tp;
/* Copy the original (non-hidden) parameters after the hidden ones. */
136 for (i = 0; i < n_params; ++i, ++nn_params) {
137 params[nn_params] = get_method_param_type(mtp, i);
138 param_map[nn_params] = i;
141 /* add hidden parameters last */
142 assert(get_method_variadicity(mtp) == variadicity_non_variadic &&
143 "Cannot add hidden parameters at end of variadic function");
/* Original parameters keep their positions ... */
145 for (nn_params = 0; nn_params < n_params; ++nn_params) {
146 params[nn_params] = get_method_param_type(mtp, nn_params);
147 param_map[nn_params] = nn_params;
/* ... and every compound result appends one hidden pointer parameter. */
150 for (nn_ress = i = 0; i < n_ress; ++i) {
151 res_tp = get_method_res_type(mtp, i);
153 if (is_compound_type(res_tp)) {
154 params[nn_params] = lp->find_pointer_type(res_tp, get_modeP_data(), lp->def_ptr_alignment);
155 param_map[nn_params] = n_params + i;
158 results[nn_ress++] = res_tp;
163 /* create the new type */
164 lowered = new_d_type_method(nn_params, nn_ress, get_type_dbg_info(mtp));
167 for (i = 0; i < nn_params; ++i)
168 set_method_param_type(lowered, i, params[i]);
169 for (i = 0; i < nn_ress; ++i)
170 set_method_res_type(lowered, i, results[i]);
172 var = get_method_variadicity(mtp);
173 set_method_variadicity(lowered, var);
175 /* associate the lowered type with the original one for easier access */
/* Mark the lowered type as using the compound-return calling convention. */
177 set_method_calling_convention(lowered, get_method_calling_convention(mtp) | cc_compound_ret);
180 set_lowered_type(mtp, lowered);
/** A call-list entry: one per Call node with a compound return that must be
 *  fixed up.  NOTE(review): the `struct cl_entry {` opener line is missing
 *  from this excerpt; only the typedef and the members are visible. */
188 typedef struct cl_entry cl_entry;
190 cl_entry *next; /**< Pointer to the next entry. */
191 ir_node *call; /**< Pointer to the Call node. */
192 ir_node *copyb; /**< List of all CopyB nodes (linked via their irn links). */
196 * Walker environment for fix_args_and_collect_calls().
198 typedef struct wlk_env_t {
199 size_t arg_shift; /**< The Argument index shift for parameters. */
200 size_t first_hidden; /**< The index of the first hidden argument. */
201 struct obstack obst; /**< An obstack to allocate the data on. */
202 cl_entry *cl_list; /**< The call list. */
203 pmap *dummy_map; /**< A map for finding the dummy arguments. */
204 unsigned dnr; /**< The dummy index number. */
205 const lower_params_t *params; /**< Lowering parameters. */
206 ir_type *lowered_mtp; /**< The lowered method type of the current irg if any. */
207 ir_type *value_params; /**< The value params type if any. */
208 unsigned only_local_mem:1; /**< Set if only local memory access was found. */
209 unsigned changed:1; /**< Set if the current graph was changed. */
/* NOTE(review): the closing `} wlk_env;` line is not visible in this excerpt. */
213 * Return the call list entry of a call node.
214 * If no entry exists yet, allocate one and enter the node into
215 * the call list of the environment.
217 * @param call A Call node.
218 * @param env The environment.
220 static cl_entry *get_Call_entry(ir_node *call, wlk_env *env)
/* The entry (if any) is cached in the node's link field. */
222 cl_entry *res = (cl_entry*)get_irn_link(call);
/* NOTE(review): the `if (res == NULL)` guard line is missing from this
 * excerpt; the allocation below is only reached on a cache miss. */
224 cl_entry *res = OALLOC(&env->obst, cl_entry);
/* Prepend to the environment's call list and remember it in the link. */
225 res->next = env->cl_list;
228 set_irn_link(call, res);
235 * Finds the base address of an address by skipping Sel's and address
/* calculation (Add/Sub on pointers). */
238 * @param ptr the address
239 * @param pEnt points to the base entity if any
/* NOTE(review): the surrounding loop construct and the return statement are
 * missing from this excerpt; the visible body walks one step at a time. */
241 static ir_node *find_base_adr(ir_node *ptr, ir_entity **pEnt)
243 ir_entity *ent = NULL;
244 assert(mode_is_reference(get_irn_mode(ptr)));
/* Sel: remember the selected entity and continue at its base pointer. */
248 ent = get_Sel_entity(ptr);
249 ptr = get_Sel_ptr(ptr);
251 else if (is_Add(ptr)) {
252 ir_node *left = get_Add_left(ptr);
/* Follow whichever Add operand carries the pointer mode. */
253 if (mode_is_reference(get_irn_mode(left)))
256 ptr = get_Add_right(ptr);
258 } else if (is_Sub(ptr)) {
/* For Sub, the pointer is always the left operand. */
259 ptr = get_Sub_left(ptr);
269 * Check if a given pointer represents non-local memory.
/* Clears env->only_local_mem when the pointed-to storage is neither a local
 * variable nor malloc'ed memory. */
271 static void check_ptr(ir_node *ptr, wlk_env *env)
273 ir_storage_class_class_t sc;
276 /* still alias free */
277 ptr = find_base_adr(ptr, &ent);
278 sc = get_base_sc(classify_pointer(ptr, ent));
279 if (sc != ir_sc_localvar && sc != ir_sc_malloced) {
280 /* non-local memory access */
281 env->only_local_mem = 0;
286 * Post walker: shift all parameter indexes
287 * and collect Calls with compound returns in the call list.
288 * If a non-alias free memory access is found, reset the alias free
/* flag (env->only_local_mem). */
/* NOTE(review): the `case iro_...:` labels of the switch are missing from
 * this excerpt.  From the visible bodies the cases appear to handle, in
 * order: Sel, Load/Store, Proj, Call and CopyB — TODO confirm. */
291 static void fix_args_and_collect_calls(ir_node *n, void *ctx)
293 wlk_env *env = (wlk_env*)ctx;
297 switch (get_irn_opcode(n)) {
/* (Sel) Rewire Sels of value-parameter entities to the entities of the
 * lowered method type, shifted by arg_shift. */
299 if (env->lowered_mtp != NULL && env->value_params != NULL) {
300 ir_entity *ent = get_Sel_entity(n);
302 if (get_entity_owner(ent) == env->value_params) {
303 size_t pos = get_struct_member_index(env->value_params, ent) + env->arg_shift;
306 new_ent = get_method_value_param_ent(env->lowered_mtp, pos);
307 set_entity_ident(new_ent, get_entity_ident(ent));
308 set_Sel_entity(n, new_ent);
/* (Load/Store) While still alias-free, classify the accessed pointer;
 * input 1 is the address operand. */
314 if (env->only_local_mem) {
315 ptr = get_irn_n(n, 1);
/* (Proj) Shift Proj numbers of the Args node by the hidden-argument count. */
320 if (env->arg_shift > 0) {
321 ir_node *pred = get_Proj_pred(n);
323 /* Fix the argument numbers */
324 if (pred == get_irg_args(current_ir_graph)) {
325 long pnr = get_Proj_proj(n);
326 set_Proj_proj(n, pnr + env->arg_shift);
/* (Call) */
332 if (! is_self_recursive_Call(n)) {
333 /* any non self recursive call might access global memory */
334 env->only_local_mem = 0;
337 ctp = get_Call_type(n);
338 if (env->params->flags & LF_COMPOUND_RETURN) {
339 /* check for compound returns */
341 for (i = 0, n_res = get_method_n_ress(ctp); i < n_res; ++i) {
342 if (is_compound_type(get_method_res_type(ctp, i))) {
344 * This is a call with a compound return. As the result
345 * might be ignored, we must put it in the list.
347 (void)get_Call_entry(n, env);
/* (CopyB) Both source and destination may touch non-local memory. */
354 if (env->only_local_mem) {
355 check_ptr(get_CopyB_src(n), env);
356 if (env->only_local_mem)
357 check_ptr(get_CopyB_dst(n), env);
359 if (env->params->flags & LF_COMPOUND_RETURN) {
360 /* check for compound returns */
361 ir_node *src = get_CopyB_src(n);
363 ir_node *proj = get_Proj_pred(src);
364 if (is_Proj(proj) && get_Proj_proj(proj) == pn_Call_T_result) {
365 ir_node *call = get_Proj_pred(proj);
367 ctp = get_Call_type(call);
368 if (is_compound_type(get_method_res_type(ctp, get_Proj_proj(src)))) {
369 /* found a CopyB from compound Call result */
/* Chain this CopyB into the call entry's copyb list via the irn link. */
370 cl_entry *e = get_Call_entry(call, env);
371 set_irn_link(n, e->copyb);
386 * Returns non-zero if a node is a compound address
387 * of a frame-type entity.
389 * @param ft the frame type
390 * @param adr the node
392 static int is_compound_address(ir_type *ft, ir_node *adr)
/* NOTE(review): the guard checking that adr is a Sel (and the early returns)
 * is missing from this excerpt. */
/* Only plain entity selection counts; an indexed Sel addresses an element,
 * not the compound itself. */
398 if (get_Sel_n_indexs(adr) != 0)
400 ent = get_Sel_entity(adr);
401 return get_entity_owner(ent) == ft;
404 /** A pair for the copy-return-optimization. */
405 typedef struct cr_pair {
406 ir_entity *ent; /**< the entity that can be removed from the frame */
407 ir_node *arg; /**< the argument that replaces the entity's address */
/* NOTE(review): the closing `} cr_pair;` line is not visible in this excerpt. */
411 * Post walker: fixes all entities addresses for the copy-return
/* optimization by replacing Sels of removed frame entities with the hidden
 * argument node. */
414 * Note: We expect the length of the cr_pair array (i.e. number of compound
415 * return values) to be 1 (C, C++) in almost all cases, so ignore the
416 * linear search complexity here.
/* NOTE(review): the guard restricting this walker body to Sel nodes is
 * missing from this excerpt. */
418 static void do_copy_return_opt(ir_node *n, void *ctx)
421 ir_entity *ent = get_Sel_entity(n);
422 cr_pair *arr = (cr_pair*)ctx;
425 for (i = 0, l = ARR_LEN(arr); i < l; ++i) {
426 if (ent == arr[i].ent) {
427 exchange(n, arr[i].arg);
435 * Return a Sel node that selects a dummy argument of type tp.
436 * Dummy arguments are only needed once and we use a map
/* (env->dummy_map) to reuse an already created entity. */
438 * We could even assign all dummy arguments the same offset
439 * in the frame type ...
441 * @param irg the graph
442 * @param block the block where a newly created Sel should be placed
443 * @param tp the type of the dummy entity that should be created
444 * @param env the environment
446 static ir_node *get_dummy_sel(ir_graph *irg, ir_node *block, ir_type *tp, wlk_env *env)
451 /* use a map to check if we already created such an entity */
452 e = pmap_find(env->dummy_map, tp);
454 ent = (ir_entity*)e->value;
/* Cache miss: create a fresh frame entity named "dummy.<n>". */
456 ir_type *ft = get_irg_frame_type(irg);
459 snprintf(buf, sizeof(buf), "dummy.%u", env->dnr++);
460 ent = new_entity(ft, new_id_from_str(buf), tp);
461 pmap_insert(env->dummy_map, tp, ent);
463 if (get_type_state(ft) == layout_fixed) {
464 /* Fix the layout again */
465 assert(0 && "Fixed layout not implemented");
/* NOTE(review): the argument list of new_r_simpleSel is truncated in this
 * excerpt; presumably block, frame pointer and ent are passed. */
468 return new_r_simpleSel(
476 * Add the hidden parameter from the CopyB node to the Call node.
478 * @param irg the graph
479 * @param n_com number of compound results (will be number of hidden parameters)
480 * @param ins in array to store the hidden parameters into
481 * @param entry the call list
482 * @param env the environment
484 static void add_hidden_param(ir_graph *irg, size_t n_com, ir_node **ins, cl_entry *entry, wlk_env *env)
486 ir_node *p, *n, *mem, *blk;
/* Walk the CopyB chain linked through the nodes' irn links; each CopyB that
 * copied a compound call result donates its destination as hidden argument. */
490 for (p = entry->copyb; p; p = n) {
491 ir_node *src = get_CopyB_src(p);
/* The Proj number of the CopyB source selects which result it copied. */
492 size_t idx = get_Proj_proj(src);
493 n = (ir_node*)get_irn_link(p);
495 ins[idx] = get_CopyB_dst(p);
496 mem = get_CopyB_mem(p);
497 blk = get_nodes_block(p);
499 /* get rid of the CopyB */
/* Replace the CopyB by a Tuple forwarding its memory; the exception
 * projection becomes Bad since the copy no longer happens. */
500 turn_into_tuple(p, pn_CopyB_max);
501 set_Tuple_pred(p, pn_CopyB_M, mem);
502 set_Tuple_pred(p, pn_CopyB_X_regular, new_r_Jmp(blk));
503 set_Tuple_pred(p, pn_CopyB_X_except, get_irg_bad(irg));
507 /* now create dummy entities for function with ignored return value */
508 if (n_args < n_com) {
509 ir_type *ctp = get_Call_type(entry->call);
/* NOTE(review): the guard around get_type_unlowered is missing from this
 * excerpt; the call type may already be the lowered one here. */
513 ctp = get_type_unlowered(ctp);
/* Fill every still-empty slot with a Sel of a dummy frame entity. */
515 for (j = i = 0; i < get_method_n_ress(ctp); ++i) {
516 ir_type *rtp = get_method_res_type(ctp, i);
517 if (is_compound_type(rtp)) {
519 ins[j] = get_dummy_sel(irg, get_nodes_block(entry->call), rtp, env);
527 * Fix all calls on a call list by adding hidden parameters.
529 * @param irg the graph
530 * @param env the environment
532 static void fix_call_list(ir_graph *irg, wlk_env *env)
534 const lower_params_t *lp = env->params;
536 ir_node *call, **new_in;
537 ir_type *ctp, *lowered_mtp;
538 add_hidden hidden_params;
539 size_t i, n_res, n_params, n_com, pos;
/* new_in is a growable array reused for each call's new in-array. */
541 new_in = NEW_ARR_F(ir_node *, 0);
542 for (p = env->cl_list; p; p = p->next) {
544 ctp = get_Call_type(call);
/* Swap the call's method type for its lowered variant. */
545 lowered_mtp = create_modified_mtd_type(lp, ctp);
546 set_Call_type(call, lowered_mtp);
/* Same SMART->IN_FRONT degradation for variadic callees as elsewhere. */
548 hidden_params = lp->hidden_params;
549 if (hidden_params == ADD_HIDDEN_SMART &&
550 get_method_variadicity(ctp) == variadicity_variadic)
551 hidden_params = ADD_HIDDEN_ALWAYS_IN_FRONT;
553 n_params = get_Call_n_params(call);
/* Count compound results: each becomes one hidden parameter. */
556 for (i = 0, n_res = get_method_n_ress(ctp); i < n_res; ++i) {
557 if (is_compound_type(get_method_res_type(ctp, i)))
/* NOTE(review): the initialization of pos (presumably 2, for mem and ptr)
 * is missing from this excerpt — confirm against the full file. */
561 ARR_RESIZE(ir_node *, new_in, n_params + n_com + pos);
562 memset(new_in, 0, sizeof(*new_in) * (n_params + n_com + pos));
563 if (hidden_params == ADD_HIDDEN_ALWAYS_IN_FRONT) {
564 add_hidden_param(irg, n_com, &new_in[pos], p, env);
567 /* copy all other parameters */
568 for (i = 0; i < n_params; ++i)
569 new_in[pos++] = get_Call_param(call, i);
570 if (hidden_params == ADD_HIDDEN_ALWAYS_LAST) {
571 add_hidden_param(irg, n_com, &new_in[pos], p, env);
/* Slots 0 and 1 of a Call's in-array are memory and callee pointer. */
574 new_in[0] = get_Call_mem(call);
575 new_in[1] = get_Call_ptr(call);
577 set_irn_in(call, n_params + n_com + 2, new_in);
582 * Transform a graph. If it has compound parameter returns,
583 * remove them and use the hidden parameter instead.
584 * If it calls methods with compound parameter returns, add hidden
/* parameters to those calls. */
587 * @param lp parameter struct
588 * @param irg the graph to transform
/* NOTE(review): this function is heavily truncated in this excerpt — many
 * guards, closing braces and intermediate statements are missing.  Comments
 * below describe only the visible lines. */
590 static void transform_irg(const lower_params_t *lp, ir_graph *irg)
592 ir_graph *rem = current_ir_graph;
593 ir_entity *ent = get_irg_entity(irg);
594 ir_type *mtp, *lowered_mtp, *tp, *ft;
595 size_t i, j, k, n_ress = 0, n_ret_com = 0;
597 ir_node **new_in, *ret, *endbl, *bl, *mem, *copy;
600 add_hidden hidden_params;
602 current_ir_graph = irg;
604 assert(ent && "Cannot transform graph without an entity");
605 assert(get_irg_phase_state(irg) == phase_high && "call lowering must be done in phase high");
607 mtp = get_entity_type(ent);
609 if (lp->flags & LF_COMPOUND_RETURN) {
610 /* calculate the number of compound returns */
611 n_ress = get_method_n_ress(mtp);
612 for (n_ret_com = i = 0; i < n_ress; ++i) {
613 tp = get_method_res_type(mtp, i);
615 if (is_compound_type(tp))
621 /* much easier if we have only one return */
622 normalize_one_return(irg);
624 /* This graph has a compound argument. Create a new type */
625 lowered_mtp = create_modified_mtd_type(lp, mtp);
626 set_entity_type(ent, lowered_mtp);
/* SMART->IN_FRONT degradation for variadic methods, as elsewhere. */
628 hidden_params = lp->hidden_params;
629 if (hidden_params == ADD_HIDDEN_SMART &&
630 get_method_variadicity(mtp) == variadicity_variadic)
631 hidden_params = ADD_HIDDEN_ALWAYS_IN_FRONT;
633 if (hidden_params == ADD_HIDDEN_ALWAYS_IN_FRONT) {
634 /* hidden arguments are added first */
635 env.arg_shift = n_ret_com;
636 env.first_hidden = 0;
638 /* hidden arguments are added last */
640 env.first_hidden = get_method_n_params(mtp);
643 /* we must only search for calls */
647 obstack_init(&env.obst);
649 env.dummy_map = pmap_create_ex(8);
652 env.lowered_mtp = lowered_mtp;
653 env.value_params = get_method_value_param_type(mtp);
654 env.only_local_mem = 1;
657 /* scan the code, fix argument numbers and collect calls. */
658 irg_walk_graph(irg, firm_clear_link, fix_args_and_collect_calls, &env);
662 fix_call_list(irg, &env);
669 /* STEP 1: find the return. This is simple, we have normalized the graph. */
670 endbl = get_irg_end_block(irg);
672 for (idx = get_Block_n_cfgpreds(endbl) - 1; idx >= 0; --idx) {
673 ir_node *pred = get_Block_cfgpred(endbl, idx);
675 if (is_Return(pred)) {
681 /* in case of infinite loops, there might be no return */
682 if (ret == NULL) goto return_fixed;
685 * Now fix the Return node of the current graph.
690 * STEP 2: fix it. For all compound return values add a CopyB,
691 * all others are copied.
/* new_in[0] is reserved for the Return's memory, hence n_ress + 1 slots
 * and the index j starting at 1 below. */
693 NEW_ARR_A(ir_node *, new_in, n_ress + 1);
695 bl = get_nodes_block(ret);
696 mem = get_Return_mem(ret);
698 ft = get_irg_frame_type(irg);
699 NEW_ARR_A(cr_pair, cr_opt, n_ret_com);
701 for (j = 1, i = k = 0; i < n_ress; ++i) {
702 ir_node *pred = get_Return_res(ret, i);
703 tp = get_method_res_type(mtp, i);
705 if (is_compound_type(tp)) {
/* The k-th hidden argument carries the caller-provided destination. */
706 ir_node *arg = get_irg_args(irg);
707 arg = new_r_Proj(arg, mode_P_data, env.first_hidden + k);
710 if (is_Unknown(pred)) {
711 /* The Return(Unknown) is the Firm construct for a missing return.
715 * Sorrily detecting that copy-return is possible isn't that simple.
716 * We must check, that the hidden address is alias free during the whole
/* function. */
718 * A simple heuristic: all Loads/Stores inside
719 * the function access only local frame.
721 if (env.only_local_mem && is_compound_address(ft, pred)) {
722 /* we can do the copy-return optimization here */
723 cr_opt[n_cr_opt].ent = get_Sel_entity(pred);
724 cr_opt[n_cr_opt].arg = arg;
726 } else { /* copy-return optimization is impossible, do the copy. */
/* NOTE(review): the CopyB construction is missing from this excerpt;
 * only the memory Proj on the copy is visible. */
734 mem = new_r_Proj(copy, mode_M, pn_CopyB_M);
737 if (lp->flags & LF_RETURN_HIDDEN) {
741 } else { /* scalar return value */
746 /* replace the in of the Return */
748 set_irn_in(ret, j, new_in);
/* Apply the collected copy-return optimizations and free the now-unused
 * frame entities. */
753 irg_walk_graph(irg, NULL, do_copy_return_opt, cr_opt);
755 for (i = 0, n = ARR_LEN(cr_opt); i < n; ++i) {
756 free_entity(cr_opt[i].ent);
759 } /* if (n_ret_com) */
762 pmap_destroy(env.dummy_map);
763 obstack_free(&env.obst, NULL);
766 /* invalidate the analysis info */
767 set_irg_outs_inconsistent(irg);
768 set_irg_loopinfo_state(irg, loopinfo_inconsistent);
770 current_ir_graph = rem;
774 * Returns non-zero if the given type is a method
775 * type that must be lowered.
777 * @param lp lowering parameters
778 * @param tp The type.
780 static int must_be_lowered(const lower_params_t *lp, ir_type *tp)
785 if (is_Method_type(tp)) {
786 if (lp->flags & LF_COMPOUND_RETURN) {
787 /* check for compound returns */
788 n_ress = get_method_n_ress(tp);
789 for (i = 0; i < n_ress; ++i) {
790 res_tp = get_method_res_type(tp, i);
/* A single compound result suffices to require lowering.
 * NOTE(review): the `return 1;` / `return 0;` lines are missing from
 * this excerpt. */
792 if (is_compound_type(res_tp))
801 * type-walker: lower all method types of entities
802 * and points-to types.
804 static void lower_method_types(type_or_ent tore, void *env)
806 const lower_params_t *lp = (const lower_params_t*)env;
809 /* fix method entities */
810 if (is_entity(tore.ent)) {
811 ir_entity *ent = tore.ent;
812 tp = get_entity_type(ent);
814 if (must_be_lowered(lp, tp)) {
815 tp = create_modified_mtd_type(lp, tp);
816 set_entity_type(ent, tp);
/* NOTE(review): the else-branch header handling plain types (where tp is
 * taken from tore.typ) is missing from this excerpt. */
821 /* fix pointer to methods */
822 if (is_Pointer_type(tp)) {
823 ir_type *etp = get_pointer_points_to_type(tp);
824 if (must_be_lowered(lp, etp)) {
825 etp = create_modified_mtd_type(lp, etp);
826 set_pointer_points_to_type(tp, etp);
833 * Lower calls with compound parameters and return types.
834 * This function does the following transformations:
836 * - Adds a new (hidden) pointer parameter for
837 * any return compound type.
839 * - Use of the hidden parameters in the function code.
841 * - Change all calls to functions with compound return
842 * by providing space for the hidden parameter on the callers
/* stack. */
845 * - Replace a possible block copy after the function call.
847 void lower_calls_with_compounds(const lower_params_t *params)
/* Work on a private copy so defaults can be filled in without mutating the
 * caller's struct. */
851 lower_params_t param = *params;
853 if (param.find_pointer_type == NULL) {
/* No callback supplied: use the default and set up its type cache. */
854 param.find_pointer_type = def_find_pointer_type;
855 type_map = pmap_create_ex(8);
859 /* first step: Transform all graphs */
860 for (i = 0, n = get_irp_n_irgs(); i < n; ++i) {
861 irg = get_irp_irg(i);
/* FIX(review): "&param" was mojibake-corrupted to the pilcrow sequence
 * in the original; restored to pass the address of the local copy. */
863 transform_irg(&param, irg);
866 /* second step: Lower all method types of visible entities */
867 type_walk(NULL, lower_method_types, &param);
/* NOTE(review): the guard around pmap_destroy (only when type_map was
 * created above) is missing from this excerpt. */
870 pmap_destroy(type_map);