2 * Copyright (C) 1995-2011 University of Karlsruhe. All rights reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
22 * @brief Lowering of Calls with compound parameters and return types.
23 * @author Michael Beck, Matthias Braun
29 #include "lower_calls.h"
40 #include "iroptimize.h"
/** Cache: maps an element type to its (lazily created) pointer type,
 *  see get_pointer_type(). */
45 static pmap *pointer_types;
/** Cache: maps an original method type to its lowered variant,
 *  see lower_mtp(). */
46 static pmap *lowered_mtps;
/**
 * Default implementation for finding a pointer type for a given element type.
 * Simply creates a new one and caches it in the pointer_types map.
 *
 * NOTE(review): lines are missing from this extract (no braces or return
 * statement visible); presumably the cached lookup result is checked for
 * NULL before a new pointer type is created — confirm against full source.
 */
52 static ir_type *get_pointer_type(ir_type *dest_type)
54 	ir_type *res = (ir_type*)pmap_get(pointer_types, dest_type);
		/* cache miss: build a new pointer type and remember it */
56 		res = new_type_pointer(dest_type);
57 		pmap_insert(pointer_types, dest_type, res);
/**
 * Shift the parameter number of every parameter entity on the frame type
 * of @p irg by @p n_compound_ret, because that many hidden result-pointer
 * parameters are inserted in front of the regular parameters.
 *
 * NOTE(review): the loop-variable declaration and the continue/skip lines
 * are not visible in this extract.
 */
62 static void fix_parameter_entities(ir_graph *irg, size_t n_compound_ret)
64 	ir_type *frame_type = get_irg_frame_type(irg);
65 	size_t n_members = get_compound_n_members(frame_type);
68 	for (i = 0; i < n_members; ++i) {
69 		ir_entity *member = get_compound_member(frame_type, i);
		/* only parameter entities carry a parameter number */
71 		if (!is_parameter_entity(member))
74 		/* increase parameter number since we added a new parameter in front */
75 		num = get_entity_parameter_number(member);
		/* the va_start pseudo-parameter presumably keeps its special
		 * number — the skip line is not visible here; confirm */
76 		if (num == IR_VA_START_PARAMETER_NUMBER)
78 		set_entity_parameter_number(member, num + n_compound_ret);
/**
 * Remove compound-typed parameter entities from the frame type of @p irg;
 * after lowering such parameters are passed as pointers, so the frame
 * entities are no longer needed.  Iterates backwards so removal does not
 * disturb the member indices.
 *
 * NOTE(review): the actual removal call inside the is_compound_type branch
 * is not visible in this extract.
 */
82 static void remove_compound_param_entities(ir_graph *irg)
84 	ir_type *frame_type = get_irg_frame_type(irg);
85 	size_t n_members = get_compound_n_members(frame_type);
88 	for (i = n_members; i > 0; ) {
89 		ir_entity *member = get_compound_member(frame_type, --i);
91 		if (!is_parameter_entity(member))
94 		type = get_entity_type(member);
95 		if (is_compound_type(type)) {
/**
 * Creates a new lowered type for a method type with compound
 * arguments/results.  The new type is associated to the old one
 * (set_higher_type + lowered_mtps map) and returned.
 *
 * Compound results become hidden pointer parameters prepended to the
 * parameter list (and, with LF_RETURN_HIDDEN, also pointer results);
 * compound parameters become pointer parameters unless
 * LF_DONT_LOWER_ARGUMENTS is set.
 *
 * NOTE(review): several lines (early returns, loop braces, variable
 * declarations) are not visible in this extract.
 */
105 static ir_type *lower_mtp(compound_call_lowering_flags flags, ir_type *mtp)
107 	bool must_be_lowered = false;
117 	mtp_additional_properties mtp_properties;
	/* non-method types are presumably returned unchanged */
119 	if (!is_Method_type(mtp))
	/* reuse an already lowered type if one was built before */
122 	lowered = (ir_type*)pmap_get(lowered_mtps, mtp);
126 	/* check if the type has to be lowered at all */
127 	n_params = get_method_n_params(mtp);
128 	n_ress = get_method_n_ress(mtp);
129 	for (i = 0; i < n_ress; ++i) {
130 		ir_type *res_tp = get_method_res_type(mtp, i);
131 		if (is_compound_type(res_tp)) {
132 			must_be_lowered = true;
	/* compound parameters only force lowering when argument lowering
	 * is not disabled */
136 	if (!must_be_lowered && !(flags & LF_DONT_LOWER_ARGUMENTS)) {
137 		for (i = 0; i < n_params; ++i) {
138 			ir_type *param_type = get_method_param_type(mtp, i);
139 			if (is_compound_type(param_type)) {
140 				must_be_lowered = true;
145 	if (!must_be_lowered)
	/* worst case: every result adds one hidden pointer parameter */
148 	results = ALLOCANZ(ir_type*, n_ress);
149 	params = ALLOCANZ(ir_type*, n_params + n_ress);
153 	/* add a hidden parameter in front for every compound result */
154 	for (i = 0; i < n_ress; ++i) {
155 		ir_type *res_tp = get_method_res_type(mtp, i);
157 		if (is_compound_type(res_tp)) {
158 			/* this compound will be allocated on callers stack and its
159 			   address will be transmitted as a hidden parameter. */
160 			ir_type *ptr_tp = get_pointer_type(res_tp);
161 			params[nn_params++] = ptr_tp;
162 			if (flags & LF_RETURN_HIDDEN)
163 				results[nn_ress++] = ptr_tp;
			/* non-compound results are kept as-is */
166 			results[nn_ress++] = res_tp;
169 	/* copy over parameter types */
170 	for (i = 0; i < n_params; ++i) {
171 		ir_type *param_type = get_method_param_type(mtp, i);
172 		if (! (flags & LF_DONT_LOWER_ARGUMENTS)
173 		    && is_compound_type(param_type)) {
174 			/* turn parameter into a pointer type */
175 			param_type = new_type_pointer(param_type);
177 		params[nn_params++] = param_type;
179 	assert(nn_ress <= n_ress);
180 	assert(nn_params <= n_params + n_ress);
182 	/* create the new type */
183 	lowered = new_d_type_method(nn_params, nn_ress, get_type_dbg_info(mtp));
186 	for (i = 0; i < nn_params; ++i)
187 		set_method_param_type(lowered, i, params[i]);
188 	for (i = 0; i < nn_ress; ++i)
189 		set_method_res_type(lowered, i, results[i]);
191 	set_method_variadicity(lowered, get_method_variadicity(mtp));
193 	cconv = get_method_calling_convention(mtp);
	/* hidden parameters were added => mark the compound-return convention */
194 	if (nn_params > n_params) {
195 		cconv |= cc_compound_ret;
197 	set_method_calling_convention(lowered, cconv);
199 	mtp_properties = get_method_additional_properties(mtp);
200 	/* after lowering the call is not const anymore, since it writes to the
201 	 * memory for the return value passed to it */
202 	mtp_properties &= ~mtp_property_const;
203 	set_method_additional_properties(lowered, mtp_properties);
205 	/* associate the lowered type with the original one for easier access */
206 	set_higher_type(lowered, mtp);
207 	pmap_insert(lowered_mtps, mtp, lowered);
/** One entry of the call list: per-Call bookkeeping for the lowering. */
215 typedef struct cl_entry cl_entry;
217 	cl_entry *next; /**< Pointer to the next entry. */
218 	ir_node *call; /**< Pointer to the Call node. */
219 	ir_node *copyb; /**< List of all CopyB nodes. */
	/** Set if the call has at least one compound result. */
220 	bool has_compound_ret : 1;
	/** Set if the call has at least one compound parameter. */
221 	bool has_compound_param : 1;
/**
 * Walker environment for fix_args_and_collect_calls().
 */
227 typedef struct wlk_env_t {
228 	size_t arg_shift; /**< The Argument index shift for parameters. */
229 	struct obstack obst; /**< An obstack to allocate the data on. */
230 	cl_entry *cl_list; /**< The call list. */
231 	pmap *dummy_map; /**< A map for finding the dummy arguments. */
232 	compound_call_lowering_flags flags; /**< The lowering flags in effect. */
233 	ir_type *lowered_mtp; /**< The lowered method type of the current irg if any. */
234 	bool only_local_mem:1;/**< Set if only local memory access was found. */
235 	bool changed:1; /**< Set if the current graph was changed. */
/**
 * Return the call list entry of a call node.
 * If no entry exists yet, allocate one on the environment's obstack,
 * link it into the environment's call list and attach it to the node
 * via the irn link.
 *
 * @param call  A Call node.
 * @param env   The environment.
 */
246 static cl_entry *get_call_entry(ir_node *call, wlk_env *env)
248 	cl_entry *res = (cl_entry*)get_irn_link(call);
		/* no entry yet: allocate, initialize and register one */
250 		res = OALLOC(&env->obst, cl_entry);
251 		res->next = env->cl_list;
254 		set_irn_link(call, res);
/**
 * Finds the base address of an address by skipping Sel nodes and
 * address arithmetic (Add/Sub with a reference operand).
 *
 * @param ptr   the address
 * @param pEnt  points to the base entity if any — presumably written with
 *              the last Sel entity seen before return (the surrounding
 *              loop and return lines are not visible in this extract)
 */
267 static ir_node *find_base_adr(ir_node *ptr, ir_entity **pEnt)
269 	ir_entity *ent = NULL;
270 	assert(mode_is_reference(get_irn_mode(ptr)));
		/* skip a Sel: remember its entity, continue at its pointer */
274 		ent = get_Sel_entity(ptr);
275 		ptr = get_Sel_ptr(ptr);
277 	else if (is_Add(ptr)) {
278 		ir_node *left = get_Add_left(ptr);
		/* follow whichever Add operand carries the reference mode */
279 		if (mode_is_reference(get_irn_mode(left)))
282 			ptr = get_Add_right(ptr);
284 	} else if (is_Sub(ptr)) {
		/* for Sub only the left operand can be the pointer */
285 		ptr = get_Sub_left(ptr);
/**
 * Check if a given pointer represents non-local memory.
 * Clears env->only_local_mem when the pointed-to storage is neither a
 * local variable nor malloc'ed memory.
 */
297 static void check_ptr(ir_node *ptr, wlk_env *env)
299 	ir_storage_class_class_t sc;
302 	/* still alias free */
303 	ptr = find_base_adr(ptr, &ent);
304 	sc = get_base_sc(classify_pointer(ptr, ent));
305 	if (sc != ir_sc_localvar && sc != ir_sc_malloced) {
306 		/* non-local memory access */
307 		env->only_local_mem = false;
/**
 * Returns true if a Call is surely a self-recursive Call.
 * Beware: if this function returns false, the call might still be
 * self-recursive!
 */
315 static bool is_self_recursive_Call(const ir_node *call)
317 	const ir_node *callee = get_Call_ptr(call);
	/* only a direct call through a SymConst address can be decided */
319 	if (is_SymConst_addr_ent(callee)) {
320 		const ir_entity *ent = get_SymConst_entity(callee);
321 		const ir_graph *irg = get_entity_irg(ent);
		/* self-recursive iff the callee's graph is the calling graph */
322 		if (irg == get_irn_irg(call))
/**
 * Post walker: shift all parameter indexes
 * and collect Calls with compound returns/parameters in the call list.
 * If a non-alias-free memory access is found, reset the alias-free flag
 * (env->only_local_mem).
 *
 * NOTE(review): the switch case labels and several braces are not
 * visible in this extract; the case attributions in the comments below
 * are inferred from the handled node kinds — confirm against full source.
 */
334 static void fix_args_and_collect_calls(ir_node *n, void *ctx)
336 	wlk_env *env = (wlk_env*)ctx;
339 	switch (get_irn_opcode(n)) {
	/* presumably Load/Store: classify the accessed address */
342 		if (env->only_local_mem) {
343 			ptr = get_irn_n(n, 1);
	/* presumably Proj: renumber argument Projs after hidden params */
348 		if (env->arg_shift > 0) {
349 			ir_node *pred = get_Proj_pred(n);
350 			ir_graph *irg = get_irn_irg(n);
352 			/* Fix the argument numbers */
353 			if (pred == get_irg_args(irg)) {
354 				long pnr = get_Proj_proj(n);
355 				set_Proj_proj(n, pnr + env->arg_shift);
	/* presumably Call: record compound results/parameters */
361 		ir_type *ctp = get_Call_type(n);
362 		size_t n_ress = get_method_n_ress(ctp);
363 		size_t n_params = get_method_n_params(ctp);
365 		if (! is_self_recursive_Call(n)) {
366 			/* any non self recursive call might access global memory */
367 			env->only_local_mem = false;
370 		/* check for compound returns */
371 		for (i = 0; i < n_ress; ++i) {
372 			ir_type *type = get_method_res_type(ctp, i);
373 			if (is_compound_type(type)) {
				/*
375 				 * This is a call with a compound return. As the result
376 				 * might be ignored, we must put it in the list.
				 */
378 				cl_entry *entry = get_call_entry(n, env);
379 				entry->has_compound_ret = true;
		/* check for compound parameters */
383 		for (i = 0; i < n_params; ++i) {
384 			ir_type *type = get_method_param_type(ctp, i);
385 			if (is_compound_type(type)) {
386 				cl_entry *entry = get_call_entry(n, env);
387 				entry->has_compound_param = true;
	/* presumably CopyB: check locality of both ends, then link CopyBs
	 * fed from compound Call results into the call entry */
394 		ir_node *src = get_CopyB_src(n);
395 		if (env->only_local_mem) {
396 			check_ptr(get_CopyB_src(n), env);
397 			if (env->only_local_mem)
398 				check_ptr(get_CopyB_dst(n), env);
400 		/* check for compound returns */
402 			ir_node *proj = get_Proj_pred(src);
403 			if (is_Proj(proj) && get_Proj_proj(proj) == pn_Call_T_result) {
404 				ir_node *call = get_Proj_pred(proj);
406 				ir_type *ctp = get_Call_type(call);
407 				if (is_compound_type(get_method_res_type(ctp, get_Proj_proj(src)))) {
408 					/* found a CopyB from compound Call result */
409 					cl_entry *e = get_call_entry(call, env);
					/* chain this CopyB into the entry's copyb list */
410 					set_irn_link(n, e->copyb);
	/* presumably Sel: rewrite selects of compound parameter entities to
	 * the hidden pointer argument */
419 		ir_entity *entity = get_Sel_entity(n);
420 		ir_type *type = get_entity_type(entity);
422 		if (is_parameter_entity(entity) && is_compound_type(type)) {
423 			if (! (env->flags & LF_DONT_LOWER_ARGUMENTS)) {
424 				/* note that num was already modified by fix_parameter_entities
425 				 * so no need to add env->arg_shift again */
426 				size_t num = get_entity_parameter_number(entity);
427 				ir_graph *irg = get_irn_irg(n);
428 				ir_node *args = get_irg_args(irg);
429 				ir_node *ptr = new_r_Proj(args, mode_P, num);
431 				/* hack to avoid us visiting the proj again */
432 				mark_irn_visited(ptr);
435 			/* we need to copy compound parameters */
436 			env->only_local_mem = false;
/**
 * Returns true if a node is a compound address
 * of a frame-type entity (a simple Sel, without indices, whose entity
 * is owned by the frame type).
 *
 * @param ft   the frame type
 * @param adr  the node
 */
453 static bool is_compound_address(ir_type *ft, ir_node *adr)
	/* array-style Sels (with indices) do not qualify */
459 	if (get_Sel_n_indexs(adr) != 0)
461 	ent = get_Sel_entity(adr);
462 	return get_entity_owner(ent) == ft;
465 /** A pair for the copy-return-optimization. */
466 typedef struct cr_pair {
467 	ir_entity *ent; /**< the entity than can be removed from the frame */
468 	ir_node *arg; /**< the argument that replaces the entities address */
/**
 * Post walker: fixes all entity addresses for the copy-return
 * optimization — replaces Sels of removed frame entities by the hidden
 * result-pointer argument.
 *
 * Note: We expect the length of the cr_pair array (i.e. number of compound
 * return values) to be 1 (C, C++) in almost all cases, so ignore the
 * linear search complexity here.
 */
479 static void do_copy_return_opt(ir_node *n, void *ctx)
482 		ir_entity *ent = get_Sel_entity(n);
483 		cr_pair *arr = (cr_pair*)ctx;
486 		for (i = 0, l = ARR_LEN(arr); i < l; ++i) {
487 			if (ent == arr[i].ent) {
				/* replace the Sel by the matching hidden argument */
488 				exchange(n, arr[i].arg);
/**
 * Return a Sel node that selects a dummy argument of type tp.
 * Dummy arguments are only needed once per type and are cached in a map.
 * We could even assign all dummy arguments the same offset
 * in the frame type ...
 *
 * @param irg    the graph
 * @param block  the block where a newly created Sel should be placed
 * @param tp     the type of the dummy entity that should be created
 * @param env    the environment
 *
 * NOTE(review): the trailing parameter(s) of the signature continuation
 * line are not visible in this extract.
 */
507 static ir_node *get_dummy_sel(ir_graph *irg, ir_node *block, ir_type *tp,
510 	/* use a map to check if we already created such an entity */
511 	ir_entity *ent = pmap_get(env->dummy_map, tp);
		/* no dummy for this type yet: create one on the frame */
513 		ir_type *ft = get_irg_frame_type(irg);
514 		ident *dummy_id = id_unique("dummy.%u");
515 		ent = new_entity(ft, dummy_id, tp);
516 		pmap_insert(env->dummy_map, tp, ent);
518 		if (get_type_state(ft) == layout_fixed) {
519 			/* Fix the layout again */
520 			panic("Fixed layout not implemented");
523 	return new_r_simpleSel(block, get_irg_no_mem(irg), get_irg_frame(irg), ent);
/**
 * Add the hidden parameters from the CopyB nodes to the Call node:
 * each CopyB destination becomes the hidden result pointer for the
 * corresponding result index, the CopyB itself is turned into a Tuple.
 * Results never copied anywhere get dummy frame entities instead.
 */
529 static void add_hidden_param(ir_graph *irg, size_t n_com, ir_node **ins,
530                              cl_entry *entry, wlk_env *env,
537 	for (p = entry->copyb; p; p = n) {
538 		ir_node *src = get_CopyB_src(p);
539 		size_t idx = get_Proj_proj(src);
		/* fetch the next CopyB before p is mutilated below */
540 		n = (ir_node*)get_irn_link(p);
542 		/* consider only the first CopyB */
543 		if (ins[idx] == NULL) {
544 			ir_node *block = get_nodes_block(p);
546 			/* use the memory output of the call and not the input of the CopyB
547 			 * otherwise stuff breaks if the call was mtp_property_const, because
548 			 * then the copyb skips the call. But after lowering the call is not
549 			 * const anymore, and its memory has to be used */
550 			ir_node *mem = new_r_Proj(entry->call, mode_M, pn_Call_M);
			/* the CopyB destination becomes the hidden parameter */
552 			ins[idx] = get_CopyB_dst(p);
554 			/* get rid of the CopyB */
555 			if (ir_throws_exception(p)) {
556 				turn_into_tuple(p, pn_CopyB_max+1);
557 				set_Tuple_pred(p, pn_CopyB_M, mem);
558 				set_Tuple_pred(p, pn_CopyB_X_regular, new_r_Jmp(block));
559 				set_Tuple_pred(p, pn_CopyB_X_except, new_r_Bad(irg, mode_X));
				/* non-throwing: only the memory output matters */
561 				turn_into_tuple(p, pn_CopyB_M+1);
562 				set_Tuple_pred(p, pn_CopyB_M, mem);
568 	/* now create dummy entities for function with ignored return value */
569 	if (n_args < n_com) {
573 		for (j = i = 0; i < get_method_n_ress(ctp); ++i) {
574 			ir_type *rtp = get_method_res_type(ctp, i);
575 			if (is_compound_type(rtp)) {
				/* result i had no CopyB: give it a dummy destination */
577 				ins[j] = get_dummy_sel(irg, get_nodes_block(entry->call), rtp, env);
/**
 * Rewrite a call with compound results: prepend one hidden address
 * parameter per compound result (supplied by add_hidden_param) and
 * rebuild the Call's in-array accordingly.
 *
 * @param env    the walker environment
 * @param entry  the call list entry for the Call
 * @param ctp    the original (unlowered) type of the Call
 */
584 static void fix_compound_ret(wlk_env *env, cl_entry *entry, ir_type *ctp)
586 	ir_node *call = entry->call;
587 	ir_graph *irg = get_irn_irg(call);
588 	size_t n_params = get_Call_n_params(call);
590 	size_t n_res = get_method_n_ress(ctp);
	/* count the compound results (n_com, presumably incremented in the
	 * dropped line inside this loop) */
595 	for (i = 0; i < n_res; ++i) {
596 		ir_type *type = get_method_res_type(ctp, i);
597 		if (is_compound_type(type))
	/* new in-array: mem + ptr + hidden params + original params */
601 	new_in = ALLOCANZ(ir_node*, n_params + n_com + (n_Call_max+1));
602 	new_in[pos++] = get_Call_mem(call);
603 	new_in[pos++] = get_Call_ptr(call);
604 	assert(pos == n_Call_max+1);
	/* fill in the hidden result addresses right after mem/ptr */
605 	add_hidden_param(irg, n_com, &new_in[pos], entry, env, ctp);
608 	/* copy all other parameters */
609 	for (i = 0; i < n_params; ++i) {
610 		ir_node *param = get_Call_param(call, i);
611 		new_in[pos++] = param;
613 	assert(pos == n_params+n_com+(n_Call_max+1));
614 	set_irn_in(call, pos, new_in);
/**
 * Create a new frame entity that holds the local copy of a compound
 * call argument.  (Note: "entitiy" in the name is a historical typo;
 * kept because callers elsewhere use this spelling.)
 */
617 static ir_entity *create_compound_arg_entitiy(ir_graph *irg, ir_type *type)
619 	ir_type *frame = get_irg_frame_type(irg);
620 	ident *id = id_unique("$compound_param.%u");
621 	ir_entity *entity = new_entity(frame, id, type);
	/* TODO:
623 	 * we could do some optimisations here and create a big union type for all
624 	 * different call types in a function */
/**
 * Rewrite compound parameters of a call: each compound argument is
 * copied (CopyB) into a fresh frame entity and the call receives the
 * entity's address instead of the compound value.  The CopyBs are
 * threaded through the call's memory input.
 */
628 static void fix_compound_params(cl_entry *entry, ir_type *ctp)
630 	ir_node *call = entry->call;
631 	dbg_info *dbgi = get_irn_dbg_info(call);
632 	ir_node *mem = get_Call_mem(call);
633 	ir_graph *irg = get_irn_irg(call);
634 	ir_node *nomem = new_r_NoMem(irg);
635 	ir_node *frame = get_irg_frame(irg);
636 	size_t n_params = get_method_n_params(ctp);
639 	for (i = 0; i < n_params; ++i) {
640 		ir_type *type = get_method_param_type(ctp, i);
645 		ir_entity *arg_entity;
		/* scalar parameters stay untouched */
646 		if (!is_compound_type(type))
649 		arg = get_Call_param(call, i);
650 		arg_entity = create_compound_arg_entitiy(irg, type);
651 		block = get_nodes_block(call);
		/* address of the new frame entity ... */
652 		sel = new_rd_simpleSel(dbgi, block, nomem, frame, arg_entity);
		/* ... copy the argument there and chain the memory */
653 		copyb = new_rd_CopyB(dbgi, block, mem, sel, arg, type);
654 		mem = new_r_Proj(copyb, mode_M, pn_CopyB_M);
655 		set_Call_param(call, i, sel);
	/* the call now depends on all the copies */
657 	set_Call_mem(call, mem);
/**
 * Fix all collected calls: give each Call its lowered method type and
 * rewrite compound parameters and/or compound results as needed.
 */
660 static void fix_calls(wlk_env *env)
663 	for (entry = env->cl_list; entry; entry = entry->next) {
664 		ir_node *call = entry->call;
665 		ir_type *ctp = get_Call_type(call);
666 		ir_type *lowered_mtp = lower_mtp(env->flags, ctp);
667 		set_Call_type(call, lowered_mtp);
669 		if (entry->has_compound_param) {
670 			fix_compound_params(entry, ctp);
672 		if (entry->has_compound_ret) {
673 			fix_compound_ret(env, entry, ctp);
/**
 * Transform a graph. If it has compound parameter returns,
 * remove them and use the hidden parameter instead.
 * If it calls methods with compound parameter returns, add hidden
 * parameters to those calls.
 *
 * @param flags  the lowering flags
 * @param irg    the graph to transform
 *
 * NOTE(review): many lines (braces, declarations, increments, early
 * returns) are not visible in this extract; comments hedge accordingly.
 */
686 static void transform_irg(compound_call_lowering_flags flags, ir_graph *irg)
688 	ir_entity *ent = get_irg_entity(irg);
689 	ir_type *mtp = get_entity_type(ent);
690 	size_t n_ress = get_method_n_ress(mtp);
691 	size_t n_params = get_method_n_params(mtp);
692 	size_t n_ret_com = 0;
693 	size_t n_param_com = 0;
695 	ir_type *lowered_mtp, *tp, *ft;
698 	ir_node **new_in, *ret, *endbl, *bl, *mem, *copy;
702 	/* calculate the number of compound returns */
703 	for (n_ret_com = i = 0; i < n_ress; ++i) {
704 		ir_type *type = get_method_res_type(mtp, i);
705 		if (is_compound_type(type))
	/* count compound parameters (n_param_com, presumably incremented in
	 * the dropped line inside this loop) */
708 	for (i = 0; i < n_params; ++i) {
709 		ir_type *type = get_method_param_type(mtp, i);
710 		if (is_compound_type(type))
		/* compound results: shift existing parameter entities */
715 		fix_parameter_entities(irg, n_ret_com);
718 		/* much easier if we have only one return */
719 		normalize_one_return(irg);
721 		/* This graph has a compound argument. Create a new type */
722 		lowered_mtp = lower_mtp(flags, mtp);
723 		set_entity_type(ent, lowered_mtp);
725 		/* hidden arguments are added first */
726 		env.arg_shift = n_ret_com;
727 		/* otherwise: no shift needed (presumably the else branch) */
728 		/* we must only search for calls */
732 	obstack_init(&env.obst);
734 	env.dummy_map = pmap_create_ex(8);
736 	env.lowered_mtp = lowered_mtp;
737 	env.only_local_mem = true;
740 	/* scan the code, fix argument numbers and collect calls. */
741 	irg_walk_graph(irg, firm_clear_link, NULL, &env);
742 	irg_walk_graph(irg, fix_args_and_collect_calls, NULL, &env);
	/* compound parameter entities are obsolete once params are pointers */
744 	if (n_param_com > 0 && !(flags & LF_DONT_LOWER_ARGUMENTS))
745 		remove_compound_param_entities(irg);
	/* fix all collected calls (presumably fix_calls(&env) in dropped
	 * lines), then handle the graph's own compound returns */
748 	if (env.cl_list != NULL) {
756 		/* STEP 1: find the return. This is simple, we have normalized the graph. */
757 		endbl = get_irg_end_block(irg);
759 		for (idx = get_Block_n_cfgpreds(endbl) - 1; idx >= 0; --idx) {
760 			ir_node *pred = get_Block_cfgpred(endbl, idx);
762 			if (is_Return(pred)) {
768 		/* in case of infinite loops, there might be no return */
		/*
771 		 * Now fix the Return node of the current graph.
776 		 * STEP 2: fix it. For all compound return values add a CopyB,
777 		 * all others are copied.
		 */
779 		NEW_ARR_A(ir_node *, new_in, n_ress + 1);
781 		bl = get_nodes_block(ret);
782 		mem = get_Return_mem(ret);
784 		ft = get_irg_frame_type(irg);
785 		NEW_ARR_A(cr_pair, cr_opt, n_ret_com);
787 		for (j = 1, i = k = 0; i < n_ress; ++i) {
788 			ir_node *pred = get_Return_res(ret, i);
789 			tp = get_method_res_type(mtp, i);
791 			if (is_compound_type(tp)) {
				/* the hidden address argument for this result */
792 				ir_node *arg = get_irg_args(irg);
793 				arg = new_r_Proj(arg, mode_P_data, k);
796 				if (is_Unknown(pred)) {
797 					/* The Return(Unknown) is the Firm construct for a
798 					 * missing return. Do nothing. */
					/*
801 					 * Sorrily detecting that copy-return is possible isn't
802 					 * that simple. We must check, that the hidden address
803 					 * is alias free during the whole function.
804 					 * A simple heuristic: all Loads/Stores inside
805 					 * the function access only local frame.
					 */
807 					if (env.only_local_mem && is_compound_address(ft, pred)) {
808 						/* we can do the copy-return optimization here */
809 						cr_opt[n_cr_opt].ent = get_Sel_entity(pred);
810 						cr_opt[n_cr_opt].arg = arg;
812 					} else { /* copy-return optimization is impossible, do the copy. */
820 						mem = new_r_Proj(copy, mode_M, pn_CopyB_M);
823 				if (flags & LF_RETURN_HIDDEN) {
827 			} else { /* scalar return value */
832 		/* replace the in of the Return */
834 		set_irn_in(ret, j, new_in);
		/* apply the copy-return optimization and drop the now-dead
		 * frame entities */
840 			irg_walk_graph(irg, NULL, do_copy_return_opt, cr_opt);
842 			for (c = 0, n = ARR_LEN(cr_opt); c < n; ++c) {
843 				free_entity(cr_opt[c].ent);
849 	pmap_destroy(env.dummy_map);
850 	obstack_free(&env.obst, NULL);
/**
 * Type walker: lower the method type of entities and the points-to type
 * of pointer-to-method types to their lowered variants.
 *
 * @param tore  the visited type or entity
 * @param env   points to the compound_call_lowering_flags
 */
853 static void lower_method_types(type_or_ent tore, void *env)
855 	const compound_call_lowering_flags *flags
856 		= (const compound_call_lowering_flags*)env;
858 	/* fix method entities */
859 	if (is_entity(tore.ent)) {
860 		ir_entity *ent = tore.ent;
861 		ir_type *tp = get_entity_type(ent);
		/* lower_mtp returns non-method types unchanged, so this is safe
		 * for all entities */
862 		ir_type *lowered = lower_mtp(*flags, tp);
863 		set_entity_type(ent, lowered);
		/* otherwise a type was visited */
865 		ir_type *tp = tore.typ;
867 		/* fix pointer to methods */
868 		if (is_Pointer_type(tp)) {
869 			ir_type *points_to = get_pointer_points_to_type(tp);
870 			ir_type *lowered_points_to = lower_mtp(*flags, points_to);
871 			set_pointer_points_to_type(tp, lowered_points_to);
/**
 * Entry point: lower all calls with compound parameters/returns in the
 * whole program.  First transforms every graph, then lowers the method
 * types of all remaining entities and pointer-to-method types.
 */
876 void lower_calls_with_compounds(compound_call_lowering_flags flags)
	/* global caches used by get_pointer_type()/lower_mtp() */
880 	pointer_types = pmap_create();
881 	lowered_mtps = pmap_create();
883 	/* first step: Transform all graphs */
884 	for (i = 0, n = get_irp_n_irgs(); i < n; ++i) {
885 		ir_graph *irg = get_irp_irg(i);
886 		transform_irg(flags, irg);
889 	/* second step: Lower all method types of visible entities */
890 	type_walk(NULL, lower_method_types, &flags);
	/* release the caches; lowered types stay associated via set_higher_type */
892 	pmap_destroy(lowered_mtps);
893 	pmap_destroy(pointer_types);