2 * Copyright (C) 1995-2011 University of Karlsruhe. All right reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
22 * @brief Lowering of Calls with compound parameters and return types.
23 * @author Michael Beck
30 #include "lower_calls.h"
41 #include "iroptimize.h"
46 static pmap *pointer_types;
47 static pmap *lowered_mtps;
50 * Default implementation for finding a pointer type for a given element type.
51 * Simple create a new one.
53 static ir_type *get_pointer_type(ir_type *dest_type)
55 ir_type *res = (ir_type*)pmap_get(pointer_types, dest_type);
57 res = new_type_pointer(dest_type);
58 pmap_insert(pointer_types, dest_type, res);
63 static void fix_parameter_entities(ir_graph *irg, size_t n_compound_ret)
65 ir_type *frame_type = get_irg_frame_type(irg);
66 size_t n_compound = get_compound_n_members(frame_type);
69 if (n_compound_ret == 0)
72 for (i = 0; i < n_compound; ++i) {
73 ir_entity *member = get_compound_member(frame_type, i);
75 if (!is_parameter_entity(member))
78 /* increase parameter number since we added a new parameter in front */
79 num = get_entity_parameter_number(member);
80 if (num == IR_VA_START_PARAMETER_NUMBER)
82 set_entity_parameter_number(member, num + n_compound_ret);
87 * Creates a new lowered type for a method type with compound
88 * arguments. The new type is associated to the old one and returned.
90 static ir_type *lower_mtp(compound_call_lowering_flags flags, ir_type *mtp)
92 bool must_be_lowered = false;
101 mtp_additional_properties mtp_properties;
103 if (!is_Method_type(mtp))
106 lowered = (ir_type*)pmap_get(lowered_mtps, mtp);
110 /* check if the type has to be lowered at all */
111 n_ress = get_method_n_ress(mtp);
112 for (i = 0; i < n_ress; ++i) {
113 ir_type *res_tp = get_method_res_type(mtp, i);
114 if (is_compound_type(res_tp)) {
115 must_be_lowered = true;
119 if (!must_be_lowered)
122 n_params = get_method_n_params(mtp);
123 results = ALLOCANZ(ir_type*, n_ress);
124 params = ALLOCANZ(ir_type*, n_params + n_ress);
128 /* add a hidden parameter in front for every compound result */
129 for (i = 0; i < n_ress; ++i) {
130 ir_type *res_tp = get_method_res_type(mtp, i);
132 if (is_compound_type(res_tp)) {
133 /* this compound will be allocated on callers stack and its
134 address will be transmitted as a hidden parameter. */
135 ir_type *ptr_tp = get_pointer_type(res_tp);
136 params[nn_params++] = ptr_tp;
137 if (flags & LF_RETURN_HIDDEN)
138 results[nn_ress++] = ptr_tp;
141 results[nn_ress++] = res_tp;
144 /* copy over parameter types */
145 for (i = 0; i < n_params; ++i) {
146 params[nn_params++] = get_method_param_type(mtp, i);
148 assert(nn_ress <= n_ress);
149 assert(nn_params <= n_params + n_ress);
151 /* create the new type */
152 lowered = new_d_type_method(nn_params, nn_ress, get_type_dbg_info(mtp));
155 for (i = 0; i < nn_params; ++i)
156 set_method_param_type(lowered, i, params[i]);
157 for (i = 0; i < nn_ress; ++i)
158 set_method_res_type(lowered, i, results[i]);
160 set_method_variadicity(lowered, get_method_variadicity(mtp));
162 set_method_calling_convention(lowered, get_method_calling_convention(mtp) | cc_compound_ret);
163 mtp_properties = get_method_additional_properties(mtp);
164 /* after lowering the call is not const anymore, since it writes to the
165 * memory for the return value passed to it */
166 mtp_properties &= ~mtp_property_const;
167 set_method_additional_properties(lowered, mtp_properties);
169 /* associate the lowered type with the original one for easier access */
170 set_lowered_type(mtp, lowered);
171 pmap_insert(lowered_mtps, mtp, lowered);
179 typedef struct cl_entry cl_entry;
181 cl_entry *next; /**< Pointer to the next entry. */
182 ir_node *call; /**< Pointer to the Call node. */
183 ir_node *copyb; /**< List of all CopyB nodes. */
187 * Walker environment for fix_args_and_collect_calls().
189 typedef struct wlk_env_t {
190 size_t arg_shift; /**< The Argument index shift for parameters. */
191 struct obstack obst; /**< An obstack to allocate the data on. */
192 cl_entry *cl_list; /**< The call list. */
193 pmap *dummy_map; /**< A map for finding the dummy arguments. */
194 compound_call_lowering_flags flags;
195 ir_type *lowered_mtp; /**< The lowered method type of the current irg if any. */
196 unsigned only_local_mem:1; /**< Set if only local memory access was found. */
197 unsigned changed:1; /**< Set if the current graph was changed. */
201 * Return the call list entry of a call node.
202 * If no entry exists yet, allocate one and enter the node into
203 * the call list of the environment.
205 * @param call A Call node.
206 * @param env The environment.
208 static cl_entry *get_Call_entry(ir_node *call, wlk_env *env)
210 cl_entry *res = (cl_entry*)get_irn_link(call);
212 res = OALLOC(&env->obst, cl_entry);
213 res->next = env->cl_list;
216 set_irn_link(call, res);
223 * Finds the base address of an address by skipping Sel's and address
226 * @param adr the address
227 * @param pEnt points to the base entity if any
229 static ir_node *find_base_adr(ir_node *ptr, ir_entity **pEnt)
231 ir_entity *ent = NULL;
232 assert(mode_is_reference(get_irn_mode(ptr)));
236 ent = get_Sel_entity(ptr);
237 ptr = get_Sel_ptr(ptr);
239 else if (is_Add(ptr)) {
240 ir_node *left = get_Add_left(ptr);
241 if (mode_is_reference(get_irn_mode(left)))
244 ptr = get_Add_right(ptr);
246 } else if (is_Sub(ptr)) {
247 ptr = get_Sub_left(ptr);
257 * Check if a given pointer represents non-local memory.
259 static void check_ptr(ir_node *ptr, wlk_env *env)
261 ir_storage_class_class_t sc;
264 /* still alias free */
265 ptr = find_base_adr(ptr, &ent);
266 sc = get_base_sc(classify_pointer(ptr, ent));
267 if (sc != ir_sc_localvar && sc != ir_sc_malloced) {
268 /* non-local memory access */
269 env->only_local_mem = 0;
274 * Returns non-zero if a Call is surely a self-recursive Call.
275 * Beware: if this functions returns 0, the call might be self-recursive!
277 static bool is_self_recursive_Call(const ir_node *call)
279 const ir_node *callee = get_Call_ptr(call);
281 if (is_SymConst_addr_ent(callee)) {
282 const ir_entity *ent = get_SymConst_entity(callee);
283 const ir_graph *irg = get_entity_irg(ent);
284 if (irg == get_irn_irg(call))
291 * Post walker: shift all parameter indexes
292 * and collect Calls with compound returns in the call list.
293 * If a non-alias free memory access is found, reset the alias free
296 static void fix_args_and_collect_calls(ir_node *n, void *ctx)
298 wlk_env *env = (wlk_env*)ctx;
302 switch (get_irn_opcode(n)) {
305 if (env->only_local_mem) {
306 ptr = get_irn_n(n, 1);
311 if (env->arg_shift > 0) {
312 ir_node *pred = get_Proj_pred(n);
313 ir_graph *irg = get_irn_irg(n);
315 /* Fix the argument numbers */
316 if (pred == get_irg_args(irg)) {
317 long pnr = get_Proj_proj(n);
318 set_Proj_proj(n, pnr + env->arg_shift);
326 if (! is_self_recursive_Call(n)) {
327 /* any non self recursive call might access global memory */
328 env->only_local_mem = 0;
331 ctp = get_Call_type(n);
332 /* check for compound returns */
333 for (i = 0, n_res = get_method_n_ress(ctp); i < n_res; ++i) {
334 if (is_compound_type(get_method_res_type(ctp, i))) {
336 * This is a call with a compound return. As the result
337 * might be ignored, we must put it in the list.
339 (void)get_Call_entry(n, env);
346 ir_node *src = get_CopyB_src(n);
347 if (env->only_local_mem) {
348 check_ptr(get_CopyB_src(n), env);
349 if (env->only_local_mem)
350 check_ptr(get_CopyB_dst(n), env);
352 /* check for compound returns */
354 ir_node *proj = get_Proj_pred(src);
355 if (is_Proj(proj) && get_Proj_proj(proj) == pn_Call_T_result) {
356 ir_node *call = get_Proj_pred(proj);
358 ctp = get_Call_type(call);
359 if (is_compound_type(get_method_res_type(ctp, get_Proj_proj(src)))) {
360 /* found a CopyB from compound Call result */
361 cl_entry *e = get_Call_entry(call, env);
362 set_irn_link(n, e->copyb);
371 ir_entity *ent = get_Sel_entity(n);
372 ir_type *type = get_entity_type(ent);
374 /* we need to copy compound parameters */
375 if (is_parameter_entity(ent) && is_compound_type(type)) {
376 env->only_local_mem = 0;
387 * Returns non-zero if a node is a compound address
388 * of a frame-type entity.
390 * @param ft the frame type
391 * @param adr the node
393 static bool is_compound_address(ir_type *ft, ir_node *adr)
399 if (get_Sel_n_indexs(adr) != 0)
401 ent = get_Sel_entity(adr);
402 return get_entity_owner(ent) == ft;
405 /** A pair for the copy-return-optimization. */
406 typedef struct cr_pair {
407 ir_entity *ent; /**< the entity than can be removed from the frame */
408 ir_node *arg; /**< the argument that replaces the entities address */
412 * Post walker: fixes all entities addresses for the copy-return
415 * Note: We expect the length of the cr_pair array (i.e. number of compound
416 * return values) to be 1 (C, C++) in almost all cases, so ignore the
417 * linear search complexity here.
419 static void do_copy_return_opt(ir_node *n, void *ctx)
422 ir_entity *ent = get_Sel_entity(n);
423 cr_pair *arr = (cr_pair*)ctx;
426 for (i = 0, l = ARR_LEN(arr); i < l; ++i) {
427 if (ent == arr[i].ent) {
428 exchange(n, arr[i].arg);
436 * Return a Sel node that selects a dummy argument of type tp.
437 * Dummy arguments are only needed once and we use a map
439 * We could even assign all dummy arguments the same offset
440 * in the frame type ...
442 * @param irg the graph
443 * @param block the block where a newly create Sel should be placed
444 * @param tp the type of the dummy entity that should be create
445 * @param env the environment
447 static ir_node *get_dummy_sel(ir_graph *irg, ir_node *block, ir_type *tp,
453 /* use a map the check if we already create such an entity */
454 e = pmap_find(env->dummy_map, tp);
456 ent = (ir_entity*)e->value;
458 ir_type *ft = get_irg_frame_type(irg);
459 ident *dummy_id = id_unique("dummy.%u");
460 ent = new_entity(ft, dummy_id, tp);
461 pmap_insert(env->dummy_map, tp, ent);
463 if (get_type_state(ft) == layout_fixed) {
464 /* Fix the layout again */
465 panic("Fixed layout not implemented");
468 return new_r_simpleSel(block, get_irg_no_mem(irg), get_irg_frame(irg), ent);
472 * Add the hidden parameter from the CopyB node to the Call node.
474 * @param irg the graph
475 * @param n_com number of compound results (will be number of hidden parameters)
476 * @param ins in array to store the hidden parameters into
477 * @param entry the call list
478 * @param env the environment
480 static void add_hidden_param(ir_graph *irg, size_t n_com, ir_node **ins,
481 cl_entry *entry, wlk_env *env)
483 ir_node *p, *n, *mem, *blk;
487 for (p = entry->copyb; p; p = n) {
488 ir_node *src = get_CopyB_src(p);
489 size_t idx = get_Proj_proj(src);
490 n = (ir_node*)get_irn_link(p);
492 ins[idx] = get_CopyB_dst(p);
493 blk = get_nodes_block(p);
495 /* use the memory output of the call and not the input of the CopyB
496 * otherwise stuff breaks if the call was mtp_property_const, because
497 * then the copyb skips the call. But after lowering the call is not
498 * const anymore, and its memory has to be used */
499 mem = new_r_Proj(entry->call, mode_M, pn_Call_M);
501 /* get rid of the CopyB */
502 turn_into_tuple(p, pn_CopyB_max+1);
503 set_Tuple_pred(p, pn_CopyB_M, mem);
504 set_Tuple_pred(p, pn_CopyB_X_regular, new_r_Jmp(blk));
505 set_Tuple_pred(p, pn_CopyB_X_except, new_r_Bad(irg, mode_X));
509 /* now create dummy entities for function with ignored return value */
510 if (n_args < n_com) {
511 ir_type *ctp = get_Call_type(entry->call);
515 if (is_lowered_type(ctp))
516 ctp = get_associated_type(ctp);
518 for (j = i = 0; i < get_method_n_ress(ctp); ++i) {
519 ir_type *rtp = get_method_res_type(ctp, i);
520 if (is_compound_type(rtp)) {
522 ins[j] = get_dummy_sel(irg, get_nodes_block(entry->call), rtp, env);
530 * Fix all calls on a call list by adding hidden parameters.
532 * @param irg the graph
533 * @param env the environment
535 static void fix_call_list(ir_graph *irg, wlk_env *env)
538 ir_node *call, **new_in;
539 ir_type *ctp, *lowered_mtp;
540 size_t i, n_res, n_params, n_com, pos;
542 new_in = NEW_ARR_F(ir_node *, 0);
543 for (p = env->cl_list; p; p = p->next) {
545 ctp = get_Call_type(call);
546 lowered_mtp = lower_mtp(env->flags, ctp);
547 set_Call_type(call, lowered_mtp);
549 n_params = get_Call_n_params(call);
552 for (i = 0, n_res = get_method_n_ress(ctp); i < n_res; ++i) {
553 if (is_compound_type(get_method_res_type(ctp, i)))
557 ARR_RESIZE(ir_node *, new_in, n_params + n_com + pos);
558 memset(new_in, 0, sizeof(*new_in) * (n_params + n_com + pos));
559 add_hidden_param(irg, n_com, &new_in[pos], p, env);
561 /* copy all other parameters */
562 for (i = 0; i < n_params; ++i)
563 new_in[pos++] = get_Call_param(call, i);
564 new_in[0] = get_Call_mem(call);
565 new_in[1] = get_Call_ptr(call);
567 set_irn_in(call, n_params + n_com + 2, new_in);
572 * Transform a graph. If it has compound parameter returns,
573 * remove them and use the hidden parameter instead.
574 * If it calls methods with compound parameter returns, add hidden
577 * @param irg the graph to transform
579 static void transform_irg(compound_call_lowering_flags flags, ir_graph *irg)
581 ir_entity *ent = get_irg_entity(irg);
582 ir_type *mtp, *lowered_mtp, *tp, *ft;
583 size_t i, j, k, n_ress = 0, n_ret_com = 0;
585 ir_node **new_in, *ret, *endbl, *bl, *mem, *copy;
589 mtp = get_entity_type(ent);
591 /* calculate the number of compound returns */
592 n_ress = get_method_n_ress(mtp);
593 for (n_ret_com = i = 0; i < n_ress; ++i) {
594 tp = get_method_res_type(mtp, i);
596 if (is_compound_type(tp))
600 fix_parameter_entities(irg, n_ret_com);
603 /* much easier if we have only one return */
604 normalize_one_return(irg);
606 /* This graph has a compound argument. Create a new type */
607 lowered_mtp = lower_mtp(flags, mtp);
608 set_entity_type(ent, lowered_mtp);
610 /* hidden arguments are added first */
611 env.arg_shift = n_ret_com;
613 /* we must only search for calls */
617 obstack_init(&env.obst);
619 env.dummy_map = pmap_create_ex(8);
621 env.lowered_mtp = lowered_mtp;
622 env.only_local_mem = 1;
625 /* scan the code, fix argument numbers and collect calls. */
626 irg_walk_graph(irg, firm_clear_link, fix_args_and_collect_calls, &env);
630 fix_call_list(irg, &env);
637 /* STEP 1: find the return. This is simple, we have normalized the graph. */
638 endbl = get_irg_end_block(irg);
640 for (idx = get_Block_n_cfgpreds(endbl) - 1; idx >= 0; --idx) {
641 ir_node *pred = get_Block_cfgpred(endbl, idx);
643 if (is_Return(pred)) {
649 /* in case of infinite loops, there might be no return */
652 * Now fix the Return node of the current graph.
657 * STEP 2: fix it. For all compound return values add a CopyB,
658 * all others are copied.
660 NEW_ARR_A(ir_node *, new_in, n_ress + 1);
662 bl = get_nodes_block(ret);
663 mem = get_Return_mem(ret);
665 ft = get_irg_frame_type(irg);
666 NEW_ARR_A(cr_pair, cr_opt, n_ret_com);
668 for (j = 1, i = k = 0; i < n_ress; ++i) {
669 ir_node *pred = get_Return_res(ret, i);
670 tp = get_method_res_type(mtp, i);
672 if (is_compound_type(tp)) {
673 ir_node *arg = get_irg_args(irg);
674 arg = new_r_Proj(arg, mode_P_data, k);
677 if (is_Unknown(pred)) {
678 /* The Return(Unknown) is the Firm construct for a
679 * missing return. Do nothing. */
682 * Sorrily detecting that copy-return is possible isn't
683 * that simple. We must check, that the hidden address
684 * is alias free during the whole function.
685 * A simple heuristic: all Loads/Stores inside
686 * the function access only local frame.
688 if (env.only_local_mem && is_compound_address(ft, pred)) {
689 /* we can do the copy-return optimization here */
690 cr_opt[n_cr_opt].ent = get_Sel_entity(pred);
691 cr_opt[n_cr_opt].arg = arg;
693 } else { /* copy-return optimization is impossible, do the copy. */
701 mem = new_r_Proj(copy, mode_M, pn_CopyB_M);
704 if (flags & LF_RETURN_HIDDEN) {
708 } else { /* scalar return value */
713 /* replace the in of the Return */
715 set_irn_in(ret, j, new_in);
721 irg_walk_graph(irg, NULL, do_copy_return_opt, cr_opt);
723 for (c = 0, n = ARR_LEN(cr_opt); c < n; ++c) {
724 free_entity(cr_opt[c].ent);
730 pmap_destroy(env.dummy_map);
731 obstack_free(&env.obst, NULL);
734 static void lower_method_types(type_or_ent tore, void *env)
736 const compound_call_lowering_flags *flags
737 = (const compound_call_lowering_flags*)env;
739 /* fix method entities */
740 if (is_entity(tore.ent)) {
741 ir_entity *ent = tore.ent;
742 ir_type *tp = get_entity_type(ent);
743 ir_type *lowered = lower_mtp(*flags, tp);
744 set_entity_type(ent, lowered);
746 ir_type *tp = tore.typ;
748 /* fix pointer to methods */
749 if (is_Pointer_type(tp)) {
750 ir_type *points_to = get_pointer_points_to_type(tp);
751 ir_type *lowered_points_to = lower_mtp(*flags, points_to);
752 set_pointer_points_to_type(tp, lowered_points_to);
757 void lower_calls_with_compounds(compound_call_lowering_flags flags)
761 pointer_types = pmap_create();
762 lowered_mtps = pmap_create();
764 /* first step: Transform all graphs */
765 for (i = 0, n = get_irp_n_irgs(); i < n; ++i) {
766 ir_graph *irg = get_irp_irg(i);
767 transform_irg(flags, irg);
770 /* second step: Lower all method types of visible entities */
771 type_walk(NULL, lower_method_types, &flags);
773 pmap_destroy(lowered_mtps);
774 pmap_destroy(pointer_types);