2 * Copyright (C) 1995-2008 University of Karlsruhe. All right reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Scalar replacement of compounds.
23 * @author Beyhan Veliev, Michael Beck
30 #include "iroptimize.h"
31 #include "scalar_replace.h"
49 /** Stores the value number @p vnum in the link field of @p node. */
#define SET_VNUM(node, vnum) set_irn_link(node, INT_TO_PTR(vnum))
50 /** Reads the value number back from @p node's link field.
 *  Expansion is fully parenthesized so the cast cannot bind unexpectedly
 *  when the macro is used inside a larger expression. */
#define GET_VNUM(node)       ((unsigned)PTR_TO_INT(get_irn_link(node)))
53 * A path element entry: it is either an entity
54 * or a tarval, because we evaluate only constant array
55 * accesses like a.b.c[8].d
63 * An access path, used to assign value numbers
64 * to variables that will be scalar replaced.
66 typedef struct _path_t {
67 unsigned vnum; /**< The value number. */
68 unsigned path_len; /**< The length of the access path. */
/* pre-C99 "struct hack": the trailing [1] array is over-allocated to
 * path_len elements; PATH_SIZE below accounts for the one element
 * already included in sizeof(path_t). */
69 path_elem_t path[1]; /**< The path. */
72 /** The size of a path in bytes. */
73 #define PATH_SIZE(p) (sizeof(*(p)) + sizeof((p)->path[0]) * ((p)->path_len - 1))
/* An entry of the set of entities selected for scalar replacement;
 * used at the end of scalar_replacement_opt() to remove the replaced
 * entities from their owner type. */
75 typedef struct _scalars_t {
76 ir_entity *ent; /**< An entity for scalar replacement. */
77 ir_type *ent_owner; /**< The owner of this entity. */
/* Debug module handle, registered in firm_init_scalar_replace(). */
80 DEBUG_ONLY(static firm_dbg_module_t *dbg;)
/* Compare function for the set of access paths (set callback signature;
 * NOTE(review): the size parameter is unused here — comparison is driven
 * by p1->path_len instead). */
85 * @return 0 if they are identical
87 static int path_cmp(const void *elt, const void *key, size_t size) {
88 const path_t *p1 = elt;
89 const path_t *p2 = key;
92 /* we can use memcmp here, because identical tarvals should have identical addresses */
93 return memcmp(p1->path, p2->path, p1->path_len * sizeof(p1->path[0]));
97 * Compare two elements of the scalars_t set.
99 * @return 0 if they are identical
/* Entities are unique objects, so pointer comparison suffices. */
101 static int ent_cmp(const void *elt, const void *key, size_t size) {
102 const scalars_t *c1 = elt;
103 const scalars_t *c2 = key;
106 return c1->ent != c2->ent;
110 * Calculate a hash value for a path.
/* XORs the pointer values of all path elements together; since
 * path_elem_t is a union, hashing the .ent member covers the .tv
 * member as well. NOTE(review): further lines of this function are
 * elided in this excerpt. */
112 static unsigned path_hash(const path_t *path) {
116 for (i = 0; i < path->path_len; ++i)
117 hash ^= (unsigned)PTR_TO_INT(path->path[i].ent);
123 * Returns non-zero, if all indices of a Sel node are constants.
125 * @param sel the Sel node that will be checked
/* Only constant array accesses (a.b.c[8].d) can be scalar replaced,
 * because each access must map to a fixed value number. */
127 static int is_const_sel(ir_node *sel) {
128 int i, n = get_Sel_n_indexs(sel);
130 for (i = 0; i < n; ++i) {
131 ir_node *idx = get_Sel_index(sel, i);
140 * Check the mode of a Load/Store with the mode of the entity
142 * If the mode of the entity and the Load/Store mode do not match, we
143 * have the bad reinterpret case:
146 * char b = *(char *)&i;
148 * We do NOT count this as one value and return address_taken
150 * However, we support an often used case. If the mode is two-complement
151 * we allow casts between signed/unsigned.
153 * @param mode the mode of the Load/Store
154 * @param ent_mode the mode of the accessed entity
/* Accept signed/unsigned reinterpretation only when size, sort and
 * two's-complement arithmetic all agree; everything else is treated
 * as a type pun that blocks replacement. */
156 static int check_load_store_mode(ir_mode *mode, ir_mode *ent_mode) {
157 if (ent_mode != mode) {
158 if (ent_mode == NULL ||
159 get_mode_size_bits(ent_mode) != get_mode_size_bits(mode) ||
160 get_mode_sort(ent_mode) != get_mode_sort(mode) ||
161 get_mode_arithmetic(ent_mode) != irma_twos_complement ||
162 get_mode_arithmetic(mode) != irma_twos_complement)
169 * Returns non-zero, if the address of an entity
170 * represented by a Sel node (or its successor Sels) is taken.
/* Walks all out-edges of the Sel and classifies each user by opcode.
 * NOTE(review): case labels, returns and closing braces are elided in
 * this excerpt; the visible fragments correspond to the Load, Store,
 * Sel, Call and Tuple cases plus the catch-all. */
172 int is_address_taken(ir_node *sel)
175 ir_mode *emode, *mode;
/* non-constant indices -> accesses cannot be mapped to value numbers */
179 if (! is_const_sel(sel))
182 for (i = get_irn_n_outs(sel) - 1; i >= 0; --i) {
183 ir_node *succ = get_irn_out(sel, i);
185 switch (get_irn_opcode(succ)) {
187 /* do not remove volatile variables */
188 if (get_Load_volatility(succ) == volatility_is_volatile)
190 /* check if this load is not a hidden conversion */
191 mode = get_Load_mode(succ);
192 ent = get_Sel_entity(sel);
193 emode = get_type_mode(get_entity_type(ent));
194 if (! check_load_store_mode(mode, emode))
199 /* check that Sel is not the Store's value */
200 value = get_Store_value(succ);
203 /* do not remove volatile variables */
204 if (get_Store_volatility(succ) == volatility_is_volatile)
206 /* check if this Store is not a hidden conversion */
207 mode = get_irn_mode(value);
208 ent = get_Sel_entity(sel);
209 emode = get_type_mode(get_entity_type(ent));
210 if (! check_load_store_mode(mode, emode))
216 ir_entity *entity = get_Sel_entity(succ);
217 /* we can't handle unions correctly yet -> address taken */
218 if (is_Union_type(get_entity_owner(entity)))
221 /* Check the Sel successor of Sel */
222 res = is_address_taken(succ);
229 /* The address of an entity is given as a parameter.
230 * As long as we do not have analyses that can tell what
231 * is done with parameters, the address is considered taken.
232 * One special case: If the Call type tells that it's a
233 * value parameter, the address is NOT taken.
238 int res = is_address_taken(succ);
245 /* Non-optimized Tuple, happens in inlining */
246 for (input_nr = get_Tuple_n_preds(succ) - 1; input_nr >= 0; --input_nr) {
247 ir_node *pred = get_Tuple_pred(succ, input_nr);
250 /* we found one input */
251 for (k = get_irn_n_outs(succ) - 1; k >= 0; --k) {
252 ir_node *proj = get_irn_out(succ, k);
/* recurse through the Proj that forwards this Tuple input */
254 if (is_Proj(proj) && get_Proj_proj(proj) == input_nr) {
255 int res = is_address_taken(proj);
265 /* another op, the address is taken */
273 * Link all leave Sels with the entity.
275 * @param ent the entity that will be scalar replaced
276 * @param sel a Sel node that selects some fields of this entity
/* Builds a singly linked list of all "leaf" Sels (those whose users are
 * only Loads/Stores) threaded through the irn link fields, anchored at
 * the entity's link. NOTE(review): some lines elided in this excerpt. */
278 static int link_all_leave_sels(ir_entity *ent, ir_node *sel) {
281 for (i = get_irn_n_outs(sel) - 1; i >= 0; --i) {
282 ir_node *succ = get_irn_out(sel, i);
285 /* the current leave has further Sel's, no leave */
287 link_all_leave_sels(ent, succ);
288 } else if (is_Id(succ)) {
/* Id nodes are transparent: look through them */
289 is_leave &= link_all_leave_sels(ent, succ);
297 /* we know we are at a leave, because this function is only
298 * called if the address is NOT taken, so sel's successor(s)
299 * must be Loads or Stores
/* prepend this Sel to the entity's chain of leaf Sels */
301 set_irn_link(sel, get_entity_link(ent));
302 set_entity_link(ent, sel);
307 /* we need a special address that serves as an address taken marker */
/* Any unique non-NULL address works; the address of a static dummy is
 * guaranteed to differ from every ir_node pointer. */
309 static void *ADDRESS_TAKEN = &_x;
312 * Find possible scalar replacements.
314 * @param irg an IR graph
316 * This function finds variables on the (members of the) frame type
317 * that can be scalar replaced, because their address is never taken.
318 * If such a variable is found, its entity link will hold a list of all
319 * Sel nodes, that selects the atomic fields of this entity.
320 * Otherwise, the link will be ADDRESS_TAKEN or NULL.
322 * @return non-zero if at least one entity could be replaced
/* NOTE(review): several lines (variable declarations, braces, returns)
 * are elided in this excerpt. */
325 static int find_possible_replacements(ir_graph *irg) {
328 int i, j, k, static_link_arg;
332 * First, clear the link field of all interesting entities.
334 frame_tp = get_irg_frame_type(irg);
335 for (i = get_class_n_members(frame_tp) - 1; i >= 0; --i) {
336 ir_entity *ent = get_class_member(frame_tp, i);
337 set_entity_link(ent, NULL);
340 /* check for inner functions:
341 * FIXME: need a way to get the argument position for the static link */
343 for (i = get_class_n_members(frame_tp) - 1; i >= 0; --i) {
344 ir_entity *ent = get_class_member(frame_tp, i);
345 if (is_method_entity(ent)) {
/* an inner function may reach our frame through its static link arg */
346 ir_graph *inner_irg = get_entity_irg(ent);
349 assure_irg_outs(inner_irg);
350 args = get_irg_args(inner_irg);
351 for (j = get_irn_n_outs(args) - 1; j >= 0; --j) {
352 ir_node *arg = get_irn_out(args, j);
354 if (get_Proj_proj(arg) == static_link_arg) {
355 for (k = get_irn_n_outs(arg) - 1; k >= 0; --k) {
356 ir_node *succ = get_irn_out(arg, k);
359 ir_entity *ent = get_Sel_entity(succ);
361 if (get_entity_owner(ent) == frame_tp) {
362 /* found an access to the outer frame */
363 set_entity_link(ent, ADDRESS_TAKEN);
373 * Check the ir_graph for Sel nodes. If the entity of Sel
374 * isn't a scalar replacement set the link of this entity
375 * equal ADDRESS_TAKEN.
377 irg_frame = get_irg_frame(irg);
378 for (i = get_irn_n_outs(irg_frame) - 1; i >= 0; --i) {
379 ir_node *succ = get_irn_out(irg_frame, i);
382 ir_entity *ent = get_Sel_entity(succ);
385 /* we are only interested in entities on the frame, NOT
387 if (get_entity_owner(ent) != frame_tp)
/* already proven unreplaceable: skip the (expensive) check below */
390 if (get_entity_link(ent) == ADDRESS_TAKEN)
394 * Beware: in rare cases even entities on the frame might be
395 * volatile. This might happen if the entity serves as a store
396 * to a value that must survive an exception. Do not optimize
397 * such entities away.
399 if (get_entity_volatility(ent) == volatility_is_volatile) {
400 set_entity_link(ent, ADDRESS_TAKEN);
404 ent_type = get_entity_type(ent);
406 /* we can handle arrays, structs and atomic types yet */
407 if (is_Array_type(ent_type) || is_Struct_type(ent_type) || is_atomic_type(ent_type)) {
408 if (is_address_taken(succ)) {
410 if (get_entity_link(ent))
412 set_entity_link(ent, ADDRESS_TAKEN);
414 /* possible found one */
415 if (get_entity_link(ent) == NULL)
417 link_all_leave_sels(ent, succ);
427 * Return a path from the Sel node sel to it's root.
429 * @param sel the Sel node
430 * @param len the length of the path so far
/* Recurses towards the root Sel, accumulating the total path length,
 * allocates the path_t at the root, then fills in this Sel's entity and
 * constant indices on the way back up. NOTE(review): some lines elided. */
432 static path_t *find_path(ir_node *sel, unsigned len) {
435 ir_node *pred = get_Sel_ptr(sel);
437 /* the current Sel node will add some path elements */
438 n = get_Sel_n_indexs(sel);
441 if (! is_Sel(pred)) {
442 /* we found the root */
443 res = XMALLOCF(path_t, path, len);
446 res = find_path(pred, len);
/* our slice of the path starts after the elements of the outer Sels */
448 pos = res->path_len - len;
450 res->path[pos++].ent = get_Sel_entity(sel);
451 for (i = 0; i < n; ++i) {
452 ir_node *index = get_Sel_index(sel, i);
/* indices are known constant here (checked by is_const_sel) */
454 res->path[pos++].tv = get_Const_tarval(index);
461 * Allocate value numbers for the leaves
462 * in our found entities.
464 * @param sels a set that will contain all Sels that have a value number
465 * @param ent the entity that will be scalar replaced
466 * @param vnum the first value number we can assign
467 * @param modes a flexible array, containing all the modes of
470 * @return the next free value number
/* Two Sels with the same constant access path get the same value number;
 * uniqueness is established via the "pathes" set keyed by find_path().
 * NOTE(review): declarations and some statements elided in this excerpt. */
472 static unsigned allocate_value_numbers(pset *sels, ir_entity *ent, unsigned vnum, ir_mode ***modes)
476 set *pathes = new_set(path_cmp, 8);
478 DB((dbg, SET_LEVEL_3, " Visiting Sel nodes of entity %+F\n", ent));
479 /* visit all Sel nodes in the chain of the entity */
480 for (sel = get_entity_link(ent); sel; sel = next) {
481 next = get_irn_link(sel);
483 /* we must mark this sel for later */
484 pset_insert_ptr(sels, sel);
486 key = find_path(sel, 0);
487 path = set_find(pathes, key, PATH_SIZE(key), path_hash(key));
/* path seen before: reuse its value number */
490 SET_VNUM(sel, path->vnum);
491 DB((dbg, SET_LEVEL_3, " %+F represents value %u\n", sel, path->vnum));
/* new path: register it and assign a fresh value number */
495 set_insert(pathes, key, PATH_SIZE(key), path_hash(key));
497 SET_VNUM(sel, key->vnum);
498 DB((dbg, SET_LEVEL_3, " %+F represents value %u\n", sel, key->vnum));
/* grow the modes array in chunks of 16 entries */
500 ARR_EXTO(ir_mode *, *modes, (int)((key->vnum + 15) & ~15));
502 (*modes)[key->vnum] = get_type_mode(get_entity_type(get_Sel_entity(sel)));
504 assert((*modes)[key->vnum] && "Value is not atomic");
/* debug output: print the access path in source-like notation */
510 DB((dbg, SET_LEVEL_2, " %s", get_entity_name(key->path[0].ent)));
511 for (i = 1; i < key->path_len; ++i) {
512 if (is_entity(key->path[i].ent))
513 DB((dbg, SET_LEVEL_2, ".%s", get_entity_name(key->path[i].ent)));
515 DB((dbg, SET_LEVEL_2, "[%ld]", get_tarval_long(key->path[i].tv)));
517 DB((dbg, SET_LEVEL_2, " = %u (%s)\n", PTR_TO_INT(get_irn_link(sel)), get_mode_name((*modes)[key->vnum])));
519 #endif /* DEBUG_libfirm */
/* the chain is consumed: reset the entity's link */
525 set_entity_link(ent, NULL);
530 * A list entry for the fixing lists
532 typedef struct _list_entry_t {
533 ir_node *node; /**< the node that must be fixed */
534 unsigned vnum; /**< the value number of this node */
538 * environment for memory walker
540 typedef struct _env_t {
541 int nvals; /**< number of values */
542 ir_mode **modes; /**< the modes of the values */
543 pset *sels; /**< A set of all Sel nodes that have a value number */
547 * topological post-walker.
/* Replaces Loads and Stores through value-numbered Sels with
 * get_value()/set_value() of the SSA construction interface, turning the
 * memory node into a Tuple so its Projs are rerouted. NOTE(review):
 * declarations, braces and early returns are elided in this excerpt. */
549 static void topologic_walker(ir_node *node, void *ctx) {
551 ir_node *adr, *block, *mem, *val;
556 /* a load, check if we can resolve it */
557 adr = get_Load_ptr(node);
559 DB((dbg, SET_LEVEL_3, " checking %+F for replacement ", node));
561 DB((dbg, SET_LEVEL_3, "no Sel input (%+F)\n", adr));
565 if (! pset_find_ptr(env->sels, adr)) {
566 DB((dbg, SET_LEVEL_3, "Sel %+F has no VNUM\n", adr));
570 /* ok, we have a Load that will be replaced */
571 vnum = GET_VNUM(adr);
572 assert(vnum < (unsigned)env->nvals);
574 DB((dbg, SET_LEVEL_3, "replacing by value %u\n", vnum));
576 block = get_nodes_block(node);
577 set_cur_block(block);
579 /* check, if we can replace this Load */
580 val = get_value(vnum, env->modes[vnum]);
582 /* Beware: A Load can contain a hidden conversion in Firm.
583 This happens for instance in the following code:
586 unsigned j = *(unsigned *)&i;
/* insert an explicit Conv for such hidden conversions */
589 mode = get_Load_mode(node);
590 if (mode != get_irn_mode(val))
591 val = new_d_Conv(get_irn_dbg_info(node), val, mode);
593 mem = get_Load_mem(node);
594 turn_into_tuple(node, pn_Load_max);
595 set_Tuple_pred(node, pn_Load_M, mem);
596 set_Tuple_pred(node, pn_Load_res, val);
/* a replaced Load can no longer raise an exception */
597 set_Tuple_pred(node, pn_Load_X_regular, new_Jmp());
598 set_Tuple_pred(node, pn_Load_X_except, new_Bad());
599 } else if (is_Store(node)) {
600 DB((dbg, SET_LEVEL_3, " checking %+F for replacement ", node));
602 /* a Store always can be replaced */
603 adr = get_Store_ptr(node);
606 DB((dbg, SET_LEVEL_3, "no Sel input (%+F)\n", adr));
610 if (! pset_find_ptr(env->sels, adr)) {
611 DB((dbg, SET_LEVEL_3, "Sel %+F has no VNUM\n", adr));
615 vnum = GET_VNUM(adr);
616 assert(vnum < (unsigned)env->nvals);
618 DB((dbg, SET_LEVEL_3, "replacing by value %u\n", vnum));
620 block = get_nodes_block(node);
621 set_cur_block(block);
623 /* Beware: A Store can contain a hidden conversion in Firm. */
624 val = get_Store_value(node);
625 if (get_irn_mode(val) != env->modes[vnum])
626 val = new_d_Conv(get_irn_dbg_info(node), val, env->modes[vnum]);
628 set_value(vnum, val);
630 mem = get_Store_mem(node);
631 turn_into_tuple(node, pn_Store_max);
632 set_Tuple_pred(node, pn_Store_M, mem);
633 set_Tuple_pred(node, pn_Store_X_regular, new_Jmp());
634 set_Tuple_pred(node, pn_Store_X_except, new_Bad());
639 * Make scalar replacement.
641 * @param sels A set containing all Sel nodes that have a value number
642 * @param nvals The number of scalars.
643 * @param modes A flexible array, containing all the modes of
/* Drives the actual rewrite: opens SSA construction for nvals values,
 * walks the graph replacing Loads/Stores, then finishes SSA construction
 * (placing the needed Phi nodes). */
646 static void do_scalar_replacements(pset *sels, int nvals, ir_mode **modes) {
649 ssa_cons_start(current_ir_graph, nvals);
656 * second step: walk over the graph blockwise in topological order
657 * and fill the array as much as possible.
659 DB((dbg, SET_LEVEL_3, "Substituting Loads and Stores in %+F\n", current_ir_graph));
660 irg_walk_blkwise_graph(current_ir_graph, NULL, topologic_walker, &env);
662 ssa_cons_finish(current_ir_graph);
666 * Find possible scalar replacements
668 * @param irg The current ir graph.
/* Public entry point of the optimization: analyzes the frame entities,
 * assigns value numbers, performs the replacement and finally removes
 * the replaced entities from the frame type. NOTE(review): declarations,
 * braces and the return statement are elided in this excerpt. */
670 int scalar_replacement_opt(ir_graph *irg) {
673 scalars_t key, *value;
678 ir_type *ent_type, *frame_tp;
/* optimization globally disabled -> nothing to do */
682 if (! get_opt_scalar_replacement())
685 rem = current_ir_graph;
686 current_ir_graph = irg;
688 /* Call algorithm that computes the out edges */
689 assure_irg_outs(irg);
691 /* we use the link field to store the VNUM */
692 ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK);
693 irp_reserve_resources(irp, IR_RESOURCE_ENTITY_LINK);
695 /* Find possible scalar replacements */
696 if (find_possible_replacements(irg)) {
697 DB((dbg, SET_LEVEL_1, "Scalar Replacement: %+F\n", irg));
699 /* Insert in set the scalar replacements. */
700 irg_frame = get_irg_frame(irg);
702 modes = NEW_ARR_F(ir_mode *, 16);
703 set_ent = new_set(ent_cmp, 8);
704 sels = pset_new_ptr(8);
705 frame_tp = get_irg_frame_type(irg);
707 for (i = get_irn_n_outs(irg_frame) - 1; i >= 0; --i) {
708 ir_node *succ = get_irn_out(irg_frame, i);
711 ir_entity *ent = get_Sel_entity(succ);
713 /* we are only interested in entities on the frame, NOT
715 if (get_entity_owner(ent) != frame_tp)
/* skip entities not selected by find_possible_replacements() */
718 if (get_entity_link(ent) == NULL || get_entity_link(ent) == ADDRESS_TAKEN)
721 ent_type = get_entity_type(ent);
/* remember the entity so it can be removed from the frame later */
724 key.ent_owner = get_entity_owner(ent);
725 set_insert(set_ent, &key, sizeof(key), HASH_PTR(key.ent));
728 if (is_Array_type(ent_type)) {
729 DB((dbg, SET_LEVEL_1, " found array %s\n", get_entity_name(ent)));
730 } else if (is_Struct_type(ent_type)) {
731 DB((dbg, SET_LEVEL_1, " found struct %s\n", get_entity_name(ent)));
732 } else if (is_atomic_type(ent_type))
733 DB((dbg, SET_LEVEL_1, " found atomic value %s\n", get_entity_name(ent)));
735 panic("Neither an array nor a struct or atomic value found in scalar replace");
737 #endif /* DEBUG_libfirm */
739 nvals = allocate_value_numbers(sels, ent, nvals, &modes);
743 DB((dbg, SET_LEVEL_1, " %u values will be needed\n", nvals));
745 /* If scalars were found. */
747 do_scalar_replacements(sels, nvals, modes);
/* the replaced entities are dead: drop them from the frame type */
749 foreach_set(set_ent, value) {
750 remove_class_member(value->ent_owner, value->ent);
754 * We changed the graph, but did NOT introduce new blocks
755 * neither changed control flow, cf-backedges should be still
758 set_irg_outs_inconsistent(irg);
759 set_irg_loopinfo_inconsistent(irg);
768 ir_free_resources(irg, IR_RESOURCE_IRN_LINK);
769 irp_free_resources(irp, IR_RESOURCE_ENTITY_LINK);
771 current_ir_graph = rem;
/* Creates an ir_graph pass wrapper for scalar_replacement_opt().
 * @param name pass name, or NULL for the default "scalar_rep" */
775 ir_graph_pass_t *scalar_replacement_opt_pass(const char *name) {
776 return def_graph_pass_ret(name ? name : "scalar_rep",
777 scalar_replacement_opt);
/* Registers the debug module used by the DB() macros in this file. */
780 void firm_init_scalar_replace(void) {
781 FIRM_DBG_REGISTER(dbg, "firm.opt.scalar_replace");