2 * Copyright (C) 1995-2008 University of Karlsruhe. All rights reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
22 * @brief Scalar replacement of compounds.
23 * @author Beyhan Veliev, Michael Beck
30 #include "iroptimize.h"
31 #include "scalar_replace.h"
/* Stash a value number in a node's link field / read it back.
 * NOTE(review): relies on IR_RESOURCE_IRN_LINK being reserved by the caller
 * (see scalar_replacement_opt below). */
50 #define SET_VNUM(node, vnum) set_irn_link(node, INT_TO_PTR(vnum))
51 #define GET_VNUM(node) (unsigned)PTR_TO_INT(get_irn_link(node))
54 * A path element entry: it is either an entity
55 * or a tarval, because we evaluate only constant array
56 * accesses like a.b.c[8].d
64 * An access path, used to assign value numbers
65 * to variables that will be scalar replaced.
67 typedef struct _path_t {
68 unsigned vnum; /**< The value number assigned to this path. */
69 unsigned path_len; /**< The length of the access path. */
70 path_elem_t path[1]; /**< The path elements (allocated with extra room; see PATH_SIZE). */
/* NOTE(review): pre-C99 "struct hack"; path[1] plus over-allocation stands in
 * for a flexible array member. PATH_SIZE below accounts for the one element
 * already included in sizeof(*(p)). */
73 /** The size of a path in bytes. */
74 #define PATH_SIZE(p) (sizeof(*(p)) + sizeof((p)->path[0]) * ((p)->path_len - 1))
76 typedef struct _scalars_t {
77 ir_entity *ent; /**< An entity chosen for scalar replacement. */
78 ir_type *ent_owner; /**< The owner of this entity (used to remove it later). */
/* Debug module handle, only present in debug builds. */
81 DEBUG_ONLY(static firm_dbg_module_t *dbg;)
86 * @return 0 if they are identical
88 static int path_cmp(const void *elt, const void *key, size_t size)
90 const path_t *p1 = elt;
91 const path_t *p2 = key;
94 /* we can use memcmp here, because identical tarvals should have identical addresses */
95 return memcmp(p1->path, p2->path, p1->path_len * sizeof(p1->path[0]));
99 * Compare two elements of the scalars_t set.
101 * @return 0 if they are identical (compares the entity pointers only)
103 static int ent_cmp(const void *elt, const void *key, size_t size)
105 const scalars_t *c1 = elt;
106 const scalars_t *c2 = key;
109 return c1->ent != c2->ent;
113 * Calculate a hash value for a path.
115 static unsigned path_hash(const path_t *path)
/* XOR the (pointer-derived) entity values of all path elements together.
 * Tarval elements hash via the same union member — identical tarvals are
 * unique objects in libFirm, so their addresses are stable hash inputs. */
120 for (i = 0; i < path->path_len; ++i)
121 hash ^= (unsigned)PTR_TO_INT(path->path[i].ent);
127 * Returns non-zero, if all indices of a Sel node are constants.
129 * @param sel the Sel node that will be checked
131 static int is_const_sel(ir_node *sel)
133 int i, n = get_Sel_n_indexs(sel);
/* Inspect every index operand; only constant indices allow us to
 * compute a fixed access path for value numbering. */
135 for (i = 0; i < n; ++i) {
136 ir_node *idx = get_Sel_index(sel, i);
145 * Check the mode of a Load/Store with the mode of the entity
147 * If the mode of the entity and the Load/Store mode do not match, we
148 * have the bad reinterpret case:
151 * char b = *(char *)&i;
153 * We do NOT count this as one value and return address_taken
155 * However, we support an often used case. If the mode is two-complement
156 * we allow casts between signed/unsigned.
158 * @param mode the mode of the Load/Store
159 * @param ent_mode the mode of the accessed entity
161 static int check_load_store_mode(ir_mode *mode, ir_mode *ent_mode)
163 if (ent_mode != mode) {
/* Accept a mismatch only for same-sized, same-sort two's-complement
 * integer modes, i.e. a pure signed/unsigned reinterpretation. */
164 if (ent_mode == NULL ||
165 get_mode_size_bits(ent_mode) != get_mode_size_bits(mode) ||
166 get_mode_sort(ent_mode) != get_mode_sort(mode) ||
167 get_mode_arithmetic(ent_mode) != irma_twos_complement ||
168 get_mode_arithmetic(mode) != irma_twos_complement)
175 * Returns non-zero, if the address of an entity
176 * represented by a Sel node (or its successor Sels) is taken.
178 int is_address_taken(ir_node *sel)
181 ir_mode *emode, *mode;
/* Non-constant indices prevent path construction -> treat as taken. */
185 if (! is_const_sel(sel))
/* Walk all users (out edges) of this Sel and classify them. */
188 for (i = get_irn_n_outs(sel) - 1; i >= 0; --i) {
189 ir_node *succ = get_irn_out(sel, i);
191 switch (get_irn_opcode(succ)) {
193 /* do not remove volatile variables */
194 if (get_Load_volatility(succ) == volatility_is_volatile)
196 /* check if this load is not a hidden conversion */
197 mode = get_Load_mode(succ);
198 ent = get_Sel_entity(sel);
199 emode = get_type_mode(get_entity_type(ent));
200 if (! check_load_store_mode(mode, emode))
205 /* check that Sel is not the Store's value */
206 value = get_Store_value(succ);
209 /* do not remove volatile variables */
210 if (get_Store_volatility(succ) == volatility_is_volatile)
212 /* check if this Store is not a hidden conversion */
213 mode = get_irn_mode(value);
214 ent = get_Sel_entity(sel);
215 emode = get_type_mode(get_entity_type(ent));
216 if (! check_load_store_mode(mode, emode))
222 ir_entity *entity = get_Sel_entity(succ);
223 /* we can't handle unions correctly yet -> address taken */
224 if (is_Union_type(get_entity_owner(entity)))
227 /* Check the Sel successor of Sel recursively */
228 res = is_address_taken(succ);
235 /* The address of an entity is given as a parameter.
236 * As long as we do not have analyses that can tell what
237 * is done with parameters, assume it is taken.
238 * One special case: If the Call type tells that it's a
239 * value parameter, the address is NOT taken.
244 int res = is_address_taken(succ);
251 /* Non-optimized Tuple, happens in inlining */
252 for (input_nr = get_Tuple_n_preds(succ) - 1; input_nr >= 0; --input_nr) {
253 ir_node *pred = get_Tuple_pred(succ, input_nr);
256 /* we found one input */
257 for (k = get_irn_n_outs(succ) - 1; k >= 0; --k) {
258 ir_node *proj = get_irn_out(succ, k);
/* Follow the Proj that selects this Tuple input. */
260 if (is_Proj(proj) && get_Proj_proj(proj) == input_nr) {
261 int res = is_address_taken(proj);
271 /* another op, the address is taken */
279 * Link all leaf Sels with the entity.
281 * @param ent the entity that will be scalar replaced
282 * @param sel a Sel node that selects some fields of this entity
284 static int link_all_leave_sels(ir_entity *ent, ir_node *sel)
/* Descend through chained Sels (and Ids) to find the leaves. */
288 for (i = get_irn_n_outs(sel) - 1; i >= 0; --i) {
289 ir_node *succ = get_irn_out(sel, i);
292 /* the current node has further Sel's, so it is no leaf */
294 link_all_leave_sels(ent, succ);
295 } else if (is_Id(succ)) {
296 is_leave &= link_all_leave_sels(ent, succ);
304 /* we know we are at a leaf, because this function is only
305 * called if the address is NOT taken, so sel's successor(s)
306 * must be Loads or Stores
/* Prepend this Sel to the entity's linked list of leaf Sels. */
308 set_irn_link(sel, get_entity_link(ent));
309 set_entity_link(ent, sel);
314 /* we need a special address that serves as an address-taken marker
 * (any unique non-NULL pointer distinguishable from a Sel list works) */
316 static void *ADDRESS_TAKEN = &_x;
319 * Find possible scalar replacements.
321 * @param irg an IR graph
323 * This function finds variables on the (members of the) frame type
324 * that can be scalar replaced, because their address is never taken.
325 * If such a variable is found, its entity link will hold a list of all
326 * Sel nodes, that selects the atomic fields of this entity.
327 * Otherwise, the link will be ADDRESS_TAKEN or NULL.
329 * @return non-zero if at least one entity could be replaced
332 static int find_possible_replacements(ir_graph *irg)
336 int i, j, k, static_link_arg;
340 * First, clear the link field of all interesting entities.
342 frame_tp = get_irg_frame_type(irg);
343 for (i = get_class_n_members(frame_tp) - 1; i >= 0; --i) {
344 ir_entity *ent = get_class_member(frame_tp, i);
345 set_entity_link(ent, NULL);
348 /* check for inner functions:
349 * FIXME: need a way to get the argument position for the static link */
351 for (i = get_class_n_members(frame_tp) - 1; i >= 0; --i) {
352 ir_entity *ent = get_class_member(frame_tp, i);
353 if (is_method_entity(ent)) {
354 ir_graph *inner_irg = get_entity_irg(ent);
357 assure_irg_outs(inner_irg);
358 args = get_irg_args(inner_irg);
/* Scan the inner graph's argument Projs for the static link. */
359 for (j = get_irn_n_outs(args) - 1; j >= 0; --j) {
360 ir_node *arg = get_irn_out(args, j);
362 if (get_Proj_proj(arg) == static_link_arg) {
363 for (k = get_irn_n_outs(arg) - 1; k >= 0; --k) {
364 ir_node *succ = get_irn_out(arg, k);
367 ir_entity *ent = get_Sel_entity(succ);
369 if (get_entity_owner(ent) == frame_tp) {
370 /* found an access to the outer frame: entity escapes */
371 set_entity_link(ent, ADDRESS_TAKEN);
381 * Check the ir_graph for Sel nodes. If the entity of Sel
382 * isn't a scalar replacement set the link of this entity
383 * equal ADDRESS_TAKEN.
385 irg_frame = get_irg_frame(irg);
386 for (i = get_irn_n_outs(irg_frame) - 1; i >= 0; --i) {
387 ir_node *succ = get_irn_out(irg_frame, i);
390 ir_entity *ent = get_Sel_entity(succ);
393 /* we are only interested in entities on the frame, NOT
395 if (get_entity_owner(ent) != frame_tp)
398 if (get_entity_link(ent) == ADDRESS_TAKEN)
402 * Beware: in rare cases even entities on the frame might be
403 * volatile. This might happen if the entity serves as a store
404 * to a value that must survive an exception. Do not optimize
405 * such entities away.
407 if (get_entity_volatility(ent) == volatility_is_volatile) {
408 set_entity_link(ent, ADDRESS_TAKEN);
412 ent_type = get_entity_type(ent);
414 /* we can only handle arrays, structs and atomic types so far */
415 if (is_Array_type(ent_type) || is_Struct_type(ent_type) || is_atomic_type(ent_type)) {
416 if (is_address_taken(succ)) {
/* mark as non-replaceable, but only if it was a candidate before */
418 if (get_entity_link(ent))
420 set_entity_link(ent, ADDRESS_TAKEN)
422 /* possibly found one */
423 if (get_entity_link(ent) == NULL)
425 link_all_leave_sels(ent, succ);
435 * Return a path from the Sel node sel to its root.
437 * @param sel the Sel node
438 * @param len the length of the path so far
440 static path_t *find_path(ir_node *sel, unsigned len)
444 ir_node *pred = get_Sel_ptr(sel);
446 /* the current Sel node will add some path elements */
447 n = get_Sel_n_indexs(sel);
/* Recurse towards the frame pointer; the non-Sel predecessor is the root. */
450 if (! is_Sel(pred)) {
451 /* we found the root: allocate the path with the accumulated length */
452 res = XMALLOCF(path_t, path, len);
455 res = find_path(pred, len);
/* Fill our elements behind those the callers up the chain will fill. */
457 pos = res->path_len - len;
459 res->path[pos++].ent = get_Sel_entity(sel);
460 for (i = 0; i < n; ++i) {
461 ir_node *index = get_Sel_index(sel, i);
/* indices are constants here (guaranteed by is_const_sel) */
463 res->path[pos++].tv = get_Const_tarval(index);
470 * Allocate value numbers for the leaves
471 * in our found entities.
473 * @param sels a set that will contain all Sels that have a value number
474 * @param ent the entity that will be scalar replaced
475 * @param vnum the first value number we can assign
476 * @param modes a flexible array, containing all the modes of
479 * @return the next free value number
481 static unsigned allocate_value_numbers(pset *sels, ir_entity *ent, unsigned vnum, ir_mode ***modes)
485 set *pathes = new_set(path_cmp, 8);
487 DB((dbg, SET_LEVEL_3, " Visiting Sel nodes of entity %+F\n", ent));
488 /* visit all Sel nodes in the chain of the entity */
489 for (sel = get_entity_link(ent); sel; sel = next) {
490 next = get_irn_link(sel);
492 /* we must mark this sel for later */
493 pset_insert_ptr(sels, sel);
495 key = find_path(sel, 0);
496 path = set_find(pathes, key, PATH_SIZE(key), path_hash(key));
/* identical path already seen: reuse its value number */
499 SET_VNUM(sel, path->vnum);
500 DB((dbg, SET_LEVEL_3, " %+F represents value %u\n", sel, path->vnum));
/* new path: register it and hand out a fresh value number */
504 set_insert(pathes, key, PATH_SIZE(key), path_hash(key));
506 SET_VNUM(sel, key->vnum);
507 DB((dbg, SET_LEVEL_3, " %+F represents value %u\n", sel, key->vnum));
/* grow the modes array in chunks of 16 entries */
509 ARR_EXTO(ir_mode *, *modes, (int)((key->vnum + 15) & ~15));
511 (*modes)[key->vnum] = get_type_mode(get_entity_type(get_Sel_entity(sel)));
513 assert((*modes)[key->vnum] && "Value is not atomic");
/* debug output: print the access path a.b[i].c ... = vnum (mode) */
519 DB((dbg, SET_LEVEL_2, " %s", get_entity_name(key->path[0].ent)));
520 for (i = 1; i < key->path_len; ++i) {
521 if (is_entity(key->path[i].ent))
522 DB((dbg, SET_LEVEL_2, ".%s", get_entity_name(key->path[i].ent)));
524 DB((dbg, SET_LEVEL_2, "[%ld]", get_tarval_long(key->path[i].tv)));
526 DB((dbg, SET_LEVEL_2, " = %u (%s)\n", PTR_TO_INT(get_irn_link(sel)), get_mode_name((*modes)[key->vnum])));
528 #endif /* DEBUG_libfirm */
/* the entity's Sel list has been consumed; reset the link */
534 set_entity_link(ent, NULL);
539 * A list entry for the fixing lists
541 typedef struct _list_entry_t {
542 ir_node *node; /**< the node that must be fixed */
543 unsigned vnum; /**< the value number of this node */
547 * environment for memory walker
549 typedef struct _env_t {
550 int nvals; /**< number of values */
551 ir_mode **modes; /**< the modes of the values */
552 pset *sels; /**< A set of all Sel nodes that have a value number */
556 * topological post-walker: replace Loads/Stores through numbered Sels
 * by SSA values via get_value/set_value.
558 static void topologic_walker(ir_node *node, void *ctx)
561 ir_node *adr, *block, *mem, *val;
566 /* a load, check if we can resolve it */
567 adr = get_Load_ptr(node);
569 DB((dbg, SET_LEVEL_3, " checking %+F for replacement ", node));
571 DB((dbg, SET_LEVEL_3, "no Sel input (%+F)\n", adr));
/* only Sels that got a value number are replaceable */
575 if (! pset_find_ptr(env->sels, adr)) {
576 DB((dbg, SET_LEVEL_3, "Sel %+F has no VNUM\n", adr));
580 /* ok, we have a Load that will be replaced */
581 vnum = GET_VNUM(adr);
582 assert(vnum < (unsigned)env->nvals);
584 DB((dbg, SET_LEVEL_3, "replacing by value %u\n", vnum));
586 block = get_nodes_block(node);
587 set_cur_block(block);
589 /* check, if we can replace this Load */
590 val = get_value(vnum, env->modes[vnum]);
592 /* Beware: A Load can contain a hidden conversion in Firm.
593 This happens for instance in the following code:
596 unsigned j = *(unsigned *)&i;
/* insert an explicit Conv if the Load mode differs from the value's mode */
599 mode = get_Load_mode(node);
600 if (mode != get_irn_mode(val))
601 val = new_d_Conv(get_irn_dbg_info(node), val, mode);
/* rewire the Load's Projs: memory passes through, result is the value */
603 mem = get_Load_mem(node);
604 turn_into_tuple(node, pn_Load_max);
605 set_Tuple_pred(node, pn_Load_M, mem);
606 set_Tuple_pred(node, pn_Load_res, val);
607 set_Tuple_pred(node, pn_Load_X_regular, new_Jmp());
608 set_Tuple_pred(node, pn_Load_X_except, new_Bad());
609 } else if (is_Store(node)) {
610 DB((dbg, SET_LEVEL_3, " checking %+F for replacement ", node));
612 /* a Store always can be replaced */
613 adr = get_Store_ptr(node);
616 DB((dbg, SET_LEVEL_3, "no Sel input (%+F)\n", adr));
620 if (! pset_find_ptr(env->sels, adr)) {
621 DB((dbg, SET_LEVEL_3, "Sel %+F has no VNUM\n", adr));
625 vnum = GET_VNUM(adr);
626 assert(vnum < (unsigned)env->nvals);
628 DB((dbg, SET_LEVEL_3, "replacing by value %u\n", vnum));
630 block = get_nodes_block(node);
631 set_cur_block(block);
633 /* Beware: A Store can contain a hidden conversion in Firm. */
634 val = get_Store_value(node);
635 if (get_irn_mode(val) != env->modes[vnum])
636 val = new_d_Conv(get_irn_dbg_info(node), val, env->modes[vnum]);
/* record the stored value for SSA construction */
638 set_value(vnum, val);
640 mem = get_Store_mem(node);
641 turn_into_tuple(node, pn_Store_max);
642 set_Tuple_pred(node, pn_Store_M, mem);
643 set_Tuple_pred(node, pn_Store_X_regular, new_Jmp());
644 set_Tuple_pred(node, pn_Store_X_except, new_Bad());
649 * Make scalar replacement.
651 * @param sels A set containing all Sel nodes that have a value number
652 * @param nvals The number of scalars.
653 * @param modes A flexible array, containing all the modes of
/* Start incremental SSA construction for nvals values, rewrite all
 * Loads/Stores via the walker, then finish SSA construction. */
656 static void do_scalar_replacements(pset *sels, int nvals, ir_mode **modes)
660 ssa_cons_start(current_ir_graph, nvals);
667 * second step: walk over the graph blockwise in topological order
668 * and fill the array as much as possible.
670 DB((dbg, SET_LEVEL_3, "Substituting Loads and Stores in %+F\n", current_ir_graph));
671 irg_walk_blkwise_graph(current_ir_graph, NULL, topologic_walker, &env);
673 ssa_cons_finish(current_ir_graph);
677 * Find possible scalar replacements and perform them.
679 * @param irg The current ir graph.
681 int scalar_replacement_opt(ir_graph *irg)
685 scalars_t key, *value;
690 ir_type *ent_type, *frame_tp;
/* temporarily switch current_ir_graph; restored before returning */
694 rem = current_ir_graph;
695 current_ir_graph = irg;
697 /* Call algorithm that computes the out edges */
698 assure_irg_outs(irg);
700 /* we use the link field to store the VNUM */
701 ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK);
702 irp_reserve_resources(irp, IR_RESOURCE_ENTITY_LINK);
704 /* Find possible scalar replacements */
705 if (find_possible_replacements(irg)) {
706 DB((dbg, SET_LEVEL_1, "Scalar Replacement: %+F\n", irg));
708 /* Insert in set the scalar replacements. */
709 irg_frame = get_irg_frame(irg);
711 modes = NEW_ARR_F(ir_mode *, 16);
712 set_ent = new_set(ent_cmp, 8);
713 sels = pset_new_ptr(8);
714 frame_tp = get_irg_frame_type(irg);
716 for (i = get_irn_n_outs(irg_frame) - 1; i >= 0; --i) {
717 ir_node *succ = get_irn_out(irg_frame, i);
720 ir_entity *ent = get_Sel_entity(succ);
722 /* we are only interested in entities on the frame, NOT
724 if (get_entity_owner(ent) != frame_tp)
/* skip entities that are not replacement candidates */
727 if (get_entity_link(ent) == NULL || get_entity_link(ent) == ADDRESS_TAKEN)
730 ent_type = get_entity_type(ent);
/* remember the entity so it can be removed from the frame later */
733 key.ent_owner = get_entity_owner(ent);
734 set_insert(set_ent, &key, sizeof(key), HASH_PTR(key.ent));
737 if (is_Array_type(ent_type)) {
738 DB((dbg, SET_LEVEL_1, " found array %s\n", get_entity_name(ent)));
739 } else if (is_Struct_type(ent_type)) {
740 DB((dbg, SET_LEVEL_1, " found struct %s\n", get_entity_name(ent)));
741 } else if (is_atomic_type(ent_type))
742 DB((dbg, SET_LEVEL_1, " found atomic value %s\n", get_entity_name(ent)));
744 panic("Neither an array nor a struct or atomic value found in scalar replace");
746 #endif /* DEBUG_libfirm */
748 nvals = allocate_value_numbers(sels, ent, nvals, &modes);
752 DB((dbg, SET_LEVEL_1, " %u values will be needed\n", nvals));
754 /* If scalars were found. */
756 do_scalar_replacements(sels, nvals, modes);
/* the replaced entities are dead; remove them from the frame type */
758 foreach_set(set_ent, value) {
759 remove_class_member(value->ent_owner, value->ent);
763 * We changed the graph, but did NOT introduce new blocks
764 * neither changed control flow, cf-backedges should be still
767 set_irg_outs_inconsistent(irg);
768 set_irg_loopinfo_inconsistent(irg);
/* release the link fields and restore the previous current graph */
777 ir_free_resources(irg, IR_RESOURCE_IRN_LINK);
778 irp_free_resources(irp, IR_RESOURCE_ENTITY_LINK);
780 current_ir_graph = rem;
/* Wrap scalar_replacement_opt as a graph pass; default name "scalar_rep". */
784 ir_graph_pass_t *scalar_replacement_opt_pass(const char *name)
786 return def_graph_pass_ret(name ? name : "scalar_rep",
787 scalar_replacement_opt);
/* One-time module initialization: register the debug channel. */
790 void firm_init_scalar_replace(void)
792 FIRM_DBG_REGISTER(dbg, "firm.opt.scalar_replace");