2 * Copyright (C) 1995-2007 University of Karlsruhe. All rights reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Scalar replacement of compounds.
23 * @author Beyhan Veliev, Michael Beck
32 #include "iroptimize.h"
33 #include "scalar_replace.h"
/* Stash a value number in a node's (otherwise unused) link field. */
48 #define SET_VNUM(node, vnum) set_irn_link(node, INT_TO_PTR(vnum))
/* Read a previously stored value number back out of a node's link field. */
49 #define GET_VNUM(node) (unsigned)PTR_TO_INT(get_irn_link(node))
52 * A path element entry: it is either an entity
53 * or a tarval, because we evaluate only constant array
54 * accesses like a.b.c[8].d
62 * An access path, used to assign value numbers
63 * to variables that will be scalar replaced.
65 typedef struct _path_t {
66 unsigned vnum; /**< The value number. */
67 unsigned path_len; /**< The length of the access path. */
/* path[1] is the pre-C99 "struct hack": the array is over-allocated,
 * see PATH_SIZE below and the allocation in find_path(). */
68 path_elem_t path[1]; /**< The path. */
71 /** The size of a path in bytes. */
/* The "- 1" accounts for the one path_elem_t already inside sizeof(*(p)). */
72 #define PATH_SIZE(p) (sizeof(*(p)) + sizeof((p)->path[0]) * ((p)->path_len - 1))
/* An entry of the set of entities selected for scalar replacement. */
74 typedef struct _scalars_t {
75 ir_entity *ent; /**< An entity for scalar replacement. */
76 ir_type *ent_owner; /**< The owner of this entity. */
83 * @return 0 if they are identical
/* Set-compare callback for path_t entries (libFirm set convention:
 * 0 means "equal"). NOTE(review): the comparison of the two path_len
 * fields is on a line not visible in this excerpt. */
85 static int path_cmp(const void *elt, const void *key, size_t size)
87 const path_t *p1 = elt;
88 const path_t *p2 = key;
91 /* we can use memcmp here, because identical tarvals should have identical addresses */
92 return memcmp(p1->path, p2->path, p1->path_len * sizeof(p1->path[0]))
96 * Compare two elements of the scalars_t set.
98 * @return 0 if they are identical
/* Identity compare: two scalars_t entries match iff they wrap the same entity. */
100 static int ent_cmp(const void *elt, const void *key, size_t size)
102 const scalars_t *c1 = elt;
103 const scalars_t *c2 = key;
/* != yields 0 on equality, which is the set's "found" convention */
106 return c1->ent != c2->ent;
110 * Calculate a hash value for a path.
/* XOR the pointer identities of all path elements together. The .ent and
 * .tv members share a union slot, so hashing .ent covers both kinds. */
112 static unsigned path_hash(const path_t *path)
117 for (i = 0; i < path->path_len; ++i)
118 hash ^= (unsigned)PTR_TO_INT(path->path[i].ent);
124 * Returns non-zero, if all indices of a Sel node are constants.
126 * @param sel the Sel node that will be checked
128 static int is_const_sel(ir_node *sel) {
129 int i, n = get_Sel_n_indexs(sel);
131 for (i = 0; i < n; ++i) {
132 ir_node *idx = get_Sel_index(sel, i);
/* any non-Const index makes the access non-analyzable */
134 if (get_irn_op(idx) != op_Const)
141 * Check the mode of a Load/Store with the mode of the entity
143 * If the mode of the entity and the Load/Store mode do not match, we
144 * have the bad reinterpret case:
147 * char b = *(char *)&i;
149 * We do NOT count this as one value and return address_taken
151 * However, we support an often used case. If the mode is two's complement
152 * we allow casts between signed/unsigned.
154 * @param mode the mode of the Load/Store
155 * @param ent_mode the mode of the accessed entity
/* Accept the access only if the modes are identical, or if both are
 * two's-complement integers of the same bit size and sort (i.e. a pure
 * signed/unsigned reinterpretation). */
157 static int check_load_store_mode(ir_mode *mode, ir_mode *ent_mode) {
158 if (ent_mode != mode) {
159 if (ent_mode == NULL ||
160 get_mode_size_bits(ent_mode) != get_mode_size_bits(mode) ||
161 get_mode_sort(ent_mode) != get_mode_sort(mode) ||
162 get_mode_arithmetic(ent_mode) != irma_twos_complement ||
163 get_mode_arithmetic(mode) != irma_twos_complement)
170 * Returns non-zero, if the address of an entity
171 * represented by a Sel node (or its successor Sels) is taken.
/* Walks all out-edges of sel and classifies each user. NOTE(review): the
 * case labels of the switch (Load/Store/Sel/...) are on lines not visible
 * in this excerpt. */
173 int is_address_taken(ir_node *sel)
176 ir_mode *emode, *mode;
/* non-constant indices: treat the address as taken */
180 if (! is_const_sel(sel))
183 for (i = get_irn_n_outs(sel) - 1; i >= 0; --i) {
184 ir_node *succ = get_irn_out(sel, i);
186 switch (get_irn_opcode(succ)) {
188 /* check if this load is not a hidden conversion */
189 mode = get_Load_mode(succ);
190 ent = get_Sel_entity(sel);
191 emode = get_type_mode(get_entity_type(ent));
192 if (! check_load_store_mode(mode, emode))
197 /* check that Sel is not the Store's value */
198 value = get_Store_value(succ);
201 /* check if this Store is not a hidden conversion */
202 mode = get_irn_mode(value);
203 ent = get_Sel_entity(sel);
204 emode = get_type_mode(get_entity_type(ent));
205 if (! check_load_store_mode(mode, emode))
210 /* Check the Sel successor of Sel (recursive descent) */
211 int res = is_address_taken(succ);
219 /* The address of an entity is given as a parameter.
220 * As long as we do not have analyses that can tell what
221 * is done with parameters, assume the address is taken.
226 /* another op, the address is taken */
234 * Link all leaf Sels with the entity.
236 * @param ent the entity that will be scalar replaced
237 * @param sel a Sel node that selects some fields of this entity
/* Builds a singly linked list of leaf Sels through the nodes' link fields,
 * anchored at the entity's link. Inner (non-leaf) Sels recurse. */
239 static void link_all_leave_sels(ir_entity *ent, ir_node *sel)
243 n = get_irn_n_outs(sel);
244 for (i = 0; i < n; ++i) {
245 ir_node *succ = get_irn_out(sel, i);
248 link_all_leave_sels(ent, succ);
254 /* if Sel nodes with memory inputs are used, an entity can be
255 * visited more than once causing a ring here, so we use the
256 * node flag to mark linked nodes
258 if (irn_visited(sel))
261 /* we know we are at a leaf, because this function is only
262 * called if the address is NOT taken, so succ must be a Load
/* prepend this Sel to the entity's list of leaf Sels */
265 set_irn_link(sel, get_entity_link(ent));
266 set_entity_link(ent, sel);
268 mark_irn_visited(sel);
272 /* we need a special address that serves as an address-taken marker */
/* NOTE(review): the dummy object _x is declared on a line not visible here;
 * ADDRESS_TAKEN only needs a unique, never-dereferenced sentinel address. */
274 static void *ADDRESS_TAKEN = &_x;
277 * Find possible scalar replacements.
279 * @param irg an IR graph
281 * This function finds variables on the (members of the) frame type
282 * that can be scalar replaced, because their address is never taken.
283 * If such a variable is found, its entity link will hold a list of all
284 * Sel nodes, that selects the atomic fields of this entity.
285 * Otherwise, the link will be ADDRESS_TAKEN or NULL.
287 * @return non-zero if at least one entity could be replaced
290 static int find_possible_replacements(ir_graph *irg)
292 ir_node *irg_frame = get_irg_frame(irg);
296 inc_irg_visited(irg);
298 n = get_irn_n_outs(irg_frame);
301 * First, clear the link field of all interesting entities.
302 * Note that we did not rely on the fact that there is only
303 * one Sel node per entity, so we might access one entity
304 * more than once here.
305 * That's why we need two loops.
307 for (i = 0; i < n; ++i) {
308 ir_node *succ = get_irn_out(irg_frame, i);
311 ir_entity *ent = get_Sel_entity(succ);
312 set_entity_link(ent, NULL);
317 * Check the ir_graph for Sel nodes. If the entity of Sel
318 * isn't a scalar replacement candidate, set the link of this entity
319 * equal ADDRESS_TAKEN.
321 for (i = 0; i < n; ++i) {
322 ir_node *succ = get_irn_out(irg_frame, i);
325 ir_entity *ent = get_Sel_entity(succ);
/* already ruled out by an earlier Sel of the same entity */
328 if (get_entity_link(ent) == ADDRESS_TAKEN)
332 * Beware: in rare cases even entities on the frame might be
333 * volatile. This might happen if the entity serves as a store
334 * to a value that must survive an exception. Do not optimize
335 * such entities away.
337 if (get_entity_volatility(ent) == volatility_is_volatile) {
338 set_entity_link(ent, ADDRESS_TAKEN);
342 ent_type = get_entity_type(ent);
344 /* we can only handle arrays, structs and atomic types so far */
345 if (is_Array_type(ent_type) || is_Struct_type(ent_type) || is_atomic_type(ent_type)) {
346 if (is_address_taken(succ)) {
347 if (get_entity_link(ent)) /* killing one */
349 set_entity_link(ent, ADDRESS_TAKEN);
352 /* possibly found one */
353 if (get_entity_link(ent) == NULL)
355 link_all_leave_sels(ent, succ);
365 * Return a path from the Sel node sel to its root.
367 * @param sel the Sel node
368 * @param len the length of the path so far
/* Recursive: walks up the Sel chain accumulating the total length, allocates
 * the path_t at the root, then fills in entries on the way back down. */
370 static path_t *find_path(ir_node *sel, unsigned len)
374 ir_node *pred = get_Sel_ptr(sel);
376 /* the current Sel node will add some path elements */
377 n = get_Sel_n_indexs(sel);
380 if (! is_Sel(pred)) {
381 /* we found the root */
/* sizeof(res->path) is one path_elem_t (path is declared path[1]),
 * hence "len - 1" extra elements — matches the PATH_SIZE convention */
383 res = xmalloc(sizeof(*res) + (len - 1) * sizeof(res->path));
387 res = find_path(pred, len);
/* this Sel's entries start where the parent's entries end */
389 pos = res->path_len - len;
391 res->path[pos++].ent = get_Sel_entity(sel);
392 for (i = 0; i < n; ++i) {
393 ir_node *index = get_Sel_index(sel, i);
/* indices are known to be Const (checked by is_const_sel) */
395 res->path[pos++].tv = get_Const_tarval(index);
402 * Allocate value numbers for the leaves
403 * in our found entities.
405 * @param sels a set that will contain all Sels that have a value number
406 * @param ent the entity that will be scalar replaced
407 * @param vnum the first value number we can assign
408 * @param modes a flexible array, containing all the modes of
411 * @return the next free value number
/* Each distinct constant access path gets one value number; Sels with the
 * same path share a number. The assigned number is stored in the Sel's
 * link field via SET_VNUM. */
413 static unsigned allocate_value_numbers(pset *sels, ir_entity *ent, unsigned vnum, ir_mode ***modes)
417 set *pathes = new_set(path_cmp, 8);
419 /* visit all Sel nodes in the chain of the entity */
420 for (sel = get_entity_link(ent); sel; sel = next) {
421 next = get_irn_link(sel);
423 /* we must mark this sel for later */
424 pset_insert_ptr(sels, sel);
426 key = find_path(sel, 0);
427 path = set_find(pathes, key, PATH_SIZE(key), path_hash(key));
/* path already known: reuse its value number */
430 SET_VNUM(sel, path->vnum);
/* new path: register it and assign a fresh value number */
434 set_insert(pathes, key, PATH_SIZE(key), path_hash(key));
436 SET_VNUM(sel, key->vnum);
/* grow the modes array in chunks of 16 entries */
437 ARR_EXTO(ir_mode *, *modes, (int)((key->vnum + 15) & ~15));
439 (*modes)[key->vnum] = get_type_mode(get_entity_type(get_Sel_entity(sel)));
441 assert((*modes)[key->vnum] && "Value is not atomic");
/* debug output of the access path and its value number */
445 if (get_opt_scalar_replacement_verbose() && get_firm_verbosity() > 1) {
447 printf(" %s", get_entity_name(key->path[0].ent));
448 for (i = 1; i < key->path_len; ++i) {
449 if (is_entity(key->path[i].ent))
450 printf(".%s", get_entity_name(key->path[i].ent));
452 printf("[%ld]", get_tarval_long(key->path[i].tv));
454 printf(" = %u (%s)\n", PTR_TO_INT(get_irn_link(sel)), get_mode_name((*modes)[key->vnum]));
456 #endif /* DEBUG_libfirm */
/* the entity's Sel list has been consumed; reset the link */
462 set_entity_link(ent, NULL);
467 * A list entry for the fixing lists
469 typedef struct _list_entry_t {
470 ir_node *node; /**< the node that must be fixed */
471 unsigned vnum; /**< the value number of this node */
475 * environment for memory walker
477 typedef struct _env_t {
478 struct obstack obst; /**< an obstack for the value blocks */
479 int nvals; /**< number of values */
480 ir_mode **modes; /**< the modes of the values */
481 list_entry_t *fix_phis; /**< list of all Phi nodes that must be fixed */
482 list_entry_t *fix_loads; /**< list of all Load nodes that must be fixed */
483 pset *sels; /**< A set of all Sel nodes that have a value number */
487 * topological walker.
/* Core rewrite step: replaces Loads/Stores through value-numbered Sels by
 * direct SSA values kept in per-block value arrays, and splits memory Phis
 * into one Phi per value. Nodes that cannot be resolved yet are queued on
 * env->fix_loads / env->fix_phis for a later pass. */
489 static void topologic_walker(ir_node *node, void *ctx)
492 ir_op *op = get_irn_op(node);
493 ir_node *adr, *block, *mem, *unk, **value_arr, **in, *val;
500 /* a load, check if we can resolve it */
501 adr = get_Load_ptr(node);
/* address is not one of our value-numbered Sels: leave the Load alone */
506 if (! pset_find_ptr(env->sels, adr))
509 /* ok, we have a Load that will be replaced */
510 vnum = GET_VNUM(adr);
512 assert(vnum < (unsigned)env->nvals);
514 block = get_nodes_block(node);
515 value_arr = get_irn_link(block);
517 /* check, if we can replace this Load: a value is already known here */
518 if (value_arr[vnum]) {
519 mem = get_Load_mem(node);
521 /* Beware: A Load can contain a hidden conversion in Firm.
522 This happens for instance in the following code:
525 unsigned j = *(unsigned *)&i;
528 val = value_arr[vnum];
529 mode = get_Load_mode(node);
530 if (mode != get_irn_mode(val))
531 val = new_d_Conv(get_irn_dbg_info(node), val, mode);
/* replace the Load by a Tuple routing memory/result/control around it */
533 turn_into_tuple(node, pn_Load_max);
534 set_Tuple_pred(node, pn_Load_M, mem);
535 set_Tuple_pred(node, pn_Load_res, val);
536 set_Tuple_pred(node, pn_Load_X_regular, new_r_Jmp(current_ir_graph, block));
537 set_Tuple_pred(node, pn_Load_X_except, new_Bad());
/* no value yet: queue this Load for the fix_loads pass */
539 l = obstack_alloc(&env->obst, sizeof(*l));
543 set_irn_link(node, env->fix_loads);
546 } else if (op == op_Store) {
547 /* a Store always can be replaced */
548 adr = get_Store_ptr(node);
/* address is not one of our value-numbered Sels: leave the Store alone */
553 if (! pset_find_ptr(env->sels, adr))
556 vnum = GET_VNUM(adr);
558 assert(vnum < (unsigned)env->nvals);
560 block = get_nodes_block(node);
561 value_arr = get_irn_link(block);
563 /* Beware: A Store can contain a hidden conversion in Firm. */
564 val = get_Store_value(node);
565 if (get_irn_mode(val) != env->modes[vnum])
566 val = new_d_Conv(get_irn_dbg_info(node), val, env->modes[vnum]);
/* record the stored value as the current value of this variable */
567 value_arr[vnum] = val;
569 mem = get_Store_mem(node);
570 block = get_nodes_block(node);
/* replace the Store by a Tuple routing memory/control around it */
572 turn_into_tuple(node, pn_Store_max);
573 set_Tuple_pred(node, pn_Store_M, mem);
574 set_Tuple_pred(node, pn_Store_X_regular, new_r_Jmp(current_ir_graph, block));
575 set_Tuple_pred(node, pn_Store_X_except, new_Bad());
576 } else if (op == op_Phi && get_irn_mode(node) == mode_M) {
578 * found a memory Phi: Here, we must create new Phi nodes
580 block = get_nodes_block(node);
581 value_arr = get_irn_link(block);
583 n = get_Block_n_cfgpreds(block);
585 in = alloca(sizeof(*in) * n);
587 for (i = env->nvals - 1; i >= 0; --i) {
/* Unknown operands are placeholders; fix_phis() patches in the real
 * predecessor values later */
588 unk = new_Unknown(env->modes[i]);
589 for (j = n - 1; j >= 0; --j)
592 value_arr[i] = new_r_Phi(current_ir_graph, block, n, in, env->modes[i]);
594 l = obstack_alloc(&env->obst, sizeof(*l));
595 l->node = value_arr[i];
598 set_irn_link(value_arr[i], env->fix_phis);
605 * Walker: allocate the value array for every block.
/* One ir_node* slot per value number, stored in the block's link field. */
607 static void alloc_value_arr(ir_node *block, void *ctx)
610 ir_node **var_arr = obstack_alloc(&env->obst, sizeof(*var_arr) * env->nvals);
612 /* the value array is empty at start */
613 memset(var_arr, 0, sizeof(*var_arr) * env->nvals);
614 set_irn_link(block, var_arr);
618 * searches through blocks beginning from block for value
619 * vnum and return it.
/* Depth-first search backwards over control-flow predecessors; uses the
 * block-visited flag for cycle protection (caller resets it beforehand). */
621 static ir_node *find_vnum_value(ir_node *block, unsigned vnum)
627 if (Block_not_block_visited(block)) {
628 mark_Block_block_visited(block);
630 value_arr = get_irn_link(block);
/* a value recorded in this block wins */
633 return value_arr[vnum];
635 for (i = get_Block_n_cfgpreds(block) - 1; i >= 0; --i) {
636 ir_node *pred = get_Block_cfgpred(block, i);
638 res = find_vnum_value(get_nodes_block(pred), vnum);
/* Second pass: replace the Unknown placeholder operands of the Phi nodes
 * created by topologic_walker with the values found in the corresponding
 * control-flow predecessor blocks. */
649 static void fix_phis(env_t *env)
652 ir_node *phi, *block, *pred, *val;
655 for (l = env->fix_phis; l; l = get_irn_link(phi)) {
658 block = get_nodes_block(phi);
659 for (i = get_irn_arity(phi) - 1; i >= 0; --i) {
660 pred = get_Block_cfgpred(block, i);
661 pred = get_nodes_block(pred);
/* fresh visited generation for every backwards search */
663 inc_irg_block_visited(current_ir_graph);
664 val = find_vnum_value(pred, l->vnum);
667 set_irn_n(phi, i, val);
/* Second pass: resolve the Loads queued by topologic_walker by searching
 * predecessor blocks for the value; an unfound value means the variable is
 * read uninitialized and becomes Unknown. */
675 static void fix_loads(env_t *env)
678 ir_node *load, *block, *pred, *val = NULL, *mem;
682 for (l = env->fix_loads; l; l = get_irn_link(load)) {
685 block = get_nodes_block(load);
686 for (i = get_Block_n_cfgpreds(block) - 1; i >= 0; --i) {
687 pred = get_Block_cfgpred(block, i);
688 pred = get_nodes_block(pred);
/* fresh visited generation for every backwards search */
690 inc_irg_block_visited(current_ir_graph);
691 val = find_vnum_value(pred, l->vnum);
698 /* access of an uninitialized value */
699 val = new_Unknown(env->modes[l->vnum]);
702 /* Beware: A Load can contain a hidden conversion in Firm.
704 mode = get_Load_mode(load);
705 if (mode != get_irn_mode(val))
706 val = new_d_Conv(get_irn_dbg_info(load), val, mode);
708 mem = get_Load_mem(load);
/* replace the Load by a Tuple routing memory/result/control around it */
710 turn_into_tuple(load, pn_Load_max);
711 set_Tuple_pred(load, pn_Load_M, mem);
712 set_Tuple_pred(load, pn_Load_res, val);
713 set_Tuple_pred(load, pn_Load_X_regular, new_r_Jmp(current_ir_graph, block));
714 set_Tuple_pred(load, pn_Load_X_except, new_Bad());
719 * Make scalar replacement.
721 * @param sels A set containing all Sel nodes that have a value number
722 * @param nvals The number of scalars.
723 * @param modes A flexible array, containing all the modes of
/* Driver for the rewrite: allocates per-block value arrays, runs the
 * block-wise topological walker, then fixes the queued Phis and Loads. */
726 static void do_scalar_replacements(pset *sels, int nvals, ir_mode **modes)
730 obstack_init(&env.obst);
734 env.fix_loads = NULL;
737 /* first step: allocate the value arrays for every block */
738 irg_block_walk_graph(current_ir_graph, NULL, alloc_value_arr, &env);
741 * second step: walk over the graph blockwise in topological order
742 * and fill the array as much as possible.
744 irg_walk_blkwise_graph(current_ir_graph, NULL, topologic_walker, &env);
746 /* third, fix the list of Phis, then the list of Loads */
/* all value arrays and list entries live on the obstack; free in one go */
750 obstack_free(&env.obst, NULL);
754 * Find possible scalar replacements
756 * @param irg The current ir graph.
/* Public entry point: analyses the frame entities of irg and, where an
 * entity's address is never taken, replaces all its Loads/Stores by SSA
 * values and removes the entity from its owner type. */
758 void scalar_replacement_opt(ir_graph *irg)
762 scalars_t key, *value;
/* optimization disabled: nothing to do */
770 if (! get_opt_scalar_replacement())
773 rem = current_ir_graph;
775 /* Call algorithm that computes the out edges */
776 assure_irg_outs(irg);
778 /* Find possible scalar replacements */
779 if (find_possible_replacements(irg)) {
781 if (get_opt_scalar_replacement_verbose()) {
782 printf("Scalar Replacement: %s\n", get_entity_name(get_irg_entity(irg)));
785 /* Insert in set the scalar replacements. */
786 irg_frame = get_irg_frame(irg);
788 modes = NEW_ARR_F(ir_mode *, 16);
789 set_ent = new_set(ent_cmp, 8);
790 sels = pset_new_ptr(8);
792 for (i = 0 ; i < get_irn_n_outs(irg_frame); i++) {
793 ir_node *succ = get_irn_out(irg_frame, i);
796 ir_entity *ent = get_Sel_entity(succ);
/* skip entities that were not selected for replacement */
798 if (get_entity_link(ent) == NULL || get_entity_link(ent) == ADDRESS_TAKEN)
801 ent_type = get_entity_type(ent);
/* remember the entity and its owner so it can be removed afterwards */
804 key.ent_owner = get_entity_owner(ent);
805 set_insert(set_ent, &key, sizeof(key), HASH_PTR(key.ent));
807 if (get_opt_scalar_replacement_verbose()) {
808 if (is_Array_type(ent_type)) {
809 printf(" found array %s\n", get_entity_name(ent));
811 else if (is_Struct_type(ent_type)) {
812 printf(" found struct %s\n", get_entity_name(ent));
814 else if (is_atomic_type(ent_type))
815 printf(" found atomic value %s\n", get_entity_name(ent));
817 assert(0 && "Neither an array nor a struct or atomic value");
/* number the leaf accesses of this entity */
821 nvals = allocate_value_numbers(sels, ent, nvals, &modes);
825 if (get_opt_scalar_replacement_verbose()) {
826 printf(" %u values will be needed\n", nvals);
829 /* If scalars were found. */
831 do_scalar_replacements(sels, nvals, modes);
/* the replaced entities are dead now: remove them from their owners */
833 for (value = set_first(set_ent); value; value = set_next(set_ent)) {
834 remove_class_member(value->ent_owner, value->ent);
844 * We changed the graph, but did NOT introduce new blocks
845 * neither changed control flow, cf-backedges should be still
/* out edges and loop info are stale after rewriting Loads/Stores */
848 set_irg_outs_inconsistent(irg);
849 set_irg_loopinfo_inconsistent(irg);
853 current_ir_graph = rem;