3 * File name: ir/opt/scalar_replace.c
4 * Purpose: scalar replacement of arrays and compounds
5 * Author: Beyhan Veliev
6 * Modified by: Michael Beck
9 * Copyright: (c) 1998-2005 Universität Karlsruhe
10 * Licence: This file is protected by GPL - GNU GENERAL PUBLIC LICENSE.
28 #include "scalar_replace.h"
/* The value number of a Sel node is stashed in the node's link field,
 * converted to/from a pointer via INT_TO_PTR / PTR_TO_INT. */
42 #define SET_VNUM(node, vnum) set_irn_link(node, INT_TO_PTR(vnum))
43 #define GET_VNUM(node) (unsigned)PTR_TO_INT(get_irn_link(node))
46 * A path element entry: it is either an entity
47 * or a tarval, because we evaluate only constant array
48 * accesses like a.b.c[8].d
56 * An access path, used to assign value numbers
57 * to variables that will be scalar replaced
59 typedef struct _path_t {
60 unsigned vnum; /**< the value number */
61 unsigned path_len; /**< the length of the access path */
62 path_elem_t path[1]; /**< the path; allocated with path_len entries
                              (C89 "struct hack" trailing array, sized
                              via the PATH_SIZE macro) */
65 /** The size of a path in bytes */
/* the "- 1" compensates for the one path element already counted
 * inside sizeof(*(p)) by the path[1] declaration */
66 #define PATH_SIZE(p) (sizeof(*(p)) + sizeof((p)->path[0]) * ((p)->path_len - 1))
/** An entity selected for scalar replacement, together with its owner type. */
68 typedef struct _scalars_t {
69 entity *ent; /**< An entity for scalar replacement. */
70 ir_type *ent_owner; /**< The owner of this entity. */
77 * @return 0 if they are identically
79 static int path_cmp(const void *elt, const void *key, size_t size)
81 const path_t *p1 = elt;
82 const path_t *p2 = key;
84 /* we can use memcmp here, because identical tarvals should have identical addresses */
85 return memcmp(p1->path, p2->path, p1->path_len * sizeof(p1->path[0]));
89 * Compare two elements of the scalars_t set.
91 * @return 0 if they are identically
93 static int ent_cmp(const void *elt, const void *key, size_t size)
95 const scalars_t *c1 = elt;
96 const scalars_t *c2 = key;
98 return c1->ent != c2->ent;
102 * Calculate a hash value for a path.
104 static unsigned path_hash(const path_t *path)
109 for (i = 0; i < path->path_len; ++i)
110 hash ^= (unsigned)PTR_TO_INT(path->path[i].ent);
116 * Returns non-zero, if all indices of a Sel node are constants.
118 * @param sel the Sel node that will be checked
120 static int is_const_sel(ir_node *sel) {
121 int i, n = get_Sel_n_indexs(sel);
123 for (i = 0; i < n; ++i) {
124 ir_node *idx = get_Sel_index(sel, i);
/* a single non-constant index makes the whole access non-constant */
126 if (get_irn_op(idx) != op_Const)
133 * Returns non-zero, if the address of an entity
134 * represented by a Sel node (or its successor Sels) is taken.
/* NOTE(review): a Sel with non-constant indices is treated as address-taken,
 * because such an access cannot be assigned a value number */
136 int is_address_taken(ir_node *sel)
140 if (! is_const_sel(sel))
/* inspect every user of this Sel */
143 for (i = get_irn_n_outs(sel) - 1; i >= 0; --i) {
144 ir_node *succ = get_irn_out(sel, i);
146 switch (get_irn_opcode(succ)) {
148 /* ok, we just load from that entity */
152 /* check that Sel is not the Store's value */
153 if (get_Store_value(succ) == sel)
158 /* Check the Sel successor of Sel */
159 int res = is_address_taken(succ);
167 /* The address of an entity is given as a parameter.
168 * As long as we do not have analyses that can tell what
169 * is done with parameters, assume it is taken.
174 /* another op, the address is taken */
182 * Link all leave Sels with the entity.
184 * @param ent the entity that will be scalar replaced
185 * @param sel a Sel node that selects some fields of this entity
187 static void link_all_leave_sels(entity *ent, ir_node *sel)
191 n = get_irn_n_outs(sel);
192 for (i = 0; i < n; ++i) {
193 ir_node *succ = get_irn_out(sel, i);
/* recurse into inner Sels first: only the leaves get linked */
196 link_all_leave_sels(ent, succ);
202 /* if Sel nodes with memory inputs are used, an entity can be
203 * visited more than once causing a ring here, so we use the
204 * node flag to mark linked nodes
206 if (irn_visited(sel))
209 /* we know we are at a leave, because this function is only
210 * called if the address is NOT taken, so succ must be a Load
/* prepend this Sel to the singly-linked list kept in the entity's link */
213 set_irn_link(sel, get_entity_link(ent));
214 set_entity_link(ent, sel);
216 mark_irn_visited(sel);
220 /* we need a special address that serves as an address taken marker */
/* &_x: presumably a file-local dummy object (its declaration is not shown
 * in this excerpt), so the marker can never equal a real Sel-list pointer */
222 static void *ADDRESS_TAKEN = &_x;
225 * Find possible scalar replacements.
227 * @param irg an IR graph
229 * This function finds variables on the (members of the) frame type
230 * that can be scalar replaced, because their address is never taken.
231 * If such a variable is found, its entity link will hold a list of all
232 * Sel nodes, that selects the atomic fields of this entity.
233 * Otherwise, the link will be ADDRESS_TAKEN or NULL.
235 * @return non-zero if at least one entity could be replaced
238 static int find_possible_replacements(ir_graph *irg)
240 ir_node *irg_frame = get_irg_frame(irg);
244 inc_irg_visited(irg);
246 n = get_irn_n_outs(irg_frame);
249 * First, clear the link field of all interesting entities.
250 * Note that we did not rely on the fact that there is only
251 * one Sel node per entity, so we might access one entity
252 * more than once here.
253 * That's why we need two loops.
255 for (i = 0; i < n; ++i) {
256 ir_node *succ = get_irn_out(irg_frame, i);
259 entity *ent = get_Sel_entity(succ);
260 set_entity_link(ent, NULL);
265 * Check the ir_graph for Sel nodes. If the entity of Sel
266 * isn't a scalar replacement set the link of this entity
267 * equal ADDRESS_TAKEN.
269 for (i = 0; i < n; ++i) {
270 ir_node *succ = get_irn_out(irg_frame, i);
273 entity *ent = get_Sel_entity(succ);
/* already disqualified by an earlier Sel of the same entity */
276 if (get_entity_link(ent) == ADDRESS_TAKEN)
280 * Beware: in rare cases even entities on the frame might be
281 * volatile. This might happen if the entity serves as a store
282 * to a value that must survive a exception. Do not optimize
283 * such entities away.
285 if (get_entity_volatility(ent) == volatility_is_volatile) {
286 set_entity_link(ent, ADDRESS_TAKEN);
290 ent_type = get_entity_type(ent);
292 /* so far we can only handle arrays, structs and atomic types */
293 if (is_Array_type(ent_type) || is_Struct_type(ent_type) || is_atomic_type(ent_type)) {
294 if (is_address_taken(succ)) {
295 if (get_entity_link(ent)) /* killing one */
297 set_entity_link(ent, ADDRESS_TAKEN);
300 /* possibly found one */
301 if (get_entity_link(ent) == NULL)
303 link_all_leave_sels(ent, succ);
313 * Return a path from the Sel node sel to its root.
315 * @param sel the Sel node
316 * @param len the length of the path so far
318 static path_t *find_path(ir_node *sel, unsigned len)
322 ir_node *pred = get_Sel_ptr(sel);
324 /* the current Sel node will add some path elements */
325 n = get_Sel_n_indexs(sel);
328 if (! is_Sel(pred)) {
329 /* we found the root */
/* path[1] already accounts for one element, hence len - 1 extra */
331 res = xmalloc(sizeof(*res) + (len - 1) * sizeof(res->path));
/* recurse towards the root; outer Sels fill the front of the path */
335 res = find_path(pred, len);
/* our write position: everything before it was filled by the recursion */
337 pos = res->path_len - len;
339 res->path[pos++].ent = get_Sel_entity(sel);
340 for (i = 0; i < n; ++i) {
341 ir_node *index = get_Sel_index(sel, i);
/* indices are constants here (checked by is_const_sel earlier) */
343 res->path[pos++].tv = get_Const_tarval(index);
350 * Allocate value numbers for the leaves
351 * in our found entities.
353 * @param sels a set that will contain all Sels that have a value number
354 * @param ent the entity that will be scalar replaced
355 * @param vnum the first value number we can assign
356 * @param modes a flexible array, containing all the modes of
359 * @return the next free value number
361 static unsigned allocate_value_numbers(pset *sels, entity *ent, unsigned vnum, ir_mode ***modes)
365 set *pathes = new_set(path_cmp, 8);
367 /* visit all Sel nodes in the chain of the entity */
368 for (sel = get_entity_link(ent); sel; sel = next) {
369 next = get_irn_link(sel);
371 /* we must mark this sel for later */
372 pset_insert_ptr(sels, sel);
374 key = find_path(sel, 0);
375 path = set_find(pathes, key, PATH_SIZE(key), path_hash(key));
/* path already seen: reuse its value number */
378 SET_VNUM(sel, path->vnum);
/* new path: register it and hand out a fresh value number */
384 set_insert(pathes, key, PATH_SIZE(key), path_hash(key));
386 SET_VNUM(sel, key->vnum);
/* grow the modes array in chunks of 16 entries */
387 ARR_EXTO(ir_mode *, *modes, (key->vnum + 15) & ~15);
389 (*modes)[key->vnum] = get_type_mode(get_entity_type(get_Sel_entity(sel)));
391 assert((*modes)[key->vnum] && "Value is not atomic");
395 if (get_opt_scalar_replacement_verbose() && get_firm_verbosity() > 1) {
396 printf(" %s", get_entity_name(key->path[0].ent));
397 for (i = 1; i < key->path_len; ++i) {
398 if (is_entity(key->path[i].ent))
399 printf(".%s", get_entity_name(key->path[i].ent));
401 printf("[%ld]", get_tarval_long(key->path[i].tv));
403 printf(" = %u (%s)\n", PTR_TO_INT(get_irn_link(sel)), get_mode_name((*modes)[key->vnum]));
405 #endif /* DEBUG_libfirm */
/* clear the entity link: the Sel chain has been consumed */
411 set_entity_link(ent, NULL);
416 * A list entry for the fixing lists
418 typedef struct _list_entry_t {
419 ir_node *node; /**< the node that must be fixed */
420 unsigned vnum; /**< the value number of this node */
424 * environment for memory walker
426 typedef struct _env_t {
427 struct obstack obst; /**< an obstack for the value blocks */
428 int nvals; /**< number of values */
429 ir_mode **modes; /**< the modes of the values */
430 list_entry_t *fix_phis; /**< list of all Phi nodes that must be fixed */
431 list_entry_t *fix_loads; /**< list of all Load nodes that must be fixed */
432 pset *sels; /**< A set of all Sel nodes that have a value number */
/* Walker: rewrite Loads/Stores through value-numbered Sels into Tuples and
 * track the current value of each scalar per block (in the block's link). */
438 static void handle_first(ir_node *node, void *ctx)
441 ir_op *op = get_irn_op(node);
442 ir_node *adr, *block, *mem, *unk, **value_arr, **in;
448 /* a load, check if we can resolve it */
449 adr = get_Load_ptr(node);
/* the address is not a value-numbered Sel: leave the Load alone */
454 if (! pset_find_ptr(env->sels, adr))
457 /* ok, we have a Load that will be replaced */
458 vnum = GET_VNUM(adr);
460 assert(vnum < (unsigned)env->nvals);
462 block = get_nodes_block(node);
463 value_arr = get_irn_link(block);
465 /* check, if we can replace this Load */
466 if (value_arr[vnum]) {
467 mem = get_Load_mem(node);
/* value known in this block: Load becomes a Tuple forwarding mem/value */
469 turn_into_tuple(node, pn_Load_max);
470 set_Tuple_pred(node, pn_Load_M, mem);
471 set_Tuple_pred(node, pn_Load_res, value_arr[vnum]);
472 set_Tuple_pred(node, pn_Load_X_except, new_Bad());
/* value not yet known here: defer, remember the Load on the fix list */
475 l = obstack_alloc(&env->obst, sizeof(*l));
479 set_irn_link(node, env->fix_loads);
483 else if (op == op_Store) {
484 /* a Store always can be replaced */
485 adr = get_Store_ptr(node);
490 if (! pset_find_ptr(env->sels, adr))
493 vnum = GET_VNUM(adr);
495 assert(vnum < (unsigned)env->nvals);
497 block = get_nodes_block(node);
498 value_arr = get_irn_link(block);
/* record the stored value as the current value of this scalar */
500 value_arr[vnum] = get_Store_value(node);
502 mem = get_Store_mem(node);
504 turn_into_tuple(node, pn_Store_max);
505 set_Tuple_pred(node, pn_Store_M, mem);
506 set_Tuple_pred(node, pn_Store_X_except, new_Bad());
508 else if (op == op_Phi && get_irn_mode(node) == mode_M) {
510 * found a memory Phi: Here, we must create new Phi nodes
512 block = get_nodes_block(node);
513 value_arr = get_irn_link(block);
515 n = get_Block_n_cfgpreds(block);
517 in = alloca(sizeof(*in) * n);
519 for (i = env->nvals - 1; i >= 0; --i) {
/* Unknown placeholders; real operands are patched in by fix_phis() later */
520 unk = new_Unknown(env->modes[i]);
521 for (j = n - 1; j >= 0; --j)
524 value_arr[i] = new_r_Phi(current_ir_graph, block, n, in, env->modes[i]);
526 l = obstack_alloc(&env->obst, sizeof(*l));
527 l->node = value_arr[i];
530 set_irn_link(value_arr[i], env->fix_phis);
537 * Walker: allocate the value array for every block.
539 static void alloc_value_arr(ir_node *block, void *ctx)
/* one slot per value number, allocated on the environment obstack */
542 ir_node **var_arr = obstack_alloc(&env->obst, sizeof(*var_arr) * env->nvals);
544 /* the value array is empty at start */
545 memset(var_arr, 0, sizeof(*var_arr) * env->nvals);
/* the array is reachable from the block via its link field */
546 set_irn_link(block, var_arr);
550 * searches through blocks beginning from block for value
551 * vnum and return it.
/* depth-first search backwards through control-flow predecessors;
 * uses the block-visited flag to terminate on cycles */
553 static ir_node *find_value(ir_node *block, unsigned vnum)
559 if (Block_not_block_visited(block)) {
560 mark_Block_block_visited(block);
562 value_arr = get_irn_link(block);
565 return value_arr[vnum];
567 for (i = get_Block_n_cfgpreds(block) - 1; i >= 0; --i) {
568 ir_node *pred = get_Block_cfgpred(block, i);
570 res = find_value(get_nodes_block(pred), vnum);
/* Patch the Unknown operands of the Phi nodes created by handle_first:
 * for every predecessor, look up the reaching value of the Phi's vnum. */
581 static void fix_phis(env_t *env)
584 ir_node *phi, *block, *pred, *val;
587 for (l = env->fix_phis; l; l = get_irn_link(phi)) {
590 block = get_nodes_block(phi);
591 for (i = get_irn_arity(phi) - 1; i >= 0; --i) {
592 pred = get_Block_cfgpred(block, i);
593 pred = get_nodes_block(pred);
/* fresh visited epoch for each backwards search */
595 inc_irg_block_visited(current_ir_graph);
596 val = find_value(pred, l->vnum);
599 set_irn_n(phi, i, val);
/* Resolve the Loads deferred by handle_first: search the predecessors for
 * the reaching value; if none exists the value is uninitialized. */
607 static void fix_loads(env_t *env)
610 ir_node *load, *block, *pred, *val = NULL, *mem;
613 for (l = env->fix_loads; l; l = get_irn_link(load)) {
616 block = get_nodes_block(load);
617 for (i = get_Block_n_cfgpreds(block) - 1; i >= 0; --i) {
618 pred = get_Block_cfgpred(block, i);
619 pred = get_nodes_block(pred);
/* fresh visited epoch for each backwards search */
621 inc_irg_block_visited(current_ir_graph);
622 val = find_value(pred, l->vnum);
629 /* access of an uninitialized value */
630 val = new_Unknown(env->modes[l->vnum]);
633 mem = get_Load_mem(load);
/* replace the Load by a Tuple forwarding the memory and the found value */
635 turn_into_tuple(load, pn_Load_max);
636 set_Tuple_pred(load, pn_Load_M, mem);
637 set_Tuple_pred(load, pn_Load_res, val);
638 set_Tuple_pred(load, pn_Load_X_except, new_Bad());
643 * Make scalar replacement.
645 * @param sels A set containing all Sel nodes that have a value number
646 * @param nvals The number of scalars.
647 * @param modes A flexible array, containing all the modes of
650 static void do_scalar_replacements(pset *sels, int nvals, ir_mode **modes)
654 obstack_init(&env.obst)ᅟ;
658 env.fix_loads = NULL;
661 /* first step: allocate the value arrays for every block */
662 irg_block_walk_graph(current_ir_graph, NULL, alloc_value_arr, &env);
665 * second step: walk over the graph blockwise in topological order
666 * and fill the array as much as possible.
668 irg_walk_blkwise_graph(current_ir_graph, NULL, handle_first, &env);
670 /* third, fix the list of Phis, then the list of Loads */
/* all per-block value arrays live on the obstack; release them in one go */
674 obstack_free(&env.obst, NULL);
678 * Find possible scalar replacements
680 * @param irg The current ir graph.
/* Driver: finds candidate frame entities, assigns value numbers to their
 * leaf Sels, performs the replacement and removes the replaced entities. */
682 void scalar_replacement_opt(ir_graph *irg)
686 scalars_t key, *value;
/* optimization disabled: nothing to do */
694 if (! get_opt_scalar_replacement())
697 rem = current_ir_graph;
699 /* Call algorithm that computes the out edges */
700 assure_irg_outs(irg);
702 /* Find possible scalar replacements */
703 if (find_possible_replacements(irg)) {
705 if (get_opt_scalar_replacement_verbose()) {
706 printf("Scalar Replacement: %s\n", get_entity_name(get_irg_entity(irg)));
709 /* Insert in set the scalar replacements. */
710 irg_frame = get_irg_frame(irg);
712 modes = NEW_ARR_F(ir_mode *, 16);
713 set_ent = new_set(ent_cmp, 8);
714 sels = pset_new_ptr(8);
716 for (i = 0 ; i < get_irn_n_outs(irg_frame); i++) {
717 ir_node *succ = get_irn_out(irg_frame, i);
720 entity *ent = get_Sel_entity(succ);
/* skip entities not selected for replacement */
722 if (get_entity_link(ent) == NULL || get_entity_link(ent) == ADDRESS_TAKEN)
725 ent_type = get_entity_type(ent);
728 key.ent_owner = get_entity_owner(ent);
729 set_insert(set_ent, &key, sizeof(key), HASH_PTR(key.ent));
731 if (get_opt_scalar_replacement_verbose()) {
732 if (is_Array_type(ent_type)) {
733 printf("  found array %s\n", get_entity_name(ent));
735 else if (is_Struct_type(ent_type)) {
736 printf("  found struct %s\n", get_entity_name(ent));
738 else if (is_atomic_type(ent_type))
739 printf("  found atomic value %s\n", get_entity_name(ent));
741 assert(0 && "Neither an array nor a struct or atomic value");
745 nvals = allocate_value_numbers(sels, ent, nvals, &modes);
749 if (get_opt_scalar_replacement_verbose()) {
750 printf("  %u values will be needed\n", nvals);
753 /* If scalars were found. */
755 do_scalar_replacements(sels, nvals, modes);
/* the replaced entities are dead: remove them from their owner type */
757 for (value = set_first(set_ent); value; value = set_next(set_ent)) {
758 remove_class_member(value->ent_owner, value->ent);
768 * We changed the graph, but did NOT introduce new blocks
769 * neither changed control flow, cf-backedges should be still
772 set_irg_outs_inconsistent(irg);
773 set_irg_loopinfo_inconsistent(irg);
777 current_ir_graph = rem;