 * File name:   ir/opt/ldstopt.c
 * Purpose:     load/store optimizations
 * Copyright:   (c) 1998-2004 Universität Karlsruhe
 * Licence:     This file is protected by the GPL - GNU GENERAL PUBLIC LICENSE.
# include "irnode_t.h"
# include "irgraph_t.h"
# include "irmode_t.h"
# include "ircons_t.h"
# include "dbginfo_t.h"
# include "iropt_dbg.h"
# include "irflag_t.h"
# include "opt_polymorphy.h"

#include "cacheopt/cachesim.h"

#define IMAX(a,b)   ((a) > (b) ? (a) : (b))

#define MAX_PROJ    IMAX(pn_Load_max, pn_Store_max)
typedef struct _walk_env_t {
  struct obstack obst;          /**< obstack used to allocate the info structs */
/**
 * flags for Load/Store
 */
  LDST_VISITED = 1              /**< if set, this Load/Store is already visited */

typedef struct _ldst_info_t {
  ir_node  *projs[MAX_PROJ];    /**< list of Proj's of this node */
  ir_node  *exc_block;          /**< the exception block if available */
  int       exc_idx;            /**< predecessor index in the exception block */
  unsigned  flags;              /**< flags */
/**
 * flags for control flow
 */
  BLOCK_HAS_COND = 1,           /**< Block has conditional control flow */
  BLOCK_HAS_EXC  = 2            /**< Block has exceptional control flow */
typedef struct _block_info_t {
  unsigned flags;               /**< flags for the block */

/**
 * walker, clears all links first
 */
static void init_links(ir_node *n, void *env)
  set_irn_link(n, NULL);

/**
 * get the Load/Store info of a node
 */
static ldst_info_t *get_ldst_info(ir_node *node, walk_env_t *env)
  ldst_info_t *info = get_irn_link(node);

    info = obstack_alloc(&env->obst, sizeof(*info));

    memset(info, 0, sizeof(*info));

    set_irn_link(node, info);

/**
 * get the Block info of a node
 */
static block_info_t *get_block_info(ir_node *node, walk_env_t *env)
  block_info_t *info = get_irn_link(node);

    info = obstack_alloc(&env->obst, sizeof(*info));

    memset(info, 0, sizeof(*info));

    set_irn_link(node, info);
/**
 * update the projection info for a Load/Store
 */
static int update_projs(ldst_info_t *info, ir_node *proj)
  long nr = get_Proj_proj(proj);

  assert(0 <= nr && nr <= MAX_PROJ && "Wrong proj from LoadStore");

  if (info->projs[nr]) {
    /* there is already one, do CSE */
    exchange(proj, info->projs[nr]);

    info->projs[nr] = proj;

/**
 * update the exception block info for a Load/Store
 */
static int update_exc(ldst_info_t *info, ir_node *block, int pos)
  assert(info->exc_block == NULL && "more than one exception block found");

  info->exc_block = block;
#define get_irn_out_n(node)     (unsigned)get_irn_link(node)
#define set_irn_out_n(node, n)  set_irn_link(node, (void *)(n))
/**
 * walker, collects all Load/Store/Proj nodes
 *
 * walks from Start -> End
 */
static void collect_nodes(ir_node *node, void *env)
  ir_op       *op = get_irn_op(node);
  ldst_info_t *ldst_info;
  walk_env_t  *wenv = env;
    pred = get_Proj_pred(node);
    op   = get_irn_op(pred);

      ldst_info = get_ldst_info(pred, wenv);

      wenv->changes |= update_projs(ldst_info, node);

      if ((ldst_info->flags & LDST_VISITED) == 0) {
        adr = get_Load_ptr(pred);
        set_irn_out_n(adr, get_irn_out_n(adr) + 1);

        ldst_info->flags |= LDST_VISITED;

    else if (op == op_Store) {
      ldst_info = get_ldst_info(pred, wenv);

      wenv->changes |= update_projs(ldst_info, node);

      if ((ldst_info->flags & LDST_VISITED) == 0) {
        adr = get_Store_ptr(pred);
        set_irn_out_n(adr, get_irn_out_n(adr) + 1);

        ldst_info->flags |= LDST_VISITED;

  else if (op == op_Block) { /* check, if it's an exception block */

    for (i = 0, n = get_Block_n_cfgpreds(node); i < n; ++i) {
      block_info_t *bl_info;

      pred = skip_Proj(get_Block_cfgpred(node, i));

      /* ignore Bad predecessors, they will be removed later */

      pred_block = get_nodes_block(pred);
      bl_info    = get_block_info(pred_block, wenv);

      if (is_fragile_op(pred))
        bl_info->flags |= BLOCK_HAS_EXC;
      else if (is_forking_op(pred))
        bl_info->flags |= BLOCK_HAS_COND;

      if (get_irn_op(pred) == op_Load || get_irn_op(pred) == op_Store) {
        ldst_info = get_ldst_info(pred, wenv);

        wenv->changes |= update_exc(ldst_info, node, i);
/**
 * Returns an entity if the address ptr points to a constant one.
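 *
 * A rough illustration (hypothetical, not taken from this file): a SymConst
 * that addresses a constant global entity is returned directly, and a Sel
 * chain such as Sel(SymConst(&const_table), i) is accepted as long as every
 * index is a Const that lies within the declared array bounds.
 */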
static entity *find_constant_entity(ir_node *ptr)
  ir_op *op = get_irn_op(ptr);

  if (op == op_SymConst && (get_SymConst_kind(ptr) == symconst_addr_ent)) {
    return get_SymConst_entity(ptr);

  else if (op == op_Sel) {
    entity *ent = get_Sel_entity(ptr);
    type   *tp  = get_entity_owner(ent);

    /* Do not fiddle with polymorphy. */
    if (is_Class_type(get_entity_owner(ent)) &&
        ((get_entity_n_overwrites(ent)    != 0) ||
         (get_entity_n_overwrittenby(ent) != 0) ) )

    if (variability_constant == get_entity_variability(ent))

    if (is_Array_type(tp)) {

      for (i = 0, n = get_Sel_n_indexs(ptr); i < n; ++i) {

        tarval *tlower, *tupper;
        ir_node *index = get_Sel_index(ptr, i);
        tarval *tv     = computed_value(index);

        /* check if the index is constant */
        if (tv == tarval_bad)

        bound  = get_array_lower_bound(tp, i);
        tlower = computed_value(bound);
        bound  = get_array_upper_bound(tp, i);
        tupper = computed_value(bound);

        if (tlower == tarval_bad || tupper == tarval_bad)

        if (tarval_cmp(tv, tlower) & pn_Cmp_Lt)

        if (tarval_cmp(tupper, tv) & pn_Cmp_Lt)

        /* ok, bounds check finished */

    ptr = get_Sel_ptr(ptr);
static long get_Sel_array_index_long(ir_node *n, int dim) {
  ir_node *index = get_Sel_index(n, dim);
  assert(get_irn_op(index) == op_Const);
  return get_tarval_long(get_Const_tarval(index));

static compound_graph_path *rec_get_accessed_path(ir_node *ptr, int depth) {
  compound_graph_path *res = NULL;
  entity              *root, *field;

  if (get_irn_op(ptr) == op_SymConst) {
    assert(get_SymConst_kind(ptr) == symconst_addr_ent);
    root = get_SymConst_entity(ptr);
    res  = (depth == 0) ? NULL : new_compound_graph_path(get_entity_type(root), depth);

    assert(get_irn_op(ptr) == op_Sel);
    res      = rec_get_accessed_path(get_Sel_ptr(ptr), depth+1);
    field    = get_Sel_entity(ptr);
    path_len = get_compound_graph_path_length(res);
    pos      = path_len - depth - 1;
    set_compound_graph_path_node(res, pos, field);

    if (is_Array_type(get_entity_owner(field))) {
      assert(get_Sel_n_indexs(ptr) == 1 && "multi dim arrays not implemented");
      set_compound_graph_path_array_index(res, pos, get_Sel_array_index_long(ptr, 0));
/** Returns an access path or NULL.  The access path is only
 *  valid if the graph is in phase_high and _no_ address computation is used. */
static compound_graph_path *get_accessed_path(ir_node *ptr) {
  return rec_get_accessed_path(ptr, 0);
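/*
 * Illustrative example (hypothetical source, assuming phase_high and no
 * lowered address computation): for an access like
 *
 *   struct { int a[10]; } s;    ... = s.a[3];
 *
 * the address is a Sel chain rooted in a SymConst for s, and the returned
 * compound graph path lists the selected entities in source order, with the
 * constant array index 3 attached to the array element.
 */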
static int optimize_load(ir_node *load)
  ldst_info_t *info = get_irn_link(load);
  ir_mode *load_mode = get_Load_mode(load);
  ir_node *pred, *mem, *ptr, *new_node;

  /* the address of the load to be optimized */
  ptr = get_Load_ptr(load);
  /*
   * Check if we can remove the exception from a Load:
   * This can be done, if the address is from a Sel(Alloc) and
   * the Sel type is a subtype of the allocated type.
   *
   * This optimizes some often used OO constructs,
   * like x = new O; x->t;
   */
  if (info->projs[pn_Load_X_except]) {
    if (get_irn_op(ptr) == op_Sel) {
      ir_node *mem = get_Sel_mem(ptr);

      if (get_irn_op(mem) == op_Alloc) {
        /* ok, check the types */
        entity *ent  = get_Sel_entity(ptr);
        type *s_type = get_entity_type(ent);
        type *a_type = get_Alloc_type(mem);

        if (is_subclass_of(s_type, a_type)) {
          /* ok, condition met: there can't be an exception because
           * alloc guarantees that enough memory was allocated */

          exchange(info->projs[pn_Load_X_except], new_Bad());
          info->projs[pn_Load_X_except] = NULL;

    else if ((get_irn_op(skip_Proj(ptr)) == op_Alloc) ||
             ((get_irn_op(ptr) == op_Cast) && (get_irn_op(skip_Proj(get_Cast_op(ptr))) == op_Alloc))) {
      /* simple case: a direct Load after an Alloc.  Firm's Alloc throws
       * an exception in case of out-of-memory, so there is no way for an
       * exception in this Load.
       * This code is constructed by the "exception lowering" in the Jack compiler.
       */
      exchange(info->projs[pn_Load_X_except], new_Bad());
      info->projs[pn_Load_X_except] = NULL;
  /* do NOT touch volatile loads for now */
  if (get_Load_volatility(load) == volatility_is_volatile)

  if (! info->projs[pn_Load_res] && ! info->projs[pn_Load_X_except]) {
    /* a Load whose value is neither used nor exception checked, remove it */
    mem = get_Load_mem(load);

    exchange(info->projs[pn_Load_M], mem);
  /* the mem of the Load. Must still be returned after optimization */
  mem = get_Load_mem(load);

  /* Load from a constant polymorphic field, where we can resolve
  new_node = transform_node_Load(load);
  if (new_node != load) {
    if (info->projs[pn_Load_M]) {
      exchange(info->projs[pn_Load_M], mem);
      info->projs[pn_Load_M] = NULL;

    if (info->projs[pn_Load_X_except]) {
      exchange(info->projs[pn_Load_X_except], new_Bad());
      info->projs[pn_Load_X_except] = NULL;

    if (info->projs[pn_Load_res])
      exchange(info->projs[pn_Load_res], new_node);

  /* check if we can determine the entity that will be loaded */
  ent = find_constant_entity(ptr);

    if ((allocation_static == get_entity_allocation(ent)) &&
        (visibility_external_allocated != get_entity_visibility(ent))) {
      /* a static allocation that is not external: there should be NO exception

      /* no exception, clear the info field as it might be checked later again */
      if (info->projs[pn_Load_X_except]) {
        exchange(info->projs[pn_Load_X_except], new_Bad());
        info->projs[pn_Load_X_except] = NULL;

    if (variability_constant == get_entity_variability(ent)
        && is_atomic_entity(ent)) {
      /* Might not be atomic after lowering of Sels.  In this
         case we could also load, but it's more complicated. */

      /* simpler case: we load the content of a constant value:
       * replace it by the constant itself
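       *
       * Illustrative source fragment (hypothetical names, not from this file):
       *
       *   static const int seven = 7;
       *   ... x = seven;              the Load of seven becomes the constant 7
       */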
      if (info->projs[pn_Load_M])
        exchange(info->projs[pn_Load_M], mem);

      if (info->projs[pn_Load_res]) {
        if (is_atomic_entity(ent)) {
          ir_node *c = copy_const_value(get_atomic_ent_value(ent));

          exchange(info->projs[pn_Load_res], c);

    else if (variability_constant == get_entity_variability(ent)) {
      compound_graph_path *path;

      printf(">>>>>>>>>>>>> Found access to constant entity %s in function %s\n", get_entity_name(ent),
             get_entity_name(get_irg_entity(current_ir_graph)));
      printf("  load: "); DDMN(load);
      printf("  ptr:  "); DDMN(ptr);

      path = get_accessed_path(ptr);

        assert(is_proper_compound_graph_path(path, get_compound_graph_path_length(path)-1));
        c = get_compound_ent_value_by_path(ent, path);

        /* printf("  cons: "); DDMN(c); */

        if (info->projs[pn_Load_M])
          exchange(info->projs[pn_Load_M], mem);
        if (info->projs[pn_Load_res])
          exchange(info->projs[pn_Load_res], copy_const_value(c));

        for (j = 0; j < get_compound_graph_path_length(path); ++j) {
          entity *node = get_compound_graph_path_node(path, j);

          fprintf(stdout, ".%s", get_entity_name(node));
          if (is_Array_type(get_entity_owner(node)))
            fprintf(stdout, "[%d]", get_compound_graph_path_array_index(path, j));
      /* We cannot determine a correct access path.  E.g., in jack, we load
         a byte from an object to generate an exception.  Happens in test program
      printf(">>>>>>>>>>>>> Found access to constant entity %s in function %s\n", get_entity_name(ent),
             get_entity_name(get_irg_entity(current_ir_graph)));
      printf("  load: "); DDMN(load);
      printf("  ptr:  "); DDMN(ptr);
      if (get_irn_op(ptr) == op_SymConst &&
          get_SymConst_kind(ptr) == symconst_addr_ent) { printf("  "); DDMEO(get_SymConst_entity(ptr)); }
      printf("cannot optimize.\n");

  /* we changed the irg, but try further */

  /* Check if the address of this Load is used more than once.
   * If not, this Load cannot be removed in any case. */
  if (get_irn_out_n(ptr) <= 1)
  /* follow the memory chain as long as there are only Loads */
  for (pred = skip_Proj(mem); ; pred = skip_Proj(get_Load_mem(pred))) {

    /*
     * BEWARE: one might think that checking the modes is useless, because
     * if the pointers are identical, they refer to the same object.
     * This is only true in strongly typed languages, not in C, where the
     * following is possible: a = *(type1 *)p; b = *(type2 *)p ...
     */

    if (get_irn_op(pred) == op_Store && get_Store_ptr(pred) == ptr &&
        get_irn_mode(get_Store_value(pred)) == load_mode) {
      ldst_info_t *pred_info = get_irn_link(pred);

      /*
       * a Load immediately after a Store -- a read after write.
       * We may remove the Load, if neither the Load nor the Store has an
       * exception handler OR they are in the same block.  In the latter case
       * the Load cannot throw an exception if the previous Store did not.
       *
       * Why do we need to check for the Store exception?  If the Store cannot
       * be executed (e.g. ROM), the exception handler might simply jump into
       * the Load's block :-(
       * We could make it a little bit better if we knew that the exception
       * handler of the Store jumps directly to the end...
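       *
       * Illustrative source fragment (hypothetical names, not from this file):
       *
       *   *p = v;        the Store
       *   x  = *p;       the Load, same address and mode
       *
       * The Load's result can be replaced by v.
       */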
      if ((!pred_info->projs[pn_Store_X_except] && !info->projs[pn_Load_X_except]) ||
          get_nodes_block(load) == get_nodes_block(pred)) {
        DBG_OPT_RAW(load, pred);

        exchange( info->projs[pn_Load_res], get_Store_value(pred) );

        if (info->projs[pn_Load_M])
          exchange(info->projs[pn_Load_M], mem);

        if (info->projs[pn_Load_X_except])
          exchange( info->projs[pn_Load_X_except], new_Bad() );
    else if (get_irn_op(pred) == op_Load && get_Load_ptr(pred) == ptr &&
             get_Load_mode(pred) == load_mode) {
      /*
       * a Load after a Load -- a read after read.
       * We may remove the second Load, if it does not have an exception
       * handler OR they are in the same block.  In the latter case the Load
       * cannot throw an exception if the previous Load did not.
       *
       * Here, there is no need to check whether the previous Load has an
       * exception handler, because both would raise exactly the same
       * exception...
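       *
       * Illustrative source fragment (hypothetical names, not from this file):
       *
       *   x = *p;        first Load
       *   y = *p;        second Load, same address and mode
       *
       * The second Load can reuse the result of the first.
       */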
      if (! info->projs[pn_Load_X_except] || get_nodes_block(load) == get_nodes_block(pred)) {
        ldst_info_t *pred_info = get_irn_link(pred);

        DBG_OPT_RAR(load, pred);

        if (pred_info->projs[pn_Load_res]) {
          /* we need a data proj from the previous Load for this optimization */
          exchange( info->projs[pn_Load_res], pred_info->projs[pn_Load_res] );

          if (info->projs[pn_Load_M])
            exchange(info->projs[pn_Load_M], mem);

          if (info->projs[pn_Load_res]) {
            set_Proj_pred(info->projs[pn_Load_res], pred);
            set_nodes_block(info->projs[pn_Load_res], get_nodes_block(pred));

          if (info->projs[pn_Load_M]) {
            /* Actually, this if should not be necessary. Construct the Loads
            exchange(info->projs[pn_Load_M], mem);

        if (info->projs[pn_Load_X_except])
          exchange(info->projs[pn_Load_X_except], new_Bad());

    /* follow only Load chains */
    if (get_irn_op(pred) != op_Load)
static int optimize_store(ir_node *store)
  ldst_info_t *info = get_irn_link(store);
  ir_node *pred, *mem, *ptr, *value, *block;

  if (get_Store_volatility(store) == volatility_is_volatile)
  /*
   * BEWARE: one might think that checking the modes is useless, because
   * if the pointers are identical, they refer to the same object.
   * This is only true in strongly typed languages, not in C, where the
   * following is possible: *(type1 *)p = a; *(type2 *)p = b ...
   */

  ptr = get_Store_ptr(store);

  /* Check if the address of this Store is used more than once.
   * If not, this Store cannot be removed in any case. */
  if (get_irn_out_n(ptr) <= 1)
  block = get_nodes_block(store);
  mem   = get_Store_mem(store);
  value = get_Store_value(store);
  mode  = get_irn_mode(value);

  /* follow the memory chain as long as there are only Loads */
  for (pred = skip_Proj(mem); ; pred = skip_Proj(get_Load_mem(pred))) {
    ldst_info_t *pred_info = get_irn_link(pred);
    if (get_irn_op(pred) == op_Store && get_Store_ptr(pred) == ptr &&
        get_nodes_block(pred) == block && get_irn_mode(get_Store_value(pred)) == mode) {
      /*
       * a Store after a Store in the same block -- a write after write.
       * We may remove the first Store, if it does not have an exception handler.
       *
       * TODO: What if both have the same exception handler?
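       *
       * Illustrative source fragment (hypothetical names, not from this file):
       *
       *   *p = a;        first Store, value never read in between
       *   *p = b;        second Store, same address, same block, same mode
       *
       * The first Store can be removed.
       */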
      if (get_Store_volatility(pred) != volatility_is_volatile && !pred_info->projs[pn_Store_X_except]) {
        DBG_OPT_WAW(pred, store);
        exchange( pred_info->projs[pn_Store_M], get_Store_mem(pred) );

    else if (get_irn_op(pred) == op_Load && get_Load_ptr(pred) == ptr &&
             value == pred_info->projs[pn_Load_res]) {
      /*
       * a Store of a value after a Load -- a write after read.
       * We may remove the Store, if it does not have an exception handler.
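       *
       * Illustrative source fragment (hypothetical names, not from this file):
       *
       *   x  = *p;       the Load
       *   *p = x;        the Store writes back the value just loaded
       *
       * The Store can be removed.
       */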
      if (! info->projs[pn_Store_X_except]) {
        DBG_OPT_WAR(store, pred);
        exchange( info->projs[pn_Store_M], mem );

    /* follow only Load chains */
    if (get_irn_op(pred) != op_Load)
/**
 * walker, optimizes Phi after Stores:
 * Does the following optimization:
 *
 *   val1   val2   val3          val1  val2  val3
 *
 * This reduces the number of Stores and allows for predicated execution.
 * It moves Stores towards the end of a function, which may be bad.
 *
 * Is only allowed if the predecessor blocks have only one successor.
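 *
 * Illustrative source fragment (hypothetical names, not from this file):
 *
 *   if (c) *p = a; else *p = b;     one Store per predecessor block
 *
 * roughly becomes
 *
 *   *p = c ? a : b;                 one Store fed by a data Phi
 */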
static int optimize_phi(ir_node *phi, void *env)
  walk_env_t *wenv = env;
  ir_node *store, *old_store, *ptr, *block, *phiM, *phiD, *exc, *projM;

  ir_node **inM, **inD;

  block_info_t *bl_info;

  /* Must be a memory Phi */
  if (get_irn_mode(phi) != mode_M)

  n = get_Phi_n_preds(phi);

  store = skip_Proj(get_Phi_pred(phi, 0));

  if (get_irn_op(store) != op_Store)

  /* abort on bad blocks */
  if (is_Bad(get_nodes_block(store)))

  /* check if the block has only one output */
  bl_info = get_irn_link(get_nodes_block(store));

  /* this is the address of the store */
  ptr  = get_Store_ptr(store);
  mode = get_irn_mode(get_Store_value(store));
  info = get_irn_link(store);
  exc  = info->exc_block;
  for (i = 1; i < n; ++i) {
    ir_node *pred = skip_Proj(get_Phi_pred(phi, i));

    if (get_irn_op(pred) != op_Store)

    if (mode != get_irn_mode(get_Store_value(pred)) || ptr != get_Store_ptr(pred))

    info = get_irn_link(pred);

    /* check if all Stores have the same exception flow */
    if (exc != info->exc_block)

    /* abort on bad blocks */
    if (is_Bad(get_nodes_block(pred)))

    /* check if the block has only one output */
    bl_info = get_irn_link(get_nodes_block(pred));
  /*
   * OK, if we are here, we found all predecessors of a Phi that
   * are Stores to the same address.  That means whatever we do before
   * we enter the block of the Phi, we do a Store.
   * So, we can move the Store into the current block:
   *
   *   val1   val2   val3          val1  val2  val3
   *    |  Str  | |  Str  | |  Str  |        \  |  /
   *
   * Is only allowed if the predecessor blocks have only one successor.
   */
  /* first step: collect all inputs */
  NEW_ARR_A(ir_node *, inM, n);
  NEW_ARR_A(ir_node *, inD, n);
  NEW_ARR_A(int, idx, n);

  for (i = 0; i < n; ++i) {
    ir_node *pred = skip_Proj(get_Phi_pred(phi, i));
    info = get_irn_link(pred);

    inM[i] = get_Store_mem(pred);
    inD[i] = get_Store_value(pred);
    idx[i] = info->exc_idx;

  block = get_nodes_block(phi);

  /* second step: create a new memory Phi */
  phiM = new_rd_Phi(get_irn_dbg_info(phi), current_ir_graph, block, n, inM, mode_M);

  /* third step: create a new data Phi */
  phiD = new_rd_Phi(get_irn_dbg_info(phi), current_ir_graph, block, n, inD, mode);

  /* fourth step: create the Store */
  store = new_rd_Store(db, current_ir_graph, block, phiM, ptr, phiD);

  co_set_irn_name(store, co_get_irn_ident(old_store));

  projM = new_rd_Proj(NULL, current_ir_graph, block, store, mode_M, pn_Store_M);

  info = get_ldst_info(store, wenv);
  info->projs[pn_Store_M] = projM;
  /* fifth step: repair the exception flow */

    ir_node *projX = new_rd_Proj(NULL, current_ir_graph, block, store, mode_X, pn_Store_X_except);

    info->projs[pn_Store_X_except] = projX;
    info->exc_block                = exc;
    info->exc_idx                  = idx[0];

    for (i = 0; i < n; ++i) {
      set_Block_cfgpred(exc, idx[i], projX);

    /* the exception block should be optimized as some inputs are identical now */
  /* sixth step: replace the old Phi */
  exchange(phi, projM);
/**
 * walker, performs the actual optimizations on Load/Store/Phi nodes
 */
static void do_load_store_optimize(ir_node *n, void *env)
  walk_env_t *wenv = env;

  switch (get_irn_opcode(n)) {

    wenv->changes |= optimize_load(n);

    wenv->changes |= optimize_store(n);

    wenv->changes |= optimize_phi(n, env);
/*
 * do the load store optimization
 */
void optimize_load_store(ir_graph *irg)
  assert(get_irg_phase_state(irg) != phase_building);

  if (!get_opt_redundant_LoadStore())

  obstack_init(&env.obst);

  /* init the links, then collect Loads/Stores/Proj's in lists */
  irg_walk_graph(irg, init_links, collect_nodes, &env);

  /* now we have collected enough information, optimize */
  irg_walk_graph(irg, NULL, do_load_store_optimize, &env);

  obstack_free(&env.obst, NULL);
  /* Handle graph state */

  if (get_irg_outs_state(current_ir_graph) == outs_consistent)
    set_irg_outs_inconsistent(current_ir_graph);

  /* Is this really needed?  Yes: an exception block may become Bad, and that might be tested later. */
  if (get_irg_dom_state(current_ir_graph) == dom_consistent)
    set_irg_dom_inconsistent(current_ir_graph);
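/*
 * Minimal usage sketch (not part of the original file; assumes a fully
 * constructed ir_graph *irg that is past phase_building):
 *
 *   optimize_load_store(irg);
 *
 * Afterwards the out edges and the dominance information of irg may be
 * inconsistent and have to be recomputed before passes that rely on them.
 */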