3 * File name: ir/opt/ldstopt.c
4 * Purpose: load store optimizations
8 * Copyright: (c) 1998-2004 Universität Karlsruhe
9 * Licence: This file is protected by the GPL - GNU GENERAL PUBLIC LICENSE.
25 # include "irnode_t.h"
26 # include "irgraph_t.h"
27 # include "irmode_t.h"
29 # include "ircons_t.h"
34 # include "dbginfo_t.h"
35 # include "iropt_dbg.h"
36 # include "irflag_t.h"
40 # include "opt_polymorphy.h"
43 #include "cacheopt/cachesim.h"
47 #define IMAX(a,b) ((a) > (b) ? (a) : (b))
49 #define MAX_PROJ IMAX(pn_Load_max, pn_Store_max)
54 typedef struct _walk_env_t {
55 struct obstack obst; /**< list of all stores */
60 * flags for Load/Store
63 LDST_VISITED = 1 /**< if set, this Load/Store is already visited */
69 typedef struct _ldst_info_t {
70 ir_node *projs[MAX_PROJ]; /**< list of Proj's of this node */
71 ir_node *exc_block; /**< the exception block if available */
72 int exc_idx; /**< predecessor index in the exception block */
73 unsigned flags; /**< flags */
77 * flags for control flow
80 BLOCK_HAS_COND = 1, /**< Block has conditional control flow */
81 BLOCK_HAS_EXC = 2 /**< Block has exceptional control flow */
87 typedef struct _block_info_t {
88 unsigned flags; /**< flags for the block */
92 * walker, clears all links first
94 static void init_links(ir_node *n, void *env)
96 set_irn_link(n, NULL);
100 * get the Load/Store info of a node
102 static ldst_info_t *get_ldst_info(ir_node *node, walk_env_t *env)
104 ldst_info_t *info = get_irn_link(node);
107 info = obstack_alloc(&env->obst, sizeof(*info));
109 memset(info, 0, sizeof(*info));
111 set_irn_link(node, info);
117 * get the Block info of a node
119 static block_info_t *get_block_info(ir_node *node, walk_env_t *env)
121 block_info_t *info = get_irn_link(node);
124 info = obstack_alloc(&env->obst, sizeof(*info));
126 memset(info, 0, sizeof(*info));
128 set_irn_link(node, info);
134 * update the projection info for a Load/Store
136 static int update_projs(ldst_info_t *info, ir_node *proj)
138 long nr = get_Proj_proj(proj);
140 assert(0 <= nr && nr <= MAX_PROJ && "Wrong proj from LoadStore");
142 if (info->projs[nr]) {
143 /* there is already one, do CSE */
144 exchange(proj, info->projs[nr]);
148 info->projs[nr] = proj;
154 * update the exception block info for a Load/Store
156 static int update_exc(ldst_info_t *info, ir_node *block, int pos)
158 assert(info->exc_block == NULL && "more than one exception block found");
160 info->exc_block = block;
165 #define get_irn_out_n(node) (unsigned)PTR_TO_INT(get_irn_link(node))
166 #define set_irn_out_n(node, n) set_irn_link(node, INT_TO_PTR(n))
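/* Note: the link field of an address node is reused as a small use counter.
 * collect_nodes() below increments it once for every Load or Store that uses
 * the address, and optimize_load()/optimize_store() skip addresses used at
 * most once, because such a Load/Store cannot be replaced by an earlier
 * access to the same address. */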
169 * walker, collects all Load/Store/Proj nodes
171 * walks from Start -> End
173 static void collect_nodes(ir_node *node, void *env)
175 ir_op *op = get_irn_op(node);
177 ldst_info_t *ldst_info;
178 walk_env_t *wenv = env;
184 pred = get_Proj_pred(node);
185 op = get_irn_op(pred);
188 ldst_info = get_ldst_info(pred, wenv);
190 wenv->changes |= update_projs(ldst_info, node);
192 if ((ldst_info->flags & LDST_VISITED) == 0) {
193 adr = get_Load_ptr(pred);
194 set_irn_out_n(adr, get_irn_out_n(adr) + 1);
196 ldst_info->flags |= LDST_VISITED;
199 else if (op == op_Store) {
200 ldst_info = get_ldst_info(pred, wenv);
202 wenv->changes |= update_projs(ldst_info, node);
204 if ((ldst_info->flags & LDST_VISITED) == 0) {
205 adr = get_Store_ptr(pred);
206 set_irn_out_n(adr, get_irn_out_n(adr) + 1);
208 ldst_info->flags |= LDST_VISITED;
212 else if (op == op_Block) { /* check, if it's an exception block */
215 for (i = 0, n = get_Block_n_cfgpreds(node); i < n; ++i) {
217 block_info_t *bl_info;
219 pred = skip_Proj(get_Block_cfgpred(node, i));
221 /* ignore Bad predecessors, they will be removed later */
225 pred_block = get_nodes_block(pred);
226 bl_info = get_block_info(pred_block, wenv);
228 if (is_fragile_op(pred))
229 bl_info->flags |= BLOCK_HAS_EXC;
230 else if (is_forking_op(pred))
231 bl_info->flags |= BLOCK_HAS_COND;
233 if (get_irn_op(pred) == op_Load || get_irn_op(pred) == op_Store) {
234 ldst_info = get_ldst_info(pred, wenv);
236 wenv->changes |= update_exc(ldst_info, node, i);
243 * Returns an entity if the address ptr points to a constant one.
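*
* Illustrative example (not from the original source, names are made up):
* for C code like
*
*   static const int tab[4] = { 2, 3, 5, 7 };
*   ... = tab[2];
*
* the address is a Sel with the constant in-bounds index 2; the walk over
* the Sel chain ends at a SymConst of the constant entity 'tab', which is
* then returned (provided the constant-index and bounds checks below succeed).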
245 static entity *find_constant_entity(ir_node *ptr)
248 ir_op *op = get_irn_op(ptr);
250 if (op == op_SymConst && (get_SymConst_kind(ptr) == symconst_addr_ent)) {
251 return get_SymConst_entity(ptr);
253 else if (op == op_Sel) {
254 entity *ent = get_Sel_entity(ptr);
255 type *tp = get_entity_owner(ent);
257 /* Do not fiddle with polymorphy. */
258 if (is_Class_type(get_entity_owner(ent)) &&
259 ((get_entity_n_overwrites(ent) != 0) ||
260 (get_entity_n_overwrittenby(ent) != 0) ) )
263 if (variability_constant == get_entity_variability(ent))
266 if (is_Array_type(tp)) {
270 for (i = 0, n = get_Sel_n_indexs(ptr); i < n; ++i) {
272 tarval *tlower, *tupper;
273 ir_node *index = get_Sel_index(ptr, i);
274 tarval *tv = computed_value(index);
276 /* check if the index is constant */
277 if (tv == tarval_bad)
280 bound = get_array_lower_bound(tp, i);
281 tlower = computed_value(bound);
282 bound = get_array_upper_bound(tp, i);
283 tupper = computed_value(bound);
285 if (tlower == tarval_bad || tupper == tarval_bad)
288 if (tarval_cmp(tv, tlower) & pn_Cmp_Lt)
290 if (tarval_cmp(tupper, tv) & pn_Cmp_Lt)
293 /* ok, bounds check finished */
298 ptr = get_Sel_ptr(ptr);
306 * Returns the array index of the Sel node n for dimension dim
308 static long get_Sel_array_index_long(ir_node *n, int dim) {
309 ir_node *index = get_Sel_index(n, dim);
310 assert(get_irn_op(index) == op_Const);
311 return get_tarval_long(get_Const_tarval(index));
315 * Returns the accessed component graph path for a
316 * node computing an address.
318 * @param ptr the node computing the address
319 * @param depth current depth in steps upward from the root
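*
* Illustrative sketch (hypothetical example): for an address built as
* Sel(SymConst(ent), field) the call with depth 0 recurses to the SymConst
* at depth 1, allocates a path of length 1 there, and fills in 'field'
* (plus its array index, if the owner is an array type) on the way back.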
322 static compound_graph_path *rec_get_accessed_path(ir_node *ptr, int depth) {
323 compound_graph_path *res = NULL;
324 entity *root, *field;
327 if (get_irn_op(ptr) == op_SymConst) {
328 /* a SymConst. If the depth is 0, this is an access to a global
329 * entity and we don't need a component path, else we know
330 * at least its length.
332 assert(get_SymConst_kind(ptr) == symconst_addr_ent);
333 root = get_SymConst_entity(ptr);
334 res = (depth == 0) ? NULL : new_compound_graph_path(get_entity_type(root), depth);
337 assert(get_irn_op(ptr) == op_Sel);
338 /* it's a Sel, go up until we find the root */
339 res = rec_get_accessed_path(get_Sel_ptr(ptr), depth+1);
341 /* fill up the step in the path at the current position */
342 field = get_Sel_entity(ptr);
343 path_len = get_compound_graph_path_length(res);
344 pos = path_len - depth - 1;
345 set_compound_graph_path_node(res, pos, field);
347 if (is_Array_type(get_entity_owner(field))) {
348 assert(get_Sel_n_indexs(ptr) == 1 && "multi dim arrays not implemented");
349 set_compound_graph_path_array_index(res, pos, get_Sel_array_index_long(ptr, 0));
355 /** Returns an access path or NULL. The access path is only
356 * valid if the graph is in phase_high and _no_ address computation is used.
358 static compound_graph_path *get_accessed_path(ir_node *ptr) {
359 return rec_get_accessed_path(ptr, 0);
365 static int optimize_load(ir_node *load)
367 ldst_info_t *info = get_irn_link(load);
368 ir_mode *load_mode = get_Load_mode(load);
369 ir_node *pred, *mem, *ptr, *new_node;
373 /* do NOT touch volatile loads for now */
374 if (get_Load_volatility(load) == volatility_is_volatile)
377 /* the address of the load to be optimized */
378 ptr = get_Load_ptr(load);
381 * Check if we can remove the exception from a Load:
382 * This can be done, if the address is from an Sel(Alloc) and
383 * the Sel type is a subtype of the allocated type.
385 * This optimizes some often used OO constructs,
386 * like x = new O; x->t;
388 if (info->projs[pn_Load_X_except]) {
389 if (get_irn_op(ptr) == op_Sel) {
390 ir_node *mem = get_Sel_mem(ptr);
392 if (get_irn_op(skip_Proj(mem)) == op_Alloc) {
393 /* ok, check the types */
394 entity *ent = get_Sel_entity(ptr);
395 type *s_type = get_entity_type(ent);
396 type *a_type = get_Alloc_type(mem);
398 if (is_subclass_of(s_type, a_type)) {
399 /* ok, condition met: there can't be an exception because
400 * Alloc guarantees that enough memory was allocated */
402 exchange(info->projs[pn_Load_X_except], new_Bad());
403 info->projs[pn_Load_X_except] = NULL;
408 else if ((get_irn_op(skip_Proj(ptr)) == op_Alloc) ||
409 ((get_irn_op(ptr) == op_Cast) && (get_irn_op(skip_Proj(get_Cast_op(ptr))) == op_Alloc))) {
410 /* simple case: a direct load after an Alloc. A Firm Alloc throws
411 * an exception in case of out-of-memory, so there is no way for an
412 * exception in this load.
413 * This code is constructed by the "exception lowering" in the Jack compiler.
415 exchange(info->projs[pn_Load_X_except], new_Bad());
416 info->projs[pn_Load_X_except] = NULL;
421 /* the mem of the Load. Must still be returned after optimization */
422 mem = get_Load_mem(load);
424 if (! info->projs[pn_Load_res] && ! info->projs[pn_Load_X_except]) {
425 /* a Load whose value is neither used nor exception-checked; remove it */
426 exchange(info->projs[pn_Load_M], mem);
431 /* Load from a constant polymorphic field, where we can resolve
433 new_node = transform_node_Load(load);
434 if (new_node != load) {
435 if (info->projs[pn_Load_M]) {
436 exchange(info->projs[pn_Load_M], mem);
437 info->projs[pn_Load_M] = NULL;
439 if (info->projs[pn_Load_X_except]) {
440 exchange(info->projs[pn_Load_X_except], new_Bad());
441 info->projs[pn_Load_X_except] = NULL;
443 if (info->projs[pn_Load_res])
444 exchange(info->projs[pn_Load_res], new_node);
448 /* check if we can determine the entity that will be loaded */
449 ent = find_constant_entity(ptr);
451 if ((allocation_static == get_entity_allocation(ent)) &&
452 (visibility_external_allocated != get_entity_visibility(ent))) {
453 /* a static allocation that is not external: there should be NO exception
456 /* no exception, clear the info field as it might be checked later again */
457 if (info->projs[pn_Load_X_except]) {
458 exchange(info->projs[pn_Load_X_except], new_Bad());
459 info->projs[pn_Load_X_except] = NULL;
463 if (variability_constant == get_entity_variability(ent)
464 && is_atomic_entity(ent)) {
465 /* Might not be atomic after
466 lowering of Sels. In this
467 case we could also load, but
468 it's more complicated. */
469 /* simpler case: we load the content of a constant value:
470 * replace it by the constant itself
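*
* Illustrative example (not from the original source): for
*
*   static const int answer = 42;
*   ... = answer;
*
* the Load's result Proj would be exchanged against a copy of the entity's
* constant value (a Const 42) and its memory Proj against the Load's own
* memory input, removing the Load.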
474 if (info->projs[pn_Load_M]) {
475 exchange(info->projs[pn_Load_M], mem);
480 if (info->projs[pn_Load_res]) {
481 if (is_atomic_entity(ent)) {
482 ir_node *c = copy_const_value(get_atomic_ent_value(ent));
485 exchange(info->projs[pn_Load_res], c);
490 else if (variability_constant == get_entity_variability(ent)) {
491 compound_graph_path *path = get_accessed_path(ptr);
496 assert(is_proper_compound_graph_path(path, get_compound_graph_path_length(path)-1));
500 for (j = 0; j < get_compound_graph_path_length(path); ++j) {
501 entity *node = get_compound_graph_path_node(path, j);
502 fprintf(stdout, ".%s", get_entity_name(node));
503 if (is_Array_type(get_entity_owner(node)))
504 fprintf(stdout, "[%d]", get_compound_graph_path_array_index(path, j));
510 c = get_compound_ent_value_by_path(ent, path);
511 free_compound_graph_path(path);
513 /* printf(" cons: "); DDMN(c); */
515 if (info->projs[pn_Load_M]) {
516 exchange(info->projs[pn_Load_M], mem);
519 if (info->projs[pn_Load_res]) {
520 exchange(info->projs[pn_Load_res], copy_const_value(c));
525 /* We cannot determine a correct access path. E.g., in jack, we load
526 a byte from an object to generate an exception. Happens in test program
528 printf(">>>>>>>>>>>>> Found access to constant entity %s in function %s\n", get_entity_name(ent),
529 get_entity_name(get_irg_entity(current_ir_graph)));
530 printf(" load: "); DDMN(load);
531 printf(" ptr: "); DDMN(ptr);
538 /* Check, if the address of this load is used more than once.
539 * If not, this load cannot be removed in any case. */
540 if (get_irn_out_n(ptr) <= 1)
543 /* follow the memory chain as long as there are only Loads
544 * and try to replace the current Load or Store by a previous one
546 for (pred = skip_Proj(mem); ; pred = skip_Proj(get_Load_mem(pred))) {
548 * BEWARE: one might think that checking the modes is useless, because
549 * if the pointers are identical, they refer to the same object.
550 * This is only true in strongly typed languages, not in C, where the following
551 * is possible: a = *(type1 *)p; b = *(type2 *)p ...
554 if (get_irn_op(pred) == op_Store && get_Store_ptr(pred) == ptr &&
555 get_irn_mode(get_Store_value(pred)) == load_mode) {
556 ldst_info_t *pred_info = get_irn_link(pred);
559 * a Load immediately after a Store -- a read after write.
560 * We may remove the Load if neither the Load nor the Store has an exception handler
561 * OR they are in the same block. In the latter case the Load cannot
562 * throw an exception when the previous Store was quiet.
564 * Why do we need to check for the Store exception? If the Store cannot
565 * be executed (ROM) the exception handler might simply jump into
567 * We could make it a little bit better if we would know that the exception
568 * handler of the Store jumps directly to the end...
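*
* Illustrative source-level sketch of this read-after-write case
* (hypothetical example):
*
*   *p = x;      <- the Store
*   y  = *p;     <- this Load goes away, y becomes x
*
* i.e. the Load's result Proj is exchanged against the stored value and
* its memory Proj against the Load's memory input.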
570 if ((!pred_info->projs[pn_Store_X_except] && !info->projs[pn_Load_X_except]) ||
571 get_nodes_block(load) == get_nodes_block(pred)) {
572 ir_node *value = get_Store_value(pred);
574 DBG_OPT_RAW(load, value);
575 if (info->projs[pn_Load_M])
576 exchange(info->projs[pn_Load_M], mem);
579 if (info->projs[pn_Load_X_except])
580 exchange( info->projs[pn_Load_X_except], new_Bad());
582 if (info->projs[pn_Load_res])
583 exchange(info->projs[pn_Load_res], value);
588 else if (get_irn_op(pred) == op_Load && get_Load_ptr(pred) == ptr &&
589 get_Load_mode(pred) == load_mode) {
591 * a Load after a Load -- a read after read.
592 * We may remove the second Load, if it does not have an exception handler
593 * OR they are in the same block. In the latter case the Load cannot
594 * throw an exception when the previous Load was quiet.
596 * Here, there is no need to check if the previous Load has an exception
597 * handler because they would have exactly the same exception...
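*
* Illustrative source-level sketch (hypothetical example):
*
*   a = *p;      <- first Load
*   b = *p;      <- second Load of the same mode, reuses the first result
*
* The second Load's result Proj is either exchanged against the result Proj
* of the first Load or, if the first Load has none, redirected to it.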
599 if (! info->projs[pn_Load_X_except] || get_nodes_block(load) == get_nodes_block(pred)) {
600 ldst_info_t *pred_info = get_irn_link(pred);
602 DBG_OPT_RAR(load, pred);
604 if (pred_info->projs[pn_Load_res]) {
605 /* we need a data proj from the previous load for this optimization */
606 if (info->projs[pn_Load_res])
607 exchange(info->projs[pn_Load_res], pred_info->projs[pn_Load_res]);
609 if (info->projs[pn_Load_M])
610 exchange(info->projs[pn_Load_M], mem);
613 if (info->projs[pn_Load_res]) {
614 set_Proj_pred(info->projs[pn_Load_res], pred);
615 set_nodes_block(info->projs[pn_Load_res], get_nodes_block(pred));
616 pred_info->projs[pn_Load_res] = info->projs[pn_Load_res];
618 if (info->projs[pn_Load_M]) {
619 /* Actually, this if should not be necessary. Construct the Loads
621 exchange(info->projs[pn_Load_M], mem);
626 if (info->projs[pn_Load_X_except])
627 exchange(info->projs[pn_Load_X_except], new_Bad());
633 /* follow only Load chains */
634 if (get_irn_op(pred) != op_Load)
643 static int optimize_store(ir_node *store)
645 ldst_info_t *info = get_irn_link(store);
646 ir_node *pred, *mem, *ptr, *value, *block;
650 if (get_Store_volatility(store) == volatility_is_volatile)
654 * BEWARE: one might think that checking the modes is useless, because
655 * if the pointers are identical, they refer to the same object.
656 * This is only true in strongly typed languages, not in C, where the following
657 * is possible: *(type1 *)p = a; *(type2 *)p = b ...
660 ptr = get_Store_ptr(store);
662 /* Check if the address of this store is used more than once.
663 * If not, this store cannot be removed in any case. */
664 if (get_irn_out_n(ptr) <= 1)
667 block = get_nodes_block(store);
668 mem = get_Store_mem(store);
669 value = get_Store_value(store);
670 mode = get_irn_mode(value);
672 /* follow the memory chain as long as there are only Loads */
673 for (pred = skip_Proj(mem); ; pred = skip_Proj(get_Load_mem(pred))) {
674 ldst_info_t *pred_info = get_irn_link(pred);
676 if (get_irn_op(pred) == op_Store && get_Store_ptr(pred) == ptr &&
677 get_nodes_block(pred) == block && get_irn_mode(get_Store_value(pred)) == mode) {
679 * a Store after a Store in the same block -- a write after write.
680 * We may remove the first Store, if it does not have an exception handler.
682 * TODO: What, if both have the same exception handler ???
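*
* Illustrative source-level sketch (hypothetical example):
*
*   *p = a;      <- this earlier Store becomes dead ...
*   *p = b;      <- ... because it is overwritten here
*
* The earlier Store's memory Proj is exchanged against its memory input,
* which takes it out of the memory chain.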
684 if (get_Store_volatility(pred) != volatility_is_volatile && !pred_info->projs[pn_Store_X_except]) {
685 DBG_OPT_WAW(pred, store);
686 exchange( pred_info->projs[pn_Store_M], get_Store_mem(pred) );
690 else if (get_irn_op(pred) == op_Load && get_Load_ptr(pred) == ptr &&
691 value == pred_info->projs[pn_Load_res]) {
693 * a Store of a value after a Load from the same address -- a write after read.
694 * We may remove the Store if it does not have an exception handler.
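*
* Illustrative source-level sketch (hypothetical example):
*
*   a  = *p;     <- Load
*   *p = a;      <- stores back the value just loaded: redundant
*
* The Store's memory Proj is exchanged against its memory input.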
696 if (! info->projs[pn_Store_X_except]) {
697 DBG_OPT_WAR(store, pred);
698 exchange( info->projs[pn_Store_M], mem );
703 /* follow only Load chains */
704 if (get_irn_op(pred) != op_Load)
711 * walker, optimizes Phi after Stores:
712 * Does the following optimization:
714 * val1 val2 val3 val1 val2 val3
722 * This reduces the number of Stores and allows for predicated execution.
723 * It moves Stores towards the end of a function, which may be bad
725 * Is only allowed if the predecessor blocks have only one successor.
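*
* Illustrative source-level sketch (hypothetical example):
*
*   if (c) *p = val1; else *p = val2;
*
* becomes, after moving the Store behind the Phi,
*
*   *p = c ? val1 : val2;
*
* which needs only one Store and can be executed in a predicated fashion.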
727 static int optimize_phi(ir_node *phi, void *env)
729 walk_env_t *wenv = env;
731 ir_node *store, *old_store, *ptr, *block, *phiM, *phiD, *exc, *projM;
733 ir_node **inM, **inD;
737 block_info_t *bl_info;
739 /* Must be a memory Phi */
740 if (get_irn_mode(phi) != mode_M)
743 n = get_Phi_n_preds(phi);
747 store = skip_Proj(get_Phi_pred(phi, 0));
749 if (get_irn_op(store) != op_Store)
752 /* abort on bad blocks */
753 if (is_Bad(get_nodes_block(store)))
756 /* check if the block has only one output */
757 bl_info = get_irn_link(get_nodes_block(store));
761 /* this is the address of the store */
762 ptr = get_Store_ptr(store);
763 mode = get_irn_mode(get_Store_value(store));
764 info = get_irn_link(store);
765 exc = info->exc_block;
767 for (i = 1; i < n; ++i) {
768 ir_node *pred = skip_Proj(get_Phi_pred(phi, i));
770 if (get_irn_op(pred) != op_Store)
773 if (mode != get_irn_mode(get_Store_value(pred)) || ptr != get_Store_ptr(pred))
776 info = get_irn_link(pred);
778 /* check, if all stores have the same exception flow */
779 if (exc != info->exc_block)
782 /* abort on bad blocks */
783 if (is_Bad(get_nodes_block(store)))
786 /* check if the block has only one output */
787 bl_info = get_irn_link(get_nodes_block(store));
793 * ok, when we are here, we found all predecessors of a Phi that
794 * are Stores to the same address. That means on every path into
795 * the block of the Phi a Store to this address is executed.
796 * So, we can move the Store into the current block:
798 * val1 val2 val3 val1 val2 val3
800 * | Str | | Str | | Str | \ | /
806 * Is only allowed if the predecessor blocks have only one successor.
809 /* first step: collect all inputs */
810 NEW_ARR_A(ir_node *, inM, n);
811 NEW_ARR_A(ir_node *, inD, n);
812 NEW_ARR_A(int, idx, n);
814 for (i = 0; i < n; ++i) {
815 ir_node *pred = skip_Proj(get_Phi_pred(phi, i));
816 info = get_irn_link(pred);
818 inM[i] = get_Store_mem(pred);
819 inD[i] = get_Store_value(pred);
820 idx[i] = info->exc_idx;
822 block = get_nodes_block(phi);
824 /* second step: create a new memory Phi */
825 phiM = new_rd_Phi(get_irn_dbg_info(phi), current_ir_graph, block, n, inM, mode_M);
827 /* third step: create a new data Phi */
828 phiD = new_rd_Phi(get_irn_dbg_info(phi), current_ir_graph, block, n, inD, mode);
830 /* fourth step: create the Store */
831 store = new_rd_Store(db, current_ir_graph, block, phiM, ptr, phiD);
833 co_set_irn_name(store, co_get_irn_ident(old_store));
836 projM = new_rd_Proj(NULL, current_ir_graph, block, store, mode_M, pn_Store_M);
838 info = get_ldst_info(store, wenv);
839 info->projs[pn_Store_M] = projM;
841 /* fifth step: repair exception flow */
843 ir_node *projX = new_rd_Proj(NULL, current_ir_graph, block, store, mode_X, pn_Store_X_except);
845 info->projs[pn_Store_X_except] = projX;
846 info->exc_block = exc;
847 info->exc_idx = idx[0];
849 for (i = 0; i < n; ++i) {
850 set_Block_cfgpred(exc, idx[i], projX);
854 /* the exception block should be optimized as some inputs are identical now */
858 /* sixth step: replace the old Phi */
859 exchange(phi, projM);
865 * walker, performs the actual Load/Store/Phi optimization on a node
867 static void do_load_store_optimize(ir_node *n, void *env)
869 walk_env_t *wenv = env;
871 switch (get_irn_opcode(n)) {
874 wenv->changes |= optimize_load(n);
878 wenv->changes |= optimize_store(n);
882 wenv->changes |= optimize_phi(n, env);
890 * do the load store optimization
892 void optimize_load_store(ir_graph *irg)
896 assert(get_irg_phase_state(irg) != phase_building);
897 assert(get_irg_pinned(irg) != op_pin_state_floats &&
898 "LoadStore optimization needs pinned graph");
900 if (!get_opt_redundant_LoadStore())
903 obstack_init(&env.obst);
906 /* init the links, then collect Loads/Stores/Proj's in lists */
907 irg_walk_graph(irg, init_links, collect_nodes, &env);
909 /* now we have collected enough information, optimize */
910 irg_walk_graph(irg, NULL, do_load_store_optimize, &env);
912 obstack_free(&env.obst, NULL);
914 /* Handle graph state */
916 if (get_irg_outs_state(current_ir_graph) == outs_consistent)
917 set_irg_outs_inconsistent(current_ir_graph);
919 /* is this really needed? Yes, an exception block may become Bad, but this might still be tested */
920 if (get_irg_dom_state(current_ir_graph) == dom_consistent)
921 set_irg_dom_inconsistent(current_ir_graph);