2 * Copyright (C) 1995-2008 University of Karlsruhe. All rights reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
22 * @brief Operator Strength Reduction.
24 * @author Michael Beck
27 * Implementation of the Operator Strength Reduction algorithm
28 * by Keith D. Cooper, L. Taylor Simpson, Christopher A. Vick.
34 #include "iroptimize.h"
55 /** The debug handle. */
56 DEBUG_ONLY(static firm_dbg_module_t *dbg;)
	ir_node *head;     /**< the head of the list of nodes belonging to this SCC */
	tarval  *init;     /**< the init value iff only one exists. */
	tarval  *incr;     /**< the induction variable increment iff only a single const exists. */
	unsigned code;     /**< == iro_Add if +incr, iro_Sub if -incr, 0 if not analysed, iro_Bad else */
/** Per-node bookkeeping for Tarjan's SCC algorithm and the OSR analysis. */
typedef struct node_entry {
	unsigned DFSnum;    /**< the DFS number of this node */
	unsigned low;       /**< the low number of this node */
	ir_node *header;    /**< the header of this node (NULL if it is not part of an induction variable) */
	int      in_stack;  /**< flag, set if the node is on the stack */
	ir_node *next;      /**< link to the next node in the same scc */
	scc     *pscc;      /**< the scc of this node */
	unsigned POnum;     /**< the post order number for blocks */
/** The environment shared by one run of the OSR / Phi-cycle-removal pass. */
typedef struct iv_env {
	struct obstack obst;     /**< an obstack for allocations */
	ir_node  **stack;        /**< the node stack used by Tarjan's algorithm */
	int      tos;            /**< tos index */
	unsigned nextDFSnum;     /**< the current DFS number */
	unsigned POnum;          /**< current post order number */
	set      *quad_map;      /**< a map from (op, iv, rc) to the already reduced node */
	set      *lftr_edges;    /**< the set of lftr edges */
	unsigned replaced;       /**< number of replaced ops */
	unsigned lftr_replaced;  /**< number of applied linear function test replacements */
	unsigned osr_flags;      /**< additional flags steering the transformation */
	unsigned need_postpass;  /**< set, if a post pass is needed to fix Add and Sub nodes */
	/** Function called to process a SCC. */
	void (*process_scc)(scc *pscc, struct iv_env *env);
/**
 * An entry in the (op, node, node) -> node map that caches reductions.
 */
typedef struct quadruple_t {
	ir_opcode code;   /**< the opcode of the reduced operation */
	ir_node   *op1;   /**< the first operand of the reduced operation */
	ir_node   *op2;   /**< the second operand of the reduced operation */

	ir_node   *res;   /**< the reduced operation */
/** An edge of the linear function test replacement (LFTR) graph. */
typedef struct LFTR_edge {
	ir_node   *src;   /**< the source node */
	ir_node   *dst;   /**< the destination node */
	ir_opcode code;   /**< the opcode that must be applied */
	ir_node   *rc;    /**< the region const that must be applied */
116 static ir_node *reduce(ir_node *orig, ir_node *iv, ir_node *rc, iv_env *env);
119 * Compare two LFTR edges.
121 static int LFTR_cmp(const void *e1, const void *e2, size_t size) {
122 const LFTR_edge *l1 = e1;
123 const LFTR_edge *l2 = e2;
126 return l1->src != l2->src;
132 * @param src the source node of the transition
134 static LFTR_edge *LFTR_find(ir_node *src, iv_env *env) {
139 return set_find(env->lftr_edges, &key, sizeof(key), HASH_PTR(src));
145 * @param src the source node of the edge
146 * @param dst the destination node of the edge
147 * @param code the opcode of the transformed transition
148 * @param rc the region const used in the transition
149 * @param env the environment
151 static void LFTR_add(ir_node *src, ir_node *dst, ir_opcode code, ir_node *rc, iv_env *env) {
160 * There might be more than one edge here. This is rather bad
161 * because we currently store only one.
163 // assert(LFTR_find(src, env) == NULL);
164 set_insert(env->lftr_edges, &key, sizeof(key), HASH_PTR(src));
168 * Gets the node_entry of a node.
170 * @param irn the node
171 * @param env the environment
173 static node_entry *get_irn_ne(ir_node *irn, iv_env *env) {
174 node_entry *e = get_irn_link(irn);
177 e = obstack_alloc(&env->obst, sizeof(*e));
178 memset(e, 0, sizeof(*e));
179 set_irn_link(irn, e);
185 * Gets the scc from an induction variable.
187 * @param iv any node of the induction variable
188 * @param env the environment
190 static scc *get_iv_scc(ir_node *iv, iv_env *env) {
191 node_entry *e = get_irn_ne(iv, env);
196 * Check if irn is an IV.
198 * @param irn the node to check
199 * @param env the environment
201 * @returns the header if it is one, NULL else
203 static ir_node *is_iv(ir_node *irn, iv_env *env) {
204 return get_irn_ne(irn, env)->header;
208 * Check if irn is a region constant.
209 * The block or irn must strictly dominate the header block.
211 * @param irn the node to check
212 * @param header_block the header block of the induction variable
214 static int is_rc(ir_node *irn, ir_node *header_block) {
215 ir_node *block = get_nodes_block(irn);
217 return (block != header_block) && block_dominates(block, header_block);
221 * Set compare function for the quad set.
223 static int quad_cmp(const void *e1, const void *e2, size_t size) {
224 const quadruple_t *c1 = e1;
225 const quadruple_t *c2 = e2;
228 return c1->code != c2->code || c1->op1 != c2->op1 || c1->op2 != c2->op2;
232 * Check if an reduced operation was already calculated.
234 * @param code the opcode of the operation
235 * @param op1 the first operand of the operation
236 * @param op2 the second operand of the operation
237 * @param env the environment
239 * @return the already reduced node or NULL if this operation is not yet reduced
241 static ir_node *search(ir_opcode code, ir_node *op1, ir_node *op2, iv_env *env) {
242 quadruple_t key, *entry;
248 entry = set_find(env->quad_map, &key, sizeof(key),
249 (code * 9) ^ HASH_PTR(op1) ^HASH_PTR(op2));
256 * Add an reduced operation.
258 * @param code the opcode of the operation
259 * @param op1 the first operand of the operation
260 * @param op2 the second operand of the operation
261 * @param result the result of the reduced operation
262 * @param env the environment
264 static void add(ir_opcode code, ir_node *op1, ir_node *op2, ir_node *result, iv_env *env) {
272 set_insert(env->quad_map, &key, sizeof(key),
273 (code * 9) ^ HASH_PTR(op1) ^HASH_PTR(op2));
277 * Find a location where to place a bin-op whose operands are in
280 * @param block1 the block of the first operand
281 * @param block2 the block of the second operand
283 * Note that we know here that such a place must exists. Moreover, this means
284 * that either block1 dominates block2 or vice versa. So, just return
287 static ir_node *find_location(ir_node *block1, ir_node *block2) {
288 if (block_dominates(block1, block2))
290 assert(block_dominates(block2, block1));
292 } /* find_location */
295 * Create a node that executes an op1 code op1 operation.
297 * @param code the opcode to execute
298 * @param db debug info to add to the new node
299 * @param op1 the first operand
300 * @param op2 the second operand
301 * @param mode the mode of the new operation
303 * @return the newly created node
305 static ir_node *do_apply(ir_opcode code, dbg_info *db, ir_node *op1, ir_node *op2, ir_mode *mode) {
307 ir_node *block = find_location(get_nodes_block(op1), get_nodes_block(op2));
311 result = new_rd_Mul(db, block, op1, op2, mode);
314 result = new_rd_Add(db, block, op1, op2, mode);
317 result = new_rd_Sub(db, block, op1, op2, mode);
320 panic("Unsupported opcode");
327 * The Apply operation.
329 * @param orig the node that represent the original operation and determines
330 * the opcode, debug-info and mode of a newly created one
331 * @param op1 the first operand
332 * @param op2 the second operand
333 * @param env the environment
335 * @return the newly created node
337 static ir_node *apply(ir_node *header, ir_node *orig, ir_node *op1, ir_node *op2, iv_env *env) {
338 ir_opcode code = get_irn_opcode(orig);
339 ir_node *result = search(code, op1, op2, env);
341 if (result == NULL) {
342 dbg_info *db = get_irn_dbg_info(orig);
343 ir_node *op1_header = get_irn_ne(op1, env)->header;
344 ir_node *op2_header = get_irn_ne(op2, env)->header;
346 if (op1_header == header && is_rc(op2, op1_header)) {
347 result = reduce(orig, op1, op2, env);
349 else if (op2_header == header && is_rc(op1, op2_header)) {
350 result = reduce(orig, op2, op1, env);
353 result = do_apply(code, db, op1, op2, get_irn_mode(orig));
354 get_irn_ne(result, env)->header = NULL;
/**
 * The Reduce operation: rewrite "iv (op) rc" as a new induction variable
 * that computes the combined value incrementally.
 *
 * @param orig  the node that represent the original operation and determines
 *              the opcode, debug-info and mode of a newly created one
 * @param iv    the induction variable
 * @param rc    the region constant
 * @param env   the environment
 *
 * @return the reduced node
 */
static ir_node *reduce(ir_node *orig, ir_node *iv, ir_node *rc, iv_env *env) {
	ir_opcode code = get_irn_opcode(orig);
	ir_node *result = search(code, iv, rc, env);

	/* check if we have already done this operation on the iv */
	if (result == NULL) {
		node_entry *e, *iv_e;
		ir_mode *mode = get_irn_mode(orig);

		/* the reduced IV starts out as a copy of the original IV node */
		result = exact_copy(iv);

		if (get_irn_mode(result) != mode) {
			/*
			 * Beware: we must always create a new induction variable with the same mode
			 * as the node we are replacing. Especially this means the mode might be changed
			 * from P to I and back. This is always possible, because we have only Phi, Add
			 * However, this might lead to AddIs(Iu,Is) which we must fix. The best way to do this
			 * seems to be a post-pass, or we might end with useless Conv's.
			 */
			set_irn_mode(result, mode);
			env->need_postpass = 1;
		/* remember the reduction so later search() calls find it */
		add(code, iv, rc, result, env);
		DB((dbg, LEVEL_3, "  Created new %+F for %+F (%s %+F)\n", result, iv,
			get_irn_opname(orig), rc));
		/* the new node belongs to the same IV as the original one */
		iv_e = get_irn_ne(iv, env);
		e = get_irn_ne(result, env);
		e->header = iv_e->header;

		/* create the LFTR edge */
		LFTR_add(iv, result, code, rc, env);

		/* recursively reduce operands inside the IV, apply the rest */
		for (i = get_irn_arity(result) - 1; i >= 0; --i) {
			ir_node *o = get_irn_n(result, i);

			e = get_irn_ne(o, env);
			if (e->header == iv_e->header)
				o = reduce(orig, o, rc, env);
			else if (is_Phi(result) || code == iro_Mul)
				o = apply(iv_e->header, orig, o, rc, env);
			set_irn_n(result, i, o);
	DB((dbg, LEVEL_3, "  Already Created %+F for %+F (%s %+F)\n", result, iv,
		get_irn_opname(orig), rc));
/**
 * Update the scc for a newly created IV: starting from iv, collect all
 * nodes with the same header into the (fresh) SCC via a worklist walk.
 */
static void update_scc(ir_node *iv, node_entry *e, iv_env *env) {
	ir_node *header = e->header;
	waitq *wq = new_waitq();   /* worklist of IV nodes still to visit */

	DB((dbg, LEVEL_2, "  Creating SCC for new an induction variable:\n  "));
		ir_node *irn = waitq_get(wq);
		node_entry *ne = get_irn_ne(irn, env);

		/* prepend the node to the SCC member list */
		ne->next = pscc->head;
		DB((dbg, LEVEL_2, " %+F,", irn));
		/* enqueue all predecessors that belong to the same IV */
		for (i = get_irn_arity(irn) - 1; i >= 0; --i) {
			ir_node *pred = get_irn_n(irn, i);
			node_entry *pe = get_irn_ne(pred, env);

			if (pe->header == header && pe->pscc == NULL) {
				/* set the pscc here to ensure that the node is NOT enqueued another time */
	} while (! waitq_empty(wq));
	DB((dbg, LEVEL_2, "\n"));
/**
 * The Replace operation. We found a node representing iv (+,-,*) rc
 * that can be removed by replacing the induction variable iv by a new
 * one that 'applies' the operation 'irn'.
 *
 * @param irn  the node that will be replaced
 * @param iv   the induction variable
 * @param rc   the region constant
 * @param env  the environment
 */
static int replace(ir_node *irn, ir_node *iv, ir_node *rc, iv_env *env) {
	DB((dbg, LEVEL_2, "  Replacing %+F\n", irn));

	/* build (or look up) the reduced induction variable */
	result = reduce(irn, iv, rc, env);
		hook_strength_red(current_ir_graph, irn);
		exchange(irn, result);
		e = get_irn_ne(result, env);
		if (e->pscc == NULL) {
			/* the reduced node got no SCC yet: create one and fill it */
			e->pscc = obstack_alloc(&env->obst, sizeof(*e->pscc));
			memset(e->pscc, 0, sizeof(*e->pscc));
			update_scc(result, e, env);
/**
 * Check if a given node is a multiplication by 2, 4 or 8 — these can be
 * done with address-mode shifts on x86, so reducing them is not worthwhile.
 */
static int is_x86_shift_const(ir_node *mul) {
	/* normalization put constants on the right side */
	rc = get_Mul_right(mul);
		tarval *tv = get_Const_tarval(rc);

		if (tarval_is_long(tv)) {
			long value = get_tarval_long(tv);

			if (value == 2 || value == 4 || value == 8) {
				/* do not reduce multiplications by 2, 4, 8 */
} /* is_x86_shift_const */
/**
 * Check if an IV represents a counter with constant limits: exactly one
 * Phi with one constant init value and exactly one Add/Sub with a Const
 * increment.  The result is cached in the SCC (code/init/incr fields).
 *
 * @param iv   any node of the induction variable
 * @param env  the environment
 */
static int is_counter_iv(ir_node *iv, iv_env *env) {
	node_entry *e = get_irn_ne(iv, env);
	ir_node *have_init = NULL;   /* the single constant init value, once found */
	ir_node *have_incr = NULL;   /* the single constant increment, once found */
	ir_opcode code = iro_Bad;

	if (pscc->code != 0) {
		/* already analysed */
		return pscc->code != iro_Bad;

	/* assume "no counter" until the walk below proves otherwise */
	pscc->code = iro_Bad;
	for (irn = pscc->head; irn != NULL; irn = e->next) {
		if (have_incr != NULL)
		have_incr = get_Add_right(irn);
		if (! is_Const(have_incr)) {
			have_incr = get_Add_left(irn);
			if (! is_Const(have_incr))
		} else if (is_Sub(irn)) {
			if (have_incr != NULL)
			have_incr = get_Sub_right(irn);
			if (! is_Const(have_incr))
		} else if (is_Phi(irn)) {
			/* the Phi must have exactly one predecessor from outside the IV: the init value */
			for (i = get_Phi_n_preds(irn) - 1; i >= 0; --i) {
				ir_node *pred = get_Phi_pred(irn, i);
				node_entry *ne = get_irn_ne(pred, env);

				if (ne->header == e->header)
				if (have_init != NULL)
				if (! is_Const(pred))
		e = get_irn_ne(irn, env);

	/* cache the analysed limits in the SCC */
	pscc->init = get_Const_tarval(have_init);
	pscc->incr = get_Const_tarval(have_incr);
	return code != iro_Bad;
} /* is_counter_iv */
/**
 * Check the users of an induction variable for register pressure.
 *
 * @param iv   any node of the induction variable
 * @param env  the environment
 *
 * @return non-zero if the register pressure is estimated
 *         to not increase, zero else
 */
static int check_users_for_reg_pressure(ir_node *iv, iv_env *env) {
	ir_node *irn, *header;
	ir_node *have_user = NULL;   /* the single "real" user found so far */
	ir_node *have_cmp = NULL;    /* a Cmp user found so far */
	node_entry *e = get_irn_ne(iv, env);

	/* walk all members of the IV's SCC and classify their users */
	for (irn = pscc->head; irn != NULL; irn = e->next) {
		const ir_edge_t *edge;

		foreach_out_edge(irn, edge) {
			ir_node *user = get_edge_src_irn(edge);
			node_entry *ne = get_irn_ne(user, env);

			if (e->header == ne->header) {
				/* found user from the same IV */
			if (have_cmp != NULL) {
				/* more than one cmp, for now end here */
			/* user is a real user of the IV */
			if (have_user != NULL) {
				/* found the second user */
		e = get_irn_ne(irn, env);

	if (have_user == NULL) {
		/* no user, ignore */

	if (have_cmp == NULL) {
		/* fine, only one user, try to reduce */

	/*
	 * We found one user AND at least one cmp.
	 * We should check here if we can transform the Cmp.
	 *
	 * For now our capabilities for doing linear function test
	 * are limited, so check if the iv has the right form: Only ONE
	 * Phi, only one Add/Sub with a Const.
	 */
	if (! is_counter_iv(iv, env))

	/*
	 * Ok, we have only one increment AND it is a Const, we might be able
	 * to do a linear function test replacement, so go on.
	 */
} /* check_users_for_reg_pressure */
/**
 * Check if a node can be replaced (+, -, *).
 *
 * @param irn  the node to check
 * @param env  the environment
 *
 * @return non-zero if irn should be Replace'd
 */
static int check_replace(ir_node *irn, iv_env *env) {
	ir_node *left, *right, *iv, *rc;
	ir_op *op = get_irn_op(irn);
	ir_opcode code = get_op_code(op);

	left = get_binop_left(irn);
	right = get_binop_right(irn);

	/* exactly one operand must be an IV, the other a region constant */
	liv = is_iv(left, env);
	riv = is_iv(right, env);
	if (liv && is_rc(right, liv)) {
		iv = left; rc = right;
	/* for commutative ops also accept the mirrored operand order */
	else if (riv && is_op_commutative(op) &&
		iv = right; rc = left;

	if (env->osr_flags & osr_flag_keep_reg_pressure) {
		if (! check_users_for_reg_pressure(iv, env))
	return replace(irn, iv, rc, env);
} /* check_replace */
/**
 * Check which SCC's are induction variables: find the header, verify the
 * SCC contains only Phi/Add/Sub nodes whose outside inputs are region
 * constants, collapse useless Phi-only cycles, and mark members.
 *
 * @param env  the environment
 */
static void classify_iv(scc *pscc, iv_env *env) {
	ir_node *irn, *next, *header = NULL;
	node_entry *b, *h = NULL;
	int j, only_phi, num_outside;

	/* find the header block for this scc */
	for (irn = pscc->head; irn; irn = next) {
		node_entry *e = get_irn_link(irn);
		ir_node *block = get_nodes_block(irn);

		b = get_irn_ne(block, env);
		/* the header is the block with the smallest post-order number */
		if (h->POnum < b->POnum) {

	/* check if this scc contains only Phi, Add or Sub nodes */
	for (irn = pscc->head; irn; irn = next) {
		node_entry *e = get_irn_ne(irn, env);

		switch (get_irn_opcode(irn)) {
			for (j = get_irn_arity(irn) - 1; j >= 0; --j) {
				ir_node *pred = get_irn_n(irn, j);
				node_entry *pe = get_irn_ne(pred, env);

				if (pe->pscc != e->pscc) {
					/* not in the same SCC, must be a region const */
					if (! is_rc(pred, header)) {
						/* not an induction variable */
				} else if (out_rc != pred) {

	/* not an induction variable */

	/* found an induction variable */
	DB((dbg, LEVEL_2, "  Found an induction variable:\n  "));
	if (only_phi && num_outside == 1) {
		/* a phi cycle with only one real predecessor can be collapsed */
		DB((dbg, LEVEL_2, "  Found an USELESS Phi cycle:\n  "));

		for (irn = pscc->head; irn; irn = next) {
			node_entry *e = get_irn_ne(irn, env);

			exchange(irn, out_rc);

	/* set the header for every node in this scc */
	for (irn = pscc->head; irn; irn = next) {
		node_entry *e = get_irn_ne(irn, env);

		DB((dbg, LEVEL_2, " %+F,", irn));
	DB((dbg, LEVEL_2, "\n"));

	for (irn = pscc->head; irn; irn = next) {
		node_entry *e = get_irn_ne(irn, env);
/**
 * Process an SCC for the operator strength reduction.
 *
 * @param pscc  the SCC
 * @param env   the environment
 */
static void process_scc(scc *pscc, iv_env *env) {
	ir_node *head = pscc->head;
	node_entry *e = get_irn_link(head);

	DB((dbg, LEVEL_4, " SCC at %p:\n ", pscc));
	for (irn = pscc->head; irn != NULL; irn = next) {
		node_entry *e = get_irn_link(irn);

		DB((dbg, LEVEL_4, " %+F,", irn));
	DB((dbg, LEVEL_4, "\n"));

	if (e->next == NULL) {
		/* this SCC has only a single member */
		check_replace(head, env);
		/* multi-node SCC: check if it forms an induction variable */
		classify_iv(pscc, env);
/**
 * If an SCC is a Phi only cycle, remove it.
 *
 * @param pscc  an SCC that consists of Phi nodes only
 * @param env   the environment
 */
static void remove_phi_cycle(scc *pscc, iv_env *env) {
	/* check if this scc contains only Phi, Add or Sub nodes */
	for (irn = pscc->head; irn; irn = next) {
		node_entry *e = get_irn_ne(irn, env);

		for (j = get_irn_arity(irn) - 1; j >= 0; --j) {
			ir_node *pred = get_irn_n(irn, j);
			node_entry *pe = get_irn_ne(pred, env);

			if (pe->pscc != e->pscc) {
				/* not in the same SCC, must be the only input */
			} else if (out_rc != pred) {

	/* found a Phi cycle */
	DB((dbg, LEVEL_2, "  Found an USELESS Phi cycle:\n  "));

	/* replace every Phi of the cycle by its single outside input */
	for (irn = pscc->head; irn; irn = next) {
		node_entry *e = get_irn_ne(irn, env);

		exchange(irn, out_rc);
} /* remove_phi_cycle */
/**
 * Process a SCC for the Phi cycle remove.
 *
 * @param pscc  the SCC
 * @param env   the environment
 */
static void process_phi_only_scc(scc *pscc, iv_env *env) {
	ir_node *head = pscc->head;
	node_entry *e = get_irn_link(head);

	DB((dbg, LEVEL_4, " SCC at %p:\n ", pscc));
	for (irn = pscc->head; irn; irn = next) {
		node_entry *e = get_irn_link(irn);

		DB((dbg, LEVEL_4, " %+F,", irn));
	DB((dbg, LEVEL_4, "\n"));

	/* only multi-node Phi SCCs are candidates for removal */
	remove_phi_cycle(pscc, env);
} /* process_phi_only_scc */
922 * Push a node onto the stack.
924 * @param env the environment
925 * @param n the node to push
927 static void push(iv_env *env, ir_node *n) {
930 if (env->tos == ARR_LEN(env->stack)) {
931 int nlen = ARR_LEN(env->stack) * 2;
932 ARR_RESIZE(ir_node *, env->stack, nlen);
934 env->stack[env->tos++] = n;
935 e = get_irn_ne(n, env);
940 * Pop a node from the stack.
942 * @param env the environment
944 * @return The topmost node
946 static ir_node *pop(iv_env *env) {
947 ir_node *n = env->stack[--env->tos];
948 node_entry *e = get_irn_ne(n, env);
/**
 * Do Tarjan's SCC algorithm and drive OSR: depth-first search that
 * numbers nodes, maintains low-links via the node stack and hands each
 * completed SCC to env->process_scc().
 *
 * @param irn  start at this node
 * @param env  the environment
 */
static void dfs(ir_node *irn, iv_env *env) {
	node_entry *node = get_irn_ne(irn, env);

	mark_irn_visited(irn);

	/* do not put blocks into the scc */
	n = get_irn_arity(irn);
	for (i = 0; i < n; ++i) {
		ir_node *pred = get_irn_n(irn, i);

		if (!irn_visited(pred))
		ir_node *block = get_nodes_block(irn);

		node->DFSnum = env->nextDFSnum++;
		node->low = node->DFSnum;

		/* handle the block */
		if (!irn_visited(block))

		n = get_irn_arity(irn);
		for (i = 0; i < n; ++i) {
			ir_node *pred = get_irn_n(irn, i);
			node_entry *o = get_irn_ne(pred, env);

			if (!irn_visited(pred)) {
				/* tree edge: propagate the child's low-link upwards */
				node->low = MIN(node->low, o->low);
			if (o->DFSnum < node->DFSnum && o->in_stack)
				node->low = MIN(o->DFSnum, node->low);

		/* root of an SCC found: pop its members off the stack */
		if (node->low == node->DFSnum) {
			scc *pscc = obstack_alloc(&env->obst, sizeof(*pscc));

			memset(pscc, 0, sizeof(*pscc));
				e = get_irn_ne(x, env);

				e->next = pscc->head;
			env->process_scc(pscc, env);
/**
 * Do the DFS by starting at the End node of a graph.
 *
 * @param irg  the graph to process
 * @param env  the environment
 */
static void do_dfs(ir_graph *irg, iv_env *env) {
	ir_graph *rem = current_ir_graph;
	ir_node *end = get_irg_end(irg);

	ir_reserve_resources(irg, IR_RESOURCE_IRN_VISITED);
	current_ir_graph = irg;
	inc_irg_visited(irg);

	/* visit all visible nodes */

	/* visit the keep-alives */
	for (i = get_End_n_keepalives(end) - 1; i >= 0; --i) {
		ir_node *ka = get_End_keepalive(end, i);

		if (!irn_visited(ka))

	ir_free_resources(irg, IR_RESOURCE_IRN_VISITED);
	current_ir_graph = rem;
1051 * Post-block-walker: assign the post-order number.
1053 static void assign_po(ir_node *block, void *ctx) {
1055 node_entry *e = get_irn_ne(block, env);
1057 e->POnum = env->POnum++;
/**
 * Apply one LFTR edge operation.
 * Return NULL if the transformation cannot be done safely without
 * risking overflow.
 *
 * @param iv   the induction variable
 * @param rc   the constant that should be translated
 * @param e    the LFTR edge
 * @param env  the IV environment
 *
 * @return the translated region constant or NULL
 *         if the translation was not possible
 *
 * In the current implementation only the last edge is stored, so
 * only one chain exists. That's why we might miss some opportunities.
 */
static ir_node *applyOneEdge(ir_node *iv, ir_node *rc, LFTR_edge *e, iv_env *env) {
	if (env->osr_flags & osr_flag_lftr_with_ov_check) {
		tarval *tv_l, *tv_r, *tv, *tv_init, *tv_incr, *tv_end;
		tarval_int_overflow_mode_t ovmode;

		/* overflow safety can only be proven for counter IVs */
		if (! is_counter_iv(iv, env)) {
			DB((dbg, LEVEL_4, " not counter IV"));

		/* overflow can only be decided for Consts */
		if (! is_Const(e->rc)) {
			if (e->code == iro_Add && mode_is_reference(get_irn_mode(e->rc))) {
				/* However we allow ONE Pointer Add, as pointer arithmetic with wrap
				   around is undefined anyway */
				return do_apply(e->code, NULL, rc, e->rc, get_irn_mode(e->rc));
			DB((dbg, LEVEL_4, " = UNKNOWN (%+F)", e->rc));

		tv_l = get_Const_tarval(rc);
		tv_r = get_Const_tarval(e->rc);

		/* let tarval arithmetic report overflow as tarval_bad */
		ovmode = tarval_get_integer_overflow_mode();
		tarval_set_integer_overflow_mode(TV_OVERFLOW_BAD);

		pscc = get_iv_scc(iv, env);
		tv_incr = pscc->incr;
		tv_init = pscc->init;

		/*
		 * Check that no overflow occurs:
		 * init must be transformed without overflow
		 * the new rc must be transformed without overflow
		 * rc +/- incr must be possible without overflow
		 */
			tv = tarval_mul(tv_l, tv_r);
			tv_init = tarval_mul(tv_init, tv_r);
			tv_incr = tarval_mul(tv_incr, tv_r);
			DB((dbg, LEVEL_4, " * %+F", tv_r));
			tv = tarval_add(tv_l, tv_r);
			tv_init = tarval_add(tv_init, tv_r);
			DB((dbg, LEVEL_4, " + %+F", tv_r));
			tv = tarval_sub(tv_l, tv_r, NULL);
			tv_init = tarval_sub(tv_init, tv_r, NULL);
			DB((dbg, LEVEL_4, " - %+F", tv_r));
			panic("Unsupported opcode");

		if (pscc->code == iro_Add) {
			tv_end = tarval_add(tv, tv_incr);
			assert(pscc->code == iro_Sub);
			tv_end = tarval_sub(tv, tv_incr, NULL);

		tarval_set_integer_overflow_mode(ovmode);

		if (tv == tarval_bad || tv_init == tarval_bad || tv_end == tarval_bad) {
			DB((dbg, LEVEL_4, " = OVERFLOW"));
		return new_Const(tv);
	/* no overflow checking requested: just build the transformed constant */
	return do_apply(e->code, NULL, rc, e->rc, get_irn_mode(e->dst));
} /* applyOneEdge */
/**
 * Applies the operations represented by the LFTR edges to a
 * region constant and returns the value.
 * Return NULL if the transformation cannot be done safely without
 * risking overflow.
 *
 * @param pIV  points to the IV node that starts the LFTR edge chain;
 *             after translation points to the new IV
 * @param rc   the region constant that should be translated
 * @param env  the IV environment
 *
 * @return the translated region constant or NULL
 *         if the translation was not possible
 */
static ir_node *applyEdges(ir_node **pIV, ir_node *rc, iv_env *env) {
	if (env->osr_flags & osr_flag_lftr_with_ov_check) {
		/* overflow can only be decided for Consts */
		if (! is_counter_iv(iv, env)) {
			DB((dbg, LEVEL_4, "not counter IV\n", rc));
		if (! is_Const(rc)) {
			DB((dbg, LEVEL_4, " = UNKNOWN (%+F)\n", rc));
		DB((dbg, LEVEL_4, "%+F", get_Const_tarval(rc)));

		/* follow the chain of LFTR edges, translating rc at each step */
		LFTR_edge *e = LFTR_find(iv, env);
			rc = applyOneEdge(iv, rc, e, env);
	DB((dbg, LEVEL_3, "\n"));
/**
 * Walker, finds Cmp(iv, rc) or Cmp(rc, iv)
 * and tries to optimize them by translating the region constant
 * along the LFTR edge chain and comparing against the new IV.
 */
static void do_lftr(ir_node *cmp, void *ctx) {
	ir_node *left, *right, *liv, *riv;
	ir_node *nleft = NULL, *nright = NULL;

	left = get_Cmp_left(cmp);
	right = get_Cmp_right(cmp);

	/* one side must be an IV, the other a region constant */
	liv = is_iv(left, env);
	riv = is_iv(right, env);
	if (liv && is_rc(right, liv)) {
		iv = left; rc = right;

		nright = applyEdges(&iv, rc, env);
	else if (riv && is_rc(left, riv)) {
		iv = right; rc = left;

		nleft = applyEdges(&iv, rc, env);

	if (nleft && nright) {
		DB((dbg, LEVEL_2, "  LFTR for %+F\n", cmp));
		set_Cmp_left(cmp, nleft);
		set_Cmp_right(cmp, nright);
		++env->lftr_replaced;
1237 * do linear function test replacement.
1239 * @param irg the graph that should be optimized
1240 * @param env the IV environment
1242 static void lftr(ir_graph *irg, iv_env *env) {
1243 irg_walk_graph(irg, NULL, do_lftr, env);
/**
 * Pre-walker: set all node links to NULL and fix the
 * block of Proj nodes (move them into their predecessor's block).
 */
static void clear_and_fix(ir_node *irn, void *env) {
	set_irn_link(irn, NULL);

		ir_node *pred = get_Proj_pred(irn);
		ir_node *pred_block = get_nodes_block(pred);

		/* a Proj should live in the same block as its predecessor */
		if (get_nodes_block(irn) != pred_block) {
			set_nodes_block(irn, pred_block);
} /* clear_and_fix */
/* Remove any Phi cycles with only one real input. */
void remove_phi_cycles(ir_graph *irg) {
	rem = current_ir_graph;
	current_ir_graph = irg;

	FIRM_DBG_REGISTER(dbg, "firm.opt.remove_phi");

	DB((dbg, LEVEL_1, "Doing Phi cycle removement for %+F\n", irg));

	obstack_init(&env.obst);
	env.stack = NEW_ARR_F(ir_node *, 128);
	/* no reductions are done here, only Phi-cycle removal */
	env.quad_map = NULL;
	env.lftr_edges = NULL;
	env.lftr_replaced = 0;
	env.need_postpass = 0;
	env.process_scc = process_phi_only_scc;

	/* Clear all links and move Proj nodes into the
	   same block as their predecessors.
	   This can improve the placement of new nodes.
	 */
	irg_walk_graph(irg, NULL, clear_and_fix, &projs_moved);
		set_irg_outs_inconsistent(irg);

	/* we need outs for calculating the post order */
	assure_irg_outs(irg);

	/* calculate the post order number for blocks. */
	irg_out_block_walk(get_irg_start_block(irg), NULL, assign_po, &env);

	/* calculate the SCC's and drive OSR. */
	ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK);
	ir_free_resources(irg, IR_RESOURCE_IRN_LINK);
		set_irg_outs_inconsistent(irg);
	DB((dbg, LEVEL_1, "remove_phi_cycles: %u Cycles removed\n\n", env.replaced));

	DEL_ARR_F(env.stack);
	obstack_free(&env.obst, NULL);

	current_ir_graph = rem;
} /* remove_phi_cycles */
/**
 * Post-walker: fix Add and Sub nodes that were results of I<->P conversions
 * by inserting Conv nodes so operand modes match the result mode again.
 */
static void fix_adds_and_subs(ir_node *irn, void *ctx) {
	ir_mode *mode = get_irn_mode(irn);

	if (mode_is_int(mode)) {
		/* convert any Add operand whose mode differs from the result mode */
		pred = get_Add_left(irn);
		if (get_irn_mode(pred) != mode) {
			ir_node *block = get_nodes_block(pred);

			pred = new_r_Conv(block, pred, mode);
			set_Add_left(irn, pred);
		pred = get_Add_right(irn);
		if (get_irn_mode(pred) != mode) {
			ir_node *block = get_nodes_block(pred);

			pred = new_r_Conv(block, pred, mode);
			set_Add_right(irn, pred);
	} else if (is_Sub(irn)) {
		ir_mode *mode = get_irn_mode(irn);

		if (mode_is_int(mode)) {
			ir_node *left = get_Sub_left(irn);
			ir_node *right = get_Sub_right(irn);
			ir_mode *l_mode = get_irn_mode(left);
			ir_mode *r_mode = get_irn_mode(right);

			/* only integer operands can be safely Conv'ed */
			if (mode_is_int(l_mode) && mode_is_int(r_mode)) {
				if (l_mode != mode) {
					ir_node *block = get_nodes_block(left);

					left = new_r_Conv(block, left, mode);
					set_Sub_left(irn, left);
				if (r_mode != mode) {
					ir_node *block = get_nodes_block(right);

					right = new_r_Conv(block, right, mode);
					set_Sub_right(irn, right);
} /* fix_adds_and_subs */
/* Performs Operator Strength Reduction for the passed graph. */
void opt_osr(ir_graph *irg, unsigned flags) {
	if (! get_opt_strength_red()) {
		/* only kill Phi cycles */
		remove_phi_cycles(irg);

	rem = current_ir_graph;
	current_ir_graph = irg;

	FIRM_DBG_REGISTER(dbg, "firm.opt.osr");

	DB((dbg, LEVEL_1, "Doing Operator Strength Reduction for %+F\n", irg));

	obstack_init(&env.obst);
	env.stack = NEW_ARR_F(ir_node *, 128);
	env.quad_map = new_set(quad_cmp, 64);
	env.lftr_edges = new_set(LFTR_cmp, 64);
	env.lftr_replaced = 0;
	env.osr_flags = flags;
	env.need_postpass = 0;
	env.process_scc = process_scc;

	/* Clear all links and move Proj nodes into the
	   same block as their predecessors.
	   This can improve the placement of new nodes.
	 */
	irg_walk_graph(irg, NULL, clear_and_fix, &projs_moved);
		set_irg_outs_inconsistent(irg);

	/* we need dominance */
	edges = edges_assure(irg);

	/* calculate the post order number for blocks by walking the out edges. */
	assure_irg_outs(irg);
	irg_block_edges_walk(get_irg_start_block(irg), NULL, assign_po, &env);

	/* calculate the SCC's and drive OSR. */
	ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK);

	/* mode changes during reduction may require Conv insertion */
	if (env.need_postpass)
		irg_walk_graph(irg, NULL, fix_adds_and_subs, &env);

	/* try linear function test replacements */
		set_irg_outs_inconsistent(irg);
	DB((dbg, LEVEL_1, "Replacements: %u + %u (lftr)\n\n", env.replaced, env.lftr_replaced));

	ir_free_resources(irg, IR_RESOURCE_IRN_LINK);

	del_set(env.lftr_edges);
	del_set(env.quad_map);
	DEL_ARR_F(env.stack);
	obstack_free(&env.obst, NULL);

		edges_deactivate(irg);
	current_ir_graph = rem;