2 * Copyright (C) 1995-2008 University of Karlsruhe. All right reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Operator Strength Reduction.
24 * @author Michael Beck
27 * Implementation of the Operator Strength Reduction algorithm
28 * by Keith D. Cooper, L. Taylor Simpson, Christopher A. Vick.
34 #include "iroptimize.h"
55 /** The debug handle. */
56 DEBUG_ONLY(static firm_dbg_module_t *dbg;)
/* Fields of the scc descriptor: one record per strongly connected component
   of induction-variable candidates.
   NOTE(review): the surrounding "typedef struct scc { ... } scc;" lines are
   missing from this extract. */
60 ir_node *head; /**< the head of the list */
61 tarval *init; /**< the init value iff only one exists. */
62 tarval *incr; /**< the induction variable increment if only a single const exists. */
63 unsigned code; /**< == iro_Add if +incr, iro_Sub if -incr, 0 if not analysed, iro_Bad else */
/** Per-node bookkeeping attached via the node's link field; DFSnum/low/in_stack
    drive Tarjan's SCC algorithm (see dfs()). */
67 typedef struct node_entry {
68 unsigned DFSnum; /**< the DFS number of this node */
69 unsigned low; /**< the low number of this node */
70 ir_node *header; /**< the header of this node */
71 int in_stack; /**< flag, set if the node is on the stack */
72 ir_node *next; /**< link to the next node in the same scc */
73 scc *pscc; /**< the scc of this node */
74 unsigned POnum; /**< the post order number for blocks */
77 /** The environment. */
78 typedef struct iv_env {
79 struct obstack obst; /**< an obstack for allocations */
80 ir_node **stack; /**< the node stack */
81 int tos; /**< top-of-stack index */
82 unsigned nextDFSnum; /**< the current DFS number */
83 unsigned POnum; /**< current post order number */
84 set *quad_map; /**< a map from (op, iv, rc) to node */
85 set *lftr_edges; /**< the set of lftr edges */
86 unsigned replaced; /**< number of replaced ops */
87 unsigned lftr_replaced; /**< number of applied linear function test replacements */
88 unsigned flags; /**< additional flags */
89 /** Function called to process a SCC. */
90 void (*process_scc)(scc *pscc, struct iv_env *env);
94 * An entry in the (op, node, node) -> node map.
96 typedef struct quadruple_t {
97 ir_opcode code; /**< the opcode of the reduced operation */
98 ir_node *op1; /**< the first operand of the reduced operation */
99 ir_node *op2; /**< the second operand of the reduced operation */
101 ir_node *res; /**< the reduced operation */
/** An LFTR (linear function test replacement) edge: records which operation
    maps a source node to its reduced destination. */
107 typedef struct LFTR_edge {
108 ir_node *src; /**< the source node */
109 ir_node *dst; /**< the destination node */
110 ir_opcode code; /**< the opcode that must be applied */
111 ir_node *rc; /**< the region const that must be applied */
115 static ir_node *reduce(ir_node *orig, ir_node *iv, ir_node *rc, iv_env *env);
118 * Compare two LFTR edges.
120 static int LFTR_cmp(const void *e1, const void *e2, size_t size) {
121 const LFTR_edge *l1 = e1;
122 const LFTR_edge *l2 = e2;
125 return l1->src != l2->src;
131 static LFTR_edge *LFTR_find(ir_node *src, iv_env *env) {
136 return set_find(env->lftr_edges, &key, sizeof(key), HASH_PTR(src));
142 static void LFTR_add(ir_node *src, ir_node *dst, ir_opcode code, ir_node *rc, iv_env *env) {
151 * There might be more than one edge here. This is rather bad
152 * because we currently store only one.
154 // assert(LFTR_find(src, env) == NULL);
155 set_insert(env->lftr_edges, &key, sizeof(key), HASH_PTR(src));
159 * Gets the node_entry of a node
161 static node_entry *get_irn_ne(ir_node *irn, iv_env *env) {
162 node_entry *e = get_irn_link(irn);
165 e = obstack_alloc(&env->obst, sizeof(*e));
166 memset(e, 0, sizeof(*e));
167 set_irn_link(irn, e);
173 * Gets the scc from an IV.
175 static scc *get_iv_scc(ir_node *iv, iv_env *env) {
176 node_entry *e = get_irn_ne(iv, env);
181 * Check if irn is an IV.
183 * @param irn the node to check
184 * @param env the environment
186 * @returns the header if it is one, NULL else
188 static ir_node *is_iv(ir_node *irn, iv_env *env) {
189 return get_irn_ne(irn, env)->header;
193 * Check if irn is a region constant.
194 * The block or irn must strictly dominate the header block.
196 * @param irn the node to check
197 * @param header_block the header block of the induction variable
199 static int is_rc(ir_node *irn, ir_node *header_block) {
200 ir_node *block = get_nodes_block(irn);
202 return (block != header_block) && block_dominates(block, header_block);
206 * Set compare function for the quad set.
208 static int quad_cmp(const void *e1, const void *e2, size_t size) {
209 const quadruple_t *c1 = e1;
210 const quadruple_t *c2 = e2;
213 return c1->code != c2->code || c1->op1 != c2->op1 || c1->op2 != c2->op2;
217 * Check if an reduced operation was already calculated.
219 * @param code the opcode of the operation
220 * @param op1 the first operand of the operation
221 * @param op2 the second operand of the operation
222 * @param env the environment
224 * @return the already reduced node or NULL if this operation is not yet reduced
226 static ir_node *search(ir_opcode code, ir_node *op1, ir_node *op2, iv_env *env) {
227 quadruple_t key, *entry;
233 entry = set_find(env->quad_map, &key, sizeof(key),
234 (code * 9) ^ HASH_PTR(op1) ^HASH_PTR(op2));
241 * Add an reduced operation.
243 * @param code the opcode of the operation
244 * @param op1 the first operand of the operation
245 * @param op2 the second operand of the operation
246 * @param result the result of the reduced operation
247 * @param env the environment
249 static void add(ir_opcode code, ir_node *op1, ir_node *op2, ir_node *result, iv_env *env) {
257 set_insert(env->quad_map, &key, sizeof(key),
258 (code * 9) ^ HASH_PTR(op1) ^HASH_PTR(op2));
262 * Find a location where to place a bin-op whose operands are in
265 * @param block1 the block of the first operand
266 * @param block2 the block of the second operand
268 * Note that we know here that such a place must exists. Moreover, this means
269 * that either block1 dominates block2 or vice versa. So, just return
272 static ir_node *find_location(ir_node *block1, ir_node *block2) {
273 if (block_dominates(block1, block2))
275 assert(block_dominates(block2, block1));
280 * Create a node that executes an op1 code op1 operation.
282 * @param code the opcode to execute
283 * @param db debug info to add to the new node
284 * @param op1 the first operand
285 * @param op2 the second operand
286 * @param mode the mode of the new operation
288 * @return the newly created node
290 static ir_node *do_apply(ir_opcode code, dbg_info *db, ir_node *op1, ir_node *op2, ir_mode *mode) {
291 ir_graph *irg = current_ir_graph;
293 ir_node *block = find_location(get_nodes_block(op1), get_nodes_block(op2));
297 result = new_rd_Mul(db, irg, block, op1, op2, mode);
300 result = new_rd_Add(db, irg, block, op1, op2, mode);
303 result = new_rd_Sub(db, irg, block, op1, op2, mode);
306 panic("Unsupported opcode");
313 * The Apply operation.
315 * @param orig the node that represent the original operation and determines
316 * the opcode, debug-info and mode of a newly created one
317 * @param op1 the first operand
318 * @param op2 the second operand
319 * @param env the environment
321 * @return the newly created node
323 static ir_node *apply(ir_node *header, ir_node *orig, ir_node *op1, ir_node *op2, iv_env *env) {
324 ir_opcode code = get_irn_opcode(orig);
325 ir_node *result = search(code, op1, op2, env);
327 if (result == NULL) {
328 dbg_info *db = get_irn_dbg_info(orig);
329 ir_node *op1_header = get_irn_ne(op1, env)->header;
330 ir_node *op2_header = get_irn_ne(op2, env)->header;
332 if (op1_header == header && is_rc(op2, op1_header)) {
333 result = reduce(orig, op1, op2, env);
335 else if (op2_header == header && is_rc(op1, op2_header)) {
336 result = reduce(orig, op2, op1, env);
339 result = do_apply(code, db, op1, op2, get_irn_mode(orig));
340 get_irn_ne(result, env)->header = NULL;
347 * The Reduce operation.
349 * @param orig the node that represent the original operation and determines
350 * the opcode, debug-info and mode of a newly created one
351 * @param iv the induction variable
352 * @param rc the region constant
353 * @param env the environment
355 * @return the reduced node
357 static ir_node *reduce(ir_node *orig, ir_node *iv, ir_node *rc, iv_env *env) {
358 ir_opcode code = get_irn_opcode(orig);
359 ir_node *result = search(code, iv, rc, env);
361 if (result == NULL) {
362 node_entry *e, *iv_e;
364 ir_mode *mode = get_irn_mode(orig);
366 result = exact_copy(iv);
368 /* Beware: we must always create a new induction variable with the same mode
369 as the node we are replacing. Especially this means the mode might be changed
370 from P to I and back. This is always possible, because we have only Phi, Add
372 set_irn_mode(result, mode);
373 add(code, iv, rc, result, env);
374 DB((dbg, LEVEL_3, " Created new %+F for %+F (%s %+F)\n", result, iv,
375 get_irn_opname(orig), rc));
377 iv_e = get_irn_ne(iv, env);
378 e = get_irn_ne(result, env);
379 e->header = iv_e->header;
381 /* create the LFTR edge */
382 LFTR_add(iv, result, code, rc, env);
384 n = get_irn_arity(result);
385 for (i = 0; i < n; ++i) {
386 ir_node *o = get_irn_n(result, i);
388 e = get_irn_ne(o, env);
389 if (e->header == iv_e->header)
390 o = reduce(orig, o, rc, env);
391 else if (is_Phi(result) || code == iro_Mul)
392 o = apply(iv_e->header, orig, o, rc, env);
393 set_irn_n(result, i, o);
397 DB((dbg, LEVEL_3, " Already Created %+F for %+F (%s %+F)\n", result, iv,
398 get_irn_opname(orig), rc));
404 * Update the scc for a newly created IV.
406 static void update_scc(ir_node *iv, node_entry *e, iv_env *env) {
408 ir_node *header = e->header;
409 waitq *wq = new_waitq();
411 DB((dbg, LEVEL_2, " Creating SCC for new an induction variable:\n "));
415 ir_node *irn = waitq_get(wq);
416 node_entry *ne = get_irn_ne(irn, env);
420 ne->next = pscc->head;
422 DB((dbg, LEVEL_2, " %+F,", irn));
424 for (i = get_irn_arity(irn) - 1; i >= 0; --i) {
425 ir_node *pred = get_irn_n(irn, i);
426 node_entry *pe = get_irn_ne(pred, env);
428 if (pe->header == header && pe->pscc == NULL) {
429 /* set the pscc here to ensure that the node is NOT enqueued another time */
434 } while (! waitq_empty(wq));
436 DB((dbg, LEVEL_2, "\n"));
440 * The Replace operation.
442 * @param irn the node that will be replaced
443 * @param iv the induction variable
444 * @param rc the region constant
445 * @param env the environment
447 static int replace(ir_node *irn, ir_node *iv, ir_node *rc, iv_env *env) {
450 DB((dbg, LEVEL_2, " Replacing %+F\n", irn));
452 result = reduce(irn, iv, rc, env);
456 hook_strength_red(current_ir_graph, irn);
457 exchange(irn, result);
458 e = get_irn_ne(result, env);
459 if (e->pscc == NULL) {
460 e->pscc = obstack_alloc(&env->obst, sizeof(*e->pscc));
461 memset(e->pscc, 0, sizeof(*e->pscc));
462 update_scc(result, e, env);
/*
 * NOTE(review): several lines of this function are missing from this extract
 * (the declaration of rc, the is_Const() guard and the return statements);
 * the visible lines are kept verbatim.
 */
472 * check if a given node is a mul with 2, 4, 8
474 static int is_x86_shift_const(ir_node *mul) {
480 /* normalization put constants on the right side */
481 rc = get_Mul_right(mul);
483 tarval *tv = get_Const_tarval(rc);
485 if (tarval_is_long(tv)) {
486 long value = get_tarval_long(tv);
488 if (value == 2 || value == 4 || value == 8) {
489 /* do not reduce multiplications by 2, 4, 8 */
/*
 * Determines whether an IV is a simple counter (one Phi, one Add/Sub with a
 * Const increment, Const init). On success pscc->init/incr are filled in
 * (L262-263); the analysis result appears to be cached in pscc->code
 * (iro_Bad == "not a counter") -- see the early-out at the top.
 * NOTE(review): this extract is missing several lines (loop variable
 * declarations, "return 0" failure paths and closing braces); the visible
 * lines are kept verbatim.
 */
499 * Check if an IV represents a counter with constant limits.
501 static int is_counter_iv(ir_node *iv, iv_env *env) {
502 node_entry *e = get_irn_ne(iv, env);
504 ir_node *have_init = NULL;
505 ir_node *have_incr = NULL;
506 ir_opcode code = iro_Bad;
509 if (pscc->code != 0) {
510 /* already analysed */
511 return pscc->code != iro_Bad;
514 pscc->code = iro_Bad;
515 for (irn = pscc->head; irn != NULL; irn = e->next) {
517 if (have_incr != NULL)
520 have_incr = get_Add_right(irn);
521 if (! is_Const(have_incr)) {
522 have_incr = get_Add_left(irn);
523 if (! is_Const(have_incr))
527 } else if (is_Sub(irn)) {
528 if (have_incr != NULL)
531 have_incr = get_Sub_right(irn);
532 if (! is_Const(have_incr))
535 } else if (is_Phi(irn)) {
538 for (i = get_Phi_n_preds(irn) - 1; i >= 0; --i) {
539 ir_node *pred = get_Phi_pred(irn, i);
540 node_entry *ne = get_irn_ne(pred, env);
542 if (ne->header == e->header)
544 if (have_init != NULL)
547 if (! is_Const(pred))
552 e = get_irn_ne(irn, env);
554 pscc->init = get_Const_tarval(have_init);
555 pscc->incr = get_Const_tarval(have_incr);
557 return code != iro_Bad;
/*
 * Walks all out-edges of the IV's scc members, counting "real" users (outside
 * the IV) and Cmp users; bails out on more than one of either. With exactly
 * one user and at least one Cmp it falls through to the is_counter_iv() check.
 * NOTE(review): this extract is missing lines (declarations of pscc,
 * the return statements and closing braces); the visible lines are kept
 * verbatim.
 */
561 * Check the users of an induction variable for register pressure.
563 static int check_users_for_reg_pressure(ir_node *iv, iv_env *env) {
564 ir_node *irn, *header;
565 ir_node *have_user = NULL;
566 ir_node *have_cmp = NULL;
567 node_entry *e = get_irn_ne(iv, env);
571 for (irn = pscc->head; irn != NULL; irn = e->next) {
572 const ir_edge_t *edge;
574 foreach_out_edge(irn, edge) {
575 ir_node *user = get_edge_src_irn(edge);
576 node_entry *ne = get_irn_ne(user, env);
578 if (e->header == ne->header) {
579 /* found user from the same IV */
583 if (have_cmp != NULL) {
584 /* more than one cmp, for now end here */
589 /* user is a real user of the IV */
590 if (have_user != NULL) {
591 /* found the second user */
597 e = get_irn_ne(irn, env);
600 if (have_user == NULL) {
601 /* no user, ignore */
605 if (have_cmp == NULL) {
606 /* fine, only one user, try to reduce */
610 * We found one user AND at least one cmp.
611 * We should check here if we can transform the Cmp.
613 * For now our capabilities for doing linear function test
614 * are limited, so check if the iv has the right form: Only ONE
615 * phi, only one Add/Sub with a Const
617 if (! is_counter_iv(iv, env))
621 * Ok, we have only one increment AND it is a Const, we might be able
622 * to do a linear function test replacement, so go on.
/*
 * Classifies a binary node as (iv, rc) pair and dispatches to replace().
 * NOTE(review): this extract is missing lines (the opcode switch/guard around
 * lines 639-646, declarations of liv/riv, the commutative-case condition tail
 * and the return paths); the visible lines are kept verbatim.
 */
628 * Check if a node can be replaced (+, -, *).
630 * @param irn the node to check
631 * @param env the environment
633 * @return non-zero if irn should be Replace'd
635 static int check_replace(ir_node *irn, iv_env *env) {
636 ir_node *left, *right, *iv, *rc;
637 ir_op *op = get_irn_op(irn);
638 ir_opcode code = get_op_code(op);
647 left = get_binop_left(irn);
648 right = get_binop_right(irn);
650 liv = is_iv(left, env);
651 riv = is_iv(right, env);
652 if (liv && is_rc(right, liv)) {
653 iv = left; rc = right;
655 else if (riv && is_op_commutative(op) &&
657 iv = right; rc = left;
661 if (env->flags & osr_flag_keep_reg_pressure) {
662 if (! check_users_for_reg_pressure(iv, env))
665 return replace(irn, iv, rc, env);
/*
 * Classifies an scc: finds its header block (highest post-order number),
 * verifies the scc contains only Phi/Add/Sub nodes with region-constant
 * external inputs, collapses useless single-input Phi cycles, and otherwise
 * marks every member with the header (making it an IV).
 * NOTE(review): this extract is heavily gapped (declarations of out_rc/
 * only_phi/num_outside updates, switch cases, closing braces and the "fail:"
 * path are missing); the visible lines are kept verbatim.
 */
675 * Check which SCC's are induction variables.
678 * @param env the environment
680 static void classify_iv(scc *pscc, iv_env *env) {
681 ir_node *irn, *next, *header = NULL;
682 node_entry *b, *h = NULL;
683 int j, only_phi, num_outside;
686 /* find the header block for this scc */
687 for (irn = pscc->head; irn; irn = next) {
688 node_entry *e = get_irn_link(irn);
689 ir_node *block = get_nodes_block(irn);
692 b = get_irn_ne(block, env);
695 if (h->POnum < b->POnum) {
706 /* check if this scc contains only Phi, Add or Sub nodes */
710 for (irn = pscc->head; irn; irn = next) {
711 node_entry *e = get_irn_ne(irn, env);
714 switch (get_irn_opcode(irn)) {
720 for (j = get_irn_arity(irn) - 1; j >= 0; --j) {
721 ir_node *pred = get_irn_n(irn, j);
722 node_entry *pe = get_irn_ne(pred, env);
724 if (pe->pscc != e->pscc) {
725 /* not in the same SCC, must be a region const */
726 if (! is_rc(pred, header)) {
727 /* not an induction variable */
733 } else if (out_rc != pred) {
740 /* not an induction variable */
744 /* found an induction variable */
745 DB((dbg, LEVEL_2, " Found an induction variable:\n "));
746 if (only_phi && num_outside == 1) {
747 /* a phi cycle with only one real predecessor can be collapsed */
748 DB((dbg, LEVEL_2, " Found an USELESS Phi cycle:\n "));
750 for (irn = pscc->head; irn; irn = next) {
751 node_entry *e = get_irn_ne(irn, env);
754 exchange(irn, out_rc);
760 /* set the header for every node in this scc */
761 for (irn = pscc->head; irn; irn = next) {
762 node_entry *e = get_irn_ne(irn, env);
765 DB((dbg, LEVEL_2, " %+F,", irn));
767 DB((dbg, LEVEL_2, "\n"));
771 for (irn = pscc->head; irn; irn = next) {
772 node_entry *e = get_irn_ne(irn, env);
780 * Process a SCC for the operator strength reduction.
782 * @param pscc the SCC
783 * @param env the environment
785 static void process_scc(scc *pscc, iv_env *env) {
786 ir_node *head = pscc->head;
787 node_entry *e = get_irn_link(head);
793 DB((dbg, LEVEL_4, " SCC at %p:\n ", pscc));
794 for (irn = pscc->head; irn; irn = next) {
795 node_entry *e = get_irn_link(irn);
799 DB((dbg, LEVEL_4, " %+F,", irn));
801 DB((dbg, LEVEL_4, "\n"));
805 if (e->next == NULL) {
806 /* this SCC has only a single member */
807 check_replace(head, env);
809 classify_iv(pscc, env);
/*
 * Collapses an scc that consists only of Phi nodes with a single external
 * input: every member is exchanged for that input (out_rc).
 * NOTE(review): this extract is missing lines (declarations of irn/next/j/
 * out_rc, the Phi-only guard, the num_outside bookkeeping and the closing
 * braces); the visible lines are kept verbatim.
 */
814 * If an SCC is a Phi only cycle, remove it.
816 static void remove_phi_cycle(scc *pscc, iv_env *env) {
821 /* check if this scc contains only Phi, Add or Sub nodes */
823 for (irn = pscc->head; irn; irn = next) {
824 node_entry *e = get_irn_ne(irn, env);
830 for (j = get_irn_arity(irn) - 1; j >= 0; --j) {
831 ir_node *pred = get_irn_n(irn, j);
832 node_entry *pe = get_irn_ne(pred, env);
834 if (pe->pscc != e->pscc) {
835 /* not in the same SCC, must be the only input */
838 } else if (out_rc != pred) {
844 /* found a Phi cycle */
845 DB((dbg, LEVEL_2, " Found an USELESS Phi cycle:\n "));
847 for (irn = pscc->head; irn; irn = next) {
848 node_entry *e = get_irn_ne(irn, env);
851 exchange(irn, out_rc);
857 * Process a SCC for the Phi cycle removement.
859 * @param pscc the SCC
860 * @param env the environment
862 static void process_phi_only_scc(scc *pscc, iv_env *env) {
863 ir_node *head = pscc->head;
864 node_entry *e = get_irn_link(head);
870 DB((dbg, LEVEL_4, " SCC at %p:\n ", pscc));
871 for (irn = pscc->head; irn; irn = next) {
872 node_entry *e = get_irn_link(irn);
876 DB((dbg, LEVEL_4, " %+F,", irn));
878 DB((dbg, LEVEL_4, "\n"));
883 remove_phi_cycle(pscc, env);
888 * Push a node onto the stack.
890 * @param env the environment
891 * @param n the node to push
893 static void push(iv_env *env, ir_node *n) {
896 if (env->tos == ARR_LEN(env->stack)) {
897 int nlen = ARR_LEN(env->stack) * 2;
898 ARR_RESIZE(ir_node *, env->stack, nlen);
900 env->stack[env->tos++] = n;
901 e = get_irn_ne(n, env);
906 * pop a node from the stack
908 * @param env the environment
910 * @return The topmost node
912 static ir_node *pop(iv_env *env)
914 ir_node *n = env->stack[--env->tos];
915 node_entry *e = get_irn_ne(n, env);
922 * Do Tarjan's SCC algorithm and drive OSR.
924 * @param irn start at this node
925 * @param env the environment
927 static void dfs(ir_node *irn, iv_env *env)
930 node_entry *node = get_irn_ne(irn, env);
932 mark_irn_visited(irn);
934 /* do not put blocks into the scc */
936 n = get_irn_arity(irn);
937 for (i = 0; i < n; ++i) {
938 ir_node *pred = get_irn_n(irn, i);
940 if (!irn_visited(pred))
945 ir_node *block = get_nodes_block(irn);
947 node->DFSnum = env->nextDFSnum++;
948 node->low = node->DFSnum;
951 /* handle the block */
952 if (!irn_visited(block))
955 n = get_irn_arity(irn);
956 for (i = 0; i < n; ++i) {
957 ir_node *pred = get_irn_n(irn, i);
958 node_entry *o = get_irn_ne(pred, env);
960 if (!irn_visited(pred)) {
962 node->low = MIN(node->low, o->low);
964 if (o->DFSnum < node->DFSnum && o->in_stack)
965 node->low = MIN(o->DFSnum, node->low);
967 if (node->low == node->DFSnum) {
968 scc *pscc = obstack_alloc(&env->obst, sizeof(*pscc));
971 memset(pscc, 0, sizeof(*pscc));
976 e = get_irn_ne(x, env);
978 e->next = pscc->head;
982 env->process_scc(pscc, env);
988 * Do the DFS by starting at the End node of a graph.
990 * @param irg the graph to process
991 * @param env the environment
993 static void do_dfs(ir_graph *irg, iv_env *env) {
994 ir_graph *rem = current_ir_graph;
995 ir_node *end = get_irg_end(irg);
998 ir_reserve_resources(irg, IR_RESOURCE_IRN_VISITED);
1000 current_ir_graph = irg;
1001 inc_irg_visited(irg);
1003 /* visit all visible nodes */
1006 /* visit the keep-alives */
1007 for (i = get_End_n_keepalives(end) - 1; i >= 0; --i) {
1008 ir_node *ka = get_End_keepalive(end, i);
1010 if (!irn_visited(ka))
1014 ir_free_resources(irg, IR_RESOURCE_IRN_VISITED);
1016 current_ir_graph = rem;
1020 * Post-block-walker: assign the post-order number.
1022 static void assign_po(ir_node *block, void *ctx) {
1024 node_entry *e = get_irn_ne(block, env);
1026 e->POnum = env->POnum++;
1030 * Follows the LFTR edges and return the last node in the chain.
1032 * @param irn the node that should be followed
1033 * @param env the IV environment
1036 * In the current implementation only the last edge is stored, so
1037 * only one chain exists. That's why we might miss some opportunities.
1039 static ir_node *followEdges(ir_node *irn, iv_env *env) {
1041 LFTR_edge *e = LFTR_find(irn, env);
/*
 * NOTE(review): this extract is missing lines (the "scc *pscc;" declaration,
 * the switch(e->code)/case labels and break statements, the "return NULL"
 * failure paths and closing braces); the visible lines are kept verbatim.
 * The function translates rc over one LFTR edge; with osr_flag_lftr_with_ov_check
 * it evaluates the translated constants with TV_OVERFLOW_BAD and rejects the
 * transformation when any tarval becomes tarval_bad.
 */
1050 * Apply one LFTR edge operation.
1051 * Return NULL if the transformation cannot be done safely without
1054 * @param iv the induction variable
1055 * @param rc the constant that should be translated
1056 * @param e the LFTR edge
1057 * @param env the IV environment
1059 * @return the translated region constant or NULL
1060 * if the translation was not possible
1063 * In the current implementation only the last edge is stored, so
1064 * only one chain exists. That's why we might miss some opportunities.
1066 static ir_node *applyOneEdge(ir_node *iv, ir_node *rc, LFTR_edge *e, iv_env *env) {
1067 if (env->flags & osr_flag_lftr_with_ov_check) {
1068 tarval *tv_l, *tv_r, *tv, *tv_init, *tv_incr;
1069 tarval_int_overflow_mode_t ovmode;
1072 if (! is_counter_iv(iv, env)) {
1073 DB((dbg, LEVEL_4, " not counter IV"));
1077 /* overflow can only be decided for Consts */
1078 if (! is_Const(e->rc)) {
1079 DB((dbg, LEVEL_4, " = UNKNOWN (%+F)", e->rc));
1083 tv_l = get_Const_tarval(rc);
1084 tv_r = get_Const_tarval(e->rc);
1086 ovmode = tarval_get_integer_overflow_mode();
1087 tarval_set_integer_overflow_mode(TV_OVERFLOW_BAD);
1089 pscc = get_iv_scc(iv, env);
1090 tv_incr = pscc->incr;
1091 tv_init = pscc->init;
1094 * Check that no overflow occurs:
1095 * init must be transformed without overflow
1096 * the new rc must be transformed without overflow
1097 * rc +/- incr must be possible without overflow
1101 tv = tarval_mul(tv_l, tv_r);
1102 tv_init = tarval_mul(tv_init, tv_r);
1103 tv_incr = tarval_mul(tv_incr, tv_r);
1104 DB((dbg, LEVEL_4, " * %+F", tv_r));
1107 tv = tarval_add(tv_l, tv_r);
1108 tv_init = tarval_add(tv_init, tv_r);
1109 DB((dbg, LEVEL_4, " + %+F", tv_r));
1112 tv = tarval_sub(tv_l, tv_r, NULL);
1113 tv_init = tarval_sub(tv_init, tv_r, NULL);
1114 DB((dbg, LEVEL_4, " - %+F", tv_r));
1117 panic("Unsupported opcode");
1121 if (pscc->code == iro_Add) {
1122 tv = tarval_add(tv, tv_incr);
1124 assert(pscc->code == iro_Sub);
1125 tv = tarval_sub(tv, tv_incr, NULL);
1128 tarval_set_integer_overflow_mode(ovmode);
1130 if (tv == tarval_bad || tv_init == tarval_bad) {
1131 DB((dbg, LEVEL_4, " = OVERFLOW"));
1134 return new_Const(tv);
1136 return do_apply(e->code, NULL, rc, e->rc, get_irn_mode(rc));
1140 * Applies the operations represented by the LFTR edges to a
1141 * region constant and returns the value.
1142 * Return NULL if the transformation cannot be done safely without
1145 * @param iv the IV node that starts the LFTR edge chain
1146 * @param rc the region constant that should be translated
1147 * @param env the IV environment
1149 * @return the translated region constant or NULL
1150 * if the translation was not possible
1152 static ir_node *applyEdges(ir_node *iv, ir_node *rc, iv_env *env) {
1153 if (env->flags & osr_flag_lftr_with_ov_check) {
1154 /* overflow can only be decided for Consts */
1155 if (! is_counter_iv(iv, env)) {
1156 DB((dbg, LEVEL_4, "not counter IV\n", rc));
1159 if (! is_Const(rc)) {
1160 DB((dbg, LEVEL_4, " = UNKNOWN (%+F)\n", rc));
1163 DB((dbg, LEVEL_4, "%+F", get_Const_tarval(rc)));
1167 LFTR_edge *e = LFTR_find(iv, env);
1169 rc = applyOneEdge(iv, rc, e, env);
1175 DB((dbg, LEVEL_3, "\n"));
1180 * Walker, finds Cmp(iv, rc) or Cmp(rc, iv)
1181 * and tries to optimize them.
1183 static void do_lftr(ir_node *cmp, void *ctx) {
1185 ir_node *left, *right, *liv, *riv;
1187 ir_node *nleft = NULL, *nright = NULL;
1192 left = get_Cmp_left(cmp);
1193 right = get_Cmp_right(cmp);
1195 liv = is_iv(left, env);
1196 riv = is_iv(right, env);
1197 if (liv && is_rc(right, liv)) {
1198 iv = left; rc = right;
1200 nright = applyEdges(iv, rc, env);
1201 if (nright && nright != rc) {
1202 nleft = followEdges(iv, env);
1205 else if (riv && is_rc(left, riv)) {
1206 iv = right; rc = left;
1208 nleft = applyEdges(iv, rc, env);
1209 if (nleft && nleft != rc) {
1210 nright = followEdges(iv, env);
1214 if (nleft && nright) {
1215 DB((dbg, LEVEL_2, " LFTR for %+F\n", cmp));
1216 set_Cmp_left(cmp, nleft);
1217 set_Cmp_right(cmp, nright);
1218 ++env->lftr_replaced;
1223 * do linear function test replacement.
1225 * @param irg the graph that should be optimized
1226 * @param env the IV environment
1228 static void lftr(ir_graph *irg, iv_env *env) {
1229 irg_walk_graph(irg, NULL, do_lftr, env);
1233 * Pre-walker: set all node links to NULL and fix the
1234 * block of Proj nodes.
1236 static void clear_and_fix(ir_node *irn, void *env)
1239 set_irn_link(irn, NULL);
1242 ir_node *pred = get_Proj_pred(irn);
1243 ir_node *pred_block = get_nodes_block(pred);
1245 if (get_nodes_block(irn) != pred_block) {
1246 set_nodes_block(irn, pred_block);
/*
 * NOTE(review): this extract is heavily gapped (declarations of rem/env/
 * projs_moved/edges, several env field initializations, the assure_doms call,
 * the do_dfs invocation and closing braces are missing); the visible lines
 * are kept verbatim.
 */
1252 /* Performs Operator Strength Reduction for the passed graph. */
1253 void opt_osr(ir_graph *irg, unsigned flags) {
1259 if (! get_opt_strength_red()) {
1260 /* only kill Phi cycles */
1261 remove_phi_cycles(irg);
1265 rem = current_ir_graph;
1266 current_ir_graph = irg;
1268 FIRM_DBG_REGISTER(dbg, "firm.opt.osr");
1270 DB((dbg, LEVEL_1, "Doing Operator Strength Reduction for %+F\n", irg));
1272 obstack_init(&env.obst);
1273 env.stack = NEW_ARR_F(ir_node *, 128);
1277 env.quad_map = new_set(quad_cmp, 64);
1278 env.lftr_edges = new_set(LFTR_cmp, 64);
1280 env.lftr_replaced = 0;
1282 env.process_scc = process_scc;
1284 /* Clear all links and move Proj nodes into
1285 the same block as their predecessors.
1286 This can improve the placement of new nodes.
1289 irg_walk_graph(irg, NULL, clear_and_fix, &projs_moved);
1291 set_irg_outs_inconsistent(irg);
1293 /* we need dominance */
1296 edges = edges_assure(irg);
1298 /* calculate the post order number for blocks by walking the out edges. */
1299 assure_irg_outs(irg);
1300 irg_block_edges_walk(get_irg_start_block(irg), NULL, assign_po, &env);
1302 /* calculate the SCC's and drive OSR. */
1303 ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK);
1307 /* try linear function test replacements */
1308 //lftr(irg, &env); // currently buggy :-(
1311 set_irg_outs_inconsistent(irg);
1312 DB((dbg, LEVEL_1, "Replacements: %u + %u (lftr)\n\n", env.replaced, env.lftr_replaced));
1314 ir_free_resources(irg, IR_RESOURCE_IRN_LINK);
1316 del_set(env.lftr_edges);
1317 del_set(env.quad_map);
1318 DEL_ARR_F(env.stack);
1319 obstack_free(&env.obst, NULL);
1322 edges_deactivate(irg);
1324 current_ir_graph = rem;
/*
 * NOTE(review): this extract is gapped and the function continues past the
 * end of the visible text (declarations of rem/env/projs_moved, several env
 * field initializations, the do_dfs call and the closing brace are missing);
 * the visible lines are kept verbatim.
 */
1327 /* Remove any Phi cycles with only one real input. */
1328 void remove_phi_cycles(ir_graph *irg) {
1333 rem = current_ir_graph;
1334 current_ir_graph = irg;
1336 FIRM_DBG_REGISTER(dbg, "firm.opt.remove_phi");
1338 DB((dbg, LEVEL_1, "Doing Phi cycle removement for %+F\n", irg));
1340 obstack_init(&env.obst);
1341 env.stack = NEW_ARR_F(ir_node *, 128);
1345 env.quad_map = NULL;
1346 env.lftr_edges = NULL;
1348 env.lftr_replaced = 0;
1350 env.process_scc = process_phi_only_scc;
1352 /* Clear all links and move Proj nodes into
1353 the same block as their predecessors.
1354 This can improve the placement of new nodes.
1357 irg_walk_graph(irg, NULL, clear_and_fix, &projs_moved);
1359 set_irg_outs_inconsistent(irg);
1361 /* we need outs for calculating the post order */
1362 assure_irg_outs(irg);
1364 /* calculate the post order number for blocks. */
1365 irg_out_block_walk(get_irg_start_block(irg), NULL, assign_po, &env);
1367 /* calculate the SCC's and drive OSR. */
1368 ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK);
1370 ir_free_resources(irg, IR_RESOURCE_IRN_LINK);
1373 set_irg_outs_inconsistent(irg);
1374 DB((dbg, LEVEL_1, "remove_phi_cycles: %u Cycles removed\n\n", env.replaced));
1377 DEL_ARR_F(env.stack);
1378 obstack_free(&env.obst, NULL);
1380 current_ir_graph = rem;