2 * Copyright (C) 1995-2011 University of Karlsruhe. All rights reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Operator Strength Reduction.
24 * @author Michael Beck
26 * Implementation of the Operator Strength Reduction algorithm
27 * by Keith D. Cooper, L. Taylor Simpson, Christopher A. Vick.
33 #include "iroptimize.h"
56 /** The debug handle. */
57 DEBUG_ONLY(static firm_dbg_module_t *dbg;)
61 ir_node *head; /**< the head of the list */
62 ir_tarval *init; /**< the init value iff only one exists. */
63 ir_tarval *incr; /**< the induction variable increment if only a single const exists. */
64 unsigned code; /**< == iro_Add if +incr, iro_Sub if -incr, 0 if not analysed, iro_Bad else */
68 typedef struct node_entry {
69 unsigned DFSnum; /**< the DFS number of this node */
70 unsigned low; /**< the low number of this node */
71 ir_node *header; /**< the header of this node */
72 int in_stack; /**< flag, set if the node is on the stack */
73 ir_node *next; /**< link to the next node the the same scc */
74 scc *pscc; /**< the scc of this node */
75 unsigned POnum; /**< the post order number for blocks */
78 /** The environment. */
79 typedef struct iv_env {
80 struct obstack obst; /**< an obstack for allocations */
81 ir_node **stack; /**< the node stack */
82 size_t tos; /**< tos index */
83 unsigned nextDFSnum; /**< the current DFS number */
84 unsigned POnum; /**< current post order number */
85 set *quad_map; /**< a map from (op, iv, rc) to node */
86 set *lftr_edges; /**< the set of lftr edges */
87 unsigned replaced; /**< number of replaced ops */
88 unsigned lftr_replaced; /**< number of applied linear function test replacements */
89 unsigned osr_flags; /**< additional flags steering the transformation */
90 unsigned need_postpass; /**< set, if a post pass is needed to fix Add and Sub nodes */
91 /** Function called to process a SCC. */
92 void (*process_scc)(scc *pscc, struct iv_env *env);
96 * An entry in the (op, node, node) -> node map.
98 typedef struct quadruple_t {
99 unsigned code; /**< the opcode of the reduced operation */
100 ir_node *op1; /**< the first operand the reduced operation */
101 ir_node *op2; /**< the second operand of the reduced operation */
103 ir_node *res; /**< the reduced operation */
109 typedef struct LFTR_edge {
110 ir_node *src; /**< the source node */
111 ir_node *dst; /**< the destination node */
112 unsigned code; /**< the opcode that must be applied */
113 ir_node *rc; /**< the region const that must be applied */
117 static ir_node *reduce(ir_node *orig, ir_node *iv, ir_node *rc, iv_env *env);
120 * Compare two LFTR edges.
122 static int LFTR_cmp(const void *e1, const void *e2, size_t size)
124 const LFTR_edge *l1 = (const LFTR_edge*)e1;
125 const LFTR_edge *l2 = (const LFTR_edge*)e2;
128 return l1->src != l2->src;
134 * @param src the source node of the transition
136 static LFTR_edge *LFTR_find(ir_node *src, iv_env *env)
142 return set_find(LFTR_edge, env->lftr_edges, &key, sizeof(key), hash_ptr(src));
148 * @param src the source node of the edge
149 * @param dst the destination node of the edge
150 * @param code the opcode of the transformed transition
151 * @param rc the region const used in the transition
152 * @param env the environment
154 static void LFTR_add(ir_node *src, ir_node *dst, unsigned code, ir_node *rc, iv_env *env)
164 * There might be more than one edge here. This is rather bad
165 * because we currently store only one.
167 (void)set_insert(LFTR_edge, env->lftr_edges, &key, sizeof(key), hash_ptr(src));
171 * Gets the node_entry of a node.
173 * @param irn the node
174 * @param env the environment
176 static node_entry *get_irn_ne(ir_node *irn, iv_env *env)
178 node_entry *e = (node_entry*)get_irn_link(irn);
181 e = OALLOCZ(&env->obst, node_entry);
182 set_irn_link(irn, e);
188 * Gets the scc from an induction variable.
190 * @param iv any node of the induction variable
191 * @param env the environment
193 static scc *get_iv_scc(ir_node *iv, iv_env *env)
195 node_entry *e = get_irn_ne(iv, env);
200 * Check if irn is an IV.
202 * @param irn the node to check
203 * @param env the environment
205 * @returns the header if it is one, NULL else
207 static ir_node *is_iv(ir_node *irn, iv_env *env)
209 return get_irn_ne(irn, env)->header;
213 * Check if irn is a region constant.
214 * The block or irn must strictly dominate the header block.
216 * @param irn the node to check
217 * @param header_block the header block of the induction variable
219 static int is_rc(ir_node *irn, ir_node *header_block)
221 ir_node *block = get_nodes_block(irn);
223 return (block != header_block) && block_dominates(block, header_block);
227 * Set compare function for the quad set.
229 static int quad_cmp(const void *e1, const void *e2, size_t size)
231 const quadruple_t *c1 = (const quadruple_t*)e1;
232 const quadruple_t *c2 = (const quadruple_t*)e2;
235 return c1->code != c2->code || c1->op1 != c2->op1 || c1->op2 != c2->op2;
239 * Check if an reduced operation was already calculated.
241 * @param code the opcode of the operation
242 * @param op1 the first operand of the operation
243 * @param op2 the second operand of the operation
244 * @param env the environment
246 * @return the already reduced node or NULL if this operation is not yet reduced
248 static ir_node *search(unsigned code, ir_node *op1, ir_node *op2, iv_env *env)
250 quadruple_t key, *entry;
256 entry = set_find(quadruple_t, env->quad_map, &key, sizeof(key), (code * 9) ^ hash_ptr(op1) ^ hash_ptr(op2));
263 * Add an reduced operation.
265 * @param code the opcode of the operation
266 * @param op1 the first operand of the operation
267 * @param op2 the second operand of the operation
268 * @param result the result of the reduced operation
269 * @param env the environment
271 static void add(unsigned code, ir_node *op1, ir_node *op2, ir_node *result, iv_env *env)
280 (void)set_insert(quadruple_t, env->quad_map, &key, sizeof(key), (code * 9) ^ hash_ptr(op1) ^ hash_ptr(op2));
284 * Find a location where to place a bin-op whose operands are in
287 * @param block1 the block of the first operand
288 * @param block2 the block of the second operand
290 * Note that we know here that such a place must exists. Moreover, this means
291 * that either block1 dominates block2 or vice versa. So, just return
294 static ir_node *find_location(ir_node *block1, ir_node *block2)
296 if (block_dominates(block1, block2))
298 assert(block_dominates(block2, block1));
300 } /* find_location */
303 * Create a node that executes an op1 code op1 operation.
305 * @param code the opcode to execute
306 * @param db debug info to add to the new node
307 * @param op1 the first operand
308 * @param op2 the second operand
309 * @param mode the mode of the new operation
311 * @return the newly created node
313 static ir_node *do_apply(unsigned code, dbg_info *db, ir_node *op1, ir_node *op2, ir_mode *mode)
316 ir_node *block = find_location(get_nodes_block(op1), get_nodes_block(op2));
320 result = new_rd_Mul(db, block, op1, op2, mode);
323 result = new_rd_Add(db, block, op1, op2, mode);
326 result = new_rd_Sub(db, block, op1, op2, mode);
329 panic("Unsupported opcode");
335 * The Apply operation.
337 * @param orig the node that represent the original operation and determines
338 * the opcode, debug-info and mode of a newly created one
339 * @param op1 the first operand
340 * @param op2 the second operand
341 * @param env the environment
343 * @return the newly created node
345 static ir_node *apply(ir_node *header, ir_node *orig, ir_node *op1, ir_node *op2, iv_env *env)
347 unsigned code = get_irn_opcode(orig);
348 ir_node *result = search(code, op1, op2, env);
350 if (result == NULL) {
351 dbg_info *db = get_irn_dbg_info(orig);
352 ir_node *op1_header = get_irn_ne(op1, env)->header;
353 ir_node *op2_header = get_irn_ne(op2, env)->header;
355 if (op1_header == header && is_rc(op2, op1_header)) {
356 result = reduce(orig, op1, op2, env);
358 else if (op2_header == header && is_rc(op1, op2_header)) {
359 result = reduce(orig, op2, op1, env);
362 result = do_apply(code, db, op1, op2, get_irn_mode(orig));
363 get_irn_ne(result, env)->header = NULL;
370 * The Reduce operation.
372 * @param orig the node that represent the original operation and determines
373 * the opcode, debug-info and mode of a newly created one
374 * @param iv the induction variable
375 * @param rc the region constant
376 * @param env the environment
378 * @return the reduced node
380 static ir_node *reduce(ir_node *orig, ir_node *iv, ir_node *rc, iv_env *env)
382 unsigned code = get_irn_opcode(orig);
383 ir_node *result = search(code, iv, rc, env);
385 /* check if we have already done this operation on the iv */
386 if (result == NULL) {
387 node_entry *e, *iv_e;
389 ir_mode *mode = get_irn_mode(orig);
391 result = exact_copy(iv);
393 if (get_irn_mode(result) != mode) {
395 * Beware: we must always create a new induction variable with the same mode
396 * as the node we are replacing. Especially this means the mode might be changed
397 * from P to I and back. This is always possible, because we have only Phi, Add
399 * However, this might lead to AddIs(Iu,Is) which we must fix. The best way to do this
400 * seems to be a post-pass, or we might end with useless Conv's.
402 set_irn_mode(result, mode);
403 env->need_postpass = 1;
405 add(code, iv, rc, result, env);
406 DB((dbg, LEVEL_3, " Created new %+F for %+F (%s %+F)\n", result, iv,
407 get_irn_opname(orig), rc));
409 iv_e = get_irn_ne(iv, env);
410 e = get_irn_ne(result, env);
411 e->header = iv_e->header;
413 /* create the LFTR edge */
414 LFTR_add(iv, result, code, rc, env);
416 for (i = get_irn_arity(result) - 1; i >= 0; --i) {
417 ir_node *o = get_irn_n(result, i);
419 e = get_irn_ne(o, env);
420 if (e->header == iv_e->header)
421 o = reduce(orig, o, rc, env);
422 else if (is_Phi(result) || code == iro_Mul)
423 o = apply(iv_e->header, orig, o, rc, env);
424 set_irn_n(result, i, o);
427 DB((dbg, LEVEL_3, " Already Created %+F for %+F (%s %+F)\n", result, iv,
428 get_irn_opname(orig), rc));
434 * Update the scc for a newly created IV.
436 static void update_scc(ir_node *iv, node_entry *e, iv_env *env)
439 ir_node *header = e->header;
440 waitq *wq = new_waitq();
442 DB((dbg, LEVEL_2, " Creating SCC for new an induction variable:\n "));
446 ir_node *irn = (ir_node*)waitq_get(wq);
447 node_entry *ne = get_irn_ne(irn, env);
451 ne->next = pscc->head;
453 DB((dbg, LEVEL_2, " %+F,", irn));
455 for (i = get_irn_arity(irn) - 1; i >= 0; --i) {
456 ir_node *pred = get_irn_n(irn, i);
457 node_entry *pe = get_irn_ne(pred, env);
459 if (pe->header == header && pe->pscc == NULL) {
460 /* set the pscc here to ensure that the node is NOT enqueued another time */
465 } while (! waitq_empty(wq));
467 DB((dbg, LEVEL_2, "\n"));
471 * The Replace operation. We found a node representing iv (+,-,*) rc
472 * that can be removed by replacing the induction variable iv by a new
473 * one that 'applies' the operation 'irn'.
475 * @param irn the node that will be replaced
476 * @param iv the induction variable
477 * @param rc the region constant
478 * @param env the environment
480 static int replace(ir_node *irn, ir_node *iv, ir_node *rc, iv_env *env)
484 DB((dbg, LEVEL_2, " Replacing %+F\n", irn));
486 result = reduce(irn, iv, rc, env);
490 hook_strength_red(get_irn_irg(irn), irn);
491 exchange(irn, result);
492 e = get_irn_ne(result, env);
493 if (e->pscc == NULL) {
494 e->pscc = OALLOCZ(&env->obst, scc);
495 update_scc(result, e, env);
505 * check if a given node is a mul with 2, 4, 8
507 static int is_x86_shift_const(ir_node *mul)
514 /* normalization put constants on the right side */
515 rc = get_Mul_right(mul);
517 ir_tarval *tv = get_Const_tarval(rc);
519 if (tarval_is_long(tv)) {
520 long value = get_tarval_long(tv);
522 if (value == 2 || value == 4 || value == 8) {
523 /* do not reduce multiplications by 2, 4, 8 */
529 } /* is_x86_shift_const */
/**
 * Check if an IV represents a counter with constant limits:
 * exactly one Phi (with one constant init value from outside the scc)
 * and one Add or Sub with a constant increment.
 * The result is cached in pscc->code, pscc->init and pscc->incr.
 *
 * @param iv   any node of the induction variable
 * @param env  the environment
 *
 * @return non-zero if the IV is such a counter
 */
538 static int is_counter_iv(ir_node *iv, iv_env *env)
540 node_entry *e = get_irn_ne(iv, env);
542 ir_node *have_init = NULL;
543 ir_node *have_incr = NULL;
544 ir_opcode code = iro_Bad;
/* pscc->code != 0 means the scc was analysed before; iro_Bad marks "not a counter" */
547 if (pscc->code != 0) {
548 /* already analysed */
549 return pscc->code != iro_Bad;
/* pessimistic default until the walk below proves otherwise */
552 pscc->code = iro_Bad;
553 for (irn = pscc->head; irn != NULL; irn = e->next) {
/* Add: the constant increment may sit on either side */
555 if (have_incr != NULL)
558 have_incr = get_Add_right(irn);
559 if (! is_Const(have_incr)) {
560 have_incr = get_Add_left(irn);
561 if (! is_Const(have_incr))
565 } else if (is_Sub(irn)) {
/* Sub: only the right operand can be the constant increment */
566 if (have_incr != NULL)
569 have_incr = get_Sub_right(irn);
570 if (! is_Const(have_incr))
573 } else if (is_Phi(irn)) {
/* Phi: preds inside the scc are ignored; exactly one constant init from outside */
576 for (i = get_Phi_n_preds(irn) - 1; i >= 0; --i) {
577 ir_node *pred = get_Phi_pred(irn, i);
578 node_entry *ne = get_irn_ne(pred, env);
580 if (ne->header == e->header)
582 if (have_init != NULL)
585 if (! is_Const(pred))
590 e = get_irn_ne(irn, env);
/* cache the found tarvals for the LFTR overflow checks */
592 pscc->init = get_Const_tarval(have_init);
593 pscc->incr = get_Const_tarval(have_incr);
595 return code != iro_Bad;
596 } /* is_counter_iv */
/**
 * Check the users of an induction variable for register pressure.
 *
 * Heuristic: the reduction is considered harmless when the IV has at
 * most one real user outside its own cycle; if a Cmp user exists, the
 * IV must additionally be a simple counter (see is_counter_iv) so the
 * Cmp can later be transformed by LFTR.
 *
 * @param iv   any node of the induction variable
 * @param env  the environment
 *
 * @return non-zero if the register pressure is estimated
 *         to not increase, zero else
 */
607 static int check_users_for_reg_pressure(ir_node *iv, iv_env *env)
610 ir_node *have_user = NULL;
611 ir_node *have_cmp = NULL;
612 node_entry *e = get_irn_ne(iv, env);
/* walk all members of the IV's scc and classify their users */
615 for (irn = pscc->head; irn != NULL; irn = e->next) {
616 foreach_out_edge(irn, edge) {
617 ir_node *user = get_edge_src_irn(edge);
618 node_entry *ne = get_irn_ne(user, env);
/* users inside the same IV cycle do not count as "real" users */
620 if (e->header == ne->header) {
621 /* found user from the same IV */
625 if (have_cmp != NULL) {
626 /* more than one cmp, for now end here */
631 /* user is a real user of the IV */
632 if (have_user != NULL) {
633 /* found the second user */
639 e = get_irn_ne(irn, env);
642 if (have_user == NULL) {
643 /* no user, ignore */
647 if (have_cmp == NULL) {
648 /* fine, only one user, try to reduce */
/*
 * We found one user AND at least one cmp.
 * We should check here if we can transform the Cmp.
 *
 * For now our capabilities for doing linear function test
 * are limited, so check if the iv has the right form: Only ONE
 * Phi, only one Add/Sub with a Const.
 */
659 if (! is_counter_iv(iv, env))
/*
 * Ok, we have only one increment AND it is a Const, we might be able
 * to do a linear function test replacement, so go on.
 */
667 } /* check_users_for_reg_pressure */
/**
 * Check if a node can be replaced (+, -, *):
 * one operand must be an IV, the other a region constant of that IV's
 * header. Commutative ops also allow the mirrored (rc, iv) form.
 *
 * @param irn  the node to check
 * @param env  the environment
 *
 * @return non-zero if irn was Replace'd
 */
677 static int check_replace(ir_node *irn, iv_env *env)
679 ir_node *left, *right, *iv, *rc;
680 ir_op *op = get_irn_op(irn);
681 unsigned code = get_op_code(op);
690 left = get_binop_left(irn);
691 right = get_binop_right(irn);
/* which side (if any) is an induction variable? */
693 liv = is_iv(left, env);
694 riv = is_iv(right, env);
695 if (liv && is_rc(right, liv)) {
696 iv = left; rc = right;
/* the swapped form is only legal for commutative operations */
698 else if (riv && is_op_commutative(op) &&
700 iv = right; rc = left;
/* optionally refuse reductions that would raise register pressure */
704 if (env->osr_flags & osr_flag_keep_reg_pressure) {
705 if (! check_users_for_reg_pressure(iv, env))
708 return replace(irn, iv, rc, env);
715 } /* check_replace */
/**
 * Check whether an SCC is an induction variable: it may contain only
 * Phi, Add and Sub nodes, with region constants allowed only where
 * the IV definition permits them. Trivial Phi-only cycles with a
 * single outside input are collapsed directly.
 *
 * @param pscc  the SCC to classify
 * @param env   the environment
 */
723 static void classify_iv(scc *pscc, iv_env *env)
725 ir_node *irn, *next, *header = NULL;
726 node_entry *b, *h = NULL;
727 int j, only_phi, num_outside;
730 /* find the header block for this scc */
731 for (irn = pscc->head; irn; irn = next) {
732 node_entry *e = (node_entry*)get_irn_link(irn);
733 ir_node *block = get_nodes_block(irn);
736 b = get_irn_ne(block, env);
/* the header is the block with the smallest post-order number */
739 if (h->POnum < b->POnum) {
750 /* check if this scc contains only Phi, Add or Sub nodes */
754 for (irn = pscc->head; irn; irn = next) {
755 node_entry *e = get_irn_ne(irn, env);
758 switch (get_irn_opcode(irn)) {
762 ir_node *left = get_Sub_left(irn);
763 node_entry *left_entry = get_irn_ne(left, env);
764 ir_node *right = get_Sub_right(irn);
765 node_entry *right_entry = get_irn_ne(right, env);
767 if (left_entry->pscc != e->pscc ||
768 (right_entry->pscc != e->pscc && !is_rc(right, header))) {
/*
 * Not an induction variable.
 * Region constants are only allowed on the right hand side.
 */
782 for (j = get_irn_arity(irn) - 1; j >= 0; --j) {
783 ir_node *pred = get_irn_n(irn, j);
784 node_entry *pe = get_irn_ne(pred, env);
786 if (pe->pscc != e->pscc) {
787 /* not in the same SCC, must be a region const */
788 if (! is_rc(pred, header)) {
789 /* not an induction variable */
/* more than one distinct outside value: not collapsible */
795 } else if (out_rc != pred) {
802 /* not an induction variable */
806 /* found an induction variable */
807 DB((dbg, LEVEL_2, " Found an induction variable:\n "));
808 if (only_phi && num_outside == 1) {
809 /* a phi cycle with only one real predecessor can be collapsed */
810 DB((dbg, LEVEL_2, " Found an USELESS Phi cycle:\n "));
812 for (irn = pscc->head; irn; irn = next) {
813 node_entry *e = get_irn_ne(irn, env);
/* replace every cycle member by the single outside value */
816 exchange(irn, out_rc);
822 /* set the header for every node in this scc */
823 for (irn = pscc->head; irn; irn = next) {
824 node_entry *e = get_irn_ne(irn, env);
827 DB((dbg, LEVEL_2, " %+F,", irn));
829 DB((dbg, LEVEL_2, "\n"));
/* failure path: this scc is NOT an induction variable */
833 for (irn = pscc->head; irn; irn = next) {
834 node_entry *e = get_irn_ne(irn, env);
/**
 * Process an SCC for the operator strength reduction:
 * single-node SCCs are candidate reducible operations, larger ones
 * are candidate induction variables.
 *
 * @param pscc  the SCC
 * @param env   the environment
 */
847 static void process_scc(scc *pscc, iv_env *env)
849 ir_node *head = pscc->head;
850 node_entry *e = (node_entry*)get_irn_link(head);
/* debug dump of the scc members */
856 DB((dbg, LEVEL_4, " SCC at %p:\n ", pscc));
857 for (irn = pscc->head; irn != NULL; irn = next) {
858 node_entry *e = (node_entry*)get_irn_link(irn);
862 DB((dbg, LEVEL_4, " %+F,", irn));
864 DB((dbg, LEVEL_4, "\n"));
868 if (e->next == NULL) {
869 /* this SCC has only a single member */
870 check_replace(head, env);
/* multi-node scc: check whether it forms an induction variable */
872 classify_iv(pscc, env);
/**
 * If an SCC is a Phi-only cycle with exactly one value flowing in from
 * outside, remove it by replacing every member with that value.
 *
 * @param pscc  an SCC that consists of Phi nodes only
 * @param env   the environment
 */
882 static void remove_phi_cycle(scc *pscc, iv_env *env)
888 /* check if this scc contains only Phi nodes */
890 for (irn = pscc->head; irn; irn = next) {
891 node_entry *e = get_irn_ne(irn, env);
897 for (j = get_irn_arity(irn) - 1; j >= 0; --j) {
898 ir_node *pred = get_irn_n(irn, j);
899 node_entry *pe = get_irn_ne(pred, env);
901 if (pe->pscc != e->pscc) {
902 /* not in the same SCC, must be the only input */
/* a second, different outside input disqualifies the cycle */
905 } else if (out_rc != pred) {
911 /* found a Phi cycle */
912 DB((dbg, LEVEL_2, " Found an USELESS Phi cycle:\n "));
914 for (irn = pscc->head; irn; irn = next) {
915 node_entry *e = get_irn_ne(irn, env);
/* collapse: every Phi of the cycle computes out_rc */
918 exchange(irn, out_rc);
921 } /* remove_phi_cycle */
/**
 * Process an SCC for the Phi cycle removal pass: multi-node SCCs are
 * handed to remove_phi_cycle(), single nodes are left alone.
 *
 * @param pscc  the SCC
 * @param env   the environment
 */
929 static void process_phi_only_scc(scc *pscc, iv_env *env)
931 ir_node *head = pscc->head;
932 node_entry *e = (node_entry*)get_irn_link(head);
/* debug dump of the scc members */
938 DB((dbg, LEVEL_4, " SCC at %p:\n ", pscc));
939 for (irn = pscc->head; irn; irn = next) {
940 node_entry *e = (node_entry*)get_irn_link(irn);
944 DB((dbg, LEVEL_4, " %+F,", irn));
946 DB((dbg, LEVEL_4, "\n"));
951 remove_phi_cycle(pscc, env);
952 } /* process_phi_only_scc */
956 * Push a node onto the stack.
958 * @param env the environment
959 * @param n the node to push
961 static void push(iv_env *env, ir_node *n)
965 if (env->tos == ARR_LEN(env->stack)) {
966 size_t nlen = ARR_LEN(env->stack) * 2;
967 ARR_RESIZE(ir_node *, env->stack, nlen);
969 env->stack[env->tos++] = n;
970 e = get_irn_ne(n, env);
975 * Pop a node from the stack.
977 * @param env the environment
979 * @return The topmost node
981 static ir_node *pop(iv_env *env)
983 ir_node *n = env->stack[--env->tos];
984 node_entry *e = get_irn_ne(n, env);
/**
 * Do Tarjan's SCC algorithm and drive OSR: compute DFS/low numbers,
 * pop complete SCCs off the stack and hand each to env->process_scc.
 * Blocks are visited but never become SCC members themselves.
 *
 * @param irn  start at this node
 * @param env  the environment
 */
996 static void dfs(ir_node *irn, iv_env *env)
999 node_entry *node = get_irn_ne(irn, env);
1001 mark_irn_visited(irn);
1003 /* do not put blocks into the scc */
1004 if (is_Block(irn)) {
1005 n = get_irn_arity(irn);
1006 for (i = 0; i < n; ++i) {
1007 ir_node *pred = get_irn_n(irn, i);
1009 if (!irn_visited(pred))
1013 ir_node *block = get_nodes_block(irn);
1015 node->DFSnum = env->nextDFSnum++;
1016 node->low = node->DFSnum;
1019 /* handle the block */
1020 if (!irn_visited(block))
1023 n = get_irn_arity(irn);
1024 for (i = 0; i < n; ++i) {
1025 ir_node *pred = get_irn_n(irn, i);
1026 node_entry *o = get_irn_ne(pred, env);
/* tree edge: recurse, then propagate the low link */
1028 if (!irn_visited(pred)) {
1030 node->low = MIN(node->low, o->low);
/* back/cross edge to a node still on the stack */
1032 if (o->DFSnum < node->DFSnum && o->in_stack)
1033 node->low = MIN(o->DFSnum, node->low);
/* irn is an SCC root: pop the component and process it */
1035 if (node->low == node->DFSnum) {
1036 scc *pscc = OALLOCZ(&env->obst, scc);
1043 e = get_irn_ne(x, env);
1045 e->next = pscc->head;
1049 env->process_scc(pscc, env);
1055 * Do the DFS by starting at the End node of a graph.
1057 * @param irg the graph to process
1058 * @param env the environment
1060 static void do_dfs(ir_graph *irg, iv_env *env)
1062 ir_node *end = get_irg_end(irg);
1065 ir_reserve_resources(irg, IR_RESOURCE_IRN_VISITED);
1067 inc_irg_visited(irg);
1069 /* visit all visible nodes */
1072 /* visit the keep-alives */
1073 for (i = get_End_n_keepalives(end) - 1; i >= 0; --i) {
1074 ir_node *ka = get_End_keepalive(end, i);
1076 if (!irn_visited(ka))
1080 ir_free_resources(irg, IR_RESOURCE_IRN_VISITED);
1084 * Post-block-walker: assign the post-order number.
1086 static void assign_po(ir_node *block, void *ctx)
1088 iv_env *env = (iv_env*)ctx;
1089 node_entry *e = get_irn_ne(block, env);
1091 e->POnum = env->POnum++;
/**
 * Apply one LFTR edge operation to a region constant.
 * Return NULL if the transformation cannot be done safely without
 * risking overflow.
 *
 * @param iv   the induction variable
 * @param rc   the constant that should be translated
 * @param e    the LFTR edge
 * @param env  the IV environment
 *
 * @return the translated region constant or NULL
 *         if the translation was not possible
 *
 * @note
 * In the current implementation only the last edge is stored, so
 * only one chain exists. That's why we might miss some opportunities.
 */
1111 static ir_node *applyOneEdge(ir_node *iv, ir_node *rc, LFTR_edge *e, iv_env *env)
1113 if (env->osr_flags & osr_flag_lftr_with_ov_check) {
1114 ir_tarval *tv_l, *tv_r, *tv, *tv_init, *tv_incr, *tv_end;
1115 tarval_int_overflow_mode_t ovmode;
/* overflow analysis needs the constant init/incr of a counter IV */
1119 if (! is_counter_iv(iv, env)) {
1120 DB((dbg, LEVEL_4, " not counter IV"));
1124 /* overflow can only be decided for Consts */
1125 if (! is_Const(e->rc)) {
1126 if (e->code == iro_Add && mode_is_reference(get_irn_mode(e->rc))) {
1127 /* However we allow ONE Pointer Add, as pointer arithmetic with wrap
1128 around is undefined anyway */
1129 return do_apply(e->code, NULL, rc, e->rc, get_irn_mode(e->rc));
1131 DB((dbg, LEVEL_4, " = UNKNOWN (%+F)", e->rc));
1135 tv_l = get_Const_tarval(rc);
1136 tv_r = get_Const_tarval(e->rc);
/* make tarval arithmetic report overflow as tarval_bad */
1138 ovmode = tarval_get_integer_overflow_mode();
1139 tarval_set_integer_overflow_mode(TV_OVERFLOW_BAD);
1141 pscc = get_iv_scc(iv, env);
1142 tv_incr = pscc->incr;
1143 tv_init = pscc->init;
/*
 * Check that no overflow occurs:
 * init must be transformed without overflow;
 * the new rc must be transformed without overflow;
 * rc +/- incr must be possible without overflow.
 */
1153 tv = tarval_mul(tv_l, tv_r);
1154 tv_init = tarval_mul(tv_init, tv_r);
1155 tv_incr = tarval_mul(tv_incr, tv_r);
1156 DB((dbg, LEVEL_4, " * %+F", tv_r));
1159 tv = tarval_add(tv_l, tv_r);
1160 tv_init = tarval_add(tv_init, tv_r);
1161 DB((dbg, LEVEL_4, " + %+F", tv_r));
1164 tv = tarval_sub(tv_l, tv_r, NULL);
1165 tv_init = tarval_sub(tv_init, tv_r, NULL);
1166 DB((dbg, LEVEL_4, " - %+F", tv_r));
1169 panic("Unsupported opcode");
/* any tarval_bad here means an overflow was detected: bail out */
1172 if (tv == tarval_bad || tv_init == tarval_bad) {
1173 tarval_set_integer_overflow_mode(ovmode);
1174 DB((dbg, LEVEL_4, " = OVERFLOW"));
/* finally check the last loop iteration: rc +/- incr */
1178 if (pscc->code == iro_Add) {
1179 tv_end = tarval_add(tv, tv_incr);
1181 assert(pscc->code == iro_Sub);
1182 tv_end = tarval_sub(tv, tv_incr, NULL);
/* restore the caller's overflow mode */
1185 tarval_set_integer_overflow_mode(ovmode);
1187 if (tv_end == tarval_bad) {
1188 DB((dbg, LEVEL_4, " = OVERFLOW"));
1191 irg = get_irn_irg(iv);
1192 return new_r_Const(irg, tv);
/* no overflow check requested: apply the edge operation directly */
1194 return do_apply(e->code, NULL, rc, e->rc, get_irn_mode(e->dst));
1195 } /* applyOneEdge */
/**
 * Applies the operations represented by the LFTR edges to a
 * region constant and returns the value.
 * Return NULL if the transformation cannot be done safely without
 * risking overflow.
 *
 * @param pIV  points to the IV node that starts the LFTR edge chain;
 *             after translation points to the new IV
 * @param rc   the region constant that should be translated
 * @param env  the IV environment
 *
 * @return the translated region constant or NULL
 *         if the translation was not possible
 */
1211 static ir_node *applyEdges(ir_node **pIV, ir_node *rc, iv_env *env)
1214 if (env->osr_flags & osr_flag_lftr_with_ov_check) {
1215 /* overflow can only be decided for Consts */
1216 if (! is_counter_iv(iv, env)) {
1217 DB((dbg, LEVEL_4, "not counter IV\n", rc));
1220 if (! is_Const(rc)) {
1221 DB((dbg, LEVEL_4, " = UNKNOWN (%+F)\n", rc));
1224 DB((dbg, LEVEL_4, "%+F", get_Const_tarval(rc)));
/* follow the LFTR edge chain, translating rc step by step */
1228 LFTR_edge *e = LFTR_find(iv, env);
1230 rc = applyOneEdge(iv, rc, e, env);
1235 DB((dbg, LEVEL_3, "\n"));
/**
 * Walker, finds Cmp(iv, rc) or Cmp(rc, iv)
 * and tries to optimize them by translating the region constant
 * along the LFTR edge chain of the IV.
 */
1244 static void do_lftr(ir_node *cmp, void *ctx)
1246 iv_env *env = (iv_env*)ctx;
1247 ir_node *left, *right, *liv, *riv;
1249 ir_node *nleft = NULL, *nright = NULL;
1254 left = get_Cmp_left(cmp);
1255 right = get_Cmp_right(cmp);
/* which side is an IV, and is the other side a region constant? */
1257 liv = is_iv(left, env);
1258 riv = is_iv(right, env);
1259 if (liv && is_rc(right, liv)) {
1260 iv = left; rc = right;
1262 nright = applyEdges(&iv, rc, env);
1265 else if (riv && is_rc(left, riv)) {
1266 iv = right; rc = left;
1268 nleft = applyEdges(&iv, rc, env);
/* only rewrite the Cmp when both replacement operands were computed */
1272 if (nleft && nright) {
1273 DB((dbg, LEVEL_2, " LFTR for %+F\n", cmp));
1274 set_Cmp_left(cmp, nleft);
1275 set_Cmp_right(cmp, nright);
1276 ++env->lftr_replaced;
1281 * do linear function test replacement.
1283 * @param irg the graph that should be optimized
1284 * @param env the IV environment
1286 static void lftr(ir_graph *irg, iv_env *env)
1288 irg_walk_graph(irg, NULL, do_lftr, env);
/*
 * Remove any Phi cycles with only one real input.
 * Runs the SCC machinery with process_phi_only_scc as the handler;
 * the OSR-specific maps (quad_map, lftr_edges) stay unused (NULL).
 */
1292 void remove_phi_cycles(ir_graph *irg)
1296 assure_irg_properties(irg,
1297 IR_GRAPH_PROPERTY_CONSISTENT_DOMINANCE
1298 | IR_GRAPH_PROPERTY_CONSISTENT_OUTS
1299 | IR_GRAPH_PROPERTY_CONSISTENT_OUT_EDGES);
1301 FIRM_DBG_REGISTER(dbg, "firm.opt.remove_phi");
1303 DB((dbg, LEVEL_1, "Doing Phi cycle removement for %+F\n", irg));
/* set up a reduced environment: only the stack and counters are needed */
1305 obstack_init(&env.obst);
1306 env.stack = NEW_ARR_F(ir_node *, 128);
1310 env.quad_map = NULL;
1311 env.lftr_edges = NULL;
1313 env.lftr_replaced = 0;
1315 env.need_postpass = 0;
1316 env.process_scc = process_phi_only_scc;
/*
 * Clear all links and move Proj nodes into the
 * same block as their predecessors.
 * This can improve the placement of new nodes.
 */
1322 irg_walk_graph(irg, NULL, firm_clear_link, NULL);
1324 /* calculate the post order number for blocks. */
1325 irg_out_block_walk(get_irg_start_block(irg), NULL, assign_po, &env);
1327 /* calculate the SCC's and drive OSR. */
1328 ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK);
1330 ir_free_resources(irg, IR_RESOURCE_IRN_LINK);
1333 DB((dbg, LEVEL_1, "remove_phi_cycles: %u Cycles removed\n\n",
/* release all per-run memory */
1337 DEL_ARR_F(env.stack);
1338 obstack_free(&env.obst, NULL);
1340 confirm_irg_properties(irg, IR_GRAPH_PROPERTIES_CONTROL_FLOW);
1343 ir_graph_pass_t *remove_phi_cycles_pass(const char *name)
1345 return def_graph_pass(name ? name : "remove_phi_cycles", remove_phi_cycles);
1346 } /* remove_phi_cycles_pass */
1349 * Post-walker: fix Add and Sub nodes that where results of I<->P conversions.
1351 static void fix_adds_and_subs(ir_node *irn, void *ctx)
1356 ir_mode *mode = get_irn_mode(irn);
1358 if (mode_is_int(mode)) {
1361 pred = get_Add_left(irn);
1362 if (get_irn_mode(pred) != mode) {
1363 ir_node *block = get_nodes_block(pred);
1365 pred = new_r_Conv(block, pred, mode);
1366 set_Add_left(irn, pred);
1368 pred = get_Add_right(irn);
1369 if (get_irn_mode(pred) != mode) {
1370 ir_node *block = get_nodes_block(pred);
1372 pred = new_r_Conv(block, pred, mode);
1373 set_Add_right(irn, pred);
1376 } else if (is_Sub(irn)) {
1377 ir_mode *mode = get_irn_mode(irn);
1379 if (mode_is_int(mode)) {
1380 ir_node *left = get_Sub_left(irn);
1381 ir_node *right = get_Sub_right(irn);
1382 ir_mode *l_mode = get_irn_mode(left);
1383 ir_mode *r_mode = get_irn_mode(right);
1385 if (mode_is_int(l_mode) && mode_is_int(r_mode)) {
1386 if (l_mode != mode) {
1387 ir_node *block = get_nodes_block(left);
1389 left = new_r_Conv(block, left, mode);
1390 set_Sub_left(irn, left);
1392 if (r_mode != mode) {
1393 ir_node *block = get_nodes_block(right);
1395 right = new_r_Conv(block, right, mode);
1396 set_Sub_right(irn, right);
1399 } else if (mode_is_reference(mode)) {
1400 ir_node *left = get_Sub_left(irn);
1401 ir_node *right = get_Sub_right(irn);
1402 ir_mode *l_mode = get_irn_mode(left);
1403 ir_mode *r_mode = get_irn_mode(right);
1404 if (mode_is_int(l_mode)) {
1405 /* Usually, Sub(I*,P) is an error, hence the verifier rejects it.
1406 * However, it is correct in this case, so add Conv to make verifier happy. */
1407 ir_node *block = get_nodes_block(right);
1408 ir_node *lconv = new_r_Conv(block, left, r_mode);
1409 assert(mode_is_reference(r_mode));
1410 set_Sub_left(irn, lconv);
1414 } /* fix_adds_and_subs */
/*
 * Performs Operator Strength Reduction for the passed graph:
 * find induction variables via Tarjan SCCs, reduce iv (+,-,*) rc
 * operations, optionally fix mixed-mode Add/Sub afterwards, and
 * finally attempt linear function test replacement.
 */
1417 void opt_osr(ir_graph *irg, unsigned flags)
1421 FIRM_DBG_REGISTER(dbg, "firm.opt.osr");
1423 assure_irg_properties(irg,
1424 IR_GRAPH_PROPERTY_CONSISTENT_DOMINANCE
1425 | IR_GRAPH_PROPERTY_CONSISTENT_OUTS
1426 | IR_GRAPH_PROPERTY_CONSISTENT_OUT_EDGES);
1428 DB((dbg, LEVEL_1, "Doing Operator Strength Reduction for %+F\n", irg));
/* build the full OSR environment, including the quad and LFTR maps */
1430 obstack_init(&env.obst);
1431 env.stack = NEW_ARR_F(ir_node *, 128);
1435 env.quad_map = new_set(quad_cmp, 64);
1436 env.lftr_edges = new_set(LFTR_cmp, 64);
1438 env.lftr_replaced = 0;
1439 env.osr_flags = flags;
1440 env.need_postpass = 0;
1441 env.process_scc = process_scc;
/*
 * Clear all links and move Proj nodes into the
 * same block as their predecessors.
 * This can improve the placement of new nodes.
 */
1447 irg_walk_graph(irg, NULL, firm_clear_link, NULL);
/* calculate the post order number for blocks */
1449 irg_block_edges_walk(get_irg_start_block(irg), NULL, assign_po, &env);
1451 /* calculate the SCC's and drive OSR. */
1452 ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK);
/* fix AddIs(Iu,Is)-style nodes created by P<->I mode changes in reduce() */
1456 if (env.need_postpass)
1457 irg_walk_graph(irg, NULL, fix_adds_and_subs, &env);
1459 /* try linear function test replacements */
1463 DB((dbg, LEVEL_1, "Replacements: %u + %u (lftr)\n\n", env.replaced, env.lftr_replaced));
1465 ir_free_resources(irg, IR_RESOURCE_IRN_LINK);
/* release all per-run memory */
1467 del_set(env.lftr_edges);
1468 del_set(env.quad_map);
1469 DEL_ARR_F(env.stack);
1470 obstack_free(&env.obst, NULL);
1472 confirm_irg_properties(irg, IR_GRAPH_PROPERTIES_NONE);
1475 typedef struct pass_t {
1476 ir_graph_pass_t pass;
1481 * Wrapper for running opt_osr() as an ir_graph pass.
1483 static int pass_wrapper(ir_graph *irg, void *context)
1485 pass_t *pass = (pass_t*)context;
1486 opt_osr(irg, pass->flags);
1488 } /* pass_wrapper */
1490 ir_graph_pass_t *opt_osr_pass(const char *name, unsigned flags)
1492 pass_t *pass = XMALLOCZ(pass_t);
1494 pass->flags = flags;
1495 return def_graph_pass_constructor(
1496 &pass->pass, name ? name : "osr", pass_wrapper);
1497 } /* opt_osr_pass */