2 * Copyright (C) 1995-2009 University of Karlsruhe. All rights reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief analyze graph to provide value range information
30 #include "irgraph_t.h"
39 static void *VISITED = &v;
41 typedef struct worklist_t worklist_t;
43 struct list_head nodes;
51 int update_vrp_data( ir_node *node)
54 tarval *new_bits_set = get_tarval_bad();
55 tarval *new_bits_not_set = get_tarval_bad();
56 tarval *new_range_bottom = get_tarval_bad();
57 tarval *new_range_top = get_tarval_bad();
58 ir_node *new_bits_node = NULL;
59 ir_node *new_range_node = NULL;
60 enum range_types new_range_type = VRP_UNDEFINED;
61 enum range_ops new_range_op = VRP_NONE;
62 int something_changed = 0;
65 // TODO: Check if all predecessors have valid VRP information
68 if (!mode_is_int(get_irn_mode(node))) {
69 return 0; // we don't optimize for non-int-nodes
73 tarval *tv = get_Const_tarval(node);
76 new_bits_not_set = tarval_not(tv);
77 new_range_bottom = tv;
79 new_range_type = VRP_RANGE;
80 } else if (is_And(node)) {
81 ir_node *pred0 = get_And_left(node);
82 ir_node *pred1 = get_And_right(node);
85 new_bits_set = tarval_and(pred0->vrp.bits_set, pred1->vrp.bits_set);
86 new_bits_not_set = tarval_or(pred0->vrp.bits_not_set, pred1->vrp.bits_not_set);
88 tmp = tarval_not(pred0->vrp.bits_set);
89 tmp = tarval_eor(pred0->vrp.bits_not_set, tmp);
90 //check if one of the predecessors is completely determined
91 if (tarval_is_null(tmp)) {
92 new_bits_node = pred1;
95 tmp = tarval_not(pred1->vrp.bits_set);
96 tmp = tarval_eor(pred1->vrp.bits_not_set, tmp);
97 if (tarval_is_null(tmp)) {
98 new_bits_node = pred0;
100 } else if (is_Add(node)) {
101 ir_node *pred0 = get_Add_left(node);
102 ir_node *pred1 = get_Add_right(node);
103 int overflow_top, overflow_bottom;
104 tarval *new_top, *new_bottom;
106 if (pred0->vrp.range_type == VRP_UNDEFINED || pred1->vrp.range_type ==
107 VRP_UNDEFINED || pred0->vrp.range_type == VRP_VARYING ||
108 pred1->vrp.range_type == VRP_VARYING) {
112 new_top = tarval_add(pred0->vrp.range_top, pred1->vrp.range_top);
113 overflow_top = tarval_carry();
114 new_bottom = tarval_add(pred0->vrp.range_bottom, pred1->vrp.range_bottom);
115 overflow_bottom = tarval_carry();
117 if (!overflow_top && !overflow_bottom && pred0->vrp.range_type == VRP_RANGE
118 &&pred1->vrp.range_type == VRP_RANGE) {
119 new_range_bottom = new_bottom;
120 new_range_top = new_top;
121 new_range_type = VRP_RANGE;
124 if (overflow_top || overflow_bottom) {
125 // TODO Implement overflow handling
126 new_range_type = VRP_UNDEFINED;
128 } else if (is_Sub(node)) {
129 ir_node *pred0 = get_Sub_left(node);
130 ir_node *pred1 = get_Sub_right(node);
131 int overflow_top, overflow_bottom;
132 tarval *new_top, *new_bottom;
134 if (pred0->vrp.range_type == VRP_UNDEFINED || pred1->vrp.range_type ==
139 new_top = tarval_sub(pred0->vrp.range_top, pred1->vrp.range_top, NULL);
140 overflow_top = tarval_carry();
141 new_bottom = tarval_sub(pred0->vrp.range_bottom, pred1->vrp.range_bottom, NULL);
142 overflow_bottom = tarval_carry();
144 if (!overflow_top && !overflow_bottom && pred0->vrp.range_type == VRP_RANGE
145 &&pred1->vrp.range_type == VRP_RANGE) {
146 new_range_bottom = new_bottom;
147 new_range_top = new_top;
148 new_range_type = VRP_RANGE;
151 if (overflow_top || overflow_bottom) {
152 // TODO Implement overflow handling
154 } else if (is_Or(node)) {
155 ir_node *a = get_Or_left(node);
156 ir_node *b = get_Or_right(node);
159 new_bits_set = tarval_or(a->vrp.bits_set, b->vrp.bits_set);
160 new_bits_not_set = tarval_and(a->vrp.bits_not_set, b->vrp.bits_not_set);
162 tmp = tarval_not(a->vrp.bits_set);
163 tmp = tarval_eor(a->vrp.bits_not_set, tmp);
164 //check if one of the predecessors is completely determined
165 if (tarval_is_null(tmp)) {
169 tmp = tarval_not(b->vrp.bits_set);
170 tmp = tarval_eor(b->vrp.bits_not_set, tmp);
171 if (tarval_is_null(tmp)) {
175 } else if (is_Rotl(node)) {
176 ir_node *a = get_Rotl_left(node);
177 ir_node *b = get_Rotl_right(node);
179 // We can only compute this if the right value is a constant
181 tarval *bits_set, *bits_not_set;
182 bits_set = tarval_rotl(a->vrp.bits_set, get_Const_tarval(b));
183 bits_not_set = tarval_rotl(a->vrp.bits_not_set, get_Const_tarval(b));
185 new_bits_set = tarval_or(bits_set, node->vrp.bits_set);
186 new_bits_not_set = tarval_or(bits_not_set, node->vrp.bits_not_set);
189 } else if (is_Shl(node)) {
190 ir_node *a = get_Shl_left(node);
191 ir_node *b = get_Shl_right(node);
193 // We can only compute this if the right value is a constant
195 tarval *bits_set, *bits_not_set;
196 ir_mode *m = get_tarval_mode(node->vrp.bits_not_set);
197 bits_set = tarval_shl(a->vrp.bits_set, get_Const_tarval(b));
198 bits_not_set = tarval_shl(a->vrp.bits_not_set, get_Const_tarval(b));
200 new_bits_set = tarval_or(bits_set, node->vrp.bits_set);
201 new_bits_not_set = tarval_or(bits_not_set, node->vrp.bits_not_set);
203 bits_not_set = tarval_not( tarval_shl(
204 get_tarval_all_one(m),
205 get_Const_tarval(b)));
206 new_bits_not_set = tarval_or(bits_not_set, new_bits_not_set);
210 } else if (is_Shr(node)) {
211 ir_node *a = get_Shr_left(node);
212 ir_node *b = get_Shr_right(node);
214 // We can only compute this if the right value is a constant
216 tarval *bits_set, *bits_not_set;
217 ir_mode *m = get_tarval_mode(node->vrp.bits_not_set);
218 bits_set = tarval_shr(a->vrp.bits_set, get_Const_tarval(b));
219 bits_not_set = tarval_shr(a->vrp.bits_not_set, get_Const_tarval(b));
221 new_bits_set = tarval_or(bits_set, node->vrp.bits_set);
222 new_bits_not_set = tarval_or(bits_not_set, node->vrp.bits_not_set);
224 bits_not_set = tarval_not( tarval_shr(
225 get_tarval_all_one(m),
226 get_Const_tarval(b)));
227 new_bits_not_set = tarval_or(bits_not_set, new_bits_not_set);
230 } else if (is_Shrs(node)) {
231 ir_node *a = get_Shrs_left(node);
232 ir_node *b = get_Shrs_right(node);
234 // We can only compute this if the right value is a constant
236 tarval *bits_set, *bits_not_set;
237 ir_mode *m = get_tarval_mode(node->vrp.bits_not_set);
238 bits_set = tarval_shrs(a->vrp.bits_set, get_Const_tarval(b));
239 bits_not_set = tarval_shrs(a->vrp.bits_not_set, get_Const_tarval(b));
241 new_bits_set = tarval_or(bits_set, node->vrp.bits_set);
242 new_bits_not_set = tarval_or(bits_not_set, node->vrp.bits_not_set);
244 bits_not_set = tarval_not( tarval_shrs(
245 get_tarval_all_one(m),
246 get_Const_tarval(b)));
247 new_bits_not_set = tarval_or(bits_not_set, new_bits_not_set);
250 } else if (is_Eor(node)) {
251 ir_node *a = get_Eor_left(node);
252 ir_node *b = get_Eor_right(node);
254 tarval *bits_set, *bits_not_set;
255 bits_not_set = tarval_or(
256 tarval_and(a->vrp.bits_set, b->vrp.bits_set),
257 tarval_and(a->vrp.bits_not_set,
258 b->vrp.bits_not_set));
260 bits_set = tarval_or(
261 tarval_and(a->vrp.bits_set, b->vrp.bits_not_set),
262 tarval_and(a->vrp.bits_not_set, b->vrp.bits_set));
264 new_bits_set = tarval_or(bits_set, node->vrp.bits_set);
265 new_bits_not_set = tarval_or(bits_not_set, node->vrp.bits_not_set);
267 } else if (is_Id(node)) {
268 ir_node *pred = get_Id_pred(node);
269 new_bits_set = pred->vrp.bits_set;
270 new_bits_not_set = pred->vrp.bits_not_set;
271 new_range_top = pred->vrp.range_top;
272 new_range_bottom = pred->vrp.range_bottom;
273 new_range_type = pred->vrp.range_type;
275 } else if (is_Not(node)) {
276 ir_node *pred = get_Not_op(node);
277 new_bits_set = tarval_or(pred->vrp.bits_not_set, node->vrp.bits_set);
278 new_bits_not_set = tarval_or(pred->vrp.bits_set, node->vrp.bits_not_set);
280 } else if (is_Conv(node)) {
281 ir_node *pred = get_Conv_op(node);
282 ir_mode *old_mode = get_irn_mode(pred);
284 tarval *bits_not_set;
286 if (!mode_is_int(old_mode))
289 new_mode = get_irn_mode(node);
291 // The second and is needed if target type is smaller
292 bits_not_set = tarval_not(
293 tarval_convert_to(get_tarval_all_one(old_mode),
296 bits_not_set = tarval_or(bits_not_set, tarval_convert_to(pred->vrp.bits_not_set, new_mode));
297 new_bits_not_set = tarval_or(bits_not_set, node->vrp.bits_not_set);
298 new_bits_set = tarval_and(
299 tarval_not(bits_not_set), tarval_convert_to(pred->vrp.bits_set, new_mode));
301 if (tarval_cmp(pred->vrp.range_top, get_tarval_max(new_mode)) == pn_Cmp_Le) {
302 node->vrp.range_top = pred->vrp.range_top;
305 if (tarval_cmp(pred->vrp.range_bottom, get_tarval_min(new_mode)) == pn_Cmp_Ge) {
306 node->vrp.range_bottom = pred->vrp.range_bottom;
309 } else if (is_Confirm(node)) {
310 pn_Cmp cmp = get_Confirm_cmp(node);
311 ir_node *bound = get_Confirm_bound(node);
313 /** @todo: Handle non-Const bounds */
315 if (cmp == pn_Cmp_Lg) {
316 /** @todo: Is there some way to preserve the information? */
317 new_range_type = VRP_ANTIRANGE;
318 if (is_Const(bound)) {
319 new_range_top = get_Const_tarval(bound);
320 new_range_bottom = get_Const_tarval(bound);
322 } else if (cmp == pn_Cmp_Le) {
323 if (node->vrp.range_type == VRP_UNDEFINED) {
324 new_range_type = VRP_RANGE;
325 if (is_Const(bound)) {
326 new_range_top = get_Const_tarval(bound);
328 new_range_bottom = get_tarval_min(get_irn_mode(node));
329 } else if (node->vrp.range_type == VRP_RANGE) {
330 if (is_Const(bound)) {
331 if (tarval_cmp(node->vrp.range_top,
332 get_Const_tarval(bound)) == pn_Cmp_Le) {
333 new_range_top = get_Const_tarval(bound);
335 new_range_bottom = get_tarval_min(get_irn_mode(node));
337 } else if (node->vrp.range_type == VRP_ANTIRANGE) {
338 /** @todo: How do we manage not to get a never ending loop? */
343 } else if (is_Phi(node)) {
344 // combine all ranges
346 int num = get_Phi_n_preds(node);
349 tarval *range_top, *range_bottom, *bits_set, *bits_not_set;
350 enum range_types range_type;
354 pred = get_Phi_pred(node,0);
355 range_top = pred->vrp.range_top;
356 range_bottom = pred->vrp.range_bottom;
357 range_type = pred->vrp.range_type;
358 bits_set = pred->vrp.bits_set;
359 bits_not_set = pred->vrp.bits_not_set;
361 for(i = 1; i < num; i++) {
362 pred = get_Phi_pred(node, i);
363 if (range_type == VRP_RANGE && pred->vrp.range_type ==
365 cmp = tarval_cmp(range_top, pred->vrp.range_top);
366 if (cmp == pn_Cmp_Lt) {
367 range_top = pred->vrp.range_top;
369 cmp = tarval_cmp(range_bottom, pred->vrp.range_bottom);
370 if (cmp == pn_Cmp_Gt) {
371 range_bottom = pred->vrp.range_bottom;
374 range_type = VRP_VARYING;
379 new_range_type = range_type;
380 new_range_top = range_top;
381 new_range_bottom = range_bottom;
384 // unhandled, therefore never updated
390 /* TODO: Check, if there can be information derived from any of these:
391 is_Abs(node) is_Alloc(node) is_Anchor(node) is_Borrow(node) is_Bound(node)
392 is_Break(node) is_Builtin(node) is_Call(node) is_CallBegin(node)
393 is_Carry(node) is_Cast(node) is_Cmp(node) is_Cond(node)
394 is_CopyB(node) is_Div(node) is_DivMod(node) is_Dummy(node)
395 is_End(node) is_EndExcept(node) is_EndReg(node) is_Filter(node) is_Free(node)
396 is_IJmp(node) is_InstOf(node) is_Jmp(node) is_Load(node) is_Minus(node)
397 is_Mod(node) is_Mul(node) is_Mulh(node) is_Mux(node) is_NoMem(node)
398 is_Pin(node) is_Proj(node) is_Quot(node)
399 is_Raise(node) is_Return(node) is_Sel(node) is_Start(node) is_Store(node)
400 is_SymConst(node) is_Sync(node) is_Tuple(node)
403 // Merge the newly calculated values with those that might already exist
405 if (new_bits_set != tarval_bad) {
406 new_bits_set = tarval_or(new_bits_set, node->vrp.bits_set);
407 if (tarval_cmp(new_bits_set, node->vrp.bits_set) != pn_Cmp_Eq) {
408 something_changed = 1;
409 node->vrp.bits_set = new_bits_set;
413 if (new_bits_not_set != tarval_bad) {
414 new_bits_not_set = tarval_or(new_bits_not_set, node->vrp.bits_not_set);
416 if (tarval_cmp(new_bits_not_set, node->vrp.bits_not_set) != pn_Cmp_Eq) {
417 something_changed = 1;
418 node->vrp.bits_not_set = new_bits_not_set;
422 if (node->vrp.bits_node == NULL && new_bits_node != NULL) {
423 something_changed = 1;
424 node->vrp.bits_node = new_bits_node;
427 if (node->vrp.range_type == VRP_UNDEFINED &&
428 new_range_type != VRP_UNDEFINED) {
429 something_changed = 1;
430 node->vrp.range_type = new_range_type;
431 node->vrp.range_bottom = new_range_bottom;
432 node->vrp.range_top = new_range_top;
433 node->vrp.range_op = new_range_op;
434 node->vrp.range_node = new_range_node;
436 } else if (node->vrp.range_type == VRP_RANGE) {
437 if (new_range_type == VRP_RANGE) {
438 if ((new_range_node == NULL && node->vrp.range_node == NULL) ||
439 (new_range_node == node->vrp.range_node &&
440 new_range_op == node->vrp.range_op)) {
441 if (tarval_cmp(node->vrp.range_bottom, new_range_bottom) == pn_Cmp_Lt) {
442 something_changed = 1;
443 node->vrp.range_bottom = new_range_bottom;
445 if (tarval_cmp(node->vrp.range_top, new_range_top) == pn_Cmp_Gt) {
446 something_changed = 1;
447 node->vrp.range_top = new_range_top;
451 // prefer the absolute value
452 if (new_range_node == NULL && node->vrp.range_node != NULL) {
453 something_changed = 1;
454 node->vrp.range_node = NULL;
455 node->vrp.range_top = new_range_top;
456 node->vrp.range_bottom = new_range_bottom;
460 if (new_range_type == VRP_ANTIRANGE) {
461 // if they are overlapping, cut the range.
462 // TODO: Maybe we can preserve more information here
463 if (new_range_node == NULL && node->vrp.range_node == NULL) {
464 if (tarval_cmp(node->vrp.range_bottom, new_range_top) == pn_Cmp_Gt &&
465 tarval_cmp(node->vrp.range_bottom, new_range_bottom) == pn_Cmp_Gt) {
466 something_changed = 1;
467 node->vrp.range_bottom = new_range_top;
469 } else if (tarval_cmp(node->vrp.range_top, new_range_bottom) == pn_Cmp_Gt &&
470 tarval_cmp(node->vrp.range_top, new_range_top) == pn_Cmp_Lt) {
471 something_changed = 1;
472 node->vrp.range_top = new_range_bottom;
475 // We can not handle the case where the anti range is in the
479 // prefer the absolute value
480 if (new_range_node == NULL && node->vrp.range_node != NULL) {
481 something_changed = 1;
482 node->vrp.range_node = NULL;
483 node->vrp.range_top = new_range_top;
484 node->vrp.range_bottom = new_range_bottom;
487 } else if (node->vrp.range_type == VRP_ANTIRANGE) {
488 if (new_range_type == VRP_ANTIRANGE) {
489 if ((new_range_node == NULL && node->vrp.range_node == NULL) ||
490 (new_range_node == node->vrp.range_node &&
491 new_range_op == node->vrp.range_op)) {
492 if (tarval_cmp(node->vrp.range_bottom, new_range_bottom) == pn_Cmp_Gt) {
493 something_changed = 1;
494 node->vrp.range_bottom = new_range_bottom;
496 if (tarval_cmp(node->vrp.range_top, new_range_top) == pn_Cmp_Lt) {
497 something_changed = 1;
498 node->vrp.range_top = new_range_top;
502 // prefer the absolute value
503 if (new_range_node == NULL && node->vrp.range_node != NULL) {
504 something_changed = 1;
505 node->vrp.range_node = NULL;
506 node->vrp.range_top = new_range_top;
507 node->vrp.range_bottom = new_range_bottom;
511 if (new_range_type == VRP_RANGE) {
512 if ((new_range_node == NULL && node->vrp.range_node == NULL) ||
513 (new_range_node == node->vrp.range_node &&
514 new_range_op == node->vrp.range_op)) {
515 if (tarval_cmp(node->vrp.range_bottom, new_range_top) == pn_Cmp_Gt) {
516 something_changed = 1;
517 node->vrp.range_bottom = new_range_top;
519 if (tarval_cmp(node->vrp.range_top, new_range_bottom) == pn_Cmp_Lt) {
520 something_changed = 1;
521 node->vrp.range_top = new_range_bottom;
525 // prefer the absolute value
526 if (new_range_node == NULL && node->vrp.range_node != NULL) {
527 something_changed = 1;
528 node->vrp.range_node = NULL;
529 node->vrp.range_top = new_range_top;
530 node->vrp.range_bottom = new_range_bottom;
535 assert(tarval_is_null(
536 tarval_and(node->vrp.bits_set, node->vrp.bits_not_set)));
538 return something_changed;
541 void vrp_first_pass(ir_node *n, void *e)
544 worklist_t *tmp_entry;
546 struct vrp_env_t *env = e;
551 set_irn_link(n, VISITED);
555 for (i = get_irn_n_outs(n) - 1; i >=0; --i) {
556 succ = get_irn_out(n, i);
557 if (get_irn_link(succ) == VISITED) {
560 tmp_entry = XMALLOC(worklist_t);
562 list_add(&(tmp_entry->nodes), &(env->worklist->nodes));
570 void set_vrp_data(ir_graph *irg)
576 worklist_t *tmp_entry, *tmp_entry2;
577 struct vrp_env_t env;
584 assure_irg_outs(irg); // ensure that out edges are consistent
586 // edges_activate(irg);
588 INIT_LIST_HEAD(&worklist.nodes);
590 env.worklist = &worklist;
591 irg_walk_graph(irg, NULL, vrp_first_pass, &env);
595 // while there are entries in the worklist, continue
596 while( !list_empty(&worklist.nodes) ) {
598 list_head *pos, *next;
599 list_for_each_safe(pos, next, &worklist.nodes) {
601 tmp_entry = list_entry(pos, worklist_t, nodes);
603 if (update_vrp_data(tmp_entry->node)) {
604 // if something changed, add successors to worklist
605 for (i = get_irn_n_outs(tmp_entry->node) - 1; i >=0; --i) {
606 succ = get_irn_out(tmp_entry->node, i);
608 tmp_entry2 = XMALLOC(worklist_t);
609 tmp_entry2->node = succ;
610 list_add(&(tmp_entry2->nodes), &worklist.nodes);
621 ir_graph_pass_t *set_vrp_pass(const char *name)
623 return def_graph_pass(name ? name : "set_vrp", set_vrp_data);
626 pn_Cmp vrp_cmp(ir_node *left, ir_node *right)
628 if (!left->vrp.valid || !right->vrp.valid) {
632 if (left->vrp.range_type == VRP_RANGE && right->vrp.range_type == VRP_RANGE) {
633 if (tarval_cmp(left->vrp.range_top, right->vrp.range_bottom) == pn_Cmp_Lt) {
636 if (tarval_cmp(left->vrp.range_bottom, right->vrp.range_top) == pn_Cmp_Gt) {
641 if (!tarval_is_null(tarval_and(left->vrp.bits_set, right->vrp.bits_not_set)) ||
642 !tarval_is_null(tarval_and(left->vrp.bits_not_set, right->vrp.bits_set))) {
645 // TODO: We can get way more information here