2 * Copyright (C) 1995-2009 University of Karlsruhe. All right reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief analyze graph to provide value range information
30 #include "irgraph_t.h"
// Sentinel value stored in a node's link field to mark it as already
// visited by the first vrp pass (compared by address, see vrp_first_pass).
// NOTE(review): refers to `v`, whose definition is on an elided line of
// this listing (presumably a static dummy variable) — TODO confirm.
39 static void *VISITED = &v;
// Worklist entry type for the fixpoint iteration; entries are linked
// intrusively via `nodes`.
41 typedef struct worklist_t worklist_t;
// NOTE(review): the enclosing `struct worklist_t { ... };` braces and the
// `ir_node *node;` member used by set_vrp_data() are not visible in this
// listing (elided lines).
43 struct list_head nodes;
/**
 * Re-evaluate the value-range-propagation (vrp) data attached to a node
 * from the vrp data already computed for its predecessors.
 *
 * Two kinds of facts are derived for integer-mode nodes:
 *  - bit information: which bits are definitely set (vrp.bits_set) and
 *    which are definitely clear (vrp.bits_not_set);
 *  - range information: the interval [range_bottom, range_top], whose
 *    interpretation depends on range_type (VRP_RANGE = value inside,
 *    VRP_ANTIRANGE = value outside, VRP_UNDEFINED / VRP_VARYING = no
 *    usable interval).
 * Newly derived facts are merged monotonically into the node's existing
 * vrp data, so information only ever gets more precise — required for the
 * termination of the fixpoint iteration in set_vrp_data().
 *
 * NOTE(review): this listing has elided lines (several branches lack
 * closing braces; declarations such as "tarval *tmp;" are not visible).
 * The comments below describe only the statements that are shown.
 *
 * @param node  the node whose vrp information should be updated
 * @return non-zero iff some vrp field of the node changed
 */
51 int update_vrp_data( ir_node *node) {
/* Start from "no knowledge": tarval_bad / VRP_UNDEFINED mark facts that
 * could not be derived for this node in this iteration. */
53 tarval *new_bits_set = get_tarval_bad();
54 tarval *new_bits_not_set = get_tarval_bad();
55 tarval *new_range_bottom = get_tarval_bad();
56 tarval *new_range_top = get_tarval_bad();
57 ir_node *new_bits_node = NULL;
58 ir_node *new_range_node = NULL;
59 enum range_types new_range_type = VRP_UNDEFINED;
60 enum range_ops new_range_op = VRP_NONE;
61 int something_changed = 0;
64 // TODO: Check if all predecessors have valid VRP information
/* vrp only reasons about integer values */
67 if (!mode_is_int(get_irn_mode(node))) {
68 return 0; // we don't optimize for non-int-nodes
/* Const: the value is fully known, so every bit and a degenerate
 * single-value range are exact. */
72 tarval *tv = get_Const_tarval(node);
75 new_bits_not_set = tarval_not(tv);
76 new_range_bottom = tv;
78 new_range_type = VRP_RANGE;
/* And: a result bit is set only if set in both operands, and clear if
 * clear in either operand. */
79 } else if (is_And(node)) {
80 ir_node *pred0 = get_And_left(node);
81 ir_node *pred1 = get_And_right(node);
84 new_bits_set = tarval_and(pred0->vrp.bits_set, pred1->vrp.bits_set);
85 new_bits_not_set = tarval_or(pred0->vrp.bits_not_set, pred1->vrp.bits_not_set);
/* tmp becomes 0 iff every bit of pred0 is known, i.e. each bit appears
 * in either bits_set or bits_not_set */
87 tmp = tarval_not(pred0->vrp.bits_set);
88 tmp = tarval_eor(pred0->vrp.bits_not_set, tmp);
89 //check if one of the predecessors is completely determined
90 if (tarval_is_null(tmp)) {
91 new_bits_node = pred1;
/* same completeness check for pred1 */
94 tmp = tarval_not(pred1->vrp.bits_set);
95 tmp = tarval_eor(pred1->vrp.bits_not_set, tmp);
96 if (tarval_is_null(tmp)) {
97 new_bits_node = pred0;
/* Add: sum the operand bounds; only usable when both operands have a
 * proper range and neither bound computation overflowed. */
99 } else if (is_Add(node)) {
100 ir_node *pred0 = get_Add_left(node);
101 ir_node *pred1 = get_Add_right(node);
102 int overflow_top, overflow_bottom;
103 tarval *new_top, *new_bottom;
/* without range information on both operands nothing can be concluded */
105 if (pred0->vrp.range_type == VRP_UNDEFINED || pred1->vrp.range_type ==
106 VRP_UNDEFINED || pred0->vrp.range_type == VRP_VARYING ||
107 pred1->vrp.range_type == VRP_VARYING) {
/* tarval_carry() reports whether the immediately preceding tarval
 * arithmetic operation overflowed */
111 new_top = tarval_add(pred0->vrp.range_top, pred1->vrp.range_top);
112 overflow_top = tarval_carry();
113 new_bottom = tarval_add(pred0->vrp.range_bottom, pred1->vrp.range_bottom);
114 overflow_bottom = tarval_carry();
116 if (!overflow_top && !overflow_bottom && pred0->vrp.range_type == VRP_RANGE
117 &&pred1->vrp.range_type == VRP_RANGE) {
118 new_range_bottom = new_bottom;
119 new_range_top = new_top;
120 new_range_type = VRP_RANGE;
123 if (overflow_top || overflow_bottom) {
124 // TODO Implement overflow handling
125 new_range_type = VRP_UNDEFINED;
/* Sub: analogous to Add, subtracting the bounds pairwise */
127 } else if (is_Sub(node)) {
128 ir_node *pred0 = get_Sub_left(node);
129 ir_node *pred1 = get_Sub_right(node);
130 int overflow_top, overflow_bottom;
131 tarval *new_top, *new_bottom;
133 if (pred0->vrp.range_type == VRP_UNDEFINED || pred1->vrp.range_type ==
138 new_top = tarval_sub(pred0->vrp.range_top, pred1->vrp.range_top, NULL);
139 overflow_top = tarval_carry();
140 new_bottom = tarval_sub(pred0->vrp.range_bottom, pred1->vrp.range_bottom, NULL);
141 overflow_bottom = tarval_carry();
143 if (!overflow_top && !overflow_bottom && pred0->vrp.range_type == VRP_RANGE
144 &&pred1->vrp.range_type == VRP_RANGE) {
145 new_range_bottom = new_bottom;
146 new_range_top = new_top;
147 new_range_type = VRP_RANGE;
150 if (overflow_top || overflow_bottom) {
151 // TODO Implement overflow handling
/* Or: a bit is set if set in either operand, and clear only if clear in
 * both operands. */
153 } else if (is_Or(node)) {
154 ir_node *a = get_Or_left(node);
155 ir_node *b = get_Or_right(node);
158 new_bits_set = tarval_or(a->vrp.bits_set, b->vrp.bits_set);
159 new_bits_not_set = tarval_and(a->vrp.bits_not_set, b->vrp.bits_not_set);
/* completeness check, same idea as in the And case */
161 tmp = tarval_not(a->vrp.bits_set);
162 tmp = tarval_eor(a->vrp.bits_not_set, tmp);
163 //check if one of the predecessors is completely determined
164 if (tarval_is_null(tmp)) {
168 tmp = tarval_not(b->vrp.bits_set);
169 tmp = tarval_eor(b->vrp.bits_not_set, tmp);
170 if (tarval_is_null(tmp)) {
/* Rotl: rotate both known-bit masks by the constant amount */
174 } else if (is_Rotl(node)) {
175 ir_node *a = get_Rotl_left(node);
176 ir_node *b = get_Rotl_right(node);
178 // We can only compute this if the right value is a constant
180 tarval *bits_set, *bits_not_set;
181 bits_set = tarval_rotl(a->vrp.bits_set, get_Const_tarval(b));
182 bits_not_set = tarval_rotl(a->vrp.bits_not_set, get_Const_tarval(b));
184 new_bits_set = tarval_or(bits_set, node->vrp.bits_set);
185 new_bits_not_set = tarval_or(bits_not_set, node->vrp.bits_not_set);
/* Shl: shift the masks; additionally the freshly shifted-in low bits
 * are known to be zero. */
188 } else if (is_Shl(node)) {
189 ir_node *a = get_Shl_left(node);
190 ir_node *b = get_Shl_right(node);
192 // We can only compute this if the right value is a constant
194 tarval *bits_set, *bits_not_set;
195 ir_mode *m = get_tarval_mode(node->vrp.bits_not_set);
196 bits_set = tarval_shl(a->vrp.bits_set, get_Const_tarval(b));
197 bits_not_set = tarval_shl(a->vrp.bits_not_set, get_Const_tarval(b));
199 new_bits_set = tarval_or(bits_set, node->vrp.bits_set);
200 new_bits_not_set = tarval_or(bits_not_set, node->vrp.bits_not_set);
/* ~(all_one << b) is a mask of the shifted-in (known-zero) bit
 * positions */
202 bits_not_set = tarval_not( tarval_shl(
203 get_tarval_all_one(m),
204 get_Const_tarval(b)));
205 new_bits_not_set = tarval_or(bits_not_set, new_bits_not_set);
/* Shr: like Shl, but the shifted-in high bits are the known zeros */
209 } else if (is_Shr(node)) {
210 ir_node *a = get_Shr_left(node);
211 ir_node *b = get_Shr_right(node);
213 // We can only compute this if the right value is a constant
215 tarval *bits_set, *bits_not_set;
216 ir_mode *m = get_tarval_mode(node->vrp.bits_not_set);
217 bits_set = tarval_shr(a->vrp.bits_set, get_Const_tarval(b));
218 bits_not_set = tarval_shr(a->vrp.bits_not_set, get_Const_tarval(b));
220 new_bits_set = tarval_or(bits_set, node->vrp.bits_set);
221 new_bits_not_set = tarval_or(bits_not_set, node->vrp.bits_not_set);
223 bits_not_set = tarval_not( tarval_shr(
224 get_tarval_all_one(m),
225 get_Const_tarval(b)));
226 new_bits_not_set = tarval_or(bits_not_set, new_bits_not_set);
/* Shrs: arithmetic right shift of the masks.
 * NOTE(review): arithmetically right-shifting an all-one value yields
 * all ones again, so the tarval_not(...) term below is always zero and
 * contributes no information — TODO confirm whether this branch was
 * meant to mirror the Shr logic. */
229 } else if (is_Shrs(node)) {
230 ir_node *a = get_Shrs_left(node);
231 ir_node *b = get_Shrs_right(node);
233 // We can only compute this if the right value is a constant
235 tarval *bits_set, *bits_not_set;
236 ir_mode *m = get_tarval_mode(node->vrp.bits_not_set);
237 bits_set = tarval_shrs(a->vrp.bits_set, get_Const_tarval(b));
238 bits_not_set = tarval_shrs(a->vrp.bits_not_set, get_Const_tarval(b));
240 new_bits_set = tarval_or(bits_set, node->vrp.bits_set);
241 new_bits_not_set = tarval_or(bits_not_set, node->vrp.bits_not_set);
243 bits_not_set = tarval_not( tarval_shrs(
244 get_tarval_all_one(m),
245 get_Const_tarval(b)));
246 new_bits_not_set = tarval_or(bits_not_set, new_bits_not_set);
/* Eor (xor): a result bit is known clear when both operand bits are
 * known equal, and known set when they are known different. */
249 } else if (is_Eor(node)) {
250 ir_node *a = get_Eor_left(node);
251 ir_node *b = get_Eor_right(node);
253 tarval *bits_set, *bits_not_set;
254 bits_not_set = tarval_or(
255 tarval_and(a->vrp.bits_set, b->vrp.bits_set),
256 tarval_and(a->vrp.bits_not_set,
257 b->vrp.bits_not_set));
259 bits_set = tarval_or(
260 tarval_and(a->vrp.bits_set, b->vrp.bits_not_set),
261 tarval_and(a->vrp.bits_not_set, b->vrp.bits_set));
263 new_bits_set = tarval_or(bits_set, node->vrp.bits_set);
264 new_bits_not_set = tarval_or(bits_not_set, node->vrp.bits_not_set);
/* Id: the value is exactly the predecessor's, copy all facts */
266 } else if (is_Id(node)) {
267 ir_node *pred = get_Id_pred(node);
268 new_bits_set = pred->vrp.bits_set;
269 new_bits_not_set = pred->vrp.bits_not_set;
270 new_range_top = pred->vrp.range_top;
271 new_range_bottom = pred->vrp.range_bottom;
272 new_range_type = pred->vrp.range_type;
/* Not: bitwise complement swaps the roles of set and clear bits */
274 } else if (is_Not(node)) {
275 ir_node *pred = get_Not_op(node);
276 new_bits_set = tarval_or(pred->vrp.bits_not_set, node->vrp.bits_set);
277 new_bits_not_set = tarval_or(pred->vrp.bits_set, node->vrp.bits_not_set);
/* Conv: translate the masks into the target mode; bits that do not
 * exist in the source mode are known clear. */
279 } else if (is_Conv(node)) {
280 ir_node *pred = get_Conv_op(node);
281 ir_mode *old_mode = get_irn_mode(pred);
283 tarval *bits_not_set;
285 if (!mode_is_int(old_mode))
288 new_mode = get_irn_mode(node);
290 // The second and is needed if target type is smaller
291 bits_not_set = tarval_not(
292 tarval_convert_to(get_tarval_all_one(old_mode),
295 bits_not_set = tarval_or(bits_not_set, tarval_convert_to(pred->vrp.bits_not_set, new_mode));
296 new_bits_not_set = tarval_or(bits_not_set, node->vrp.bits_not_set);
/* mask bits_set so it never contradicts bits_not_set */
297 new_bits_set = tarval_and(
298 tarval_not(bits_not_set), tarval_convert_to(pred->vrp.bits_set, new_mode));
/* keep the predecessor's bounds when they fit into the new mode.
 * NOTE(review): these write node->vrp directly without setting
 * something_changed, unlike the merge section below — TODO confirm
 * this is intentional. */
300 if (tarval_cmp(pred->vrp.range_top, get_tarval_max(new_mode)) == pn_Cmp_Le) {
301 node->vrp.range_top = pred->vrp.range_top;
304 if (tarval_cmp(pred->vrp.range_bottom, get_tarval_min(new_mode)) == pn_Cmp_Ge) {
305 node->vrp.range_bottom = pred->vrp.range_bottom;
/* Confirm: derive a range from the comparison against the bound that
 * the Confirm node guarantees. */
308 } else if (is_Confirm(node)) {
309 pn_Cmp cmp = get_Confirm_cmp(node);
310 ir_node *bound = get_Confirm_bound(node);
312 /** @todo: Handle non-Const bounds */
/* x != bound: an anti-range consisting of the single value `bound` */
314 if (cmp == pn_Cmp_Lg) {
315 /** @todo: Is there some way to preserve the information? */
316 new_range_type = VRP_ANTIRANGE;
317 if (is_Const(bound)) {
318 new_range_top = get_Const_tarval(bound);
319 new_range_bottom = get_Const_tarval(bound);
/* x <= bound: range [mode_min, bound], possibly tightening an
 * existing range's upper end */
321 } else if (cmp == pn_Cmp_Le) {
322 if (node->vrp.range_type == VRP_UNDEFINED) {
323 new_range_type = VRP_RANGE;
324 if (is_Const(bound)) {
325 new_range_top = get_Const_tarval(bound);
327 new_range_bottom = get_tarval_min(get_irn_mode(node));
328 } else if (node->vrp.range_type == VRP_RANGE) {
329 if (is_Const(bound)) {
330 if (tarval_cmp(node->vrp.range_top,
331 get_Const_tarval(bound)) == pn_Cmp_Le) {
332 new_range_top = get_Const_tarval(bound);
334 new_range_bottom = get_tarval_min(get_irn_mode(node));
336 } else if (node->vrp.range_type == VRP_ANTIRANGE) {
337 /** @todo: How do we manage not to get a never ending loop? */
/* Phi: the merged value can be any predecessor's value, so take the
 * union of all predecessor ranges (widening toward VARYING when a
 * predecessor has no usable range). */
342 } else if (is_Phi(node)) {
343 // combine all ranges
345 int num = get_Phi_n_preds(node);
348 tarval *range_top, *range_bottom, *bits_set, *bits_not_set;
349 enum range_types range_type;
/* seed with the first predecessor's facts */
353 pred = get_Phi_pred(node,0);
354 range_top = pred->vrp.range_top;
355 range_bottom = pred->vrp.range_bottom;
356 range_type = pred->vrp.range_type;
357 bits_set = pred->vrp.bits_set;
358 bits_not_set = pred->vrp.bits_not_set;
360 for(i = 1; i < num; i++) {
361 pred = get_Phi_pred(node, i);
/* widen the interval to cover this predecessor's range */
362 if (range_type == VRP_RANGE && pred->vrp.range_type ==
364 cmp = tarval_cmp(range_top, pred->vrp.range_top);
365 if (cmp == pn_Cmp_Lt) {
366 range_top = pred->vrp.range_top;
368 cmp = tarval_cmp(range_bottom, pred->vrp.range_bottom);
369 if (cmp == pn_Cmp_Gt) {
370 range_bottom = pred->vrp.range_bottom;
373 range_type = VRP_VARYING;
378 new_range_type = range_type;
379 new_range_top = range_top;
380 new_range_bottom = range_bottom;
383 // unhandled, therefore never updated
389 /* TODO: Check, if there can be information derived from any of these:
390 is_Abs(node) is_Alloc(node) is_Anchor(node) is_Borrow(node) is_Bound(node)
391 is_Break(node) is_Builtin(node) is_Call(node) is_CallBegin(node)
392 is_Carry(node) is_Cast(node) is_Cmp(node) is_Cond(node)
393 is_CopyB(node) is_Div(node) is_DivMod(node) is_Dummy(node)
394 is_End(node) is_EndExcept(node) is_EndReg(node) is_Filter(node) is_Free(node)
395 is_IJmp(node) is_InstOf(node) is_Jmp(node) is_Load(node) is_Minus(node)
396 is_Mod(node) is_Mul(node) is_Mulh(node) is_Mux(node) is_NoMem(node)
397 is_Pin(node) is_Proj(node) is_Quot(node)
398 is_Raise(node) is_Return(node) is_Sel(node) is_Start(node) is_Store(node)
399 is_SymConst(node) is_Sync(node) is_Tuple(node)
402 // Merge the newly calculated values with those that might already exist
/* Bit facts are merged by or-ing: once a bit is known it stays known.
 * something_changed is only set when the stored value actually differs,
 * so the fixpoint iteration can detect stabilisation. */
404 if (new_bits_set != tarval_bad) {
405 new_bits_set = tarval_or(new_bits_set, node->vrp.bits_set);
406 if (tarval_cmp(new_bits_set, node->vrp.bits_set) != pn_Cmp_Eq) {
407 something_changed = 1;
408 node->vrp.bits_set = new_bits_set;
412 if (new_bits_not_set != tarval_bad) {
413 new_bits_not_set = tarval_or(new_bits_not_set, node->vrp.bits_not_set);
415 if (tarval_cmp(new_bits_not_set, node->vrp.bits_not_set) != pn_Cmp_Eq) {
416 something_changed = 1;
417 node->vrp.bits_not_set = new_bits_not_set;
/* bits_node is only ever set once (NULL -> non-NULL) */
421 if (node->vrp.bits_node == NULL && new_bits_node != NULL) {
422 something_changed = 1;
423 node->vrp.bits_node = new_bits_node;
/* Range merge, case 1: no previous range — adopt the new one */
426 if (node->vrp.range_type == VRP_UNDEFINED &&
427 new_range_type != VRP_UNDEFINED) {
428 something_changed = 1;
429 node->vrp.range_type = new_range_type;
430 node->vrp.range_bottom = new_range_bottom;
431 node->vrp.range_top = new_range_top;
432 node->vrp.range_op = new_range_op;
433 node->vrp.range_node = new_range_node;
/* Range merge, case 2: stored range is a RANGE */
435 } else if (node->vrp.range_type == VRP_RANGE) {
436 if (new_range_type == VRP_RANGE) {
/* only comparable when both are absolute, or relative to the same
 * node with the same operation */
437 if ((new_range_node == NULL && node->vrp.range_node == NULL) ||
438 (new_range_node == node->vrp.range_node &&
439 new_range_op == node->vrp.range_op)) {
/* intersect: keep the tighter of the two intervals */
440 if (tarval_cmp(node->vrp.range_bottom, new_range_bottom) == pn_Cmp_Lt) {
441 something_changed = 1;
442 node->vrp.range_bottom = new_range_bottom;
444 if (tarval_cmp(node->vrp.range_top, new_range_top) == pn_Cmp_Gt) {
445 something_changed = 1;
446 node->vrp.range_top = new_range_top;
450 // prefer the absolute value
451 if (new_range_node == NULL && node->vrp.range_node != NULL) {
452 something_changed = 1;
453 node->vrp.range_node = NULL;
454 node->vrp.range_top = new_range_top;
455 node->vrp.range_bottom = new_range_bottom;
/* stored RANGE vs. new ANTIRANGE: cut the excluded part off the ends */
459 if (new_range_type == VRP_ANTIRANGE) {
460 // if they are overlapping, cut the range.
461 // TODO: Maybe we can preserve more information here
462 if (new_range_node == NULL && node->vrp.range_node == NULL) {
463 if (tarval_cmp(node->vrp.range_bottom, new_range_top) == pn_Cmp_Gt &&
464 tarval_cmp(node->vrp.range_bottom, new_range_bottom) == pn_Cmp_Gt) {
465 something_changed = 1;
466 node->vrp.range_bottom = new_range_top;
468 } else if (tarval_cmp(node->vrp.range_top, new_range_bottom) == pn_Cmp_Gt &&
469 tarval_cmp(node->vrp.range_top, new_range_top) == pn_Cmp_Lt) {
470 something_changed = 1;
471 node->vrp.range_top = new_range_bottom;
474 // We can not handle the case where the anti range is in the
478 // prefer the absolute value
479 if (new_range_node == NULL && node->vrp.range_node != NULL) {
480 something_changed = 1;
481 node->vrp.range_node = NULL;
482 node->vrp.range_top = new_range_top;
483 node->vrp.range_bottom = new_range_bottom;
/* Range merge, case 3: stored range is an ANTIRANGE */
486 } else if (node->vrp.range_type == VRP_ANTIRANGE) {
487 if (new_range_type == VRP_ANTIRANGE) {
488 if ((new_range_node == NULL && node->vrp.range_node == NULL) ||
489 (new_range_node == node->vrp.range_node &&
490 new_range_op == node->vrp.range_op)) {
/* union of excluded intervals: widen the anti-range */
491 if (tarval_cmp(node->vrp.range_bottom, new_range_bottom) == pn_Cmp_Gt) {
492 something_changed = 1;
493 node->vrp.range_bottom = new_range_bottom;
495 if (tarval_cmp(node->vrp.range_top, new_range_top) == pn_Cmp_Lt) {
496 something_changed = 1;
497 node->vrp.range_top = new_range_top;
501 // prefer the absolute value
502 if (new_range_node == NULL && node->vrp.range_node != NULL) {
503 something_changed = 1;
504 node->vrp.range_node = NULL;
505 node->vrp.range_top = new_range_top;
506 node->vrp.range_bottom = new_range_bottom;
/* stored ANTIRANGE vs. new RANGE: shrink the excluded interval */
510 if (new_range_type == VRP_RANGE) {
511 if ((new_range_node == NULL && node->vrp.range_node == NULL) ||
512 (new_range_node == node->vrp.range_node &&
513 new_range_op == node->vrp.range_op)) {
514 if (tarval_cmp(node->vrp.range_bottom, new_range_top) == pn_Cmp_Gt) {
515 something_changed = 1;
516 node->vrp.range_bottom = new_range_top;
518 if (tarval_cmp(node->vrp.range_top, new_range_bottom) == pn_Cmp_Lt) {
519 something_changed = 1;
520 node->vrp.range_top = new_range_bottom;
524 // prefer the absolute value
525 if (new_range_node == NULL && node->vrp.range_node != NULL) {
526 something_changed = 1;
527 node->vrp.range_node = NULL;
528 node->vrp.range_top = new_range_top;
529 node->vrp.range_bottom = new_range_bottom;
/* sanity: no bit may simultaneously be known set and known clear */
534 assert(tarval_is_null(
535 tarval_and(node->vrp.bits_set, node->vrp.bits_not_set)));
537 return something_changed;
/**
 * Graph-walker callback for the initial vrp pass: mark the node as
 * visited and put its already-visited users on the worklist so the
 * fixpoint iteration in set_vrp_data() will revisit them.
 *
 * NOTE(review): lines are elided in this listing (declarations of the
 * loop variable and `succ`, the call computing n's own vrp data, and the
 * assignment of tmp_entry->node are not visible).
 *
 * @param n  the node currently visited by the walker
 * @param e  walker environment, actually a struct vrp_env_t *
 */
540 void vrp_first_pass(ir_node *n, void *e) {
542 worklist_t *tmp_entry;
544 struct vrp_env_t *env;
549 env = (struct vrp_env_t *) e;
/* mark n so that users visited later know this predecessor already
 * carries (initial) vrp data */
552 set_irn_link(n, VISITED);
/* enqueue every user of n that was already processed: its data may be
 * improvable now that n has information */
556 for (i = get_irn_n_outs(n) - 1; i >=0; --i) {
557 succ = get_irn_out(n, i);
558 if (get_irn_link(succ) == VISITED) {
// NOTE(review): malloc result is not checked before use — TODO confirm
// project convention (libFirm typically uses xmalloc/obstacks).
561 tmp_entry = (struct worklist_t *)malloc(sizeof(struct worklist_t));
563 list_add(&(tmp_entry->nodes), &(env->worklist->nodes));
/**
 * Compute vrp information for every node of the graph.
 *
 * Runs one full graph walk (vrp_first_pass) to seed the data and fill a
 * worklist, then iterates: whenever update_vrp_data() reports a change
 * for a node, all of its users are re-queued, until a fixpoint is
 * reached (empty worklist).
 *
 * NOTE(review): several lines are elided in this listing — declarations
 * of `worklist`, `i` and `succ`, list_del/free of processed entries, and
 * the freeing of `env` are not visible here.
 *
 * @param irg  the graph to analyse
 */
571 void set_vrp_data(ir_graph *irg) {
576 worklist_t *tmp_entry, *tmp_entry2;
577 struct vrp_env_t *env;
// NOTE(review): malloc result unchecked — TODO confirm convention.
579 env = malloc(sizeof(struct vrp_env_t));
589 assure_irg_outs(irg); // ensure that out edges are consistent
591 // edges_activate(irg);
593 INIT_LIST_HEAD(&worklist.nodes);
/* first pass: seed vrp data on all nodes and collect the initial
 * worklist */
595 env->worklist = &worklist;
596 irg_walk_graph(irg, NULL, vrp_first_pass, env);
600 // while there are entries in the worklist, continue
601 while( !list_empty(&worklist.nodes) ) {
603 struct list_head *pos, *next;
/* _safe variant: the current entry may be unlinked while iterating */
604 list_for_each_safe(pos, next, &worklist.nodes) {
606 tmp_entry = list_entry(pos, struct worklist_t, nodes);
608 if (update_vrp_data(tmp_entry->node)) {
609 // if something changed, add successors to worklist
610 for (i = get_irn_n_outs(tmp_entry->node) - 1; i >=0; --i) {
611 succ = get_irn_out(tmp_entry->node, i);
// NOTE(review): malloc result unchecked, as above.
613 tmp_entry2 = (struct worklist_t *)malloc(sizeof(struct worklist_t));
614 tmp_entry2->node = succ;
615 list_add(&(tmp_entry2->nodes), &worklist.nodes);
626 ir_graph_pass_t *set_vrp_pass(const char *name) {
627 return def_graph_pass(name ? name : "set_vrp", set_vrp_data);
/**
 * Try to decide the relation between two nodes using only their vrp
 * information (ranges and known bits).
 *
 * NOTE(review): this function is truncated in the visible listing — the
 * return statements of the individual branches and the function's end
 * are on elided lines; comments describe only the visible tests.
 *
 * @param left   first operand of the comparison
 * @param right  second operand of the comparison
 */
630 pn_Cmp vrp_cmp(ir_node *left, ir_node *right) {
/* without valid vrp data on both sides nothing can be decided */
631 if (!left->vrp.valid || !right->vrp.valid) {
/* both operands need a usable (neither undefined nor varying) range */
635 if (!(left->vrp.range_type == VRP_UNDEFINED ||
636 left->vrp.range_type == VRP_VARYING) && !(
637 right->vrp.range_type == VRP_UNDEFINED ||
638 right->vrp.range_type == VRP_VARYING)) {
640 tarval *lefttop = left->vrp.range_top;
641 tarval *leftbottom = left->vrp.range_bottom;
642 tarval *righttop = right->vrp.range_top;
643 tarval *rightbottom = right->vrp.range_bottom;
/* disjoint proper ranges decide strictly-less / strictly-greater */
644 if (left->vrp.range_type == VRP_RANGE && right->vrp.range_type ==
646 if (tarval_cmp(lefttop, rightbottom) == pn_Cmp_Lt) {
649 if (tarval_cmp(leftbottom, righttop) == pn_Cmp_Gt) {
/* if some bit is known set in one operand but known clear in the
 * other, the two values cannot be equal */
656 if (!tarval_is_null(tarval_and(left->vrp.bits_set, right->vrp.bits_not_set)) ||
657 !tarval_is_null(tarval_and(left->vrp.bits_not_set, right->vrp.bits_set))) {
660 // TODO: We can get way more information here