2 * Copyright (C) 1995-2009 University of Karlsruhe. All rights reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
22 * @brief analyze graph to provide value range information
31 #include "irgraph_t.h"
40 static void *VISITED = &v;
42 typedef struct worklist_t worklist_t;
44 struct list_head nodes;
52 int update_vrp_data( ir_node *node)
55 tarval *new_bits_set = get_tarval_bad();
56 tarval *new_bits_not_set = get_tarval_bad();
57 tarval *new_range_bottom = get_tarval_bad();
58 tarval *new_range_top = get_tarval_bad();
59 ir_node *new_bits_node = NULL;
60 ir_node *new_range_node = NULL;
61 enum range_types new_range_type = VRP_UNDEFINED;
62 enum range_ops new_range_op = VRP_NONE;
63 int something_changed = 0;
66 // TODO: Check if all predecessors have valid VRP information
69 if (!mode_is_int(get_irn_mode(node))) {
70 return 0; // we don't optimize for non-int-nodes
74 tarval *tv = get_Const_tarval(node);
77 new_bits_not_set = tarval_not(tv);
78 new_range_bottom = tv;
80 new_range_type = VRP_RANGE;
81 } else if (is_And(node)) {
82 ir_node *pred0 = get_And_left(node);
83 ir_node *pred1 = get_And_right(node);
86 new_bits_set = tarval_and(pred0->vrp.bits_set, pred1->vrp.bits_set);
87 new_bits_not_set = tarval_or(pred0->vrp.bits_not_set, pred1->vrp.bits_not_set);
89 tmp = tarval_not(pred0->vrp.bits_set);
90 tmp = tarval_eor(pred0->vrp.bits_not_set, tmp);
91 //check if one of the predecessors is completely determined
92 if (tarval_is_null(tmp)) {
93 new_bits_node = pred1;
96 tmp = tarval_not(pred1->vrp.bits_set);
97 tmp = tarval_eor(pred1->vrp.bits_not_set, tmp);
98 if (tarval_is_null(tmp)) {
99 new_bits_node = pred0;
101 } else if (is_Add(node)) {
102 ir_node *pred0 = get_Add_left(node);
103 ir_node *pred1 = get_Add_right(node);
104 int overflow_top, overflow_bottom;
105 tarval *new_top, *new_bottom;
107 if (pred0->vrp.range_type == VRP_UNDEFINED || pred1->vrp.range_type ==
108 VRP_UNDEFINED || pred0->vrp.range_type == VRP_VARYING ||
109 pred1->vrp.range_type == VRP_VARYING) {
113 new_top = tarval_add(pred0->vrp.range_top, pred1->vrp.range_top);
114 overflow_top = tarval_carry();
115 new_bottom = tarval_add(pred0->vrp.range_bottom, pred1->vrp.range_bottom);
116 overflow_bottom = tarval_carry();
118 if (!overflow_top && !overflow_bottom && pred0->vrp.range_type == VRP_RANGE
119 &&pred1->vrp.range_type == VRP_RANGE) {
120 new_range_bottom = new_bottom;
121 new_range_top = new_top;
122 new_range_type = VRP_RANGE;
125 if (overflow_top || overflow_bottom) {
126 // TODO Implement overflow handling
127 new_range_type = VRP_UNDEFINED;
129 } else if (is_Sub(node)) {
130 ir_node *pred0 = get_Sub_left(node);
131 ir_node *pred1 = get_Sub_right(node);
132 int overflow_top, overflow_bottom;
133 tarval *new_top, *new_bottom;
135 if (pred0->vrp.range_type == VRP_UNDEFINED || pred1->vrp.range_type ==
140 new_top = tarval_sub(pred0->vrp.range_top, pred1->vrp.range_top, NULL);
141 overflow_top = tarval_carry();
142 new_bottom = tarval_sub(pred0->vrp.range_bottom, pred1->vrp.range_bottom, NULL);
143 overflow_bottom = tarval_carry();
145 if (!overflow_top && !overflow_bottom && pred0->vrp.range_type == VRP_RANGE
146 &&pred1->vrp.range_type == VRP_RANGE) {
147 new_range_bottom = new_bottom;
148 new_range_top = new_top;
149 new_range_type = VRP_RANGE;
152 if (overflow_top || overflow_bottom) {
153 // TODO Implement overflow handling
155 } else if (is_Or(node)) {
156 ir_node *a = get_Or_left(node);
157 ir_node *b = get_Or_right(node);
160 new_bits_set = tarval_or(a->vrp.bits_set, b->vrp.bits_set);
161 new_bits_not_set = tarval_and(a->vrp.bits_not_set, b->vrp.bits_not_set);
163 tmp = tarval_not(a->vrp.bits_set);
164 tmp = tarval_eor(a->vrp.bits_not_set, tmp);
165 //check if one of the predecessors is completely determined
166 if (tarval_is_null(tmp)) {
170 tmp = tarval_not(b->vrp.bits_set);
171 tmp = tarval_eor(b->vrp.bits_not_set, tmp);
172 if (tarval_is_null(tmp)) {
176 } else if (is_Rotl(node)) {
177 ir_node *a = get_Rotl_left(node);
178 ir_node *b = get_Rotl_right(node);
180 // We can only compute this if the right value is a constant
182 tarval *bits_set, *bits_not_set;
183 bits_set = tarval_rotl(a->vrp.bits_set, get_Const_tarval(b));
184 bits_not_set = tarval_rotl(a->vrp.bits_not_set, get_Const_tarval(b));
186 new_bits_set = tarval_or(bits_set, node->vrp.bits_set);
187 new_bits_not_set = tarval_or(bits_not_set, node->vrp.bits_not_set);
190 } else if (is_Shl(node)) {
191 ir_node *a = get_Shl_left(node);
192 ir_node *b = get_Shl_right(node);
194 // We can only compute this if the right value is a constant
196 tarval *bits_set, *bits_not_set;
197 ir_mode *m = get_tarval_mode(node->vrp.bits_not_set);
198 bits_set = tarval_shl(a->vrp.bits_set, get_Const_tarval(b));
199 bits_not_set = tarval_shl(a->vrp.bits_not_set, get_Const_tarval(b));
201 new_bits_set = tarval_or(bits_set, node->vrp.bits_set);
202 new_bits_not_set = tarval_or(bits_not_set, node->vrp.bits_not_set);
204 bits_not_set = tarval_not( tarval_shl(
206 get_Const_tarval(b)));
207 new_bits_not_set = tarval_or(bits_not_set, new_bits_not_set);
211 } else if (is_Shr(node)) {
212 ir_node *a = get_Shr_left(node);
213 ir_node *b = get_Shr_right(node);
215 // We can only compute this if the right value is a constant
217 tarval *bits_set, *bits_not_set;
218 ir_mode *m = get_tarval_mode(node->vrp.bits_not_set);
219 bits_set = tarval_shr(a->vrp.bits_set, get_Const_tarval(b));
220 bits_not_set = tarval_shr(a->vrp.bits_not_set, get_Const_tarval(b));
222 new_bits_set = tarval_or(bits_set, node->vrp.bits_set);
223 new_bits_not_set = tarval_or(bits_not_set, node->vrp.bits_not_set);
225 bits_not_set = tarval_not( tarval_shr(
227 get_Const_tarval(b)));
228 new_bits_not_set = tarval_or(bits_not_set, new_bits_not_set);
231 } else if (is_Shrs(node)) {
232 ir_node *a = get_Shrs_left(node);
233 ir_node *b = get_Shrs_right(node);
235 // We can only compute this if the right value is a constant
237 tarval *bits_set, *bits_not_set;
238 ir_mode *m = get_tarval_mode(node->vrp.bits_not_set);
239 bits_set = tarval_shrs(a->vrp.bits_set, get_Const_tarval(b));
240 bits_not_set = tarval_shrs(a->vrp.bits_not_set, get_Const_tarval(b));
242 new_bits_set = tarval_or(bits_set, node->vrp.bits_set);
243 new_bits_not_set = tarval_or(bits_not_set, node->vrp.bits_not_set);
245 bits_not_set = tarval_not( tarval_shrs(
247 get_Const_tarval(b)));
248 new_bits_not_set = tarval_or(bits_not_set, new_bits_not_set);
251 } else if (is_Eor(node)) {
252 ir_node *a = get_Eor_left(node);
253 ir_node *b = get_Eor_right(node);
255 tarval *bits_set, *bits_not_set;
256 bits_not_set = tarval_or(
257 tarval_and(a->vrp.bits_set, b->vrp.bits_set),
258 tarval_and(a->vrp.bits_not_set,
259 b->vrp.bits_not_set));
261 bits_set = tarval_or(
262 tarval_and(a->vrp.bits_set, b->vrp.bits_not_set),
263 tarval_and(a->vrp.bits_not_set, b->vrp.bits_set));
265 new_bits_set = tarval_or(bits_set, node->vrp.bits_set);
266 new_bits_not_set = tarval_or(bits_not_set, node->vrp.bits_not_set);
268 } else if (is_Id(node)) {
269 ir_node *pred = get_Id_pred(node);
270 new_bits_set = pred->vrp.bits_set;
271 new_bits_not_set = pred->vrp.bits_not_set;
272 new_range_top = pred->vrp.range_top;
273 new_range_bottom = pred->vrp.range_bottom;
274 new_range_type = pred->vrp.range_type;
276 } else if (is_Not(node)) {
277 ir_node *pred = get_Not_op(node);
278 new_bits_set = tarval_or(pred->vrp.bits_not_set, node->vrp.bits_set);
279 new_bits_not_set = tarval_or(pred->vrp.bits_set, node->vrp.bits_not_set);
281 } else if (is_Conv(node)) {
282 ir_node *pred = get_Conv_op(node);
283 ir_mode *old_mode = get_irn_mode(pred);
285 tarval *bits_not_set;
287 if (!mode_is_int(old_mode))
290 new_mode = get_irn_mode(node);
292 // The second and is needed if target type is smaller
293 bits_not_set = tarval_not(
294 tarval_convert_to(get_mode_all_one(old_mode),
297 bits_not_set = tarval_or(bits_not_set, tarval_convert_to(pred->vrp.bits_not_set, new_mode));
298 new_bits_not_set = tarval_or(bits_not_set, node->vrp.bits_not_set);
299 new_bits_set = tarval_and(
300 tarval_not(bits_not_set), tarval_convert_to(pred->vrp.bits_set, new_mode));
302 if (tarval_cmp(pred->vrp.range_top, get_mode_max(new_mode)) == pn_Cmp_Le) {
303 node->vrp.range_top = pred->vrp.range_top;
306 if (tarval_cmp(pred->vrp.range_bottom, get_mode_min(new_mode)) == pn_Cmp_Ge) {
307 node->vrp.range_bottom = pred->vrp.range_bottom;
310 } else if (is_Confirm(node)) {
311 pn_Cmp cmp = get_Confirm_cmp(node);
312 ir_node *bound = get_Confirm_bound(node);
314 /** @todo: Handle non-Const bounds */
316 if (cmp == pn_Cmp_Lg) {
317 /** @todo: Is there some way to preserve the information? */
318 new_range_type = VRP_ANTIRANGE;
319 if (is_Const(bound)) {
320 new_range_top = get_Const_tarval(bound);
321 new_range_bottom = get_Const_tarval(bound);
323 } else if (cmp == pn_Cmp_Le) {
324 if (node->vrp.range_type == VRP_UNDEFINED) {
325 new_range_type = VRP_RANGE;
326 if (is_Const(bound)) {
327 new_range_top = get_Const_tarval(bound);
329 new_range_bottom = get_tarval_min(get_irn_mode(node));
330 } else if (node->vrp.range_type == VRP_RANGE) {
331 if (is_Const(bound)) {
332 if (tarval_cmp(node->vrp.range_top,
333 get_Const_tarval(bound)) == pn_Cmp_Le) {
334 new_range_top = get_Const_tarval(bound);
336 new_range_bottom = get_tarval_min(get_irn_mode(node));
338 } else if (node->vrp.range_type == VRP_ANTIRANGE) {
339 /** @todo: How do we manage not to get a never ending loop? */
344 } else if (is_Phi(node)) {
345 // combine all ranges
347 int num = get_Phi_n_preds(node);
350 tarval *range_top, *range_bottom, *bits_set, *bits_not_set;
351 enum range_types range_type;
355 pred = get_Phi_pred(node,0);
356 range_top = pred->vrp.range_top;
357 range_bottom = pred->vrp.range_bottom;
358 range_type = pred->vrp.range_type;
359 bits_set = pred->vrp.bits_set;
360 bits_not_set = pred->vrp.bits_not_set;
362 for (i = 1; i < num; i++) {
363 pred = get_Phi_pred(node, i);
364 if (range_type == VRP_RANGE && pred->vrp.range_type ==
366 cmp = tarval_cmp(range_top, pred->vrp.range_top);
367 if (cmp == pn_Cmp_Lt) {
368 range_top = pred->vrp.range_top;
370 cmp = tarval_cmp(range_bottom, pred->vrp.range_bottom);
371 if (cmp == pn_Cmp_Gt) {
372 range_bottom = pred->vrp.range_bottom;
375 range_type = VRP_VARYING;
380 new_range_type = range_type;
381 new_range_top = range_top;
382 new_range_bottom = range_bottom;
385 // unhandled, therefore never updated
391 /* TODO: Check, if there can be information derived from any of these:
392 is_Abs(node) is_Alloc(node) is_Anchor(node) is_Borrow(node) is_Bound(node)
393 is_Break(node) is_Builtin(node) is_Call(node) is_CallBegin(node)
394 is_Carry(node) is_Cast(node) is_Cmp(node) is_Cond(node)
395 is_CopyB(node) is_Div(node) is_DivMod(node) is_Dummy(node)
396 is_End(node) is_EndExcept(node) is_EndReg(node) is_Filter(node) is_Free(node)
397 is_IJmp(node) is_InstOf(node) is_Jmp(node) is_Load(node) is_Minus(node)
398 is_Mod(node) is_Mul(node) is_Mulh(node) is_Mux(node) is_NoMem(node)
399 is_Pin(node) is_Proj(node) is_Quot(node)
400 is_Raise(node) is_Return(node) is_Sel(node) is_Start(node) is_Store(node)
401 is_SymConst(node) is_Sync(node) is_Tuple(node)
404 // Merge the newly calculated values with those that might already exist
406 if (new_bits_set != tarval_bad) {
407 new_bits_set = tarval_or(new_bits_set, node->vrp.bits_set);
408 if (tarval_cmp(new_bits_set, node->vrp.bits_set) != pn_Cmp_Eq) {
409 something_changed = 1;
410 node->vrp.bits_set = new_bits_set;
414 if (new_bits_not_set != tarval_bad) {
415 new_bits_not_set = tarval_or(new_bits_not_set, node->vrp.bits_not_set);
417 if (tarval_cmp(new_bits_not_set, node->vrp.bits_not_set) != pn_Cmp_Eq) {
418 something_changed = 1;
419 node->vrp.bits_not_set = new_bits_not_set;
423 if (node->vrp.bits_node == NULL && new_bits_node != NULL) {
424 something_changed = 1;
425 node->vrp.bits_node = new_bits_node;
428 if (node->vrp.range_type == VRP_UNDEFINED &&
429 new_range_type != VRP_UNDEFINED) {
430 something_changed = 1;
431 node->vrp.range_type = new_range_type;
432 node->vrp.range_bottom = new_range_bottom;
433 node->vrp.range_top = new_range_top;
434 node->vrp.range_op = new_range_op;
435 node->vrp.range_node = new_range_node;
437 } else if (node->vrp.range_type == VRP_RANGE) {
438 if (new_range_type == VRP_RANGE) {
439 if ((new_range_node == NULL && node->vrp.range_node == NULL) ||
440 (new_range_node == node->vrp.range_node &&
441 new_range_op == node->vrp.range_op)) {
442 if (tarval_cmp(node->vrp.range_bottom, new_range_bottom) == pn_Cmp_Lt) {
443 something_changed = 1;
444 node->vrp.range_bottom = new_range_bottom;
446 if (tarval_cmp(node->vrp.range_top, new_range_top) == pn_Cmp_Gt) {
447 something_changed = 1;
448 node->vrp.range_top = new_range_top;
452 // prefer the absolute value
453 if (new_range_node == NULL && node->vrp.range_node != NULL) {
454 something_changed = 1;
455 node->vrp.range_node = NULL;
456 node->vrp.range_top = new_range_top;
457 node->vrp.range_bottom = new_range_bottom;
461 if (new_range_type == VRP_ANTIRANGE) {
462 // if they are overlapping, cut the range.
463 // TODO: Maybe we can preserve more information here
464 if (new_range_node == NULL && node->vrp.range_node == NULL) {
465 if (tarval_cmp(node->vrp.range_bottom, new_range_top) == pn_Cmp_Gt &&
466 tarval_cmp(node->vrp.range_bottom, new_range_bottom) == pn_Cmp_Gt) {
467 something_changed = 1;
468 node->vrp.range_bottom = new_range_top;
470 } else if (tarval_cmp(node->vrp.range_top, new_range_bottom) == pn_Cmp_Gt &&
471 tarval_cmp(node->vrp.range_top, new_range_top) == pn_Cmp_Lt) {
472 something_changed = 1;
473 node->vrp.range_top = new_range_bottom;
476 // We can not handle the case where the anti range is in the
480 // prefer the absolute value
481 if (new_range_node == NULL && node->vrp.range_node != NULL) {
482 something_changed = 1;
483 node->vrp.range_node = NULL;
484 node->vrp.range_top = new_range_top;
485 node->vrp.range_bottom = new_range_bottom;
488 } else if (node->vrp.range_type == VRP_ANTIRANGE) {
489 if (new_range_type == VRP_ANTIRANGE) {
490 if ((new_range_node == NULL && node->vrp.range_node == NULL) ||
491 (new_range_node == node->vrp.range_node &&
492 new_range_op == node->vrp.range_op)) {
493 if (tarval_cmp(node->vrp.range_bottom, new_range_bottom) == pn_Cmp_Gt) {
494 something_changed = 1;
495 node->vrp.range_bottom = new_range_bottom;
497 if (tarval_cmp(node->vrp.range_top, new_range_top) == pn_Cmp_Lt) {
498 something_changed = 1;
499 node->vrp.range_top = new_range_top;
503 // prefer the absolute value
504 if (new_range_node == NULL && node->vrp.range_node != NULL) {
505 something_changed = 1;
506 node->vrp.range_node = NULL;
507 node->vrp.range_top = new_range_top;
508 node->vrp.range_bottom = new_range_bottom;
512 if (new_range_type == VRP_RANGE) {
513 if ((new_range_node == NULL && node->vrp.range_node == NULL) ||
514 (new_range_node == node->vrp.range_node &&
515 new_range_op == node->vrp.range_op)) {
516 if (tarval_cmp(node->vrp.range_bottom, new_range_top) == pn_Cmp_Gt) {
517 something_changed = 1;
518 node->vrp.range_bottom = new_range_top;
520 if (tarval_cmp(node->vrp.range_top, new_range_bottom) == pn_Cmp_Lt) {
521 something_changed = 1;
522 node->vrp.range_top = new_range_bottom;
526 // prefer the absolute value
527 if (new_range_node == NULL && node->vrp.range_node != NULL) {
528 something_changed = 1;
529 node->vrp.range_node = NULL;
530 node->vrp.range_top = new_range_top;
531 node->vrp.range_bottom = new_range_bottom;
536 assert(tarval_is_null(
537 tarval_and(node->vrp.bits_set, node->vrp.bits_not_set)));
539 return something_changed;
/*
 * irg_walk_graph() callback for the initial pass: marks the current node
 * as visited via its link field and seeds the environment's worklist for
 * the fixpoint iteration done later in set_vrp_data().
 *
 * @param n  the node currently visited by the walker
 * @param e  opaque walker environment, actually a struct vrp_env_t *
 *
 * NOTE(review): the declarations of the loop index / successor variables
 * and the "tmp_entry->node = ..." assignment are on lines missing from
 * this extract -- confirm against the full file.
 */
542 void vrp_first_pass(ir_node *n, void *e)
545 worklist_t *tmp_entry;
547 struct vrp_env_t *env = e;
/* Mark n as seen; the link field doubles as the visited flag. */
552 set_irn_link(n, VISITED);
/* Queue every already-visited successor so it gets re-evaluated with
 * n's fresh information. */
556 for (i = get_irn_n_outs(n) - 1; i >=0; --i) {
557 succ = get_irn_out(n, i);
558 if (get_irn_link(succ) == VISITED) {
561 tmp_entry = XMALLOC(worklist_t);
563 list_add(&(tmp_entry->nodes), &(env->worklist->nodes));
/*
 * Computes VRP information for all nodes of @p irg.
 *
 * A first graph walk (vrp_first_pass) marks nodes and seeds a worklist;
 * afterwards update_vrp_data() is applied to worklist entries until a
 * fixpoint is reached: whenever a node's information changes, all of its
 * successors are appended to the worklist again.
 *
 * @param irg  the graph to analyse
 *
 * NOTE(review): the declaration of the local "worklist" head used below
 * is on a line missing from this extract.
 */
571 void set_vrp_data(ir_graph *irg)
577 worklist_t *tmp_entry, *tmp_entry2;
578 struct vrp_env_t env;
585 assure_irg_outs(irg); // ensure that out edges are consistent
587 // edges_activate(irg);
589 INIT_LIST_HEAD(&worklist.nodes);
591 env.worklist = &worklist;
592 irg_walk_graph(irg, NULL, vrp_first_pass, &env);
/* Fixpoint iteration over the worklist seeded above. */
596 // while there are entries in the worklist, continue
597 while ( !list_empty(&worklist.nodes) ) {
599 list_head *pos, *next;
/* _safe variant: entries may be removed/added while iterating. */
600 list_for_each_safe(pos, next, &worklist.nodes) {
602 tmp_entry = list_entry(pos, worklist_t, nodes);
604 if (update_vrp_data(tmp_entry->node)) {
605 // if something changed, add successors to worklist
606 for (i = get_irn_n_outs(tmp_entry->node) - 1; i >=0; --i) {
607 succ = get_irn_out(tmp_entry->node, i);
609 tmp_entry2 = XMALLOC(worklist_t);
610 tmp_entry2->node = succ;
611 list_add(&(tmp_entry2->nodes), &worklist.nodes);
622 ir_graph_pass_t *set_vrp_pass(const char *name)
624 return def_graph_pass(name ? name : "set_vrp", set_vrp_data);
/*
 * Tries to prove a relation between @p left and @p right from their VRP
 * data alone: disjoint ranges prove a strict ordering, and contradicting
 * known bits (a bit known 1 on one side but known 0 on the other) prove
 * inequality.
 *
 * @param left   the left operand of the comparison
 * @param right  the right operand of the comparison
 *
 * NOTE(review): the return statements of the individual branches are on
 * lines missing from this extract (presumably the matching pn_Cmp_*
 * values, and a "don't know" result when neither test fires) -- confirm
 * against the full file.
 */
627 pn_Cmp vrp_cmp(ir_node *left, ir_node *right)
/* Without valid VRP data on both sides nothing can be proved. */
629 if (!left->vrp.valid || !right->vrp.valid) {
633 if (left->vrp.range_type == VRP_RANGE && right->vrp.range_type == VRP_RANGE) {
/* left's whole range lies below right's -> left < right. */
634 if (tarval_cmp(left->vrp.range_top, right->vrp.range_bottom) == pn_Cmp_Lt) {
/* left's whole range lies above right's -> left > right. */
637 if (tarval_cmp(left->vrp.range_bottom, right->vrp.range_top) == pn_Cmp_Gt) {
/* Some bit is known 1 on one side and known 0 on the other -> the
 * values cannot be equal. */
642 if (!tarval_is_null(tarval_and(left->vrp.bits_set, right->vrp.bits_not_set)) ||
643 !tarval_is_null(tarval_and(left->vrp.bits_not_set, right->vrp.bits_set))) {
646 // TODO: We can get way more information here