2 * Copyright (C) 1995-2008 University of Karlsruhe. All rights reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief lowers operations with mode_b. The result is a graph which
23 * might still contain some convs from/to mode_b, but no
24 * operations are performed on them anymore; they are just there
25 * so modes match. A backend can safely skip all mode_b convs.
26 * @author Matthias Braun, Christoph Mallon
/* Module-wide lowering state (initialised by ir_lower_mode_b):
 *  - config:        the active lowering configuration (target modes, options)
 *  - lowered_type:  lazily-created primitive type for the lowered mode,
 *                   cached by create_lowered_type()
 *  - lowered_nodes: queue of replaced mode_b nodes awaiting removal */
46 static lower_mode_b_config_t config;
47 static ir_type *lowered_type = NULL;
48 static pdeq *lowered_nodes = NULL;
51 * Removes a node if its out-edge count has reached 0.
52 * temporary hack until we have proper automatic dead code elimination.
54 static void maybe_kill_node(ir_node *node)
/* Node still has users: leave it alone. NOTE(review): this listing is
 * elided — the early `return` that presumably follows this check is not
 * visible here; confirm against the full file. */
59 if(get_irn_n_edges(node) != 0)
62 irg = get_irn_irg(node);
/* A Bad node must never be queued for killing. */
64 assert(!is_Bad(node));
/* Detach the node: rewire all data inputs and the block input to Bad,
 * then notify the edges module that the node is effectively deleted. */
66 arity = get_irn_arity(node);
67 for (i = 0; i < arity; ++i) {
68 set_irn_n(node, i, new_Bad());
70 set_nodes_block(node, new_Bad());
72 edges_node_deleted(node, irg);
/**
 * Builds the logical negation of an already-lowered 0/1 value:
 * node XOR 1 in the lowered mode.
 */
75 static ir_node *create_not(dbg_info *dbgi, ir_node *node)
77 ir_node *block = get_nodes_block(node);
78 ir_mode *mode = config.lowered_mode;
79 tarval *tv_one = get_tarval_one(mode);
80 ir_node *one = new_d_Const(dbgi, tv_one);
/* x ^ 1 flips the low bit — assumes node is restricted to 0/1. */
82 return new_rd_Eor(dbgi, block, node, one, mode);
/**
 * Wraps a lowered value in a Conv back to mode_b (no debug info attached),
 * so consumers that still expect mode_b see matching modes.
 */
85 static ir_node *create_convb(ir_node *node)
87 ir_node *block = get_nodes_block(node);
88 ir_node *conv = new_rd_Conv(NULL, block, node, mode_b);
/**
 * Returns (lazily creating and caching in the global `lowered_type`) a
 * primitive type for the lowered mode; used to patch method signatures
 * that contain mode_b parameters or results.
 */
93 static ir_type *create_lowered_type(void)
95 if(lowered_type == NULL) {
96 lowered_type = new_type_primitive(config.lowered_mode);
102 * creates a "set" node that produces a 0 or 1 based on a Cmp result
104 static ir_node *create_set(ir_node *node)
106 dbg_info *dbgi = get_irn_dbg_info(node);
/* The Mux is built in lowered_set_mode, which may differ from the final
 * lowered mode (see the Conv below). */
107 ir_mode *mode = config.lowered_set_mode;
108 tarval *tv_one = get_tarval_one(mode);
109 ir_node *one = new_d_Const(dbgi, tv_one);
110 ir_node *block = get_nodes_block(node);
111 tarval *tv_zero = get_tarval_null(mode);
112 ir_node *zero = new_d_Const(dbgi, tv_zero);
/* set = node ? 1 : 0 */
114 ir_node *set = new_rd_Mux(dbgi, block, node, zero, one, mode);
116 if (mode != config.lowered_mode) {
/* Convert from the set mode to the mode every other lowered value uses. */
117 set = new_r_Conv(block, set, config.lowered_mode);
/**
 * Rewrites a method type in place: every parameter and result type whose
 * mode is mode_b is replaced by the lowered primitive type.
 */
123 static void adjust_method_type(ir_type *method_type)
/* Patch mode_b parameter types. */
129 n_params = get_method_n_params(method_type);
130 for(i = 0; i < n_params; ++i) {
131 ir_type *param = get_method_param_type(method_type, i);
132 if(get_type_mode(param) == mode_b) {
133 set_method_param_type(method_type, i, create_lowered_type());
/* Patch mode_b result types the same way. */
137 n_res = get_method_n_ress(method_type);
138 for(i = 0; i < n_res; ++i) {
139 ir_type *res_type = get_method_res_type(method_type, i);
140 if(get_type_mode(res_type) == mode_b) {
141 set_method_res_type(method_type, i, create_lowered_type());
/**
 * Recursively lowers a mode_b node to an equivalent computation in the
 * lowered (integer) mode. Results are cached in the node's link field so
 * each node is lowered once; the original node is queued on lowered_nodes
 * for later removal. Returns the lowered replacement node.
 *
 * NOTE(review): this listing is elided (interior lines missing), so case
 * labels, declarations and some statements are not visible; comments below
 * describe only what the visible lines establish.
 */
146 static ir_node *lower_node(ir_node *node)
148 dbg_info *dbgi = get_irn_dbg_info(node);
149 ir_node *block = get_nodes_block(node);
150 ir_mode *mode = config.lowered_mode;
/* Only mode_b nodes may be handed to this function. */
153 assert(get_irn_mode(node) == mode_b);
/* Link field caches the already-lowered replacement, if any. */
155 res = get_irn_link(node);
159 /* TODO: be robust against phi-loops... */
160 switch (get_irn_opcode(node)) {
/* Phi: first build a lowered Phi with Unknown placeholders, register it
 * in the link cache (so cyclic references resolve), then lower each
 * operand and patch it in. */
164 ir_node *unknown, *new_phi;
166 arity = get_irn_arity(node);
167 in = ALLOCAN(ir_node*, arity);
168 unknown = new_Unknown(config.lowered_mode);
169 for(i = 0; i < arity; ++i) {
172 new_phi = new_r_Phi(block, arity, in, config.lowered_mode);
173 set_irn_link(node, new_phi);
174 pdeq_putr(lowered_nodes, node);
/* Second pass: replace the placeholders with lowered operands. */
176 for(i = 0; i < arity; ++i) {
177 ir_node *in = get_irn_n(node, i);
178 ir_node *low_in = lower_node(in);
180 set_irn_n(new_phi, i, low_in);
/* Bitwise ops (presumably And/Or/Eor — labels elided): copy the node,
 * lower all operands, and retag the copy with the lowered mode. */
190 ir_node *copy = exact_copy(node);
192 arity = get_irn_arity(node);
193 for(i = 0; i < arity; ++i) {
194 ir_node *in = get_irn_n(node, i);
195 ir_node *low_in = lower_node(in);
197 set_irn_n(copy, i, low_in);
199 set_irn_mode(copy, config.lowered_mode);
201 set_irn_link(node, copy);
202 pdeq_putr(lowered_nodes, node);
/* Not: lower the operand, then XOR with 1 (create_not). */
206 ir_node *op = get_Not_op(node);
207 ir_node *low_op = lower_node(op);
209 res = create_not(dbgi, low_op);
210 set_irn_link(node, res);
211 pdeq_putr(lowered_nodes, node);
/* Mux on mode_b values: rewritten as
 * (cond & v_true) | (~cond & v_false). */
215 ir_node *cond = get_Mux_sel(node);
216 ir_node *low_cond = lower_node(cond);
217 ir_node *v_true = get_Mux_true(node);
218 ir_node *low_v_true = lower_node(v_true);
219 ir_node *v_false = get_Mux_false(node);
220 ir_node *low_v_false = lower_node(v_false);
222 ir_node *and0 = new_rd_And(dbgi, block, low_cond, low_v_true, mode);
223 ir_node *not_cond = create_not(dbgi, low_cond);
224 ir_node *and1 = new_rd_And(dbgi, block, not_cond, low_v_false, mode);
225 ir_node *or = new_rd_Or(dbgi, block, and0, and1, mode);
227 set_irn_link(node, or);
228 pdeq_putr(lowered_nodes, node);
/* Conv to mode_b: materialise "pred != 0" via Cmp + create_set. */
232 ir_node *pred = get_Conv_op(node);
233 ir_mode *mode = get_irn_mode(pred);
234 tarval *tv_zeroc = get_tarval_null(mode);
235 ir_node *zero_cmp = new_d_Const(dbgi, tv_zeroc);
238 ir_node *cmp = new_rd_Cmp(dbgi, block, pred, zero_cmp);
239 ir_node *proj = new_rd_Proj(dbgi, block, cmp, mode_b, pn_Cmp_Lg);
240 set = create_set(proj);
242 set_irn_link(node, set);
243 pdeq_putr(lowered_nodes, node);
/* Proj: the interesting case is Proj(Cmp) — a comparison result. */
247 ir_node *pred = get_Proj_pred(node);
250 ir_node *left = get_Cmp_left(pred);
251 ir_node *right = get_Cmp_right(pred);
252 ir_mode *cmp_mode = get_irn_mode(left);
/* Shift trick: for int/pointer compares that fit the lowered mode (or
 * signed compares against the constant 0), the relation can be computed
 * from the sign bit of a subtraction instead of a Mux-based set. */
255 if ((mode_is_int(cmp_mode) || mode_is_reference(cmp_mode)) && (
256 get_mode_size_bits(cmp_mode) < get_mode_size_bits(mode) ||
257 (mode_is_signed(cmp_mode) && is_Const(right) && is_Const_null(right))
259 int pnc = get_Proj_proj(node);
/* Select operand order / extra Not per relation; the assignments to
 * a, b and the need_not flag are elided from this listing. */
264 if(pnc == pn_Cmp_Lt) {
265 /* a < b -> (a - b) >> 31 */
268 } else if(pnc == pn_Cmp_Le) {
269 /* a <= b -> ~(a - b) >> 31 */
273 } else if(pnc == pn_Cmp_Gt) {
274 /* a > b -> (b - a) >> 31 */
277 } else if(pnc == pn_Cmp_Ge) {
278 /* a >= b -> ~(a - b) >> 31 */
/* Shift amount = width of the lowered mode - 1, i.e. the sign bit. */
285 int bits = get_mode_size_bits(mode);
286 tarval *tv = new_tarval_from_long(bits-1, mode_Iu);
287 ir_node *shift_cnt = new_d_Const(dbgi, tv);
/* Widen the operands to the lowered mode if necessary. */
289 if(cmp_mode != mode) {
290 a = new_rd_Conv(dbgi, block, a, mode);
291 b = new_rd_Conv(dbgi, block, b, mode);
294 res = new_rd_Sub(dbgi, block, a, b, mode);
296 res = new_rd_Not(dbgi, block, res, mode);
298 res = new_rd_Shr(dbgi, block, res, shift_cnt, mode);
300 set_irn_link(node, res);
301 pdeq_putr(lowered_nodes, node);
/* Fallback for compares: build the generic Mux-based 0/1 value. */
306 /* synthesize the 0/1 value */
307 set = create_set(node);
308 set_irn_link(node, set);
309 pdeq_putr(lowered_nodes, node);
/* Proj(Proj(Call)): a mode_b call result — patch the call's method type
 * and simply retag the Proj with the lowered mode. */
311 } else if(is_Proj(pred) && is_Call(get_Proj_pred(pred))) {
312 ir_type *type = get_Call_type(get_Proj_pred(pred));
313 adjust_method_type(type);
314 set_irn_mode(node, mode);
/* Proj(Proj(Start)): a mode_b parameter — patch the current graph's
 * entity type analogously. */
316 } else if(is_Proj(pred) && is_Start(get_Proj_pred(pred))) {
317 ir_entity *entity = get_irg_entity(current_ir_graph);
318 ir_type *type = get_entity_type(entity);
319 adjust_method_type(type);
320 set_irn_mode(node, mode);
/* Any other mode_b Proj is a bug in the caller/graph. */
324 panic("unexpected projb: %+F (pred: %+F)", node, pred);
/* Const: map the boolean tarvals true/false to 1/0 in the lowered mode. */
327 tarval *tv = get_Const_tarval(node);
328 if(tv == get_tarval_b_true()) {
329 tarval *tv_one = get_tarval_one(mode);
330 res = new_d_Const(dbgi, tv_one);
331 } else if(tv == get_tarval_b_false()) {
332 tarval *tv_zero = get_tarval_null(mode);
333 res = new_d_Const(dbgi, tv_zero);
335 panic("invalid boolean const %+F", node);
337 set_irn_link(node, res);
338 pdeq_putr(lowered_nodes, node);
/* Unknown lowers to an Unknown of the lowered mode. */
342 return new_Unknown(config.lowered_mode);
/* Every mode_b opcode must be handled above. */
344 panic("didn't expect %+F to have mode_b", node);
/**
 * Graph walker: for every node, finds mode_b inputs, lowers them via
 * lower_node(), and rewires the node to the lowered values. Return and
 * Call nodes additionally get their method types patched; where the
 * consumer still requires mode_b a Conv back is inserted (create_convb).
 *
 * NOTE(review): this listing is elided; declarations and some control
 * flow (e.g. the `continue` after the mode check, the condition paired
 * with the Mux test) are not visible here.
 */
348 static void lower_mode_b_walker(ir_node *node, void *env)
354 arity = get_irn_arity(node);
355 for(i = 0; i < arity; ++i) {
357 ir_node *in = get_irn_n(node, i);
/* Only mode_b inputs need lowering. */
358 if(get_irn_mode(in) != mode_b)
/* When direct-Cmp lowering is disabled, certain consumers (visible
 * here: a Mux whose own mode is not mode_b) may keep their raw
 * Proj(Cmp) input untouched. */
361 if(! config.lower_direct_cmp) {
363 (is_Mux(node) && get_irn_mode(node) != mode_b)) {
365 ir_node *pred = get_Proj_pred(in);
372 lowered_in = lower_node(in);
/* Returns/Calls consume the value through a method type that must be
 * patched to use the lowered type instead of mode_b. */
374 if(is_Return(node)) {
375 ir_entity *entity = get_irg_entity(current_ir_graph);
376 ir_type *type = get_entity_type(entity);
377 adjust_method_type(type);
378 } else if(is_Call(node)) {
379 ir_type *type = get_Call_type(node);
380 adjust_method_type(type);
/* Other consumers keep expecting mode_b: insert a Conv back. */
382 lowered_in = create_convb(lowered_in);
384 set_irn_n(node, i, lowered_in);
/* Re-register the rewired node in the value table for CSE. */
388 add_identities(current_ir_graph->value_table, node);
/**
 * Pre-pass walker: resets every node's link field so lower_node() can use
 * it as its "already lowered" cache.
 */
392 static void clear_links(ir_node *node, void *env)
395 set_irn_link(node, NULL);
/**
 * Entry point: lowers all mode_b operations in @p irg according to
 * @p nconfig. After this pass only mode-matching Convs from/to mode_b
 * remain (see file brief). NOTE(review): the line copying nconfig into
 * the global `config` is elided from this listing — confirm in full file.
 */
398 void ir_lower_mode_b(ir_graph *irg, const lower_mode_b_config_t *nconfig)
401 lowered_nodes = new_pdeq();
403 /* ensure no optimisation touches muxes anymore */
404 set_irg_state(irg, IR_GRAPH_STATE_KEEP_MUX);
/* The link field is used as the lowering cache; reserve it. */
406 ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK);
/* Disable conv_b optimisation so the inserted Convs survive. */
408 set_opt_allow_conv_b(0);
409 irg_walk_graph(irg, clear_links, NULL, NULL);
410 irg_walk_graph(irg, lower_mode_b_walker, NULL, NULL);
/* Remove the replaced mode_b nodes that have become dead. */
412 while(!pdeq_empty(lowered_nodes)) {
413 ir_node *node = (ir_node*) pdeq_getr(lowered_nodes);
414 maybe_kill_node(node);
416 del_pdeq(lowered_nodes);
418 ir_free_resources(irg, IR_RESOURCE_IRN_LINK);
/* Context bundling the generic graph-pass data with the lowering
 * configuration, for running ir_lower_mode_b() via the pass manager. */
422 ir_graph_pass_t pass;
423 const lower_mode_b_config_t *config;
427 * Wrapper to run ir_lower_mode_b() as an ir_graph pass
429 static int pass_wrapper(ir_graph *irg, void *context) {
/* context is the pass_t allocated in ir_lower_mode_b_pass(). */
430 struct pass_t *pass = context;
432 ir_lower_mode_b(irg, pass->config);
/**
 * Creates an ir_graph pass that runs ir_lower_mode_b() with @p config.
 * @param name    pass name, or NULL for the default "lower_mode_b"
 * @param config  lowering configuration passed through to the pass
 * @return the constructed graph pass (ownership per pass-manager rules)
 */
436 ir_graph_pass_t *ir_lower_mode_b_pass(
437 const char *name, const lower_mode_b_config_t *config) {
438 struct pass_t *pass = XMALLOCZ(struct pass_t);
440 pass->config = config;
441 return def_graph_pass_constructor(
442 &pass->pass, name ? name : "lower_mode_b", pass_wrapper);