/*
 * Copyright (C) 1995-2008 University of Karlsruhe. All right reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License.
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 */

/**
 * @file
 * @brief   lowers operations with mode_b. The result is a graph which
 *          might still contains some convs from/to mode_b, but no
 *          operations are performed on them anymore, they are just there
 *          so modes match. A backend can safely skip all mode_b convs.
 * @author  Matthias Braun, Christoph Mallon
 */
46 static lower_mode_b_config_t config;
47 static ir_type *lowered_type = NULL;
48 static pdeq *lowered_nodes = NULL;
51 * Removes a node if its out-edge count has reached 0.
52 * temporary hack until we have proper automatic dead code elimination.
54 static void maybe_kill_node(ir_node *node)
59 if(get_irn_n_edges(node) != 0)
62 irg = get_irn_irg(node);
64 assert(!is_Bad(node));
66 arity = get_irn_arity(node);
67 for (i = 0; i < arity; ++i) {
68 set_irn_n(node, i, new_Bad());
70 set_nodes_block(node, new_Bad());
72 edges_node_deleted(node, irg);
75 static ir_node *create_not(dbg_info *dbgi, ir_node *node)
77 ir_node *block = get_nodes_block(node);
78 ir_mode *mode = config.lowered_mode;
79 tarval *tv_one = get_tarval_one(mode);
80 ir_node *one = new_d_Const(dbgi, tv_one);
82 return new_rd_Eor(dbgi, block, node, one, mode);
85 static ir_node *create_convb(ir_node *node)
87 ir_node *block = get_nodes_block(node);
88 ir_node *conv = new_rd_Conv(NULL, block, node, mode_b);
93 static ir_type *create_lowered_type(void)
95 if(lowered_type == NULL) {
96 lowered_type = new_type_primitive(new_id_from_str("__lowered_mode_b"),
103 * creates a "set" node that produces a 0 or 1 based on a Cmp result
105 static ir_node *create_set(ir_node *node)
107 dbg_info *dbgi = get_irn_dbg_info(node);
108 ir_mode *mode = config.lowered_set_mode;
109 tarval *tv_one = get_tarval_one(mode);
110 ir_node *one = new_d_Const(dbgi, tv_one);
111 ir_node *block = get_nodes_block(node);
112 tarval *tv_zero = get_tarval_null(mode);
113 ir_node *zero = new_d_Const(dbgi, tv_zero);
115 ir_node *set = new_rd_Mux(dbgi, block, node, zero, one, mode);
117 if (mode != config.lowered_mode) {
118 set = new_r_Conv(block, set, config.lowered_mode);
124 static void adjust_method_type(ir_type *method_type)
130 n_params = get_method_n_params(method_type);
131 for(i = 0; i < n_params; ++i) {
132 ir_type *param = get_method_param_type(method_type, i);
133 if(get_type_mode(param) == mode_b) {
134 set_method_param_type(method_type, i, create_lowered_type());
138 n_res = get_method_n_ress(method_type);
139 for(i = 0; i < n_res; ++i) {
140 ir_type *res_type = get_method_res_type(method_type, i);
141 if(get_type_mode(res_type) == mode_b) {
142 set_method_res_type(method_type, i, create_lowered_type());
147 static ir_node *lower_node(ir_node *node)
149 dbg_info *dbgi = get_irn_dbg_info(node);
150 ir_node *block = get_nodes_block(node);
151 ir_mode *mode = config.lowered_mode;
154 assert(get_irn_mode(node) == mode_b);
156 res = get_irn_link(node);
160 /* TODO: be robust against phi-loops... */
161 switch (get_irn_opcode(node)) {
165 ir_node *unknown, *new_phi;
167 arity = get_irn_arity(node);
168 in = ALLOCAN(ir_node*, arity);
169 unknown = new_Unknown(config.lowered_mode);
170 for(i = 0; i < arity; ++i) {
173 new_phi = new_r_Phi(block, arity, in, config.lowered_mode);
174 set_irn_link(node, new_phi);
175 pdeq_putr(lowered_nodes, node);
177 for(i = 0; i < arity; ++i) {
178 ir_node *in = get_irn_n(node, i);
179 ir_node *low_in = lower_node(in);
181 set_irn_n(new_phi, i, low_in);
191 ir_node *copy = exact_copy(node);
193 arity = get_irn_arity(node);
194 for(i = 0; i < arity; ++i) {
195 ir_node *in = get_irn_n(node, i);
196 ir_node *low_in = lower_node(in);
198 set_irn_n(copy, i, low_in);
200 set_irn_mode(copy, config.lowered_mode);
202 set_irn_link(node, copy);
203 pdeq_putr(lowered_nodes, node);
207 ir_node *op = get_Not_op(node);
208 ir_node *low_op = lower_node(op);
210 res = create_not(dbgi, low_op);
211 set_irn_link(node, res);
212 pdeq_putr(lowered_nodes, node);
216 ir_node *cond = get_Mux_sel(node);
217 ir_node *low_cond = lower_node(cond);
218 ir_node *v_true = get_Mux_true(node);
219 ir_node *low_v_true = lower_node(v_true);
220 ir_node *v_false = get_Mux_false(node);
221 ir_node *low_v_false = lower_node(v_false);
223 ir_node *and0 = new_rd_And(dbgi, block, low_cond, low_v_true, mode);
224 ir_node *not_cond = create_not(dbgi, low_cond);
225 ir_node *and1 = new_rd_And(dbgi, block, not_cond, low_v_false, mode);
226 ir_node *or = new_rd_Or(dbgi, block, and0, and1, mode);
228 set_irn_link(node, or);
229 pdeq_putr(lowered_nodes, node);
233 ir_node *pred = get_Conv_op(node);
234 ir_mode *mode = get_irn_mode(pred);
235 tarval *tv_zeroc = get_tarval_null(mode);
236 ir_node *zero_cmp = new_d_Const(dbgi, tv_zeroc);
239 ir_node *cmp = new_rd_Cmp(dbgi, block, pred, zero_cmp);
240 ir_node *proj = new_rd_Proj(dbgi, block, cmp, mode_b, pn_Cmp_Lg);
241 set = create_set(proj);
243 set_irn_link(node, set);
244 pdeq_putr(lowered_nodes, node);
248 ir_node *pred = get_Proj_pred(node);
251 ir_node *left = get_Cmp_left(pred);
252 ir_node *right = get_Cmp_right(pred);
253 ir_mode *cmp_mode = get_irn_mode(left);
256 if ((mode_is_int(cmp_mode) || mode_is_reference(cmp_mode)) && (
257 get_mode_size_bits(cmp_mode) < get_mode_size_bits(mode) ||
258 (mode_is_signed(cmp_mode) && is_Const(right) && is_Const_null(right))
260 int pnc = get_Proj_proj(node);
265 if(pnc == pn_Cmp_Lt) {
266 /* a < b -> (a - b) >> 31 */
269 } else if(pnc == pn_Cmp_Le) {
270 /* a <= b -> ~(a - b) >> 31 */
274 } else if(pnc == pn_Cmp_Gt) {
275 /* a > b -> (b - a) >> 31 */
278 } else if(pnc == pn_Cmp_Ge) {
279 /* a >= b -> ~(a - b) >> 31 */
286 int bits = get_mode_size_bits(mode);
287 tarval *tv = new_tarval_from_long(bits-1, mode_Iu);
288 ir_node *shift_cnt = new_d_Const(dbgi, tv);
290 if(cmp_mode != mode) {
291 a = new_rd_Conv(dbgi, block, a, mode);
292 b = new_rd_Conv(dbgi, block, b, mode);
295 res = new_rd_Sub(dbgi, block, a, b, mode);
297 res = new_rd_Not(dbgi, block, res, mode);
299 res = new_rd_Shr(dbgi, block, res, shift_cnt, mode);
301 set_irn_link(node, res);
302 pdeq_putr(lowered_nodes, node);
307 /* synthesize the 0/1 value */
308 set = create_set(node);
309 set_irn_link(node, set);
310 pdeq_putr(lowered_nodes, node);
312 } else if(is_Proj(pred) && is_Call(get_Proj_pred(pred))) {
313 ir_type *type = get_Call_type(get_Proj_pred(pred));
314 adjust_method_type(type);
315 set_irn_mode(node, mode);
317 } else if(is_Proj(pred) && is_Start(get_Proj_pred(pred))) {
318 ir_entity *entity = get_irg_entity(current_ir_graph);
319 ir_type *type = get_entity_type(entity);
320 adjust_method_type(type);
321 set_irn_mode(node, mode);
325 panic("unexpected projb: %+F (pred: %+F)", node, pred);
328 tarval *tv = get_Const_tarval(node);
329 if(tv == get_tarval_b_true()) {
330 tarval *tv_one = get_tarval_one(mode);
331 res = new_d_Const(dbgi, tv_one);
332 } else if(tv == get_tarval_b_false()) {
333 tarval *tv_zero = get_tarval_null(mode);
334 res = new_d_Const(dbgi, tv_zero);
336 panic("invalid boolean const %+F", node);
338 set_irn_link(node, res);
339 pdeq_putr(lowered_nodes, node);
343 return new_Unknown(config.lowered_mode);
345 panic("didn't expect %+F to have mode_b", node);
349 static void lower_mode_b_walker(ir_node *node, void *env)
355 arity = get_irn_arity(node);
356 for(i = 0; i < arity; ++i) {
358 ir_node *in = get_irn_n(node, i);
359 if(get_irn_mode(in) != mode_b)
362 if(! config.lower_direct_cmp) {
364 (is_Mux(node) && get_irn_mode(node) != mode_b)) {
366 ir_node *pred = get_Proj_pred(in);
373 lowered_in = lower_node(in);
375 if(is_Return(node)) {
376 ir_entity *entity = get_irg_entity(current_ir_graph);
377 ir_type *type = get_entity_type(entity);
378 adjust_method_type(type);
379 } else if(is_Call(node)) {
380 ir_type *type = get_Call_type(node);
381 adjust_method_type(type);
383 lowered_in = create_convb(lowered_in);
385 set_irn_n(node, i, lowered_in);
389 add_identities(current_ir_graph->value_table, node);
393 static void clear_links(ir_node *node, void *env)
396 set_irn_link(node, NULL);
399 void ir_lower_mode_b(ir_graph *irg, const lower_mode_b_config_t *nconfig)
402 lowered_nodes = new_pdeq();
404 /* ensure no optimisation touches muxes anymore */
405 set_irg_state(irg, IR_GRAPH_STATE_KEEP_MUX);
407 ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK);
409 set_opt_allow_conv_b(0);
410 irg_walk_graph(irg, clear_links, NULL, NULL);
411 irg_walk_graph(irg, lower_mode_b_walker, NULL, NULL);
413 while(!pdeq_empty(lowered_nodes)) {
414 ir_node *node = (ir_node*) pdeq_getr(lowered_nodes);
415 maybe_kill_node(node);
417 del_pdeq(lowered_nodes);
419 ir_free_resources(irg, IR_RESOURCE_IRN_LINK);
423 ir_graph_pass_t pass;
424 const lower_mode_b_config_t *config;
428 * Wrapper to run ir_lower_mode_b() as an ir_graph pass
430 static int pass_wrapper(ir_graph *irg, void *context) {
431 struct pass_t *pass = context;
433 ir_lower_mode_b(irg, pass->config);
437 ir_graph_pass_t *ir_lower_mode_b_pass(
438 const char *name, const lower_mode_b_config_t *config) {
439 struct pass_t *pass = XMALLOCZ(struct pass_t);
441 pass->config = config;
442 return def_graph_pass_constructor(
443 &pass->pass, name ? name : "lower_mode_b", pass_wrapper);