2 * Copyright (C) 1995-2008 University of Karlsruhe. All rights reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief lowers operations with mode_b. The result is a graph which
23 * might still contain some convs from/to mode_b, but no
24 * operations are performed on them anymore, they are just there
25 * so modes match. A backend can safely skip all mode_b convs.
26 * @author Matthias Braun, Christoph Mallon
29 * After this pass the following should hold:
30 * - The only inputs with mode_b are for the Cond node or the
31 * Sel input of a Mux node.
32 * - The only nodes producing mode_b are: Proj(Cmp) and ConvB(X) (where X
33 * is some mode that can be converted to the lowered mode).
34 * ConvB will usually be implemented by a comparison with 0 producing some
35 * flags in the backends.
36 * All other former uses should be converted to manipulations with an integer
37 * mode that was specified in the pass configuration.
/* File-scope state of the mode_b lowering pass (set up in ir_lower_mode_b). */
57 static lower_mode_b_config_t config; /* copy of the active pass configuration */
58 static ir_type *lowered_type = NULL; /* lazily created primitive type for the lowered mode */
59 static pdeq *lowered_nodes = NULL; /* queue of replaced nodes, killed at the end of the pass */
62 * Removes a node if its out-edge count has reached 0.
63 * temporary hack until we have proper automatic dead code elimination.
/* NOTE(review): local declarations, the early return and the braces of
 * this function are not visible in this excerpt of the file. */
65 static void maybe_kill_node(ir_node *node)
70 if (get_irn_n_edges(node) != 0) /* still has users: keep the node */
73 irg = get_irn_irg(node);
75 assert(!is_Bad(node));
/* detach all operands so this node no longer keeps other nodes alive */
77 arity = get_irn_arity(node);
78 for (i = 0; i < arity; ++i) {
79 set_irn_n(node, i, new_Bad());
81 set_nodes_block(node, new_Bad());
/* notify the out-edge subsystem before the node disappears */
83 edges_node_deleted(node, irg);
/**
 * Creates the lowered boolean negation of @p node: node XOR 1,
 * computed in the configured lowered (integer) mode.
 */
86 static ir_node *create_not(dbg_info *dbgi, ir_node *node)
88 ir_node *block = get_nodes_block(node);
89 ir_mode *mode = config.lowered_mode;
90 tarval *tv_one = get_tarval_one(mode);
91 ir_node *one = new_d_Const(dbgi, tv_one);
93 return new_rd_Eor(dbgi, block, node, one, mode);
/**
 * Wraps a lowered value in a Conv back to mode_b (per the file header,
 * backends can safely skip such convs).
 * NOTE(review): the return statement is not visible in this excerpt.
 */
96 static ir_node *create_convb(ir_node *node)
98 ir_node *block = get_nodes_block(node);
99 ir_node *conv = new_rd_Conv(NULL, block, node, mode_b);
/**
 * Returns the primitive type for the lowered mode, creating it lazily
 * on first use.  NOTE(review): the return statement is not visible in
 * this excerpt.
 */
104 static ir_type *create_lowered_type(void)
106 if (lowered_type == NULL) {
107 lowered_type = new_type_primitive(config.lowered_mode);
113 * creates a "set" node that produces a 0 or 1 based on a Cmp result
115 static ir_node *create_set(ir_node *node)
117 dbg_info *dbgi = get_irn_dbg_info(node);
118 ir_mode *mode = config.lowered_set_mode;
119 tarval *tv_one = get_tarval_one(mode);
120 ir_node *one = new_d_Const(dbgi, tv_one);
121 ir_node *block = get_nodes_block(node);
122 tarval *tv_zero = get_tarval_null(mode);
123 ir_node *zero = new_d_Const(dbgi, tv_zero);
/* Mux(node, zero, one): yields 1 when the condition holds, else 0 */
125 ir_node *set = new_rd_Mux(dbgi, block, node, zero, one, mode);
/* convert when the set mode differs from the lowered mode */
127 if (mode != config.lowered_mode) {
128 set = new_r_Conv(block, set, config.lowered_mode);
/**
 * Rewrites @p method_type in place: every parameter or result type
 * whose mode is mode_b is replaced by the lowered primitive type.
 */
134 static void adjust_method_type(ir_type *method_type)
/* lower all mode_b parameter types */
140 n_params = get_method_n_params(method_type);
141 for (i = 0; i < n_params; ++i) {
142 ir_type *param = get_method_param_type(method_type, i);
143 if (get_type_mode(param) == mode_b) {
144 set_method_param_type(method_type, i, create_lowered_type());
/* lower all mode_b result types */
148 n_res = get_method_n_ress(method_type);
149 for (i = 0; i < n_res; ++i) {
150 ir_type *res_type = get_method_res_type(method_type, i);
151 if (get_type_mode(res_type) == mode_b) {
152 set_method_res_type(method_type, i, create_lowered_type());
/**
 * Recursively produces the lowered (integer-mode) replacement of a
 * mode_b node.  Already-lowered results are cached in the node's link
 * field; replaced originals are queued on lowered_nodes so the pass
 * can kill them afterwards.
 * NOTE(review): the switch's case labels, several braces and return
 * statements are not visible in this excerpt of the file.
 */
157 static ir_node *lower_node(ir_node *node)
159 dbg_info *dbgi = get_irn_dbg_info(node);
160 ir_node *block = get_nodes_block(node);
161 ir_mode *mode = config.lowered_mode;
164 assert(get_irn_mode(node) == mode_b);
/* reuse a cached result from an earlier visit */
166 res = get_irn_link(node);
170 switch (get_irn_opcode(node)) {
/* Phi: first create the new Phi with Unknown inputs (to break cycles),
 * then lower each predecessor and patch it in */
174 ir_node *unknown, *new_phi;
176 arity = get_irn_arity(node);
177 in = ALLOCAN(ir_node*, arity);
178 unknown = new_Unknown(config.lowered_mode);
179 for (i = 0; i < arity; ++i) {
182 new_phi = new_r_Phi(block, arity, in, config.lowered_mode);
/* cache the result before recursing so cyclic Phis terminate */
183 set_irn_link(node, new_phi);
184 pdeq_putr(lowered_nodes, node);
186 for (i = 0; i < arity; ++i) {
187 ir_node *in = get_irn_n(node, i);
188 ir_node *low_in = lower_node(in);
190 set_irn_n(new_phi, i, low_in);
/* bitwise ops (presumably And/Or/Eor — case labels not visible):
 * copy the node, lower all inputs and retarget its mode */
200 ir_node *copy = exact_copy(node);
202 arity = get_irn_arity(node);
203 for (i = 0; i < arity; ++i) {
204 ir_node *in = get_irn_n(node, i);
205 ir_node *low_in = lower_node(in);
207 set_irn_n(copy, i, low_in);
209 set_irn_mode(copy, config.lowered_mode);
211 set_irn_link(node, copy);
212 pdeq_putr(lowered_nodes, node);
/* Not: becomes XOR with 1 (see create_not) */
216 ir_node *op = get_Not_op(node);
217 ir_node *low_op = lower_node(op);
219 res = create_not(dbgi, low_op);
220 set_irn_link(node, res);
221 pdeq_putr(lowered_nodes, node);
/* Mux on mode_b values: (cond AND v_true) OR (NOT cond AND v_false) */
225 ir_node *cond = get_Mux_sel(node);
226 ir_node *low_cond = lower_node(cond);
227 ir_node *v_true = get_Mux_true(node);
228 ir_node *low_v_true = lower_node(v_true);
229 ir_node *v_false = get_Mux_false(node);
230 ir_node *low_v_false = lower_node(v_false);
232 ir_node *and0 = new_rd_And(dbgi, block, low_cond, low_v_true, mode);
233 ir_node *not_cond = create_not(dbgi, low_cond);
234 ir_node *and1 = new_rd_And(dbgi, block, not_cond, low_v_false, mode);
235 ir_node *or = new_rd_Or(dbgi, block, and0, and1, mode);
237 set_irn_link(node, or);
238 pdeq_putr(lowered_nodes, node);
/* Conv to mode_b: compare the operand against 0 and build a 0/1 set */
242 ir_node *pred = get_Conv_op(node);
243 ir_mode *mode = get_irn_mode(pred);
244 tarval *tv_zeroc = get_tarval_null(mode);
245 ir_node *zero_cmp = new_d_Const(dbgi, tv_zeroc);
248 ir_node *cmp = new_rd_Cmp(dbgi, block, pred, zero_cmp);
249 ir_node *proj = new_rd_Proj(dbgi, block, cmp, mode_b, pn_Cmp_Lg);
250 set = create_set(proj);
252 set_irn_link(node, set);
253 pdeq_putr(lowered_nodes, node);
/* Proj: distinguish by predecessor below */
257 ir_node *pred = get_Proj_pred(node);
/* Proj(Cmp): possibly lower the comparison to sub/shift arithmetic */
260 ir_node *left = get_Cmp_left(pred);
261 ir_node *right = get_Cmp_right(pred);
262 ir_mode *cmp_mode = get_irn_mode(left);
/* arithmetic trick only valid for int/reference modes that are
 * narrower than the lowered mode, or signed compares against 0 */
265 if ((mode_is_int(cmp_mode) || mode_is_reference(cmp_mode)) && (
266 get_mode_size_bits(cmp_mode) < get_mode_size_bits(mode) ||
267 (mode_is_signed(cmp_mode) && is_Const(right) && is_Const_null(right))
269 int pnc = get_Proj_proj(node);
/* select operands/negation per relation; the assignments of a, b and
 * the negation flag are not visible in this excerpt */
274 if (pnc == pn_Cmp_Lt) {
275 /* a < b -> (a - b) >> 31 */
278 } else if (pnc == pn_Cmp_Le) {
279 /* a <= b -> ~(a - b) >> 31 */
283 } else if (pnc == pn_Cmp_Gt) {
284 /* a > b -> (b - a) >> 31 */
287 } else if (pnc == pn_Cmp_Ge) {
288 /* a >= b -> ~(a - b) >> 31 */
/* shift by (mode width - 1) moves the sign bit to bit 0 */
295 int bits = get_mode_size_bits(mode);
296 tarval *tv = new_tarval_from_long(bits-1, mode_Iu);
297 ir_node *shift_cnt = new_d_Const(dbgi, tv);
298 if (cmp_mode != mode) {
300 a = new_rd_Conv(dbgi, block, a, mode);
301 b = new_rd_Conv(dbgi, block, b, mode);
304 res = new_rd_Sub(dbgi, block, a, b, mode);
306 res = new_rd_Not(dbgi, block, res, mode);
308 res = new_rd_Shr(dbgi, block, res, shift_cnt, mode);
310 set_irn_link(node, res);
311 pdeq_putr(lowered_nodes, node);
316 /* synthesize the 0/1 value */
317 set = create_set(node);
318 set_irn_link(node, set);
319 pdeq_putr(lowered_nodes, node);
/* Proj(Proj(Call)): a mode_b call result — fix the call type instead */
321 } else if (is_Proj(pred) && is_Call(get_Proj_pred(pred))) {
322 ir_type *type = get_Call_type(get_Proj_pred(pred));
323 adjust_method_type(type);
324 set_irn_mode(node, mode);
/* Proj(Proj(Start)): a mode_b parameter — fix the entity's type */
326 } else if (is_Proj(pred) && is_Start(get_Proj_pred(pred))) {
327 ir_entity *entity = get_irg_entity(current_ir_graph);
328 ir_type *type = get_entity_type(entity);
329 adjust_method_type(type);
330 set_irn_mode(node, mode);
334 panic("unexpected projb: %+F (pred: %+F)", node, pred);
/* Const: translate true/false tarvals to 1/0 in the lowered mode */
337 tarval *tv = get_Const_tarval(node);
338 if (tv == get_tarval_b_true()) {
339 tarval *tv_one = get_tarval_one(mode);
340 res = new_d_Const(dbgi, tv_one);
341 } else if (tv == get_tarval_b_false()) {
342 tarval *tv_zero = get_tarval_null(mode);
343 res = new_d_Const(dbgi, tv_zero);
345 panic("invalid boolean const %+F", node);
347 set_irn_link(node, res);
348 pdeq_putr(lowered_nodes, node);
/* Unknown stays Unknown, just in the lowered mode */
352 return new_Unknown(config.lowered_mode);
354 panic("didn't expect %+F to have mode_b", node);
/**
 * Graph-walker callback: lowers every mode_b input of @p node and, for
 * Cond nodes and Mux selectors, converts the lowered value back to
 * mode_b via create_convb().
 * NOTE(review): some conditions and braces of this function are not
 * visible in this excerpt of the file.
 */
358 static void lower_mode_b_walker(ir_node *node, void *env)
364 arity = get_irn_arity(node);
365 for (i = 0; i < arity; ++i) {
367 ir_node *in = get_irn_n(node, i);
368 if (get_irn_mode(in) != mode_b)
371 if (! config.lower_direct_cmp) {
372 /* Proj(Cmp) as input for Cond and Mux nodes needs no changes.
373 (Mux with mode_b is an exception as it gets replaced by and/or
374 anyway so we still lower the inputs then) */
376 (is_Mux(node) && get_irn_mode(node) != mode_b)) {
378 ir_node *pred = get_Proj_pred(in);
385 lowered_in = lower_node(in);
/* a mode_b argument of a Call: the call's type must be fixed too
 * (enclosing condition not visible in this excerpt) */
388 ir_type *type = get_Call_type(node);
389 adjust_method_type(type);
/* Cond inputs and the Mux selector (input 0) stay mode_b */
390 } else if (is_Cond(node) || (is_Mux(node) && i == 0)) {
391 lowered_in = create_convb(lowered_in);
393 set_irn_n(node, i, lowered_in);
/* re-register the changed node in the value table for CSE */
397 add_identities(current_ir_graph->value_table, node);
/* Walker callback: resets every node's link field before lowering. */
401 static void clear_links(ir_node *node, void *env)
404 set_irn_link(node, NULL);
/**
 * Lowers all mode_b operations in @p irg according to @p nconfig: walks
 * the graph replacing mode_b computations by integer-mode equivalents,
 * then kills the replaced nodes.
 * NOTE(review): the line copying @p nconfig into the file-scope
 * 'config' is not visible in this excerpt.
 */
407 void ir_lower_mode_b(ir_graph *irg, const lower_mode_b_config_t *nconfig)
409 ir_entity *entity = get_irg_entity(irg);
410 ir_type *type = get_entity_type(entity);
413 lowered_nodes = new_pdeq();
416 /* ensure no optimisation touches muxes anymore */
417 set_irg_state(irg, IR_GRAPH_STATE_KEEP_MUX);
/* the pass caches lowered results in the nodes' link fields */
419 ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK);
/* lower mode_b parameters/results of the graph's own method type */
421 adjust_method_type(type);
/* keep localopts from re-creating mode_b convs while we work */
423 set_opt_allow_conv_b(0);
424 irg_walk_graph(irg, clear_links, NULL, NULL);
425 irg_walk_graph(irg, lower_mode_b_walker, NULL, NULL);
/* dispose of all nodes that were replaced during the walk */
427 while(!pdeq_empty(lowered_nodes)) {
428 ir_node *node = (ir_node*) pdeq_getr(lowered_nodes);
429 maybe_kill_node(node);
431 del_pdeq(lowered_nodes);
433 ir_free_resources(irg, IR_RESOURCE_IRN_LINK);
/* members of the pass environment struct (the 'struct pass_t {' header
 * line is not visible in this excerpt) */
437 ir_graph_pass_t pass; /* base graph-pass data, must be first */
438 const lower_mode_b_config_t *config; /* configuration forwarded to ir_lower_mode_b() */
442 * Wrapper to run ir_lower_mode_b() as an ir_graph pass
444 static int pass_wrapper(ir_graph *irg, void *context)
446 struct pass_t *pass = context; /* context is the pass_t allocated in ir_lower_mode_b_pass() */
448 ir_lower_mode_b(irg, pass->config);
/**
 * Creates an ir_graph pass that runs ir_lower_mode_b() with @p config.
 * @p name may be NULL; "lower_mode_b" is used as default then.
 */
452 ir_graph_pass_t *ir_lower_mode_b_pass(
453 const char *name, const lower_mode_b_config_t *config)
/* zero-initialized allocation; freed by the pass framework, not here */
455 struct pass_t *pass = XMALLOCZ(struct pass_t);
457 pass->config = config;
458 return def_graph_pass_constructor(
459 &pass->pass, name ? name : "lower_mode_b", pass_wrapper);