2 * Copyright (C) 1995-2008 University of Karlsruhe. All rights reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief lowers operations with mode_b. The result is a graph which
23 * might still contains some convs from/to mode_b, but no
24 * operations are performed on them anymore, they are just there
25 * so modes match. A backend can safely skip all mode_b convs.
26 * @author Matthias Braun, Christoph Mallon
29 * After this pass the following should hold:
30 * - The only inputs with mode_b are for the Cond node or the
31 * Sel input of a Mux node.
32 * - The only nodes producing mode_b are: Proj(Cmp) and ConvB(X) (where X
33 * is some mode that can be converted to the lowered mode).
34 * ConvB will usually be implemented by a comparison with 0 producing some
35 * flags in the backends. It's debatable whether ConvB(X) is a good idea.
36 * Maybe we should rather introduce a Test node.
37 * All other former uses should be converted to manipulations with an integer
38 * mode that was specified in the pass configuration.
/* NOTE(review): pass-global state — this lowering is not reentrant / thread-safe. */
58 static lower_mode_b_config_t config;
/* lazily-created primitive type wrapping config.lowered_mode (see create_lowered_type) */
59 static ir_type *lowered_type = NULL;
/* queue of replaced nodes, drained by maybe_kill_node() at the end of the pass */
60 static pdeq *lowered_nodes = NULL;
63 * Removes a node if its out-edge count has reached 0.
64 * temporary hack until we have proper automatic dead code elimination.
66 static void maybe_kill_node(ir_node *node)
/* still has users -> must stay alive */
71 if (get_irn_n_edges(node) != 0)
74 irg = get_irn_irg(node);
76 assert(!is_Bad(node));
/* detach every operand (and the block) so predecessors may become dead too */
78 arity = get_irn_arity(node);
79 for (i = 0; i < arity; ++i) {
80 set_irn_n(node, i, new_Bad());
82 set_nodes_block(node, new_Bad());
/* notify the edge infrastructure that the node is gone */
84 edges_node_deleted(node, irg);
/* Logical negation of a lowered 0/1 value: node XOR 1. */
87 static ir_node *create_not(dbg_info *dbgi, ir_node *node)
89 ir_node *block = get_nodes_block(node);
90 ir_mode *mode = config.lowered_mode;
91 tarval *tv_one = get_mode_one(mode);
92 ir_node *one = new_d_Const(dbgi, tv_one);
/* x ^ 1 flips the low bit: 0 <-> 1 */
94 return new_rd_Eor(dbgi, block, node, one, mode);
/* Wraps @p node in a Conv back to mode_b so modes match at mode_b users
 * (Cond / Mux selector). NOTE(review): return statement not visible in
 * this chunk — presumably returns conv. */
97 static ir_node *create_convb(ir_node *node)
99 ir_node *block = get_nodes_block(node);
100 ir_node *conv = new_rd_Conv(NULL, block, node, mode_b);
/* Returns (and lazily creates) the primitive type for config.lowered_mode,
 * used to rewrite mode_b parameters/results in method types. */
105 static ir_type *create_lowered_type(void)
107 if (lowered_type == NULL) {
108 lowered_type = new_type_primitive(config.lowered_mode);
114 * creates a "set" node that produces a 0 or 1 based on a Cmp result
116 static ir_node *create_set(ir_node *node)
118 dbg_info *dbgi = get_irn_dbg_info(node);
/* built in lowered_set_mode first; possibly converted below */
119 ir_mode *mode = config.lowered_set_mode;
120 tarval *tv_one = get_mode_one(mode);
121 ir_node *one = new_d_Const(dbgi, tv_one);
122 ir_node *block = get_nodes_block(node);
123 tarval *tv_zero = get_mode_null(mode);
124 ir_node *zero = new_d_Const(dbgi, tv_zero);
/* Mux(cond, false: zero, true: one) materializes the boolean as 0/1 */
126 ir_node *set = new_rd_Mux(dbgi, block, node, zero, one, mode);
128 if (mode != config.lowered_mode) {
/* widen/convert the set result to the general lowered mode */
129 set = new_r_Conv(block, set, config.lowered_mode);
/* Replaces every mode_b parameter and result type of @p method_type
 * by the lowered primitive type. */
135 static void adjust_method_type(ir_type *method_type)
141 n_params = get_method_n_params(method_type);
142 for (i = 0; i < n_params; ++i) {
143 ir_type *param = get_method_param_type(method_type, i);
144 if (get_type_mode(param) == mode_b) {
145 set_method_param_type(method_type, i, create_lowered_type());
/* same treatment for the result types */
149 n_res = get_method_n_ress(method_type);
150 for (i = 0; i < n_res; ++i) {
151 ir_type *res_type = get_method_res_type(method_type, i);
152 if (get_type_mode(res_type) == mode_b) {
153 set_method_res_type(method_type, i, create_lowered_type());
/*
 * Returns the lowered (config.lowered_mode) replacement for a mode_b node.
 * The replacement is cached in the node's link field; replaced originals
 * are queued on lowered_nodes so maybe_kill_node() can remove them later.
 * NOTE(review): several lines (case labels, declarations) are not visible
 * in this chunk; comments describe only what the visible code shows.
 */
158 static ir_node *lower_node(ir_node *node)
160 dbg_info *dbgi = get_irn_dbg_info(node);
161 ir_node *block = get_nodes_block(node);
162 ir_mode *mode = config.lowered_mode;
/* already lowered? the link field caches the replacement */
165 res = get_irn_link(node);
169 assert(get_irn_mode(node) == mode_b);
171 switch (get_irn_opcode(node)) {
/* --- Phi: create the lowered Phi with Unknown inputs first, then lower
 * the operands — a Phi may (indirectly) depend on itself --- */
175 ir_node *unknown, *new_phi;
177 arity = get_irn_arity(node);
178 in = ALLOCAN(ir_node*, arity);
179 unknown = new_Unknown(mode);
180 for (i = 0; i < arity; ++i) {
/* register the replacement BEFORE recursing so cycles hit the cache */
183 new_phi = new_r_Phi(block, arity, in, mode);
184 /* FIXME This does not correctly break cycles: The Phi might not be the
185 * first in the recursion, so the caller(s) are some yet un-lowered nodes
186 * and this Phi might have them (indirectly) as operands, so they would be
188 set_irn_link(node, new_phi);
189 pdeq_putr(lowered_nodes, node);
/* now lower the real operands and wire them in */
191 for (i = 0; i < arity; ++i) {
192 ir_node *in = get_irn_n(node, i);
193 ir_node *low_in = lower_node(in);
195 set_irn_n(new_phi, i, low_in);
/* --- bitwise ops keep their shape: copy the node, lower the operands
 * and retag the mode (case labels not visible here — presumably
 * And/Or/Eor; confirm against the full source) --- */
206 res = exact_copy(node);
207 arity = get_irn_arity(node);
208 for (i = 0; i < arity; ++i) {
209 ir_node *in = get_irn_n(node, i);
210 ir_node *low_in = lower_node(in);
212 set_irn_n(res, i, low_in);
214 set_irn_mode(res, mode);
/* --- Not: lower the operand, then XOR it with 1 --- */
219 ir_node *op = get_Not_op(node);
220 ir_node *low_op = lower_node(op);
222 res = create_not(dbgi, low_op);
/* --- Mux of mode_b values: (sel & vtrue) | (~sel & vfalse) --- */
227 ir_node *cond = get_Mux_sel(node);
228 ir_node *low_cond = lower_node(cond);
229 ir_node *v_true = get_Mux_true(node);
230 ir_node *low_v_true = lower_node(v_true);
231 ir_node *v_false = get_Mux_false(node);
232 ir_node *low_v_false = lower_node(v_false);
234 ir_node *and0 = new_rd_And(dbgi, block, low_cond, low_v_true, mode);
235 ir_node *not_cond = create_not(dbgi, low_cond);
236 ir_node *and1 = new_rd_And(dbgi, block, not_cond, low_v_false, mode);
237 res = new_rd_Or(dbgi, block, and0, and1, mode);
/* --- Conv from some other mode to mode_b: implement as (pred != 0) --- */
242 ir_node *pred = get_Conv_op(node);
/* NOTE(review): this 'mode' shadows the outer lowered 'mode' — it is the
 * operand's mode, used for the zero constant of the comparison */
243 ir_mode *mode = get_irn_mode(pred);
244 tarval *tv_zeroc = get_mode_null(mode);
245 ir_node *zero_cmp = new_d_Const(dbgi, tv_zeroc);
247 ir_node *cmp = new_rd_Cmp(dbgi, block, pred, zero_cmp);
248 ir_node *proj = new_rd_Proj(dbgi, cmp, mode_b, pn_Cmp_Lg);
249 res = create_set(proj);
/* --- Proj: mode_b Projs stem from Cmp, Call results or Start params --- */
254 ir_node *pred = get_Proj_pred(node);
257 ir_node *left = get_Cmp_left(pred);
258 ir_node *right = get_Cmp_right(pred);
259 ir_mode *cmp_mode = get_irn_mode(left);
/* int/pointer compares that fit into the lowered mode (or signed
 * compares against constant 0) can be computed via sub/not/shift
 * instead of a Mux-based set */
261 if ((mode_is_int(cmp_mode) || mode_is_reference(cmp_mode)) &&
262 (get_mode_size_bits(cmp_mode) < get_mode_size_bits(mode) ||
263 (mode_is_signed(cmp_mode) && is_Const(right) && is_Const_null(right)))) {
264 int pnc = get_Proj_proj(node);
272 if (pnc == pn_Cmp_Lt) {
273 /* a < b -> (a - b) >> 31 */
276 } else if (pnc == pn_Cmp_Le) {
277 /* a <= b -> ~(a - b) >> 31 */
281 } else if (pnc == pn_Cmp_Gt) {
282 /* a > b -> (b - a) >> 31 */
285 } else if (pnc == pn_Cmp_Ge) {
286 /* a >= b -> ~(a - b) >> 31 */
/* shifting right by (bits-1) extracts the sign bit as 0/1 */
294 bits = get_mode_size_bits(mode);
295 tv = new_tarval_from_long(bits-1, mode_Iu);
296 shift_cnt = new_d_Const(dbgi, tv);
298 if (cmp_mode != mode) {
299 a = new_rd_Conv(dbgi, block, a, mode);
300 b = new_rd_Conv(dbgi, block, b, mode);
303 res = new_rd_Sub(dbgi, block, a, b, mode);
305 res = new_rd_Not(dbgi, block, res, mode);
307 res = new_rd_Shr(dbgi, block, res, shift_cnt, mode);
309 /* synthesize the 0/1 value */
311 res = create_set(node);
/* Proj(Call): mode_b call result — rewrite the call's method type */
313 } else if (is_Proj(pred) && is_Call(get_Proj_pred(pred))) {
314 ir_type *type = get_Call_type(get_Proj_pred(pred));
315 adjust_method_type(type);
/* the Proj itself is reused, only retagged to the lowered mode */
316 set_irn_mode(node, mode);
318 goto own_replacement;
/* Proj(Start): mode_b parameter — rewrite the graph entity's type */
319 } else if (is_Proj(pred) && is_Start(get_Proj_pred(pred))) {
320 ir_entity *entity = get_irg_entity(current_ir_graph);
321 ir_type *type = get_entity_type(entity);
322 adjust_method_type(type);
323 set_irn_mode(node, mode);
325 goto own_replacement;
327 panic("unexpected projb: %+F (pred: %+F)", node, pred);
/* --- Const: true -> 1, false -> 0 in the lowered mode --- */
333 tarval *tv = get_Const_tarval(node);
334 if (tv == get_tarval_b_true()) {
335 tarval *tv_one = get_mode_one(mode);
336 res = new_d_Const(dbgi, tv_one);
337 } else if (tv == get_tarval_b_false()) {
338 tarval *tv_zero = get_mode_null(mode);
339 res = new_d_Const(dbgi, tv_zero);
341 panic("invalid boolean const %+F", node);
/* --- Unknown stays Unknown, just in the lowered mode --- */
347 res = new_Unknown(mode);
351 panic("didn't expect %+F to have mode_b", node);
/* remember the replacement and queue the original for removal */
354 pdeq_putr(lowered_nodes, node);
356 set_irn_link(node, res);
/* Graph walker: lowers every mode_b input of @p node and rewires the node
 * to the lowered value (adding a ConvB where mode_b is still required). */
360 static void lower_mode_b_walker(ir_node *node, void *env)
366 arity = get_irn_arity(node);
367 for (i = 0; i < arity; ++i) {
369 ir_node *in = get_irn_n(node, i);
/* only mode_b inputs are of interest */
370 if (get_irn_mode(in) != mode_b)
373 if (! config.lower_direct_cmp) {
374 /* Proj(Cmp) as input for Cond and Mux nodes needs no changes.
375 (Mux with mode_b is an exception as it gets replaced by and/or
376 anyway so we still lower the inputs then) */
378 (is_Mux(node) && get_irn_mode(node) != mode_b)) {
380 ir_node *pred = get_Proj_pred(in);
387 lowered_in = lower_node(in);
/* a Call consuming a (formerly) mode_b value needs its type fixed too */
390 ir_type *type = get_Call_type(node);
391 adjust_method_type(type);
/* Cond and the Mux selector still expect mode_b: convert back */
392 } else if (is_Cond(node) || (is_Mux(node) && i == 0)) {
393 lowered_in = create_convb(lowered_in);
395 set_irn_n(node, i, lowered_in);
/* inputs/modes changed — re-register the node for CSE identity lookup */
399 add_identities(current_ir_graph->value_table, node);
/*
 * Entry point: lowers all mode_b operations of @p irg according to
 * @p nconfig (see the file header for the invariants that hold afterwards).
 * NOTE(review): the copy of *nconfig into the static 'config' is not
 * visible in this chunk — confirm against the full source.
 */
403 void ir_lower_mode_b(ir_graph *irg, const lower_mode_b_config_t *nconfig)
405 ir_entity *entity = get_irg_entity(irg);
406 ir_type *type = get_entity_type(entity);
409 lowered_nodes = new_pdeq();
412 /* ensure no optimisation touches muxes anymore */
413 set_irg_state(irg, IR_GRAPH_STATE_KEEP_MUX);
/* link fields are used as the replacement cache during lowering */
415 ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK);
/* the graph's own method type may carry mode_b params/results */
417 adjust_method_type(type);
/* Conv-to-mode_b optimisation would undo the lowering — switch it off */
419 set_opt_allow_conv_b(0);
420 irg_walk_graph(irg, firm_clear_link, NULL, NULL);
421 irg_walk_graph(irg, lower_mode_b_walker, NULL, NULL);
/* drop all nodes that were replaced during lowering */
423 while (!pdeq_empty(lowered_nodes)) {
424 ir_node *node = (ir_node*) pdeq_getr(lowered_nodes);
425 maybe_kill_node(node);
427 del_pdeq(lowered_nodes);
429 ir_free_resources(irg, IR_RESOURCE_IRN_LINK);
/* pass data: embedded base pass plus the lowering configuration to apply */
433 ir_graph_pass_t pass;
434 const lower_mode_b_config_t *config;
438 * Wrapper to run ir_lower_mode_b() as an ir_graph pass
440 static int pass_wrapper(ir_graph *irg, void *context)
/* context is the struct pass_t allocated in ir_lower_mode_b_pass() */
442 struct pass_t *pass = context;
444 ir_lower_mode_b(irg, pass->config);
/* Creates an ir_graph pass running ir_lower_mode_b() with @p config.
 * NOTE(review): pass is XMALLOCZ'd here — presumably owned and freed by
 * the pass infrastructure; confirm. */
448 ir_graph_pass_t *ir_lower_mode_b_pass(
449 const char *name, const lower_mode_b_config_t *config)
451 struct pass_t *pass = XMALLOCZ(struct pass_t);
453 pass->config = config;
454 return def_graph_pass_constructor(
455 &pass->pass, name ? name : "lower_mode_b", pass_wrapper);