2 * Copyright (C) 1995-2008 University of Karlsruhe. All rights reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief lowers operations with mode_b. The result is a graph which
23 * might still contains some convs from/to mode_b, but no
24 * operations are performed on them anymore, they are just there
25 * so modes match. A backend can safely skip all mode_b convs.
26 * @author Matthias Braun, Christoph Mallon
29 * After this pass the following should hold:
30 * - The only inputs with mode_b are for the Cond node or the
31 * Sel input of a Mux node.
32 * - The only nodes producing mode_b are: Proj(Cmp) and ConvB(X) (where X
33 * is some mode that can be converted to the lowered mode).
34 * ConvB will usually be implemented by a comparison with 0 producing some
35 * flags in the backends. It's debatable whether ConvB(X) is a good idea.
36 * Maybe we should rather introduce a Test node.
37 * All other former uses should be converted to manipulations with an integer
38 * mode that was specified in the pass configuration.
58 static lower_mode_b_config_t config;
59 static ir_type *lowered_type = NULL;
60 static pdeq *lowered_nodes = NULL;
63 * Removes a node if its out-edge count has reached 0.
64 * temporary hack until we have proper automatic dead code elimination.
66 static void maybe_kill_node(ir_node *node)
71 if (get_irn_n_edges(node) != 0)
74 irg = get_irn_irg(node);
76 assert(!is_Bad(node));
78 arity = get_irn_arity(node);
79 for (i = 0; i < arity; ++i) {
80 set_irn_n(node, i, new_r_Bad(irg));
82 set_nodes_block(node, new_r_Bad(irg));
84 edges_node_deleted(node, irg);
87 static ir_node *create_not(dbg_info *dbgi, ir_node *node)
89 ir_node *block = get_nodes_block(node);
90 ir_mode *mode = config.lowered_mode;
91 tarval *tv_one = get_mode_one(mode);
92 ir_graph *irg = get_irn_irg(node);
93 ir_node *one = new_rd_Const(dbgi, irg, tv_one);
95 return new_rd_Eor(dbgi, block, node, one, mode);
98 static ir_node *create_convb(ir_node *node)
100 ir_node *block = get_nodes_block(node);
101 ir_node *conv = new_rd_Conv(NULL, block, node, mode_b);
106 static ir_type *create_lowered_type(void)
108 if (lowered_type == NULL) {
109 lowered_type = new_type_primitive(config.lowered_mode);
115 * creates a "set" node that produces a 0 or 1 based on a Cmp result
117 static ir_node *create_set(ir_node *node)
119 dbg_info *dbgi = get_irn_dbg_info(node);
120 ir_graph *irg = get_irn_irg(node);
121 ir_mode *mode = config.lowered_set_mode;
122 tarval *tv_one = get_mode_one(mode);
123 ir_node *one = new_rd_Const(dbgi, irg, tv_one);
124 ir_node *block = get_nodes_block(node);
125 tarval *tv_zero = get_mode_null(mode);
126 ir_node *zero = new_rd_Const(dbgi, irg, tv_zero);
128 ir_node *set = new_rd_Mux(dbgi, block, node, zero, one, mode);
130 if (mode != config.lowered_mode) {
131 set = new_r_Conv(block, set, config.lowered_mode);
137 static void adjust_method_type(ir_type *method_type)
143 n_params = get_method_n_params(method_type);
144 for (i = 0; i < n_params; ++i) {
145 ir_type *param = get_method_param_type(method_type, i);
146 if (get_type_mode(param) == mode_b) {
147 set_method_param_type(method_type, i, create_lowered_type());
151 n_res = get_method_n_ress(method_type);
152 for (i = 0; i < n_res; ++i) {
153 ir_type *res_type = get_method_res_type(method_type, i);
154 if (get_type_mode(res_type) == mode_b) {
155 set_method_res_type(method_type, i, create_lowered_type());
160 static ir_node *lower_node(ir_node *node)
162 dbg_info *dbgi = get_irn_dbg_info(node);
163 ir_node *block = get_nodes_block(node);
164 ir_mode *mode = config.lowered_mode;
168 res = get_irn_link(node);
172 assert(get_irn_mode(node) == mode_b);
174 irg = get_irn_irg(node);
175 switch (get_irn_opcode(node)) {
179 ir_node *unknown, *new_phi;
181 arity = get_irn_arity(node);
182 in = ALLOCAN(ir_node*, arity);
183 unknown = new_r_Unknown(irg, mode);
184 for (i = 0; i < arity; ++i) {
187 new_phi = new_r_Phi(block, arity, in, mode);
188 /* FIXME This does not correctly break cycles: The Phi might not be the
189 * first in the recursion, so the caller(s) are some yet un-lowered nodes
190 * and this Phi might have them (indirectly) as operands, so they would be
192 set_irn_link(node, new_phi);
193 pdeq_putr(lowered_nodes, node);
195 for (i = 0; i < arity; ++i) {
196 ir_node *in = get_irn_n(node, i);
197 ir_node *low_in = lower_node(in);
199 set_irn_n(new_phi, i, low_in);
210 res = exact_copy(node);
211 arity = get_irn_arity(node);
212 for (i = 0; i < arity; ++i) {
213 ir_node *in = get_irn_n(node, i);
214 ir_node *low_in = lower_node(in);
216 set_irn_n(res, i, low_in);
218 set_irn_mode(res, mode);
223 ir_node *op = get_Not_op(node);
224 ir_node *low_op = lower_node(op);
226 res = create_not(dbgi, low_op);
231 ir_node *cond = get_Mux_sel(node);
232 ir_node *low_cond = lower_node(cond);
233 ir_node *v_true = get_Mux_true(node);
234 ir_node *low_v_true = lower_node(v_true);
235 ir_node *v_false = get_Mux_false(node);
236 ir_node *low_v_false = lower_node(v_false);
238 ir_node *and0 = new_rd_And(dbgi, block, low_cond, low_v_true, mode);
239 ir_node *not_cond = create_not(dbgi, low_cond);
240 ir_node *and1 = new_rd_And(dbgi, block, not_cond, low_v_false, mode);
241 res = new_rd_Or(dbgi, block, and0, and1, mode);
246 ir_node *pred = get_Conv_op(node);
247 ir_mode *mode = get_irn_mode(pred);
248 tarval *tv_zeroc = get_mode_null(mode);
249 ir_node *zero_cmp = new_rd_Const(dbgi, irg, tv_zeroc);
251 ir_node *cmp = new_rd_Cmp(dbgi, block, pred, zero_cmp);
252 ir_node *proj = new_rd_Proj(dbgi, cmp, mode_b, pn_Cmp_Lg);
253 res = create_set(proj);
258 ir_node *pred = get_Proj_pred(node);
261 ir_node *left = get_Cmp_left(pred);
262 ir_node *right = get_Cmp_right(pred);
263 ir_mode *cmp_mode = get_irn_mode(left);
265 if ((mode_is_int(cmp_mode) || mode_is_reference(cmp_mode)) &&
266 (get_mode_size_bits(cmp_mode) < get_mode_size_bits(mode) ||
267 (mode_is_signed(cmp_mode) && is_Const(right) && is_Const_null(right)))) {
268 int pnc = get_Proj_proj(node);
276 if (pnc == pn_Cmp_Lt) {
277 /* a < b -> (a - b) >> 31 */
280 } else if (pnc == pn_Cmp_Le) {
281 /* a <= b -> ~(a - b) >> 31 */
285 } else if (pnc == pn_Cmp_Gt) {
286 /* a > b -> (b - a) >> 31 */
289 } else if (pnc == pn_Cmp_Ge) {
290 /* a >= b -> ~(a - b) >> 31 */
298 bits = get_mode_size_bits(mode);
299 tv = new_tarval_from_long(bits-1, mode_Iu);
300 shift_cnt = new_rd_Const(dbgi, irg, tv);
302 if (cmp_mode != mode) {
303 a = new_rd_Conv(dbgi, block, a, mode);
304 b = new_rd_Conv(dbgi, block, b, mode);
307 res = new_rd_Sub(dbgi, block, a, b, mode);
309 res = new_rd_Not(dbgi, block, res, mode);
311 res = new_rd_Shr(dbgi, block, res, shift_cnt, mode);
313 /* synthesize the 0/1 value */
315 res = create_set(node);
317 } else if (is_Proj(pred) && is_Call(get_Proj_pred(pred))) {
318 ir_type *type = get_Call_type(get_Proj_pred(pred));
319 adjust_method_type(type);
320 set_irn_mode(node, mode);
322 goto own_replacement;
323 } else if (is_Proj(pred) && is_Start(get_Proj_pred(pred))) {
324 ir_graph *irg = get_irn_irg(node);
325 ir_entity *entity = get_irg_entity(irg);
326 ir_type *type = get_entity_type(entity);
327 adjust_method_type(type);
328 set_irn_mode(node, mode);
330 goto own_replacement;
332 panic("unexpected projb: %+F (pred: %+F)", node, pred);
338 tarval *tv = get_Const_tarval(node);
339 if (tv == get_tarval_b_true()) {
340 tarval *tv_one = get_mode_one(mode);
341 res = new_rd_Const(dbgi, irg, tv_one);
342 } else if (tv == get_tarval_b_false()) {
343 tarval *tv_zero = get_mode_null(mode);
344 res = new_rd_Const(dbgi, irg, tv_zero);
346 panic("invalid boolean const %+F", node);
352 res = new_r_Unknown(irg, mode);
356 panic("didn't expect %+F to have mode_b", node);
359 pdeq_putr(lowered_nodes, node);
361 set_irn_link(node, res);
365 static void lower_mode_b_walker(ir_node *node, void *env)
368 bool changed = false;
370 arity = get_irn_arity(node);
371 for (i = 0; i < arity; ++i) {
373 ir_node *in = get_irn_n(node, i);
374 if (get_irn_mode(in) != mode_b)
377 if (! config.lower_direct_cmp) {
378 /* Proj(Cmp) as input for Cond and Mux nodes needs no changes.
379 (Mux with mode_b is an exception as it gets replaced by and/or
380 anyway so we still lower the inputs then) */
382 (is_Mux(node) && get_irn_mode(node) != mode_b)) {
384 ir_node *pred = get_Proj_pred(in);
391 lowered_in = lower_node(in);
394 ir_type *type = get_Call_type(node);
395 adjust_method_type(type);
396 } else if (is_Cond(node) || (is_Mux(node) && i == 0)) {
397 lowered_in = create_convb(lowered_in);
399 set_irn_n(node, i, lowered_in);
403 bool *global_changed = env;
404 *global_changed = true;
405 add_identities(node);
409 void ir_lower_mode_b(ir_graph *irg, const lower_mode_b_config_t *nconfig)
411 ir_entity *entity = get_irg_entity(irg);
412 ir_type *type = get_entity_type(entity);
413 bool changed = false;
416 lowered_nodes = new_pdeq();
421 /* ensure no optimisation touches muxes anymore */
422 set_irg_state(irg, IR_GRAPH_STATE_KEEP_MUX | IR_GRAPH_STATE_BCONV_ALLOWED);
424 ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK);
426 adjust_method_type(type);
428 irg_walk_graph(irg, firm_clear_link, NULL, NULL);
429 irg_walk_graph(irg, lower_mode_b_walker, NULL, &changed);
431 while (!pdeq_empty(lowered_nodes)) {
432 ir_node *node = (ir_node*) pdeq_getr(lowered_nodes);
433 maybe_kill_node(node);
435 del_pdeq(lowered_nodes);
437 ir_free_resources(irg, IR_RESOURCE_IRN_LINK);
440 set_irg_outs_inconsistent(irg);