2 * Copyright (C) 1995-2011 University of Karlsruhe. All right reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Lower double word operations, i.e. 64bit -> 32bit, 32bit -> 16bit etc.
24 * @author Michael Beck
37 #include "irgraph_t.h"
42 #include "dbginfo_t.h"
43 #include "iropt_dbg.h"
/* NOTE(review): this extraction is missing lines relative to the original file
 * (struct/enum closers, `/​**` comment openers, the `enum lower_flags` header
 * and the `struct lower_env_t {` opener). Code kept verbatim; comments only. */
58 typedef struct lower_env_t lower_env_t;
61 * The type of a lower function.
63 * @param node the node to be lowered
64 * @param env the lower environment
66 typedef void (*lower_func)(ir_node *node, ir_mode *mode, lower_env_t *env);
68 /** A map from (op, imode, omode) to Intrinsic functions entities. */
69 static set *intrinsic_fkt;
71 /** A map from (imode, omode) to conv function types. */
72 static set *conv_types;
74 /** A map from a method type to its lowered type. */
75 static pmap *lowered_type;
77 /** The types for the binop and unop intrinsics. */
78 static ir_type *binop_tp_u, *binop_tp_s, *unop_tp_u, *unop_tp_s, *shiftop_tp_u, *shiftop_tp_s, *tp_s, *tp_u;
80 /** the debug handle */
81 DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
84 * An entry in the (op, imode, omode) -> entity map.
86 typedef struct op_mode_entry {
87 const ir_op *op; /**< the op */
88 const ir_mode *imode; /**< the input mode */
89 const ir_mode *omode; /**< the output mode */
90 ir_entity *ent; /**< the associated entity of this (op, imode, omode) triple */
94 * An entry in the (imode, omode) -> tp map.
96 typedef struct conv_tp_entry {
97 const ir_mode *imode; /**< the input mode */
98 const ir_mode *omode; /**< the output mode */
99 ir_type *mtd; /**< the associated method type of this (imode, omode) pair */
103 * Every double word node will be replaced,
104 * we need some store to hold the replacement:
106 typedef struct node_entry_t {
107 ir_node *low_word; /**< the low word */
108 ir_node *high_word; /**< the high word */
/* Flags OR-ed into lower_env_t::flags by the walkers below. */
112 MUST_BE_LOWERED = 1, /**< graph must be lowered */
113 CF_CHANGED = 2, /**< control flow was changed */
117 * The lower environment.
120 node_entry_t **entries; /**< entries per node */
122 struct obstack obst; /**< an obstack holding the temporary data */
123 ir_type *l_mtp; /**< lowered method type of the current method */
124 ir_tarval *tv_mode_bytes; /**< a tarval containing the number of bytes in the lowered modes */
125 ir_tarval *tv_mode_bits; /**< a tarval containing the number of bits in the lowered modes */
126 pdeq *waitq; /**< a wait queue of all nodes that must be handled later */
127 ir_node **lowered_phis; /**< list of lowered phis */
128 pmap *proj_2_block; /**< a map from ProjX to its destination blocks */
129 ir_mode *high_signed; /**< doubleword signed type */
130 ir_mode *high_unsigned; /**< doubleword unsigned type */
131 ir_mode *low_signed; /**< word signed type */
132 ir_mode *low_unsigned; /**< word unsigned type */
133 ident *first_id; /**< .l for little and .h for big endian */
134 ident *next_id; /**< .h for little and .l for big endian */
135 const lwrdw_param_t *params; /**< transformation parameter */
136 unsigned flags; /**< some flags */
137 unsigned n_entries; /**< number of entries */
138 ir_type *value_param_tp; /**< the old value param type */
/* Forward declarations: lowering is recursive (lower_Cond lowers its selector). */
141 static void lower_node(lower_env_t *env, ir_node *node);
142 static bool mtp_must_be_lowered(lower_env_t *env, ir_type *mtp);
/* NOTE(review): gutted extraction — the `mtd` declaration, the cache-hit
 * check on entry->mtd, and the return statement are missing here. */
145 * Create a method type for a Conv emulation from imode to omode.
147 static ir_type *get_conv_type(ir_mode *imode, ir_mode *omode, lower_env_t *env)
149 conv_tp_entry_t key, *entry;
/* cache lookup/insert: one method type per (imode, omode) pair */
156 entry = (conv_tp_entry_t*)set_insert(conv_types, &key, sizeof(key), HASH_PTR(imode) ^ HASH_PTR(omode));
158 int n_param = 1, n_res = 1;
/* a doubleword argument/result occupies two word-sized slots */
160 if (imode == env->high_signed || imode == env->high_unsigned)
162 if (omode == env->high_signed || omode == env->high_unsigned)
165 /* create a new one */
166 mtd = new_type_method(n_param, n_res);
168 /* set param types and result types */
170 if (imode == env->high_signed) {
171 set_method_param_type(mtd, n_param++, tp_u);
172 set_method_param_type(mtd, n_param++, tp_s);
173 } else if (imode == env->high_unsigned) {
174 set_method_param_type(mtd, n_param++, tp_u);
175 set_method_param_type(mtd, n_param++, tp_u);
177 ir_type *tp = get_type_for_mode(imode);
178 set_method_param_type(mtd, n_param++, tp);
182 if (omode == env->high_signed) {
183 set_method_res_type(mtd, n_res++, tp_u);
184 set_method_res_type(mtd, n_res++, tp_s);
185 } else if (omode == env->high_unsigned) {
186 set_method_res_type(mtd, n_res++, tp_u);
187 set_method_res_type(mtd, n_res++, tp_u);
189 ir_type *tp = get_type_for_mode(omode);
190 set_method_res_type(mtd, n_res++, tp);
/* NOTE(review): extraction dropped the `in`/`phi` declarations and the
 * `in[i] = cf` / `in[i] = get_irn_n(phi, nr)` lines — verify upstream. */
200 * Add an additional control flow input to a block.
201 * Patch all Phi nodes. The new Phi inputs are copied from
202 * old input number nr.
204 static void add_block_cf_input_nr(ir_node *block, int nr, ir_node *cf)
206 int i, arity = get_irn_arity(block);
/* build an in-array one wider than the current predecessor list */
211 NEW_ARR_A(ir_node *, in, arity + 1);
212 for (i = 0; i < arity; ++i)
213 in[i] = get_irn_n(block, i);
216 set_irn_in(block, i + 1, in);
/* every Phi in the block needs the matching extra data input */
218 for (phi = get_Block_phis(block); phi != NULL; phi = get_Phi_next(phi)) {
219 for (i = 0; i < arity; ++i)
220 in[i] = get_irn_n(phi, i);
222 set_irn_in(phi, i + 1, in);
/* NOTE(review): extraction dropped the `nr` declaration/assignment and the
 * loop body that records which predecessor index matches tmpl. */
227 * Add an additional control flow input to a block.
228 * Patch all Phi nodes. The new Phi inputs are copied from
229 * old input from cf tmpl.
231 static void add_block_cf_input(ir_node *block, ir_node *tmpl, ir_node *cf)
233 int i, arity = get_irn_arity(block);
/* find the predecessor slot occupied by tmpl, then delegate by index */
236 for (i = 0; i < arity; ++i) {
237 if (get_irn_n(block, i) == tmpl) {
243 add_block_cf_input_nr(block, nr, cf);
/* NOTE(review): all `case iro_*:` labels are missing from this extraction;
 * the returns below correspond to Load/Store/Div/Mod/Cmp/default in order. */
247 * Return the "operational" mode of a Firm node.
249 static ir_mode *get_irn_op_mode(ir_node *node)
251 switch (get_irn_opcode(node)) {
253 return get_Load_mode(node);
255 return get_irn_mode(get_Store_value(node));
257 return get_irn_mode(get_Div_left(node));
259 return get_irn_mode(get_Mod_left(node));
261 return get_irn_mode(get_Cmp_left(node));
263 return get_irn_mode(node);
/* NOTE(review): gutted extraction — `link`/`i` declarations, the `is_Proj`
 * branch header and several braces are missing; code kept verbatim. */
268 * Walker, prepare the node links and determine which nodes need to be lowered
271 static void prepare_links(lower_env_t *env, ir_node *node)
273 ir_mode *mode = get_irn_op_mode(node);
277 if (mode == env->high_signed || mode == env->high_unsigned) {
278 unsigned idx = get_irn_idx(node);
279 /* ok, found a node that will be lowered */
280 link = OALLOCZ(&env->obst, node_entry_t);
282 if (idx >= env->n_entries) {
283 /* enlarge: this happens only for Rotl nodes which is RARELY */
284 unsigned old = env->n_entries;
285 unsigned n_idx = idx + (idx >> 3);
287 ARR_RESIZE(node_entry_t *, env->entries, n_idx);
288 memset(&env->entries[old], 0, (n_idx - old) * sizeof(env->entries[0]));
289 env->n_entries = n_idx;
291 env->entries[idx] = link;
292 env->flags |= MUST_BE_LOWERED;
293 } else if (is_Conv(node)) {
294 /* Conv nodes have two modes */
295 ir_node *pred = get_Conv_op(node);
296 mode = get_irn_mode(pred);
298 if (mode == env->high_signed || mode == env->high_unsigned) {
299 /* must lower this node either but don't need a link */
300 env->flags |= MUST_BE_LOWERED;
306 /* link all Proj nodes to its predecessor:
307 Note that Tuple Proj's and its Projs are linked either. */
308 ir_node *pred = get_Proj_pred(node);
310 set_irn_link(node, get_irn_link(pred));
311 set_irn_link(pred, node);
312 } else if (is_Phi(node)) {
313 /* link all Phi nodes to its block */
314 ir_node *block = get_nodes_block(node);
315 add_Block_phi(block, node);
316 } else if (is_Block(node)) {
317 /* fill the Proj -> Block map */
318 for (i = get_Block_n_cfgpreds(node) - 1; i >= 0; --i) {
319 ir_node *pred = get_Block_cfgpred(node, i);
322 pmap_insert(env->proj_2_block, pred, node);
327 static node_entry_t *get_node_entry(lower_env_t *env, ir_node *node)
329 unsigned idx = get_irn_idx(node);
330 assert(idx < env->n_entries);
331 return env->entries[idx];
334 static void set_lowered(lower_env_t *env, ir_node *old,
335 ir_node *new_low, ir_node *new_high)
337 node_entry_t *entry = get_node_entry(env, old);
338 entry->low_word = new_low;
339 entry->high_word = new_high;
343 * Translate a Constant: create two.
345 static void lower_Const(ir_node *node, ir_mode *mode, lower_env_t *env)
347 ir_graph *irg = get_irn_irg(node);
348 dbg_info *dbg = get_irn_dbg_info(node);
349 ir_mode *low_mode = env->low_unsigned;
350 ir_tarval *tv = get_Const_tarval(node);
351 ir_tarval *tv_l = tarval_convert_to(tv, low_mode);
352 ir_node *res_low = new_rd_Const(dbg, irg, tv_l);
353 ir_tarval *tv_shrs = tarval_shrs(tv, env->tv_mode_bits);
354 ir_tarval *tv_h = tarval_convert_to(tv_shrs, mode);
355 ir_node *res_high = new_rd_Const(dbg, irg, tv_h);
357 set_lowered(env, node, res_low, res_high);
/* NOTE(review): gutted extraction — the big-endian else branch, `dbg`
 * declaration, `break;` statements and closing braces are missing. */
361 * Translate a Load: create two.
363 static void lower_Load(ir_node *node, ir_mode *mode, lower_env_t *env)
365 ir_mode *low_mode = env->low_unsigned;
366 ir_graph *irg = get_irn_irg(node);
367 ir_node *adr = get_Load_ptr(node);
368 ir_node *mem = get_Load_mem(node);
369 ir_node *low, *high, *proj;
371 ir_node *block = get_nodes_block(node);
372 ir_cons_flags volatility = get_Load_volatility(node) == volatility_is_volatile
373 ? cons_volatile : cons_none;
/* little endian: low word at adr, high word at adr + word size */
375 if (env->params->little_endian) {
377 high = new_r_Add(block, adr, new_r_Const(irg, env->tv_mode_bytes), get_irn_mode(adr));
379 low = new_r_Add(block, adr, new_r_Const(irg, env->tv_mode_bytes), get_irn_mode(adr));
383 /* create two loads */
384 dbg = get_irn_dbg_info(node);
385 low = new_rd_Load(dbg, block, mem, low, low_mode, volatility);
386 proj = new_r_Proj(low, mode_M, pn_Load_M);
387 high = new_rd_Load(dbg, block, proj, high, mode, volatility);
389 set_lowered(env, node, low, high);
/* reroute the old Load's Projs onto the two new Loads */
391 for (proj = (ir_node*)get_irn_link(node); proj;
392 proj = (ir_node*)get_irn_link(proj)) {
393 switch (get_Proj_proj(proj)) {
394 case pn_Load_M: /* Memory result. */
395 /* put it to the second one */
396 set_Proj_pred(proj, high);
398 case pn_Load_X_except: /* Execution result if exception occurred. */
399 /* put it to the first one */
400 set_Proj_pred(proj, low);
402 case pn_Load_res: { /* Result of load operation. */
403 ir_node *res_low = new_r_Proj(low, low_mode, pn_Load_res);
404 ir_node *res_high = new_r_Proj(high, mode, pn_Load_res);
405 set_lowered(env, proj, res_low, res_high);
409 assert(0 && "unexpected Proj number");
411 /* mark this proj: we have handled it already, otherwise we might fall
412 * into out new nodes. */
413 mark_irn_visited(proj);
/* NOTE(review): gutted extraction — `irg`/`dbg` declarations, the
 * big-endian else branch, `return;` after the waitq put, `break;`s and
 * closing braces are missing. */
418 * Translate a Store: create two.
420 static void lower_Store(ir_node *node, ir_mode *mode, lower_env_t *env)
423 ir_node *block, *adr, *mem;
424 ir_node *low, *high, *proj;
426 ir_node *value = get_Store_value(node);
427 const node_entry_t *entry = get_node_entry(env, value);
428 ir_cons_flags volatility = get_Store_volatility(node) == volatility_is_volatile
429 ? cons_volatile : cons_none;
/* the stored value may itself not be lowered yet: defer via wait queue */
434 if (! entry->low_word) {
435 /* not ready yet, wait */
436 pdeq_putr(env->waitq, node);
440 irg = get_irn_irg(node);
441 adr = get_Store_ptr(node);
442 mem = get_Store_mem(node);
443 block = get_nodes_block(node);
/* little endian: low word at adr, high word at adr + word size */
445 if (env->params->little_endian) {
447 high = new_r_Add(block, adr, new_r_Const(irg, env->tv_mode_bytes), get_irn_mode(adr));
449 low = new_r_Add(block, adr, new_r_Const(irg, env->tv_mode_bytes), get_irn_mode(adr));
453 /* create two Stores */
454 dbg = get_irn_dbg_info(node);
455 low = new_rd_Store(dbg, block, mem, low, entry->low_word, volatility);
456 proj = new_r_Proj(low, mode_M, pn_Store_M);
457 high = new_rd_Store(dbg, block, proj, high, entry->high_word, volatility);
459 set_lowered(env, node, low, high);
/* reroute the old Store's Projs onto the two new Stores */
461 for (proj = (ir_node*)get_irn_link(node); proj;
462 proj = (ir_node*)get_irn_link(proj)) {
463 switch (get_Proj_proj(proj)) {
464 case pn_Store_M: /* Memory result. */
465 /* put it to the second one */
466 set_Proj_pred(proj, high);
468 case pn_Store_X_except: /* Execution result if exception occurred. */
469 /* put it to the first one */
470 set_Proj_pred(proj, low);
473 assert(0 && "unexpected Proj number");
475 /* mark this proj: we have handled it already, otherwise we might fall into
477 mark_irn_visited(proj);
/* NOTE(review): gutted extraction — key initialization, the cache-hit check
 * on entry->ent, `ent`/`sym` declarations and setup are missing. */
482 * Return a node containing the address of the intrinsic emulation function.
484 * @param method the method type of the emulation function
485 * @param op the emulated ir_op
486 * @param imode the input mode of the emulated opcode
487 * @param omode the output mode of the emulated opcode
488 * @param env the lower environment
490 static ir_node *get_intrinsic_address(ir_type *method, ir_op *op,
491 ir_mode *imode, ir_mode *omode,
496 op_mode_entry_t key, *entry;
/* cache lookup/insert keyed on the (op, imode, omode) triple */
503 entry = (op_mode_entry_t*)set_insert(intrinsic_fkt, &key, sizeof(key),
504 HASH_PTR(op) ^ HASH_PTR(imode) ^ (HASH_PTR(omode) << 8));
506 /* create a new one */
507 ent = env->params->create_intrinsic(method, op, imode, omode, env->params->ctx);
509 assert(ent && "Intrinsic creator must return an entity");
515 return new_r_SymConst(env->irg, mode_P_code, sym, symconst_addr_ent);
/* NOTE(review): gutted extraction — the `ir_type *mtp`, `ir_node *addr`,
 * `ir_node *in[4]` and `ir_node *call` declaration heads, `break;`s, the
 * `pn_Div_res` case label and closing braces are missing. */
521 * Create an intrinsic Call.
523 static void lower_Div(ir_node *node, ir_mode *mode, lower_env_t *env)
525 ir_node *left = get_Div_left(node);
526 ir_node *right = get_Div_right(node);
527 const node_entry_t *left_entry = get_node_entry(env, left);
528 const node_entry_t *right_entry = get_node_entry(env, right);
529 ir_node *block = get_nodes_block(node);
530 dbg_info *dbgi = get_irn_dbg_info(node);
532 = mode_is_signed(mode) ? binop_tp_s : binop_tp_u;
533 ir_mode *opmode = get_irn_op_mode(node);
535 = get_intrinsic_address(mtp, get_irn_op(node), opmode, opmode, env);
/* four word-sized arguments: (left.lo, left.hi, right.lo, right.hi) */
537 left_entry->low_word, left_entry->high_word,
538 right_entry->low_word, right_entry->high_word };
540 = new_rd_Call(dbgi, block, get_Div_mem(node), addr, 4, in, mtp);
541 ir_node *resproj = new_r_Proj(call, mode_T, pn_Call_T_result);
544 set_irn_pinned(call, get_irn_pinned(node));
/* reroute the old Div's Projs onto the emulation Call */
546 for (proj = (ir_node*)get_irn_link(node); proj;
547 proj = (ir_node*)get_irn_link(proj)) {
548 switch (get_Proj_proj(proj)) {
549 case pn_Div_M: /* Memory result. */
550 /* reroute to the call */
551 set_Proj_pred(proj, call);
552 set_Proj_proj(proj, pn_Call_M);
554 case pn_Div_X_except: /* Execution result if exception occurred. */
555 /* reroute to the call */
556 set_Proj_pred(proj, call);
557 set_Proj_proj(proj, pn_Call_X_except);
560 /* Result of computation. */
561 ir_node *res_low = new_r_Proj(resproj, env->low_unsigned, 0);
562 ir_node *res_high = new_r_Proj(resproj, mode, 1);
563 set_lowered(env, proj, res_low, res_high);
567 assert(0 && "unexpected Proj number");
569 /* mark this proj: we have handled it already, otherwise we might fall into
571 mark_irn_visited(proj);
/* NOTE(review): gutted extraction — declaration heads for `in[4]`, `mtp`,
 * `addr`, `call`, the `break;`s and the `pn_Mod_res` case label are missing.
 * Structure parallels lower_Div above. */
578 * Create an intrinsic Call.
580 static void lower_Mod(ir_node *node, ir_mode *mode, lower_env_t *env)
582 ir_node *left = get_Mod_left(node);
583 ir_node *right = get_Mod_right(node);
584 const node_entry_t *left_entry = get_node_entry(env, left);
585 const node_entry_t *right_entry = get_node_entry(env, right);
587 left_entry->low_word, left_entry->high_word,
588 right_entry->low_word, right_entry->high_word
590 dbg_info *dbgi = get_irn_dbg_info(node);
591 ir_node *block = get_nodes_block(node);
593 = mode_is_signed(mode) ? binop_tp_s : binop_tp_u;
594 ir_mode *opmode = get_irn_op_mode(node);
596 = get_intrinsic_address(mtp, get_irn_op(node), opmode, opmode, env);
598 = new_rd_Call(dbgi, block, get_Mod_mem(node), addr, 4, in, mtp);
599 ir_node *resproj = new_r_Proj(call, mode_T, pn_Call_T_result);
601 set_irn_pinned(call, get_irn_pinned(node));
/* reroute the old Mod's Projs onto the emulation Call */
603 for (proj = (ir_node*)get_irn_link(node); proj;
604 proj = (ir_node*)get_irn_link(proj)) {
605 switch (get_Proj_proj(proj)) {
606 case pn_Mod_M: /* Memory result. */
607 /* reroute to the call */
608 set_Proj_pred(proj, call);
609 set_Proj_proj(proj, pn_Call_M);
611 case pn_Mod_X_except: /* Execution result if exception occurred. */
612 /* reroute to the call */
613 set_Proj_pred(proj, call);
614 set_Proj_proj(proj, pn_Call_X_except);
617 /* Result of computation. */
618 ir_node *res_low = new_r_Proj(resproj, env->low_unsigned, 0);
619 ir_node *res_high = new_r_Proj(resproj, mode, 1);
620 set_lowered(env, proj, res_low, res_high);
624 assert(0 && "unexpected Proj number");
626 /* mark this proj: we have handled it already, otherwise we might fall
627 * into out new nodes. */
628 mark_irn_visited(proj);
/* NOTE(review): gutted extraction — declaration heads for `in[4]`, `mtp`,
 * `addr` and `call` are missing before the `=`-continuation lines. */
635 * Create an intrinsic Call.
637 static void lower_binop(ir_node *node, ir_mode *mode, lower_env_t *env)
639 ir_node *left = get_binop_left(node);
640 ir_node *right = get_binop_right(node);
641 const node_entry_t *left_entry = get_node_entry(env, left);
642 const node_entry_t *right_entry = get_node_entry(env, right);
/* four word-sized arguments: (left.lo, left.hi, right.lo, right.hi) */
644 left_entry->low_word, left_entry->high_word,
645 right_entry->low_word, right_entry->high_word
647 dbg_info *dbgi = get_irn_dbg_info(node);
648 ir_node *block = get_nodes_block(node);
649 ir_graph *irg = get_irn_irg(block);
651 = mode_is_signed(mode) ? binop_tp_s : binop_tp_u;
653 = get_intrinsic_address(mtp, get_irn_op(node), mode, mode, env);
655 = new_rd_Call(dbgi, block, get_irg_no_mem(irg), addr, 4, in, mtp);
656 ir_node *resproj = new_r_Proj(call, mode_T, pn_Call_T_result);
657 ir_node *res_low = new_r_Proj(resproj, env->low_unsigned, 0);
658 ir_node *res_high = new_r_Proj(resproj, mode, 1);
659 set_irn_pinned(call, get_irn_pinned(node));
660 set_lowered(env, node, res_low, res_high);
/* NOTE(review): gutted extraction — the `ir_node *in[3] = {` head and the
 * declaration heads for `mtp`, `addr`, `call` are missing. */
664 * Translate a Shiftop.
666 * Create an intrinsic Call.
668 static void lower_Shiftop(ir_node *node, ir_mode *mode, lower_env_t *env)
670 ir_node *block = get_nodes_block(node);
671 ir_node *left = get_binop_left(node);
672 const node_entry_t *left_entry = get_node_entry(env, left);
673 ir_node *right = get_binop_right(node);
/* three arguments: (left.lo, left.hi, shift count as word) */
675 left_entry->low_word, left_entry->high_word,
676 /* it should be safe to conv to low_unsigned */
677 new_r_Conv(block, right, env->low_unsigned)
679 dbg_info *dbgi = get_irn_dbg_info(node);
680 ir_graph *irg = get_irn_irg(block);
682 = mode_is_signed(mode) ? shiftop_tp_s : shiftop_tp_u;
684 = get_intrinsic_address(mtp, get_irn_op(node), mode, mode, env);
686 = new_rd_Call(dbgi, block, get_irg_no_mem(irg), addr, 3, in, mtp);
687 ir_node *resproj = new_r_Proj(call, mode_T, pn_Call_T_result);
688 ir_node *res_low = new_r_Proj(resproj, env->low_unsigned, 0);
689 ir_node *res_high = new_r_Proj(resproj, mode, 1);
691 set_irn_pinned(call, get_irn_pinned(node));
692 set_lowered(env, node, res_low, res_high);
/* NOTE(review): gutted extraction — `res_low`/`res_high` declarations, the
 * shf_cnt==0 alternative and closing braces/`return` are missing. */
696 * Translate a Shr and handle special cases.
698 static void lower_Shr(ir_node *node, ir_mode *mode, lower_env_t *env)
700 ir_graph *irg = get_irn_irg(node);
701 ir_node *right = get_Shr_right(node);
/* special case: constant shift by >= word size — only high word matters */
703 if (get_mode_arithmetic(mode) == irma_twos_complement && is_Const(right)) {
704 ir_tarval *tv = get_Const_tarval(right);
706 if (tarval_is_long(tv) &&
707 get_tarval_long(tv) >= (long)get_mode_size_bits(mode)) {
708 ir_node *block = get_nodes_block(node);
709 ir_node *left = get_Shr_left(node);
710 ir_mode *low_unsigned = env->low_unsigned;
711 long shf_cnt = get_tarval_long(tv) - get_mode_size_bits(mode);
712 const node_entry_t *left_entry = get_node_entry(env, left);
716 left = left_entry->high_word;
718 /* convert high word into low_unsigned mode if necessary */
719 if (get_irn_mode(left) != low_unsigned)
720 left = new_r_Conv(block, left, low_unsigned);
723 ir_node *c = new_r_Const_long(irg, low_unsigned, shf_cnt);
724 res_low = new_r_Shr(block, left, c, low_unsigned);
/* logical shift right: the new high word is always zero */
728 res_high = new_r_Const(irg, get_mode_null(mode));
729 set_lowered(env, node, res_low, res_high);
/* general case: emit the intrinsic call */
734 lower_Shiftop(node, mode, env);
/* NOTE(review): gutted extraction — `res_low`/`res_high`/`mode_l`/`c`
 * declarations, the `value == 1` branch header, declaration heads for
 * `in[4]`, `mtp`, `addr`, `call`, and closing braces are missing. */
738 * Translate a Shl and handle special cases.
740 static void lower_Shl(ir_node *node, ir_mode *mode, lower_env_t *env)
742 ir_graph *irg = get_irn_irg(node);
743 ir_node *right = get_Shl_right(node);
745 if (get_mode_arithmetic(mode) == irma_twos_complement && is_Const(right)) {
746 ir_tarval *tv = get_Const_tarval(right);
748 if (tarval_is_long(tv)) {
749 long value = get_tarval_long(tv);
/* special case: constant shift by >= word size — low word becomes zero */
750 if (value >= (long)get_mode_size_bits(mode)) {
751 /* simple case: shift above the lower word */
753 ir_node *block = get_nodes_block(node);
754 ir_node *left = get_Shl_left(node);
756 long shf_cnt = get_tarval_long(tv) - get_mode_size_bits(mode);
757 const node_entry_t *left_entry = get_node_entry(env, left);
761 left = left_entry->low_word;
762 left = new_r_Conv(block, left, mode);
764 mode_l = env->low_unsigned;
766 c = new_r_Const_long(irg, mode_l, shf_cnt);
767 res_high = new_r_Shl(block, left, c, mode);
771 res_low = new_r_Const(irg, get_mode_null(mode_l));
772 set_lowered(env, node, res_low, res_high);
/* special case below: shift by one is lowered as a doubleword Add */
777 /* left << 1 == left + left */
778 ir_node *left = get_binop_left(node);
779 const node_entry_t *left_entry = get_node_entry(env, left);
781 left_entry->low_word, left_entry->high_word,
782 left_entry->low_word, left_entry->high_word,
784 dbg_info *dbgi = get_irn_dbg_info(node);
785 ir_node *block = get_nodes_block(node);
786 ir_graph *irg = get_irn_irg(block);
788 = mode_is_signed(mode) ? binop_tp_s : binop_tp_u;
790 = get_intrinsic_address(mtp, op_Add, mode, mode, env);
792 = new_rd_Call(dbgi, block, get_irg_no_mem(irg), addr, 4, in, mtp);
793 ir_node *resproj = new_r_Proj(call, mode_T, pn_Call_T_result);
794 ir_node *res_low = new_r_Proj(resproj, env->low_unsigned, 0);
795 ir_node *res_high = new_r_Proj(resproj, mode, 1);
796 set_irn_pinned(call, get_irn_pinned(node));
797 set_lowered(env, node, res_low, res_high);
/* general case: emit the intrinsic call */
803 lower_Shiftop(node, mode, env);
/* NOTE(review): gutted extraction — `c`/`res_low`/`res_high` declarations,
 * the shf_cnt==0 else-branch header and closing braces are missing. */
807 * Translate a Shrs and handle special cases.
809 static void lower_Shrs(ir_node *node, ir_mode *mode, lower_env_t *env)
811 ir_graph *irg = get_irn_irg(node);
812 ir_node *right = get_Shrs_right(node);
/* special case: constant arithmetic shift by >= word size */
814 if (get_mode_arithmetic(mode) == irma_twos_complement && is_Const(right)) {
815 ir_tarval *tv = get_Const_tarval(right);
817 if (tarval_is_long(tv) &&
818 get_tarval_long(tv) >= (long)get_mode_size_bits(mode)) {
819 ir_node *block = get_nodes_block(node);
820 ir_node *left = get_Shrs_left(node);
821 ir_mode *low_unsigned = env->low_unsigned;
822 long shf_cnt = get_tarval_long(tv) - get_mode_size_bits(mode);
823 const node_entry_t *left_entry = get_node_entry(env, left);
824 ir_node *left_unsigned = left;
829 left = left_entry->high_word;
831 /* convert high word into low_unsigned mode if necessary */
832 if (get_irn_mode(left_unsigned) != low_unsigned)
833 left_unsigned = new_r_Conv(block, left, low_unsigned);
836 c = new_r_Const_long(irg, low_unsigned, shf_cnt);
837 res_low = new_r_Shrs(block, left_unsigned, c, low_unsigned);
839 res_low = left_unsigned;
/* new high word: sign bits only — shift the old high word all the way */
842 c = new_r_Const(irg, get_mode_all_one(low_unsigned));
843 res_high = new_r_Shrs(block, left, c, mode);
844 set_lowered(env, node, res_low, res_high);
/* general case: emit the intrinsic call */
848 lower_Shiftop(node, mode, env);
/* NOTE(review): gutted extraction — `right`/`irg`/`dbg` declarations, the
 * `is_Rotl(node)` test, `return;`s and closing braces are missing. */
852 * Rebuild Rotl nodes into Or(Shl, Shr) and prepare all nodes.
854 static void prepare_links_and_handle_rotl(ir_node *node, void *env)
856 lower_env_t *lenv = (lower_env_t*)env;
859 ir_mode *mode = get_irn_op_mode(node);
861 ir_node *left, *shl, *shr, *ornode, *block, *sub, *c;
862 ir_mode *omode, *rmode;
865 optimization_state_t state;
/* only doubleword Rotls are decomposed; everything else just gets links */
867 if (mode != lenv->high_signed && mode != lenv->high_unsigned) {
868 prepare_links(lenv, node);
872 /* replace the Rotl(x,y) by an Or(Shl(x,y), Shr(x,64-y)) */
873 right = get_Rotl_right(node);
874 irg = get_irn_irg(node);
875 dbg = get_irn_dbg_info(node);
876 omode = get_irn_mode(node);
877 left = get_Rotl_left(node);
878 block = get_nodes_block(node);
879 shl = new_rd_Shl(dbg, block, left, right, omode);
880 rmode = get_irn_mode(right);
881 c = new_r_Const_long(irg, rmode, get_mode_size_bits(omode));
882 sub = new_rd_Sub(dbg, block, c, right, rmode);
883 shr = new_rd_Shr(dbg, block, left, sub, omode);
885 /* switch optimization off here, or we will get the Rotl back */
886 save_optimization_state(&state);
887 set_opt_algebraic_simplification(0);
888 ornode = new_rd_Or(dbg, block, shl, shr, omode);
889 restore_optimization_state(&state);
891 exchange(node, ornode);
893 /* do lowering on the new nodes */
894 prepare_links(lenv, shl);
895 prepare_links(lenv, c);
896 prepare_links(lenv, sub);
897 prepare_links(lenv, shr);
898 prepare_links(lenv, ornode);
902 prepare_links(lenv, node);
908 * Create an intrinsic Call.
910 static void lower_Unop(ir_node *node, ir_mode *mode, lower_env_t *env)
912 ir_node *op = get_unop_op(node);
913 const node_entry_t *op_entry = get_node_entry(env, op);
914 ir_node *in[2] = { op_entry->low_word, op_entry->high_word };
915 dbg_info *dbgi = get_irn_dbg_info(node);
916 ir_node *block = get_nodes_block(node);
917 ir_graph *irg = get_irn_irg(block);
918 ir_type *mtp = mode_is_signed(mode) ? unop_tp_s : unop_tp_u;
919 ir_op *irop = get_irn_op(node);
920 ir_node *addr = get_intrinsic_address(mtp, irop, mode, mode, env);
921 ir_node *nomem = get_irg_no_mem(irg);
922 ir_node *call = new_rd_Call(dbgi, block, nomem, addr, 2, in, mtp);
923 ir_node *resproj = new_r_Proj(call, mode_T, pn_Call_T_result);
924 ir_node *res_low = new_r_Proj(resproj, env->low_unsigned, 0);
925 ir_node *res_high = new_r_Proj(resproj, mode, 1);
926 set_irn_pinned(call, get_irn_pinned(node));
927 set_lowered(env, node, res_low, res_high);
/* NOTE(review): gutted extraction — the `ir_node *res_low` / `ir_node
 * *res_high` declaration heads and the trailing mode arguments of the two
 * constructor calls are missing. */
931 * Translate a logical binop.
933 * Create two logical binops.
935 static void lower_binop_logical(ir_node *node, ir_mode *mode, lower_env_t *env,
936 ir_node *(*constr_rd)(dbg_info *db, ir_node *block, ir_node *op1, ir_node *op2, ir_mode *mode) )
938 ir_node *left = get_binop_left(node);
939 ir_node *right = get_binop_right(node);
940 const node_entry_t *left_entry = get_node_entry(env, left);
941 const node_entry_t *right_entry = get_node_entry(env, right);
942 dbg_info *dbgi = get_irn_dbg_info(node);
943 ir_node *block = get_nodes_block(node);
/* bitwise ops lower wordwise: low op low, high op high */
945 = constr_rd(dbgi, block, left_entry->low_word, right_entry->low_word,
948 = constr_rd(dbgi, block, left_entry->high_word, right_entry->high_word,
950 set_lowered(env, node, res_low, res_high);
953 static void lower_And(ir_node *node, ir_mode *mode, lower_env_t *env)
955 lower_binop_logical(node, mode, env, new_rd_And);
958 static void lower_Or(ir_node *node, ir_mode *mode, lower_env_t *env)
960 lower_binop_logical(node, mode, env, new_rd_Or);
963 static void lower_Eor(ir_node *node, ir_mode *mode, lower_env_t *env)
965 lower_binop_logical(node, mode, env, new_rd_Eor);
/* NOTE(review): gutted extraction — the `ir_node *res_low` / `ir_node
 * *res_high` declaration heads are missing before the `=`-continuations. */
971 * Create two logical Nots.
973 static void lower_Not(ir_node *node, ir_mode *mode, lower_env_t *env)
975 ir_node *op = get_Not_op(node);
976 const node_entry_t *op_entry = get_node_entry(env, op);
977 dbg_info *dbgi = get_irn_dbg_info(node);
978 ir_node *block = get_nodes_block(node);
/* Not lowers wordwise: negate low word and high word independently */
980 = new_rd_Not(dbgi, block, op_entry->low_word, env->low_unsigned);
982 = new_rd_Not(dbgi, block, op_entry->high_word, mode);
983 set_lowered(env, node, res_low, res_high);
/* Returns true for comparisons against constant 0 that are pure (in)equality
 * tests, i.e. can be lowered as Or(low, high) ==/!= 0.
 * NOTE(review): extraction dropped the `return false;`/`return true;` lines
 * and the else branch between the two final returns — verify upstream. */
986 static bool is_equality_cmp_0(const ir_node *node)
988 ir_relation relation = get_Cmp_relation(node);
989 ir_node *left = get_Cmp_left(node);
990 ir_node *right = get_Cmp_right(node);
991 ir_mode *mode = get_irn_mode(left);
993 /* this probably makes no sense if unordered is involved */
994 assert(!mode_is_float(mode));
996 if (!is_Const(right) || !is_Const_null(right))
998 if (relation == ir_relation_equal)
/* for unsigned modes, x > 0 is equivalent to x != 0 */
1000 if (mode_is_signed(mode)) {
1001 return relation == ir_relation_less_greater;
1003 return relation == ir_relation_greater;
/* Lower a Cond whose selector is a doubleword Cmp.
 * Strategy: split one 64-bit compare into a high-word compare plus a
 * low-word compare in a newly created block, patching the true/false Projs
 * and the destination blocks' Phi inputs.
 * NOTE(review): heavily gutted extraction — `irg`/`dbg`/`cmp_mode`/`dst_blk`
 * declarations, several `return;`s, the is_Cmp test, projT/projF assignments
 * and many braces are missing. Code kept verbatim; comments only added. */
1010 static void lower_Cond(ir_node *node, ir_mode *mode, lower_env_t *env)
1012 ir_node *left, *right, *block;
1013 ir_node *sel = get_Cond_selector(node);
1014 ir_mode *m = get_irn_mode(sel);
1016 const node_entry_t *lentry, *rentry;
1017 ir_node *proj, *projT = NULL, *projF = NULL;
1018 ir_node *new_bl, *irn;
1019 ir_node *projHF, *projHT;
1021 ir_relation relation;
1028 if (m == env->high_signed || m == env->high_unsigned) {
1029 /* bad we can't really handle Switch with 64bit offsets */
1030 panic("Cond with 64bit jumptable not supported");
1032 lower_node(env, sel);
1037 lower_node(env, sel);
/* selector is a Cmp: check whether its operands are doubleword */
1041 left = get_Cmp_left(sel);
1042 cmp_mode = get_irn_mode(left);
1043 if (cmp_mode != env->high_signed && cmp_mode != env->high_unsigned) {
1044 lower_node(env, sel);
1048 right = get_Cmp_right(sel);
1049 lower_node(env, left);
1050 lower_node(env, right);
1051 lentry = get_node_entry(env, left);
1052 rentry = get_node_entry(env, right);
1054 /* all right, build the code */
1055 for (proj = (ir_node*)get_irn_link(node); proj;
1056 proj = (ir_node*)get_irn_link(proj)) {
1057 long proj_nr = get_Proj_proj(proj);
1059 if (proj_nr == pn_Cond_true) {
1060 assert(projT == NULL && "more than one Proj(true)");
1063 assert(proj_nr == pn_Cond_false);
1064 assert(projF == NULL && "more than one Proj(false)");
1067 mark_irn_visited(proj);
1069 assert(projT && projF);
1071 /* create a new high compare */
1072 block = get_nodes_block(node);
1073 irg = get_Block_irg(block);
1074 dbg = get_irn_dbg_info(sel);
1075 relation = get_Cmp_relation(sel);
1077 if (is_equality_cmp_0(sel)) {
1078 /* x ==/!= 0 ==> or(low,high) ==/!= 0 */
1079 ir_mode *mode = env->low_unsigned;
1080 ir_node *low = new_r_Conv(block, lentry->low_word, mode);
1081 ir_node *high = new_r_Conv(block, lentry->high_word, mode);
1082 ir_node *ornode = new_rd_Or(dbg, block, low, high, mode);
1083 ir_node *cmp = new_rd_Cmp(dbg, block, ornode, new_r_Const_long(irg, mode, 0), relation);
1084 set_Cond_selector(node, cmp);
1088 if (relation == ir_relation_equal) {
1089 /* simple case:a == b <==> a_h == b_h && a_l == b_l */
1090 pmap_entry *entry = pmap_find(env->proj_2_block, projF);
1093 dst_blk = (ir_node*)entry->value;
1095 irn = new_rd_Cmp(dbg, block, lentry->high_word, rentry->high_word,
1097 dbg = get_irn_dbg_info(node);
1098 irn = new_rd_Cond(dbg, block, irn);
1100 projHF = new_r_Proj(irn, mode_X, pn_Cond_false);
1101 mark_irn_visited(projHF);
1102 exchange(projF, projHF);
1104 projHT = new_r_Proj(irn, mode_X, pn_Cond_true);
1105 mark_irn_visited(projHT);
/* second compare (low words) goes into a fresh block on the true path */
1107 new_bl = new_r_Block(irg, 1, &projHT);
1109 dbg = get_irn_dbg_info(sel);
1110 irn = new_rd_Cmp(dbg, new_bl, lentry->low_word, rentry->low_word,
1112 dbg = get_irn_dbg_info(node);
1113 irn = new_rd_Cond(dbg, new_bl, irn);
1115 proj = new_r_Proj(irn, mode_X, pn_Cond_false);
1116 mark_irn_visited(proj);
1117 add_block_cf_input(dst_blk, projHF, proj);
1119 proj = new_r_Proj(irn, mode_X, pn_Cond_true);
1120 mark_irn_visited(proj);
1121 exchange(projT, proj);
1122 } else if (relation == ir_relation_less_greater) {
1123 /* simple case:a != b <==> a_h != b_h || a_l != b_l */
1124 pmap_entry *entry = pmap_find(env->proj_2_block, projT);
1127 dst_blk = (ir_node*)entry->value;
1129 irn = new_rd_Cmp(dbg, block, lentry->high_word, rentry->high_word,
1130 ir_relation_less_greater);
1131 dbg = get_irn_dbg_info(node);
1132 irn = new_rd_Cond(dbg, block, irn);
1134 projHT = new_r_Proj(irn, mode_X, pn_Cond_true);
1135 mark_irn_visited(projHT);
1136 exchange(projT, projHT);
1138 projHF = new_r_Proj(irn, mode_X, pn_Cond_false);
1139 mark_irn_visited(projHF);
/* second compare (low words) goes into a fresh block on the false path */
1141 new_bl = new_r_Block(irg, 1, &projHF);
1143 dbg = get_irn_dbg_info(sel);
1144 irn = new_rd_Cmp(dbg, new_bl, lentry->low_word, rentry->low_word,
1145 ir_relation_less_greater);
1146 dbg = get_irn_dbg_info(node);
1147 irn = new_rd_Cond(dbg, new_bl, irn);
1149 proj = new_r_Proj(irn, mode_X, pn_Cond_true);
1150 mark_irn_visited(proj);
1151 add_block_cf_input(dst_blk, projHT, proj);
1153 proj = new_r_Proj(irn, mode_X, pn_Cond_false);
1154 mark_irn_visited(proj);
1155 exchange(projF, proj);
1157 /* a rel b <==> a_h REL b_h || (a_h == b_h && a_l rel b_l) */
1158 ir_node *dstT, *dstF, *newbl_eq, *newbl_l;
1161 entry = pmap_find(env->proj_2_block, projT);
1163 dstT = (ir_node*)entry->value;
1165 entry = pmap_find(env->proj_2_block, projF);
1167 dstF = (ir_node*)entry->value;
/* first decide via a strict high-word compare (equality stripped) */
1169 irn = new_rd_Cmp(dbg, block, lentry->high_word, rentry->high_word,
1170 relation & ~ir_relation_equal);
1171 dbg = get_irn_dbg_info(node);
1172 irn = new_rd_Cond(dbg, block, irn);
1174 projHT = new_r_Proj(irn, mode_X, pn_Cond_true);
1175 mark_irn_visited(projHT);
1176 exchange(projT, projHT);
1179 projHF = new_r_Proj(irn, mode_X, pn_Cond_false);
1180 mark_irn_visited(projHF);
/* on the false path, test high-word equality in a new block */
1182 newbl_eq = new_r_Block(irg, 1, &projHF);
1184 irn = new_rd_Cmp(dbg, block, lentry->high_word, rentry->high_word,
1186 irn = new_rd_Cond(dbg, newbl_eq, irn);
1188 proj = new_r_Proj(irn, mode_X, pn_Cond_false);
1189 mark_irn_visited(proj);
1190 exchange(projF, proj);
1193 proj = new_r_Proj(irn, mode_X, pn_Cond_true);
1194 mark_irn_visited(proj);
/* high words equal: the low-word compare decides, in another new block */
1196 newbl_l = new_r_Block(irg, 1, &proj);
1198 dbg = get_irn_dbg_info(sel);
1199 irn = new_rd_Cmp(dbg, newbl_l, lentry->low_word, rentry->low_word,
1201 dbg = get_irn_dbg_info(node);
1202 irn = new_rd_Cond(dbg, newbl_l, irn);
1204 proj = new_r_Proj(irn, mode_X, pn_Cond_true);
1205 mark_irn_visited(proj);
1206 add_block_cf_input(dstT, projT, proj);
1208 proj = new_r_Proj(irn, mode_X, pn_Cond_false);
1209 mark_irn_visited(proj);
1210 add_block_cf_input(dstF, projF, proj);
1213 /* we have changed the control flow */
1214 env->flags |= CF_CHANGED;
1218 * Translate a Conv whose result has a doubleword mode (higher_signed or higher_unsigned).
1220 static void lower_Conv_to_Ll(ir_node *node, lower_env_t *env)
1222 ir_mode *omode = get_irn_mode(node);
1223 ir_node *op = get_Conv_op(node);
1224 ir_mode *imode = get_irn_mode(op);
1225 ir_graph *irg = get_irn_irg(node);
1226 ir_node *block = get_nodes_block(node);
1227 dbg_info *dbg = get_irn_dbg_info(node);
1231 ir_mode *low_unsigned = env->low_unsigned;
1233 = mode_is_signed(omode) ? env->low_signed : low_unsigned;
1235 if (mode_is_int(imode) || mode_is_reference(imode)) {
1236 if (imode == env->high_signed || imode == env->high_unsigned) {
1237 /* a Conv from Lu to Ls or Ls to Lu */
1238 const node_entry_t *op_entry = get_node_entry(env, op);
1239 res_low = op_entry->low_word;
1240 res_high = new_rd_Conv(dbg, block, op_entry->high_word, low_signed);
1242 /* simple case: create a high word */
1243 if (imode != low_unsigned)
1244 op = new_rd_Conv(dbg, block, op, low_unsigned);
1248 if (mode_is_signed(imode)) {
1249 int c = get_mode_size_bits(low_signed) - 1;
1250 ir_node *cnst = new_r_Const_long(irg, low_unsigned, c);
1251 if (get_irn_mode(op) != low_signed)
1252 op = new_rd_Conv(dbg, block, op, low_signed);
1253 res_high = new_rd_Shrs(dbg, block, op, cnst, low_signed);
1255 res_high = new_r_Const(irg, get_mode_null(low_signed));
1258 } else if (imode == mode_b) {
1259 res_low = new_rd_Conv(dbg, block, op, low_unsigned);
1260 res_high = new_r_Const(irg, get_mode_null(low_signed));
1262 ir_node *irn, *call;
1263 ir_type *mtp = get_conv_type(imode, omode, env);
1265 irn = get_intrinsic_address(mtp, get_irn_op(node), imode, omode, env);
1266 call = new_rd_Call(dbg, block, get_irg_no_mem(irg), irn, 1, &op, mtp);
1267 set_irn_pinned(call, get_irn_pinned(node));
1268 irn = new_r_Proj(call, mode_T, pn_Call_T_result);
1270 res_low = new_r_Proj(irn, low_unsigned, 0);
1271 res_high = new_r_Proj(irn, low_signed, 1);
1273 set_lowered(env, node, res_low, res_high);
1277 * Translate a Conv whose operand has a doubleword mode (higher_signed or higher_unsigned).
1279 static void lower_Conv_from_Ll(ir_node *node, lower_env_t *env)
1281 ir_node *op = get_Conv_op(node);
1282 ir_mode *omode = get_irn_mode(node);
1283 ir_node *block = get_nodes_block(node);
1284 dbg_info *dbg = get_irn_dbg_info(node);
1285 ir_graph *irg = get_irn_irg(node);
1286 const node_entry_t *entry = get_node_entry(env, op);
1288 if (mode_is_int(omode) || mode_is_reference(omode)) {
1289 op = entry->low_word;
1291 /* simple case: create a high word */
1292 if (omode != env->low_unsigned)
1293 op = new_rd_Conv(dbg, block, op, omode);
1295 set_Conv_op(node, op);
1296 } else if (omode == mode_b) {
1297 /* llu ? true : false <=> (low|high) ? true : false */
1298 ir_mode *mode = env->low_unsigned;
1299 ir_node *ornode = new_rd_Or(dbg, block, entry->low_word,
1300 entry->high_word, mode);
1301 set_Conv_op(node, ornode);
1303 ir_node *irn, *call, *in[2];
1304 ir_mode *imode = get_irn_mode(op);
1305 ir_type *mtp = get_conv_type(imode, omode, env);
1307 irn = get_intrinsic_address(mtp, get_irn_op(node), imode, omode, env);
1308 in[0] = entry->low_word;
1309 in[1] = entry->high_word;
1311 call = new_rd_Call(dbg, block, get_irg_no_mem(irg), irn, 2, in, mtp);
1312 set_irn_pinned(call, get_irn_pinned(node));
1313 irn = new_r_Proj(call, mode_T, pn_Call_T_result);
1315 exchange(node, new_r_Proj(irn, omode, 0));
1322 static void lower_Cmp(ir_node *cmp, ir_mode *m, lower_env_t *env)
1324 ir_node *l = get_Cmp_left(cmp);
1325 ir_mode *mode = get_irn_mode(l);
1326 ir_node *r, *low, *high, *t, *res;
1327 ir_relation relation;
1330 const node_entry_t *lentry;
1331 const node_entry_t *rentry;
1334 if (mode != env->high_signed && mode != env->high_unsigned)
1337 r = get_Cmp_right(cmp);
1338 lentry = get_node_entry(env, l);
1339 rentry = get_node_entry(env, r);
1340 relation = get_Cmp_relation(cmp);
1341 block = get_nodes_block(cmp);
1342 dbg = get_irn_dbg_info(cmp);
1344 /* easy case for x ==/!= 0 (see lower_Cond for details) */
1345 if (is_equality_cmp_0(cmp)) {
1346 ir_graph *irg = get_irn_irg(cmp);
1347 ir_mode *mode = env->low_unsigned;
1348 ir_node *low = new_r_Conv(block, lentry->low_word, mode);
1349 ir_node *high = new_r_Conv(block, lentry->high_word, mode);
1350 ir_node *ornode = new_rd_Or(dbg, block, low, high, mode);
1351 ir_node *new_cmp = new_rd_Cmp(dbg, block, ornode, new_r_Const_long(irg, mode, 0), relation);
1352 exchange(cmp, new_cmp);
1356 if (relation == ir_relation_equal) {
1357 /* simple case:a == b <==> a_h == b_h && a_l == b_l */
1358 low = new_rd_Cmp(dbg, block, lentry->low_word, rentry->low_word,
1360 high = new_rd_Cmp(dbg, block, lentry->high_word, rentry->high_word,
1362 res = new_rd_And(dbg, block, low, high, mode_b);
1363 } else if (relation == ir_relation_less_greater) {
1364 /* simple case:a != b <==> a_h != b_h || a_l != b_l */
1365 low = new_rd_Cmp(dbg, block, lentry->low_word, rentry->low_word,
1367 high = new_rd_Cmp(dbg, block, lentry->high_word, rentry->high_word,
1369 res = new_rd_Or(dbg, block, low, high, mode_b);
1371 /* a rel b <==> a_h REL b_h || (a_h == b_h && a_l rel b_l) */
1372 ir_node *high1 = new_rd_Cmp(dbg, block, lentry->high_word,
1373 rentry->high_word, relation & ~ir_relation_equal);
1374 low = new_rd_Cmp(dbg, block, lentry->low_word, rentry->low_word,
1376 high = new_rd_Cmp(dbg, block, lentry->high_word, rentry->high_word,
1378 t = new_rd_And(dbg, block, low, high, mode_b);
1379 res = new_rd_Or(dbg, block, high1, t, mode_b);
1387 static void lower_Conv(ir_node *node, ir_mode *mode, lower_env_t *env)
1389 mode = get_irn_mode(node);
1391 if (mode == env->high_signed || mode == env->high_unsigned) {
1392 lower_Conv_to_Ll(node, env);
1394 ir_mode *op_mode = get_irn_mode(get_Conv_op(node));
1396 if (op_mode == env->high_signed || op_mode == env->high_unsigned) {
1397 lower_Conv_from_Ll(node, env);
1403 * Remember the new argument index of this value type entity in the lowered
1406 * @param ent the entity
1407 * @param pos the argument index of this entity
1409 static inline void set_entity_arg_idx(ir_entity *ent, size_t pos)
1411 set_entity_link(ent, INT_TO_PTR(pos));
1415 * Retrieve the argument index of a value type entity.
1417 * @param ent the entity
1419 static size_t get_entity_arg_idx(const ir_entity *ent) {
1420 return PTR_TO_INT(get_entity_link(ent));
1424 * Lower the method type.
1426 * @param env the lower environment
1427 * @param mtp the method type to lower
1429 * @return the lowered type
1431 static ir_type *lower_mtp(lower_env_t *env, ir_type *mtp)
1434 ir_type *res, *value_type;
1436 entry = pmap_find(lowered_type, mtp);
1438 size_t i, orig_n_params, orig_n_res, n_param, n_res;
1440 /* count new number of params */
1441 n_param = orig_n_params = get_method_n_params(mtp);
1442 for (i = orig_n_params; i > 0;) {
1443 ir_type *tp = get_method_param_type(mtp, --i);
1445 if (is_Primitive_type(tp)) {
1446 ir_mode *mode = get_type_mode(tp);
1448 if (mode == env->high_signed ||
1449 mode == env->high_unsigned)
1454 /* count new number of results */
1455 n_res = orig_n_res = get_method_n_ress(mtp);
1456 for (i = orig_n_res; i > 0;) {
1457 ir_type *tp = get_method_res_type(mtp, --i);
1459 if (is_Primitive_type(tp)) {
1460 ir_mode *mode = get_type_mode(tp);
1462 if (mode == env->high_signed ||
1463 mode == env->high_unsigned)
1468 res = new_type_method(n_param, n_res);
1470 /* set param types and result types */
1471 for (i = n_param = 0; i < orig_n_params; ++i) {
1472 ir_type *tp = get_method_param_type(mtp, i);
1474 if (is_Primitive_type(tp)) {
1475 ir_mode *mode = get_type_mode(tp);
1477 if (mode == env->high_signed) {
1478 set_method_param_type(res, n_param++, tp_u);
1479 set_method_param_type(res, n_param++, tp_s);
1480 } else if (mode == env->high_unsigned) {
1481 set_method_param_type(res, n_param++, tp_u);
1482 set_method_param_type(res, n_param++, tp_u);
1484 set_method_param_type(res, n_param++, tp);
1487 set_method_param_type(res, n_param++, tp);
1490 for (i = n_res = 0; i < orig_n_res; ++i) {
1491 ir_type *tp = get_method_res_type(mtp, i);
1493 if (is_Primitive_type(tp)) {
1494 ir_mode *mode = get_type_mode(tp);
1496 if (mode == env->high_signed) {
1497 set_method_res_type(res, n_res++, tp_u);
1498 set_method_res_type(res, n_res++, tp_s);
1499 } else if (mode == env->high_unsigned) {
1500 set_method_res_type(res, n_res++, tp_u);
1501 set_method_res_type(res, n_res++, tp_u);
1503 set_method_res_type(res, n_res++, tp);
1506 set_method_res_type(res, n_res++, tp);
1509 set_lowered_type(mtp, res);
1510 pmap_insert(lowered_type, mtp, res);
1512 value_type = get_method_value_param_type(mtp);
1513 if (value_type != NULL) {
1514 /* this creates a new value parameter type */
1515 (void)get_method_value_param_ent(res, 0);
1517 /* set new param positions for all entities of the value type */
1518 for (i = n_param = 0; i < orig_n_params; ++i) {
1519 ir_type *tp = get_method_param_type(mtp, i);
1520 ir_entity *ent = get_method_value_param_ent(mtp, i);
1522 set_entity_arg_idx(ent, n_param);
1523 if (is_Primitive_type(tp)) {
1524 ir_mode *mode = get_type_mode(tp);
1526 if (mode == env->high_signed || mode == env->high_unsigned) {
1534 set_lowered_type(value_type, get_method_value_param_type(res));
1537 res = (ir_type*)entry->value;
1543 * Translate a Return.
1545 static void lower_Return(ir_node *node, ir_mode *mode, lower_env_t *env)
1547 ir_graph *irg = get_irn_irg(node);
1548 ir_entity *ent = get_irg_entity(irg);
1549 ir_type *mtp = get_entity_type(ent);
1555 /* check if this return must be lowered */
1556 for (i = 0, n = get_Return_n_ress(node); i < n; ++i) {
1557 ir_node *pred = get_Return_res(node, i);
1558 ir_mode *mode = get_irn_op_mode(pred);
1560 if (mode == env->high_signed || mode == env->high_unsigned)
1566 ent = get_irg_entity(irg);
1567 mtp = get_entity_type(ent);
1569 mtp = lower_mtp(env, mtp);
1570 set_entity_type(ent, mtp);
1572 /* create a new in array */
1573 NEW_ARR_A(ir_node *, in, get_method_n_ress(mtp) + 1);
1574 in[0] = get_Return_mem(node);
1576 for (j = i = 0, n = get_Return_n_ress(node); i < n; ++i) {
1577 ir_node *pred = get_Return_res(node, i);
1578 ir_mode *pred_mode = get_irn_mode(pred);
1580 if (pred_mode == env->high_signed || pred_mode == env->high_unsigned) {
1581 const node_entry_t *entry = get_node_entry(env, pred);
1582 in[++j] = entry->low_word;
1583 in[++j] = entry->high_word;
1589 set_irn_in(node, j+1, in);
1593 * Translate the parameters.
1595 static void lower_Start(ir_node *node, ir_mode *mode, lower_env_t *env)
1597 ir_graph *irg = get_irn_irg(node);
1598 ir_entity *ent = get_irg_entity(irg);
1599 ir_type *tp = get_entity_type(ent);
1601 size_t i, j, n_params;
1603 ir_node *proj, *args;
1606 if (!mtp_must_be_lowered(env, tp)) return;
1608 n_params = get_method_n_params(tp);
1610 NEW_ARR_A(long, new_projs, n_params);
1612 /* Calculate mapping of proj numbers in new_projs */
1613 for (i = j = 0; i < n_params; ++i, ++j) {
1614 ir_type *ptp = get_method_param_type(tp, i);
1617 if (is_Primitive_type(ptp)) {
1618 ir_mode *mode = get_type_mode(ptp);
1620 if (mode == env->high_signed ||
1621 mode == env->high_unsigned)
1626 /* lower method type */
1627 tp = lower_mtp(env, tp);
1628 set_entity_type(ent, tp);
1630 /* switch off optimization for new Proj nodes or they might be CSE'ed
1631 with not patched one's */
1632 rem = get_optimize();
1635 /* fix all Proj's and create new ones */
1636 args = get_irg_args(irg);
1637 for (proj = (ir_node*)get_irn_link(node); proj;
1638 proj = (ir_node*)get_irn_link(proj)) {
1639 ir_node *pred = get_Proj_pred(proj);
1648 /* do not visit this node again */
1649 mark_irn_visited(proj);
1654 proj_nr = get_Proj_proj(proj);
1655 set_Proj_proj(proj, new_projs[proj_nr]);
1657 mode = get_irn_mode(proj);
1658 mode_l = env->low_unsigned;
1659 if (mode == env->high_signed) {
1660 mode_h = env->low_signed;
1661 } else if (mode == env->high_unsigned) {
1662 mode_h = env->low_unsigned;
1667 dbg = get_irn_dbg_info(proj);
1668 res_low = new_rd_Proj(dbg, args, mode_l, new_projs[proj_nr]);
1669 res_high = new_rd_Proj(dbg, args, mode_h, new_projs[proj_nr] + 1);
1670 set_lowered(env, proj, res_low, res_high);
1678 static void lower_Call(ir_node *node, ir_mode *mode, lower_env_t *env)
1680 ir_type *tp = get_Call_type(node);
1681 ir_node **in, *proj, *results;
1682 size_t n_params, n_res;
1683 bool need_lower = false;
1686 long *res_numbers = NULL;
1689 n_params = get_method_n_params(tp);
1690 for (p = 0; p < n_params; ++p) {
1691 ir_type *ptp = get_method_param_type(tp, p);
1693 if (is_Primitive_type(ptp)) {
1694 ir_mode *mode = get_type_mode(ptp);
1696 if (mode == env->high_signed || mode == env->high_unsigned) {
1702 n_res = get_method_n_ress(tp);
1704 NEW_ARR_A(long, res_numbers, n_res);
1706 for (i = j = 0; i < n_res; ++i, ++j) {
1707 ir_type *ptp = get_method_res_type(tp, i);
1710 if (is_Primitive_type(ptp)) {
1711 ir_mode *mode = get_type_mode(ptp);
1713 if (mode == env->high_signed || mode == env->high_unsigned) {
1724 /* let's lower it */
1725 tp = lower_mtp(env, tp);
1726 set_Call_type(node, tp);
1728 NEW_ARR_A(ir_node *, in, get_method_n_params(tp) + 2);
1730 in[0] = get_Call_mem(node);
1731 in[1] = get_Call_ptr(node);
1733 for (j = 2, i = 0; i < n_params; ++i) {
1734 ir_node *pred = get_Call_param(node, i);
1735 ir_mode *pred_mode = get_irn_mode(pred);
1737 if (pred_mode == env->high_signed || pred_mode == env->high_unsigned) {
1738 const node_entry_t *pred_entry = get_node_entry(env, pred);
1739 in[j++] = pred_entry->low_word;
1740 in[j++] = pred_entry->high_word;
1746 set_irn_in(node, j, in);
1748 /* fix the results */
1750 for (proj = (ir_node*)get_irn_link(node); proj;
1751 proj = (ir_node*)get_irn_link(proj)) {
1752 long proj_nr = get_Proj_proj(proj);
1754 if (proj_nr == pn_Call_T_result && get_Proj_pred(proj) == node) {
1755 /* found the result proj */
1761 if (results != NULL) { /* there are results */
1762 int rem = get_optimize();
1764 /* switch off optimization for new Proj nodes or they might be CSE'ed
1765 with not patched one's */
1767 for (proj = (ir_node*)get_irn_link(results); proj; proj = (ir_node*)get_irn_link(proj)) {
1768 if (get_Proj_pred(proj) == results) {
1769 long proj_nr = get_Proj_proj(proj);
1770 ir_mode *proj_mode = get_irn_mode(proj);
1777 /* found a result */
1778 mark_irn_visited(proj);
1780 set_Proj_proj(proj, res_numbers[proj_nr]);
1782 mode_l = env->low_unsigned;
1783 if (proj_mode == env->high_signed) {
1784 mode_h = env->low_signed;
1785 } else if (proj_mode == env->high_unsigned) {
1786 mode_h = env->low_unsigned;
1791 dbg = get_irn_dbg_info(proj);
1792 res_low = new_rd_Proj(dbg, results, mode_l, res_numbers[proj_nr]);
1793 res_high = new_rd_Proj(dbg, results, mode_h, res_numbers[proj_nr] + 1);
1794 set_lowered(env, proj, res_low, res_high);
1802 * Translate an Unknown into two.
1804 static void lower_Unknown(ir_node *node, ir_mode *mode, lower_env_t *env)
1806 ir_mode *low_mode = env->low_unsigned;
1807 ir_graph *irg = get_irn_irg(node);
1808 ir_node *res_low = new_r_Unknown(irg, low_mode);
1809 ir_node *res_high = new_r_Unknown(irg, mode);
1810 set_lowered(env, node, res_low, res_high);
1816 * First step: just create two templates
1818 static void lower_Phi(lower_env_t *env, ir_node *phi)
1820 ir_mode *mode = get_irn_mode(phi);
1835 /* enqueue predecessors */
1836 arity = get_Phi_n_preds(phi);
1837 for (i = 0; i < arity; ++i) {
1838 ir_node *pred = get_Phi_pred(phi, i);
1839 pdeq_putr(env->waitq, pred);
1842 if (mode != env->high_signed && mode != env->high_unsigned)
1845 /* first create a new in array */
1846 NEW_ARR_A(ir_node *, in_l, arity);
1847 NEW_ARR_A(ir_node *, in_h, arity);
1848 irg = get_irn_irg(phi);
1849 mode_l = env->low_unsigned;
1850 mode_h = mode == env->high_signed ? env->low_signed : env->low_unsigned;
1851 unk_l = new_r_Dummy(irg, mode_l);
1852 unk_h = new_r_Dummy(irg, mode_h);
1853 for (i = 0; i < arity; ++i) {
1858 dbg = get_irn_dbg_info(phi);
1859 block = get_nodes_block(phi);
1860 phi_l = new_rd_Phi(dbg, block, arity, in_l, mode_l);
1861 phi_h = new_rd_Phi(dbg, block, arity, in_h, mode_h);
1863 set_lowered(env, phi, phi_l, phi_h);
1865 /* remember that we need to fixup the predecessors later */
1866 ARR_APP1(ir_node*, env->lowered_phis, phi);
1868 /* Don't forget to link the new Phi nodes into the block.
1869 * Beware that some Phis might be optimized away. */
1871 add_Block_phi(block, phi_l);
1873 add_Block_phi(block, phi_h);
1876 static void fixup_phi(lower_env_t *env, ir_node *phi)
1878 const node_entry_t *entry = get_node_entry(env, phi);
1879 ir_node *phi_l = entry->low_word;
1880 ir_node *phi_h = entry->high_word;
1881 int arity = get_Phi_n_preds(phi);
1884 /* exchange phi predecessors which are lowered by now */
1885 for (i = 0; i < arity; ++i) {
1886 ir_node *pred = get_Phi_pred(phi, i);
1887 const node_entry_t *pred_entry = get_node_entry(env, pred);
1889 set_Phi_pred(phi_l, i, pred_entry->low_word);
1890 set_Phi_pred(phi_h, i, pred_entry->high_word);
1897 static void lower_Mux(ir_node *mux, ir_mode *mode, lower_env_t *env)
1899 ir_node *truen = get_Mux_true(mux);
1900 ir_node *falsen = get_Mux_false(mux);
1901 ir_node *sel = get_Mux_sel(mux);
1902 const node_entry_t *true_entry = get_node_entry(env, truen);
1903 const node_entry_t *false_entry = get_node_entry(env, falsen);
1904 ir_node *true_l = true_entry->low_word;
1905 ir_node *true_h = true_entry->high_word;
1906 ir_node *false_l = false_entry->low_word;
1907 ir_node *false_h = false_entry->high_word;
1908 dbg_info *dbgi = get_irn_dbg_info(mux);
1909 ir_node *block = get_nodes_block(mux);
1911 = new_rd_Mux(dbgi, block, sel, false_l, true_l, env->low_unsigned);
1913 = new_rd_Mux(dbgi, block, sel, false_h, true_h, mode);
1914 set_lowered(env, mux, res_low, res_high);
1918 * Translate an ASM node.
1920 static void lower_ASM(ir_node *asmn, ir_mode *mode, lower_env_t *env)
1922 ir_mode *high_signed = env->high_signed;
1923 ir_mode *high_unsigned = env->high_unsigned;
1924 int n_outs = get_ASM_n_output_constraints(asmn);
1925 ir_asm_constraint *output_constraints = get_ASM_output_constraints(asmn);
1926 ir_asm_constraint *input_constraints = get_ASM_input_constraints(asmn);
1927 unsigned n_64bit_outs = 0;
1933 for (i = get_irn_arity(asmn) - 1; i >= 0; --i) {
1934 ir_node *op = get_irn_n(asmn, i);
1935 ir_mode *op_mode = get_irn_mode(op);
1936 if (op_mode == high_signed || op_mode == high_unsigned) {
1937 panic("lowering ASM 64bit input unimplemented");
1941 for (i = 0; i < n_outs; ++i) {
1942 const ir_asm_constraint *constraint = &output_constraints[i];
1943 if (constraint->mode == high_signed || constraint->mode == high_unsigned) {
1944 const char *constr = get_id_str(constraint->constraint);
1946 /* TODO: How to do this architecture neutral? This is very
1947 * i386 specific... */
1948 if (constr[0] != '=' || constr[1] != 'A') {
1949 panic("lowering ASM 64bit output only supports '=A' currently");
1954 if (n_64bit_outs == 0)
1958 dbg_info *dbgi = get_irn_dbg_info(asmn);
1959 ir_node *block = get_nodes_block(asmn);
1960 int arity = get_irn_arity(asmn);
1961 ir_node **in = get_irn_in(asmn) + 1;
1962 int n_outs = get_ASM_n_output_constraints(asmn);
1964 int n_clobber = get_ASM_n_clobbers(asmn);
1965 long *proj_map = ALLOCAN(long, n_outs);
1966 ident **clobbers = get_ASM_clobbers(asmn);
1967 ident *asm_text = get_ASM_text(asmn);
1968 ir_asm_constraint *new_outputs
1969 = ALLOCAN(ir_asm_constraint, n_outs+n_64bit_outs);
1972 for (i = 0; i < n_outs; ++i) {
1973 const ir_asm_constraint *constraint = &output_constraints[i];
1974 if (constraint->mode == high_signed || constraint->mode == high_unsigned) {
1975 new_outputs[new_n_outs].pos = constraint->pos;
1976 new_outputs[new_n_outs].constraint = new_id_from_str("=a");
1977 new_outputs[new_n_outs].mode = env->low_unsigned;
1978 proj_map[i] = new_n_outs;
1980 new_outputs[new_n_outs].pos = constraint->pos;
1981 new_outputs[new_n_outs].constraint = new_id_from_str("=d");
1982 if (constraint->mode == high_signed)
1983 new_outputs[new_n_outs].mode = env->low_signed;
1985 new_outputs[new_n_outs].mode = env->low_unsigned;
1988 new_outputs[new_n_outs] = *constraint;
1989 proj_map[i] = new_n_outs;
1993 assert(new_n_outs == n_outs+(int)n_64bit_outs);
1995 new_asm = new_rd_ASM(dbgi, block, arity, in, input_constraints,
1996 new_n_outs, new_outputs, n_clobber, clobbers,
2002 n = (ir_node*)get_irn_link(n);
2005 proj_mode = get_irn_mode(n);
2006 pn = get_Proj_proj(n);
2010 pn = new_n_outs + pn - n_outs;
2012 if (proj_mode == high_signed || proj_mode == high_unsigned) {
2014 = proj_mode == high_signed ? env->low_signed : env->low_unsigned;
2015 ir_node *np_low = new_r_Proj(new_asm, env->low_unsigned, pn);
2016 ir_node *np_high = new_r_Proj(new_asm, high_mode, pn+1);
2017 set_lowered(env, n, np_low, np_high);
2019 ir_node *np = new_r_Proj(new_asm, proj_mode, pn);
2027 * Translate a Sel node.
2029 static void lower_Sel(ir_node *sel, ir_mode *mode, lower_env_t *env)
2033 /* we must only lower value parameter Sels if we change the
2034 value parameter type. */
2035 if (env->value_param_tp != NULL) {
2036 ir_entity *ent = get_Sel_entity(sel);
2037 if (get_entity_owner(ent) == env->value_param_tp) {
2038 size_t pos = get_entity_arg_idx(ent);
2040 ent = get_method_value_param_ent(env->l_mtp, pos);
2041 set_Sel_entity(sel, ent);
2047 * check for opcodes that must always be lowered.
2049 static bool always_lower(unsigned code)
2067 * Compare two op_mode_entry_t's.
2069 static int cmp_op_mode(const void *elt, const void *key, size_t size)
2071 const op_mode_entry_t *e1 = (const op_mode_entry_t*)elt;
2072 const op_mode_entry_t *e2 = (const op_mode_entry_t*)key;
2075 return (e1->op != e2->op) | (e1->imode != e2->imode) | (e1->omode != e2->omode);
2079 * Compare two conv_tp_entry_t's.
2081 static int cmp_conv_tp(const void *elt, const void *key, size_t size)
2083 const conv_tp_entry_t *e1 = (const conv_tp_entry_t*)elt;
2084 const conv_tp_entry_t *e2 = (const conv_tp_entry_t*)key;
2087 return (e1->imode != e2->imode) | (e1->omode != e2->omode);
2091 * Enter a lowering function into an ir_op.
2093 static void enter_lower_func(ir_op *op, lower_func func)
2095 op->ops.generic = (op_func)func;
2099 * Returns non-zero if a method type must be lowered.
2101 * @param mtp the method type
2103 static bool mtp_must_be_lowered(lower_env_t *env, ir_type *mtp)
2105 size_t i, n_params = get_method_n_params(mtp);
2107 /* first check if we have parameters that must be fixed */
2108 for (i = 0; i < n_params; ++i) {
2109 ir_type *tp = get_method_param_type(mtp, i);
2111 if (is_Primitive_type(tp)) {
2112 ir_mode *mode = get_type_mode(tp);
2114 if (mode == env->high_signed ||
2115 mode == env->high_unsigned)
2122 /* Determine which modes need to be lowered */
2123 static void setup_modes(lower_env_t *env)
2125 unsigned size_bits = env->params->doubleword_size;
2126 ir_mode *doubleword_signed = NULL;
2127 ir_mode *doubleword_unsigned = NULL;
2128 size_t n_modes = get_irp_n_modes();
2129 ir_mode_arithmetic arithmetic;
2130 unsigned modulo_shift;
2133 /* search for doubleword modes... */
2134 for (i = 0; i < n_modes; ++i) {
2135 ir_mode *mode = get_irp_mode(i);
2136 if (!mode_is_int(mode))
2138 if (get_mode_size_bits(mode) != size_bits)
2140 if (mode_is_signed(mode)) {
2141 if (doubleword_signed != NULL) {
2142 /* sigh - the lowerer should really just lower all mode with
2143 * size_bits it finds. Unfortunately this required a bigger
2145 panic("multiple double word signed modes found");
2147 doubleword_signed = mode;
2149 if (doubleword_unsigned != NULL) {
2150 /* sigh - the lowerer should really just lower all mode with
2151 * size_bits it finds. Unfortunately this required a bigger
2153 panic("multiple double word unsigned modes found");
2155 doubleword_unsigned = mode;
2158 if (doubleword_signed == NULL || doubleword_unsigned == NULL) {
2159 panic("Couldn't find doubleword modes");
2162 arithmetic = get_mode_arithmetic(doubleword_signed);
2163 modulo_shift = get_mode_modulo_shift(doubleword_signed);
2165 assert(get_mode_size_bits(doubleword_unsigned) == size_bits);
2166 assert(size_bits % 2 == 0);
2167 assert(get_mode_sign(doubleword_signed) == 1);
2168 assert(get_mode_sign(doubleword_unsigned) == 0);
2169 assert(get_mode_sort(doubleword_signed) == irms_int_number);
2170 assert(get_mode_sort(doubleword_unsigned) == irms_int_number);
2171 assert(get_mode_arithmetic(doubleword_unsigned) == arithmetic);
2172 assert(get_mode_modulo_shift(doubleword_unsigned) == modulo_shift);
2174 /* try to guess a sensible modulo shift for the new mode.
2175 * (This is IMO another indication that this should really be a node
2176 * attribute instead of a mode thing) */
2177 if (modulo_shift == size_bits) {
2178 modulo_shift = modulo_shift / 2;
2179 } else if (modulo_shift == 0) {
2182 panic("Don't know what new modulo shift to use for lowered doubleword mode");
2186 /* produce lowered modes */
2187 env->high_signed = doubleword_signed;
2188 env->high_unsigned = doubleword_unsigned;
2189 env->low_signed = new_ir_mode("WS", irms_int_number, size_bits, 1,
2190 arithmetic, modulo_shift);
2191 env->low_unsigned = new_ir_mode("WU", irms_int_number, size_bits, 0,
2192 arithmetic, modulo_shift);
2195 static void enqueue_preds(lower_env_t *env, ir_node *node)
2197 int arity = get_irn_arity(node);
2200 for (i = 0; i < arity; ++i) {
2201 ir_node *pred = get_irn_n(node, i);
2202 pdeq_putr(env->waitq, pred);
2206 static void lower_node(lower_env_t *env, ir_node *node)
2214 node_entry_t *entry;
2216 if (irn_visited_else_mark(node))
2219 /* cycles are always broken at Phi and Block nodes. So we don't need special
2220 * magic in all the other lower functions */
2221 if (is_Block(node)) {
2222 enqueue_preds(env, node);
2224 } else if (is_Phi(node)) {
2225 lower_Phi(env, node);
2229 /* depth-first: descend into operands */
2230 if (!is_Block(node)) {
2231 ir_node *block = get_nodes_block(node);
2232 lower_node(env, block);
2235 if (!is_Cond(node)) {
2236 arity = get_irn_arity(node);
2237 for (i = 0; i < arity; ++i) {
2238 ir_node *pred = get_irn_n(node, i);
2239 lower_node(env, pred);
2243 op = get_irn_op(node);
2244 func = (lower_func) op->ops.generic;
2248 idx = get_irn_idx(node);
2249 entry = idx < env->n_entries ? env->entries[idx] : NULL;
2250 if (entry != NULL || always_lower(get_irn_opcode(node))) {
2251 mode = get_irn_op_mode(node);
2252 if (mode == env->high_signed) {
2253 mode = env->low_signed;
2255 mode = env->low_unsigned;
2257 DB((dbg, LEVEL_1, " %+F\n", node));
2258 func(node, mode, env);
2262 static void lower_irg(lower_env_t *env, ir_graph *irg)
2268 obstack_init(&env->obst);
2270 n_idx = get_irg_last_idx(irg);
2271 n_idx = n_idx + (n_idx >> 2); /* add 25% */
2272 env->n_entries = n_idx;
2273 env->entries = NEW_ARR_F(node_entry_t*, n_idx);
2274 memset(env->entries, 0, sizeof(env->entries[0]) * n_idx);
2279 env->proj_2_block = pmap_create();
2280 env->value_param_tp = NULL;
2282 ent = get_irg_entity(irg);
2283 mtp = get_entity_type(ent);
2285 if (mtp_must_be_lowered(env, mtp)) {
2286 ir_type *ltp = lower_mtp(env, mtp);
2287 /* Do not update the entity type yet, this will be done by lower_Start! */
2288 env->flags |= MUST_BE_LOWERED;
2290 env->value_param_tp = get_method_value_param_type(mtp);
2293 /* first step: link all nodes and allocate data */
2294 ir_reserve_resources(irg, IR_RESOURCE_PHI_LIST | IR_RESOURCE_IRN_LINK);
2295 irg_walk_graph(irg, firm_clear_node_and_phi_links,
2296 prepare_links_and_handle_rotl, env);
2298 if (env->flags & MUST_BE_LOWERED) {
2300 ir_reserve_resources(irg, IR_RESOURCE_IRN_VISITED);
2301 inc_irg_visited(irg);
2303 assert(pdeq_empty(env->waitq));
2304 pdeq_putr(env->waitq, get_irg_end(irg));
2306 env->lowered_phis = NEW_ARR_F(ir_node*, 0);
2307 while (!pdeq_empty(env->waitq)) {
2308 ir_node *node = (ir_node*)pdeq_getl(env->waitq);
2309 lower_node(env, node);
2312 /* we need to fixup phis */
2313 for (i = 0; i < ARR_LEN(env->lowered_phis); ++i) {
2314 ir_node *phi = env->lowered_phis[i];
2315 fixup_phi(env, phi);
2317 DEL_ARR_F(env->lowered_phis);
2320 ir_free_resources(irg, IR_RESOURCE_IRN_VISITED);
2322 if (env->flags & CF_CHANGED) {
2323 /* control flow changed, dominance info is invalid */
2324 set_irg_doms_inconsistent(irg);
2325 set_irg_extblk_inconsistent(irg);
2326 set_irg_loopinfo_inconsistent(irg);
2330 ir_free_resources(irg, IR_RESOURCE_PHI_LIST | IR_RESOURCE_IRN_LINK);
2332 pmap_destroy(env->proj_2_block);
2333 DEL_ARR_F(env->entries);
2334 obstack_free(&env->obst, NULL);
2340 void lower_dw_ops(const lwrdw_param_t *param)
2345 assert(param != NULL);
2346 FIRM_DBG_REGISTER(dbg, "firm.lower.dw");
2348 memset(&lenv, 0, sizeof(lenv));
2349 lenv.params = param;
2352 /* create the necessary maps */
2353 if (! intrinsic_fkt)
2354 intrinsic_fkt = new_set(cmp_op_mode, iro_Last + 1);
2356 conv_types = new_set(cmp_conv_tp, 16);
2358 lowered_type = pmap_create();
2360 /* create a primitive unsigned and signed type */
2362 tp_u = get_type_for_mode(lenv.low_unsigned);
2364 tp_s = get_type_for_mode(lenv.low_signed);
2366 /* create method types for the created binop calls */
2368 binop_tp_u = new_type_method(4, 2);
2369 set_method_param_type(binop_tp_u, 0, tp_u);
2370 set_method_param_type(binop_tp_u, 1, tp_u);
2371 set_method_param_type(binop_tp_u, 2, tp_u);
2372 set_method_param_type(binop_tp_u, 3, tp_u);
2373 set_method_res_type(binop_tp_u, 0, tp_u);
2374 set_method_res_type(binop_tp_u, 1, tp_u);
2377 binop_tp_s = new_type_method(4, 2);
2378 set_method_param_type(binop_tp_s, 0, tp_u);
2379 set_method_param_type(binop_tp_s, 1, tp_s);
2380 set_method_param_type(binop_tp_s, 2, tp_u);
2381 set_method_param_type(binop_tp_s, 3, tp_s);
2382 set_method_res_type(binop_tp_s, 0, tp_u);
2383 set_method_res_type(binop_tp_s, 1, tp_s);
2385 if (! shiftop_tp_u) {
2386 shiftop_tp_u = new_type_method(3, 2);
2387 set_method_param_type(shiftop_tp_u, 0, tp_u);
2388 set_method_param_type(shiftop_tp_u, 1, tp_u);
2389 set_method_param_type(shiftop_tp_u, 2, tp_u);
2390 set_method_res_type(shiftop_tp_u, 0, tp_u);
2391 set_method_res_type(shiftop_tp_u, 1, tp_u);
2393 if (! shiftop_tp_s) {
2394 shiftop_tp_s = new_type_method(3, 2);
2395 set_method_param_type(shiftop_tp_s, 0, tp_u);
2396 set_method_param_type(shiftop_tp_s, 1, tp_s);
2397 set_method_param_type(shiftop_tp_s, 2, tp_u);
2398 set_method_res_type(shiftop_tp_s, 0, tp_u);
2399 set_method_res_type(shiftop_tp_s, 1, tp_s);
2402 unop_tp_u = new_type_method(2, 2);
2403 set_method_param_type(unop_tp_u, 0, tp_u);
2404 set_method_param_type(unop_tp_u, 1, tp_u);
2405 set_method_res_type(unop_tp_u, 0, tp_u);
2406 set_method_res_type(unop_tp_u, 1, tp_u);
2409 unop_tp_s = new_type_method(2, 2);
2410 set_method_param_type(unop_tp_s, 0, tp_u);
2411 set_method_param_type(unop_tp_s, 1, tp_s);
2412 set_method_res_type(unop_tp_s, 0, tp_u);
2413 set_method_res_type(unop_tp_s, 1, tp_s);
2416 clear_irp_opcodes_generic_func();
2417 enter_lower_func(op_ASM, lower_ASM);
2418 enter_lower_func(op_Add, lower_binop);
2419 enter_lower_func(op_And, lower_And);
2420 enter_lower_func(op_Call, lower_Call);
2421 enter_lower_func(op_Cmp, lower_Cmp);
2422 enter_lower_func(op_Cond, lower_Cond);
2423 enter_lower_func(op_Const, lower_Const);
2424 enter_lower_func(op_Conv, lower_Conv);
2425 enter_lower_func(op_Div, lower_Div);
2426 enter_lower_func(op_Eor, lower_Eor);
2427 enter_lower_func(op_Load, lower_Load);
2428 enter_lower_func(op_Minus, lower_Unop);
2429 enter_lower_func(op_Mod, lower_Mod);
2430 enter_lower_func(op_Mul, lower_binop);
2431 enter_lower_func(op_Mux, lower_Mux);
2432 enter_lower_func(op_Not, lower_Not);
2433 enter_lower_func(op_Or, lower_Or);
2434 enter_lower_func(op_Return, lower_Return);
2435 enter_lower_func(op_Sel, lower_Sel);
2436 enter_lower_func(op_Shl, lower_Shl);
2437 enter_lower_func(op_Shr, lower_Shr);
2438 enter_lower_func(op_Shrs, lower_Shrs);
2439 enter_lower_func(op_Start, lower_Start);
2440 enter_lower_func(op_Store, lower_Store);
2441 enter_lower_func(op_Sub, lower_binop);
2442 enter_lower_func(op_Unknown, lower_Unknown);
2444 lenv.tv_mode_bytes = new_tarval_from_long(param->doubleword_size/(2*8), lenv.low_unsigned);
2445 lenv.tv_mode_bits = new_tarval_from_long(param->doubleword_size/2, lenv.low_unsigned);
2446 lenv.waitq = new_pdeq();
2447 lenv.first_id = new_id_from_chars(param->little_endian ? ".l" : ".h", 2);
2448 lenv.next_id = new_id_from_chars(param->little_endian ? ".h" : ".l", 2);
2450 /* transform all graphs */
2451 for (i = 0, n = get_irp_n_irgs(); i < n; ++i) {
2452 ir_graph *irg = get_irp_irg(i);
2453 lower_irg(&lenv, irg);
2455 del_pdeq(lenv.waitq);
2458 /* Default implementation. */
2459 ir_entity *def_create_intrinsic_fkt(ir_type *method, const ir_op *op,
2460 const ir_mode *imode, const ir_mode *omode,
2468 if (imode == omode) {
2469 snprintf(buf, sizeof(buf), "__l%s%s", get_op_name(op), get_mode_name(imode));
2471 snprintf(buf, sizeof(buf), "__l%s%s%s", get_op_name(op),
2472 get_mode_name(imode), get_mode_name(omode));
2474 id = new_id_from_str(buf);
2476 ent = new_entity(get_glob_type(), id, method);
2477 set_entity_ld_ident(ent, get_entity_ident(ent));