2 * Copyright (C) 1995-2011 University of Karlsruhe. All right reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Lower double word operations, i.e. 64bit -> 32bit, 32bit -> 16bit etc.
24 * @author Michael Beck
37 #include "irgraph_t.h"
42 #include "dbginfo_t.h"
43 #include "iropt_dbg.h"
/*
 * NOTE(review): this chunk is a sampled extract of the original file.
 * The leading integers on each line are original-file line numbers left
 * over from extraction; gaps in that numbering mean source lines (struct
 * closers, the enum header for the flags below, etc.) are missing here.
 */
59 /** A map from (op, imode, omode) to Intrinsic functions entities. */
60 static set *intrinsic_fkt;
62 /** A map from (imode, omode) to conv function types. */
63 static set *conv_types;
65 /** A map from a method type to its lowered type. */
66 static pmap *lowered_type;
68 /** The types for the binop and unop intrinsics. */
69 static ir_type *binop_tp_u, *binop_tp_s, *unop_tp_u, *unop_tp_s, *shiftop_tp_u, *shiftop_tp_s, *tp_s, *tp_u;
71 /** the debug handle */
72 DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
/* Key/value record for the intrinsic-entity cache (intrinsic_fkt set). */
75 * An entry in the (op, imode, omode) -> entity map.
77 typedef struct op_mode_entry {
78 const ir_op *op; /**< the op */
79 const ir_mode *imode; /**< the input mode */
80 const ir_mode *omode; /**< the output mode */
81 ir_entity *ent; /**< the associated entity of this (op, imode, omode) triple */
/* Key/value record for the conv-method-type cache (conv_types set). */
85 * An entry in the (imode, omode) -> tp map.
87 typedef struct conv_tp_entry {
88 const ir_mode *imode; /**< the input mode */
89 const ir_mode *omode; /**< the output mode */
90 ir_type *mtd; /**< the associated method type of this (imode, omode) pair */
/* Bit flags or-ed into env->flags (enum header missing in this extract). */
94 MUST_BE_LOWERED = 1, /**< graph must be lowered */
95 CF_CHANGED = 2, /**< control flow was changed */
99 * The lower environment.
101 typedef struct lower_dw_env_t {
102 lower64_entry_t **entries; /**< entries per node */
104 struct obstack obst; /**< an obstack holding the temporary data */
105 ir_type *l_mtp; /**< lowered method type of the current method */
106 ir_tarval *tv_mode_bytes; /**< a tarval containing the number of bytes in the lowered modes */
107 ir_tarval *tv_mode_bits; /**< a tarval containing the number of bits in the lowered modes */
108 pdeq *waitq; /**< a wait queue of all nodes that must be handled later */
109 ir_node **lowered_phis; /**< list of lowered phis */
110 pmap *proj_2_block; /**< a map from ProjX to its destination blocks */
111 ir_mode *high_signed; /**< doubleword signed type */
112 ir_mode *high_unsigned; /**< doubleword unsigned type */
113 ir_mode *low_signed; /**< word signed type */
114 ir_mode *low_unsigned; /**< word unsigned type */
115 ident *first_id; /**< .l for little and .h for big endian */
116 ident *next_id; /**< .h for little and .l for big endian */
117 const lwrdw_param_t *params; /**< transformation parameter */
118 unsigned flags; /**< some flags */
119 unsigned n_entries; /**< number of entries */
120 ir_type *value_param_tp; /**< the old value param type */
/* Single global environment used by the whole pass (pass is not reentrant). */
123 static lower_dw_env_t *env;
/* Forward declarations for mutually recursive lowering helpers. */
125 static void lower_node(ir_node *node);
126 static bool mtp_must_be_lowered(ir_type *mtp);
/*
 * Look up or build the method type used to emulate a Conv from imode to
 * omode with a runtime call.  Doubleword operands/results occupy two
 * word-sized slots (unsigned low word, then signed/unsigned high word).
 * NOTE(review): lines are missing from this extract (cache-hit early
 * return, brace lines, the n_param/n_res bumps after the doubleword
 * checks) — confirm against the full source before editing.
 */
129 * Create a method type for a Conv emulation from imode to omode.
131 static ir_type *get_conv_type(ir_mode *imode, ir_mode *omode)
133 conv_tp_entry_t key, *entry;
140 entry = (conv_tp_entry_t*)set_insert(conv_types, &key, sizeof(key), HASH_PTR(imode) ^ HASH_PTR(omode));
142 int n_param = 1, n_res = 1;
/* doubleword input/output needs two slots each (increments not visible here) */
144 if (imode == env->high_signed || imode == env->high_unsigned)
146 if (omode == env->high_signed || omode == env->high_unsigned)
149 /* create a new one */
150 mtd = new_type_method(n_param, n_res);
152 /* set param types and result types */
154 if (imode == env->high_signed) {
155 set_method_param_type(mtd, n_param++, tp_u);
156 set_method_param_type(mtd, n_param++, tp_s);
157 } else if (imode == env->high_unsigned) {
158 set_method_param_type(mtd, n_param++, tp_u);
159 set_method_param_type(mtd, n_param++, tp_u);
161 ir_type *tp = get_type_for_mode(imode);
162 set_method_param_type(mtd, n_param++, tp);
166 if (omode == env->high_signed) {
167 set_method_res_type(mtd, n_res++, tp_u);
168 set_method_res_type(mtd, n_res++, tp_s);
169 } else if (omode == env->high_unsigned) {
170 set_method_res_type(mtd, n_res++, tp_u);
171 set_method_res_type(mtd, n_res++, tp_u);
173 ir_type *tp = get_type_for_mode(omode);
174 set_method_res_type(mtd, n_res++, tp);
184 * Add an additional control flow input to a block.
185 * Patch all Phi nodes. The new Phi inputs are copied from
186 * old input number nr.
/*
 * Grows the block's cfg-predecessor array by one (the new input cf) and
 * extends every Phi of the block accordingly, duplicating input `nr`.
 * NOTE(review): the line appending cf/in[nr] at index `arity` is missing
 * from this extract.
 */
188 static void add_block_cf_input_nr(ir_node *block, int nr, ir_node *cf)
190 int i, arity = get_irn_arity(block);
195 NEW_ARR_A(ir_node *, in, arity + 1);
196 for (i = 0; i < arity; ++i)
197 in[i] = get_irn_n(block, i);
200 set_irn_in(block, i + 1, in);
/* every Phi needs one more data input matching the new control input */
202 for (phi = get_Block_phis(block); phi != NULL; phi = get_Phi_next(phi)) {
203 for (i = 0; i < arity; ++i)
204 in[i] = get_irn_n(phi, i);
206 set_irn_in(phi, i + 1, in);
211 * Add an additional control flow input to a block.
212 * Patch all Phi nodes. The new Phi inputs are copied from
213 * old input from cf tmpl.
/*
 * Convenience wrapper: finds the predecessor index of `tmpl` and delegates
 * to add_block_cf_input_nr() so the new Phi inputs mirror tmpl's column.
 */
215 static void add_block_cf_input(ir_node *block, ir_node *tmpl, ir_node *cf)
217 int i, arity = get_irn_arity(block);
220 for (i = 0; i < arity; ++i) {
221 if (get_irn_n(block, i) == tmpl) {
227 add_block_cf_input_nr(block, nr, cf);
231 * Return the "operational" mode of a Firm node.
/*
 * For memory/compare ops whose own mode is mode_M/mode_T/mode_b, return
 * the mode of the value actually operated on; otherwise the node's mode.
 * NOTE(review): the `case iro_*:` labels are missing from this extract —
 * each return below belongs to a Load/Store/Div/Mod/Cmp/default case.
 */
233 static ir_mode *get_irn_op_mode(ir_node *node)
235 switch (get_irn_opcode(node)) {
237 return get_Load_mode(node);
239 return get_irn_mode(get_Store_value(node));
241 return get_irn_mode(get_Div_left(node));
243 return get_irn_mode(get_Mod_left(node));
245 return get_irn_mode(get_Cmp_left(node));
247 return get_irn_mode(node);
252 * Walker, prepare the node links and determine which nodes need to be lowered
/*
 * Pre-pass walker: allocates a lower64_entry_t for every doubleword node,
 * links Projs to their predecessors, Phis to their blocks, and fills the
 * ProjX -> destination-block map used by lower_Cond().
 */
255 static void prepare_links(ir_node *node)
257 ir_mode *mode = get_irn_op_mode(node);
258 lower64_entry_t *link;
261 if (mode == env->high_signed || mode == env->high_unsigned) {
262 unsigned idx = get_irn_idx(node);
263 /* ok, found a node that will be lowered */
264 link = OALLOCZ(&env->obst, lower64_entry_t);
266 if (idx >= env->n_entries) {
267 /* enlarge: this happens only for Rotl nodes which is RARELY */
268 unsigned old = env->n_entries;
/* grow by ~12.5% headroom so repeated Rotl rewrites don't resize each time */
269 unsigned n_idx = idx + (idx >> 3);
271 ARR_RESIZE(lower64_entry_t *, env->entries, n_idx);
272 memset(&env->entries[old], 0, (n_idx - old) * sizeof(env->entries[0]));
273 env->n_entries = n_idx;
275 env->entries[idx] = link;
276 env->flags |= MUST_BE_LOWERED;
277 } else if (is_Conv(node)) {
278 /* Conv nodes have two modes */
279 ir_node *pred = get_Conv_op(node);
280 mode = get_irn_mode(pred);
282 if (mode == env->high_signed || mode == env->high_unsigned) {
283 /* must lower this node either but don't need a link */
284 env->flags |= MUST_BE_LOWERED;
290 /* link all Proj nodes to its predecessor:
291 Note that Tuple Proj's and its Projs are linked either. */
292 ir_node *pred = get_Proj_pred(node);
294 set_irn_link(node, get_irn_link(pred));
295 set_irn_link(pred, node);
296 } else if (is_Phi(node)) {
297 /* link all Phi nodes to its block */
298 ir_node *block = get_nodes_block(node);
299 add_Block_phi(block, node);
300 } else if (is_Block(node)) {
301 /* fill the Proj -> Block map */
302 for (i = get_Block_n_cfgpreds(node) - 1; i >= 0; --i) {
303 ir_node *pred = get_Block_cfgpred(node, i);
306 pmap_insert(env->proj_2_block, pred, node);
/* Return the lowering entry (low/high word pair) for a doubleword node. */
311 lower64_entry_t *get_node_entry(ir_node *node)
313 unsigned idx = get_irn_idx(node);
314 assert(idx < env->n_entries);
315 return env->entries[idx];
/*
 * Record the lowered replacement of `old`: its low word and high word.
 * NOTE(review): the original also marks the node/graph (lines missing here).
 */
318 void ir_set_dw_lowered(ir_node *old, ir_node *new_low, ir_node *new_high)
320 lower64_entry_t *entry = get_node_entry(old);
321 entry->low_word = new_low;
322 entry->high_word = new_high;
326 * Translate a Constant: create two.
/*
 * Split a doubleword constant into two word constants:
 * low  = tv truncated to the unsigned word mode,
 * high = tv arithmetically shifted right by the word bit width, in `mode`.
 */
328 static void lower_Const(ir_node *node, ir_mode *mode)
330 ir_graph *irg = get_irn_irg(node);
331 dbg_info *dbg = get_irn_dbg_info(node);
332 ir_mode *low_mode = env->low_unsigned;
333 ir_tarval *tv = get_Const_tarval(node);
334 ir_tarval *tv_l = tarval_convert_to(tv, low_mode);
335 ir_node *res_low = new_rd_Const(dbg, irg, tv_l);
336 ir_tarval *tv_shrs = tarval_shrs(tv, env->tv_mode_bits);
337 ir_tarval *tv_h = tarval_convert_to(tv_shrs, mode);
338 ir_node *res_high = new_rd_Const(dbg, irg, tv_h);
340 ir_set_dw_lowered(node, res_low, res_high);
344 * Translate a Load: create two.
/*
 * Replace a doubleword Load by two word Loads chained through memory
 * (low first, then high via the low Load's M Proj).  The address of the
 * second load is adr + word-size; which half gets the offset depends on
 * endianness.  Existing Projs are rerouted to the new Loads.
 * NOTE(review): assignments of the non-offset half (low/high = adr) and
 * break statements are missing from this extract.
 */
346 static void lower_Load(ir_node *node, ir_mode *mode)
348 ir_mode *low_mode = env->low_unsigned;
349 ir_graph *irg = get_irn_irg(node);
350 ir_node *adr = get_Load_ptr(node);
351 ir_node *mem = get_Load_mem(node);
352 ir_node *low, *high, *proj;
354 ir_node *block = get_nodes_block(node);
355 ir_cons_flags volatility = get_Load_volatility(node) == volatility_is_volatile
356 ? cons_volatile : cons_none;
358 if (env->params->little_endian) {
/* little endian: low word at adr, high word at adr + word size */
360 high = new_r_Add(block, adr, new_r_Const(irg, env->tv_mode_bytes), get_irn_mode(adr));
/* big endian: high word at adr, low word at adr + word size */
362 low = new_r_Add(block, adr, new_r_Const(irg, env->tv_mode_bytes), get_irn_mode(adr));
366 /* create two loads */
367 dbg = get_irn_dbg_info(node);
368 low = new_rd_Load(dbg, block, mem, low, low_mode, volatility);
369 proj = new_r_Proj(low, mode_M, pn_Load_M);
370 high = new_rd_Load(dbg, block, proj, high, mode, volatility);
372 ir_set_dw_lowered(node, low, high);
374 for (proj = (ir_node*)get_irn_link(node); proj;
375 proj = (ir_node*)get_irn_link(proj)) {
376 switch (get_Proj_proj(proj)) {
377 case pn_Load_M: /* Memory result. */
378 /* put it to the second one */
379 set_Proj_pred(proj, high);
381 case pn_Load_X_except: /* Execution result if exception occurred. */
382 /* put it to the first one */
383 set_Proj_pred(proj, low);
385 case pn_Load_res: { /* Result of load operation. */
386 ir_node *res_low = new_r_Proj(low, low_mode, pn_Load_res);
387 ir_node *res_high = new_r_Proj(high, mode, pn_Load_res);
388 ir_set_dw_lowered(proj, res_low, res_high);
392 assert(0 && "unexpected Proj number");
394 /* mark this proj: we have handled it already, otherwise we might fall
395 * into out new nodes. */
396 mark_irn_visited(proj);
401 * Translate a Store: create two.
403 static void lower_Store(ir_node *node, ir_mode *mode)
406 ir_node *block, *adr, *mem;
407 ir_node *low, *high, *proj;
409 ir_node *value = get_Store_value(node);
410 const lower64_entry_t *entry = get_node_entry(value);
411 ir_cons_flags volatility = get_Store_volatility(node) == volatility_is_volatile
412 ? cons_volatile : cons_none;
417 if (! entry->low_word) {
418 /* not ready yet, wait */
419 pdeq_putr(env->waitq, node);
423 irg = get_irn_irg(node);
424 adr = get_Store_ptr(node);
425 mem = get_Store_mem(node);
426 block = get_nodes_block(node);
428 if (env->params->little_endian) {
430 high = new_r_Add(block, adr, new_r_Const(irg, env->tv_mode_bytes), get_irn_mode(adr));
432 low = new_r_Add(block, adr, new_r_Const(irg, env->tv_mode_bytes), get_irn_mode(adr));
436 /* create two Stores */
437 dbg = get_irn_dbg_info(node);
438 low = new_rd_Store(dbg, block, mem, low, entry->low_word, volatility);
439 proj = new_r_Proj(low, mode_M, pn_Store_M);
440 high = new_rd_Store(dbg, block, proj, high, entry->high_word, volatility);
442 ir_set_dw_lowered(node, low, high);
444 for (proj = (ir_node*)get_irn_link(node); proj;
445 proj = (ir_node*)get_irn_link(proj)) {
446 switch (get_Proj_proj(proj)) {
447 case pn_Store_M: /* Memory result. */
448 /* put it to the second one */
449 set_Proj_pred(proj, high);
451 case pn_Store_X_except: /* Execution result if exception occurred. */
452 /* put it to the first one */
453 set_Proj_pred(proj, low);
456 assert(0 && "unexpected Proj number");
458 /* mark this proj: we have handled it already, otherwise we might fall into
460 mark_irn_visited(proj);
465 * Return a node containing the address of the intrinsic emulation function.
467 * @param method the method type of the emulation function
468 * @param op the emulated ir_op
469 * @param imode the input mode of the emulated opcode
470 * @param omode the output mode of the emulated opcode
471 * @param env the lower environment
/*
 * Caches (op, imode, omode) -> entity in intrinsic_fkt; on a miss the
 * entity is produced by the user-supplied params->create_intrinsic
 * callback.  Returns a SymConst addressing the entity.
 */
473 static ir_node *get_intrinsic_address(ir_type *method, ir_op *op,
474 ir_mode *imode, ir_mode *omode)
478 op_mode_entry_t key, *entry;
485 entry = (op_mode_entry_t*)set_insert(intrinsic_fkt, &key, sizeof(key),
486 HASH_PTR(op) ^ HASH_PTR(imode) ^ (HASH_PTR(omode) << 8));
488 /* create a new one */
489 ent = env->params->create_intrinsic(method, op, imode, omode, env->params->ctx);
491 assert(ent && "Intrinsic creator must return an entity");
497 return new_r_SymConst(env->irg, mode_P_code, sym, symconst_addr_ent);
503 * Create an intrinsic Call.
/*
 * Lower a doubleword Div to a call of the signed/unsigned binop intrinsic.
 * Both operands are passed as (low, high) word pairs ordered by
 * endianness; Div's M and X_except Projs are rerouted to the Call, and
 * the result Proj is split into two word Projs of the result tuple.
 * NOTE(review): break statements and the pn_Div_res case label are
 * missing from this extract.
 */
505 static void lower_Div(ir_node *node, ir_mode *mode)
507 ir_node *left = get_Div_left(node);
508 ir_node *right = get_Div_right(node);
509 ir_node *block = get_nodes_block(node);
510 dbg_info *dbgi = get_irn_dbg_info(node);
511 ir_type *mtp = mode_is_signed(mode) ? binop_tp_s : binop_tp_u;
512 ir_mode *opmode = get_irn_op_mode(node);
514 = get_intrinsic_address(mtp, get_irn_op(node), opmode, opmode);
518 const ir_edge_t *edge;
519 const ir_edge_t *next;
521 if (env->params->little_endian) {
522 in[0] = get_lowered_low(left);
523 in[1] = get_lowered_high(left);
524 in[2] = get_lowered_low(right);
525 in[3] = get_lowered_high(right);
527 in[0] = get_lowered_high(left);
528 in[1] = get_lowered_low(left);
529 in[2] = get_lowered_high(right);
530 in[3] = get_lowered_low(right);
532 call = new_rd_Call(dbgi, block, get_Div_mem(node), addr, 4, in, mtp);
533 resproj = new_r_Proj(call, mode_T, pn_Call_T_result);
534 set_irn_pinned(call, get_irn_pinned(node));
536 for (proj = (ir_node*)get_irn_link(node); proj;
537 proj = (ir_node*)get_irn_link(proj)) {
538 switch (get_Proj_proj(proj)) {
539 case pn_Div_M: /* Memory result. */
540 /* reroute to the call */
541 set_Proj_pred(proj, call);
542 set_Proj_proj(proj, pn_Call_M);
544 case pn_Div_X_except: /* Execution result if exception occurred. */
545 /* reroute to the call */
546 set_Proj_pred(proj, call);
547 set_Proj_proj(proj, pn_Call_X_except);
/* result Proj: split into the two word results of the intrinsic call */
550 if (env->params->little_endian) {
551 ir_node *res_low = new_r_Proj(resproj, env->low_unsigned, 0);
552 ir_node *res_high = new_r_Proj(resproj, mode, 1);
553 ir_set_dw_lowered(proj, res_low, res_high);
555 ir_node *res_low = new_r_Proj(resproj, env->low_unsigned, 1);
556 ir_node *res_high = new_r_Proj(resproj, mode, 0);
557 ir_set_dw_lowered(proj, res_low, res_high);
561 assert(0 && "unexpected Proj number");
563 /* mark this proj: we have handled it already, otherwise we might fall into
565 mark_irn_visited(proj);
572 * Create an intrinsic Call.
/*
 * Lower a doubleword Mod — structurally identical to lower_Div():
 * emit an intrinsic Call with endianness-ordered (low, high) word pairs,
 * reroute M/X_except Projs, split the result Proj into two word Projs.
 * NOTE(review): break statements and the pn_Mod_res case label are
 * missing from this extract.
 */
574 static void lower_Mod(ir_node *node, ir_mode *mode)
576 ir_node *left = get_Mod_left(node);
577 ir_node *right = get_Mod_right(node);
578 dbg_info *dbgi = get_irn_dbg_info(node);
579 ir_node *block = get_nodes_block(node);
580 ir_type *mtp = mode_is_signed(mode) ? binop_tp_s : binop_tp_u;
581 ir_mode *opmode = get_irn_op_mode(node);
583 = get_intrinsic_address(mtp, get_irn_op(node), opmode, opmode);
587 const ir_edge_t *edge;
588 const ir_edge_t *next;
590 if (env->params->little_endian) {
591 in[0] = get_lowered_low(left);
592 in[1] = get_lowered_high(left);
593 in[2] = get_lowered_low(right);
594 in[3] = get_lowered_high(right);
596 in[0] = get_lowered_high(left);
597 in[1] = get_lowered_low(left);
598 in[2] = get_lowered_high(right);
599 in[3] = get_lowered_low(right);
601 call = new_rd_Call(dbgi, block, get_Mod_mem(node), addr, 4, in, mtp);
602 resproj = new_r_Proj(call, mode_T, pn_Call_T_result);
603 set_irn_pinned(call, get_irn_pinned(node));
605 for (proj = (ir_node*)get_irn_link(node); proj;
606 proj = (ir_node*)get_irn_link(proj)) {
607 switch (get_Proj_proj(proj)) {
608 case pn_Mod_M: /* Memory result. */
609 /* reroute to the call */
610 set_Proj_pred(proj, call);
611 set_Proj_proj(proj, pn_Call_M);
613 case pn_Mod_X_except: /* Execution result if exception occurred. */
614 /* reroute to the call */
615 set_Proj_pred(proj, call);
616 set_Proj_proj(proj, pn_Call_X_except);
/* result Proj: split into the two word results of the intrinsic call */
619 if (env->params->little_endian) {
620 ir_node *res_low = new_r_Proj(resproj, env->low_unsigned, 0);
621 ir_node *res_high = new_r_Proj(resproj, mode, 1);
622 ir_set_dw_lowered(proj, res_low, res_high);
624 ir_node *res_low = new_r_Proj(resproj, env->low_unsigned, 1);
625 ir_node *res_high = new_r_Proj(resproj, mode, 0);
626 ir_set_dw_lowered(proj, res_low, res_high);
630 assert(0 && "unexpected Proj number");
632 /* mark this proj: we have handled it already, otherwise we might fall
633 * into out new nodes. */
634 mark_irn_visited(proj);
641 * Create an intrinsic Call.
/*
 * Generic lowering for memory-less doubleword binops (e.g. Add/Sub/Mul):
 * call the matching signed/unsigned binop intrinsic with NoMem, with the
 * four word operands ordered by endianness, then register the two result
 * Projs as the node's lowered halves.
 */
643 static void lower_binop(ir_node *node, ir_mode *mode)
645 ir_node *left = get_binop_left(node);
646 ir_node *right = get_binop_right(node);
647 dbg_info *dbgi = get_irn_dbg_info(node);
648 ir_node *block = get_nodes_block(node);
649 ir_graph *irg = get_irn_irg(block);
650 ir_type *mtp = mode_is_signed(mode) ? binop_tp_s : binop_tp_u;
651 ir_node *addr = get_intrinsic_address(mtp, get_irn_op(node), mode, mode);
656 if (env->params->little_endian) {
657 in[0] = get_lowered_low(left);
658 in[1] = get_lowered_high(left);
659 in[2] = get_lowered_low(right);
660 in[3] = get_lowered_high(right);
662 in[0] = get_lowered_high(left);
663 in[1] = get_lowered_low(left);
664 in[2] = get_lowered_high(right);
665 in[3] = get_lowered_low(right);
667 call = new_rd_Call(dbgi, block, get_irg_no_mem(irg), addr, 4, in, mtp);
668 resproj = new_r_Proj(call, mode_T, pn_Call_T_result);
669 set_irn_pinned(call, get_irn_pinned(node));
671 if (env->params->little_endian) {
672 ir_node *res_low = new_r_Proj(resproj, env->low_unsigned, 0);
673 ir_node *res_high = new_r_Proj(resproj, mode, 1);
674 ir_set_dw_lowered(node, res_low, res_high);
676 ir_node *res_low = new_r_Proj(resproj, env->low_unsigned, 1);
677 ir_node *res_high = new_r_Proj(resproj, mode, 0);
678 ir_set_dw_lowered(node, res_low, res_high);
683 * Translate a Shiftop.
685 * Create an intrinsic Call.
/*
 * Generic shift lowering: call the shiftop intrinsic with three args —
 * the value's low and high words plus the (word-sized) shift amount.
 * Unlike lower_binop(), result order here is fixed (low = index 0);
 * NOTE(review): confirm against the full source whether big-endian
 * result ordering is handled elsewhere.
 */
687 static void lower_Shiftop(ir_node *node, ir_mode *mode)
689 ir_node *block = get_nodes_block(node);
690 ir_node *left = get_binop_left(node);
691 const lower64_entry_t *left_entry = get_node_entry(left);
692 ir_node *right = get_binop_right(node);
694 left_entry->low_word, left_entry->high_word,
695 /* it should be safe to conv to low_unsigned */
696 new_r_Conv(block, right, env->low_unsigned)
698 dbg_info *dbgi = get_irn_dbg_info(node);
699 ir_graph *irg = get_irn_irg(block);
701 = mode_is_signed(mode) ? shiftop_tp_s : shiftop_tp_u;
703 = get_intrinsic_address(mtp, get_irn_op(node), mode, mode);
705 = new_rd_Call(dbgi, block, get_irg_no_mem(irg), addr, 3, in, mtp);
706 ir_node *resproj = new_r_Proj(call, mode_T, pn_Call_T_result);
707 ir_node *res_low = new_r_Proj(resproj, env->low_unsigned, 0);
708 ir_node *res_high = new_r_Proj(resproj, mode, 1);
710 set_irn_pinned(call, get_irn_pinned(node));
711 ir_set_dw_lowered(node, res_low, res_high);
715 * Translate a Shr and handle special cases.
/*
 * Special case: shifting right by >= word width means the result's low
 * word is just the high word shifted by (count - wordsize), and the
 * high word becomes zero.  Otherwise fall back to the intrinsic call.
 */
717 static void lower_Shr(ir_node *node, ir_mode *mode)
719 ir_graph *irg = get_irn_irg(node);
720 ir_node *right = get_Shr_right(node);
722 if (get_mode_arithmetic(mode) == irma_twos_complement && is_Const(right)) {
723 ir_tarval *tv = get_Const_tarval(right);
725 if (tarval_is_long(tv) &&
726 get_tarval_long(tv) >= (long)get_mode_size_bits(mode)) {
727 ir_node *block = get_nodes_block(node);
728 ir_node *left = get_Shr_left(node);
729 ir_mode *low_unsigned = env->low_unsigned;
730 long shf_cnt = get_tarval_long(tv) - get_mode_size_bits(mode);
731 const lower64_entry_t *left_entry = get_node_entry(left);
735 left = left_entry->high_word;
737 /* convert high word into low_unsigned mode if necessary */
738 if (get_irn_mode(left) != low_unsigned)
739 left = new_r_Conv(block, left, low_unsigned);
742 ir_node *c = new_r_Const_long(irg, low_unsigned, shf_cnt);
743 res_low = new_r_Shr(block, left, c, low_unsigned);
/* high word of the result is always zero for shift counts >= word size */
747 res_high = new_r_Const(irg, get_mode_null(mode));
748 ir_set_dw_lowered(node, res_low, res_high);
/* no special case matched: use the generic intrinsic-call lowering */
753 lower_Shiftop(node, mode);
757 * Translate a Shl and handle special cases.
/*
 * Special cases for doubleword Shl:
 *  - count >= word width: result high word is the (converted) low word
 *    shifted by (count - wordsize); low word becomes zero.
 *  - count == 1 (branch header missing from this extract): rewritten as
 *    left + left via the Add intrinsic.
 * Otherwise fall back to the generic intrinsic call.
 */
759 static void lower_Shl(ir_node *node, ir_mode *mode)
761 ir_graph *irg = get_irn_irg(node);
762 ir_node *right = get_Shl_right(node);
764 if (get_mode_arithmetic(mode) == irma_twos_complement && is_Const(right)) {
765 ir_tarval *tv = get_Const_tarval(right);
767 if (tarval_is_long(tv)) {
768 long value = get_tarval_long(tv);
769 if (value >= (long)get_mode_size_bits(mode)) {
770 /* simple case: shift above the lower word */
772 ir_node *block = get_nodes_block(node);
773 ir_node *left = get_Shl_left(node);
775 long shf_cnt = get_tarval_long(tv) - get_mode_size_bits(mode);
776 const lower64_entry_t *left_entry = get_node_entry(left);
780 left = left_entry->low_word;
781 left = new_r_Conv(block, left, mode);
783 mode_l = env->low_unsigned;
785 c = new_r_Const_long(irg, mode_l, shf_cnt);
786 res_high = new_r_Shl(block, left, c, mode);
/* low word of the result is always zero for shift counts >= word size */
790 res_low = new_r_Const(irg, get_mode_null(mode_l));
791 ir_set_dw_lowered(node, res_low, res_high);
796 /* left << 1 == left + left */
797 ir_node *left = get_binop_left(node);
798 const lower64_entry_t *left_entry = get_node_entry(left);
800 left_entry->low_word, left_entry->high_word,
801 left_entry->low_word, left_entry->high_word,
803 dbg_info *dbgi = get_irn_dbg_info(node);
804 ir_node *block = get_nodes_block(node);
805 ir_graph *irg = get_irn_irg(block);
807 = mode_is_signed(mode) ? binop_tp_s : binop_tp_u;
809 = get_intrinsic_address(mtp, op_Add, mode, mode);
811 = new_rd_Call(dbgi, block, get_irg_no_mem(irg), addr, 4, in, mtp);
812 ir_node *resproj = new_r_Proj(call, mode_T, pn_Call_T_result);
813 ir_node *res_low = new_r_Proj(resproj, env->low_unsigned, 0);
814 ir_node *res_high = new_r_Proj(resproj, mode, 1);
815 set_irn_pinned(call, get_irn_pinned(node));
816 ir_set_dw_lowered(node, res_low, res_high);
/* no special case matched: use the generic intrinsic-call lowering */
822 lower_Shiftop(node, mode);
826 * Translate a Shrs and handle special cases.
/*
 * Special case: arithmetic right shift by >= word width.  The low word
 * is the high word (converted to unsigned) shifted by (count - wordsize),
 * and the high word collapses to the sign: high >>s (wordsize - 1),
 * built here via an all-one shift-count constant.
 */
828 static void lower_Shrs(ir_node *node, ir_mode *mode)
830 ir_graph *irg = get_irn_irg(node);
831 ir_node *right = get_Shrs_right(node);
833 if (get_mode_arithmetic(mode) == irma_twos_complement && is_Const(right)) {
834 ir_tarval *tv = get_Const_tarval(right);
836 if (tarval_is_long(tv) &&
837 get_tarval_long(tv) >= (long)get_mode_size_bits(mode)) {
838 ir_node *block = get_nodes_block(node);
839 ir_node *left = get_Shrs_left(node);
840 ir_mode *low_unsigned = env->low_unsigned;
841 long shf_cnt = get_tarval_long(tv) - get_mode_size_bits(mode);
842 const lower64_entry_t *left_entry = get_node_entry(left);
843 ir_node *left_unsigned = left;
848 left = left_entry->high_word;
850 /* convert high word into low_unsigned mode if necessary */
851 if (get_irn_mode(left_unsigned) != low_unsigned)
852 left_unsigned = new_r_Conv(block, left, low_unsigned);
855 c = new_r_Const_long(irg, low_unsigned, shf_cnt);
856 res_low = new_r_Shrs(block, left_unsigned, c, low_unsigned);
858 res_low = left_unsigned;
/* replicate the sign bit across the whole high word */
861 c = new_r_Const(irg, get_mode_all_one(low_unsigned));
862 res_high = new_rd_Shrs(block, left, c, mode);
863 ir_set_dw_lowered(node, res_low, res_high);
/* no special case matched: use the generic intrinsic-call lowering */
867 lower_Shiftop(node, mode);
871 * Rebuild Rotl nodes into Or(Shl, Shr) and prepare all nodes.
/*
 * Walker callback for the preparation phase.  Doubleword Rotl has no
 * intrinsic, so Rotl(x, y) is expanded to Or(Shl(x, y), Shr(x, bits - y))
 * before the normal prepare_links() bookkeeping runs on the new nodes.
 */
873 static void prepare_links_and_handle_rotl(ir_node *node, void *data)
877 ir_mode *mode = get_irn_op_mode(node);
879 ir_node *left, *shl, *shr, *ornode, *block, *sub, *c;
880 ir_mode *omode, *rmode;
883 optimization_state_t state;
885 if (mode != env->high_signed && mode != env->high_unsigned) {
890 /* replace the Rotl(x,y) by an Or(Shl(x,y), Shr(x,64-y)) */
891 right = get_Rotl_right(node);
892 irg = get_irn_irg(node);
893 dbg = get_irn_dbg_info(node);
894 omode = get_irn_mode(node);
895 left = get_Rotl_left(node);
896 block = get_nodes_block(node);
897 shl = new_rd_Shl(dbg, block, left, right, omode);
898 rmode = get_irn_mode(right);
899 c = new_r_Const_long(irg, rmode, get_mode_size_bits(omode));
900 sub = new_rd_Sub(dbg, block, c, right, rmode);
901 shr = new_rd_Shr(dbg, block, left, sub, omode);
903 /* switch optimization off here, or we will get the Rotl back */
904 save_optimization_state(&state);
905 set_opt_algebraic_simplification(0);
906 ornode = new_rd_Or(dbg, block, shl, shr, omode);
907 restore_optimization_state(&state);
909 exchange(node, ornode);
911 /* do lowering on the new nodes */
916 prepare_links(ornode);
926 * Create an intrinsic Call.
/*
 * Lower a doubleword unop (e.g. Minus) to a two-argument intrinsic call:
 * operand passed as endianness-ordered (low, high) word pair, result
 * split back into two word Projs.
 */
928 static void lower_unop(ir_node *node, ir_mode *mode)
930 ir_node *op = get_unop_op(node);
931 dbg_info *dbgi = get_irn_dbg_info(node);
932 ir_node *block = get_nodes_block(node);
933 ir_graph *irg = get_irn_irg(block);
934 ir_type *mtp = mode_is_signed(mode) ? unop_tp_s : unop_tp_u;
935 ir_op *irop = get_irn_op(node);
936 ir_node *addr = get_intrinsic_address(mtp, irop, mode, mode);
937 ir_node *nomem = get_irg_no_mem(irg);
942 if (env->params->little_endian) {
943 in[0] = get_lowered_low(op);
944 in[1] = get_lowered_high(op);
946 in[0] = get_lowered_high(op);
947 in[1] = get_lowered_low(op);
949 call = new_rd_Call(dbgi, block, nomem, addr, 2, in, mtp);
950 resproj = new_r_Proj(call, mode_T, pn_Call_T_result);
951 set_irn_pinned(call, get_irn_pinned(node));
953 if (env->params->little_endian) {
954 ir_node *res_low = new_r_Proj(resproj, env->low_unsigned, 0);
955 ir_node *res_high = new_r_Proj(resproj, mode, 1);
956 ir_set_dw_lowered(node, res_low, res_high);
958 ir_node *res_low = new_r_Proj(resproj, env->low_unsigned, 1);
959 ir_node *res_high = new_r_Proj(resproj, mode, 0);
960 ir_set_dw_lowered(node, res_low, res_high);
965 * Translate a logical binop.
967 * Create two logical binops.
/*
 * Bitwise ops need no intrinsic: apply the constructor independently to
 * the low-word pair and the high-word pair.
 * @param constr_rd  node constructor (new_rd_And / new_rd_Or / new_rd_Eor)
 */
969 static void lower_binop_logical(ir_node *node, ir_mode *mode,
970 ir_node *(*constr_rd)(dbg_info *db, ir_node *block, ir_node *op1, ir_node *op2, ir_mode *mode) )
972 ir_node *left = get_binop_left(node);
973 ir_node *right = get_binop_right(node);
974 const lower64_entry_t *left_entry = get_node_entry(left);
975 const lower64_entry_t *right_entry = get_node_entry(right);
976 dbg_info *dbgi = get_irn_dbg_info(node);
977 ir_node *block = get_nodes_block(node);
979 = constr_rd(dbgi, block, left_entry->low_word, right_entry->low_word,
982 = constr_rd(dbgi, block, left_entry->high_word, right_entry->high_word,
984 ir_set_dw_lowered(node, res_low, res_high);
/* Lower a doubleword And: word-wise And on low and high halves. */
987 static void lower_And(ir_node *node, ir_mode *mode)
989 lower_binop_logical(node, mode, new_rd_And);
/* Lower a doubleword Or: word-wise Or on low and high halves. */
992 static void lower_Or(ir_node *node, ir_mode *mode)
994 lower_binop_logical(node, mode, new_rd_Or);
/* Lower a doubleword Eor (xor): word-wise Eor on low and high halves. */
997 static void lower_Eor(ir_node *node, ir_mode *mode)
999 lower_binop_logical(node, mode, new_rd_Eor);
1005 * Create two logical Nots.
/* Lower a doubleword Not: independent Not on low and high words. */
1007 static void lower_Not(ir_node *node, ir_mode *mode)
1009 ir_node *op = get_Not_op(node);
1010 const lower64_entry_t *op_entry = get_node_entry(op);
1011 dbg_info *dbgi = get_irn_dbg_info(node);
1012 ir_node *block = get_nodes_block(node);
1014 = new_rd_Not(dbgi, block, op_entry->low_word, env->low_unsigned);
1016 = new_rd_Not(dbgi, block, op_entry->high_word, mode);
1017 ir_set_dw_lowered(node, res_low, res_high);
/*
 * True if the Cmp is a (non-)equality test against constant 0 that can be
 * lowered as Or(low, high) ==/!= 0.  Besides `== 0` and `!= 0` this also
 * accepts relations equivalent to `!= 0`: `<>` for signed operands and
 * `>` for unsigned ones (where `> 0` is the same as `!= 0`).
 */
1020 static bool is_equality_cmp_0(const ir_node *node)
1022 ir_relation relation = get_Cmp_relation(node);
1023 ir_node *left = get_Cmp_left(node);
1024 ir_node *right = get_Cmp_right(node);
1025 ir_mode *mode = get_irn_mode(left);
1027 /* this probably makes no sense if unordered is involved */
1028 assert(!mode_is_float(mode));
1030 if (!is_Const(right) || !is_Const_null(right))
1032 if (relation == ir_relation_equal)
1034 if (mode_is_signed(mode)) {
1035 return relation == ir_relation_less_greater;
1037 return relation == ir_relation_greater;
/*
 * Lower a Cond whose selector is a doubleword Cmp.  Strategy by relation:
 *  - compare against 0 for (in)equality: Or the two words and compare once;
 *  - ==  : high words equal AND low words equal (two chained Conds);
 *  - !=  : high words differ OR low words differ;
 *  - any other relation: a REL b  <==>  a_h REL b_h || (a_h == b_h && a_l rel b_l),
 *    built from three Cmp/Cond pairs across two new blocks.
 * Rerouted control flow is patched into the original destination blocks via
 * add_block_cf_input(), and CF_CHANGED is recorded.
 * NOTE(review): this extract omits many lines (projT/projF assignments,
 * break statements, some Cmp relation arguments) — treat structure, not
 * the literal text, as authoritative and check the full source.
 */
1044 static void lower_Cond(ir_node *node, ir_mode *mode)
1046 ir_node *left, *right, *block;
1047 ir_node *sel = get_Cond_selector(node);
1048 ir_mode *m = get_irn_mode(sel);
1050 const lower64_entry_t *lentry, *rentry;
1051 ir_node *proj, *projT = NULL, *projF = NULL;
1052 ir_node *new_bl, *irn;
1053 ir_node *projHF, *projHT;
1055 ir_relation relation;
1062 if (m == env->high_signed || m == env->high_unsigned) {
1063 /* bad we can't really handle Switch with 64bit offsets */
1064 panic("Cond with 64bit jumptable not supported");
1075 left = get_Cmp_left(sel);
1076 cmp_mode = get_irn_mode(left);
/* nothing to do unless the compared values are doubleword */
1077 if (cmp_mode != env->high_signed && cmp_mode != env->high_unsigned) {
1082 right = get_Cmp_right(sel);
1085 lentry = get_node_entry(left);
1086 rentry = get_node_entry(right);
1088 /* all right, build the code */
/* collect the Cond's true/false Projs from the link list */
1089 for (proj = (ir_node*)get_irn_link(node); proj;
1090 proj = (ir_node*)get_irn_link(proj)) {
1091 long proj_nr = get_Proj_proj(proj);
1093 if (proj_nr == pn_Cond_true) {
1094 assert(projT == NULL && "more than one Proj(true)");
1097 assert(proj_nr == pn_Cond_false);
1098 assert(projF == NULL && "more than one Proj(false)");
1101 mark_irn_visited(proj);
1103 assert(projT && projF);
1105 /* create a new high compare */
1106 block = get_nodes_block(node);
1107 irg = get_Block_irg(block);
1108 dbg = get_irn_dbg_info(sel);
1109 relation = get_Cmp_relation(sel);
1111 if (is_equality_cmp_0(sel)) {
1112 /* x ==/!= 0 ==> or(low,high) ==/!= 0 */
1113 ir_mode *mode = env->low_unsigned;
1114 ir_node *low = new_r_Conv(block, lentry->low_word, mode);
1115 ir_node *high = new_r_Conv(block, lentry->high_word, mode);
1116 ir_node *ornode = new_rd_Or(dbg, block, low, high, mode);
1117 ir_node *cmp = new_rd_Cmp(dbg, block, ornode, new_r_Const_long(irg, mode, 0), relation);
1118 set_Cond_selector(node, cmp);
1122 if (relation == ir_relation_equal) {
1123 /* simple case:a == b <==> a_h == b_h && a_l == b_l */
1124 pmap_entry *entry = pmap_find(env->proj_2_block, projF);
1127 dst_blk = (ir_node*)entry->value;
1129 irn = new_rd_Cmp(dbg, block, lentry->high_word, rentry->high_word,
1131 dbg = get_irn_dbg_info(node);
1132 irn = new_rd_Cond(dbg, block, irn);
/* high words unequal: whole compare is false */
1134 projHF = new_r_Proj(irn, mode_X, pn_Cond_false);
1135 mark_irn_visited(projHF);
1136 exchange(projF, projHF);
1138 projHT = new_r_Proj(irn, mode_X, pn_Cond_true);
1139 mark_irn_visited(projHT);
/* high words equal: decide in a new block on the low words */
1141 new_bl = new_r_Block(irg, 1, &projHT);
1143 dbg = get_irn_dbg_info(sel);
1144 irn = new_rd_Cmp(dbg, new_bl, lentry->low_word, rentry->low_word,
1146 dbg = get_irn_dbg_info(node);
1147 irn = new_rd_Cond(dbg, new_bl, irn);
1149 proj = new_r_Proj(irn, mode_X, pn_Cond_false);
1150 mark_irn_visited(proj);
1151 add_block_cf_input(dst_blk, projHF, proj);
1153 proj = new_r_Proj(irn, mode_X, pn_Cond_true);
1154 mark_irn_visited(proj);
1155 exchange(projT, proj);
1156 } else if (relation == ir_relation_less_greater) {
1157 /* simple case:a != b <==> a_h != b_h || a_l != b_l */
1158 pmap_entry *entry = pmap_find(env->proj_2_block, projT);
1161 dst_blk = (ir_node*)entry->value;
1163 irn = new_rd_Cmp(dbg, block, lentry->high_word, rentry->high_word,
1164 ir_relation_less_greater);
1165 dbg = get_irn_dbg_info(node);
1166 irn = new_rd_Cond(dbg, block, irn);
/* high words differ: whole compare is true */
1168 projHT = new_r_Proj(irn, mode_X, pn_Cond_true);
1169 mark_irn_visited(projHT);
1170 exchange(projT, projHT);
1172 projHF = new_r_Proj(irn, mode_X, pn_Cond_false);
1173 mark_irn_visited(projHF);
/* high words equal: decide in a new block on the low words */
1175 new_bl = new_r_Block(irg, 1, &projHF);
1177 dbg = get_irn_dbg_info(sel);
1178 irn = new_rd_Cmp(dbg, new_bl, lentry->low_word, rentry->low_word,
1179 ir_relation_less_greater);
1180 dbg = get_irn_dbg_info(node);
1181 irn = new_rd_Cond(dbg, new_bl, irn);
1183 proj = new_r_Proj(irn, mode_X, pn_Cond_true);
1184 mark_irn_visited(proj);
1185 add_block_cf_input(dst_blk, projHT, proj);
1187 proj = new_r_Proj(irn, mode_X, pn_Cond_false);
1188 mark_irn_visited(proj);
1189 exchange(projF, proj);
1191 /* a rel b <==> a_h REL b_h || (a_h == b_h && a_l rel b_l) */
1192 ir_node *dstT, *dstF, *newbl_eq, *newbl_l;
1196 entry = pmap_find(env->proj_2_block, projT);
1198 dstT = (ir_node*)entry->value;
1200 entry = pmap_find(env->proj_2_block, projF);
1202 dstF = (ir_node*)entry->value;
/* first test: strict high-word relation (equality stripped) */
1204 irn = new_rd_Cmp(dbg, block, lentry->high_word, rentry->high_word,
1205 relation & ~ir_relation_equal);
1206 dbg = get_irn_dbg_info(node);
1207 irn = new_rd_Cond(dbg, block, irn);
1209 projHT = new_r_Proj(irn, mode_X, pn_Cond_true);
1210 mark_irn_visited(projHT);
1212 projHF = new_r_Proj(irn, mode_X, pn_Cond_false);
1213 mark_irn_visited(projHF);
/* second test: are the high words equal? */
1215 newbl_eq = new_r_Block(irg, 1, &projHF);
1217 irn = new_rd_Cmp(dbg, block, lentry->high_word, rentry->high_word,
1219 irn = new_rd_Cond(dbg, newbl_eq, irn);
1221 projEqF = new_r_Proj(irn, mode_X, pn_Cond_false);
1222 mark_irn_visited(projEqF);
1224 proj = new_r_Proj(irn, mode_X, pn_Cond_true);
1225 mark_irn_visited(proj);
/* third test: high words equal, decide on the low words */
1227 newbl_l = new_r_Block(irg, 1, &proj);
1229 dbg = get_irn_dbg_info(sel);
1230 irn = new_rd_Cmp(dbg, newbl_l, lentry->low_word, rentry->low_word,
1232 dbg = get_irn_dbg_info(node);
1233 irn = new_rd_Cond(dbg, newbl_l, irn);
1235 proj = new_r_Proj(irn, mode_X, pn_Cond_true);
1236 mark_irn_visited(proj);
1237 add_block_cf_input(dstT, projT, proj);
1239 proj = new_r_Proj(irn, mode_X, pn_Cond_false);
1240 mark_irn_visited(proj);
1241 add_block_cf_input(dstF, projF, proj);
1243 exchange(projT, projHT);
1244 exchange(projF, projEqF);
1247 /* we have changed the control flow */
1248 env->flags |= CF_CHANGED;
1252 * Translate a Conv to higher_signed
1254 static void lower_Conv_to_Ll(ir_node *node)
/* Lower a Conv whose RESULT mode is a doubleword mode: split it into a
 * low word (always unsigned) and a high word, or — for non-integer
 * sources such as floats — replace it by an intrinsic call.
 * NOTE(review): some lines were lost in extraction; the visible code is
 * incomplete (missing braces/else arms). */
1256 ir_mode *omode = get_irn_mode(node);
1257 ir_node *op = get_Conv_op(node);
1258 ir_mode *imode = get_irn_mode(op);
1259 ir_graph *irg = get_irn_irg(node);
1260 ir_node *block = get_nodes_block(node);
1261 dbg_info *dbg = get_irn_dbg_info(node);
1265 ir_mode *low_unsigned = env->low_unsigned;
/* high word is signed iff the doubleword result mode is signed */
1267 = mode_is_signed(omode) ? env->low_signed : low_unsigned;
1269 if (mode_is_int(imode) || mode_is_reference(imode)) {
1270 if (imode == env->high_signed || imode == env->high_unsigned) {
1271 /* a Conv from Lu to Ls or Ls to Lu: reuse the already-lowered words */
1272 const lower64_entry_t *op_entry = get_node_entry(op);
1273 res_low = op_entry->low_word;
1274 res_high = new_rd_Conv(dbg, block, op_entry->high_word, low_signed);
1276 /* simple case: create a high word */
1277 if (imode != low_unsigned)
1278 op = new_rd_Conv(dbg, block, op, low_unsigned);
1282 if (mode_is_signed(imode)) {
/* sign-extend: high word = low word >> (bits-1) (arithmetic shift) */
1283 int c = get_mode_size_bits(low_signed) - 1;
1284 ir_node *cnst = new_r_Const_long(irg, low_unsigned, c);
1285 if (get_irn_mode(op) != low_signed)
1286 op = new_rd_Conv(dbg, block, op, low_signed);
1287 res_high = new_rd_Shrs(dbg, block, op, cnst, low_signed);
/* unsigned source: zero-extend by using a 0 constant as high word */
1289 res_high = new_r_Const(irg, get_mode_null(low_signed));
1292 } else if (imode == mode_b) {
/* booleans become 0/1 in the low word, high word is 0 */
1293 res_low = new_rd_Conv(dbg, block, op, low_unsigned);
1294 res_high = new_r_Const(irg, get_mode_null(low_signed));
/* anything else (e.g. float -> int64): call a conversion intrinsic
 * returning the two words as results 0 (low) and 1 (high) */
1296 ir_node *irn, *call;
1297 ir_type *mtp = get_conv_type(imode, omode);
1299 irn = get_intrinsic_address(mtp, get_irn_op(node), imode, omode);
1300 call = new_rd_Call(dbg, block, get_irg_no_mem(irg), irn, 1, &op, mtp);
1301 set_irn_pinned(call, get_irn_pinned(node));
1302 irn = new_r_Proj(call, mode_T, pn_Call_T_result);
1304 res_low = new_r_Proj(irn, low_unsigned, 0);
1305 res_high = new_r_Proj(irn, low_signed, 1);
/* record the word pair so users of this Conv find the lowered values */
1307 ir_set_dw_lowered(node, res_low, res_high);
1311 * Translate a Conv from higher_unsigned
1313 static void lower_Conv_from_Ll(ir_node *node)
/* Lower a Conv whose OPERAND mode is a doubleword mode. For integer
 * results only the low word matters; for mode_b the value is non-zero iff
 * low|high is non-zero; other targets go through an intrinsic call. */
1315 ir_node *op = get_Conv_op(node);
1316 ir_mode *omode = get_irn_mode(node);
1317 ir_node *block = get_nodes_block(node);
1318 dbg_info *dbg = get_irn_dbg_info(node);
1319 ir_graph *irg = get_irn_irg(node);
1320 const lower64_entry_t *entry = get_node_entry(op);
1322 if (mode_is_int(omode) || mode_is_reference(omode)) {
/* truncation: the result is just the (possibly converted) low word */
1323 op = entry->low_word;
1325 /* simple case: create a high word */
1326 if (omode != env->low_unsigned)
1327 op = new_rd_Conv(dbg, block, op, omode);
/* keep the Conv node, just feed it the low word */
1329 set_Conv_op(node, op);
1330 } else if (omode == mode_b) {
1331 /* llu ? true : false <=> (low|high) ? true : false */
1332 ir_mode *mode = env->low_unsigned;
1333 ir_node *ornode = new_rd_Or(dbg, block, entry->low_word,
1334 entry->high_word, mode);
1335 set_Conv_op(node, ornode);
/* other result modes (e.g. int64 -> float): intrinsic call taking the
 * two words as arguments */
1337 ir_node *irn, *call, *in[2];
1338 ir_mode *imode = get_irn_mode(op);
1339 ir_type *mtp = get_conv_type(imode, omode);
1341 irn = get_intrinsic_address(mtp, get_irn_op(node), imode, omode);
1342 in[0] = entry->low_word;
1343 in[1] = entry->high_word;
1345 call = new_rd_Call(dbg, block, get_irg_no_mem(irg), irn, 2, in, mtp);
1346 set_irn_pinned(call, get_irn_pinned(node));
1347 irn = new_r_Proj(call, mode_T, pn_Call_T_result);
/* replace the Conv by the intrinsic's single result */
1349 exchange(node, new_r_Proj(irn, omode, 0));
1356 static void lower_Cmp(ir_node *cmp, ir_mode *m)
/* Lower a Cmp of two doubleword values into word-sized compares combined
 * with And/Or in mode_b. Parameter m is the generic lowering-callback
 * mode argument (unused here beyond the signature). */
1358 ir_node *l = get_Cmp_left(cmp);
1359 ir_mode *mode = get_irn_mode(l);
1360 ir_node *r, *low, *high, *t, *res;
1361 ir_relation relation;
1364 const lower64_entry_t *lentry;
1365 const lower64_entry_t *rentry;
/* nothing to do unless the operands are of a doubleword mode */
1368 if (mode != env->high_signed && mode != env->high_unsigned)
1371 r = get_Cmp_right(cmp);
1372 lentry = get_node_entry(l);
1373 rentry = get_node_entry(r);
1374 relation = get_Cmp_relation(cmp);
1375 block = get_nodes_block(cmp);
1376 dbg = get_irn_dbg_info(cmp);
1378 /* easy case for x ==/!= 0 (see lower_Cond for details) */
1379 if (is_equality_cmp_0(cmp)) {
1380 ir_graph *irg = get_irn_irg(cmp);
1381 ir_mode *mode = env->low_unsigned;
1382 ir_node *low = new_r_Conv(block, lentry->low_word, mode);
1383 ir_node *high = new_r_Conv(block, lentry->high_word, mode);
1384 ir_node *ornode = new_rd_Or(dbg, block, low, high, mode);
1385 ir_node *new_cmp = new_rd_Cmp(dbg, block, ornode, new_r_Const_long(irg, mode, 0), relation);
1386 exchange(cmp, new_cmp);
1390 if (relation == ir_relation_equal) {
1391 /* simple case:a == b <==> a_h == b_h && a_l == b_l */
1392 low = new_rd_Cmp(dbg, block, lentry->low_word, rentry->low_word,
1394 high = new_rd_Cmp(dbg, block, lentry->high_word, rentry->high_word,
1396 res = new_rd_And(dbg, block, low, high, mode_b);
1397 } else if (relation == ir_relation_less_greater) {
1398 /* simple case:a != b <==> a_h != b_h || a_l != b_l */
1399 low = new_rd_Cmp(dbg, block, lentry->low_word, rentry->low_word,
1401 high = new_rd_Cmp(dbg, block, lentry->high_word, rentry->high_word,
1403 res = new_rd_Or(dbg, block, low, high, mode_b);
1405 /* a rel b <==> a_h REL b_h || (a_h == b_h && a_l rel b_l) */
/* strict high-word compare: drop the equal bit from the relation */
1406 ir_node *high1 = new_rd_Cmp(dbg, block, lentry->high_word,
1407 rentry->high_word, relation & ~ir_relation_equal);
1408 low = new_rd_Cmp(dbg, block, lentry->low_word, rentry->low_word,
1410 high = new_rd_Cmp(dbg, block, lentry->high_word, rentry->high_word,
1412 t = new_rd_And(dbg, block, low, high, mode_b);
1413 res = new_rd_Or(dbg, block, high1, t, mode_b);
1421 static void lower_Conv(ir_node *node, ir_mode *mode)
/* Dispatch Conv lowering: if the result mode is a doubleword mode the
 * conversion produces a word pair (lower_Conv_to_Ll), otherwise if the
 * operand mode is a doubleword mode the pair is consumed
 * (lower_Conv_from_Ll). The incoming mode parameter is ignored; the
 * node's own mode is re-read. */
1423 mode = get_irn_mode(node);
1425 if (mode == env->high_signed || mode == env->high_unsigned) {
1426 lower_Conv_to_Ll(node);
1428 ir_mode *op_mode = get_irn_mode(get_Conv_op(node));
1430 if (op_mode == env->high_signed || op_mode == env->high_unsigned) {
1431 lower_Conv_from_Ll(node);
1437 * Remember the new argument index of this value type entity in the lowered
1440 * @param ent the entity
1441 * @param pos the argument index of this entity
1443 static inline void set_entity_arg_idx(ir_entity *ent, size_t pos)
/* Store the lowered argument index in the entity's link field (read back
 * later by get_entity_arg_idx()). */
1445 set_entity_link(ent, INT_TO_PTR(pos));
1449 * Retrieve the argument index of a value type entity.
1451 * @param ent the entity
1453 static size_t get_entity_arg_idx(const ir_entity *ent) {
/* Inverse of set_entity_arg_idx(): decode the argument index previously
 * stashed in the entity's link field. */
1454 return PTR_TO_INT(get_entity_link(ent));
1458 * Lower the method type.
1460 * @param env the lower environment
1461 * @param mtp the method type to lower
1463 * @return the lowered type
1465 static ir_type *lower_mtp(ir_type *mtp)
/* Produce (and cache in the lowered_type pmap) the lowered variant of a
 * method type: every doubleword parameter/result is replaced by TWO
 * word-sized ones, ordered low/high for little-endian targets and
 * high/low otherwise. Returns the cached type on a repeat call. */
1468 ir_type *res, *value_type;
1470 entry = pmap_find(lowered_type, mtp);
1472 size_t i, orig_n_params, orig_n_res, n_param, n_res;
1474 /* count new number of params */
1475 n_param = orig_n_params = get_method_n_params(mtp);
1476 for (i = orig_n_params; i > 0;) {
1477 ir_type *tp = get_method_param_type(mtp, --i);
1479 if (is_Primitive_type(tp)) {
1480 ir_mode *mode = get_type_mode(tp);
1482 if (mode == env->high_signed || mode == env->high_unsigned)
1487 /* count new number of results */
1488 n_res = orig_n_res = get_method_n_ress(mtp);
1489 for (i = orig_n_res; i > 0;) {
1490 ir_type *tp = get_method_res_type(mtp, --i);
1492 if (is_Primitive_type(tp)) {
1493 ir_mode *mode = get_type_mode(tp);
1495 if (mode == env->high_signed || mode == env->high_unsigned)
1500 res = new_type_method(n_param, n_res);
1502 /* set param types and result types */
1503 for (i = n_param = 0; i < orig_n_params; ++i) {
1504 ir_type *tp = get_method_param_type(mtp, i);
1506 if (is_Primitive_type(tp)) {
1507 ir_mode *mode = get_type_mode(tp);
1509 if (mode == env->high_signed) {
/* signed doubleword: unsigned low word + signed high word,
 * endianness decides which comes first */
1510 if (env->params->little_endian) {
1511 set_method_param_type(res, n_param++, tp_u);
1512 set_method_param_type(res, n_param++, tp_s);
1514 set_method_param_type(res, n_param++, tp_s);
1515 set_method_param_type(res, n_param++, tp_u);
1517 } else if (mode == env->high_unsigned) {
/* unsigned doubleword: both words unsigned, order irrelevant */
1518 set_method_param_type(res, n_param++, tp_u);
1519 set_method_param_type(res, n_param++, tp_u);
1521 set_method_param_type(res, n_param++, tp);
1524 set_method_param_type(res, n_param++, tp);
/* same splitting scheme for the result types */
1527 for (i = n_res = 0; i < orig_n_res; ++i) {
1528 ir_type *tp = get_method_res_type(mtp, i);
1530 if (is_Primitive_type(tp)) {
1531 ir_mode *mode = get_type_mode(tp);
1533 if (mode == env->high_signed) {
1534 if (env->params->little_endian) {
1535 set_method_res_type(res, n_res++, tp_u);
1536 set_method_res_type(res, n_res++, tp_s);
1538 set_method_res_type(res, n_res++, tp_s);
1539 set_method_res_type(res, n_res++, tp_u);
1541 } else if (mode == env->high_unsigned) {
1542 set_method_res_type(res, n_res++, tp_u);
1543 set_method_res_type(res, n_res++, tp_u);
1545 set_method_res_type(res, n_res++, tp);
1548 set_method_res_type(res, n_res++, tp);
/* remember the mapping both in the type itself and in our cache */
1551 set_lowered_type(mtp, res);
1552 pmap_insert(lowered_type, mtp, res);
1554 value_type = get_method_value_param_type(mtp);
1555 if (value_type != NULL) {
1556 /* this creates a new value parameter type */
1557 (void)get_method_value_param_ent(res, 0);
1559 /* set new param positions for all entities of the value type */
1560 for (i = n_param = 0; i < orig_n_params; ++i) {
1561 ir_type *tp = get_method_param_type(mtp, i);
1562 ir_entity *ent = get_method_value_param_ent(mtp, i);
1564 set_entity_arg_idx(ent, n_param);
1565 if (is_Primitive_type(tp)) {
1566 ir_mode *mode = get_type_mode(tp);
1568 if (mode == env->high_signed
1569 || mode == env->high_unsigned) {
1577 set_lowered_type(value_type, get_method_value_param_type(res));
/* cache hit: reuse the previously lowered type */
1580 res = (ir_type*)entry->value;
1586 * Translate a Return.
1588 static void lower_Return(ir_node *node, ir_mode *mode)
/* Lower a Return: every doubleword result value is replaced by its two
 * words (endianness-dependent order) and the enclosing function's entity
 * type is switched to the lowered method type. */
1590 ir_graph *irg = get_irn_irg(node);
1591 ir_entity *ent = get_irg_entity(irg);
1592 ir_type *mtp = get_entity_type(ent);
1598 /* check if this return must be lowered */
1599 for (i = 0, n = get_Return_n_ress(node); i < n; ++i) {
1600 ir_node *pred = get_Return_res(node, i);
1601 ir_mode *mode = get_irn_op_mode(pred);
1603 if (mode == env->high_signed || mode == env->high_unsigned)
/* at least one doubleword result: lower the method type of the entity */
1609 ent = get_irg_entity(irg);
1610 mtp = get_entity_type(ent);
1612 mtp = lower_mtp(mtp);
1613 set_entity_type(ent, mtp);
1615 /* create a new in array */
1616 NEW_ARR_A(ir_node *, in, get_method_n_ress(mtp) + 1);
/* in[0] stays the memory dependency */
1617 in[0] = get_Return_mem(node);
1619 for (j = i = 0, n = get_Return_n_ress(node); i < n; ++i) {
1620 ir_node *pred = get_Return_res(node, i);
1621 ir_mode *pred_mode = get_irn_mode(pred);
1623 if (pred_mode == env->high_signed || pred_mode == env->high_unsigned) {
/* split doubleword result into its two words */
1624 const lower64_entry_t *entry = get_node_entry(pred);
1625 if (env->params->little_endian) {
1626 in[++j] = entry->low_word;
1627 in[++j] = entry->high_word;
1629 in[++j] = entry->high_word;
1630 in[++j] = entry->low_word;
/* install the widened operand array (memory + j results) */
1637 set_irn_in(node, j+1, in);
1641 * Translate the parameters.
1643 static void lower_Start(ir_node *node, ir_mode *mode)
/* Lower the Start node of a graph whose method type contains doubleword
 * parameters: lower the entity's method type, remap existing argument
 * Projs to their new positions and create the low/high word Projs. */
1645 ir_graph *irg = get_irn_irg(node);
1646 ir_entity *ent = get_irg_entity(irg);
1647 ir_type *tp = get_entity_type(ent);
1649 size_t i, j, n_params;
1651 ir_node *proj, *args;
1654 if (!mtp_must_be_lowered(tp))
1657 n_params = get_method_n_params(tp);
1659 NEW_ARR_A(long, new_projs, n_params);
1661 /* Calculate mapping of proj numbers in new_projs */
1662 for (i = j = 0; i < n_params; ++i, ++j) {
1663 ir_type *ptp = get_method_param_type(tp, i);
/* new_projs[i] presumably records j here; doubleword params advance j
 * by an extra slot — TODO confirm against the unabridged source */
1666 if (is_Primitive_type(ptp)) {
1667 ir_mode *mode = get_type_mode(ptp);
1669 if (mode == env->high_signed || mode == env->high_unsigned)
1674 /* lower method type */
1676 set_entity_type(ent, tp);
1678 /* switch off optimization for new Proj nodes or they might be CSE'ed
1679 with not patched one's */
1680 rem = get_optimize();
1683 /* fix all Proj's and create new ones */
1684 args = get_irg_args(irg);
/* walk the Proj list collected during the link phase */
1685 for (proj = (ir_node*)get_irn_link(node); proj;
1686 proj = (ir_node*)get_irn_link(proj)) {
1687 ir_node *pred = get_Proj_pred(proj);
1696 /* do not visit this node again */
1697 mark_irn_visited(proj);
/* move Proj to its shifted position in the lowered signature */
1702 proj_nr = get_Proj_proj(proj);
1703 set_Proj_proj(proj, new_projs[proj_nr]);
1705 mode = get_irn_mode(proj);
1706 mode_l = env->low_unsigned;
1707 if (mode == env->high_signed) {
1708 mode_h = env->low_signed;
1709 } else if (mode == env->high_unsigned) {
1710 mode_h = env->low_unsigned;
/* doubleword argument: build the two word Projs, order by endianness */
1715 dbg = get_irn_dbg_info(proj);
1716 if (env->params->little_endian) {
1717 res_low = new_rd_Proj(dbg, args, mode_l, new_projs[proj_nr]);
1718 res_high = new_rd_Proj(dbg, args, mode_h, new_projs[proj_nr] + 1);
1720 res_high = new_rd_Proj(dbg, args, mode_h, new_projs[proj_nr]);
1721 res_low = new_rd_Proj(dbg, args, mode_l, new_projs[proj_nr] + 1);
1723 ir_set_dw_lowered(proj, res_low, res_high);
1731 static void lower_Call(ir_node *node, ir_mode *mode)
/* Lower a Call: doubleword arguments are passed as two words, the call
 * type is replaced by its lowered variant, and result Projs of
 * doubleword mode are split into low/high word Projs. */
1733 ir_type *tp = get_Call_type(node);
1734 ir_node **in, *proj, *results;
1735 size_t n_params, n_res;
1736 bool need_lower = false;
1739 long *res_numbers = NULL;
/* check parameters for doubleword modes */
1742 n_params = get_method_n_params(tp);
1743 for (p = 0; p < n_params; ++p) {
1744 ir_type *ptp = get_method_param_type(tp, p);
1746 if (is_Primitive_type(ptp)) {
1747 ir_mode *mode = get_type_mode(ptp);
1749 if (mode == env->high_signed || mode == env->high_unsigned) {
/* check results; res_numbers maps old result index -> new index */
1755 n_res = get_method_n_ress(tp);
1757 NEW_ARR_A(long, res_numbers, n_res);
1759 for (i = j = 0; i < n_res; ++i, ++j) {
1760 ir_type *ptp = get_method_res_type(tp, i);
1763 if (is_Primitive_type(ptp)) {
1764 ir_mode *mode = get_type_mode(ptp);
1766 if (mode == env->high_signed || mode == env->high_unsigned) {
1777 /* let's lower it */
1779 set_Call_type(node, tp);
/* build the new argument array: memory, callee address, then args */
1781 NEW_ARR_A(ir_node *, in, get_method_n_params(tp) + 2);
1783 in[0] = get_Call_mem(node);
1784 in[1] = get_Call_ptr(node);
1786 for (j = 2, i = 0; i < n_params; ++i) {
1787 ir_node *pred = get_Call_param(node, i);
1788 ir_mode *pred_mode = get_irn_mode(pred);
1790 if (pred_mode == env->high_signed || pred_mode == env->high_unsigned) {
/* doubleword argument: pass both words, order by endianness */
1791 const lower64_entry_t *pred_entry = get_node_entry(pred);
1792 if (env->params->little_endian) {
1793 in[j++] = pred_entry->low_word;
1794 in[j++] = pred_entry->high_word;
1796 in[j++] = pred_entry->high_word;
1797 in[j++] = pred_entry->low_word;
1804 set_irn_in(node, j, in);
1806 /* fix the results */
/* locate the Proj delivering the result tuple of this Call */
1808 for (proj = (ir_node*)get_irn_link(node); proj;
1809 proj = (ir_node*)get_irn_link(proj)) {
1810 long proj_nr = get_Proj_proj(proj);
1812 if (proj_nr == pn_Call_T_result && get_Proj_pred(proj) == node) {
1813 /* found the result proj */
1819 if (results != NULL) { /* there are results */
1820 int rem = get_optimize();
1822 /* switch off optimization for new Proj nodes or they might be CSE'ed
1823 with not patched one's */
1825 for (proj = (ir_node*)get_irn_link(results); proj; proj = (ir_node*)get_irn_link(proj)) {
1826 if (get_Proj_pred(proj) == results) {
1827 long proj_nr = get_Proj_proj(proj);
1828 ir_mode *proj_mode = get_irn_mode(proj);
1835 /* found a result */
1836 mark_irn_visited(proj);
/* renumber to the shifted position in the lowered result list */
1838 set_Proj_proj(proj, res_numbers[proj_nr]);
1840 mode_l = env->low_unsigned;
1841 if (proj_mode == env->high_signed) {
1842 mode_h = env->low_signed;
1843 } else if (proj_mode == env->high_unsigned) {
1844 mode_h = env->low_unsigned;
/* doubleword result: create the two word Projs, order by endianness */
1849 dbg = get_irn_dbg_info(proj);
1850 if (env->params->little_endian) {
1851 res_low = new_rd_Proj(dbg, results, mode_l,
1852 res_numbers[proj_nr]);
1853 res_high = new_rd_Proj(dbg, results, mode_h,
1854 res_numbers[proj_nr] + 1);
1856 res_high = new_rd_Proj(dbg, results, mode_h,
1857 res_numbers[proj_nr]);
1858 res_low = new_rd_Proj(dbg, results, mode_l,
1859 res_numbers[proj_nr] + 1);
1861 ir_set_dw_lowered(proj, res_low, res_high);
1869 * Translate an Unknown into two.
1871 static void lower_Unknown(ir_node *node, ir_mode *mode)
1873 ir_mode *low_mode = env->low_unsigned;
1874 ir_graph *irg = get_irn_irg(node);
1875 ir_node *res_low = new_r_Unknown(irg, low_mode);
1876 ir_node *res_high = new_r_Unknown(irg, mode);
1877 ir_set_dw_lowered(node, res_low, res_high);
1881 * Translate a Bad into two.
1883 static void lower_Bad(ir_node *node, ir_mode *mode)
1885 ir_mode *low_mode = env->low_unsigned;
1886 ir_graph *irg = get_irn_irg(node);
1887 ir_node *res_low = new_r_Bad(irg, low_mode);
1888 ir_node *res_high = new_r_Bad(irg, mode);
1889 ir_set_dw_lowered(node, res_low, res_high);
1895 * First step: just create two templates
1897 static void lower_Phi(ir_node *phi)
/* First lowering step for a Phi: enqueue its predecessors and, when the
 * Phi has a doubleword mode, create two template Phis (low/high) whose
 * inputs are Dummy placeholders; fixup_phi() patches them later once the
 * predecessors have been lowered. */
1899 ir_mode *mode = get_irn_mode(phi);
1914 /* enqueue predecessors */
1915 arity = get_Phi_n_preds(phi);
1916 for (i = 0; i < arity; ++i) {
1917 ir_node *pred = get_Phi_pred(phi, i);
1918 pdeq_putr(env->waitq, pred);
/* non-doubleword Phis need no further treatment */
1921 if (mode != env->high_signed && mode != env->high_unsigned)
1924 /* first create a new in array */
1925 NEW_ARR_A(ir_node *, in_l, arity);
1926 NEW_ARR_A(ir_node *, in_h, arity);
1927 irg = get_irn_irg(phi);
1928 mode_l = env->low_unsigned;
1929 mode_h = mode == env->high_signed ? env->low_signed : env->low_unsigned;
/* Dummies stand in for the not-yet-lowered predecessor words */
1930 unk_l = new_r_Dummy(irg, mode_l);
1931 unk_h = new_r_Dummy(irg, mode_h);
1932 for (i = 0; i < arity; ++i) {
1937 dbg = get_irn_dbg_info(phi);
1938 block = get_nodes_block(phi);
1939 phi_l = new_rd_Phi(dbg, block, arity, in_l, mode_l);
1940 phi_h = new_rd_Phi(dbg, block, arity, in_h, mode_h);
1942 ir_set_dw_lowered(phi, phi_l, phi_h);
1944 /* remember that we need to fixup the predecessors later */
1945 ARR_APP1(ir_node*, env->lowered_phis, phi);
1947 /* Don't forget to link the new Phi nodes into the block.
1948 * Beware that some Phis might be optimized away. */
1950 add_Block_phi(block, phi_l);
1952 add_Block_phi(block, phi_h);
1955 static void fixup_phi(ir_node *phi)
1957 const lower64_entry_t *entry = get_node_entry(phi);
1958 ir_node *phi_l = entry->low_word;
1959 ir_node *phi_h = entry->high_word;
1960 int arity = get_Phi_n_preds(phi);
1963 /* exchange phi predecessors which are lowered by now */
1964 for (i = 0; i < arity; ++i) {
1965 ir_node *pred = get_Phi_pred(phi, i);
1966 const lower64_entry_t *pred_entry = get_node_entry(pred);
1968 set_Phi_pred(phi_l, i, pred_entry->low_word);
1969 set_Phi_pred(phi_h, i, pred_entry->high_word);
1976 static void lower_Mux(ir_node *mux, ir_mode *mode)
1978 ir_node *truen = get_Mux_true(mux);
1979 ir_node *falsen = get_Mux_false(mux);
1980 ir_node *sel = get_Mux_sel(mux);
1981 const lower64_entry_t *true_entry = get_node_entry(truen);
1982 const lower64_entry_t *false_entry = get_node_entry(falsen);
1983 ir_node *true_l = true_entry->low_word;
1984 ir_node *true_h = true_entry->high_word;
1985 ir_node *false_l = false_entry->low_word;
1986 ir_node *false_h = false_entry->high_word;
1987 dbg_info *dbgi = get_irn_dbg_info(mux);
1988 ir_node *block = get_nodes_block(mux);
1990 = new_rd_Mux(dbgi, block, sel, false_l, true_l, env->low_unsigned);
1992 = new_rd_Mux(dbgi, block, sel, false_h, true_h, mode);
1993 ir_set_dw_lowered(mux, res_low, res_high);
1997 * Translate an ASM node.
1999 static void lower_ASM(ir_node *asmn, ir_mode *mode)
/* Lower an ASM node with doubleword outputs: each 64bit '=A' output
 * constraint is split into '=a' (low) and '=d' (high) constraints — an
 * x86-specific convention — and the result Projs are split accordingly.
 * 64bit inputs are not supported and panic. */
2001 ir_mode *high_signed = env->high_signed;
2002 ir_mode *high_unsigned = env->high_unsigned;
2003 int n_outs = get_ASM_n_output_constraints(asmn);
2004 ir_asm_constraint *output_constraints = get_ASM_output_constraints(asmn);
2005 ir_asm_constraint *input_constraints = get_ASM_input_constraints(asmn);
2006 unsigned n_64bit_outs = 0;
/* reject doubleword inputs — unimplemented */
2012 for (i = get_irn_arity(asmn) - 1; i >= 0; --i) {
2013 ir_node *op = get_irn_n(asmn, i);
2014 ir_mode *op_mode = get_irn_mode(op);
2015 if (op_mode == high_signed || op_mode == high_unsigned) {
2016 panic("lowering ASM 64bit input unimplemented");
/* count and validate doubleword outputs */
2020 for (i = 0; i < n_outs; ++i) {
2021 const ir_asm_constraint *constraint = &output_constraints[i];
2022 if (constraint->mode == high_signed || constraint->mode == high_unsigned) {
2023 const char *constr = get_id_str(constraint->constraint);
2025 /* TODO: How to do this architecture neutral? This is very
2026 * i386 specific... */
2027 if (constr[0] != '=' || constr[1] != 'A') {
2028 panic("lowering ASM 64bit output only supports '=A' currently");
/* nothing to do if no doubleword output exists */
2033 if (n_64bit_outs == 0)
2037 dbg_info *dbgi = get_irn_dbg_info(asmn);
2038 ir_node *block = get_nodes_block(asmn);
2039 int arity = get_irn_arity(asmn);
2040 ir_node **in = get_irn_in(asmn) + 1;
2041 int n_outs = get_ASM_n_output_constraints(asmn);
2043 int n_clobber = get_ASM_n_clobbers(asmn);
/* proj_map maps old output index -> index in the new output list */
2044 long *proj_map = ALLOCAN(long, n_outs);
2045 ident **clobbers = get_ASM_clobbers(asmn);
2046 ident *asm_text = get_ASM_text(asmn);
2047 ir_asm_constraint *new_outputs
2048 = ALLOCAN(ir_asm_constraint, n_outs+n_64bit_outs);
2051 for (i = 0; i < n_outs; ++i) {
2052 const ir_asm_constraint *constraint = &output_constraints[i];
2053 if (constraint->mode == high_signed || constraint->mode == high_unsigned) {
/* split '=A' into '=a' (low, unsigned) ... */
2054 new_outputs[new_n_outs].pos = constraint->pos;
2055 new_outputs[new_n_outs].constraint = new_id_from_str("=a");
2056 new_outputs[new_n_outs].mode = env->low_unsigned;
2057 proj_map[i] = new_n_outs;
/* ... and '=d' (high, signedness follows the original mode) */
2059 new_outputs[new_n_outs].pos = constraint->pos;
2060 new_outputs[new_n_outs].constraint = new_id_from_str("=d");
2061 if (constraint->mode == high_signed)
2062 new_outputs[new_n_outs].mode = env->low_signed;
2064 new_outputs[new_n_outs].mode = env->low_unsigned;
2067 new_outputs[new_n_outs] = *constraint;
2068 proj_map[i] = new_n_outs;
2072 assert(new_n_outs == n_outs+(int)n_64bit_outs);
/* build the replacement ASM node with the widened output list */
2074 new_asm = new_rd_ASM(dbgi, block, arity, in, input_constraints,
2075 new_n_outs, new_outputs, n_clobber, clobbers,
/* rewire the Projs of the old ASM node */
2081 n = (ir_node*)get_irn_link(n);
2084 proj_mode = get_irn_mode(n);
2085 pn = get_Proj_proj(n);
2089 pn = new_n_outs + pn - n_outs;
2091 if (proj_mode == high_signed || proj_mode == high_unsigned) {
2093 = proj_mode == high_signed ? env->low_signed : env->low_unsigned;
2094 ir_node *np_low = new_r_Proj(new_asm, env->low_unsigned, pn);
2095 ir_node *np_high = new_r_Proj(new_asm, high_mode, pn+1);
2096 ir_set_dw_lowered(n, np_low, np_high);
2098 ir_node *np = new_r_Proj(new_asm, proj_mode, pn);
2106 * Translate a Sel node.
2108 static void lower_Sel(ir_node *sel, ir_mode *mode)
/* Lower a Sel: only Sels into the value parameter type are affected —
 * when the method type was lowered, redirect the entity to the
 * corresponding entity of the lowered value parameter type (position via
 * get_entity_arg_idx()). */
2112 /* we must only lower value parameter Sels if we change the
2113 value parameter type. */
2114 if (env->value_param_tp != NULL) {
2115 ir_entity *ent = get_Sel_entity(sel);
2116 if (get_entity_owner(ent) == env->value_param_tp) {
2117 size_t pos = get_entity_arg_idx(ent);
2119 ent = get_method_value_param_ent(env->l_mtp, pos);
2120 set_Sel_entity(sel, ent);
2126 * check for opcodes that must always be lowered.
2128 static bool always_lower(unsigned code)
2146 * Compare two op_mode_entry_t's.
2148 static int cmp_op_mode(const void *elt, const void *key, size_t size)
2150 const op_mode_entry_t *e1 = (const op_mode_entry_t*)elt;
2151 const op_mode_entry_t *e2 = (const op_mode_entry_t*)key;
2154 return (e1->op != e2->op) | (e1->imode != e2->imode) | (e1->omode != e2->omode);
2158 * Compare two conv_tp_entry_t's.
2160 static int cmp_conv_tp(const void *elt, const void *key, size_t size)
2162 const conv_tp_entry_t *e1 = (const conv_tp_entry_t*)elt;
2163 const conv_tp_entry_t *e2 = (const conv_tp_entry_t*)key;
2166 return (e1->imode != e2->imode) | (e1->omode != e2->omode);
2170 * Enter a lowering function into an ir_op.
2172 void ir_register_dw_lower_function(ir_op *op, lower_dw_func func)
/* Public hook: install func as the doubleword lowering callback for op by
 * storing it in the op's generic function slot (read back in
 * lower_node()). */
2174 op->ops.generic = (op_func)func;
2178 * Returns non-zero if a method type must be lowered.
2180 * @param mtp the method type
2182 static bool mtp_must_be_lowered(ir_type *mtp)
/* Return true iff the method type contains at least one primitive
 * parameter of a doubleword mode and therefore needs lowering.
 * NOTE(review): the tail of the function (and a possible result check)
 * was lost in extraction. */
2184 size_t i, n_params = get_method_n_params(mtp);
2186 /* first check if we have parameters that must be fixed */
2187 for (i = 0; i < n_params; ++i) {
2188 ir_type *tp = get_method_param_type(mtp, i);
2190 if (is_Primitive_type(tp)) {
2191 ir_mode *mode = get_type_mode(tp);
2193 if (mode == env->high_signed || mode == env->high_unsigned)
2200 /* Determine which modes need to be lowered */
2201 static void setup_modes(void)
/* Find the signed/unsigned doubleword modes of the requested size in the
 * program's mode list, sanity-check them, and create the two word-sized
 * modes ("WS"/"WU") used as lowering targets. Panics if the doubleword
 * modes are missing or ambiguous. */
2203 unsigned size_bits = env->params->doubleword_size;
2204 ir_mode *doubleword_signed = NULL;
2205 ir_mode *doubleword_unsigned = NULL;
2206 size_t n_modes = get_irp_n_modes();
2207 ir_mode_arithmetic arithmetic;
2208 unsigned modulo_shift;
2211 /* search for doubleword modes... */
2212 for (i = 0; i < n_modes; ++i) {
2213 ir_mode *mode = get_irp_mode(i);
2214 if (!mode_is_int(mode))
2216 if (get_mode_size_bits(mode) != size_bits)
2218 if (mode_is_signed(mode)) {
2219 if (doubleword_signed != NULL) {
2220 /* sigh - the lowerer should really just lower all mode with
2221 * size_bits it finds. Unfortunately this required a bigger
2223 panic("multiple double word signed modes found");
2225 doubleword_signed = mode;
2227 if (doubleword_unsigned != NULL) {
2228 /* sigh - the lowerer should really just lower all mode with
2229 * size_bits it finds. Unfortunately this required a bigger
2231 panic("multiple double word unsigned modes found");
2233 doubleword_unsigned = mode;
2236 if (doubleword_signed == NULL || doubleword_unsigned == NULL) {
2237 panic("Couldn't find doubleword modes");
2240 arithmetic = get_mode_arithmetic(doubleword_signed);
2241 modulo_shift = get_mode_modulo_shift(doubleword_signed);
/* the signed and unsigned doubleword modes must agree in everything
 * except signedness */
2243 assert(get_mode_size_bits(doubleword_unsigned) == size_bits);
2244 assert(size_bits % 2 == 0);
2245 assert(get_mode_sign(doubleword_signed) == 1);
2246 assert(get_mode_sign(doubleword_unsigned) == 0);
2247 assert(get_mode_sort(doubleword_signed) == irms_int_number);
2248 assert(get_mode_sort(doubleword_unsigned) == irms_int_number);
2249 assert(get_mode_arithmetic(doubleword_unsigned) == arithmetic);
2250 assert(get_mode_modulo_shift(doubleword_unsigned) == modulo_shift);
2252 /* try to guess a sensible modulo shift for the new mode.
2253 * (This is IMO another indication that this should really be a node
2254 * attribute instead of a mode thing) */
2255 if (modulo_shift == size_bits) {
2256 modulo_shift = modulo_shift / 2;
2257 } else if (modulo_shift == 0) {
2260 panic("Don't know what new modulo shift to use for lowered doubleword mode");
2264 /* produce lowered modes */
2265 env->high_signed = doubleword_signed;
2266 env->high_unsigned = doubleword_unsigned;
2267 env->low_signed = new_ir_mode("WS", irms_int_number, size_bits, 1,
2268 arithmetic, modulo_shift);
2269 env->low_unsigned = new_ir_mode("WU", irms_int_number, size_bits, 0,
2270 arithmetic, modulo_shift);
2273 static void enqueue_preds(ir_node *node)
2275 int arity = get_irn_arity(node);
2278 for (i = 0; i < arity; ++i) {
2279 ir_node *pred = get_irn_n(node, i);
2280 pdeq_putr(env->waitq, pred);
2284 static void lower_node(ir_node *node)
/* Depth-first driver: visit a node once, lower its operands first
 * (cycles are broken at Block/Phi/Cond nodes), then invoke the per-op
 * lowering callback registered in op->ops.generic when the node needs
 * lowering. */
2292 lower64_entry_t *entry;
2294 if (irn_visited_else_mark(node))
2297 /* cycles are always broken at Phi and Block nodes. So we don't need special
2298 * magic in all the other lower functions */
2299 if (is_Block(node)) {
2300 enqueue_preds(node);
2302 } else if (is_Phi(node)) {
2307 /* depth-first: descend into operands */
2308 if (!is_Block(node)) {
2309 ir_node *block = get_nodes_block(node);
/* Cond operands are handled separately (control-flow lowering) */
2313 if (!is_Cond(node)) {
2314 arity = get_irn_arity(node);
2315 for (i = 0; i < arity; ++i) {
2316 ir_node *pred = get_irn_n(node, i);
2321 op = get_irn_op(node);
2322 func = (lower_dw_func) op->ops.generic;
/* lower only if the node has a doubleword entry or its op must always
 * be processed (e.g. Call/Return/Start) */
2326 idx = get_irn_idx(node);
2327 entry = idx < env->n_entries ? env->entries[idx] : NULL;
2328 if (entry != NULL || always_lower(get_irn_opcode(node))) {
/* pass the matching lowered word mode to the callback */
2329 mode = get_irn_op_mode(node);
2330 if (mode == env->high_signed) {
2331 mode = env->low_signed;
2333 mode = env->low_unsigned;
2335 DB((dbg, LEVEL_1, " %+F\n", node));
2340 static void lower_irg(ir_graph *irg)
/* Lower one graph: allocate the per-node entry table, lower the entity's
 * method type if needed, link nodes in a preparation walk, then process
 * the work queue starting at the End node and fix up lowered Phis.
 * Invalidates dominance info when control flow changed. */
2346 obstack_init(&env->obst);
/* size the entry table generously (25% slack for nodes created during
 * lowering) */
2348 n_idx = get_irg_last_idx(irg);
2349 n_idx = n_idx + (n_idx >> 2); /* add 25% */
2350 env->n_entries = n_idx;
2351 env->entries = NEW_ARR_F(lower64_entry_t*, n_idx);
2352 memset(env->entries, 0, sizeof(env->entries[0]) * n_idx);
2357 env->proj_2_block = pmap_create();
2358 env->value_param_tp = NULL;
2360 ent = get_irg_entity(irg);
2361 mtp = get_entity_type(ent);
2363 if (mtp_must_be_lowered(mtp)) {
2364 ir_type *ltp = lower_mtp(mtp);
2365 /* Do not update the entity type yet, this will be done by lower_Start! */
2366 env->flags |= MUST_BE_LOWERED;
2368 env->value_param_tp = get_method_value_param_type(mtp);
2371 /* first step: link all nodes and allocate data */
2372 ir_reserve_resources(irg, IR_RESOURCE_PHI_LIST | IR_RESOURCE_IRN_LINK);
2373 irg_walk_graph(irg, firm_clear_node_and_phi_links,
2374 prepare_links_and_handle_rotl, env);
2376 if (env->flags & MUST_BE_LOWERED) {
2378 ir_reserve_resources(irg, IR_RESOURCE_IRN_VISITED);
2379 inc_irg_visited(irg);
/* seed the worklist with the End node and drain it */
2381 assert(pdeq_empty(env->waitq));
2382 pdeq_putr(env->waitq, get_irg_end(irg));
2384 env->lowered_phis = NEW_ARR_F(ir_node*, 0);
2385 while (!pdeq_empty(env->waitq)) {
2386 ir_node *node = (ir_node*)pdeq_getl(env->waitq);
2390 /* we need to fixup phis */
2391 for (i = 0; i < ARR_LEN(env->lowered_phis); ++i) {
2392 ir_node *phi = env->lowered_phis[i];
2395 DEL_ARR_F(env->lowered_phis);
2398 ir_free_resources(irg, IR_RESOURCE_IRN_VISITED);
2400 if (env->flags & CF_CHANGED) {
2401 /* control flow changed, dominance info is invalid */
2402 set_irg_doms_inconsistent(irg);
2403 set_irg_extblk_inconsistent(irg);
2407 ir_free_resources(irg, IR_RESOURCE_PHI_LIST | IR_RESOURCE_IRN_LINK);
/* release per-graph data */
2409 pmap_destroy(env->proj_2_block);
2410 DEL_ARR_F(env->entries);
2411 obstack_free(&env->obst, NULL);
2414 static const lwrdw_param_t *param;
2416 void ir_prepare_dw_lowering(const lwrdw_param_t *new_param)
/* Public entry: store the lowering parameters and register the default
 * per-opcode lowering callbacks. Must be called before ir_lower_dw_ops();
 * targets may override individual callbacks afterwards via
 * ir_register_dw_lower_function(). */
2418 assert(new_param != NULL);
2419 FIRM_DBG_REGISTER(dbg, "firm.lower.dw");
/* reset all generic op functions, then install the doubleword handlers */
2423 clear_irp_opcodes_generic_func();
2424 ir_register_dw_lower_function(op_ASM, lower_ASM);
2425 ir_register_dw_lower_function(op_Add, lower_binop);
2426 ir_register_dw_lower_function(op_And, lower_And);
2427 ir_register_dw_lower_function(op_Bad, lower_Bad);
2428 ir_register_dw_lower_function(op_Call, lower_Call);
2429 ir_register_dw_lower_function(op_Cmp, lower_Cmp);
2430 ir_register_dw_lower_function(op_Cond, lower_Cond);
2431 ir_register_dw_lower_function(op_Const, lower_Const);
2432 ir_register_dw_lower_function(op_Conv, lower_Conv);
2433 ir_register_dw_lower_function(op_Div, lower_Div);
2434 ir_register_dw_lower_function(op_Eor, lower_Eor);
2435 ir_register_dw_lower_function(op_Load, lower_Load);
2436 ir_register_dw_lower_function(op_Minus, lower_unop);
2437 ir_register_dw_lower_function(op_Mod, lower_Mod);
2438 ir_register_dw_lower_function(op_Mul, lower_binop);
2439 ir_register_dw_lower_function(op_Mux, lower_Mux);
2440 ir_register_dw_lower_function(op_Not, lower_Not);
2441 ir_register_dw_lower_function(op_Or, lower_Or);
2442 ir_register_dw_lower_function(op_Return, lower_Return);
2443 ir_register_dw_lower_function(op_Sel, lower_Sel);
2444 ir_register_dw_lower_function(op_Shl, lower_Shl);
2445 ir_register_dw_lower_function(op_Shr, lower_Shr);
2446 ir_register_dw_lower_function(op_Shrs, lower_Shrs);
2447 ir_register_dw_lower_function(op_Start, lower_Start);
2448 ir_register_dw_lower_function(op_Store, lower_Store);
2449 ir_register_dw_lower_function(op_Sub, lower_binop);
2450 ir_register_dw_lower_function(op_Unknown, lower_Unknown);
/**
 * Run the double-word lowering over all graphs of the program.
 *
 * Lazily creates the global maps ((op, imode, omode) -> intrinsic entity,
 * (imode, omode) -> conv type, method type -> lowered type) and the method
 * types used for intrinsic calls, then lowers every irg in the program.
 *
 * NOTE(review): this chunk is an elided extraction of the original file —
 * `else` keywords, lazy-creation guards (compare the visible
 * `if (! shiftop_tp_u)`), closing braces and the per-irg loop body are
 * missing between the numbered lines below.  All code lines are kept
 * byte-identical.
 */
2456 void ir_lower_dw_ops(void)
2458 	lower_dw_env_t lenv;
2461 	memset(&lenv, 0, sizeof(lenv));
2462 	lenv.params = param;
2467 	/* create the necessary maps */
/* lazy creation: see the static map declarations near the top of the file */
2468 	if (! intrinsic_fkt)
2469 		intrinsic_fkt = new_set(cmp_op_mode, iro_Last + 1);
2471 		conv_types = new_set(cmp_conv_tp, 16);
2473 		lowered_type = pmap_create();
2475 	/* create a primitive unsigned and signed type */
2477 		tp_u = get_type_for_mode(lenv.low_unsigned);
2479 		tp_s = get_type_for_mode(lenv.low_signed);
2481 	/* create method types for the created binop calls */
/* a binop intrinsic takes two double-words, each passed as two words,
 * and returns one double-word as two words: 4 params, 2 results */
2483 		binop_tp_u = new_type_method(4, 2);
2484 		set_method_param_type(binop_tp_u, 0, tp_u);
2485 		set_method_param_type(binop_tp_u, 1, tp_u);
2486 		set_method_param_type(binop_tp_u, 2, tp_u);
2487 		set_method_param_type(binop_tp_u, 3, tp_u);
2488 		set_method_res_type(binop_tp_u, 0, tp_u);
2489 		set_method_res_type(binop_tp_u, 1, tp_u);
2492 		binop_tp_s = new_type_method(4, 2);
/* NOTE(review): `env->params` is used here although the visible local is
 * `lenv` — presumably a file-scope `env` pointer exists outside this
 * chunk; confirm against the full file.  Word order depends on
 * endianness: little-endian puts the unsigned low word first, the
 * (elided) else branch puts the signed high word first. */
2493 		if (env->params->little_endian) {
2494 			set_method_param_type(binop_tp_s, 0, tp_u);
2495 			set_method_param_type(binop_tp_s, 1, tp_s);
2496 			set_method_param_type(binop_tp_s, 2, tp_u);
2497 			set_method_param_type(binop_tp_s, 3, tp_s);
2498 			set_method_res_type(binop_tp_s, 0, tp_u);
2499 			set_method_res_type(binop_tp_s, 1, tp_s);
2501 			set_method_param_type(binop_tp_s, 0, tp_s);
2502 			set_method_param_type(binop_tp_s, 1, tp_u);
2503 			set_method_param_type(binop_tp_s, 2, tp_s);
2504 			set_method_param_type(binop_tp_s, 3, tp_u);
2505 			set_method_res_type(binop_tp_s, 0, tp_s);
2506 			set_method_res_type(binop_tp_s, 1, tp_u);
/* shift intrinsics take the value as two words plus a single-word shift
 * count: 3 params, 2 results */
2509 	if (! shiftop_tp_u) {
2510 		shiftop_tp_u = new_type_method(3, 2);
2511 		set_method_param_type(shiftop_tp_u, 0, tp_u);
2512 		set_method_param_type(shiftop_tp_u, 1, tp_u);
2513 		set_method_param_type(shiftop_tp_u, 2, tp_u);
2514 		set_method_res_type(shiftop_tp_u, 0, tp_u);
2515 		set_method_res_type(shiftop_tp_u, 1, tp_u);
2517 	if (! shiftop_tp_s) {
2518 		shiftop_tp_s = new_type_method(3, 2);
2519 		set_method_param_type(shiftop_tp_s, 0, tp_u);
2520 		set_method_param_type(shiftop_tp_s, 1, tp_s);
2521 		set_method_param_type(shiftop_tp_s, 2, tp_u);
2522 		set_method_res_type(shiftop_tp_s, 0, tp_u);
2523 		set_method_res_type(shiftop_tp_s, 1, tp_s);
/* a unop intrinsic takes and returns one double-word as two words:
 * 2 params, 2 results */
2526 		unop_tp_u = new_type_method(2, 2);
2527 		set_method_param_type(unop_tp_u, 0, tp_u);
2528 		set_method_param_type(unop_tp_u, 1, tp_u);
2529 		set_method_res_type(unop_tp_u, 0, tp_u);
2530 		set_method_res_type(unop_tp_u, 1, tp_u);
2533 		unop_tp_s = new_type_method(2, 2);
/* same endianness-dependent word order as binop_tp_s above */
2534 		if (env->params->little_endian) {
2535 			set_method_param_type(unop_tp_s, 0, tp_u);
2536 			set_method_param_type(unop_tp_s, 1, tp_s);
2537 			set_method_res_type(unop_tp_s, 0, tp_u);
2538 			set_method_res_type(unop_tp_s, 1, tp_s);
2540 			set_method_param_type(unop_tp_s, 0, tp_s);
2541 			set_method_param_type(unop_tp_s, 1, tp_u);
2542 			set_method_res_type(unop_tp_s, 0, tp_s);
2543 			set_method_res_type(unop_tp_s, 1, tp_u);
/* tarvals for the half double-word size in bytes resp. bits */
2547 	lenv.tv_mode_bytes = new_tarval_from_long(param->doubleword_size/(2*8), lenv.low_unsigned);
2548 	lenv.tv_mode_bits  = new_tarval_from_long(param->doubleword_size/2, lenv.low_unsigned);
2549 	lenv.waitq         = new_pdeq();
/* entity-name suffixes for the two halves; which half comes first
 * depends on endianness */
2550 	lenv.first_id      = new_id_from_chars(param->little_endian ? ".l" : ".h", 2);
2551 	lenv.next_id       = new_id_from_chars(param->little_endian ? ".h" : ".l", 2);
2553 	/* transform all graphs */
2554 	for (i = 0, n = get_irp_n_irgs(); i < n; ++i) {
2555 		ir_graph *irg = get_irp_irg(i);
2558 	del_pdeq(lenv.waitq);
2563 /* Default implementation. */
2564 ir_entity *def_create_intrinsic_fkt(ir_type *method, const ir_op *op,
2565 const ir_mode *imode, const ir_mode *omode,
2573 if (imode == omode) {
2574 snprintf(buf, sizeof(buf), "__l%s%s", get_op_name(op), get_mode_name(imode));
2576 snprintf(buf, sizeof(buf), "__l%s%s%s", get_op_name(op),
2577 get_mode_name(imode), get_mode_name(omode));
2579 id = new_id_from_str(buf);
2581 ent = new_entity(get_glob_type(), id, method);
2582 set_entity_ld_ident(ent, get_entity_ident(ent));