2 * Copyright (C) 1995-2011 University of Karlsruhe. All rights reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Lower double word operations, i.e. 64bit -> 32bit, 32bit -> 16bit etc.
24 * @author Michael Beck
37 #include "irnodeset.h"
38 #include "irgraph_t.h"
43 #include "dbginfo_t.h"
44 #include "iropt_dbg.h"
50 #include "iroptimize.h"
61 /** A map from (op, imode, omode) to Intrinsic functions entities. */
62 static set *intrinsic_fkt;
64 /** A map from (imode, omode) to conv function types. */
65 static set *conv_types;
67 /** A map from a method type to its lowered type. */
68 static pmap *lowered_type;
70 /** A map from a builtin type to its lower and higher type. */
71 static pmap *lowered_builtin_type_high;
72 static pmap *lowered_builtin_type_low;
74 /** The types for the binop and unop intrinsics. */
75 static ir_type *binop_tp_u, *binop_tp_s, *unop_tp_u, *unop_tp_s, *tp_s, *tp_u;
/* NOTE(review): undocumented in the original; presumably the set of Mux nodes
 * created by this pass so they are not lowered again — confirm at use site. */
77 static ir_nodeset_t created_mux_nodes;
79 /** the debug handle */
80 DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
83 * An entry in the (op, imode, omode) -> entity map.
/* Key/value record stored in the 'intrinsic_fkt' set: maps an emulated
 * operation together with its input/output modes to the entity of the
 * emulation function.  (Closing brace of the struct is outside this view.) */
85 typedef struct op_mode_entry {
86 const ir_op *op; /**< the op */
87 const ir_mode *imode; /**< the input mode */
88 const ir_mode *omode; /**< the output mode */
89 ir_entity *ent; /**< the associated entity of this (op, imode, omode) triple */
93 * An entry in the (imode, omode) -> tp map.
/* Key/value record stored in the 'conv_types' set: caches the method type
 * built by get_conv_type() for a (source mode, destination mode) pair. */
95 typedef struct conv_tp_entry {
96 const ir_mode *imode; /**< the input mode */
97 const ir_mode *omode; /**< the output mode */
98 ir_type *mtd; /**< the associated method type of this (imode, omode) pair */
/* Bit flags collected in env->flags during the preparation walk and the
 * lowering itself (the enum declaration line is outside this view). */
102 MUST_BE_LOWERED = 1, /**< graph must be lowered */
103 CF_CHANGED = 2, /**< control flow was changed */
107 * The lower environment.
/* All per-run state of the double-word lowering pass; a single instance is
 * referenced through the file-global 'env' pointer below. */
109 typedef struct lower_dw_env_t {
110 lower64_entry_t **entries; /**< entries per node */
112 struct obstack obst; /**< an obstack holding the temporary data */
113 ir_tarval *tv_mode_bytes; /**< a tarval containing the number of bytes in the lowered modes */
114 ir_tarval *tv_mode_bits; /**< a tarval containing the number of bits in the lowered modes */
115 pdeq *waitq; /**< a wait queue of all nodes that must be handled later */
116 ir_node **lowered_phis; /**< list of lowered phis */
117 ir_mode *high_signed; /**< doubleword signed type */
118 ir_mode *high_unsigned; /**< doubleword unsigned type */
119 ir_mode *low_signed; /**< word signed type */
120 ir_mode *low_unsigned; /**< word unsigned type */
121 ident *first_id; /**< .l for little and .h for big endian */
122 ident *next_id; /**< .h for little and .l for big endian */
123 const lwrdw_param_t *params; /**< transformation parameter */
124 unsigned flags; /**< some flags */
125 unsigned n_entries; /**< number of entries */
/* Global environment of the currently running lowering; set up by the pass
 * driver (outside this view) and read by every helper in this file. */
128 static lower_dw_env_t *env;
/* Forward declaration: the per-node lowering dispatcher defined later. */
130 static void lower_node(ir_node *node);
133 * Create a method type for a Conv emulation from imode to omode.
/* Looks up / creates (cached in 'conv_types') a method type whose parameter
 * and result lists are the word-split forms of imode and omode: a double-word
 * mode contributes two word-sized entries (low word unsigned, high word signed
 * or unsigned depending on the mode), any other mode contributes itself. */
135 static ir_type *get_conv_type(ir_mode *imode, ir_mode *omode)
137 conv_tp_entry_t key, *entry;
/* set_insert() returns an existing entry for this (imode, omode) pair or
 * inserts the key; the cache-hit early return is outside this view. */
144 entry = (conv_tp_entry_t*)set_insert(conv_types, &key, sizeof(key), hash_ptr(imode) ^ hash_ptr(omode));
146 int n_param = 1, n_res = 1;
/* double-word modes are passed/returned as two words */
148 if (imode == env->high_signed || imode == env->high_unsigned)
150 if (omode == env->high_signed || omode == env->high_unsigned)
153 /* create a new one */
154 mtd = new_type_method(n_param, n_res);
/* NOTE(review): n_param/n_res are reused below as running indices, so the
 * truncated lines presumably reset them to 0 before the loops — confirm. */
156 /* set param types and result types */
158 if (imode == env->high_signed) {
159 set_method_param_type(mtd, n_param++, tp_u);
160 set_method_param_type(mtd, n_param++, tp_s);
161 } else if (imode == env->high_unsigned) {
162 set_method_param_type(mtd, n_param++, tp_u);
163 set_method_param_type(mtd, n_param++, tp_u);
/* non-double-word input: pass it unchanged */
165 ir_type *tp = get_type_for_mode(imode);
166 set_method_param_type(mtd, n_param++, tp);
170 if (omode == env->high_signed) {
171 set_method_res_type(mtd, n_res++, tp_u);
172 set_method_res_type(mtd, n_res++, tp_s);
173 } else if (omode == env->high_unsigned) {
174 set_method_res_type(mtd, n_res++, tp_u);
175 set_method_res_type(mtd, n_res++, tp_u);
/* non-double-word output: return it unchanged */
177 ir_type *tp = get_type_for_mode(omode);
178 set_method_res_type(mtd, n_res++, tp);
188 * Add an additional control flow input to a block.
189 * Patch all Phi nodes. The new Phi inputs are copied from
190 * old input number nr.
192 static void add_block_cf_input_nr(ir_node *block, int nr, ir_node *cf)
194 int i, arity = get_irn_arity(block);
196 const ir_edge_t *edge;
/* Build a new in-array with room for one extra predecessor. */
200 NEW_ARR_A(ir_node *, in, arity + 1);
201 for (i = 0; i < arity; ++i)
202 in[i] = get_irn_n(block, i);
/* After the loop i == arity, so i + 1 == arity + 1 — the new arity.
 * (The line storing 'cf' into in[arity] is outside this view.) */
205 set_irn_in(block, i + 1, in);
/* Widen every Phi of the block the same way, duplicating input 'nr'. */
207 foreach_out_edge(block, edge) {
208 ir_node *phi = get_edge_src_irn(edge);
212 for (i = 0; i < arity; ++i)
213 in[i] = get_irn_n(phi, i);
215 set_irn_in(phi, i + 1, in);
220 * Add an additional control flow input to a block.
221 * Patch all Phi nodes. The new Phi inputs are copied from
222 * old input from cf tmpl.
/* Convenience wrapper: locate the predecessor index of 'tmpl' and delegate
 * to add_block_cf_input_nr(). */
224 static void add_block_cf_input(ir_node *block, ir_node *tmpl, ir_node *cf)
226 int i, arity = get_irn_arity(block);
229 for (i = 0; i < arity; ++i) {
230 if (get_irn_n(block, i) == tmpl) {
236 add_block_cf_input_nr(block, nr, cf);
240 * Return the "operational" mode of a Firm node.
/* For nodes whose own mode does not describe the data they operate on
 * (Load, Store, Div, Mod, Cmp — the case labels are outside this view),
 * return the mode of the operated-on value; otherwise the node's mode. */
242 static ir_mode *get_irn_op_mode(ir_node *node)
244 switch (get_irn_opcode(node)) {
246 return get_Load_mode(node);
248 return get_irn_mode(get_Store_value(node));
250 return get_irn_mode(get_Div_left(node));
252 return get_irn_mode(get_Mod_left(node));
254 return get_irn_mode(get_Cmp_left(node));
256 return get_irn_mode(node);
261 * Walker, prepare the node links and determine which nodes need to be lowered
/* Allocates a lower64_entry_t for every node operating on a double-word mode
 * and records it in env->entries (indexed by node idx); sets MUST_BE_LOWERED
 * in env->flags when any lowering work is required. */
264 static void prepare_links(ir_node *node)
266 ir_mode *mode = get_irn_op_mode(node);
267 lower64_entry_t *link;
269 if (mode == env->high_signed || mode == env->high_unsigned) {
270 unsigned idx = get_irn_idx(node);
271 /* ok, found a node that will be lowered */
272 link = OALLOCZ(&env->obst, lower64_entry_t);
274 if (idx >= env->n_entries) {
275 /* enlarge: this happens only for Rotl nodes which is RARELY */
276 unsigned old = env->n_entries;
/* grow by 12.5% headroom to amortize further resizes */
277 unsigned n_idx = idx + (idx >> 3);
279 ARR_RESIZE(lower64_entry_t *, env->entries, n_idx);
280 memset(&env->entries[old], 0, (n_idx - old) * sizeof(env->entries[0]));
281 env->n_entries = n_idx;
283 env->entries[idx] = link;
284 env->flags |= MUST_BE_LOWERED;
285 } else if (is_Conv(node)) {
286 /* Conv nodes have two modes */
287 ir_node *pred = get_Conv_op(node);
288 mode = get_irn_mode(pred);
290 if (mode == env->high_signed || mode == env->high_unsigned) {
291 /* must lower this node either but don't need a link */
292 env->flags |= MUST_BE_LOWERED;
295 } else if (is_Call(node)) {
296 /* Special case: If the result of the Call is never used, we won't
297 * find a Proj with a mode that potentially triggers MUST_BE_LOWERED
298 * to be set. Thus, if we see a call, we check its result types and
299 * decide whether MUST_BE_LOWERED has to be set.
301 ir_type *tp = get_Call_type(node);
304 n_res = get_method_n_ress(tp);
305 for (i = 0; i < n_res; ++i) {
306 ir_type *rtp = get_method_res_type(tp, i);
308 if (is_Primitive_type(rtp)) {
309 ir_mode *rmode = get_type_mode(rtp);
311 if (rmode == env->high_signed || rmode == env->high_unsigned) {
312 env->flags |= MUST_BE_LOWERED;
/** Return the lowering entry (low/high word slots) allocated for @p node by
 *  prepare_links(); the node's idx must be within the entry table. */
319 lower64_entry_t *get_node_entry(ir_node *node)
321 unsigned idx = get_irn_idx(node);
322 assert(idx < env->n_entries);
323 return env->entries[idx];
/** Record the lowered replacement words of @p old: @p new_low becomes its
 *  low word and @p new_high its high word. */
326 void ir_set_dw_lowered(ir_node *old, ir_node *new_low, ir_node *new_high)
328 lower64_entry_t *entry = get_node_entry(old);
329 entry->low_word = new_low;
330 entry->high_word = new_high;
/** Return the unsigned word mode that double-word values are split into. */
333 ir_mode *ir_get_low_unsigned_mode(void)
335 return env->low_unsigned;
339 * Translate a Constant: create two.
/* Split a double-word Const into two word Consts: the low word is the
 * truncation to the unsigned word mode, the high word is the value shifted
 * right (arithmetically) by the word width and converted to 'mode'. */
341 static void lower_Const(ir_node *node, ir_mode *mode)
343 ir_graph *irg = get_irn_irg(node);
344 dbg_info *dbg = get_irn_dbg_info(node);
345 ir_mode *low_mode = env->low_unsigned;
346 ir_tarval *tv = get_Const_tarval(node);
347 ir_tarval *tv_l = tarval_convert_to(tv, low_mode);
348 ir_node *res_low = new_rd_Const(dbg, irg, tv_l);
349 ir_tarval *tv_shrs = tarval_shrs(tv, env->tv_mode_bits);
350 ir_tarval *tv_h = tarval_convert_to(tv_shrs, mode);
351 ir_node *res_high = new_rd_Const(dbg, irg, tv_h);
353 ir_set_dw_lowered(node, res_low, res_high);
357 * Translate a Load: create two.
/* Split a double-word Load into two word Loads chained through memory.
 * Address selection depends on endianness: on little endian the low word
 * lives at the original address and the high word at address + word bytes;
 * on big endian it is the other way round.  All Projs of the original Load
 * are rerouted onto the two new Loads. */
359 static void lower_Load(ir_node *node, ir_mode *mode)
361 ir_mode *low_mode = env->low_unsigned;
362 ir_graph *irg = get_irn_irg(node);
363 ir_node *adr = get_Load_ptr(node);
364 ir_node *mem = get_Load_mem(node);
369 ir_node *block = get_nodes_block(node);
370 ir_cons_flags volatility = get_Load_volatility(node) == volatility_is_volatile
371 ? cons_volatile : cons_none;
372 const ir_edge_t *edge;
373 const ir_edge_t *next;
375 if (env->params->little_endian) {
/* little endian: low word at adr (assignment outside this view),
 * high word at adr + sizeof(word) */
377 high = new_r_Add(block, adr, new_r_Const(irg, env->tv_mode_bytes), get_irn_mode(adr));
/* big endian: high word at adr, low word at adr + sizeof(word) */
379 low = new_r_Add(block, adr, new_r_Const(irg, env->tv_mode_bytes), get_irn_mode(adr));
383 /* create two loads */
384 dbg = get_irn_dbg_info(node);
385 low = new_rd_Load(dbg, block, mem, low, low_mode, volatility);
/* serialize the two loads through the first one's memory result */
386 proj_m = new_r_Proj(low, mode_M, pn_Load_M);
387 high = new_rd_Load(dbg, block, proj_m, high, mode, volatility);
389 foreach_out_edge_safe(node, edge, next) {
390 ir_node *proj = get_edge_src_irn(edge);
394 switch (get_Proj_proj(proj)) {
395 case pn_Load_M: /* Memory result. */
396 /* put it to the second one */
397 set_Proj_pred(proj, high);
399 case pn_Load_X_except: /* Execution result if exception occurred. */
400 /* put it to the first one */
401 set_Proj_pred(proj, low);
403 case pn_Load_res: { /* Result of load operation. */
404 ir_node *res_low = new_r_Proj(low, low_mode, pn_Load_res);
405 ir_node *res_high = new_r_Proj(high, mode, pn_Load_res);
406 ir_set_dw_lowered(proj, res_low, res_high);
410 assert(0 && "unexpected Proj number");
412 /* mark this proj: we have handled it already, otherwise we might fall
413 * into out new nodes. */
414 mark_irn_visited(proj);
419 * Translate a Store: create two.
/* Split a double-word Store into two word Stores chained through memory,
 * mirroring lower_Load()'s endianness-dependent address layout.  If the
 * value's words are not lowered yet, the node is queued on env->waitq and
 * retried later. */
421 static void lower_Store(ir_node *node, ir_mode *mode)
424 ir_node *block, *adr, *mem;
425 ir_node *low, *high, *proj_m;
427 ir_node *value = get_Store_value(node);
428 const lower64_entry_t *entry = get_node_entry(value);
429 ir_cons_flags volatility = get_Store_volatility(node) == volatility_is_volatile
430 ? cons_volatile : cons_none;
431 const ir_edge_t *edge;
432 const ir_edge_t *next;
437 if (! entry->low_word) {
438 /* not ready yet, wait */
439 pdeq_putr(env->waitq, node);
443 irg = get_irn_irg(node);
444 adr = get_Store_ptr(node);
445 mem = get_Store_mem(node);
446 block = get_nodes_block(node);
448 if (env->params->little_endian) {
/* little endian: low word at adr (assignment outside this view),
 * high word at adr + sizeof(word) */
450 high = new_r_Add(block, adr, new_r_Const(irg, env->tv_mode_bytes), get_irn_mode(adr));
/* big endian: high word at adr, low word at adr + sizeof(word) */
452 low = new_r_Add(block, adr, new_r_Const(irg, env->tv_mode_bytes), get_irn_mode(adr));
456 /* create two Stores */
457 dbg = get_irn_dbg_info(node);
458 low = new_rd_Store(dbg, block, mem, low, entry->low_word, volatility);
/* serialize the two stores through the first one's memory result */
459 proj_m = new_r_Proj(low, mode_M, pn_Store_M);
460 high = new_rd_Store(dbg, block, proj_m, high, entry->high_word, volatility);
462 foreach_out_edge_safe(node, edge, next) {
463 ir_node *proj = get_edge_src_irn(edge);
467 switch (get_Proj_proj(proj)) {
468 case pn_Store_M: /* Memory result. */
469 /* put it to the second one */
470 set_Proj_pred(proj, high);
472 case pn_Store_X_except: /* Execution result if exception occurred. */
473 /* put it to the first one */
474 set_Proj_pred(proj, low);
477 assert(0 && "unexpected Proj number");
479 /* mark this proj: we have handled it already, otherwise we might fall into
481 mark_irn_visited(proj);
486 * Return a node containing the address of the intrinsic emulation function.
488 * @param method the method type of the emulation function
489 * @param op the emulated ir_op
490 * @param imode the input mode of the emulated opcode
491 * @param omode the output mode of the emulated opcode
492 * @param env the lower environment
/* Caches entities per (op, imode, omode) triple in 'intrinsic_fkt'; on a
 * cache miss the entity is produced by the user-supplied callback in
 * env->params->create_intrinsic. */
494 static ir_node *get_intrinsic_address(ir_type *method, ir_op *op,
495 ir_mode *imode, ir_mode *omode)
499 op_mode_entry_t key, *entry;
506 entry = (op_mode_entry_t*)set_insert(intrinsic_fkt, &key, sizeof(key),
507 hash_ptr(op) ^ hash_ptr(imode) ^ (hash_ptr(omode) << 8));
509 /* create a new one */
510 ent = env->params->create_intrinsic(method, op, imode, omode, env->params->ctx);
512 assert(ent && "Intrinsic creator must return an entity");
/* hand back the entity address as a SymConst */
518 return new_r_SymConst(env->irg, mode_P_code, sym, symconst_addr_ent);
524 * Create an intrinsic Call.
/* Lower a double-word Div into a call to an emulation function taking four
 * word arguments (both operands split; word order per endianness) and
 * returning the result as two words.  All Projs of the Div are rerouted to
 * the corresponding Call Projs. */
526 static void lower_Div(ir_node *node, ir_mode *mode)
528 ir_node *left = get_Div_left(node);
529 ir_node *right = get_Div_right(node);
530 ir_node *block = get_nodes_block(node);
531 dbg_info *dbgi = get_irn_dbg_info(node);
532 ir_type *mtp = mode_is_signed(mode) ? binop_tp_s : binop_tp_u;
533 ir_mode *opmode = get_irn_op_mode(node);
535 = get_intrinsic_address(mtp, get_irn_op(node), opmode, opmode);
539 const ir_edge_t *edge;
540 const ir_edge_t *next;
/* argument word order matches the calling convention of the intrinsic */
542 if (env->params->little_endian) {
543 in[0] = get_lowered_low(left);
544 in[1] = get_lowered_high(left);
545 in[2] = get_lowered_low(right);
546 in[3] = get_lowered_high(right);
548 in[0] = get_lowered_high(left);
549 in[1] = get_lowered_low(left);
550 in[2] = get_lowered_high(right);
551 in[3] = get_lowered_low(right);
553 call = new_rd_Call(dbgi, block, get_Div_mem(node), addr, 4, in, mtp);
554 resproj = new_r_Proj(call, mode_T, pn_Call_T_result);
555 set_irn_pinned(call, get_irn_pinned(node));
557 foreach_out_edge_safe(node, edge, next) {
558 ir_node *proj = get_edge_src_irn(edge);
562 switch (get_Proj_proj(proj)) {
563 case pn_Div_M: /* Memory result. */
564 /* reroute to the call */
565 set_Proj_pred(proj, call);
566 set_Proj_proj(proj, pn_Call_M);
568 case pn_Div_X_regular:
569 set_Proj_pred(proj, call);
570 set_Proj_proj(proj, pn_Call_X_regular);
572 case pn_Div_X_except:
573 set_Proj_pred(proj, call);
574 set_Proj_proj(proj, pn_Call_X_except);
/* result case (label outside this view): split the call result */
577 if (env->params->little_endian) {
578 ir_node *res_low = new_r_Proj(resproj, env->low_unsigned, 0);
579 ir_node *res_high = new_r_Proj(resproj, mode, 1);
580 ir_set_dw_lowered(proj, res_low, res_high);
582 ir_node *res_low = new_r_Proj(resproj, env->low_unsigned, 1);
583 ir_node *res_high = new_r_Proj(resproj, mode, 0);
584 ir_set_dw_lowered(proj, res_low, res_high);
588 assert(0 && "unexpected Proj number");
590 /* mark this proj: we have handled it already, otherwise we might fall into
592 mark_irn_visited(proj);
599 * Create an intrinsic Call.
601 static void lower_Mod(ir_node *node, ir_mode *mode)
603 ir_node *left = get_Mod_left(node);
604 ir_node *right = get_Mod_right(node);
605 dbg_info *dbgi = get_irn_dbg_info(node);
606 ir_node *block = get_nodes_block(node);
607 ir_type *mtp = mode_is_signed(mode) ? binop_tp_s : binop_tp_u;
608 ir_mode *opmode = get_irn_op_mode(node);
610 = get_intrinsic_address(mtp, get_irn_op(node), opmode, opmode);
614 const ir_edge_t *edge;
615 const ir_edge_t *next;
617 if (env->params->little_endian) {
618 in[0] = get_lowered_low(left);
619 in[1] = get_lowered_high(left);
620 in[2] = get_lowered_low(right);
621 in[3] = get_lowered_high(right);
623 in[0] = get_lowered_high(left);
624 in[1] = get_lowered_low(left);
625 in[2] = get_lowered_high(right);
626 in[3] = get_lowered_low(right);
628 call = new_rd_Call(dbgi, block, get_Mod_mem(node), addr, 4, in, mtp);
629 resproj = new_r_Proj(call, mode_T, pn_Call_T_result);
630 set_irn_pinned(call, get_irn_pinned(node));
632 foreach_out_edge_safe(node, edge, next) {
633 ir_node *proj = get_edge_src_irn(edge);
637 switch (get_Proj_proj(proj)) {
638 case pn_Mod_M: /* Memory result. */
639 /* reroute to the call */
640 set_Proj_pred(proj, call);
641 set_Proj_proj(proj, pn_Call_M);
643 case pn_Div_X_regular:
644 set_Proj_pred(proj, call);
645 set_Proj_proj(proj, pn_Call_X_regular);
647 case pn_Mod_X_except:
648 set_Proj_pred(proj, call);
649 set_Proj_proj(proj, pn_Call_X_except);
652 if (env->params->little_endian) {
653 ir_node *res_low = new_r_Proj(resproj, env->low_unsigned, 0);
654 ir_node *res_high = new_r_Proj(resproj, mode, 1);
655 ir_set_dw_lowered(proj, res_low, res_high);
657 ir_node *res_low = new_r_Proj(resproj, env->low_unsigned, 1);
658 ir_node *res_high = new_r_Proj(resproj, mode, 0);
659 ir_set_dw_lowered(proj, res_low, res_high);
663 assert(0 && "unexpected Proj number");
665 /* mark this proj: we have handled it already, otherwise we might fall
666 * into out new nodes. */
667 mark_irn_visited(proj);
674 * Create an intrinsic Call.
/* Generic lowering of a memory-less double-word binop (Add, Sub, Mul, ...):
 * call an emulation function with the four operand words and split its
 * result into the node's low/high words. */
676 static void lower_binop(ir_node *node, ir_mode *mode)
678 ir_node *left = get_binop_left(node);
679 ir_node *right = get_binop_right(node);
680 dbg_info *dbgi = get_irn_dbg_info(node);
681 ir_node *block = get_nodes_block(node);
682 ir_graph *irg = get_irn_irg(block);
683 ir_type *mtp = mode_is_signed(mode) ? binop_tp_s : binop_tp_u;
684 ir_node *addr = get_intrinsic_address(mtp, get_irn_op(node), mode, mode);
/* argument word order matches the calling convention of the intrinsic */
689 if (env->params->little_endian) {
690 in[0] = get_lowered_low(left);
691 in[1] = get_lowered_high(left);
692 in[2] = get_lowered_low(right);
693 in[3] = get_lowered_high(right);
695 in[0] = get_lowered_high(left);
696 in[1] = get_lowered_low(left);
697 in[2] = get_lowered_high(right);
698 in[3] = get_lowered_low(right);
/* the emulation function has no memory side effects: use NoMem */
700 call = new_rd_Call(dbgi, block, get_irg_no_mem(irg), addr, 4, in, mtp);
701 resproj = new_r_Proj(call, mode_T, pn_Call_T_result);
702 set_irn_pinned(call, get_irn_pinned(node));
704 if (env->params->little_endian) {
705 ir_node *res_low = new_r_Proj(resproj, env->low_unsigned, 0);
706 ir_node *res_high = new_r_Proj(resproj, mode, 1);
707 ir_set_dw_lowered(node, res_low, res_high);
709 ir_node *res_low = new_r_Proj(resproj, env->low_unsigned, 1);
710 ir_node *res_high = new_r_Proj(resproj, mode, 0);
711 ir_set_dw_lowered(node, res_low, res_high);
715 static ir_node *create_conv(ir_node *block, ir_node *node, ir_mode *dest_mode)
717 if (get_irn_mode(node) == dest_mode)
719 return new_r_Conv(block, node, dest_mode);
723 * Moves node and all predecessors of node from from_bl to to_bl.
724 * Does not move predecessors of Phi nodes (or block nodes).
/* Recursive helper for part_block_dw(): re-homes 'node', its Projs (for
 * mode_T nodes) and all predecessors that still sit in from_bl — including
 * the implicit low/high-word replacements of already-lowered predecessors. */
726 static void move(ir_node *node, ir_node *from_bl, ir_node *to_bl)
731 set_nodes_block(node, to_bl);
/* move its Projs along: they must stay in the same block as the node */
734 if (get_irn_mode(node) == mode_T) {
735 const ir_edge_t *edge;
736 foreach_out_edge(node, edge) {
737 ir_node *proj = get_edge_src_irn(edge);
740 move(proj, from_bl, to_bl);
744 /* We must not move predecessors of Phi nodes, even if they are in
745 * from_bl. (because these are values from an earlier loop iteration
746 * which are not predecessors of node here)
752 arity = get_irn_arity(node);
753 for (i = 0; i < arity; i++) {
754 ir_node *pred = get_irn_n(node, i);
755 ir_mode *pred_mode = get_irn_mode(pred);
756 if (get_nodes_block(pred) == from_bl)
757 move(pred, from_bl, to_bl);
/* also drag along the lowered words of a double-word predecessor:
 * they are implicit dependencies not visible as graph edges yet */
758 if (pred_mode == env->high_signed || pred_mode == env->high_unsigned) {
759 ir_node *pred_low = get_lowered_low(pred);
760 ir_node *pred_high = get_lowered_high(pred);
761 if (get_nodes_block(pred_low) == from_bl)
762 move(pred_low, from_bl, to_bl);
763 if (pred_high != NULL && get_nodes_block(pred_high) == from_bl)
764 move(pred_high, from_bl, to_bl);
770 * We need a custom version of part_block_edges because during transformation
771 * not all data-dependencies are explicit yet if a lowered nodes users are not
773 * We can fix this by modifying move to look for such implicit dependencies.
774 * Additionally we have to keep the proj_2_block map updated
/* Split the block containing 'node': a fresh block takes over all control
 * predecessors (and 'node' plus its dependencies via move()), while the old
 * block is left predecessor-less so the caller can wire new control flow
 * into it.  Returns the old (now lower) block. */
776 static ir_node *part_block_dw(ir_node *node)
778 ir_graph *irg = get_irn_irg(node);
779 ir_node *old_block = get_nodes_block(node);
780 int n_cfgpreds = get_Block_n_cfgpreds(old_block);
781 ir_node **cfgpreds = get_Block_cfgpred_arr(old_block);
782 ir_node *new_block = new_r_Block(irg, n_cfgpreds, cfgpreds);
783 const ir_edge_t *edge;
784 const ir_edge_t *next;
786 /* old_block has no predecessors anymore for now */
787 set_irn_in(old_block, 0, NULL);
789 /* move node and its predecessors to new_block */
790 move(node, old_block, new_block);
792 /* move Phi nodes to new_block */
793 foreach_out_edge_safe(old_block, edge, next) {
794 ir_node *phi = get_edge_src_irn(edge);
797 set_nodes_block(phi, new_block);
/* Constructor signature shared by new_rd_Shr and new_rd_Shrs, so
 * lower_shr_helper() can build either right-shift variant. */
802 typedef ir_node* (*new_rd_shr_func)(dbg_info *dbgi, ir_node *block,
803 ir_node *left, ir_node *right,
/* Lower a double-word right shift (Shr or Shrs, selected via new_rd_shrs).
 * Builds an explicit two-way branch on whether the shift amount is below the
 * word width, computes the result words in each arm, and merges them with
 * Phis in the (split-off) lower block.  Only works for power-of-2,
 * modulo-shift, two's-complement modes. */
806 static void lower_shr_helper(ir_node *node, ir_mode *mode,
807 new_rd_shr_func new_rd_shrs)
809 ir_node *right = get_binop_right(node);
810 ir_node *left = get_binop_left(node);
811 ir_mode *shr_mode = get_irn_mode(node);
812 unsigned modulo_shift = get_mode_modulo_shift(shr_mode);
813 ir_mode *low_unsigned = env->low_unsigned;
814 unsigned modulo_shift2 = get_mode_modulo_shift(mode);
815 ir_graph *irg = get_irn_irg(node);
816 ir_node *left_low = get_lowered_low(left);
817 ir_node *left_high = get_lowered_high(left);
818 dbg_info *dbgi = get_irn_dbg_info(node);
819 ir_node *lower_block;
829 ir_node *lower_in[2];
830 ir_node *phi_low_in[2];
831 ir_node *phi_high_in[2];
833 /* this version is optimized for modulo shift architectures
834 * (and can't handle anything else) */
835 if (modulo_shift != get_mode_size_bits(shr_mode)
836 || modulo_shift2<<1 != modulo_shift) {
837 panic("Shr lowering only implemented for modulo shift shr operations");
839 if (!is_po2(modulo_shift) || !is_po2(modulo_shift2)) {
840 panic("Shr lowering only implemented for power-of-2 modes");
842 /* without 2-complement the -x instead of (bit_width-x) trick won't work */
843 if (get_mode_arithmetic(shr_mode) != irma_twos_complement) {
844 panic("Shr lowering only implemented for two-complement modes");
847 block = get_nodes_block(node);
849 /* if the right operand is a 64bit value, we're only interested in the
851 if (get_irn_mode(right) == env->high_unsigned) {
852 right = get_lowered_low(right);
854 /* shift should never have signed mode on the right */
855 assert(get_irn_mode(right) != env->high_signed);
856 right = create_conv(block, right, low_unsigned);
/* split the block: the merge Phis will live in lower_block */
859 lower_block = part_block_dw(node);
860 env->flags |= CF_CHANGED;
861 block = get_nodes_block(node);
863 /* add a Cmp to test if highest bit is set <=> whether we shift more
864 * than half the word width */
865 cnst = new_r_Const_long(irg, low_unsigned, modulo_shift2);
866 and = new_r_And(block, right, cnst, low_unsigned);
867 cnst = new_r_Const(irg, get_mode_null(low_unsigned));
868 cmp = new_rd_Cmp(dbgi, block, and, cnst, ir_relation_equal);
869 cond = new_rd_Cond(dbgi, block, cmp);
870 proj_true = new_r_Proj(cond, mode_X, pn_Cond_true);
871 proj_false = new_r_Proj(cond, mode_X, pn_Cond_false);
873 /* the true block => shift_width < 1word */
875 /* In theory the low value (for 64bit shifts) is:
876 * Or(High << (32-x)), Low >> x)
877 * In practice High << 32-x will fail when x is zero (since we have
878 * modulo shift and 32 will be 0). So instead we use:
879 * Or(High<<1<<~x, Low >> x)
881 ir_node *in[1] = { proj_true };
882 ir_node *block_true = new_r_Block(irg, ARRAY_SIZE(in), in);
883 ir_node *res_high = new_rd_shrs(dbgi, block_true, left_high,
885 ir_node *shift_low = new_rd_Shr(dbgi, block_true, left_low, right,
887 ir_node *not_shiftval = new_rd_Not(dbgi, block_true, right,
889 ir_node *conv = create_conv(block_true, left_high,
891 ir_node *one = new_r_Const(irg, get_mode_one(low_unsigned));
/* High << 1 << ~x == High << (bits - x) without the x==0 hazard */
892 ir_node *carry0 = new_rd_Shl(dbgi, block_true, conv, one,
894 ir_node *carry1 = new_rd_Shl(dbgi, block_true, carry0,
895 not_shiftval, low_unsigned);
896 ir_node *res_low = new_rd_Or(dbgi, block_true, shift_low, carry1,
898 lower_in[0] = new_r_Jmp(block_true);
899 phi_low_in[0] = res_low;
900 phi_high_in[0] = res_high;
903 /* false block => shift_width > 1word */
905 ir_node *in[1] = { proj_false };
906 ir_node *block_false = new_r_Block(irg, ARRAY_SIZE(in), in);
/* low result is the high word shifted by the remaining amount */
907 ir_node *conv = create_conv(block_false, left_high, low_unsigned);
908 ir_node *res_low = new_rd_shrs(dbgi, block_false, conv, right,
910 int cnsti = modulo_shift2-1;
911 ir_node *cnst2 = new_r_Const_long(irg, low_unsigned, cnsti);
/* arithmetic shift keeps the sign in the high word; logical gives 0 */
913 if (new_rd_shrs == new_rd_Shrs) {
914 res_high = new_rd_shrs(dbgi, block_false, left_high, cnst2, mode);
916 res_high = new_r_Const(irg, get_mode_null(mode));
918 lower_in[1] = new_r_Jmp(block_false);
919 phi_low_in[1] = res_low;
920 phi_high_in[1] = res_high;
923 /* patch lower block */
924 set_irn_in(lower_block, ARRAY_SIZE(lower_in), lower_in);
925 phi_low = new_r_Phi(lower_block, ARRAY_SIZE(phi_low_in), phi_low_in,
927 phi_high = new_r_Phi(lower_block, ARRAY_SIZE(phi_high_in), phi_high_in,
929 ir_set_dw_lowered(node, phi_low, phi_high);
/** Lower a double-word logical right shift via lower_shr_helper(). */
932 static void lower_Shr(ir_node *node, ir_mode *mode)
934 lower_shr_helper(node, mode, new_rd_Shr);
/** Lower a double-word arithmetic right shift via lower_shr_helper(). */
937 static void lower_Shrs(ir_node *node, ir_mode *mode)
939 lower_shr_helper(node, mode, new_rd_Shrs);
/* Lower a double-word left shift: the mirror image of lower_shr_helper().
 * Branches on whether the shift amount is below the word width; in the
 * "small shift" arm bits carried out of the low word are Or-ed into the
 * shifted high word; in the "large shift" arm the low word becomes 0 and
 * the high word is the low word shifted by the remaining amount.  Results
 * are merged by Phis in the split-off lower block. */
942 static void lower_Shl(ir_node *node, ir_mode *mode)
944 ir_node *right = get_binop_right(node);
945 ir_node *left = get_binop_left(node);
946 ir_mode *shr_mode = get_irn_mode(node);
947 unsigned modulo_shift = get_mode_modulo_shift(shr_mode);
948 ir_mode *low_unsigned = env->low_unsigned;
949 unsigned modulo_shift2 = get_mode_modulo_shift(mode);
950 ir_graph *irg = get_irn_irg(node);
951 ir_node *left_low = get_lowered_low(left);
952 ir_node *left_high = get_lowered_high(left);
953 dbg_info *dbgi = get_irn_dbg_info(node);
954 ir_node *lower_block = get_nodes_block(node);
964 ir_node *lower_in[2];
965 ir_node *phi_low_in[2];
966 ir_node *phi_high_in[2];
968 /* this version is optimized for modulo shift architectures
969 * (and can't handle anything else) */
970 if (modulo_shift != get_mode_size_bits(shr_mode)
971 || modulo_shift2<<1 != modulo_shift) {
972 panic("Shl lowering only implemented for modulo shift shl operations");
974 if (!is_po2(modulo_shift) || !is_po2(modulo_shift2)) {
975 panic("Shl lowering only implemented for power-of-2 modes");
977 /* without 2-complement the -x instead of (bit_width-x) trick won't work */
978 if (get_mode_arithmetic(shr_mode) != irma_twos_complement) {
979 panic("Shl lowering only implemented for two-complement modes");
982 /* if the right operand is a 64bit value, we're only interested in the
984 if (get_irn_mode(right) == env->high_unsigned) {
985 right = get_lowered_low(right);
987 /* shift should never have signed mode on the right */
988 assert(get_irn_mode(right) != env->high_signed);
989 right = create_conv(lower_block, right, low_unsigned);
/* block split (call outside this view) — new Phis go into lower_block */
993 env->flags |= CF_CHANGED;
994 block = get_nodes_block(node);
996 /* add a Cmp to test if highest bit is set <=> whether we shift more
997 * than half the word width */
998 cnst = new_r_Const_long(irg, low_unsigned, modulo_shift2);
999 and = new_r_And(block, right, cnst, low_unsigned);
1000 cnst = new_r_Const(irg, get_mode_null(low_unsigned));
1001 cmp = new_rd_Cmp(dbgi, block, and, cnst, ir_relation_equal);
1002 cond = new_rd_Cond(dbgi, block, cmp);
1003 proj_true = new_r_Proj(cond, mode_X, pn_Cond_true);
1004 proj_false = new_r_Proj(cond, mode_X, pn_Cond_false);
1006 /* the true block => shift_width < 1word */
1008 ir_node *in[1] = { proj_true };
1009 ir_node *block_true = new_r_Block(irg, ARRAY_SIZE(in), in);
1011 ir_node *res_low = new_rd_Shl(dbgi, block_true, left_low,
1012 right, low_unsigned);
1013 ir_node *shift_high = new_rd_Shl(dbgi, block_true, left_high, right,
1015 ir_node *not_shiftval = new_rd_Not(dbgi, block_true, right,
1017 ir_node *conv = create_conv(block_true, left_low, mode);
1018 ir_node *one = new_r_Const(irg, get_mode_one(low_unsigned));
/* Low >> 1 >> ~x == Low >> (bits - x): the carry bits into the high word */
1019 ir_node *carry0 = new_rd_Shr(dbgi, block_true, conv, one, mode);
1020 ir_node *carry1 = new_rd_Shr(dbgi, block_true, carry0,
1021 not_shiftval, mode);
1022 ir_node *res_high = new_rd_Or(dbgi, block_true, shift_high, carry1,
1024 lower_in[0] = new_r_Jmp(block_true);
1025 phi_low_in[0] = res_low;
1026 phi_high_in[0] = res_high;
1029 /* false block => shift_width > 1word */
1031 ir_node *in[1] = { proj_false };
1032 ir_node *block_false = new_r_Block(irg, ARRAY_SIZE(in), in);
1033 ir_node *res_low = new_r_Const(irg, get_mode_null(low_unsigned));
1034 ir_node *conv = create_conv(block_false, left_low, mode);
1035 ir_node *res_high = new_rd_Shl(dbgi, block_false, conv, right, mode);
1036 lower_in[1] = new_r_Jmp(block_false);
1037 phi_low_in[1] = res_low;
1038 phi_high_in[1] = res_high;
1041 /* patch lower block */
1042 set_irn_in(lower_block, ARRAY_SIZE(lower_in), lower_in);
1043 phi_low = new_r_Phi(lower_block, ARRAY_SIZE(phi_low_in), phi_low_in,
1045 phi_high = new_r_Phi(lower_block, ARRAY_SIZE(phi_high_in), phi_high_in,
1047 ir_set_dw_lowered(node, phi_low, phi_high);
1051 * Rebuild Rotl nodes into Or(Shl, Shr) and prepare all nodes.
/* irg_walk callback: double-word Rotl nodes have no intrinsic, so they are
 * rewritten as Or(Shl(x, y), Shr(x, bits - y)) first; everything else is
 * handed to prepare_links(). */
1053 static void prepare_links_and_handle_rotl(ir_node *node, void *data)
1056 if (is_Rotl(node)) {
1057 ir_mode *mode = get_irn_op_mode(node);
1059 ir_node *left, *shl, *shr, *ornode, *block, *sub, *c;
1060 ir_mode *omode, *rmode;
1063 optimization_state_t state;
/* word-sized Rotl can stay as it is */
1065 if (mode != env->high_signed && mode != env->high_unsigned) {
1066 prepare_links(node);
1070 /* replace the Rotl(x,y) by an Or(Shl(x,y), Shr(x,64-y)) */
1071 right = get_Rotl_right(node);
1072 irg = get_irn_irg(node);
1073 dbg = get_irn_dbg_info(node);
1074 omode = get_irn_mode(node);
1075 left = get_Rotl_left(node);
1076 block = get_nodes_block(node);
1077 shl = new_rd_Shl(dbg, block, left, right, omode);
1078 rmode = get_irn_mode(right);
1079 c = new_r_Const_long(irg, rmode, get_mode_size_bits(omode));
1080 sub = new_rd_Sub(dbg, block, c, right, rmode);
1081 shr = new_rd_Shr(dbg, block, left, sub, omode);
1083 /* switch optimization off here, or we will get the Rotl back */
1084 save_optimization_state(&state);
1085 set_opt_algebraic_simplification(0);
1086 ornode = new_rd_Or(dbg, block, shl, shr, omode);
1087 restore_optimization_state(&state);
1089 exchange(node, ornode);
1091 /* do lowering on the new nodes */
1096 prepare_links(ornode);
/* default path: not a Rotl, just collect lowering info */
1100 prepare_links(node);
1104 * Translate an Unop.
1106 * Create an intrinsic Call.
/* Lower a double-word unary operation (e.g. Minus): call an emulation
 * function with the operand's two words and split the two-word result. */
1108 static void lower_unop(ir_node *node, ir_mode *mode)
1110 ir_node *op = get_unop_op(node);
1111 dbg_info *dbgi = get_irn_dbg_info(node);
1112 ir_node *block = get_nodes_block(node);
1113 ir_graph *irg = get_irn_irg(block);
1114 ir_type *mtp = mode_is_signed(mode) ? unop_tp_s : unop_tp_u;
1115 ir_op *irop = get_irn_op(node);
1116 ir_node *addr = get_intrinsic_address(mtp, irop, mode, mode);
1117 ir_node *nomem = get_irg_no_mem(irg);
/* argument word order matches the calling convention of the intrinsic */
1122 if (env->params->little_endian) {
1123 in[0] = get_lowered_low(op);
1124 in[1] = get_lowered_high(op);
1126 in[0] = get_lowered_high(op);
1127 in[1] = get_lowered_low(op);
1129 call = new_rd_Call(dbgi, block, nomem, addr, 2, in, mtp);
1130 resproj = new_r_Proj(call, mode_T, pn_Call_T_result);
1131 set_irn_pinned(call, get_irn_pinned(node));
1133 if (env->params->little_endian) {
1134 ir_node *res_low = new_r_Proj(resproj, env->low_unsigned, 0);
1135 ir_node *res_high = new_r_Proj(resproj, mode, 1);
1136 ir_set_dw_lowered(node, res_low, res_high);
1138 ir_node *res_low = new_r_Proj(resproj, env->low_unsigned, 1);
1139 ir_node *res_high = new_r_Proj(resproj, mode, 0);
1140 ir_set_dw_lowered(node, res_low, res_high);
1145 * Translate a logical binop.
1147 * Create two logical binops.
/* Bitwise ops need no intrinsic: apply the word-sized constructor once to
 * the low words and once to the high words. */
1149 static void lower_binop_logical(ir_node *node, ir_mode *mode,
1150 ir_node *(*constr_rd)(dbg_info *db, ir_node *block, ir_node *op1, ir_node *op2, ir_mode *mode) )
1152 ir_node *left = get_binop_left(node);
1153 ir_node *right = get_binop_right(node);
1154 const lower64_entry_t *left_entry = get_node_entry(left);
1155 const lower64_entry_t *right_entry = get_node_entry(right);
1156 dbg_info *dbgi = get_irn_dbg_info(node);
1157 ir_node *block = get_nodes_block(node);
1159 = constr_rd(dbgi, block, left_entry->low_word, right_entry->low_word,
1162 = constr_rd(dbgi, block, left_entry->high_word, right_entry->high_word,
1164 ir_set_dw_lowered(node, res_low, res_high);
/** Lower a double-word And: delegates to the word-wise bitwise lowering. */
1167 static void lower_And(ir_node *node, ir_mode *mode)
1169 lower_binop_logical(node, mode, new_rd_And);
/** Lower a double-word Or: delegates to the word-wise bitwise lowering. */
1172 static void lower_Or(ir_node *node, ir_mode *mode)
1174 lower_binop_logical(node, mode, new_rd_Or);
/** Lower a double-word Eor (xor): delegates to the word-wise bitwise lowering. */
1177 static void lower_Eor(ir_node *node, ir_mode *mode)
1179 lower_binop_logical(node, mode, new_rd_Eor);
1185 * Create two logical Nots.
/**
 * Translate a double-word Not: create one Not per word half.
 * The low word always uses the unsigned low mode; the high word keeps the
 * (possibly signed) high-half mode.
 */
1187 static void lower_Not(ir_node *node, ir_mode *mode)
1189 ir_node *op = get_Not_op(node);
1190 const lower64_entry_t *op_entry = get_node_entry(op);
1191 dbg_info *dbgi = get_irn_dbg_info(node);
1192 ir_node *block = get_nodes_block(node);
1194 = new_rd_Not(dbgi, block, op_entry->low_word, env->low_unsigned);
1196 = new_rd_Not(dbgi, block, op_entry->high_word, mode);
1197 ir_set_dw_lowered(node, res_low, res_high);
/**
 * Check whether a Cmp is (or is equivalent to) a pure equality/inequality
 * test.  Such comparisons can be lowered cheaply as
 * or(x_low^y_low, x_high^y_high) ==/!= 0 instead of cascaded conditionals.
 * Besides literal ==/!= this also recognizes comparisons against the
 * constant 0 that are equivalent to an (in)equality test.
 */
1200 static bool is_equality_cmp(const ir_node *node)
1202 ir_relation relation = get_Cmp_relation(node);
1203 ir_node *left = get_Cmp_left(node);
1204 ir_node *right = get_Cmp_right(node);
1205 ir_mode *mode = get_irn_mode(left);
1207 /* this probably makes no sense if unordered is involved */
1208 assert(!mode_is_float(mode));
1210 if (relation == ir_relation_equal || relation == ir_relation_less_greater)
/* from here on only comparisons against the constant 0 qualify */
1213 if (!is_Const(right) || !is_Const_null(right))
1215 if (mode_is_signed(mode)) {
1216 return relation == ir_relation_less_greater;
/* unsigned: x > 0 is the same as x != 0 */
1218 return relation == ir_relation_greater;
/** Return the (single) destination block of a control-flow node. */
1222 static ir_node *get_cfop_destination(const ir_node *cfop)
1224 const ir_edge_t *first = get_irn_out_edge_first(cfop);
1225 /* we should only have 1 destination */
1226 assert(get_irn_n_edges(cfop) == 1);
1227 return get_edge_src_irn(first);
/**
 * Lower a Switch: 64bit selectors are rejected (no 64bit jump tables);
 * everything else just recurses into the selector.
 */
1230 static void lower_Switch(ir_node *node, ir_mode *high_mode)
1232 ir_node *selector = get_Switch_selector(node);
1233 ir_mode *mode = get_irn_mode(selector);
1235 if (mode == env->high_signed || mode == env->high_unsigned) {
1236 /* we can't really handle Switch with 64bit offsets */
1237 panic("Switch with 64bit jumptable not supported");
1239 lower_node(selector);
/**
 * Translate a Cond whose selector is a double-word Cmp by splitting it into
 * a cascade of word-sized compares and conditional jumps:
 *   - ==/!= against anything: rewritten to or(xor(low),xor(high)) ==/!= 0
 *   - == : high words equal AND low words equal (two nested Conds)
 *   - != : high words differ OR low words differ
 *   - other relations: a REL b  <==>  a_h REL b_h || (a_h == b_h && a_l rel b_l)
 * New blocks are inserted and the original Projs are rewired, so CF_CHANGED
 * is set at the end.
 * NOTE(review): many structural lines (braces, else branches, declarations)
 * are elided in this excerpt; visible code kept verbatim.
 */
1245 static void lower_Cond(ir_node *node, ir_mode *high_mode)
1247 ir_node *left, *right, *block;
1248 ir_node *sel = get_Cond_selector(node);
1250 const lower64_entry_t *lentry, *rentry;
1251 ir_node *projT = NULL, *projF = NULL;
1252 ir_node *new_bl, *irn;
1253 ir_node *projHF, *projHT;
1255 ir_relation relation;
1258 const ir_edge_t *edge;
1259 const ir_edge_t *next;
1268 left = get_Cmp_left(sel);
1269 cmp_mode = get_irn_mode(left);
/* nothing to do unless the compared values are double-word */
1270 if (cmp_mode != env->high_signed && cmp_mode != env->high_unsigned) {
1275 right = get_Cmp_right(sel);
1278 lentry = get_node_entry(left);
1279 rentry = get_node_entry(right);
1281 /* all right, build the code */
/* locate the true and false Projs of the Cond */
1282 foreach_out_edge_safe(node, edge, next) {
1283 ir_node *proj = get_edge_src_irn(edge);
1287 proj_nr = get_Proj_proj(proj);
1289 if (proj_nr == pn_Cond_true) {
1290 assert(projT == NULL && "more than one Proj(true)");
1293 assert(proj_nr == pn_Cond_false);
1294 assert(projF == NULL && "more than one Proj(false)");
1297 mark_irn_visited(proj);
1299 assert(projT && projF);
1301 /* create a new high compare */
1302 block = get_nodes_block(node);
1303 irg = get_Block_irg(block);
1304 dbg = get_irn_dbg_info(sel);
1305 relation = get_Cmp_relation(sel);
1307 if (is_equality_cmp(sel)) {
1308 /* x ==/!= y ==> or(x_low^y_low,x_high^y_high) ==/!= 0 */
1309 ir_mode *mode = env->low_unsigned;
1310 ir_node *low_left = new_rd_Conv(dbg, block, lentry->low_word, mode);
1311 ir_node *high_left = new_rd_Conv(dbg, block, lentry->high_word, mode);
1312 ir_node *low_right = new_rd_Conv(dbg, block, rentry->low_word, mode);
1313 ir_node *high_right = new_rd_Conv(dbg, block, rentry->high_word, mode);
1314 ir_node *xor_low = new_rd_Eor(dbg, block, low_left, low_right, mode);
1315 ir_node *xor_high = new_rd_Eor(dbg, block, high_left, high_right, mode);
1316 ir_node *ornode = new_rd_Or(dbg, block, xor_low, xor_high, mode);
1317 ir_node *cmp = new_rd_Cmp(dbg, block, ornode, new_r_Const(irg, get_mode_null(mode)), relation);
1318 set_Cond_selector(node, cmp);
1322 if (relation == ir_relation_equal) {
1324 /* simple case:a == b <==> a_h == b_h && a_l == b_l */
1325 dst_blk = get_cfop_destination(projF);
1327 irn = new_rd_Cmp(dbg, block, lentry->high_word, rentry->high_word,
1329 dbg = get_irn_dbg_info(node);
1330 irn = new_rd_Cond(dbg, block, irn);
/* high words unequal: jump straight to the false destination */
1332 projHF = new_r_Proj(irn, mode_X, pn_Cond_false);
1333 mark_irn_visited(projHF);
1334 exchange(projF, projHF);
1336 projHT = new_r_Proj(irn, mode_X, pn_Cond_true);
1337 mark_irn_visited(projHT);
/* high words equal: fall into a new block testing the low words */
1339 new_bl = new_r_Block(irg, 1, &projHT);
1341 dbg = get_irn_dbg_info(sel);
1342 irn = new_rd_Cmp(dbg, new_bl, lentry->low_word, rentry->low_word,
1344 dbg = get_irn_dbg_info(node);
1345 irn = new_rd_Cond(dbg, new_bl, irn);
1347 proj = new_r_Proj(irn, mode_X, pn_Cond_false);
1348 mark_irn_visited(proj);
1349 add_block_cf_input(dst_blk, projHF, proj);
1351 proj = new_r_Proj(irn, mode_X, pn_Cond_true);
1352 mark_irn_visited(proj);
1353 exchange(projT, proj);
1354 } else if (relation == ir_relation_less_greater) {
1356 /* simple case:a != b <==> a_h != b_h || a_l != b_l */
1357 dst_blk = get_cfop_destination(projT);
1359 irn = new_rd_Cmp(dbg, block, lentry->high_word, rentry->high_word,
1360 ir_relation_less_greater);
1361 dbg = get_irn_dbg_info(node);
1362 irn = new_rd_Cond(dbg, block, irn);
/* high words differ: jump straight to the true destination */
1364 projHT = new_r_Proj(irn, mode_X, pn_Cond_true);
1365 mark_irn_visited(projHT);
1366 exchange(projT, projHT);
1368 projHF = new_r_Proj(irn, mode_X, pn_Cond_false);
1369 mark_irn_visited(projHF);
1371 new_bl = new_r_Block(irg, 1, &projHF);
1373 dbg = get_irn_dbg_info(sel);
1374 irn = new_rd_Cmp(dbg, new_bl, lentry->low_word, rentry->low_word,
1375 ir_relation_less_greater);
1376 dbg = get_irn_dbg_info(node);
1377 irn = new_rd_Cond(dbg, new_bl, irn);
1379 proj = new_r_Proj(irn, mode_X, pn_Cond_true);
1380 mark_irn_visited(proj);
1381 add_block_cf_input(dst_blk, projHT, proj);
1383 proj = new_r_Proj(irn, mode_X, pn_Cond_false);
1384 mark_irn_visited(proj);
1385 exchange(projF, proj);
1388 /* a rel b <==> a_h REL b_h || (a_h == b_h && a_l rel b_l) */
1389 ir_node *dstT, *dstF, *newbl_eq, *newbl_l;
1392 dstT = get_cfop_destination(projT);
1393 dstF = get_cfop_destination(projF);
/* strict high compare (relation without the equal bit) decides directly */
1395 irn = new_rd_Cmp(dbg, block, lentry->high_word, rentry->high_word,
1396 relation & ~ir_relation_equal);
1397 dbg = get_irn_dbg_info(node);
1398 irn = new_rd_Cond(dbg, block, irn);
1400 projHT = new_r_Proj(irn, mode_X, pn_Cond_true);
1401 mark_irn_visited(projHT);
1403 projHF = new_r_Proj(irn, mode_X, pn_Cond_false);
1404 mark_irn_visited(projHF);
/* otherwise test high words for equality ... */
1406 newbl_eq = new_r_Block(irg, 1, &projHF);
1408 irn = new_rd_Cmp(dbg, block, lentry->high_word, rentry->high_word,
1410 irn = new_rd_Cond(dbg, newbl_eq, irn);
1412 projEqF = new_r_Proj(irn, mode_X, pn_Cond_false);
1413 mark_irn_visited(projEqF);
1415 proj = new_r_Proj(irn, mode_X, pn_Cond_true);
1416 mark_irn_visited(proj);
/* ... and if equal, the low-word compare decides */
1418 newbl_l = new_r_Block(irg, 1, &proj);
1420 dbg = get_irn_dbg_info(sel);
1421 irn = new_rd_Cmp(dbg, newbl_l, lentry->low_word, rentry->low_word,
1423 dbg = get_irn_dbg_info(node);
1424 irn = new_rd_Cond(dbg, newbl_l, irn);
1426 proj = new_r_Proj(irn, mode_X, pn_Cond_true);
1427 mark_irn_visited(proj);
1428 add_block_cf_input(dstT, projT, proj);
1430 proj = new_r_Proj(irn, mode_X, pn_Cond_false);
1431 mark_irn_visited(proj);
1432 add_block_cf_input(dstF, projF, proj);
1434 exchange(projT, projHT);
1435 exchange(projF, projEqF);
1438 /* we have changed the control flow */
1439 env->flags |= CF_CHANGED;
1443 * Translate a Conv to higher_signed
/**
 * Translate a Conv whose result is a double-word mode.
 * Integer/reference sources get an explicitly built high word (sign
 * extension via Shrs for signed sources, a zero constant otherwise);
 * mode_b gets a zero high word; all other sources (e.g. floats) go through
 * a conversion intrinsic returning both halves.
 */
1445 static void lower_Conv_to_Ll(ir_node *node)
1447 ir_mode *omode = get_irn_mode(node);
1448 ir_node *op = get_Conv_op(node);
1449 ir_mode *imode = get_irn_mode(op);
1450 ir_graph *irg = get_irn_irg(node);
1451 ir_node *block = get_nodes_block(node);
1452 dbg_info *dbg = get_irn_dbg_info(node);
1456 ir_mode *low_unsigned = env->low_unsigned;
1458 = mode_is_signed(omode) ? env->low_signed : low_unsigned;
1460 if (mode_is_int(imode) || mode_is_reference(imode)) {
1461 if (imode == env->high_signed || imode == env->high_unsigned) {
1462 /* a Conv from Lu to Ls or Ls to Lu */
1463 const lower64_entry_t *op_entry = get_node_entry(op);
1464 res_low = op_entry->low_word;
1465 res_high = new_rd_Conv(dbg, block, op_entry->high_word, low_signed);
1467 /* simple case: create a high word */
1468 if (imode != low_unsigned)
1469 op = new_rd_Conv(dbg, block, op, low_unsigned);
1473 if (mode_is_signed(imode)) {
/* sign-extend: high word = low word >> (bits-1), arithmetically */
1474 int c = get_mode_size_bits(low_signed) - 1;
1475 ir_node *cnst = new_r_Const_long(irg, low_unsigned, c);
1476 if (get_irn_mode(op) != low_signed)
1477 op = new_rd_Conv(dbg, block, op, low_signed);
1478 res_high = new_rd_Shrs(dbg, block, op, cnst, low_signed);
/* unsigned source: zero-extend with a 0 high word */
1480 res_high = new_r_Const(irg, get_mode_null(low_signed));
1483 } else if (imode == mode_b) {
1484 res_low = new_rd_Conv(dbg, block, op, low_unsigned);
1485 res_high = new_r_Const(irg, get_mode_null(low_signed));
1487 ir_node *irn, *call;
1488 ir_type *mtp = get_conv_type(imode, omode);
1490 irn = get_intrinsic_address(mtp, get_irn_op(node), imode, omode);
1491 call = new_rd_Call(dbg, block, get_irg_no_mem(irg), irn, 1, &op, mtp);
1492 set_irn_pinned(call, get_irn_pinned(node));
1493 irn = new_r_Proj(call, mode_T, pn_Call_T_result);
1495 res_low = new_r_Proj(irn, low_unsigned, 0);
1496 res_high = new_r_Proj(irn, low_signed, 1);
1498 ir_set_dw_lowered(node, res_low, res_high);
1502 * Translate a Conv from higher_unsigned
/**
 * Translate a Conv whose operand is a double-word mode.
 * Integer/reference targets simply use the low word (truncation);
 * mode_b targets test (low|high) != 0; all other targets (e.g. floats)
 * go through a conversion intrinsic taking both halves.
 */
1504 static void lower_Conv_from_Ll(ir_node *node)
1506 ir_node *op = get_Conv_op(node);
1507 ir_mode *omode = get_irn_mode(node);
1508 ir_node *block = get_nodes_block(node);
1509 dbg_info *dbg = get_irn_dbg_info(node);
1510 ir_graph *irg = get_irn_irg(node);
1511 const lower64_entry_t *entry = get_node_entry(op);
1513 if (mode_is_int(omode) || mode_is_reference(omode)) {
/* truncating conversion: the low word carries all needed bits */
1514 op = entry->low_word;
1516 /* simple case: create a high word */
1517 if (omode != env->low_unsigned)
1518 op = new_rd_Conv(dbg, block, op, omode);
1520 set_Conv_op(node, op);
1521 } else if (omode == mode_b) {
1522 /* llu ? true : false <=> (low|high) ? true : false */
1523 ir_mode *mode = env->low_unsigned;
1524 ir_node *ornode = new_rd_Or(dbg, block, entry->low_word,
1525 entry->high_word, mode);
1526 set_Conv_op(node, ornode);
1528 ir_node *irn, *call, *in[2];
1529 ir_mode *imode = get_irn_mode(op);
1530 ir_type *mtp = get_conv_type(imode, omode);
1533 irn = get_intrinsic_address(mtp, get_irn_op(node), imode, omode);
1534 in[0] = entry->low_word;
1535 in[1] = entry->high_word;
1537 call = new_rd_Call(dbg, block, get_irg_no_mem(irg), irn, 2, in, mtp);
1538 set_irn_pinned(call, get_irn_pinned(node));
1539 irn = new_r_Proj(call, mode_T, pn_Call_T_result);
1540 res = new_r_Proj(irn, omode, 0);
1542 exchange(node, res);
/**
 * Translate a double-word Cmp into word-sized compares combined with
 * boolean And/Or (analogous to lower_Cond, but producing a mode_b value
 * instead of control flow):
 *   ==/!=          : or(xor(low),xor(high)) ==/!= 0
 *   ==             : high == && low ==
 *   !=             : high != || low !=
 *   other relation : high REL || (high == && low rel)
 */
1549 static void lower_Cmp(ir_node *cmp, ir_mode *m)
1551 ir_node *l = get_Cmp_left(cmp);
1552 ir_mode *cmp_mode = get_irn_mode(l);
1553 ir_node *r, *low, *high, *t, *res;
1554 ir_relation relation;
1557 const lower64_entry_t *lentry;
1558 const lower64_entry_t *rentry;
/* only double-word compares need lowering */
1561 if (cmp_mode != env->high_signed && cmp_mode != env->high_unsigned)
1564 r = get_Cmp_right(cmp);
1565 lentry = get_node_entry(l);
1566 rentry = get_node_entry(r);
1567 relation = get_Cmp_relation(cmp);
1568 block = get_nodes_block(cmp);
1569 dbg = get_irn_dbg_info(cmp);
1571 /* easy case for x ==/!= 0 (see lower_Cond for details) */
1572 if (is_equality_cmp(cmp)) {
1573 ir_graph *irg = get_irn_irg(cmp);
1574 ir_mode *mode = env->low_unsigned;
1575 ir_node *low_left = new_rd_Conv(dbg, block, lentry->low_word, mode);
1576 ir_node *high_left = new_rd_Conv(dbg, block, lentry->high_word, mode);
1577 ir_node *low_right = new_rd_Conv(dbg, block, rentry->low_word, mode);
1578 ir_node *high_right = new_rd_Conv(dbg, block, rentry->high_word, mode);
1579 ir_node *xor_low = new_rd_Eor(dbg, block, low_left, low_right, mode);
1580 ir_node *xor_high = new_rd_Eor(dbg, block, high_left, high_right, mode);
1581 ir_node *ornode = new_rd_Or(dbg, block, xor_low, xor_high, mode);
1582 ir_node *new_cmp = new_rd_Cmp(dbg, block, ornode, new_r_Const(irg, get_mode_null(mode)), relation);
1583 exchange(cmp, new_cmp);
1587 if (relation == ir_relation_equal) {
1588 /* simple case:a == b <==> a_h == b_h && a_l == b_l */
1589 low = new_rd_Cmp(dbg, block, lentry->low_word, rentry->low_word,
1591 high = new_rd_Cmp(dbg, block, lentry->high_word, rentry->high_word,
1593 res = new_rd_And(dbg, block, low, high, mode_b);
1594 } else if (relation == ir_relation_less_greater) {
1595 /* simple case:a != b <==> a_h != b_h || a_l != b_l */
1596 low = new_rd_Cmp(dbg, block, lentry->low_word, rentry->low_word,
1598 high = new_rd_Cmp(dbg, block, lentry->high_word, rentry->high_word,
1600 res = new_rd_Or(dbg, block, low, high, mode_b);
1602 /* a rel b <==> a_h REL b_h || (a_h == b_h && a_l rel b_l) */
1603 ir_node *high1 = new_rd_Cmp(dbg, block, lentry->high_word,
1604 rentry->high_word, relation & ~ir_relation_equal);
1605 low = new_rd_Cmp(dbg, block, lentry->low_word, rentry->low_word,
1607 high = new_rd_Cmp(dbg, block, lentry->high_word, rentry->high_word,
1609 t = new_rd_And(dbg, block, low, high, mode_b);
1610 res = new_rd_Or(dbg, block, high1, t, mode_b);
/**
 * Dispatch Conv lowering: to a double-word mode -> lower_Conv_to_Ll,
 * from a double-word mode -> lower_Conv_from_Ll, otherwise untouched.
 */
1618 static void lower_Conv(ir_node *node, ir_mode *mode)
1620 mode = get_irn_mode(node);
1622 if (mode == env->high_signed || mode == env->high_unsigned) {
1623 lower_Conv_to_Ll(node);
1625 ir_mode *op_mode = get_irn_mode(get_Conv_op(node));
1627 if (op_mode == env->high_signed || op_mode == env->high_unsigned) {
1628 lower_Conv_from_Ll(node);
/**
 * Renumber the parameter entities on the frame type after the method type
 * was lowered: every double-word parameter occupies two slots in the
 * lowered type, so all following parameter numbers shift.  The entity
 * types themselves are deliberately left unchanged (see comment below);
 * lowered double-word parameters are only marked via doubleword_low_mode.
 */
1633 static void fix_parameter_entities(ir_graph *irg)
1635 ir_entity *entity = get_irg_entity(irg);
1636 ir_type *mtp = get_entity_type(entity);
1637 ir_type *orig_mtp = get_type_link(mtp);
1639 size_t orig_n_params = get_method_n_params(orig_mtp);
1640 ir_entity **parameter_entities;
1642 parameter_entities = ALLOCANZ(ir_entity*, orig_n_params);
1644 ir_type *frame_type = get_irg_frame_type(irg);
1645 size_t n = get_compound_n_members(frame_type);
1649 /* collect parameter entities */
1650 for (i = 0; i < n; ++i) {
1651 ir_entity *entity = get_compound_member(frame_type, i);
1653 if (!is_parameter_entity(entity))
1655 p = get_entity_parameter_number(entity);
/* the va_start pseudo-parameter has no ordinary index */
1656 if (p == IR_VA_START_PARAMETER_NUMBER)
1658 assert(p < orig_n_params);
1659 assert(parameter_entities[p] == NULL);
1660 parameter_entities[p] = entity;
1663 /* adjust indices */
1665 for (i = 0; i < orig_n_params; ++i, ++n_param) {
1666 ir_entity *entity = parameter_entities[i];
1670 set_entity_parameter_number(entity, n_param);
1672 tp = get_method_param_type(orig_mtp, i);
1673 if (is_Primitive_type(tp)) {
1674 ir_mode *mode = get_type_mode(tp);
1675 if (mode == env->high_signed || mode == env->high_unsigned) {
1677 /* note that we do not change the type of the parameter
1678 * entities, as calling convention fixup later still needs to
1679 * know which is/was a lowered doubleword.
1680 * So we just mark/remember it for later */
1681 if (entity != NULL) {
1682 assert(entity->attr.parameter.doubleword_low_mode == NULL);
1683 entity->attr.parameter.doubleword_low_mode
1684 = env->low_unsigned;
1692 * Lower the method type.
1694 * @param env the lower environment
1695 * @param mtp the method type to lower
1697 * @return the lowered type
/**
 * Lower a method type: each double-word parameter/result is replaced by a
 * pair of word-sized ones (order depending on endianness).  Results are
 * cached in the lowered_type pmap; a type without double words is returned
 * unchanged with its type link cleared (so lower_Start can detect it).
 *
 * @param mtp  the method type to lower
 * @return the lowered type
 */
1699 static ir_type *lower_mtp(ir_type *mtp)
1703 size_t orig_n_params;
1707 bool must_be_lowered;
/* cached from an earlier lowering? */
1709 res = (ir_type*)pmap_get(lowered_type, mtp);
1713 orig_n_params = get_method_n_params(mtp);
1714 orig_n_res = get_method_n_ress(mtp);
1715 n_param = orig_n_params;
1717 must_be_lowered = false;
1719 /* count new number of params */
1720 for (i = orig_n_params; i > 0;) {
1721 ir_type *tp = get_method_param_type(mtp, --i);
1723 if (is_Primitive_type(tp)) {
1724 ir_mode *mode = get_type_mode(tp);
1726 if (mode == env->high_signed || mode == env->high_unsigned) {
1728 must_be_lowered = true;
1733 /* count new number of results */
1734 for (i = orig_n_res; i > 0;) {
1735 ir_type *tp = get_method_res_type(mtp, --i);
1737 if (is_Primitive_type(tp)) {
1738 ir_mode *mode = get_type_mode(tp);
1740 if (mode == env->high_signed || mode == env->high_unsigned) {
1742 must_be_lowered = true;
1746 if (!must_be_lowered) {
1747 set_type_link(mtp, NULL);
1751 res = new_d_type_method(n_param, n_res, get_type_dbg_info(mtp));
1753 /* set param types and result types */
1754 for (i = n_param = 0; i < orig_n_params; ++i) {
1755 ir_type *tp = get_method_param_type(mtp, i);
1757 if (is_Primitive_type(tp)) {
1758 ir_mode *mode = get_type_mode(tp);
1760 if (mode == env->high_signed) {
/* little endian: low (unsigned) word first, then signed high word */
1761 if (env->params->little_endian) {
1762 set_method_param_type(res, n_param++, tp_u);
1763 set_method_param_type(res, n_param++, tp_s);
1765 set_method_param_type(res, n_param++, tp_s);
1766 set_method_param_type(res, n_param++, tp_u);
1768 } else if (mode == env->high_unsigned) {
1769 set_method_param_type(res, n_param++, tp_u);
1770 set_method_param_type(res, n_param++, tp_u);
1772 set_method_param_type(res, n_param, tp);
1776 set_method_param_type(res, n_param, tp);
1780 for (i = n_res = 0; i < orig_n_res; ++i) {
1781 ir_type *tp = get_method_res_type(mtp, i);
1783 if (is_Primitive_type(tp)) {
1784 ir_mode *mode = get_type_mode(tp);
1786 if (mode == env->high_signed) {
1787 if (env->params->little_endian) {
1788 set_method_res_type(res, n_res++, tp_u);
1789 set_method_res_type(res, n_res++, tp_s);
1791 set_method_res_type(res, n_res++, tp_s);
1792 set_method_res_type(res, n_res++, tp_u);
1794 } else if (mode == env->high_unsigned) {
1795 set_method_res_type(res, n_res++, tp_u);
1796 set_method_res_type(res, n_res++, tp_u);
1798 set_method_res_type(res, n_res++, tp);
1801 set_method_res_type(res, n_res++, tp);
1805 set_method_variadicity(res, get_method_variadicity(mtp));
1806 set_method_calling_convention(res, get_method_calling_convention(mtp));
1807 set_method_additional_properties(res, get_method_additional_properties(mtp));
/* link lowered type back to the original for later fixups */
1809 set_higher_type(res, mtp);
1810 set_type_link(res, mtp);
1812 pmap_insert(lowered_type, mtp, res);
1817 * Translate a Return.
/**
 * Translate a Return: if any result value is double-word, rebuild the
 * Return's in-array so each such value contributes its two word halves
 * (ordered by endianness), matching the already-lowered method type.
 */
1819 static void lower_Return(ir_node *node, ir_mode *mode)
1821 ir_graph *irg = get_irn_irg(node);
1822 ir_entity *ent = get_irg_entity(irg);
1823 ir_type *mtp = get_entity_type(ent);
1829 /* check if this return must be lowered */
1830 for (i = 0, n = get_Return_n_ress(node); i < n; ++i) {
1831 ir_node *pred = get_Return_res(node, i);
1832 ir_mode *rmode = get_irn_op_mode(pred);
1834 if (rmode == env->high_signed || rmode == env->high_unsigned)
1840 ent = get_irg_entity(irg);
1841 mtp = get_entity_type(ent);
1843 /* create a new in array */
/* +1 for the memory predecessor */
1844 NEW_ARR_A(ir_node *, in, get_method_n_ress(mtp) + 1);
1846 in[j++] = get_Return_mem(node);
1848 for (i = 0, n = get_Return_n_ress(node); i < n; ++i) {
1849 ir_node *pred = get_Return_res(node, i);
1850 ir_mode *pred_mode = get_irn_mode(pred);
1852 if (pred_mode == env->high_signed || pred_mode == env->high_unsigned) {
1853 const lower64_entry_t *entry = get_node_entry(pred);
1854 if (env->params->little_endian) {
1855 in[j++] = entry->low_word;
1856 in[j++] = entry->high_word;
1858 in[j++] = entry->high_word;
1859 in[j++] = entry->low_word;
1865 assert(j == get_method_n_ress(mtp)+1);
1867 set_irn_in(node, j, in);
1871 * Translate the parameters.
/**
 * Translate the parameter Projs below Start after the method type was
 * lowered: compute the new Proj number for every original parameter
 * (double-word parameters shift all following numbers by one), then rewire
 * non-double-word Projs and split double-word Projs into a low/high pair.
 */
1873 static void lower_Start(ir_node *node, ir_mode *high_mode)
1875 ir_graph *irg = get_irn_irg(node);
1876 ir_entity *ent = get_irg_entity(irg);
1877 ir_type *mtp = get_entity_type(ent);
1878 ir_type *orig_mtp = get_type_link(mtp);
1881 size_t i, j, n_params;
1882 const ir_edge_t *edge;
1883 const ir_edge_t *next;
1886 /* if type link is NULL then the type was not lowered, hence no changes
1887 * at Start necessary */
1888 if (orig_mtp == NULL)
1891 n_params = get_method_n_params(orig_mtp);
1893 NEW_ARR_A(long, new_projs, n_params);
1895 /* Calculate mapping of proj numbers in new_projs */
1896 for (i = j = 0; i < n_params; ++i, ++j) {
1897 ir_type *ptp = get_method_param_type(orig_mtp, i);
1900 if (is_Primitive_type(ptp)) {
1901 ir_mode *amode = get_type_mode(ptp);
1902 if (amode == env->high_signed || amode == env->high_unsigned)
1907 /* lower method type */
/* find the Proj(T_args) below Start */
1909 foreach_out_edge(node, edge) {
1910 ir_node *proj = get_edge_src_irn(edge);
1913 if (get_Proj_proj(proj) == pn_Start_T_args) {
1921 /* fix all Proj's and create new ones */
1922 foreach_out_edge_safe(args, edge, next) {
1923 ir_node *proj = get_edge_src_irn(edge);
1924 ir_mode *mode = get_irn_mode(proj);
1925 ir_mode *mode_l = env->low_unsigned;
1936 pred = get_Proj_pred(proj);
1937 proj_nr = get_Proj_proj(proj);
1939 if (mode == env->high_signed) {
1940 mode_h = env->low_signed;
1941 } else if (mode == env->high_unsigned) {
1942 mode_h = env->low_unsigned;
/* not a double word: just renumber according to the mapping */
1944 long new_pn = new_projs[proj_nr];
1945 set_Proj_proj(proj, new_pn);
1949 /* Switch off CSE or we might get an already existing Proj. */
1950 old_cse = get_opt_cse();
1952 dbg = get_irn_dbg_info(proj);
1953 if (env->params->little_endian) {
1954 res_low = new_rd_Proj(dbg, pred, mode_l, new_projs[proj_nr]);
1955 res_high = new_rd_Proj(dbg, pred, mode_h, new_projs[proj_nr] + 1);
1957 res_high = new_rd_Proj(dbg, pred, mode_h, new_projs[proj_nr]);
1958 res_low = new_rd_Proj(dbg, pred, mode_l, new_projs[proj_nr] + 1);
1960 set_opt_cse(old_cse);
1961 ir_set_dw_lowered(proj, res_low, res_high);
/**
 * Translate a Call: lower its method type, split every double-word argument
 * into two word-sized inputs, and split/renumber the result Projs the same
 * way lower_Start does for parameters.
 */
1968 static void lower_Call(ir_node *node, ir_mode *mode)
1970 ir_type *tp = get_Call_type(node);
1972 size_t n_params, n_res;
1973 bool need_lower = false;
1976 long *res_numbers = NULL;
1978 const ir_edge_t *edge;
1979 const ir_edge_t *next;
/* check arguments for double words */
1982 n_params = get_method_n_params(tp);
1983 for (p = 0; p < n_params; ++p) {
1984 ir_type *ptp = get_method_param_type(tp, p);
1986 if (is_Primitive_type(ptp)) {
1987 ir_mode *pmode = get_type_mode(ptp);
1988 if (pmode == env->high_signed || pmode == env->high_unsigned) {
/* check results and build the old->new result number mapping */
1994 n_res = get_method_n_ress(tp);
1996 NEW_ARR_A(long, res_numbers, n_res);
1998 for (i = j = 0; i < n_res; ++i, ++j) {
1999 ir_type *ptp = get_method_res_type(tp, i);
2002 if (is_Primitive_type(ptp)) {
2003 ir_mode *rmode = get_type_mode(ptp);
2004 if (rmode == env->high_signed || rmode == env->high_unsigned) {
2015 /* let's lower it */
2017 set_Call_type(node, tp);
/* +2 for memory and callee address */
2019 NEW_ARR_A(ir_node *, in, get_method_n_params(tp) + 2);
2021 in[0] = get_Call_mem(node);
2022 in[1] = get_Call_ptr(node);
2024 for (j = 2, i = 0; i < n_params; ++i) {
2025 ir_node *pred = get_Call_param(node, i);
2026 ir_mode *pred_mode = get_irn_mode(pred);
2028 if (pred_mode == env->high_signed || pred_mode == env->high_unsigned) {
2029 const lower64_entry_t *pred_entry = get_node_entry(pred);
2030 if (env->params->little_endian) {
2031 in[j++] = pred_entry->low_word;
2032 in[j++] = pred_entry->high_word;
2034 in[j++] = pred_entry->high_word;
2035 in[j++] = pred_entry->low_word;
2042 set_irn_in(node, j, in);
2044 /* find results T */
2046 foreach_out_edge(node, edge) {
2047 ir_node *proj = get_edge_src_irn(edge);
2050 if (get_Proj_proj(proj) == pn_Call_T_result) {
2055 if (resproj == NULL)
2058 /* fix the results */
2059 foreach_out_edge_safe(resproj, edge, next) {
2060 ir_node *proj = get_edge_src_irn(edge);
2061 ir_mode *proj_mode = get_irn_mode(proj);
2062 ir_mode *mode_l = env->low_unsigned;
2072 pred = get_Proj_pred(proj);
2073 proj_nr = get_Proj_proj(proj);
2075 if (proj_mode == env->high_signed) {
2076 mode_h = env->low_signed;
2077 } else if (proj_mode == env->high_unsigned) {
2078 mode_h = env->low_unsigned;
/* not a double word: just renumber */
2080 long new_nr = res_numbers[proj_nr];
2081 set_Proj_proj(proj, new_nr);
2085 dbg = get_irn_dbg_info(proj);
2086 if (env->params->little_endian) {
2087 res_low = new_rd_Proj(dbg, pred, mode_l, res_numbers[proj_nr]);
2088 res_high = new_rd_Proj(dbg, pred, mode_h, res_numbers[proj_nr] + 1);
2090 res_high = new_rd_Proj(dbg, pred, mode_h, res_numbers[proj_nr]);
2091 res_low = new_rd_Proj(dbg, pred, mode_l, res_numbers[proj_nr] + 1);
2093 ir_set_dw_lowered(proj, res_low, res_high);
2098 * Translate an Unknown into two.
/** Translate a double-word Unknown into one Unknown per word half. */
2100 static void lower_Unknown(ir_node *node, ir_mode *mode)
2102 ir_mode *low_mode = env->low_unsigned;
2103 ir_graph *irg = get_irn_irg(node);
2104 ir_node *res_low = new_r_Unknown(irg, low_mode);
2105 ir_node *res_high = new_r_Unknown(irg, mode);
2106 ir_set_dw_lowered(node, res_low, res_high);
2110 * Translate a Bad into two.
/** Translate a double-word Bad into one Bad per word half. */
2112 static void lower_Bad(ir_node *node, ir_mode *mode)
2114 ir_mode *low_mode = env->low_unsigned;
2115 ir_graph *irg = get_irn_irg(node);
2116 ir_node *res_low = new_r_Bad(irg, low_mode);
2117 ir_node *res_high = new_r_Bad(irg, mode);
2118 ir_set_dw_lowered(node, res_low, res_high);
2124 * First step: just create two templates
/**
 * Translate a Phi: first step, create two template Phis (low/high) whose
 * inputs are Dummy placeholders; the real predecessors are patched in later
 * by fixup_phi once all predecessors have been lowered.  All predecessors
 * are enqueued so they get lowered too.
 */
2126 static void lower_Phi(ir_node *phi)
2128 ir_mode *mode = get_irn_mode(phi);
2143 /* enqueue predecessors */
2144 arity = get_Phi_n_preds(phi);
2145 for (i = 0; i < arity; ++i) {
2146 ir_node *pred = get_Phi_pred(phi, i);
2147 pdeq_putr(env->waitq, pred);
/* only double-word Phis need new nodes */
2150 if (mode != env->high_signed && mode != env->high_unsigned)
2153 /* first create a new in array */
2154 NEW_ARR_A(ir_node *, in_l, arity);
2155 NEW_ARR_A(ir_node *, in_h, arity);
2156 irg = get_irn_irg(phi);
2157 mode_l = env->low_unsigned;
2158 mode_h = mode == env->high_signed ? env->low_signed : env->low_unsigned;
/* Dummy inputs act as placeholders until fixup_phi runs */
2159 unk_l = new_r_Dummy(irg, mode_l);
2160 unk_h = new_r_Dummy(irg, mode_h);
2161 for (i = 0; i < arity; ++i) {
2166 dbg = get_irn_dbg_info(phi);
2167 block = get_nodes_block(phi);
2168 phi_l = new_rd_Phi(dbg, block, arity, in_l, mode_l);
2169 phi_h = new_rd_Phi(dbg, block, arity, in_h, mode_h);
2171 ir_set_dw_lowered(phi, phi_l, phi_h);
2173 /* remember that we need to fixup the predecessors later */
2174 ARR_APP1(ir_node*, env->lowered_phis, phi);
/**
 * Second Phi step: replace the Dummy placeholder inputs of the low/high
 * template Phis with the now-available lowered words of each predecessor.
 */
2177 static void fixup_phi(ir_node *phi)
2179 const lower64_entry_t *entry = get_node_entry(phi);
2180 ir_node *phi_l = entry->low_word;
2181 ir_node *phi_h = entry->high_word;
2182 int arity = get_Phi_n_preds(phi);
2185 /* exchange phi predecessors which are lowered by now */
2186 for (i = 0; i < arity; ++i) {
2187 ir_node *pred = get_Phi_pred(phi, i);
2188 const lower64_entry_t *pred_entry = get_node_entry(pred);
2190 set_Phi_pred(phi_l, i, pred_entry->low_word);
2191 set_Phi_pred(phi_h, i, pred_entry->high_word);
/**
 * Translate a double-word Mux into two Muxes sharing the same selector:
 * one choosing between the low words, one between the high words.
 */
2198 static void lower_Mux(ir_node *mux, ir_mode *mode)
2200 ir_node *truen = get_Mux_true(mux);
2201 ir_node *falsen = get_Mux_false(mux);
2202 ir_node *sel = get_Mux_sel(mux);
2203 const lower64_entry_t *true_entry = get_node_entry(truen);
2204 const lower64_entry_t *false_entry = get_node_entry(falsen);
2205 ir_node *true_l = true_entry->low_word;
2206 ir_node *true_h = true_entry->high_word;
2207 ir_node *false_l = false_entry->low_word;
2208 ir_node *false_h = false_entry->high_word;
2209 dbg_info *dbgi = get_irn_dbg_info(mux);
2210 ir_node *block = get_nodes_block(mux);
2212 = new_rd_Mux(dbgi, block, sel, false_l, true_l, env->low_unsigned);
2214 = new_rd_Mux(dbgi, block, sel, false_h, true_h, mode);
2215 ir_set_dw_lowered(mux, res_low, res_high);
2219 * Translate an ASM node.
/**
 * Translate an ASM node with double-word output constraints: each 64bit
 * "=A" output is split into an "=a" low and "=d" high constraint (i386
 * eax/edx pair — see TODO below), a new ASM node is built with the expanded
 * constraint list, and the output Projs are renumbered/split accordingly.
 * 64bit inputs are not supported and panic.
 */
2221 static void lower_ASM(ir_node *asmn, ir_mode *mode)
2223 ir_mode *high_signed = env->high_signed;
2224 ir_mode *high_unsigned = env->high_unsigned;
2225 int n_outs = get_ASM_n_output_constraints(asmn);
2226 ir_asm_constraint *output_constraints = get_ASM_output_constraints(asmn);
2227 ir_asm_constraint *input_constraints = get_ASM_input_constraints(asmn);
2228 unsigned n_64bit_outs = 0;
2233 for (i = get_irn_arity(asmn) - 1; i >= 0; --i) {
2234 ir_node *op = get_irn_n(asmn, i);
2235 ir_mode *op_mode = get_irn_mode(op);
2236 if (op_mode == high_signed || op_mode == high_unsigned) {
2237 panic("lowering ASM 64bit input unimplemented");
2241 for (i = 0; i < n_outs; ++i) {
2242 const ir_asm_constraint *constraint = &output_constraints[i];
2243 if (constraint->mode == high_signed || constraint->mode == high_unsigned) {
2244 const char *constr = get_id_str(constraint->constraint);
2246 /* TODO: How to do this architecture neutral? This is very
2247 * i386 specific... */
2248 if (constr[0] != '=' || constr[1] != 'A') {
2249 panic("lowering ASM 64bit output only supports '=A' currently");
/* no 64bit outputs: nothing to rewrite */
2254 if (n_64bit_outs == 0)
2258 dbg_info *dbgi = get_irn_dbg_info(asmn);
2259 ir_node *block = get_nodes_block(asmn);
2260 int arity = get_irn_arity(asmn);
2261 ir_node **in = get_irn_in(asmn) + 1;
2263 int n_clobber = get_ASM_n_clobbers(asmn);
/* proj_map maps old output positions to new ones */
2264 long *proj_map = ALLOCAN(long, n_outs);
2265 ident **clobbers = get_ASM_clobbers(asmn);
2266 ident *asm_text = get_ASM_text(asmn);
2267 ir_asm_constraint *new_outputs
2268 = ALLOCAN(ir_asm_constraint, n_outs+n_64bit_outs);
2270 const ir_edge_t *edge;
2271 const ir_edge_t *next;
2273 for (i = 0; i < n_outs; ++i) {
2274 const ir_asm_constraint *constraint = &output_constraints[i];
2275 if (constraint->mode == high_signed || constraint->mode == high_unsigned) {
/* split '=A' into an '=a' (low) and '=d' (high) constraint */
2276 new_outputs[new_n_outs].pos = constraint->pos;
2277 new_outputs[new_n_outs].constraint = new_id_from_str("=a");
2278 new_outputs[new_n_outs].mode = env->low_unsigned;
2279 proj_map[i] = new_n_outs;
2281 new_outputs[new_n_outs].pos = constraint->pos;
2282 new_outputs[new_n_outs].constraint = new_id_from_str("=d");
2283 if (constraint->mode == high_signed)
2284 new_outputs[new_n_outs].mode = env->low_signed;
2286 new_outputs[new_n_outs].mode = env->low_unsigned;
2289 new_outputs[new_n_outs] = *constraint;
2290 proj_map[i] = new_n_outs;
2294 assert(new_n_outs == n_outs+(int)n_64bit_outs);
2296 new_asm = new_rd_ASM(dbgi, block, arity, in, input_constraints,
2297 new_n_outs, new_outputs, n_clobber, clobbers,
2300 foreach_out_edge_safe(asmn, edge, next) {
2301 ir_node *proj = get_edge_src_irn(edge);
2302 ir_mode *proj_mode = get_irn_mode(proj);
2307 pn = get_Proj_proj(proj);
2312 pn = new_n_outs + pn - n_outs;
2314 if (proj_mode == high_signed || proj_mode == high_unsigned) {
2316 = proj_mode == high_signed ? env->low_signed : env->low_unsigned;
2317 ir_node *np_low = new_r_Proj(new_asm, env->low_unsigned, pn);
2318 ir_node *np_high = new_r_Proj(new_asm, high_mode, pn+1);
2319 ir_set_dw_lowered(proj, np_low, np_high);
2321 ir_node *np = new_r_Proj(new_asm, proj_mode, pn);
2329 * Lower the builtin type to its higher part.
2331 * @param mtp the builtin type to lower
2333 * @return the lowered type
2335 static ir_type *lower_Builtin_type_high(ir_type *mtp)
/* Build (or fetch from the lowered_builtin_type_high cache) the variant of a
 * builtin's method type that operates on the HIGH word: doubleword parameter
 * types are replaced by a single-word type, where the high word of a signed
 * doubleword is unsigned on little endian and signed on big endian (the word
 * carrying the sign depends on word order).  Non-doubleword params and all
 * result types are copied unchanged. */
2341 bool must_be_lowered;
/* cache lookup: each builtin type is lowered at most once */
2343 res = (ir_type*)pmap_get(lowered_builtin_type_high, mtp);
2347 n_params = get_method_n_params(mtp);
2348 n_results = get_method_n_ress(mtp);
2349 must_be_lowered = false;
2351 /* check for double word parameter */
2352 for (i = n_params; i > 0;) {
2353 ir_type *tp = get_method_param_type(mtp, --i);
2355 if (is_Primitive_type(tp)) {
2356 ir_mode *mode = get_type_mode(tp);
2358 if (mode == env->high_signed || mode == env->high_unsigned) {
2359 must_be_lowered = true;
2365 if (!must_be_lowered) {
2366 set_type_link(mtp, NULL);
2370 res = new_d_type_method(n_params, n_results, get_type_dbg_info(mtp));
2372 /* set param types and result types */
2373 for (i = 0; i < n_params; ++i) {
2374 ir_type *tp = get_method_param_type(mtp, i);
2376 if (is_Primitive_type(tp)) {
2377 ir_mode *mode = get_type_mode(tp);
2379 if (mode == env->high_signed) {
/* high word of a signed doubleword: the sign bit lives in the high
 * word, so it stays signed only on big endian */
2380 if (env->params->little_endian) {
2381 set_method_param_type(res, i, tp_u);
2383 set_method_param_type(res, i, tp_s);
2385 } else if (mode == env->high_unsigned) {
2386 set_method_param_type(res, i, tp_u);
2388 set_method_param_type(res, i, tp);
2391 set_method_param_type(res, i, tp);
/* BUGFIX: this loop previously read "for (i = n_results = 0; ...)", which
 * zeroed n_results and therefore never copied a single result type (and
 * destroyed the count).  The parallel loop in lower_Builtin_type_low shows
 * the intended form. */
2394 for (i = 0; i < n_results; ++i) {
2395 ir_type *tp = get_method_res_type(mtp, i);
2397 set_method_res_type(res, i, tp);
2400 set_method_variadicity(res, get_method_variadicity(mtp));
2401 set_method_calling_convention(res, get_method_calling_convention(mtp));
2402 set_method_additional_properties(res, get_method_additional_properties(mtp));
2404 pmap_insert(lowered_builtin_type_high, mtp, res);
2409 * Lower the builtin type to its lower part.
2411 * @param mtp the builtin type to lower
2413 * @return the lowered type
2415 static ir_type *lower_Builtin_type_low(ir_type *mtp)
/* Build (or fetch from the lowered_builtin_type_low cache) the variant of a
 * builtin's method type that operates on the LOW word: doubleword parameter
 * types become a single-word type; the low word of a signed doubleword is
 * signed on little endian and unsigned on big endian (mirror image of
 * lower_Builtin_type_high).  Result types are copied unchanged. */
2421 bool must_be_lowered;
/* cache lookup: each builtin type is lowered at most once */
2423 res = (ir_type*)pmap_get(lowered_builtin_type_low, mtp);
2427 n_params = get_method_n_params(mtp);
2428 n_results = get_method_n_ress(mtp);
2429 must_be_lowered = false;
2431 /* check for double word parameter */
2432 for (i = n_params; i > 0;) {
2433 ir_type *tp = get_method_param_type(mtp, --i);
2435 if (is_Primitive_type(tp)) {
2436 ir_mode *mode = get_type_mode(tp);
2438 if (mode == env->high_signed || mode == env->high_unsigned) {
2439 must_be_lowered = true;
2445 if (!must_be_lowered) {
2446 set_type_link(mtp, NULL);
2450 res = new_d_type_method(n_params, n_results, get_type_dbg_info(mtp));
2452 /* set param types and result types */
2453 for (i = 0; i < n_params; ++i) {
2454 ir_type *tp = get_method_param_type(mtp, i);
2456 if (is_Primitive_type(tp)) {
2457 ir_mode *mode = get_type_mode(tp);
2459 if (mode == env->high_signed) {
/* low word of a signed doubleword: signed only on little endian,
 * where the high word carries the sign on big endian instead */
2460 if (env->params->little_endian) {
2461 set_method_param_type(res, i, tp_s);
2463 set_method_param_type(res, i, tp_u);
2465 } else if (mode == env->high_unsigned) {
2466 set_method_param_type(res, i, tp_u);
2468 set_method_param_type(res, i, tp);
2471 set_method_param_type(res, i, tp);
/* results are copied through unchanged */
2474 for (i = 0; i < n_results; ++i) {
2475 ir_type *tp = get_method_res_type(mtp, i);
2477 set_method_res_type(res, i, tp);
2480 set_method_variadicity(res, get_method_variadicity(mtp));
2481 set_method_calling_convention(res, get_method_calling_convention(mtp));
2482 set_method_additional_properties(res, get_method_additional_properties(mtp));
2484 pmap_insert(lowered_builtin_type_low, mtp, res);
2489 * Lower double word builtins.
2491 static void lower_Builtin(ir_node *builtin, ir_mode *mode)
/* Lower a Builtin node whose operand is a doubleword value by combining two
 * single-word builtin calls on the low and high words.  Builtins without a
 * doubleword operand are left untouched. */
2493 ir_builtin_kind kind = get_Builtin_kind(builtin);
2495 ir_mode *operand_mode;
/* builtins that never need doubleword treatment */
2499 case ir_bk_debugbreak:
2500 case ir_bk_return_address:
2501 case ir_bk_frame_address:
2502 case ir_bk_prefetch:
2506 case ir_bk_inner_trampoline:
2507 /* Nothing to do. */
2512 case ir_bk_popcount:
2516 panic("unknown builtin");
2519 operand = get_Builtin_param(builtin, 0);
2520 operand_mode = get_irn_mode(operand);
/* only doubleword operands require lowering */
2522 if (operand_mode != env->high_signed && operand_mode != env->high_unsigned)
2525 arch_allow_ifconv_func allow_ifconv = be_get_backend_param()->allow_ifconv;
2526 int arity = get_irn_arity(builtin);
2527 dbg_info *dbgi = get_irn_dbg_info(builtin);
2528 ir_graph *irg = get_irn_irg(builtin);
2529 ir_type *type = get_Builtin_type(builtin);
2530 ir_type *lowered_type_high = lower_Builtin_type_high(type);
2531 ir_type *lowered_type_low = lower_Builtin_type_low(type);
2532 ir_node *block = get_nodes_block(builtin);
2533 ir_node *mem = get_Builtin_mem(builtin);
2536 assert(is_NoMem(mem));
/* ffs(x): ffs(low) if low != 0, else ffs(high) + wordsize if high != 0,
 * else 0.  Built from two word-sized ffs builtins plus two Muxes. */
2541 const lower64_entry_t *entry = get_node_entry(operand);
2542 ir_node *in_high[1] = {entry->high_word};
2543 ir_node *in_low[1] = {entry->low_word};
2544 ir_node *number_of_bits = new_r_Const_long(irg, mode_Is, get_mode_size_bits(env->low_unsigned));
2545 ir_node *zero_signed = new_rd_Const(dbgi, irg, get_mode_null(mode_Is));
2546 ir_node *zero_unsigned = new_rd_Const(dbgi, irg, get_mode_null(mode_Iu));
2547 ir_node *cmp_low = new_rd_Cmp(dbgi, block, entry->low_word, zero_unsigned, ir_relation_equal);
/* NOTE(review): high_word is compared against an unsigned zero here even
 * for signed operands -- presumably fine since only equality is tested;
 * confirm the mode matches high_word's mode in the full source. */
2548 ir_node *cmp_high = new_rd_Cmp(dbgi, block, entry->high_word, zero_unsigned, ir_relation_equal);
2549 ir_node *ffs_high = new_rd_Builtin(dbgi, block, mem, 1, in_high, kind, lowered_type_high);
2550 ir_node *high_proj = new_r_Proj(ffs_high, mode_Is, pn_Builtin_max+1);
2551 ir_node *high = new_rd_Add(dbgi, block, high_proj, number_of_bits, mode_Is);
2552 ir_node *ffs_low = new_rd_Builtin(dbgi, block, mem, 1, in_low, kind, lowered_type_low);
2553 ir_node *low = new_r_Proj(ffs_low, mode_Is, pn_Builtin_max+1);
2554 ir_node *mux_high = new_rd_Mux(dbgi, block, cmp_high, high, zero_signed, mode_Is);
/* remember Muxes the backend cannot if-convert; they are lowered to
 * control flow afterwards (see lower_mux_cb) */
2556 if (! allow_ifconv(cmp_high, high, zero_signed))
2557 ir_nodeset_insert(&created_mux_nodes, mux_high);
2559 res = new_rd_Mux(dbgi, block, cmp_low, low, mux_high, mode_Is);
2561 if (! allow_ifconv(cmp_low, low, mux_high))
2562 ir_nodeset_insert(&created_mux_nodes, res);
/* clz(x): clz(high) if high != 0, else clz(low) + wordsize */
2566 const lower64_entry_t *entry = get_node_entry(operand);
2567 ir_node *in_high[1] = {entry->high_word};
2568 ir_node *in_low[1] = {entry->low_word};
2569 ir_node *number_of_bits = new_r_Const_long(irg, mode_Is, get_mode_size_bits(mode));
2570 ir_node *zero_unsigned = new_rd_Const(dbgi, irg, get_mode_null(mode_Iu));
2571 ir_node *cmp_high = new_rd_Cmp(dbgi, block, entry->high_word, zero_unsigned, ir_relation_equal);
2572 ir_node *clz_high = new_rd_Builtin(dbgi, block, mem, 1, in_high, kind, lowered_type_high);
2573 ir_node *high = new_r_Proj(clz_high, mode_Is, pn_Builtin_max+1);
2574 ir_node *clz_low = new_rd_Builtin(dbgi, block, mem, 1, in_low, kind, lowered_type_low);
2575 ir_node *low_proj = new_r_Proj(clz_low, mode_Is, pn_Builtin_max+1);
2576 ir_node *low = new_rd_Add(dbgi, block, low_proj, number_of_bits, mode_Is);
2578 res = new_rd_Mux(dbgi, block, cmp_high, high, low, mode_Is);
2580 if (! allow_ifconv(cmp_high, high, low))
2581 ir_nodeset_insert(&created_mux_nodes, res);
/* ctz(x): ctz(low) if low != 0, else ctz(high) + wordsize */
2585 const lower64_entry_t *entry = get_node_entry(operand);
2586 ir_node *in_high[1] = {entry->high_word};
2587 ir_node *in_low[1] = {entry->low_word};
2588 ir_node *number_of_bits = new_r_Const_long(irg, mode_Is, get_mode_size_bits(env->low_unsigned));
2589 ir_node *zero_unsigned = new_rd_Const(dbgi, irg, get_mode_null(mode_Iu));
2590 ir_node *cmp_low = new_rd_Cmp(dbgi, block, entry->low_word, zero_unsigned, ir_relation_equal);
2591 ir_node *ffs_high = new_rd_Builtin(dbgi, block, mem, 1, in_high, kind, lowered_type_high);
2592 ir_node *high_proj = new_r_Proj(ffs_high, mode_Is, pn_Builtin_max+1);
2593 ir_node *high = new_rd_Add(dbgi, block, high_proj, number_of_bits, mode_Is);
2594 ir_node *ffs_low = new_rd_Builtin(dbgi, block, mem, 1, in_low, kind, lowered_type_low);
2595 ir_node *low = new_r_Proj(ffs_low, mode_Is, pn_Builtin_max+1);
2597 res = new_rd_Mux(dbgi, block, cmp_low, low, high, mode_Is);
2599 if (! allow_ifconv(cmp_low, low, high))
2600 ir_nodeset_insert(&created_mux_nodes, res);
/* popcount(x) = popcount(low) + popcount(high) */
2603 case ir_bk_popcount: {
2604 const lower64_entry_t *entry = get_node_entry(operand);
2605 ir_node *in_high[1] = {entry->high_word};
2606 ir_node *in_low[1] = {entry->low_word};
2607 ir_node *popcount_high = new_rd_Builtin(dbgi, block, mem, 1, in_high, kind, lowered_type_high);
2608 ir_node *popcount_low = new_rd_Builtin(dbgi, block, mem, 1, in_low, kind, lowered_type_low);
2609 ir_node *high = new_r_Proj(popcount_high, mode_Is, pn_Builtin_max+1);
2610 ir_node *low = new_r_Proj(popcount_low, mode_Is, pn_Builtin_max+1);
2612 res = new_rd_Add(dbgi, block, high, low, mode_Is);
/* parity(x) = parity(low) ^ parity(high) */
2615 case ir_bk_parity: {
2616 const lower64_entry_t *entry = get_node_entry(operand);
2617 ir_node *in_high[1] = {entry->high_word};
2618 ir_node *in_low[1] = {entry->low_word};
2619 ir_node *parity_high;
2620 ir_node *parity_low;
2626 parity_high = new_rd_Builtin(dbgi, block, mem, 1, in_high, kind, lowered_type_high);
2627 high = new_r_Proj(parity_high, mode_Is, pn_Builtin_max+1);
2628 parity_low = new_rd_Builtin(dbgi, block, mem, 1, in_low, kind, lowered_type_low);
2629 low = new_r_Proj(parity_low, mode_Is, pn_Builtin_max+1);
2630 res = new_rd_Eor(dbgi, block, high, low, mode_Is);
2634 panic("unexpected builtin");
/* replace the original Builtin by a tuple (memory, result) so existing
 * Projs keep working */
2637 turn_into_tuple(builtin, 2);
2638 set_irn_n(builtin, pn_Builtin_M, mem);
2639 set_irn_n(builtin, pn_Builtin_max+1, res);
2643 * check for opcodes that must always be lowered.
2645 static bool always_lower(unsigned code)
/* NOTE(review): body not visible in this excerpt -- presumably returns true
 * for opcodes that must be processed even without a doubleword entry (used
 * by lower_node); confirm the opcode list against the full source. */
2664 * Compare two op_mode_entry_t's.
2666 static int cmp_op_mode(const void *elt, const void *key, size_t size)
2668 const op_mode_entry_t *e1 = (const op_mode_entry_t*)elt;
2669 const op_mode_entry_t *e2 = (const op_mode_entry_t*)key;
/* set-compare callback: 0 iff op, imode and omode all match; OR-ing the
 * pointer inequalities yields a branch-free equality test (no ordering) */
2672 return (e1->op != e2->op) | (e1->imode != e2->imode) | (e1->omode != e2->omode);
2676 * Compare two conv_tp_entry_t's.
2678 static int cmp_conv_tp(const void *elt, const void *key, size_t size)
2680 const conv_tp_entry_t *e1 = (const conv_tp_entry_t*)elt;
2681 const conv_tp_entry_t *e2 = (const conv_tp_entry_t*)key;
/* set-compare callback: 0 iff both mode fields match (equality only) */
2684 return (e1->imode != e2->imode) | (e1->omode != e2->omode);
2688 * Enter a lowering function into an ir_op.
2690 void ir_register_dw_lower_function(ir_op *op, lower_dw_func func)
/* the opcode's generic-function slot carries the per-op lowering callback
 * later dispatched by lower_node() */
2692 op->ops.generic = (op_func)func;
2695 /* Determine which modes need to be lowered */
2696 static void setup_modes(void)
/* Scan all registered modes for the unique signed and unsigned integer
 * modes of doubleword size, sanity-check that they agree in arithmetic and
 * modulo shift, and create the half-width modes WS/WU used for the lowered
 * words. */
2698 unsigned size_bits = env->params->doubleword_size;
2699 ir_mode *doubleword_signed = NULL;
2700 ir_mode *doubleword_unsigned = NULL;
2701 size_t n_modes = ir_get_n_modes();
2702 ir_mode_arithmetic arithmetic;
2703 unsigned modulo_shift;
2706 /* search for doubleword modes... */
2707 for (i = 0; i < n_modes; ++i) {
2708 ir_mode *mode = ir_get_mode(i);
2709 if (!mode_is_int(mode))
2711 if (get_mode_size_bits(mode) != size_bits)
2713 if (mode_is_signed(mode)) {
2714 if (doubleword_signed != NULL) {
2715 /* sigh - the lowerer should really just lower all mode with
2716 * size_bits it finds. Unfortunately this required a bigger
2718 panic("multiple double word signed modes found");
2720 doubleword_signed = mode;
2722 if (doubleword_unsigned != NULL) {
2723 /* sigh - the lowerer should really just lower all mode with
2724 * size_bits it finds. Unfortunately this required a bigger
2726 panic("multiple double word unsigned modes found");
2728 doubleword_unsigned = mode;
2731 if (doubleword_signed == NULL || doubleword_unsigned == NULL) {
2732 panic("Couldn't find doubleword modes");
/* both doubleword modes must be twins apart from signedness */
2735 arithmetic = get_mode_arithmetic(doubleword_signed);
2736 modulo_shift = get_mode_modulo_shift(doubleword_signed);
2738 assert(get_mode_size_bits(doubleword_unsigned) == size_bits);
2739 assert(size_bits % 2 == 0);
2740 assert(get_mode_sign(doubleword_signed) == 1);
2741 assert(get_mode_sign(doubleword_unsigned) == 0);
2742 assert(get_mode_sort(doubleword_signed) == irms_int_number);
2743 assert(get_mode_sort(doubleword_unsigned) == irms_int_number);
2744 assert(get_mode_arithmetic(doubleword_unsigned) == arithmetic);
2745 assert(get_mode_modulo_shift(doubleword_unsigned) == modulo_shift);
2747 /* try to guess a sensible modulo shift for the new mode.
2748 * (This is IMO another indication that this should really be a node
2749 * attribute instead of a mode thing) */
2750 if (modulo_shift == size_bits) {
2751 modulo_shift = modulo_shift / 2;
2752 } else if (modulo_shift == 0) {
2755 panic("Don't know what new modulo shift to use for lowered doubleword mode");
2759 /* produce lowered modes */
2760 env->high_signed = doubleword_signed;
2761 env->high_unsigned = doubleword_unsigned;
/* WS/WU: half-width signed/unsigned word modes for the split values */
2762 env->low_signed = new_int_mode("WS", arithmetic, size_bits, 1,
2764 env->low_unsigned = new_int_mode("WU", arithmetic, size_bits, 0,
2768 static void enqueue_preds(ir_node *node)
/* push every predecessor of node onto the work queue for later lowering */
2770 int arity = get_irn_arity(node);
2773 for (i = 0; i < arity; ++i) {
2774 ir_node *pred = get_irn_n(node, i);
2775 pdeq_putr(env->waitq, pred);
2779 static void lower_node(ir_node *node)
/* Lower a single node: descend depth-first into its operands first, then
 * dispatch to the per-op lowering callback if the node has a doubleword
 * entry or its opcode must always be lowered.  Each node is visited once. */
2787 lower64_entry_t *entry;
2789 if (irn_visited_else_mark(node))
2792 /* cycles are always broken at Phi and Block nodes. So we don't need special
2793 * magic in all the other lower functions */
2794 if (is_Block(node)) {
2795 enqueue_preds(node);
2797 } else if (is_Phi(node)) {
2802 /* depth-first: descend into operands */
2803 if (!is_Block(node)) {
2804 ir_node *block = get_nodes_block(node);
/* Cond operands are handled by the Cond lowering itself */
2808 if (!is_Cond(node)) {
2809 arity = get_irn_arity(node);
2810 for (i = 0; i < arity; ++i) {
2811 ir_node *pred = get_irn_n(node, i);
/* fetch the lowering callback registered for this opcode */
2816 op = get_irn_op(node);
2817 func = (lower_dw_func) op->ops.generic;
2821 idx = get_irn_idx(node);
2822 entry = idx < env->n_entries ? env->entries[idx] : NULL;
2823 if (entry != NULL || always_lower(get_irn_opcode(node))) {
/* pass the callback the LOWERED mode corresponding to the node's mode */
2824 mode = get_irn_op_mode(node);
2825 if (mode == env->high_signed) {
2826 mode = env->low_signed;
2828 mode = env->low_unsigned;
2830 DB((dbg, LEVEL_1, " %+F\n", node));
2835 static void clear_node_and_phi_links(ir_node *node, void *data)
/* walker callback: initialise the link fields before lowering.  mode_T
 * nodes link to themselves (presumably so their Projs can be recognised
 * later -- confirm against the full source); everything else gets NULL.
 * Block-phi lists and Phi-next chains are reset as well. */
2838 if (get_irn_mode(node) == mode_T) {
2839 set_irn_link(node, node);
2841 set_irn_link(node, NULL);
2844 set_Block_phis(node, NULL);
2845 else if (is_Phi(node))
2846 set_Phi_next(node, NULL);
2849 static void lower_irg(ir_graph *irg)
/* Lower one graph: allocate the per-node entry table, lower the entity's
 * method type, link all nodes, then process the work queue starting from
 * the End node and fix up lowered Phis.  Cleans up all resources at the
 * end regardless of whether lowering was needed. */
2853 ir_type *lowered_mtp;
2856 obstack_init(&env->obst);
2858 /* just here for debugging */
2859 current_ir_graph = irg;
/* over-allocate the entry table by 25% since lowering creates new nodes */
2862 n_idx = get_irg_last_idx(irg);
2863 n_idx = n_idx + (n_idx >> 2); /* add 25% */
2864 env->n_entries = n_idx;
2865 env->entries = NEW_ARR_F(lower64_entry_t*, n_idx);
2866 memset(env->entries, 0, sizeof(env->entries[0]) * n_idx);
/* lower the graph entity's own method type if it mentions doublewords */
2871 ent = get_irg_entity(irg);
2872 mtp = get_entity_type(ent);
2873 lowered_mtp = lower_mtp(mtp);
2875 if (lowered_mtp != mtp) {
2876 set_entity_type(ent, lowered_mtp);
2877 env->flags |= MUST_BE_LOWERED;
2879 fix_parameter_entities(irg);
2882 /* first step: link all nodes and allocate data */
2883 ir_reserve_resources(irg, IR_RESOURCE_PHI_LIST | IR_RESOURCE_IRN_LINK);
2884 visit_all_identities(irg, clear_node_and_phi_links, NULL);
2885 irg_walk_graph(irg, NULL, prepare_links_and_handle_rotl, env);
2887 if (env->flags & MUST_BE_LOWERED) {
2889 ir_reserve_resources(irg, IR_RESOURCE_IRN_VISITED);
2890 inc_irg_visited(irg);
/* seed the work queue with End; lower_node pulls in predecessors */
2892 assert(pdeq_empty(env->waitq))
2893 pdeq_putr(env->waitq, get_irg_end(irg));
2895 env->lowered_phis = NEW_ARR_F(ir_node*, 0);
2896 while (!pdeq_empty(env->waitq)) {
2897 ir_node *node = (ir_node*)pdeq_getl(env->waitq);
2901 /* we need to fixup phis */
2902 for (i = 0; i < ARR_LEN(env->lowered_phis); ++i) {
2903 ir_node *phi = env->lowered_phis[i];
2906 DEL_ARR_F(env->lowered_phis);
2909 ir_free_resources(irg, IR_RESOURCE_IRN_VISITED);
2911 if (env->flags & CF_CHANGED) {
2912 /* control flow changed, dominance info is invalid */
2913 clear_irg_state(irg, IR_GRAPH_STATE_CONSISTENT_DOMINANCE
2914 | IR_GRAPH_STATE_VALID_EXTENDED_BLOCKS);
2916 edges_deactivate(irg);
2919 ir_free_resources(irg, IR_RESOURCE_PHI_LIST | IR_RESOURCE_IRN_LINK);
2921 DEL_ARR_F(env->entries);
2922 obstack_free(&env->obst, NULL);
/* lowering parameters remembered by ir_prepare_dw_lowering() and consumed
 * later by ir_lower_dw_ops() */
2925 static const lwrdw_param_t *param;
2927 void ir_prepare_dw_lowering(const lwrdw_param_t *new_param)
/* Record the lowering parameters and (re)register the per-opcode lowering
 * callbacks; clears any previously installed generic functions first so no
 * stale callbacks survive. */
2929 assert(new_param != NULL);
2930 FIRM_DBG_REGISTER(dbg, "firm.lower.dw");
2934 ir_clear_opcodes_generic_func();
2935 ir_register_dw_lower_function(op_ASM, lower_ASM);
2936 ir_register_dw_lower_function(op_Add, lower_binop);
2937 ir_register_dw_lower_function(op_And, lower_And);
2938 ir_register_dw_lower_function(op_Bad, lower_Bad);
2939 ir_register_dw_lower_function(op_Builtin, lower_Builtin);
2940 ir_register_dw_lower_function(op_Call, lower_Call);
2941 ir_register_dw_lower_function(op_Cmp, lower_Cmp);
2942 ir_register_dw_lower_function(op_Cond, lower_Cond);
2943 ir_register_dw_lower_function(op_Const, lower_Const);
2944 ir_register_dw_lower_function(op_Conv, lower_Conv);
2945 ir_register_dw_lower_function(op_Div, lower_Div);
2946 ir_register_dw_lower_function(op_Eor, lower_Eor);
2947 ir_register_dw_lower_function(op_Load, lower_Load);
2948 ir_register_dw_lower_function(op_Minus, lower_unop);
2949 ir_register_dw_lower_function(op_Mod, lower_Mod);
2950 ir_register_dw_lower_function(op_Mul, lower_binop);
2951 ir_register_dw_lower_function(op_Mux, lower_Mux);
2952 ir_register_dw_lower_function(op_Not, lower_Not);
2953 ir_register_dw_lower_function(op_Or, lower_Or);
2954 ir_register_dw_lower_function(op_Return, lower_Return);
2955 ir_register_dw_lower_function(op_Shl, lower_Shl);
2956 ir_register_dw_lower_function(op_Shr, lower_Shr);
2957 ir_register_dw_lower_function(op_Shrs, lower_Shrs);
2958 ir_register_dw_lower_function(op_Start, lower_Start);
2959 ir_register_dw_lower_function(op_Store, lower_Store);
2960 ir_register_dw_lower_function(op_Sub, lower_binop);
2961 ir_register_dw_lower_function(op_Switch, lower_Switch);
2962 ir_register_dw_lower_function(op_Unknown, lower_Unknown);
2966 * Callback to lower only the Mux nodes we created.
2968 static int lower_mux_cb(ir_node *mux)
/* predicate for lower_mux(): only Muxes recorded in created_mux_nodes
 * (those the backend refused to if-convert) are turned into control flow */
2970 return ir_nodeset_contains(&created_mux_nodes, mux);
2976 void ir_lower_dw_ops(void)
/* Driver for doubleword lowering: set up the shared environment, create the
 * lazily-allocated maps and the method types used by intrinsic calls, then
 * lower every graph in the program and finally lower the Muxes the backend
 * could not if-convert. */
2978 lower_dw_env_t lenv;
2981 memset(&lenv, 0, sizeof(lenv));
2982 lenv.params = param;
2987 /* create the necessary maps */
2988 if (! intrinsic_fkt)
2989 intrinsic_fkt = new_set(cmp_op_mode, iro_Last + 1);
2991 conv_types = new_set(cmp_conv_tp, 16);
2993 lowered_type = pmap_create();
2994 if (! lowered_builtin_type_low)
2995 lowered_builtin_type_low = pmap_create();
2996 if (! lowered_builtin_type_high)
2997 lowered_builtin_type_high = pmap_create();
2999 /* create a primitive unsigned and signed type */
3001 tp_u = get_type_for_mode(lenv.low_unsigned);
3003 tp_s = get_type_for_mode(lenv.low_signed);
3005 /* create method types for the created binop calls */
/* unsigned binop: (low, high, low, high) -> (low, high), all unsigned */
3007 binop_tp_u = new_type_method(4, 2);
3008 set_method_param_type(binop_tp_u, 0, tp_u);
3009 set_method_param_type(binop_tp_u, 1, tp_u);
3010 set_method_param_type(binop_tp_u, 2, tp_u);
3011 set_method_param_type(binop_tp_u, 3, tp_u);
3012 set_method_res_type(binop_tp_u, 0, tp_u);
3013 set_method_res_type(binop_tp_u, 1, tp_u);
/* signed binop: the word carrying the sign depends on endianness */
3016 binop_tp_s = new_type_method(4, 2);
3017 if (env->params->little_endian) {
3018 set_method_param_type(binop_tp_s, 0, tp_u);
3019 set_method_param_type(binop_tp_s, 1, tp_s);
3020 set_method_param_type(binop_tp_s, 2, tp_u);
3021 set_method_param_type(binop_tp_s, 3, tp_s);
3022 set_method_res_type(binop_tp_s, 0, tp_u);
3023 set_method_res_type(binop_tp_s, 1, tp_s);
3025 set_method_param_type(binop_tp_s, 0, tp_s);
3026 set_method_param_type(binop_tp_s, 1, tp_u);
3027 set_method_param_type(binop_tp_s, 2, tp_s);
3028 set_method_param_type(binop_tp_s, 3, tp_u);
3029 set_method_res_type(binop_tp_s, 0, tp_s);
3030 set_method_res_type(binop_tp_s, 1, tp_u);
/* unsigned unop: (low, high) -> (low, high) */
3034 unop_tp_u = new_type_method(2, 2);
3035 set_method_param_type(unop_tp_u, 0, tp_u);
3036 set_method_param_type(unop_tp_u, 1, tp_u);
3037 set_method_res_type(unop_tp_u, 0, tp_u);
3038 set_method_res_type(unop_tp_u, 1, tp_u);
/* signed unop, endianness-dependent like binop_tp_s */
3041 unop_tp_s = new_type_method(2, 2);
3042 if (env->params->little_endian) {
3043 set_method_param_type(unop_tp_s, 0, tp_u);
3044 set_method_param_type(unop_tp_s, 1, tp_s);
3045 set_method_res_type(unop_tp_s, 0, tp_u);
3046 set_method_res_type(unop_tp_s, 1, tp_s);
3048 set_method_param_type(unop_tp_s, 0, tp_s);
3049 set_method_param_type(unop_tp_s, 1, tp_u);
3050 set_method_res_type(unop_tp_s, 0, tp_s);
3051 set_method_res_type(unop_tp_s, 1, tp_u);
/* commonly used constants and the id suffixes for the two half words */
3055 lenv.tv_mode_bytes = new_tarval_from_long(param->doubleword_size/(2*8), lenv.low_unsigned);
3056 lenv.tv_mode_bits = new_tarval_from_long(param->doubleword_size/2, lenv.low_unsigned);
3057 lenv.waitq = new_pdeq();
3058 lenv.first_id = new_id_from_chars(param->little_endian ? ".l" : ".h", 2);
3059 lenv.next_id = new_id_from_chars(param->little_endian ? ".h" : ".l", 2);
3061 irp_reserve_resources(irp, IRP_RESOURCE_TYPE_LINK);
3062 /* transform all graphs */
3063 for (i = 0, n = get_irp_n_irgs(); i < n; ++i) {
3064 ir_graph *irg = get_irp_irg(i);
3066 ir_nodeset_init(&created_mux_nodes);
/* turn the Muxes the backend rejected into real control flow */
3070 if (ir_nodeset_size(&created_mux_nodes) > 0)
3071 lower_mux(irg, lower_mux_cb);
3073 ir_nodeset_destroy(&created_mux_nodes);
3075 irp_free_resources(irp, IRP_RESOURCE_TYPE_LINK);
3076 del_pdeq(lenv.waitq);
3081 /* Default implementation. */
3082 ir_entity *def_create_intrinsic_fkt(ir_type *method, const ir_op *op,
3083 const ir_mode *imode, const ir_mode *omode,
3091 if (imode == omode) {
3092 snprintf(buf, sizeof(buf), "__l%s%s", get_op_name(op), get_mode_name(imode));
3094 snprintf(buf, sizeof(buf), "__l%s%s%s", get_op_name(op),
3095 get_mode_name(imode), get_mode_name(omode));
3097 id = new_id_from_str(buf);
3099 ent = new_entity(get_glob_type(), id, method);
3100 set_entity_ld_ident(ent, get_entity_ident(ent));
3101 set_entity_visibility(ent, ir_visibility_external);