2 * Copyright (C) 1995-2008 University of Karlsruhe. All rights reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Lower double word operations, i.e. 64bit -> 32bit, 32bit -> 16bit etc.
24 * @author Michael Beck
37 #include "irgraph_t.h"
42 #include "dbginfo_t.h"
43 #include "iropt_dbg.h"
58 typedef struct lower_env_t lower_env_t;
61 * The type of a lower function.
63 * @param node the node to be lowered
64 * @param env the lower environment
66 typedef void (*lower_func)(ir_node *node, ir_mode *mode, lower_env_t *env);
68 /** A map from (op, imode, omode) to Intrinsic functions entities. */
69 static set *intrinsic_fkt;
71 /** A map from (imode, omode) to conv function types. */
72 static set *conv_types;
74 /** A map from a method type to its lowered type. */
75 static pmap *lowered_type;
77 /** The types for the binop and unop intrinsics. */
78 static ir_type *binop_tp_u, *binop_tp_s, *unop_tp_u, *unop_tp_s, *shiftop_tp_u, *shiftop_tp_s, *tp_s, *tp_u;
80 /** the debug handle */
81 DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
84 * An entry in the (op, imode, omode) -> entity map.
86 typedef struct op_mode_entry {
87 const ir_op *op; /**< the op */
88 const ir_mode *imode; /**< the input mode */
89 const ir_mode *omode; /**< the output mode */
90 ir_entity *ent; /**< the associated entity of this (op, imode, omode) triple */
94 * An entry in the (imode, omode) -> tp map.
96 typedef struct conv_tp_entry {
97 const ir_mode *imode; /**< the input mode */
98 const ir_mode *omode; /**< the output mode */
99 ir_type *mtd; /**< the associated method type of this (imode, omode) pair */
103 * Every double word node will be replaced,
104 * we need some store to hold the replacement:
106 typedef struct node_entry_t {
107 ir_node *low_word; /**< the low word */
108 ir_node *high_word; /**< the high word */
112 MUST_BE_LOWERED = 1, /**< graph must be lowered */
113 CF_CHANGED = 2, /**< control flow was changed */
117 * The lower environment.
120 node_entry_t **entries; /**< entries per node */
122 struct obstack obst; /**< an obstack holding the temporary data */
123 ir_type *l_mtp; /**< lowered method type of the current method */
124 ir_tarval *tv_mode_bytes; /**< a tarval containing the number of bytes in the lowered modes */
125 ir_tarval *tv_mode_bits; /**< a tarval containing the number of bits in the lowered modes */
126 pdeq *waitq; /**< a wait queue of all nodes that must be handled later */
127 ir_node **lowered_phis; /**< list of lowered phis */
128 pmap *proj_2_block; /**< a map from ProjX to its destination blocks */
129 ir_mode *high_signed; /**< doubleword signed type */
130 ir_mode *high_unsigned; /**< doubleword unsigned type */
131 ir_mode *low_signed; /**< word signed type */
132 ir_mode *low_unsigned; /**< word unsigned type */
133 ident *first_id; /**< .l for little and .h for big endian */
134 ident *next_id; /**< .h for little and .l for big endian */
135 const lwrdw_param_t *params; /**< transformation parameter */
136 unsigned flags; /**< some flags */
137 unsigned n_entries; /**< number of entries */
138 ir_type *value_param_tp; /**< the old value param type */
142 * Create a method type for a Conv emulation from imode to omode.
144 static ir_type *get_conv_type(ir_mode *imode, ir_mode *omode, lower_env_t *env)
146 conv_tp_entry_t key, *entry;
153 entry = set_insert(conv_types, &key, sizeof(key), HASH_PTR(imode) ^ HASH_PTR(omode));
155 int n_param = 1, n_res = 1;
157 if (imode == env->high_signed || imode == env->high_unsigned)
159 if (omode == env->high_signed || omode == env->high_unsigned)
162 /* create a new one */
163 mtd = new_type_method(n_param, n_res);
165 /* set param types and result types */
167 if (imode == env->high_signed) {
168 set_method_param_type(mtd, n_param++, tp_u);
169 set_method_param_type(mtd, n_param++, tp_s);
170 } else if (imode == env->high_unsigned) {
171 set_method_param_type(mtd, n_param++, tp_u);
172 set_method_param_type(mtd, n_param++, tp_u);
174 ir_type *tp = get_type_for_mode(imode);
175 set_method_param_type(mtd, n_param++, tp);
179 if (omode == env->high_signed) {
180 set_method_res_type(mtd, n_res++, tp_u);
181 set_method_res_type(mtd, n_res++, tp_s);
182 } else if (omode == env->high_unsigned) {
183 set_method_res_type(mtd, n_res++, tp_u);
184 set_method_res_type(mtd, n_res++, tp_u);
186 ir_type *tp = get_type_for_mode(omode);
187 set_method_res_type(mtd, n_res++, tp);
197 * Add an additional control flow input to a block.
198 * Patch all Phi nodes. The new Phi inputs are copied from
199 * old input number nr.
201 static void add_block_cf_input_nr(ir_node *block, int nr, ir_node *cf)
203 int i, arity = get_irn_arity(block);
208 NEW_ARR_A(ir_node *, in, arity + 1);
209 for (i = 0; i < arity; ++i)
210 in[i] = get_irn_n(block, i);
213 set_irn_in(block, i + 1, in);
215 for (phi = get_Block_phis(block); phi != NULL; phi = get_Phi_next(phi)) {
216 for (i = 0; i < arity; ++i)
217 in[i] = get_irn_n(phi, i);
219 set_irn_in(phi, i + 1, in);
224 * Add an additional control flow input to a block.
225 * Patch all Phi nodes. The new Phi inputs are copied from
226 * old input from cf tmpl.
228 static void add_block_cf_input(ir_node *block, ir_node *tmpl, ir_node *cf)
230 int i, arity = get_irn_arity(block);
233 for (i = 0; i < arity; ++i) {
234 if (get_irn_n(block, i) == tmpl) {
240 add_block_cf_input_nr(block, nr, cf);
244 * Return the "operational" mode of a Firm node.
246 static ir_mode *get_irn_op_mode(ir_node *node)
248 switch (get_irn_opcode(node)) {
250 return get_Load_mode(node);
252 return get_irn_mode(get_Store_value(node));
254 return get_irn_mode(get_DivMod_left(node));
256 return get_irn_mode(get_Div_left(node));
258 return get_irn_mode(get_Mod_left(node));
260 return get_irn_mode(get_Cmp_left(node));
262 return get_irn_mode(node);
267 * Walker, prepare the node links and determine which nodes need to be lowered
270 static void prepare_links(lower_env_t *env, ir_node *node)
272 ir_mode *mode = get_irn_op_mode(node);
276 if (mode == env->high_signed || mode == env->high_unsigned) {
277 unsigned idx = get_irn_idx(node);
278 /* ok, found a node that will be lowered */
279 link = OALLOCZ(&env->obst, node_entry_t);
281 if (idx >= env->n_entries) {
282 /* enlarge: this happens only for Rotl nodes which is RARELY */
283 unsigned old = env->n_entries;
284 unsigned n_idx = idx + (idx >> 3);
286 ARR_RESIZE(node_entry_t *, env->entries, n_idx);
287 memset(&env->entries[old], 0, (n_idx - old) * sizeof(env->entries[0]));
288 env->n_entries = n_idx;
290 env->entries[idx] = link;
291 env->flags |= MUST_BE_LOWERED;
292 } else if (is_Conv(node)) {
293 /* Conv nodes have two modes */
294 ir_node *pred = get_Conv_op(node);
295 mode = get_irn_mode(pred);
297 if (mode == env->high_signed || mode == env->high_unsigned) {
298 /* must lower this node either but don't need a link */
299 env->flags |= MUST_BE_LOWERED;
305 /* link all Proj nodes to its predecessor:
306 Note that Tuple Proj's and its Projs are linked either. */
307 ir_node *pred = get_Proj_pred(node);
309 set_irn_link(node, get_irn_link(pred));
310 set_irn_link(pred, node);
311 } else if (is_Phi(node)) {
312 /* link all Phi nodes to its block */
313 ir_node *block = get_nodes_block(node);
314 add_Block_phi(block, node);
315 } else if (is_Block(node)) {
316 /* fill the Proj -> Block map */
317 for (i = get_Block_n_cfgpreds(node) - 1; i >= 0; --i) {
318 ir_node *pred = get_Block_cfgpred(node, i);
321 pmap_insert(env->proj_2_block, pred, node);
326 static node_entry_t *get_node_entry(lower_env_t *env, ir_node *node)
328 unsigned idx = get_irn_idx(node);
329 assert(idx < env->n_entries);
330 return env->entries[idx];
333 static void set_lowered(lower_env_t *env, ir_node *old,
334 ir_node *new_low, ir_node *new_high)
336 node_entry_t *entry = get_node_entry(env, old);
337 entry->low_word = new_low;
338 entry->high_word = new_high;
342 * Translate a Constant: create two.
344 static void lower_Const(ir_node *node, ir_mode *mode, lower_env_t *env)
346 ir_graph *irg = get_irn_irg(node);
347 dbg_info *dbg = get_irn_dbg_info(node);
348 ir_mode *low_mode = env->low_unsigned;
349 ir_tarval *tv = get_Const_tarval(node);
350 ir_tarval *tv_l = tarval_convert_to(tv, low_mode);
351 ir_node *res_low = new_rd_Const(dbg, irg, tv_l);
352 ir_tarval *tv_shrs = tarval_shrs(tv, env->tv_mode_bits);
353 ir_tarval *tv_h = tarval_convert_to(tv_shrs, mode);
354 ir_node *res_high = new_rd_Const(dbg, irg, tv_h);
356 set_lowered(env, node, res_low, res_high);
360 * Translate a Load: create two.
362 static void lower_Load(ir_node *node, ir_mode *mode, lower_env_t *env)
364 ir_mode *low_mode = env->low_unsigned;
365 ir_graph *irg = get_irn_irg(node);
366 ir_node *adr = get_Load_ptr(node);
367 ir_node *mem = get_Load_mem(node);
368 ir_node *low, *high, *proj;
370 ir_node *block = get_nodes_block(node);
371 ir_cons_flags volatility = get_Load_volatility(node) == volatility_is_volatile
374 if (env->params->little_endian) {
376 high = new_r_Add(block, adr, new_r_Const(irg, env->tv_mode_bytes), get_irn_mode(adr));
378 low = new_r_Add(block, adr, new_r_Const(irg, env->tv_mode_bytes), get_irn_mode(adr));
382 /* create two loads */
383 dbg = get_irn_dbg_info(node);
384 low = new_rd_Load(dbg, block, mem, low, low_mode, volatility);
385 proj = new_r_Proj(low, mode_M, pn_Load_M);
386 high = new_rd_Load(dbg, block, proj, high, mode, volatility);
388 set_lowered(env, node, low, high);
390 for (proj = get_irn_link(node); proj; proj = get_irn_link(proj)) {
391 switch (get_Proj_proj(proj)) {
392 case pn_Load_M: /* Memory result. */
393 /* put it to the second one */
394 set_Proj_pred(proj, high);
396 case pn_Load_X_except: /* Execution result if exception occurred. */
397 /* put it to the first one */
398 set_Proj_pred(proj, low);
400 case pn_Load_res: { /* Result of load operation. */
401 ir_node *res_low = new_r_Proj(low, low_mode, pn_Load_res);
402 ir_node *res_high = new_r_Proj(high, mode, pn_Load_res);
403 set_lowered(env, proj, res_low, res_high);
407 assert(0 && "unexpected Proj number");
409 /* mark this proj: we have handled it already, otherwise we might fall
410 * into out new nodes. */
411 mark_irn_visited(proj);
416 * Translate a Store: create two.
418 static void lower_Store(ir_node *node, ir_mode *mode, lower_env_t *env)
421 ir_node *block, *adr, *mem;
422 ir_node *low, *high, *proj;
424 ir_node *value = get_Store_value(node);
425 const node_entry_t *entry = get_node_entry(env, value);
426 ir_cons_flags volatility = get_Store_volatility(node) == volatility_is_volatile
432 if (! entry->low_word) {
433 /* not ready yet, wait */
434 pdeq_putr(env->waitq, node);
438 irg = get_irn_irg(node);
439 adr = get_Store_ptr(node);
440 mem = get_Store_mem(node);
441 block = get_nodes_block(node);
443 if (env->params->little_endian) {
445 high = new_r_Add(block, adr, new_r_Const(irg, env->tv_mode_bytes), get_irn_mode(adr));
447 low = new_r_Add(block, adr, new_r_Const(irg, env->tv_mode_bytes), get_irn_mode(adr));
451 /* create two Stores */
452 dbg = get_irn_dbg_info(node);
453 low = new_rd_Store(dbg, block, mem, low, entry->low_word, volatility);
454 proj = new_r_Proj(low, mode_M, pn_Store_M);
455 high = new_rd_Store(dbg, block, proj, high, entry->high_word, volatility);
457 set_lowered(env, node, low, high);
459 for (proj = get_irn_link(node); proj; proj = get_irn_link(proj)) {
460 switch (get_Proj_proj(proj)) {
461 case pn_Store_M: /* Memory result. */
462 /* put it to the second one */
463 set_Proj_pred(proj, high);
465 case pn_Store_X_except: /* Execution result if exception occurred. */
466 /* put it to the first one */
467 set_Proj_pred(proj, low);
470 assert(0 && "unexpected Proj number");
472 /* mark this proj: we have handled it already, otherwise we might fall into
474 mark_irn_visited(proj);
479 * Return a node containing the address of the intrinsic emulation function.
481 * @param method the method type of the emulation function
482 * @param op the emulated ir_op
483 * @param imode the input mode of the emulated opcode
484 * @param omode the output mode of the emulated opcode
485 * @param env the lower environment
487 static ir_node *get_intrinsic_address(ir_type *method, ir_op *op,
488 ir_mode *imode, ir_mode *omode,
493 op_mode_entry_t key, *entry;
500 entry = set_insert(intrinsic_fkt, &key, sizeof(key),
501 HASH_PTR(op) ^ HASH_PTR(imode) ^ (HASH_PTR(omode) << 8));
503 /* create a new one */
504 ent = env->params->create_intrinsic(method, op, imode, omode, env->params->ctx);
506 assert(ent && "Intrinsic creator must return an entity");
512 return new_r_SymConst(env->irg, mode_P_code, sym, symconst_addr_ent);
518 * Create an intrinsic Call.
520 static void lower_Div(ir_node *node, ir_mode *mode, lower_env_t *env)
522 ir_node *left = get_Div_left(node);
523 ir_node *right = get_Div_right(node);
524 const node_entry_t *left_entry = get_node_entry(env, left);
525 const node_entry_t *right_entry = get_node_entry(env, right);
526 ir_node *block = get_nodes_block(node);
527 dbg_info *dbgi = get_irn_dbg_info(node);
529 = mode_is_signed(mode) ? binop_tp_s : binop_tp_u;
530 ir_mode *opmode = get_irn_op_mode(node);
532 = get_intrinsic_address(mtp, get_irn_op(node), opmode, opmode, env);
534 left_entry->low_word, left_entry->high_word,
535 right_entry->low_word, right_entry->high_word };
537 = new_rd_Call(dbgi, block, get_Div_mem(node), addr, 4, in, mtp);
538 ir_node *resproj = new_r_Proj(call, mode_T, pn_Call_T_result);
541 set_irn_pinned(call, get_irn_pinned(node));
543 for (proj = get_irn_link(node); proj; proj = get_irn_link(proj)) {
544 switch (get_Proj_proj(proj)) {
545 case pn_Div_M: /* Memory result. */
546 /* reroute to the call */
547 set_Proj_pred(proj, call);
548 set_Proj_proj(proj, pn_Call_M);
550 case pn_Div_X_except: /* Execution result if exception occurred. */
551 /* reroute to the call */
552 set_Proj_pred(proj, call);
553 set_Proj_proj(proj, pn_Call_X_except);
556 /* Result of computation. */
557 ir_node *res_low = new_r_Proj(resproj, env->low_unsigned, 0);
558 ir_node *res_high = new_r_Proj(resproj, mode, 1);
559 set_lowered(env, proj, res_low, res_high);
563 assert(0 && "unexpected Proj number");
565 /* mark this proj: we have handled it already, otherwise we might fall into
567 mark_irn_visited(proj);
574 * Create an intrinsic Call.
576 static void lower_Mod(ir_node *node, ir_mode *mode, lower_env_t *env)
578 ir_node *left = get_Mod_left(node);
579 ir_node *right = get_Mod_right(node);
580 const node_entry_t *left_entry = get_node_entry(env, left);
581 const node_entry_t *right_entry = get_node_entry(env, right);
583 left_entry->low_word, left_entry->high_word,
584 right_entry->low_word, right_entry->high_word
586 dbg_info *dbgi = get_irn_dbg_info(node);
587 ir_node *block = get_nodes_block(node);
589 = mode_is_signed(mode) ? binop_tp_s : binop_tp_u;
590 ir_mode *opmode = get_irn_op_mode(node);
592 = get_intrinsic_address(mtp, get_irn_op(node), opmode, opmode, env);
594 = new_rd_Call(dbgi, block, get_Mod_mem(node), addr, 4, in, mtp);
595 ir_node *resproj = new_r_Proj(call, mode_T, pn_Call_T_result);
597 set_irn_pinned(call, get_irn_pinned(node));
599 for (proj = get_irn_link(node); proj; proj = get_irn_link(proj)) {
600 switch (get_Proj_proj(proj)) {
601 case pn_Mod_M: /* Memory result. */
602 /* reroute to the call */
603 set_Proj_pred(proj, call);
604 set_Proj_proj(proj, pn_Call_M);
606 case pn_Mod_X_except: /* Execution result if exception occurred. */
607 /* reroute to the call */
608 set_Proj_pred(proj, call);
609 set_Proj_proj(proj, pn_Call_X_except);
612 /* Result of computation. */
613 ir_node *res_low = new_r_Proj(resproj, env->low_unsigned, 0);
614 ir_node *res_high = new_r_Proj(resproj, mode, 1);
615 set_lowered(env, proj, res_low, res_high);
619 assert(0 && "unexpected Proj number");
621 /* mark this proj: we have handled it already, otherwise we might fall
622 * into out new nodes. */
623 mark_irn_visited(proj);
627 static void lower_DivMod(ir_node *node, ir_mode *mode, lower_env_t *env)
632 panic("DivMod is deprecated, no doubleword lowering");
638 * Create an intrinsic Call.
640 static void lower_binop(ir_node *node, ir_mode *mode, lower_env_t *env)
642 ir_node *left = get_binop_left(node);
643 ir_node *right = get_binop_right(node);
644 const node_entry_t *left_entry = get_node_entry(env, left);
645 const node_entry_t *right_entry = get_node_entry(env, right);
647 left_entry->low_word, left_entry->high_word,
648 right_entry->low_word, right_entry->high_word
650 dbg_info *dbgi = get_irn_dbg_info(node);
651 ir_node *block = get_nodes_block(node);
652 ir_graph *irg = get_irn_irg(block);
654 = mode_is_signed(mode) ? binop_tp_s : binop_tp_u;
656 = get_intrinsic_address(mtp, get_irn_op(node), mode, mode, env);
658 = new_rd_Call(dbgi, block, get_irg_no_mem(irg), addr, 4, in, mtp);
659 ir_node *resproj = new_r_Proj(call, mode_T, pn_Call_T_result);
660 ir_node *res_low = new_r_Proj(resproj, env->low_unsigned, 0);
661 ir_node *res_high = new_r_Proj(resproj, mode, 1);
662 set_irn_pinned(call, get_irn_pinned(node));
663 set_lowered(env, node, res_low, res_high);
667 * Translate a Shiftop.
669 * Create an intrinsic Call.
671 static void lower_Shiftop(ir_node *node, ir_mode *mode, lower_env_t *env)
673 ir_node *left = get_binop_left(node);
674 const node_entry_t *left_entry = get_node_entry(env, left);
675 ir_node *right = get_binop_right(node);
677 left_entry->low_word, left_entry->high_word, right
679 dbg_info *dbgi = get_irn_dbg_info(node);
680 ir_node *block = get_nodes_block(node);
681 ir_graph *irg = get_irn_irg(block);
683 = mode_is_signed(mode) ? shiftop_tp_s : shiftop_tp_u;
685 = get_intrinsic_address(mtp, get_irn_op(node), mode, mode, env);
687 = new_rd_Call(dbgi, block, get_irg_no_mem(irg), addr, 3, in, mtp);
688 ir_node *resproj = new_r_Proj(call, mode_T, pn_Call_T_result);
689 ir_node *res_low = new_r_Proj(resproj, env->low_unsigned, 0);
690 ir_node *res_high = new_r_Proj(resproj, mode, 1);
692 set_irn_pinned(call, get_irn_pinned(node));
693 set_lowered(env, node, res_low, res_high);
695 /* The shift count is always mode_Iu, no need for lowering */
696 assert(get_irn_mode(right) != env->high_signed
697 && get_irn_mode(right) != env->high_unsigned);
701 * Translate a Shr and handle special cases.
703 static void lower_Shr(ir_node *node, ir_mode *mode, lower_env_t *env)
705 ir_graph *irg = get_irn_irg(node);
706 ir_node *right = get_Shr_right(node);
708 if (get_mode_arithmetic(mode) == irma_twos_complement && is_Const(right)) {
709 ir_tarval *tv = get_Const_tarval(right);
711 if (tarval_is_long(tv) &&
712 get_tarval_long(tv) >= (long)get_mode_size_bits(mode)) {
713 ir_node *block = get_nodes_block(node);
714 ir_node *left = get_Shr_left(node);
715 ir_mode *low_unsigned = env->low_unsigned;
716 long shf_cnt = get_tarval_long(tv) - get_mode_size_bits(mode);
717 const node_entry_t *left_entry = get_node_entry(env, left);
721 left = left_entry->high_word;
723 /* convert high word into low_unsigned mode if necessary */
724 if (get_irn_mode(left) != low_unsigned)
725 left = new_r_Conv(block, left, low_unsigned);
728 ir_node *c = new_r_Const_long(irg, low_unsigned, shf_cnt);
729 res_low = new_r_Shr(block, left, c, low_unsigned);
733 res_high = new_r_Const(irg, get_mode_null(mode));
734 set_lowered(env, node, res_low, res_high);
739 lower_Shiftop(node, mode, env);
743 * Translate a Shl and handle special cases.
745 static void lower_Shl(ir_node *node, ir_mode *mode, lower_env_t *env)
747 ir_graph *irg = get_irn_irg(node);
748 ir_node *right = get_Shl_right(node);
750 if (get_mode_arithmetic(mode) == irma_twos_complement && is_Const(right)) {
751 ir_tarval *tv = get_Const_tarval(right);
753 if (tarval_is_long(tv) &&
754 get_tarval_long(tv) >= (long)get_mode_size_bits(mode)) {
756 ir_node *block = get_nodes_block(node);
757 ir_node *left = get_Shl_left(node);
759 long shf_cnt = get_tarval_long(tv) - get_mode_size_bits(mode);
760 const node_entry_t *left_entry = get_node_entry(env, left);
764 left = left_entry->low_word;
765 left = new_r_Conv(block, left, mode);
767 mode_l = env->low_unsigned;
769 c = new_r_Const_long(irg, mode_l, shf_cnt);
770 res_high = new_r_Shl(block, left, c, mode);
774 res_low = new_r_Const(irg, get_mode_null(mode_l));
775 set_lowered(env, node, res_low, res_high);
780 lower_Shiftop(node, mode, env);
784 * Translate a Shrs and handle special cases.
786 static void lower_Shrs(ir_node *node, ir_mode *mode, lower_env_t *env)
788 ir_graph *irg = get_irn_irg(node);
789 ir_node *right = get_Shrs_right(node);
791 if (get_mode_arithmetic(mode) == irma_twos_complement && is_Const(right)) {
792 ir_tarval *tv = get_Const_tarval(right);
794 if (tarval_is_long(tv) &&
795 get_tarval_long(tv) >= (long)get_mode_size_bits(mode)) {
796 ir_node *block = get_nodes_block(node);
797 ir_node *left = get_Shrs_left(node);
798 ir_mode *low_unsigned = env->low_unsigned;
799 long shf_cnt = get_tarval_long(tv) - get_mode_size_bits(mode);
800 const node_entry_t *left_entry = get_node_entry(env, left);
801 ir_node *left_unsigned = left;
806 left = left_entry->high_word;
808 /* convert high word into low_unsigned mode if necessary */
809 if (get_irn_mode(left_unsigned) != low_unsigned)
810 left_unsigned = new_r_Conv(block, left, low_unsigned);
813 c = new_r_Const_long(irg, low_unsigned, shf_cnt);
814 res_low = new_r_Shrs(block, left_unsigned, c, low_unsigned);
816 res_low = left_unsigned;
819 c = new_r_Const(irg, get_mode_all_one(low_unsigned));
820 res_high = new_r_Shrs(block, left, c, mode);
821 set_lowered(env, node, res_low, res_high);
825 lower_Shiftop(node, mode, env);
829 * Rebuild Rotl nodes into Or(Shl, Shr) and prepare all nodes.
831 static void prepare_links_and_handle_rotl(ir_node *node, void *env)
833 lower_env_t *lenv = env;
836 ir_mode *mode = get_irn_op_mode(node);
838 ir_node *left, *shl, *shr, *or, *block, *sub, *c;
839 ir_mode *omode, *rmode;
842 optimization_state_t state;
844 if (mode != lenv->high_signed && mode != lenv->high_unsigned) {
845 prepare_links(lenv, node);
849 /* replace the Rotl(x,y) by an Or(Shl(x,y), Shr(x,64-y)) */
850 right = get_Rotl_right(node);
851 irg = get_irn_irg(node);
852 dbg = get_irn_dbg_info(node);
853 omode = get_irn_mode(node);
854 left = get_Rotl_left(node);
855 block = get_nodes_block(node);
856 shl = new_rd_Shl(dbg, block, left, right, omode);
857 rmode = get_irn_mode(right);
858 c = new_r_Const_long(irg, rmode, get_mode_size_bits(omode));
859 sub = new_rd_Sub(dbg, block, c, right, rmode);
860 shr = new_rd_Shr(dbg, block, left, sub, omode);
862 /* switch optimization off here, or we will get the Rotl back */
863 save_optimization_state(&state);
864 set_opt_algebraic_simplification(0);
865 or = new_rd_Or(dbg, block, shl, shr, omode);
866 restore_optimization_state(&state);
870 /* do lowering on the new nodes */
871 prepare_links(lenv, shl);
872 prepare_links(lenv, c);
873 prepare_links(lenv, sub);
874 prepare_links(lenv, shr);
875 prepare_links(lenv, or);
879 prepare_links(lenv, node);
885 * Create an intrinsic Call.
887 static void lower_Unop(ir_node *node, ir_mode *mode, lower_env_t *env)
889 ir_node *op = get_unop_op(node);
890 const node_entry_t *op_entry = get_node_entry(env, op);
891 ir_node *in[2] = { op_entry->low_word, op_entry->high_word };
892 dbg_info *dbgi = get_irn_dbg_info(node);
893 ir_node *block = get_nodes_block(node);
894 ir_graph *irg = get_irn_irg(block);
895 ir_type *mtp = mode_is_signed(mode) ? unop_tp_s : unop_tp_u;
896 ir_op *irop = get_irn_op(node);
897 ir_node *addr = get_intrinsic_address(mtp, irop, mode, mode, env);
898 ir_node *nomem = get_irg_no_mem(irg);
899 ir_node *call = new_rd_Call(dbgi, block, nomem, addr, 2, in, mtp);
900 ir_node *resproj = new_r_Proj(call, mode_T, pn_Call_T_result);
901 ir_node *res_low = new_r_Proj(resproj, env->low_unsigned, 0);
902 ir_node *res_high = new_r_Proj(resproj, mode, 1);
903 set_irn_pinned(call, get_irn_pinned(node));
904 set_lowered(env, node, res_low, res_high);
908 * Translate a logical binop.
910 * Create two logical binops.
912 static void lower_binop_logical(ir_node *node, ir_mode *mode, lower_env_t *env,
913 ir_node *(*constr_rd)(dbg_info *db, ir_node *block, ir_node *op1, ir_node *op2, ir_mode *mode) )
915 ir_node *left = get_binop_left(node);
916 ir_node *right = get_binop_right(node);
917 const node_entry_t *left_entry = get_node_entry(env, left);
918 const node_entry_t *right_entry = get_node_entry(env, right);
919 dbg_info *dbgi = get_irn_dbg_info(node);
920 ir_node *block = get_nodes_block(node);
922 = constr_rd(dbgi, block, left_entry->low_word, right_entry->low_word,
925 = constr_rd(dbgi, block, left_entry->high_word, right_entry->high_word,
927 set_lowered(env, node, res_low, res_high);
930 static void lower_And(ir_node *node, ir_mode *mode, lower_env_t *env)
932 lower_binop_logical(node, mode, env, new_rd_And);
935 static void lower_Or(ir_node *node, ir_mode *mode, lower_env_t *env)
937 lower_binop_logical(node, mode, env, new_rd_Or);
940 static void lower_Eor(ir_node *node, ir_mode *mode, lower_env_t *env)
942 lower_binop_logical(node, mode, env, new_rd_Eor);
948 * Create two logical Nots.
950 static void lower_Not(ir_node *node, ir_mode *mode, lower_env_t *env)
952 ir_node *op = get_Not_op(node);
953 const node_entry_t *op_entry = get_node_entry(env, op);
954 dbg_info *dbgi = get_irn_dbg_info(node);
955 ir_node *block = get_nodes_block(node);
957 = new_rd_Not(dbgi, block, op_entry->low_word, env->low_unsigned);
959 = new_rd_Not(dbgi, block, op_entry->high_word, mode);
960 set_lowered(env, node, res_low, res_high);
966 static void lower_Cond(ir_node *node, ir_mode *mode, lower_env_t *env)
968 ir_node *cmp, *left, *right, *block;
969 ir_node *sel = get_Cond_selector(node);
970 ir_mode *m = get_irn_mode(sel);
972 const node_entry_t *lentry, *rentry;
973 ir_node *proj, *projT = NULL, *projF = NULL;
974 ir_node *new_bl, *cmpH, *cmpL, *irn;
975 ir_node *projHF, *projHT;
984 if (m == env->high_signed || m == env->high_unsigned) {
985 /* bad we can't really handle Switch with 64bit offsets */
986 panic("Cond with 64bit jumptable not supported");
994 cmp = get_Proj_pred(sel);
998 left = get_Cmp_left(cmp);
999 cmp_mode = get_irn_mode(left);
1000 if (cmp_mode != env->high_signed && cmp_mode != env->high_unsigned)
1003 right = get_Cmp_right(cmp);
1004 lentry = get_node_entry(env, left);
1005 rentry = get_node_entry(env, right);
1007 /* all right, build the code */
1008 for (proj = get_irn_link(node); proj; proj = get_irn_link(proj)) {
1009 long proj_nr = get_Proj_proj(proj);
1011 if (proj_nr == pn_Cond_true) {
1012 assert(projT == NULL && "more than one Proj(true)");
1015 assert(proj_nr == pn_Cond_false);
1016 assert(projF == NULL && "more than one Proj(false)");
1019 mark_irn_visited(proj);
1021 assert(projT && projF);
1023 /* create a new high compare */
1024 block = get_nodes_block(node);
1025 irg = get_Block_irg(block);
1026 dbg = get_irn_dbg_info(cmp);
1027 pnc = get_Proj_proj(sel);
1029 if (is_Const(right) && is_Const_null(right)) {
1030 if (pnc == pn_Cmp_Eq || pnc == pn_Cmp_Lg) {
1031 /* x ==/!= 0 ==> or(low,high) ==/!= 0 */
1032 ir_mode *mode = env->low_unsigned;
1033 ir_node *low = new_r_Conv(block, lentry->low_word, mode);
1034 ir_node *high = new_r_Conv(block, lentry->high_word, mode);
1035 ir_node *or = new_rd_Or(dbg, block, low, high, mode);
1036 ir_node *cmp = new_rd_Cmp(dbg, block, or, new_r_Const_long(irg, mode, 0));
1038 ir_node *proj = new_r_Proj(cmp, mode_b, pnc);
1039 set_Cond_selector(node, proj);
1044 cmpH = new_rd_Cmp(dbg, block, lentry->high_word, rentry->high_word);
1046 if (pnc == pn_Cmp_Eq) {
1047 /* simple case:a == b <==> a_h == b_h && a_l == b_l */
1048 pmap_entry *entry = pmap_find(env->proj_2_block, projF);
1051 dst_blk = entry->value;
1053 irn = new_r_Proj(cmpH, mode_b, pn_Cmp_Eq);
1054 dbg = get_irn_dbg_info(node);
1055 irn = new_rd_Cond(dbg, block, irn);
1057 projHF = new_r_Proj(irn, mode_X, pn_Cond_false);
1058 mark_irn_visited(projHF);
1059 exchange(projF, projHF);
1061 projHT = new_r_Proj(irn, mode_X, pn_Cond_true);
1062 mark_irn_visited(projHT);
1064 new_bl = new_r_Block(irg, 1, &projHT);
1066 dbg = get_irn_dbg_info(cmp);
1067 cmpL = new_rd_Cmp(dbg, new_bl, lentry->low_word, rentry->low_word);
1068 irn = new_r_Proj(cmpL, mode_b, pn_Cmp_Eq);
1069 dbg = get_irn_dbg_info(node);
1070 irn = new_rd_Cond(dbg, new_bl, irn);
1072 proj = new_r_Proj(irn, mode_X, pn_Cond_false);
1073 mark_irn_visited(proj);
1074 add_block_cf_input(dst_blk, projHF, proj);
1076 proj = new_r_Proj(irn, mode_X, pn_Cond_true);
1077 mark_irn_visited(proj);
1078 exchange(projT, proj);
1079 } else if (pnc == pn_Cmp_Lg) {
1080 /* simple case:a != b <==> a_h != b_h || a_l != b_l */
1081 pmap_entry *entry = pmap_find(env->proj_2_block, projT);
1084 dst_blk = entry->value;
1086 irn = new_r_Proj(cmpH, mode_b, pn_Cmp_Lg);
1087 dbg = get_irn_dbg_info(node);
1088 irn = new_rd_Cond(dbg, block, irn);
1090 projHT = new_r_Proj(irn, mode_X, pn_Cond_true);
1091 mark_irn_visited(projHT);
1092 exchange(projT, projHT);
1094 projHF = new_r_Proj(irn, mode_X, pn_Cond_false);
1095 mark_irn_visited(projHF);
1097 new_bl = new_r_Block(irg, 1, &projHF);
1099 dbg = get_irn_dbg_info(cmp);
1100 cmpL = new_rd_Cmp(dbg, new_bl, lentry->low_word, rentry->low_word);
1101 irn = new_r_Proj(cmpL, mode_b, pn_Cmp_Lg);
1102 dbg = get_irn_dbg_info(node);
1103 irn = new_rd_Cond(dbg, new_bl, irn);
1105 proj = new_r_Proj(irn, mode_X, pn_Cond_true);
1106 mark_irn_visited(proj);
1107 add_block_cf_input(dst_blk, projHT, proj);
1109 proj = new_r_Proj(irn, mode_X, pn_Cond_false);
1110 mark_irn_visited(proj);
1111 exchange(projF, proj);
1113 /* a rel b <==> a_h REL b_h || (a_h == b_h && a_l rel b_l) */
1114 ir_node *dstT, *dstF, *newbl_eq, *newbl_l;
1117 entry = pmap_find(env->proj_2_block, projT);
1119 dstT = entry->value;
1121 entry = pmap_find(env->proj_2_block, projF);
1123 dstF = entry->value;
1125 irn = new_r_Proj(cmpH, mode_b, pnc & ~pn_Cmp_Eq);
1126 dbg = get_irn_dbg_info(node);
1127 irn = new_rd_Cond(dbg, block, irn);
1129 projHT = new_r_Proj(irn, mode_X, pn_Cond_true);
1130 mark_irn_visited(projHT);
1131 exchange(projT, projHT);
1134 projHF = new_r_Proj(irn, mode_X, pn_Cond_false);
1135 mark_irn_visited(projHF);
1137 newbl_eq = new_r_Block(irg, 1, &projHF);
1139 irn = new_r_Proj(cmpH, mode_b, pn_Cmp_Eq);
1140 irn = new_rd_Cond(dbg, newbl_eq, irn);
1142 proj = new_r_Proj(irn, mode_X, pn_Cond_false);
1143 mark_irn_visited(proj);
1144 exchange(projF, proj);
1147 proj = new_r_Proj(irn, mode_X, pn_Cond_true);
1148 mark_irn_visited(proj);
1150 newbl_l = new_r_Block(irg, 1, &proj);
1152 dbg = get_irn_dbg_info(cmp);
1153 cmpL = new_rd_Cmp(dbg, newbl_l, lentry->low_word, rentry->low_word);
1154 irn = new_r_Proj(cmpL, mode_b, pnc);
1155 dbg = get_irn_dbg_info(node);
1156 irn = new_rd_Cond(dbg, newbl_l, irn);
1158 proj = new_r_Proj(irn, mode_X, pn_Cond_true);
1159 mark_irn_visited(proj);
1160 add_block_cf_input(dstT, projT, proj);
1162 proj = new_r_Proj(irn, mode_X, pn_Cond_false);
1163 mark_irn_visited(proj);
1164 add_block_cf_input(dstF, projF, proj);
1167 /* we have changed the control flow */
1168 env->flags |= CF_CHANGED;
/*
 * Lower a Conv whose RESULT mode is a doubleword (env->high_signed or
 * env->high_unsigned): build a (low word, high word) pair and register it
 * via set_lowered().
 *
 * Visible cases:
 *  - int/reference input that is itself a doubleword: reuse the operand's
 *    low word, Conv its high word to the target half-word mode;
 *  - other int/reference input: Conv to the low-word mode, then build the
 *    high word (arithmetic shift right by width-1 for signed inputs, the
 *    zero constant for unsigned);
 *  - mode_b input: Conv to the low word, high word = 0;
 *  - everything else: Call an intrinsic conversion returning two results.
 *
 * NOTE(review): the embedded original line numbers skip values in this
 * chunk (1181 -> 1185, 1227 -> end, ...), so the declarations of
 * res_low/res_high, several braces/else lines and the closing brace are
 * not visible here.
 */
1172 * Translate a Conv to higher_signed
1174 static void lower_Conv_to_Ll(ir_node *node, lower_env_t *env)
1176 ir_mode *omode = get_irn_mode(node);
1177 ir_node *op = get_Conv_op(node);
1178 ir_mode *imode = get_irn_mode(op);
1179 ir_graph *irg = get_irn_irg(node);
1180 ir_node *block = get_nodes_block(node);
1181 dbg_info *dbg = get_irn_dbg_info(node);
1185 ir_mode *low_unsigned = env->low_unsigned;
1187 = mode_is_signed(omode) ? env->low_signed : low_unsigned;
1189 if (mode_is_int(imode) || mode_is_reference(imode)) {
1190 if (imode == env->high_signed || imode == env->high_unsigned) {
1191 /* a Conv from Lu to Ls or Ls to Lu */
1192 const node_entry_t *op_entry = get_node_entry(env, op);
1193 res_low = op_entry->low_word;
1194 res_high = new_rd_Conv(dbg, block, op_entry->high_word, low_signed);
1196 /* simple case: create a high word */
1197 if (imode != low_unsigned)
1198 op = new_rd_Conv(dbg, block, op, low_unsigned);
1202 if (mode_is_signed(imode)) {
1203 int c = get_mode_size_bits(low_signed) - 1;
1204 ir_node *cnst = new_r_Const_long(irg, low_unsigned, c);
1205 if (get_irn_mode(op) != low_signed)
1206 op = new_rd_Conv(dbg, block, op, low_signed);
1207 res_high = new_rd_Shrs(dbg, block, op, cnst, low_signed);
1209 res_high = new_r_Const(irg, get_mode_null(low_signed));
1212 } else if (imode == mode_b) {
1213 res_low = new_rd_Conv(dbg, block, op, low_unsigned);
1214 res_high = new_r_Const(irg, get_mode_null(low_signed));
1216 ir_node *irn, *call;
1217 ir_type *mtp = get_conv_type(imode, omode, env);
1219 irn = get_intrinsic_address(mtp, get_irn_op(node), imode, omode, env);
1220 call = new_rd_Call(dbg, block, get_irg_no_mem(irg), irn, 1, &op, mtp);
1221 set_irn_pinned(call, get_irn_pinned(node));
1222 irn = new_r_Proj(call, mode_T, pn_Call_T_result);
1224 res_low = new_r_Proj(irn, low_unsigned, 0);
1225 res_high = new_r_Proj(irn, low_signed, 1);
1227 set_lowered(env, node, res_low, res_high);
/*
 * Lower a Conv whose OPERAND is a doubleword value.
 *
 * Visible cases:
 *  - int/reference result: the low word alone carries the value; Conv it
 *    to the target mode when needed and re-point the Conv at it;
 *  - mode_b result: a doubleword is "true" iff (low | high) != 0, so feed
 *    the Or of both words into the Conv;
 *  - everything else: Call an intrinsic conversion taking (low, high) and
 *    exchange() the node with the call's first result.
 *
 * NOTE(review): embedded original line numbers skip values here, so some
 * lines (braces, the Or's trailing mode argument, the closing brace) are
 * not visible in this chunk.
 */
1231 * Translate a Conv from higher_unsigned
1233 static void lower_Conv_from_Ll(ir_node *node, lower_env_t *env)
1235 ir_node *op = get_Conv_op(node);
1236 ir_mode *omode = get_irn_mode(node);
1237 ir_node *block = get_nodes_block(node);
1238 dbg_info *dbg = get_irn_dbg_info(node);
1239 ir_graph *irg = get_irn_irg(node);
1240 const node_entry_t *entry = get_node_entry(env, op);
1242 if (mode_is_int(omode) || mode_is_reference(omode)) {
1243 op = entry->low_word;
1245 /* simple case: create a high word */
1246 if (omode != env->low_unsigned)
1247 op = new_rd_Conv(dbg, block, op, omode);
1249 set_Conv_op(node, op);
1250 } else if (omode == mode_b) {
1251 /* llu ? true : false <=> (low|high) ? true : false */
1252 ir_mode *mode = env->low_unsigned;
1253 ir_node *or = new_rd_Or(dbg, block, entry->low_word, entry->high_word,
1255 set_Conv_op(node, or);
1257 ir_node *irn, *call, *in[2];
1258 ir_mode *imode = get_irn_mode(op);
1259 ir_type *mtp = get_conv_type(imode, omode, env);
1261 irn = get_intrinsic_address(mtp, get_irn_op(node), imode, omode, env);
1262 in[0] = entry->low_word;
1263 in[1] = entry->high_word;
1265 call = new_rd_Call(dbg, block, get_irg_no_mem(irg), irn, 2, in, mtp);
1266 set_irn_pinned(call, get_irn_pinned(node));
1267 irn = new_r_Proj(call, mode_T, pn_Call_T_result);
1269 exchange(node, new_r_Proj(irn, omode, 0));
/*
 * Lower a boolean Proj(Cmp) whose Cmp operands are doubleword values.
 * Bails out early when the compared mode is neither high_signed nor
 * high_unsigned. Otherwise two Cmps are built (low words, high words)
 * and combined according to the projection number:
 *   a == b  <=>  a_h == b_h && a_l == b_l         (And)
 *   a != b  <=>  a_h != b_h || a_l != b_l         (Or)
 *   a rel b <=>  a_h REL b_h || (a_h == b_h && a_l rel b_l)
 * where REL is rel with the Eq bit stripped. The original Proj is
 * exchange()d for the combined boolean result.
 *
 * NOTE(review): embedded original line numbers skip values, so several
 * declarations (pnc, blk, db), the trailing mode arguments of And/Or and
 * some braces are not visible in this chunk.
 */
1274 * lower boolean Proj(Cmp)
1276 static void lower_Proj_Cmp(lower_env_t *env, ir_node *proj)
1278 ir_node *cmp = get_Proj_pred(proj);
1279 ir_node *l = get_Cmp_left(cmp);
1280 ir_mode *mode = get_irn_mode(l);
1281 ir_node *r, *low, *high, *t, *res;
1285 const node_entry_t *lentry;
1286 const node_entry_t *rentry;
1288 if (mode != env->high_signed && mode != env->high_unsigned) {
1292 r = get_Cmp_right(cmp);
1293 lentry = get_node_entry(env, l);
1294 rentry = get_node_entry(env, r);
1295 pnc = get_Proj_proj(proj);
1296 blk = get_nodes_block(cmp);
1297 db = get_irn_dbg_info(cmp);
1298 low = new_rd_Cmp(db, blk, lentry->low_word, rentry->low_word);
1299 high = new_rd_Cmp(db, blk, lentry->high_word, rentry->high_word);
1301 if (pnc == pn_Cmp_Eq) {
1302 /* simple case:a == b <==> a_h == b_h && a_l == b_l */
1303 res = new_rd_And(db, blk,
1304 new_r_Proj(low, mode_b, pnc),
1305 new_r_Proj(high, mode_b, pnc),
1307 } else if (pnc == pn_Cmp_Lg) {
1308 /* simple case:a != b <==> a_h != b_h || a_l != b_l */
1309 res = new_rd_Or(db, blk,
1310 new_r_Proj(low, mode_b, pnc),
1311 new_r_Proj(high, mode_b, pnc),
1314 /* a rel b <==> a_h REL b_h || (a_h == b_h && a_l rel b_l) */
1315 t = new_rd_And(db, blk,
1316 new_r_Proj(low, mode_b, pnc),
1317 new_r_Proj(high, mode_b, pn_Cmp_Eq),
1319 res = new_rd_Or(db, blk,
1320 new_r_Proj(high, mode_b, pnc & ~pn_Cmp_Eq),
1324 exchange(proj, res);
/*
 * Lowering hook for Proj nodes: delegates boolean Proj(Cmp) handling to
 * lower_Proj_Cmp().
 *
 * NOTE(review): the condition guarding the lower_Proj_Cmp() call (the
 * original lines between 1330 and 1332) is elided in this chunk —
 * presumably it checks that the predecessor is a Cmp; verify against the
 * full source.
 */
1327 static void lower_Proj(ir_node *node, ir_mode *mode, lower_env_t *env)
1330 ir_node *pred = get_Proj_pred(node);
1332 lower_Proj_Cmp(env, node);
/*
 * Lowering hook for Conv nodes. Dispatches on where the doubleword mode
 * occurs: result mode is a doubleword -> lower_Conv_to_Ll(); operand mode
 * is a doubleword -> lower_Conv_from_Ll(). The incoming `mode` parameter
 * is overwritten with the node's own mode.
 */
1339 static void lower_Conv(ir_node *node, ir_mode *mode, lower_env_t *env)
1341 mode = get_irn_mode(node);
1343 if (mode == env->high_signed || mode == env->high_unsigned) {
1344 lower_Conv_to_Ll(node, env);
1346 ir_mode *op_mode = get_irn_mode(get_Conv_op(node));
1348 if (op_mode == env->high_signed || op_mode == env->high_unsigned) {
1349 lower_Conv_from_Ll(node, env);
/*
 * Build (and cache) the lowered variant of a method type: every parameter
 * or result of a doubleword mode is replaced by TWO entries —
 * (tp_u, tp_s) for high_signed, (tp_u, tp_u) for high_unsigned — while all
 * other parameters/results are copied unchanged. Results are cached in the
 * global `lowered_type` pmap and linked to the original via
 * set_lowered_type(). When the method has a value-parameter type, the
 * value-parameter entities of the new type are renamed by mangling the old
 * ident with env->first_id / env->next_id, and each old entity's link is
 * set to its new parameter position.
 *
 * NOTE(review): embedded original line numbers skip values, so early
 * returns (lowered-type / cache-hit paths), the counting increments inside
 * the first two loops, and several closing braces are not visible in this
 * chunk.
 */
1355 * Lower the method type.
1357 * @param env the lower environment
1358 * @param mtp the method type to lower
1360 * @return the lowered type
1362 static ir_type *lower_mtp(lower_env_t *env, ir_type *mtp)
1366 ir_type *res, *value_type;
1368 if (is_lowered_type(mtp))
1371 entry = pmap_find(lowered_type, mtp);
1373 int i, n, r, n_param, n_res;
1375 /* count new number of params */
1376 n_param = n = get_method_n_params(mtp);
1377 for (i = n_param - 1; i >= 0; --i) {
1378 ir_type *tp = get_method_param_type(mtp, i);
1380 if (is_Primitive_type(tp)) {
1381 ir_mode *mode = get_type_mode(tp);
1383 if (mode == env->high_signed ||
1384 mode == env->high_unsigned)
1389 /* count new number of results */
1390 n_res = r = get_method_n_ress(mtp);
1391 for (i = n_res - 1; i >= 0; --i) {
1392 ir_type *tp = get_method_res_type(mtp, i);
1394 if (is_Primitive_type(tp)) {
1395 ir_mode *mode = get_type_mode(tp);
1397 if (mode == env->high_signed ||
1398 mode == env->high_unsigned)
1403 res = new_type_method(n_param, n_res);
1405 /* set param types and result types */
1406 for (i = n_param = 0; i < n; ++i) {
1407 ir_type *tp = get_method_param_type(mtp, i);
1409 if (is_Primitive_type(tp)) {
1410 ir_mode *mode = get_type_mode(tp);
1412 if (mode == env->high_signed) {
1413 set_method_param_type(res, n_param++, tp_u);
1414 set_method_param_type(res, n_param++, tp_s);
1415 } else if (mode == env->high_unsigned) {
1416 set_method_param_type(res, n_param++, tp_u);
1417 set_method_param_type(res, n_param++, tp_u);
1419 set_method_param_type(res, n_param++, tp);
1422 set_method_param_type(res, n_param++, tp);
1425 for (i = n_res = 0; i < r; ++i) {
1426 ir_type *tp = get_method_res_type(mtp, i);
1428 if (is_Primitive_type(tp)) {
1429 ir_mode *mode = get_type_mode(tp);
1431 if (mode == env->high_signed) {
1432 set_method_res_type(res, n_res++, tp_u);
1433 set_method_res_type(res, n_res++, tp_s);
1434 } else if (mode == env->high_unsigned) {
1435 set_method_res_type(res, n_res++, tp_u);
1436 set_method_res_type(res, n_res++, tp_u);
1438 set_method_res_type(res, n_res++, tp);
1441 set_method_res_type(res, n_res++, tp);
1444 set_lowered_type(mtp, res);
1445 pmap_insert(lowered_type, mtp, res);
1447 value_type = get_method_value_param_type(mtp);
1448 if (value_type != NULL) {
1449 /* this creates a new value parameter type */
1450 (void)get_method_value_param_ent(res, 0);
1452 /* set new param positions */
1453 for (i = n_param = 0; i < n; ++i) {
1454 ir_type *tp = get_method_param_type(mtp, i);
1455 ident *id = get_method_param_ident(mtp, i);
1456 ir_entity *ent = get_method_value_param_ent(mtp, i);
1458 set_entity_link(ent, INT_TO_PTR(n_param));
1459 if (is_Primitive_type(tp)) {
1460 ir_mode *mode = get_type_mode(tp);
1462 if (mode == env->high_signed || mode == env->high_unsigned) {
1464 lid = id_mangle(id, env->first_id);
1465 set_method_param_ident(res, n_param, lid);
1466 set_entity_ident(get_method_value_param_ent(res, n_param), lid);
1467 lid = id_mangle(id, env->next_id);
1468 set_method_param_ident(res, n_param + 1, lid);
1469 set_entity_ident(get_method_value_param_ent(res, n_param + 1), lid);
1476 set_method_param_ident(res, n_param, id);
1477 set_entity_ident(get_method_value_param_ent(res, n_param), id);
1482 set_lowered_type(value_type, get_method_value_param_type(res));
/*
 * Lower a Return node. First scans the result predecessors for doubleword
 * modes (presumably to return early when none is found — the intervening
 * lines are elided). When lowering is needed, the enclosing entity's
 * method type is replaced by its lowered variant, and a new `in` array is
 * built where each doubleword result is expanded into its (low, high)
 * word pair before set_irn_in() rewires the Return.
 *
 * NOTE(review): embedded original line numbers skip values, so the early
 * return, the pass-through branch of the second loop and some braces are
 * not visible in this chunk.
 */
1491 * Translate a Return.
1493 static void lower_Return(ir_node *node, ir_mode *mode, lower_env_t *env)
1495 ir_graph *irg = get_irn_irg(node);
1496 ir_entity *ent = get_irg_entity(irg);
1497 ir_type *mtp = get_entity_type(ent);
1503 /* check if this return must be lowered */
1504 for (i = 0, n = get_Return_n_ress(node); i < n; ++i) {
1505 ir_node *pred = get_Return_res(node, i);
1506 ir_mode *mode = get_irn_op_mode(pred);
1508 if (mode == env->high_signed || mode == env->high_unsigned)
1514 ent = get_irg_entity(irg);
1515 mtp = get_entity_type(ent);
1517 mtp = lower_mtp(env, mtp);
1518 set_entity_type(ent, mtp);
1520 /* create a new in array */
1521 NEW_ARR_A(ir_node *, in, get_method_n_ress(mtp) + 1);
1522 in[0] = get_Return_mem(node);
1524 for (j = i = 0, n = get_Return_n_ress(node); i < n; ++i) {
1525 ir_node *pred = get_Return_res(node, i);
1526 ir_mode *pred_mode = get_irn_mode(pred);
1528 if (pred_mode == env->high_signed || pred_mode == env->high_unsigned) {
1529 const node_entry_t *entry = get_node_entry(env, pred);
1530 in[++j] = entry->low_word;
1531 in[++j] = entry->high_word;
1537 set_irn_in(node, j+1, in);
/*
 * Lower the Start node: fix up parameter Projs after the entity's method
 * type has been lowered. A `new_projs` table maps old parameter positions
 * to new ones (doubleword parameters occupy two slots). Optimization is
 * temporarily switched off so freshly created Projs are not CSE'd with
 * not-yet-patched ones; then every Proj linked from the Start node gets
 * its projection number remapped, and doubleword Projs are split into a
 * (low, high) pair registered via set_lowered().
 *
 * NOTE(review): embedded original line numbers skip values, so the filling
 * of new_projs, the early return, the set_optimize calls and several
 * braces are not visible in this chunk.
 */
1541 * Translate the parameters.
1543 static void lower_Start(ir_node *node, ir_mode *mode, lower_env_t *env)
1545 ir_graph *irg = get_irn_irg(node);
1546 ir_entity *ent = get_irg_entity(irg);
1547 ir_type *tp = get_entity_type(ent);
1550 int i, j, n_params, rem;
1551 ir_node *proj, *args;
1554 if (is_lowered_type(tp)) {
1555 mtp = get_associated_type(tp);
1559 assert(! is_lowered_type(mtp));
1561 n_params = get_method_n_params(mtp);
1565 NEW_ARR_A(long, new_projs, n_params);
1567 /* first check if we have parameters that must be fixed */
1568 for (i = j = 0; i < n_params; ++i, ++j) {
1569 ir_type *tp = get_method_param_type(mtp, i);
1572 if (is_Primitive_type(tp)) {
1573 ir_mode *mode = get_type_mode(tp);
1575 if (mode == env->high_signed ||
1576 mode == env->high_unsigned)
1583 mtp = lower_mtp(env, mtp);
1584 set_entity_type(ent, mtp);
1586 /* switch off optimization for new Proj nodes or they might be CSE'ed
1587 with not patched one's */
1588 rem = get_optimize();
1591 /* ok, fix all Proj's and create new ones */
1592 args = get_irg_args(irg);
1593 for (proj = get_irn_link(node); proj; proj = get_irn_link(proj)) {
1594 ir_node *pred = get_Proj_pred(proj);
1603 /* do not visit this node again */
1604 mark_irn_visited(proj);
1609 proj_nr = get_Proj_proj(proj);
1610 set_Proj_proj(proj, new_projs[proj_nr]);
1612 mode = get_irn_mode(proj);
1613 mode_l = env->low_unsigned;
1614 if (mode == env->high_signed) {
1615 mode_h = env->low_signed;
1616 } else if (mode == env->high_unsigned) {
1617 mode_h = env->low_unsigned;
1622 dbg = get_irn_dbg_info(proj);
1623 res_low = new_rd_Proj(dbg, args, mode_l, new_projs[proj_nr]);
1624 res_high = new_rd_Proj(dbg, args, mode_h, new_projs[proj_nr] + 1);
1625 set_lowered(env, proj, res_low, res_high);
/*
 * Lower a Call node. Scans parameter and result types for doubleword
 * modes; when found, the call type is lowered, the argument array is
 * rebuilt with each doubleword argument expanded into its (low, high)
 * words, and — via the node's link chain — the result Projs are remapped
 * (res_numbers maps old result positions to new ones) and doubleword
 * result Projs are split into (low, high) pairs registered with
 * set_lowered(). Optimization is temporarily disabled so the fresh Projs
 * are not CSE'd with unpatched ones.
 *
 * NOTE(review): embedded original line numbers skip values, so need_lower
 * updates, res_numbers filling, early returns, set_optimize calls and
 * many braces are not visible in this chunk.
 */
1633 static void lower_Call(ir_node *node, ir_mode *mode, lower_env_t *env)
1635 ir_type *tp = get_Call_type(node);
1637 ir_node **in, *proj, *results;
1638 int n_params, n_res;
1639 bool need_lower = false;
1641 long *res_numbers = NULL;
1644 if (is_lowered_type(tp)) {
1645 call_tp = get_associated_type(tp);
1650 assert(! is_lowered_type(call_tp));
1652 n_params = get_method_n_params(call_tp);
1653 for (i = 0; i < n_params; ++i) {
1654 ir_type *tp = get_method_param_type(call_tp, i);
1656 if (is_Primitive_type(tp)) {
1657 ir_mode *mode = get_type_mode(tp);
1659 if (mode == env->high_signed || mode == env->high_unsigned) {
1665 n_res = get_method_n_ress(call_tp);
1667 NEW_ARR_A(long, res_numbers, n_res);
1669 for (i = j = 0; i < n_res; ++i, ++j) {
1670 ir_type *tp = get_method_res_type(call_tp, i);
1673 if (is_Primitive_type(tp)) {
1674 ir_mode *mode = get_type_mode(tp);
1676 if (mode == env->high_signed || mode == env->high_unsigned) {
1687 /* let's lower it */
1688 call_tp = lower_mtp(env, call_tp);
1689 set_Call_type(node, call_tp);
1691 NEW_ARR_A(ir_node *, in, get_method_n_params(call_tp) + 2);
1693 in[0] = get_Call_mem(node);
1694 in[1] = get_Call_ptr(node);
1696 for (j = 2, i = 0; i < n_params; ++i) {
1697 ir_node *pred = get_Call_param(node, i);
1698 ir_mode *pred_mode = get_irn_mode(pred);
1700 if (pred_mode == env->high_signed || pred_mode == env->high_unsigned) {
1701 const node_entry_t *pred_entry = get_node_entry(env, pred);
1702 in[j++] = pred_entry->low_word;
1703 in[j++] = pred_entry->high_word;
1709 set_irn_in(node, j, in);
1711 /* fix the results */
1713 for (proj = get_irn_link(node); proj; proj = get_irn_link(proj)) {
1714 long proj_nr = get_Proj_proj(proj);
1716 if (proj_nr == pn_Call_T_result && get_Proj_pred(proj) == node) {
1717 /* found the result proj */
1723 if (results != NULL) { /* there are results */
1724 int rem = get_optimize();
1726 /* switch off optimization for new Proj nodes or they might be CSE'ed
1727 with not patched one's */
1729 for (i = j = 0, proj = get_irn_link(results); proj; proj = get_irn_link(proj), ++i, ++j) {
1730 if (get_Proj_pred(proj) == results) {
1731 long proj_nr = get_Proj_proj(proj);
1732 ir_mode *proj_mode = get_irn_mode(proj);
1739 /* found a result */
1740 mark_irn_visited(proj);
1742 set_Proj_proj(proj, res_numbers[proj_nr]);
1744 mode_l = env->low_unsigned;
1745 if (proj_mode == env->high_signed) {
1746 mode_h = env->low_signed;
1747 } else if (proj_mode == env->high_unsigned) {
1748 mode_h = env->low_unsigned;
1753 dbg = get_irn_dbg_info(proj);
1754 res_low = new_rd_Proj(dbg, results, mode_l, res_numbers[proj_nr]);
1755 res_high = new_rd_Proj(dbg, results, mode_h, res_numbers[proj_nr] + 1);
1756 set_lowered(env, proj, res_low, res_high);
/*
 * Lower an Unknown of a doubleword mode into two Unknowns: the low word in
 * low_unsigned mode and the high word in the (already half-word) `mode`
 * passed by lower_node(). Registered via set_lowered().
 */
1764 * Translate an Unknown into two.
1766 static void lower_Unknown(ir_node *node, ir_mode *mode, lower_env_t *env)
1768 ir_mode *low_mode = env->low_unsigned;
1769 ir_graph *irg = get_irn_irg(node);
1770 ir_node *res_low = new_r_Unknown(irg, low_mode);
1771 ir_node *res_high = new_r_Unknown(irg, mode);
1772 set_lowered(env, node, res_low, res_high);
/*
 * First lowering step for a Phi: enqueue all predecessors (cycles are
 * broken at Phis, see lower_node()), and for doubleword Phis create two
 * template Phis (low word in low_unsigned, high word in low_signed or
 * low_unsigned depending on signedness) whose inputs are Dummy
 * placeholders. The real predecessors are patched in later by
 * fixup_phi(); lowered Phis are remembered in env->lowered_phis. The new
 * Phis are also entered into the block's Phi list (guards around the
 * add_Block_phi calls are elided in this chunk).
 *
 * NOTE(review): embedded original line numbers skip values, so several
 * declarations, the loop body filling in_l/in_h with the Dummies, and the
 * conditions before add_Block_phi are not visible here.
 */
1778 * First step: just create two templates
1780 static void lower_Phi(lower_env_t *env, ir_node *phi)
1782 ir_mode *mode = get_irn_mode(phi);
1797 /* enqueue predecessors */
1798 arity = get_Phi_n_preds(phi);
1799 for (i = 0; i < arity; ++i) {
1800 ir_node *pred = get_Phi_pred(phi, i);
1801 pdeq_putr(env->waitq, pred);
1804 if (mode != env->high_signed && mode != env->high_unsigned)
1807 /* first create a new in array */
1808 NEW_ARR_A(ir_node *, in_l, arity);
1809 NEW_ARR_A(ir_node *, in_h, arity);
1810 irg = get_irn_irg(phi);
1811 mode_l = env->low_unsigned;
1812 mode_h = mode == env->high_signed ? env->low_signed : env->low_unsigned;
1813 unk_l = new_r_Dummy(irg, mode_l);
1814 unk_h = new_r_Dummy(irg, mode_h);
1815 for (i = 0; i < arity; ++i) {
1820 dbg = get_irn_dbg_info(phi);
1821 block = get_nodes_block(phi);
1822 phi_l = new_rd_Phi(dbg, block, arity, in_l, mode_l);
1823 phi_h = new_rd_Phi(dbg, block, arity, in_h, mode_h);
1825 set_lowered(env, phi, phi_l, phi_h);
1827 /* remember that we need to fixup the predecessors later */
1828 ARR_APP1(ir_node*, env->lowered_phis, phi);
1830 /* Don't forget to link the new Phi nodes into the block.
1831 * Beware that some Phis might be optimized away. */
1833 add_Block_phi(block, phi_l);
1835 add_Block_phi(block, phi_h);
/*
 * Second lowering step for a Phi: now that all predecessors have been
 * lowered, replace the Dummy placeholders of the template Phis created by
 * lower_Phi() with the predecessors' real low/high words.
 */
1838 static void fixup_phi(lower_env_t *env, ir_node *phi)
1840 const node_entry_t *entry = get_node_entry(env, phi);
1841 ir_node *phi_l = entry->low_word;
1842 ir_node *phi_h = entry->high_word;
1843 int arity = get_Phi_n_preds(phi);
1846 /* exchange phi predecessors which are lowered by now */
1847 for (i = 0; i < arity; ++i) {
1848 ir_node *pred = get_Phi_pred(phi, i);
1849 const node_entry_t *pred_entry = get_node_entry(env, pred);
1851 set_Phi_pred(phi_l, i, pred_entry->low_word);
1852 set_Phi_pred(phi_h, i, pred_entry->high_word);
/*
 * Lower a doubleword Mux into two Muxes sharing the same selector: one
 * choosing between the operands' low words (in low_unsigned mode) and one
 * choosing between their high words (in the half-word `mode` passed by
 * lower_node()). Registered via set_lowered().
 */
1859 static void lower_Mux(ir_node *mux, ir_mode *mode, lower_env_t *env)
1861 ir_node *truen = get_Mux_true(mux);
1862 ir_node *falsen = get_Mux_false(mux);
1863 ir_node *sel = get_Mux_sel(mux);
1864 const node_entry_t *true_entry = get_node_entry(env, truen);
1865 const node_entry_t *false_entry = get_node_entry(env, falsen);
1866 ir_node *true_l = true_entry->low_word;
1867 ir_node *true_h = true_entry->high_word;
1868 ir_node *false_l = false_entry->low_word;
1869 ir_node *false_h = false_entry->high_word;
1870 dbg_info *dbgi = get_irn_dbg_info(mux);
1871 ir_node *block = get_nodes_block(mux);
1873 = new_rd_Mux(dbgi, block, sel, false_l, true_l, env->low_unsigned);
1875 = new_rd_Mux(dbgi, block, sel, false_h, true_h, mode);
1876 set_lowered(env, mux, res_low, res_high);
/*
 * "Lower" an ASM node: doubleword lowering of inline assembly is not
 * implemented, so this only scans the inputs (and, via the link chain,
 * the output Projs) for doubleword modes and panics when one is found.
 */
1880 * Translate an ASM node.
1882 static void lower_ASM(ir_node *asmn, ir_mode *mode, lower_env_t *env)
1884 ir_mode *his = env->high_signed;
1885 ir_mode *hiu = env->high_unsigned;
1891 for (i = get_irn_arity(asmn) - 1; i >= 0; --i) {
1892 ir_mode *op_mode = get_irn_mode(get_irn_n(asmn, i));
1893 if (op_mode == his || op_mode == hiu) {
1894 panic("lowering ASM unimplemented");
1901 n = get_irn_link(n);
1905 proj_mode = get_irn_mode(n);
1906 if (proj_mode == his || proj_mode == hiu) {
1907 panic("lowering ASM unimplemented");
/*
 * Lower a Sel node: only Sels into the (changed) value-parameter type need
 * fixing. The entity's stored link (set by lower_mtp()) gives its new
 * parameter position, from which the replacement entity in the lowered
 * method type env->l_mtp is looked up.
 */
1913 * Translate a Sel node.
1915 static void lower_Sel(ir_node *sel, ir_mode *mode, lower_env_t *env)
1919 /* we must only lower value parameter Sels if we change the
1920 value parameter type. */
1921 if (env->value_param_tp != NULL) {
1922 ir_entity *ent = get_Sel_entity(sel);
1923 if (get_entity_owner(ent) == env->value_param_tp) {
1924 int pos = PTR_TO_INT(get_entity_link(ent));
1926 ent = get_method_value_param_ent(env->l_mtp, pos);
1927 set_Sel_entity(sel, ent);
/*
 * Predicate: does this opcode always need lowering regardless of the
 * node's mode (e.g. nodes whose operands rather than whose result carry
 * the doubleword mode)?
 *
 * NOTE(review): the function body (original lines after 1935) is elided
 * in this chunk.
 */
1933 * check for opcodes that must always be lowered.
1935 static bool always_lower(ir_opcode code)
/*
 * set comparison callback for the intrinsic_fkt set: two entries are equal
 * iff op, imode and omode all match (the Or of the three pointer
 * differences is zero exactly in that case).
 *
 * NOTE(review): the pointer differences (ptrdiff_t) are narrowed to the
 * int return type here; on LP64 targets a difference that is a nonzero
 * multiple of 2^32 would compare as equal — confirm against the set API's
 * requirements.
 */
1953 * Compare two op_mode_entry_t's.
1955 static int cmp_op_mode(const void *elt, const void *key, size_t size)
1957 const op_mode_entry_t *e1 = elt;
1958 const op_mode_entry_t *e2 = key;
1961 return (e1->op - e2->op) | (e1->imode - e2->imode) | (e1->omode - e2->omode);
/*
 * set comparison callback for the conv_types set: entries are equal iff
 * both imode and omode match.
 *
 * NOTE(review): as in cmp_op_mode(), the pointer differences are narrowed
 * to int — theoretical collision on LP64 targets; verify against the set
 * API's requirements.
 */
1965 * Compare two conv_tp_entry_t's.
1967 static int cmp_conv_tp(const void *elt, const void *key, size_t size)
1969 const conv_tp_entry_t *e1 = elt;
1970 const conv_tp_entry_t *e2 = key;
1973 return (e1->imode - e2->imode) | (e1->omode - e2->omode);
/*
 * Register `func` as the generic lowering callback of `op`; retrieved and
 * invoked by lower_node() via op->ops.generic.
 */
1977 * Enter a lowering function into an ir_op.
1979 static void enter_lower_func(ir_op *op, lower_func func)
1981 op->ops.generic = (op_func)func;
/*
 * Predicate: does the method type have at least one parameter of a
 * doubleword mode (and therefore needs lower_mtp())?
 *
 * NOTE(review): only the parameter scan is visible here; the `return`
 * statements and any result scan are on elided lines.
 */
1985 * Returns non-zero if a method type must be lowered.
1987 * @param mtp the method type
1989 static bool mtp_must_be_lowered(lower_env_t *env, ir_type *mtp)
1991 int n_params = get_method_n_params(mtp);
1994 /* first check if we have parameters that must be fixed */
1995 for (i = 0; i < n_params; ++i) {
1996 ir_type *tp = get_method_param_type(mtp, i);
1998 if (is_Primitive_type(tp)) {
1999 ir_mode *mode = get_type_mode(tp);
2001 if (mode == env->high_signed ||
2002 mode == env->high_unsigned)
/*
 * Find the unique signed and unsigned integer modes whose size equals
 * params->doubleword_size (panicking on duplicates or absence), sanity-
 * check that both agree on arithmetic and modulo shift, derive a modulo
 * shift for the half-size modes (halved when it equaled the doubleword
 * size, kept at 0 when it was 0, panic otherwise), and create the lowered
 * half-word modes "WS"/"WU" in env->low_signed / env->low_unsigned.
 */
2009 /* Determine which modes need to be lowered */
2010 static void setup_modes(lower_env_t *env)
2012 unsigned size_bits = env->params->doubleword_size;
2013 ir_mode *doubleword_signed = NULL;
2014 ir_mode *doubleword_unsigned = NULL;
2015 int n_modes = get_irp_n_modes();
2016 ir_mode_arithmetic arithmetic;
2017 unsigned modulo_shift;
2020 /* search for doubleword modes... */
2021 for (i = 0; i < n_modes; ++i) {
2022 ir_mode *mode = get_irp_mode(i);
2023 if (!mode_is_int(mode))
2025 if (get_mode_size_bits(mode) != size_bits)
2027 if (mode_is_signed(mode)) {
2028 if (doubleword_signed != NULL) {
2029 /* sigh - the lowerer should really just lower all mode with
2030 * size_bits it finds. Unfortunately this required a bigger
2032 panic("multiple double word signed modes found");
2034 doubleword_signed = mode;
2036 if (doubleword_unsigned != NULL) {
2037 /* sigh - the lowerer should really just lower all mode with
2038 * size_bits it finds. Unfortunately this required a bigger
2040 panic("multiple double word unsigned modes found");
2042 doubleword_unsigned = mode;
2045 if (doubleword_signed == NULL || doubleword_unsigned == NULL) {
2046 panic("Couldn't find doubleword modes");
2049 arithmetic = get_mode_arithmetic(doubleword_signed);
2050 modulo_shift = get_mode_modulo_shift(doubleword_signed);
2052 assert(get_mode_size_bits(doubleword_unsigned) == size_bits);
2053 assert(size_bits % 2 == 0);
2054 assert(get_mode_sign(doubleword_signed) == 1);
2055 assert(get_mode_sign(doubleword_unsigned) == 0);
2056 assert(get_mode_sort(doubleword_signed) == irms_int_number);
2057 assert(get_mode_sort(doubleword_unsigned) == irms_int_number);
2058 assert(get_mode_arithmetic(doubleword_unsigned) == arithmetic);
2059 assert(get_mode_modulo_shift(doubleword_unsigned) == modulo_shift);
2061 /* try to guess a sensible modulo shift for the new mode.
2062 * (This is IMO another indication that this should really be a node
2063 * attribute instead of a mode thing) */
2064 if (modulo_shift == size_bits) {
2065 modulo_shift = modulo_shift / 2;
2066 } else if (modulo_shift == 0) {
2069 panic("Don't know what new modulo shift to use for lowered doubleword mode");
2073 /* produce lowered modes */
2074 env->high_signed = doubleword_signed;
2075 env->high_unsigned = doubleword_unsigned;
2076 env->low_signed = new_ir_mode("WS", irms_int_number, size_bits, 1,
2077 arithmetic, modulo_shift);
2078 env->low_unsigned = new_ir_mode("WU", irms_int_number, size_bits, 0,
2079 arithmetic, modulo_shift);
/*
 * Put all predecessors of `node` onto the work queue (used for Block
 * nodes, where the depth-first descent of lower_node() is cut off).
 */
2082 static void enqueue_preds(lower_env_t *env, ir_node *node)
2084 int arity = get_irn_arity(node);
2087 for (i = 0; i < arity; ++i) {
2088 ir_node *pred = get_irn_n(node, i);
2089 pdeq_putr(env->waitq, pred);
2093 static void lower_node(lower_env_t *env, ir_node *node)
/*
 * Lower a single node, depth-first: cycles are broken at Block nodes
 * (predecessors are merely enqueued) and at Phi nodes (handled by the
 * two-step lower_Phi/fixup_phi scheme), so ordinary lowering functions
 * never see an unlowered operand. After all operands (and the containing
 * block) are lowered, the op's registered generic lowering function is
 * called — when the node has a data entry or its opcode must always be
 * lowered — with `mode` set to the half-word mode matching the node's
 * doubleword signedness.
 *
 * NOTE(review): embedded original line numbers skip values, so the
 * declarations at the top, the early return after lower_Phi, the NULL
 * check on `func` and the else branch choosing low_unsigned are not
 * visible in this chunk; the L2093 declaration line itself is annotated
 * with the preceding block.
 */
2101 node_entry_t *entry;
2103 if (irn_visited(node))
2105 mark_irn_visited(node);
2107 /* cycles are always broken at Phi and Block nodes. So we don't need special
2108 * magic in all the other lower functions */
2109 if (is_Block(node)) {
2110 enqueue_preds(env, node);
2112 } else if (is_Phi(node)) {
2113 lower_Phi(env, node);
2117 /* depth-first: descend into operands */
2118 if (!is_Block(node)) {
2119 ir_node *block = get_nodes_block(node);
2120 lower_node(env, block);
2123 arity = get_irn_arity(node);
2124 for (i = 0; i < arity; ++i) {
2125 ir_node *pred = get_irn_n(node, i);
2126 lower_node(env, pred);
2129 op = get_irn_op(node);
2130 func = (lower_func) op->ops.generic;
2134 idx = get_irn_idx(node);
2135 entry = idx < env->n_entries ? env->entries[idx] : NULL;
2136 if (entry != NULL || always_lower(get_irn_opcode(node))) {
2137 mode = get_irn_op_mode(node);
2138 if (mode == env->high_signed) {
2139 mode = env->low_signed;
2141 mode = env->low_unsigned;
2143 DB((dbg, LEVEL_1, " %+F\n", node));
2144 func(node, mode, env);
/*
 * Lower one graph. Allocates the per-node entry table (sized at last
 * index + 25% headroom), lowers the graph entity's method type when
 * needed, links nodes via prepare_links_and_handle_rotl, then — when the
 * MUST_BE_LOWERED flag was set — drains the work queue through
 * lower_node() starting at the End node and patches the template Phis
 * via fixup_phi(). Finally invalidates out edges and, if control flow
 * changed, dominance/extbb/loop info, and releases all resources.
 */
2148 static void lower_irg(lower_env_t *env, ir_graph *irg)
2154 obstack_init(&env->obst);
2156 n_idx = get_irg_last_idx(irg);
2157 n_idx = n_idx + (n_idx >> 2); /* add 25% */
2158 env->n_entries = n_idx;
2159 env->entries = NEW_ARR_F(node_entry_t*, n_idx);
2160 memset(env->entries, 0, sizeof(env->entries[0]) * n_idx);
2165 env->proj_2_block = pmap_create();
2166 env->value_param_tp = NULL;
2168 ent = get_irg_entity(irg);
2169 mtp = get_entity_type(ent);
2171 if (mtp_must_be_lowered(env, mtp)) {
2172 ir_type *ltp = lower_mtp(env, mtp);
2173 env->flags |= MUST_BE_LOWERED;
2174 set_entity_type(ent, ltp);
2176 env->value_param_tp = get_method_value_param_type(mtp);
2179 /* first step: link all nodes and allocate data */
2180 ir_reserve_resources(irg, IR_RESOURCE_PHI_LIST | IR_RESOURCE_IRN_LINK);
2181 irg_walk_graph(irg, firm_clear_node_and_phi_links,
2182 prepare_links_and_handle_rotl, env);
2184 if (env->flags & MUST_BE_LOWERED) {
2186 ir_reserve_resources(irg, IR_RESOURCE_IRN_VISITED);
2187 inc_irg_visited(irg);
2189 assert(pdeq_empty(env->waitq));
2190 pdeq_putr(env->waitq, get_irg_end(irg));
2192 env->lowered_phis = NEW_ARR_F(ir_node*, 0);
2193 while (!pdeq_empty(env->waitq)) {
2194 ir_node *node = pdeq_getl(env->waitq);
2195 lower_node(env, node);
2198 /* we need to fixup phis */
2199 for (i = 0; i < ARR_LEN(env->lowered_phis); ++i) {
2200 ir_node *phi = env->lowered_phis[i];
2201 fixup_phi(env, phi);
2203 DEL_ARR_F(env->lowered_phis);
2206 ir_free_resources(irg, IR_RESOURCE_IRN_VISITED);
2208 /* outs are invalid, we changed the graph */
2209 set_irg_outs_inconsistent(irg);
2211 if (env->flags & CF_CHANGED) {
2212 /* control flow changed, dominance info is invalid */
2213 set_irg_doms_inconsistent(irg);
2214 set_irg_extblk_inconsistent(irg);
2215 set_irg_loopinfo_inconsistent(irg);
2219 ir_free_resources(irg, IR_RESOURCE_PHI_LIST | IR_RESOURCE_IRN_LINK);
2221 pmap_destroy(env->proj_2_block);
2222 DEL_ARR_F(env->entries);
2223 obstack_free(&env->obst, NULL);
/*
 * Entry point of the doubleword-lowering pass. Initializes the lowering
 * environment from `param`, lazily creates the global maps
 * (intrinsic_fkt, conv_types, lowered_type) and the primitive types
 * tp_u/tp_s, builds the method types used for intrinsic calls
 * (binop: (l,h,l,h)->(l,h); shiftop: (l,h,cnt)->(l,h); unop:
 * (l,h)->(l,h); unsigned variants use tp_u for the high word, signed
 * ones tp_s), installs the per-opcode lowering callbacks, precomputes the
 * byte/bit-size tarvals and the little-endian-dependent ".l"/".h" mangling
 * idents, and finally runs lower_irg() over all graphs.
 *
 * NOTE(review): embedded original line numbers skip values, so several
 * guards (e.g. the `if (! ...)` before most type creations), setup_modes
 * invocation and some closing braces are not visible in this chunk.
 */
2229 void lower_dw_ops(const lwrdw_param_t *param)
2234 assert(param != NULL);
2235 FIRM_DBG_REGISTER(dbg, "firm.lower.dw");
2237 memset(&lenv, 0, sizeof(lenv));
2238 lenv.params = param;
2241 /* create the necessary maps */
2242 if (! intrinsic_fkt)
2243 intrinsic_fkt = new_set(cmp_op_mode, iro_Last + 1);
2245 conv_types = new_set(cmp_conv_tp, 16);
2247 lowered_type = pmap_create();
2249 /* create a primitive unsigned and signed type */
2251 tp_u = get_type_for_mode(lenv.low_unsigned);
2253 tp_s = get_type_for_mode(lenv.low_signed);
2255 /* create method types for the created binop calls */
2257 binop_tp_u = new_type_method(4, 2);
2258 set_method_param_type(binop_tp_u, 0, tp_u);
2259 set_method_param_type(binop_tp_u, 1, tp_u);
2260 set_method_param_type(binop_tp_u, 2, tp_u);
2261 set_method_param_type(binop_tp_u, 3, tp_u);
2262 set_method_res_type(binop_tp_u, 0, tp_u);
2263 set_method_res_type(binop_tp_u, 1, tp_u);
2266 binop_tp_s = new_type_method(4, 2);
2267 set_method_param_type(binop_tp_s, 0, tp_u);
2268 set_method_param_type(binop_tp_s, 1, tp_s);
2269 set_method_param_type(binop_tp_s, 2, tp_u);
2270 set_method_param_type(binop_tp_s, 3, tp_s);
2271 set_method_res_type(binop_tp_s, 0, tp_u);
2272 set_method_res_type(binop_tp_s, 1, tp_s);
2274 if (! shiftop_tp_u) {
2275 shiftop_tp_u = new_type_method(3, 2);
2276 set_method_param_type(shiftop_tp_u, 0, tp_u);
2277 set_method_param_type(shiftop_tp_u, 1, tp_u);
2278 set_method_param_type(shiftop_tp_u, 2, tp_u);
2279 set_method_res_type(shiftop_tp_u, 0, tp_u);
2280 set_method_res_type(shiftop_tp_u, 1, tp_u);
2282 if (! shiftop_tp_s) {
2283 shiftop_tp_s = new_type_method(3, 2);
2284 set_method_param_type(shiftop_tp_s, 0, tp_u);
2285 set_method_param_type(shiftop_tp_s, 1, tp_s);
2286 set_method_param_type(shiftop_tp_s, 2, tp_u);
2287 set_method_res_type(shiftop_tp_s, 0, tp_u);
2288 set_method_res_type(shiftop_tp_s, 1, tp_s);
2291 unop_tp_u = new_type_method(2, 2);
2292 set_method_param_type(unop_tp_u, 0, tp_u);
2293 set_method_param_type(unop_tp_u, 1, tp_u);
2294 set_method_res_type(unop_tp_u, 0, tp_u);
2295 set_method_res_type(unop_tp_u, 1, tp_u);
2298 unop_tp_s = new_type_method(2, 2);
2299 set_method_param_type(unop_tp_s, 0, tp_u);
2300 set_method_param_type(unop_tp_s, 1, tp_s);
2301 set_method_res_type(unop_tp_s, 0, tp_u);
2302 set_method_res_type(unop_tp_s, 1, tp_s);
2305 clear_irp_opcodes_generic_func();
2306 enter_lower_func(op_Add, lower_binop);
2307 enter_lower_func(op_And, lower_And);
2308 enter_lower_func(op_ASM, lower_ASM);
2309 enter_lower_func(op_Call, lower_Call);
2310 enter_lower_func(op_Cond, lower_Cond);
2311 enter_lower_func(op_Const, lower_Const);
2312 enter_lower_func(op_Conv, lower_Conv);
2313 enter_lower_func(op_Div, lower_Div);
2314 enter_lower_func(op_DivMod, lower_DivMod);
2315 enter_lower_func(op_Eor, lower_Eor);
2316 enter_lower_func(op_Load, lower_Load);
2317 enter_lower_func(op_Minus, lower_Unop);
2318 enter_lower_func(op_Mod, lower_Mod);
2319 enter_lower_func(op_Mul, lower_binop);
2320 enter_lower_func(op_Mux, lower_Mux);
2321 enter_lower_func(op_Not, lower_Not);
2322 enter_lower_func(op_Or, lower_Or);
2323 enter_lower_func(op_Proj, lower_Proj);
2324 enter_lower_func(op_Return, lower_Return);
2325 enter_lower_func(op_Sel, lower_Sel);
2326 enter_lower_func(op_Shl, lower_Shl);
2327 enter_lower_func(op_Shr, lower_Shr);
2328 enter_lower_func(op_Shrs, lower_Shrs);
2329 enter_lower_func(op_Start, lower_Start);
2330 enter_lower_func(op_Store, lower_Store);
2331 enter_lower_func(op_Sub, lower_binop);
2332 enter_lower_func(op_Unknown, lower_Unknown);
2334 lenv.tv_mode_bytes = new_tarval_from_long(param->doubleword_size/(2*8), lenv.low_unsigned);
2335 lenv.tv_mode_bits = new_tarval_from_long(param->doubleword_size/2, lenv.low_unsigned);
2336 lenv.waitq = new_pdeq();
2337 lenv.first_id = new_id_from_chars(param->little_endian ? ".l" : ".h", 2);
2338 lenv.next_id = new_id_from_chars(param->little_endian ? ".h" : ".l", 2);
2340 /* transform all graphs */
2341 for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
2342 ir_graph *irg = get_irp_irg(i);
2343 lower_irg(&lenv, irg);
2345 del_pdeq(lenv.waitq);
2348 /* Default implementation. */
2349 ir_entity *def_create_intrinsic_fkt(ir_type *method, const ir_op *op,
2350 const ir_mode *imode, const ir_mode *omode,
2358 if (imode == omode) {
2359 snprintf(buf, sizeof(buf), "__l%s%s", get_op_name(op), get_mode_name(imode));
2361 snprintf(buf, sizeof(buf), "__l%s%s%s", get_op_name(op),
2362 get_mode_name(imode), get_mode_name(omode));
2364 id = new_id_from_str(buf);
2366 ent = new_entity(get_glob_type(), id, method);
2367 set_entity_ld_ident(ent, get_entity_ident(ent));