2 * Copyright (C) 1995-2008 University of Karlsruhe. All rights reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief This file implements the mapping of 64Bit intrinsic
23 * functions to code or library calls.
24 * @author Michael Beck
38 #include "ia32_new_nodes.h"
39 #include "bearch_ia32_t.h"
40 #include "gen_ia32_regalloc_if.h"
42 /** The growable array (libFirm ARR_F, see NEW_ARR_F/ARR_APP1 below) of all intrinsics that must be mapped. */
43 static i_record *intrinsics;
45 /** Cache of the entities created for each mapped operation, indexed by ir opcode. */
46 static ir_entity *i_ents[iro_Last + 1];
49 * Maps all intrinsic calls that the backend supports
50 * and maps all instructions the backend did not support
53 void ia32_handle_intrinsics(void) {
54 if (intrinsics && ARR_LEN(intrinsics) > 0) {
/* part_block_used=1: some mappers below split basic blocks, so tell the lowerer */
55 lower_intrinsics(intrinsics, ARR_LEN(intrinsics), /*part_block_used=*/1);
/* Indices of the 64-bit operand halves inside the intrinsic Call's parameter array. */
59 #define BINOP_Left_Low 0
60 #define BINOP_Left_High 1
61 #define BINOP_Right_Low 2
62 #define BINOP_Right_High 3
65 * Replace a call by a tuple of l_res, h_res.
/*
 * Turns the intrinsic Call into a Tuple so the existing Projs on the Call
 * now select l_res/h_res.  The memory outputs are routed to NoMem and the
 * exception path to Bad; a fresh Jmp feeds the regular control-flow output.
 */
67 static void resolve_call(ir_node *call, ir_node *l_res, ir_node *h_res, ir_graph *irg, ir_node *block) {
68 ir_node *jmp, *res, *in[2];
69 ir_node *bad = get_irg_bad(irg);
70 ir_node *nomem = get_irg_no_mem(irg);
/* NOTE(review): in[0]/in[1] are assumed to be filled with l_res (and h_res)
   on lines not visible in this chunk before the Tuple is built. */
75 res = new_r_Tuple(irg, block, h_res == NULL ? 1 : 2, in);
77 turn_into_tuple(call, pn_Call_max);
78 set_Tuple_pred(call, pn_Call_M_regular, nomem);
81 * We do not check here if this call really has exception and regular Proj's.
82 * new_r_Jmp might then be CSEd with the real exit jmp and then bad things happen
83 * (in movgen.c from 186.crafty for example).
84 * So be sure the newly created Jmp cannot CSE.
/* temporarily disable CSE so the Jmp below stays unique (restored on a line not shown) */
86 old_cse = get_opt_cse();
88 jmp = new_r_Jmp(irg, block);
91 set_Tuple_pred(call, pn_Call_X_regular, jmp);
92 set_Tuple_pred(call, pn_Call_X_except, bad);
93 set_Tuple_pred(call, pn_Call_T_result, res);
94 set_Tuple_pred(call, pn_Call_M_except, nomem);
95 set_Tuple_pred(call, pn_Call_P_value_res_base, bad);
99 * Map an Add (a_l, a_h, b_l, b_h)
/*
 * Lowers a 64-bit add intrinsic: the low words are added with l_Add and the
 * carry (taken as the flags Proj of the low add) is consumed by l_Adc for
 * the high words.  ctx is unused in the visible body.
 */
101 static int map_Add(ir_node *call, void *ctx) {
102 ir_graph *irg = current_ir_graph;
103 dbg_info *dbg = get_irn_dbg_info(call);
104 ir_node *block = get_nodes_block(call);
105 ir_node **params = get_Call_param_arr(call);
106 ir_type *method = get_Call_type(call);
107 ir_node *a_l = params[BINOP_Left_Low];
108 ir_node *a_h = params[BINOP_Left_High];
109 ir_node *b_l = params[BINOP_Right_Low];
110 ir_node *b_h = params[BINOP_Right_High];
111 ir_mode *l_mode = get_type_mode(get_method_res_type(method, 0));
112 ir_mode *h_mode = get_type_mode(get_method_res_type(method, 1));
113 ir_mode *mode_flags = ia32_reg_classes[CLASS_ia32_flags].mode;
114 ir_node *add_low, *add_high, *flags;
115 ir_node *l_res, *h_res;
118 /* l_res = a_l + b_l */
119 /* h_res = a_h + b_h + carry */
/* low add produces mode_T: result value plus carry flags */
121 add_low = new_bd_ia32_l_Add(dbg, block, a_l, b_l, mode_T);
122 flags = new_r_Proj(irg, block, add_low, mode_flags, pn_ia32_flags);
123 add_high = new_bd_ia32_l_Adc(dbg, block, a_h, b_h, flags, h_mode);
125 l_res = new_r_Proj(irg, block, add_low, l_mode, pn_ia32_res);
/* NOTE(review): h_res is presumably assigned from add_high on a line not shown here */
128 resolve_call(call, l_res, h_res, irg, block);
133 * Map a Sub (a_l, a_h, b_l, b_h)
/*
 * Lowers a 64-bit subtract intrinsic: low words via l_Sub, high words via
 * l_Sbb consuming the borrow from the low subtract (flags Proj).
 * ctx is unused in the visible body.
 */
135 static int map_Sub(ir_node *call, void *ctx)
137 ir_graph *irg = current_ir_graph;
138 dbg_info *dbg = get_irn_dbg_info(call);
139 ir_node *block = get_nodes_block(call);
140 ir_node **params = get_Call_param_arr(call);
141 ir_type *method = get_Call_type(call);
142 ir_node *a_l = params[BINOP_Left_Low];
143 ir_node *a_h = params[BINOP_Left_High];
144 ir_node *b_l = params[BINOP_Right_Low];
145 ir_node *b_h = params[BINOP_Right_High];
146 ir_mode *l_mode = get_type_mode(get_method_res_type(method, 0));
147 ir_mode *h_mode = get_type_mode(get_method_res_type(method, 1));
148 ir_mode *mode_flags = ia32_reg_classes[CLASS_ia32_flags].mode;
149 ir_node *sub_low, *sub_high, *flags;
150 ir_node *l_res, *h_res;
153 /* l_res = a_l - b_l */
154 /* h_res = a_h - b_h - carry */
/* low subtract produces mode_T: result value plus borrow flags */
156 sub_low = new_bd_ia32_l_Sub(dbg, block, a_l, b_l, mode_T);
157 flags = new_r_Proj(irg, block, sub_low, mode_flags, pn_ia32_flags);
158 sub_high = new_bd_ia32_l_Sbb(dbg, block, a_h, b_h, flags, h_mode);
160 l_res = new_r_Proj(irg, block, sub_low, l_mode, pn_ia32_res);
/* NOTE(review): h_res is presumably assigned from sub_high on a line not shown here */
163 resolve_call(call, l_res, h_res, irg, block);
168 * Map a Shl (a_l, a_h, count)
/*
 * Lowers a 64-bit shift-left intrinsic.  A constant count is handled
 * directly; for a variable count a runtime test on (cnt & 32) selects
 * between the SHLD/SHL pair (effective count < 32) and the "low word
 * shifted out" variant, joined with Phis in the (split) lower block.
 * ctx is unused in the visible body.
 */
170 static int map_Shl(ir_node *call, void *ctx) {
171 ir_graph *irg = current_ir_graph;
172 dbg_info *dbg = get_irn_dbg_info(call);
173 ir_node *block = get_nodes_block(call);
174 ir_node **params = get_Call_param_arr(call);
175 ir_type *method = get_Call_type(call);
176 ir_node *a_l = params[BINOP_Left_Low];
177 ir_node *a_h = params[BINOP_Left_High];
178 ir_node *cnt = params[BINOP_Right_Low];
179 ir_mode *l_mode = get_type_mode(get_method_res_type(method, 0));
180 ir_mode *h_mode = get_type_mode(get_method_res_type(method, 1));
182 ir_node *l_res, *h_res, *irn, *cond, *upper, *n_block, *l1, *l2, *h1, *h2, *in[2];
186 /* the shift count is a const, create better code */
187 tarval *tv = get_Const_tarval(cnt);
/* tv >= 32: the whole low word is shifted into the high word */
189 if (tarval_cmp(tv, new_tarval_from_long(32, l_mode)) & (pn_Cmp_Gt|pn_Cmp_Eq)) {
190 /* simplest case: shift only the lower bits. Note that there is no
191 need to reduce the constant here, this is done by the hardware. */
192 ir_node *conv = new_rd_Conv(dbg, irg, block, a_l, h_mode);
193 h_res = new_rd_Shl(dbg, irg, block, conv, cnt, h_mode);
194 l_res = new_rd_Const(dbg, irg, l_mode, get_mode_null(l_mode));
197 /* h_res = SHLD a_h, a_l, cnt */
198 h_res = new_bd_ia32_l_ShlD(dbg, block, a_h, a_l, cnt, h_mode);
200 /* l_res = SHL a_l, cnt */
201 l_res = new_bd_ia32_l_ShlDep(dbg, block, a_l, cnt, h_res, l_mode);
204 resolve_call(call, l_res, h_res, irg, block);
/* variable count: build both variants and select at runtime */
209 upper = get_nodes_block(call);
211 /* h_res = SHLD a_h, a_l, cnt */
212 h1 = new_bd_ia32_l_ShlD(dbg, upper, a_h, a_l, cnt, h_mode);
214 /* l_res = SHL a_l, cnt */
215 l1 = new_bd_ia32_l_ShlDep(dbg, upper, a_l, cnt, h1, l_mode);
/* test (cnt & 32) == 0, i.e. whether the effective count is < 32 */
217 c_mode = get_irn_mode(cnt);
218 irn = new_r_Const_long(irg, c_mode, 32);
219 irn = new_rd_And(dbg, irg, upper, cnt, irn, c_mode);
220 irn = new_rd_Cmp(dbg, irg, upper, irn, new_r_Const(irg, c_mode, get_mode_null(c_mode)));
221 irn = new_r_Proj(irg, upper, irn, mode_b, pn_Cmp_Eq);
222 cond = new_rd_Cond(dbg, irg, upper, irn);
224 in[0] = new_r_Proj(irg, upper, cond, mode_X, pn_Cond_true);
225 in[1] = new_r_Proj(irg, upper, cond, mode_X, pn_Cond_false);
227 /* the block for cnt >= 32 */
228 n_block = new_rd_Block(dbg, irg, 1, &in[1]);
/* hardware reduces the count mod 32, so l1 here already holds a_l << (cnt-32) */
229 h2 = new_rd_Conv(dbg, irg, n_block, l1, h_mode);
230 l2 = new_r_Const(irg, l_mode, get_mode_null(l_mode));
231 in[1] = new_r_Jmp(irg, n_block);
/* make the original block the two-predecessor join block */
233 set_irn_in(block, 2, in);
/* NOTE(review): in[] is presumably reloaded with {l1,l2} and later {h1,h2}
   on lines not visible in this chunk before each Phi is built. */
237 l_res = new_r_Phi(irg, block, 2, in, l_mode);
238 set_Block_phis(block, l_res);
242 h_res = new_r_Phi(irg, block, 2, in, h_mode);
/* chain the Phis at the block for a following part_block() */
243 set_Phi_next(l_res, h_res);
244 set_Phi_next(h_res, NULL);
/* move the call and its Projs into the join block */
247 set_nodes_block(call, block);
248 for (irn = get_irn_link(call); irn != NULL; irn = get_irn_link(irn))
249 set_nodes_block(irn, block);
251 resolve_call(call, l_res, h_res, irg, block);
256 * Map a Shr (a_l, a_h, count)
/*
 * Lowers a 64-bit logical shift-right intrinsic.  A constant count is
 * handled directly; for a variable count a runtime test on (cnt & 32)
 * selects between the SHRD/SHR pair (effective count < 32) and the
 * "high word shifted out" variant, joined with Phis in the lower block.
 * ctx is unused in the visible body.
 */
258 static int map_Shr(ir_node *call, void *ctx) {
259 ir_graph *irg = current_ir_graph;
260 dbg_info *dbg = get_irn_dbg_info(call);
261 ir_node *block = get_nodes_block(call);
262 ir_node **params = get_Call_param_arr(call);
263 ir_type *method = get_Call_type(call);
264 ir_node *a_l = params[BINOP_Left_Low];
265 ir_node *a_h = params[BINOP_Left_High];
266 ir_node *cnt = params[BINOP_Right_Low];
267 ir_mode *l_mode = get_type_mode(get_method_res_type(method, 0));
268 ir_mode *h_mode = get_type_mode(get_method_res_type(method, 1));
270 ir_node *l_res, *h_res, *irn, *cond, *upper, *n_block, *l1, *l2, *h1, *h2, *in[2];
274 /* the shift count is a const, create better code */
275 tarval *tv = get_Const_tarval(cnt);
/* tv >= 32: the whole high word is shifted into the low word, high becomes 0 */
277 if (tarval_cmp(tv, new_tarval_from_long(32, l_mode)) & (pn_Cmp_Gt|pn_Cmp_Eq)) {
278 /* simplest case: shift only the higher bits. Note that there is no
279 need to reduce the constant here, this is done by the hardware. */
280 ir_node *conv = new_rd_Conv(dbg, irg, block, a_h, l_mode);
281 h_res = new_rd_Const(dbg, irg, h_mode, get_mode_null(h_mode));
282 l_res = new_rd_Shr(dbg, irg, block, conv, cnt, l_mode);
284 /* l_res = SHRD a_h:a_l, cnt */
285 l_res = new_bd_ia32_l_ShrD(dbg, block, a_l, a_h, cnt, l_mode);
287 /* h_res = SHR a_h, cnt */
288 h_res = new_bd_ia32_l_ShrDep(dbg, block, a_h, cnt, l_res, h_mode);
290 resolve_call(call, l_res, h_res, irg, block);
/* variable count: build both variants and select at runtime */
295 upper = get_nodes_block(call);
297 /* l_res = SHRD a_h:a_l, cnt */
298 l1 = new_bd_ia32_l_ShrD(dbg, upper, a_l, a_h, cnt, l_mode);
300 /* h_res = SHR a_h, cnt */
301 h1 = new_bd_ia32_l_ShrDep(dbg, upper, a_h, cnt, l1, h_mode);
/* test (cnt & 32) == 0, i.e. whether the effective count is < 32 */
303 c_mode = get_irn_mode(cnt);
304 irn = new_r_Const_long(irg, c_mode, 32);
305 irn = new_rd_And(dbg, irg, upper, cnt, irn, c_mode);
306 irn = new_rd_Cmp(dbg, irg, upper, irn, new_r_Const(irg, c_mode, get_mode_null(c_mode)));
307 irn = new_r_Proj(irg, upper, irn, mode_b, pn_Cmp_Eq);
308 cond = new_rd_Cond(dbg, irg, upper, irn);
310 in[0] = new_r_Proj(irg, upper, cond, mode_X, pn_Cond_true);
311 in[1] = new_r_Proj(irg, upper, cond, mode_X, pn_Cond_false);
313 /* the block for cnt >= 32 */
314 n_block = new_rd_Block(dbg, irg, 1, &in[1]);
/* hardware reduces the count mod 32, so h1 here already holds a_h >> (cnt-32) */
315 l2 = new_rd_Conv(dbg, irg, n_block, h1, l_mode);
316 h2 = new_r_Const(irg, h_mode, get_mode_null(h_mode));
317 in[1] = new_r_Jmp(irg, n_block);
/* make the original block the two-predecessor join block */
319 set_irn_in(block, 2, in);
/* NOTE(review): in[] is presumably reloaded with {l1,l2} and later {h1,h2}
   on lines not visible in this chunk before each Phi is built. */
323 l_res = new_r_Phi(irg, block, 2, in, l_mode);
324 set_Block_phis(block, l_res);
328 h_res = new_r_Phi(irg, block, 2, in, h_mode);
/* chain the Phis at the block for a following part_block() */
329 set_Phi_next(l_res, h_res);
330 set_Phi_next(h_res, NULL);
/* move the call and its Projs into the join block */
333 set_nodes_block(call, block);
334 for (irn = get_irn_link(call); irn != NULL; irn = get_irn_link(irn))
335 set_nodes_block(irn, block);
337 resolve_call(call, l_res, h_res, irg, block);
342 * Map a Shrs (a_l, a_h, count)
/*
 * Lowers a 64-bit arithmetic shift-right intrinsic.  Like map_Shr, but the
 * high word is filled with the sign (SAR, or a_h >>s 31 when the count is
 * known to be >= 32).  ctx is unused in the visible body.
 */
344 static int map_Shrs(ir_node *call, void *ctx) {
345 ir_graph *irg = current_ir_graph;
346 dbg_info *dbg = get_irn_dbg_info(call);
347 ir_node *block = get_nodes_block(call);
348 ir_node **params = get_Call_param_arr(call);
349 ir_type *method = get_Call_type(call);
350 ir_node *a_l = params[BINOP_Left_Low];
351 ir_node *a_h = params[BINOP_Left_High];
352 ir_node *cnt = params[BINOP_Right_Low];
353 ir_mode *l_mode = get_type_mode(get_method_res_type(method, 0));
354 ir_mode *h_mode = get_type_mode(get_method_res_type(method, 1));
356 ir_node *l_res, *h_res, *irn, *cond, *upper, *n_block, *l1, *l2, *h1, *h2, *in[2];
360 /* the shift count is a const, create better code */
361 tarval *tv = get_Const_tarval(cnt);
/* tv >= 32: low word gets a_h >>s cnt, high word is pure sign fill */
363 if (tarval_cmp(tv, new_tarval_from_long(32, l_mode)) & (pn_Cmp_Gt|pn_Cmp_Eq)) {
364 /* simplest case: shift only the higher bits. Note that there is no
365 need to reduce the constant here, this is done by the hardware. */
366 ir_node *conv = new_rd_Conv(dbg, irg, block, a_h, l_mode);
367 ir_mode *c_mode = get_irn_mode(cnt);
/* a_h >>s 31 replicates the sign bit across the whole high word */
369 h_res = new_rd_Shrs(dbg, irg, block, a_h, new_r_Const_long(irg, c_mode, 31), h_mode);
370 l_res = new_rd_Shrs(dbg, irg, block, conv, cnt, l_mode);
372 /* l_res = SHRD a_h:a_l, cnt */
373 l_res = new_bd_ia32_l_ShrD(dbg, block, a_l, a_h, cnt, l_mode);
375 /* h_res = SAR a_h, cnt */
376 h_res = new_bd_ia32_l_SarDep(dbg, block, a_h, cnt, l_res, h_mode);
378 resolve_call(call, l_res, h_res, irg, block);
/* variable count: build both variants and select at runtime */
383 upper = get_nodes_block(call);
385 /* l_res = SHRD a_h:a_l, cnt */
386 l1 = new_bd_ia32_l_ShrD(dbg, upper, a_l, a_h, cnt, l_mode);
388 /* h_res = SAR a_h, cnt */
389 h1 = new_bd_ia32_l_SarDep(dbg, upper, a_h, cnt, l1, h_mode);
/* test (cnt & 32) == 0, i.e. whether the effective count is < 32 */
391 c_mode = get_irn_mode(cnt);
392 irn = new_r_Const_long(irg, c_mode, 32);
393 irn = new_rd_And(dbg, irg, upper, cnt, irn, c_mode);
394 irn = new_rd_Cmp(dbg, irg, upper, irn, new_r_Const(irg, c_mode, get_mode_null(c_mode)));
395 irn = new_r_Proj(irg, upper, irn, mode_b, pn_Cmp_Eq);
396 cond = new_rd_Cond(dbg, irg, upper, irn);
398 in[0] = new_r_Proj(irg, upper, cond, mode_X, pn_Cond_true);
399 in[1] = new_r_Proj(irg, upper, cond, mode_X, pn_Cond_false);
401 /* the block for cnt >= 32 */
402 n_block = new_rd_Block(dbg, irg, 1, &in[1]);
403 l2 = new_rd_Conv(dbg, irg, n_block, h1, l_mode);
/* sign fill for the high word in the cnt >= 32 case */
404 h2 = new_rd_Shrs(dbg, irg, n_block, a_h, new_r_Const_long(irg, c_mode, 31), h_mode);
405 in[1] = new_r_Jmp(irg, n_block);
/* make the original block the two-predecessor join block */
407 set_irn_in(block, 2, in);
/* NOTE(review): in[] is presumably reloaded with {l1,l2} and later {h1,h2}
   on lines not visible in this chunk before each Phi is built. */
411 l_res = new_r_Phi(irg, block, 2, in, l_mode);
412 set_Block_phis(block, l_res);
416 h_res = new_r_Phi(irg, block, 2, in, h_mode);
/* chain the Phis at the block for a following part_block() */
417 set_Phi_next(l_res, h_res);
418 set_Phi_next(h_res, NULL);
/* move the call and its Projs into the join block */
421 set_nodes_block(call, block);
422 for (irn = get_irn_link(call); irn != NULL; irn = get_irn_link(irn))
423 set_nodes_block(irn, block);
425 resolve_call(call, l_res, h_res, irg, block);
/*
 * Returns non-zero if the pair (low, high) represents a sign-extended
 * 32-bit value: either high == (value >>s 31) with a Conv connecting low
 * and the shift operand, or both are constants whose longs form a valid
 * sign extension (h == 0 with l >= 0, or h == -1 with l < 0).
 * NOTE(review): the guard that checks high is a Shrs before calling
 * get_Shrs_right appears to be on a line not visible in this chunk.
 */
429 static int is_sign_extend(ir_node *low, ir_node *high)
436 high_r = get_Shrs_right(high);
437 if (!is_Const(high_r)) return 0;
/* the shift amount must be exactly 31 (replicating the sign bit) */
439 shift_count = get_Const_tarval(high_r);
440 if (!tarval_is_long(shift_count)) return 0;
441 if (get_tarval_long(shift_count) != 31) return 0;
443 high_l = get_Shrs_left(high);
/* the Conv may sit on either side of the 32/64-bit boundary */
445 if (is_Conv(low) && get_Conv_op(low) == high_l) return 1;
446 if (is_Conv(high_l) && get_Conv_op(high_l) == low) return 1;
447 } else if (is_Const(low) && is_Const(high)) {
448 tarval *tl = get_Const_tarval(low);
449 tarval *th = get_Const_tarval(high);
451 if (tarval_is_long(th) && tarval_is_long(tl)) {
452 long l = get_tarval_long(tl);
453 long h = get_tarval_long(th);
455 return (h == 0 && l >= 0) || (h == -1 && l < 0);
463 * Map a Mul (a_l, a_h, b_l, b_h)
/*
 * Lowers a 64-bit multiply intrinsic.  When both operands are sign-extended
 * 32-bit values, a single widening IMul suffices.  Otherwise the full
 * schoolbook product is built: EDX:EAX = a_l * b_l, then the two 32-bit
 * cross products a_h*b_l and a_l*b_h are added into the high word.
 * ctx is unused in the visible body.
 */
465 static int map_Mul(ir_node *call, void *ctx) {
466 ir_graph *irg = current_ir_graph;
467 dbg_info *dbg = get_irn_dbg_info(call);
468 ir_node *block = get_nodes_block(call);
469 ir_node **params = get_Call_param_arr(call);
470 ir_type *method = get_Call_type(call);
471 ir_node *a_l = params[BINOP_Left_Low];
472 ir_node *a_h = params[BINOP_Left_High];
473 ir_node *b_l = params[BINOP_Right_Low];
474 ir_node *b_h = params[BINOP_Right_High];
475 ir_mode *l_mode = get_type_mode(get_method_res_type(method, 0));
476 ir_mode *h_mode = get_type_mode(get_method_res_type(method, 1));
477 ir_node *l_res, *h_res, *mul, *pEDX, *add;
490 /* handle the often used case of 32x32=64 mul */
491 if (is_sign_extend(a_l, a_h) && is_sign_extend(b_l, b_h)) {
492 mul = new_bd_ia32_l_IMul(dbg, block, a_l, b_l);
493 h_res = new_rd_Proj(dbg, irg, block, mul, h_mode, pn_ia32_l_Mul_EDX);
494 l_res = new_rd_Proj(dbg, irg, block, mul, l_mode, pn_ia32_l_Mul_EAX);
/* general case: unsigned widening mul of the low words gives EDX:EAX */
499 mul = new_bd_ia32_l_Mul(dbg, block, a_l, b_l);
500 pEDX = new_rd_Proj(dbg, irg, block, mul, h_mode, pn_ia32_l_Mul_EDX);
501 l_res = new_rd_Proj(dbg, irg, block, mul, l_mode, pn_ia32_l_Mul_EAX);
/* h_res = high(a_l*b_l) + a_h*b_l + a_l*b_h (all in the high-word mode) */
503 b_l = new_rd_Conv(dbg, irg, block, b_l, h_mode);
504 mul = new_rd_Mul( dbg, irg, block, a_h, b_l, h_mode);
505 add = new_rd_Add( dbg, irg, block, mul, pEDX, h_mode);
506 a_l = new_rd_Conv(dbg, irg, block, a_l, h_mode);
507 mul = new_rd_Mul( dbg, irg, block, a_l, b_h, h_mode);
508 h_res = new_rd_Add( dbg, irg, block, add, mul, h_mode);
511 resolve_call(call, l_res, h_res, irg, block);
517 * Map a Minus (a_l, a_h)
/*
 * Lowers a 64-bit negate intrinsic to the dedicated Minus64Bit backend node
 * and projects out its low/high results.  ctx is unused in the visible body.
 */
519 static int map_Minus(ir_node *call, void *ctx) {
520 ir_graph *irg = current_ir_graph;
521 dbg_info *dbg = get_irn_dbg_info(call);
522 ir_node *block = get_nodes_block(call);
523 ir_node **params = get_Call_param_arr(call);
524 ir_type *method = get_Call_type(call);
525 ir_node *a_l = params[BINOP_Left_Low];
526 ir_node *a_h = params[BINOP_Left_High];
527 ir_mode *l_mode = get_type_mode(get_method_res_type(method, 0));
528 ir_mode *h_mode = get_type_mode(get_method_res_type(method, 1));
529 ir_node *l_res, *h_res, *res;
532 res = new_bd_ia32_Minus64Bit(dbg, block, a_l, a_h);
533 l_res = new_r_Proj(irg, block, res, l_mode, pn_ia32_Minus64Bit_low_res);
534 h_res = new_r_Proj(irg, block, res, h_mode, pn_ia32_Minus64Bit_high_res);
536 resolve_call(call, l_res, h_res, irg, block);
542 * Map a Abs (a_l, a_h)
/*
 * Lowers a 64-bit absolute-value intrinsic using the classic branch-free
 * pattern: sign = a_h >>s 31 (all ones iff negative), xor both words with
 * the sign mask, then subtract the sign with borrow — i.e. the two's
 * complement when the input was negative, identity otherwise.
 * ctx is unused in the visible body.
 */
544 static int map_Abs(ir_node *call, void *ctx) {
545 ir_graph *irg = current_ir_graph;
546 dbg_info *dbg = get_irn_dbg_info(call);
547 ir_node *block = get_nodes_block(call);
548 ir_node **params = get_Call_param_arr(call);
549 ir_type *method = get_Call_type(call);
550 ir_node *a_l = params[BINOP_Left_Low];
551 ir_node *a_h = params[BINOP_Left_High];
552 ir_mode *l_mode = get_type_mode(get_method_res_type(method, 0));
553 ir_mode *h_mode = get_type_mode(get_method_res_type(method, 1));
554 ir_mode *mode_flags = ia32_reg_classes[CLASS_ia32_flags].mode;
555 ir_node *l_res, *h_res, *sign, *sub_l, *sub_h;
562 Code inspired by gcc output :) (although gcc doubles the
563 operation for t1 as t2 and uses t1 for operations with low part
564 and t2 for operations with high part which is actually unnecessary
565 because t1 and t2 represent the same value)
571 h_res = t3 - t1 - carry
575 /* TODO: give a hint to the backend somehow to not create a cltd here... */
576 sign = new_rd_Shrs(dbg, irg, block, a_h, new_Const_long(l_mode, 31), h_mode);
577 sign_l = new_rd_Conv(dbg, irg, block, sign, l_mode);
578 sub_l = new_rd_Eor(dbg, irg, block, a_l, sign_l, l_mode);
579 sub_h = new_rd_Eor(dbg, irg, block, a_h, sign, h_mode);
/* subtract the sign mask with borrow: low via l_Sub, high via l_Sbb */
581 l_sub = new_bd_ia32_l_Sub(dbg, block, sub_l, sign_l, mode_T);
582 l_res = new_r_Proj(irg, block, l_sub, l_mode, pn_ia32_res);
583 flags = new_r_Proj(irg, block, l_sub, mode_flags, pn_ia32_flags);
584 h_res = new_bd_ia32_l_Sbb(dbg, block, sub_h, sign, flags, h_mode);
586 resolve_call(call, l_res, h_res, irg, block);
/** Builds a libFirm ident from a string literal (length excludes the NUL terminator). */
591 #define ID(x) new_id_from_chars(x, sizeof(x)-1)
594 * Maps a Div. Change into a library call
/*
 * 64-bit division is not expanded inline; instead the Call's SymConst
 * callee is redirected to the libgcc runtime routine __divdi3 (signed)
 * or __udivdi3 (unsigned).  The created entity is cached in the
 * ia32_intrinsic_env_t passed as ctx.
 */
596 static int map_Div(ir_node *call, void *ctx) {
597 ia32_intrinsic_env_t *env = ctx;
598 ir_type *method = get_Call_type(call);
/* signedness of the high result word selects the runtime routine */
599 ir_mode *h_mode = get_type_mode(get_method_res_type(method, 1));
604 if (mode_is_signed(h_mode)) {
605 /* 64bit signed Division */
608 /* create library entity */
609 ent = env->divdi3 = new_entity(get_glob_type(), ID("__divdi3"), method);
610 set_entity_visibility(ent, visibility_external_allocated);
611 set_entity_ld_ident(ent, ID("__divdi3"));
614 /* 64bit unsigned Division */
617 /* create library entity */
618 ent = env->udivdi3 = new_entity(get_glob_type(), ID("__udivdi3"), method);
619 set_entity_visibility(ent, visibility_external_allocated);
620 set_entity_ld_ident(ent, ID("__udivdi3"));
/* NOTE(review): sym is presumably filled from ent on a line not shown here */
624 ptr = get_Call_ptr(call);
625 set_SymConst_symbol(ptr, sym);
630 * Maps a Mod. Change into a library call
/*
 * 64-bit modulo is not expanded inline; the Call's SymConst callee is
 * redirected to the libgcc runtime routine __moddi3 (signed) or
 * __umoddi3 (unsigned).  The created entity is cached in the
 * ia32_intrinsic_env_t passed as ctx.
 */
632 static int map_Mod(ir_node *call, void *ctx) {
633 ia32_intrinsic_env_t *env = ctx;
634 ir_type *method = get_Call_type(call);
/* signedness of the high result word selects the runtime routine */
635 ir_mode *h_mode = get_type_mode(get_method_res_type(method, 1));
640 if (mode_is_signed(h_mode)) {
641 /* 64bit signed Modulo */
644 /* create library entity */
645 ent = env->moddi3 = new_entity(get_glob_type(), ID("__moddi3"), method);
646 set_entity_visibility(ent, visibility_external_allocated);
647 set_entity_ld_ident(ent, ID("__moddi3"));
650 /* 64bit unsigned Modulo */
653 /* create library entity */
654 ent = env->umoddi3 = new_entity(get_glob_type(), ID("__umoddi3"), method);
655 set_entity_visibility(ent, visibility_external_allocated);
656 set_entity_ld_ident(ent, ID("__umoddi3"));
/* NOTE(review): sym is presumably filled from ent on a line not shown here */
660 ptr = get_Call_ptr(call);
661 set_SymConst_symbol(ptr, sym);
/*
 * Maps a Conv intrinsic between float and 64-bit integer.
 * float -> ll: uses the l_FloattoLL backend node directly for signed
 * targets; for unsigned targets the value is first compared against 2^63
 * and, when too large for the signed fist instruction, reduced by 2^63
 * before conversion and the high result word corrected by 0x80000000.
 * ll -> float: uses the l_LLtoFloat backend node.
 * ctx is unused in the visible body.
 */
668 static int map_Conv(ir_node *call, void *ctx) {
669 ir_graph *irg = current_ir_graph;
670 dbg_info *dbg = get_irn_dbg_info(call);
671 ir_node *block = get_nodes_block(call);
672 ir_node **params = get_Call_param_arr(call);
673 ir_type *method = get_Call_type(call);
674 int n = get_Call_n_params(call);
675 ir_node *l_res, *h_res;
679 ir_node *float_to_ll;
681 /* We have a Conv float -> long long here */
682 ir_node *a_f = params[0];
683 ir_mode *l_res_mode = get_type_mode(get_method_res_type(method, 0));
684 ir_mode *h_res_mode = get_type_mode(get_method_res_type(method, 1));
686 assert(mode_is_float(get_irn_mode(a_f)) && "unexpected Conv call");
688 if (mode_is_signed(h_res_mode)) {
689 /* convert from float to signed 64bit */
690 float_to_ll = new_bd_ia32_l_FloattoLL(dbg, block, a_f);
692 l_res = new_r_Proj(irg, block, float_to_ll, l_res_mode,
693 pn_ia32_l_FloattoLL_res_low)
694 h_res = new_r_Proj(irg, block, float_to_ll, h_res_mode,
695 pn_ia32_l_FloattoLL_res_high);
697 /* convert from float to unsigned 64bit */
698 ir_mode *flt_mode = get_irn_mode(a_f);
/* flt_corr = 2^63 as a float constant — the unsigned range the signed conversion cannot reach */
699 tarval *flt_tv = new_tarval_from_str("9223372036854775808", 19, flt_mode);
700 ir_node *flt_corr = new_Const(flt_mode, flt_tv);
701 ir_node *lower_blk = block;
703 ir_node *cmp, *proj, *cond, *blk, *int_phi, *flt_phi;
/* branch on a_f < 2^63: true path converts directly, false path corrects first */
707 upper_blk = get_nodes_block(call);
709 cmp = new_rd_Cmp(dbg, irg, upper_blk, a_f, flt_corr);
710 proj = new_r_Proj(irg, upper_blk, cmp, mode_b, pn_Cmp_Lt);
711 cond = new_rd_Cond(dbg, irg, upper_blk, proj);
712 in[0] = new_r_Proj(irg, upper_blk, cond, mode_X, pn_Cond_true);
713 in[1] = new_r_Proj(irg, upper_blk, cond, mode_X, pn_Cond_false);
714 blk = new_r_Block(irg, 1, &in[1]);
715 in[1] = new_r_Jmp(irg, blk);
717 set_irn_in(lower_blk, 2, in);
/* int_phi: 0 on the direct path, 0x80000000 (adds 2^63 to the 64-bit result
   via the high word) on the corrected path */
720 in[0] = new_Const(h_res_mode, get_mode_null(h_res_mode));
721 in[1] = new_Const_long(h_res_mode, 0x80000000);
723 int_phi = new_r_Phi(irg, lower_blk, 2, in, h_res_mode);
/* flt_phi: original value, or value - 2^63 on the corrected path
   (NOTE(review): in[0] is presumably set to a_f on a line not shown here) */
726 in[1] = new_rd_Sub(dbg, irg, upper_blk, a_f, flt_corr, flt_mode);
728 flt_phi = new_r_Phi(irg, lower_blk, 2, in, flt_mode);
730 /* fix Phi links for next part_block() */
731 set_Block_phis(lower_blk, int_phi);
732 set_Phi_next(int_phi, flt_phi);
733 set_Phi_next(flt_phi, NULL);
735 float_to_ll = new_bd_ia32_l_FloattoLL(dbg, lower_blk, flt_phi);
737 l_res = new_r_Proj(irg, lower_blk, float_to_ll, l_res_mode,
738 pn_ia32_l_FloattoLL_res_low);
739 h_res = new_r_Proj(irg, lower_blk, float_to_ll, h_res_mode,
740 pn_ia32_l_FloattoLL_res_high);
/* re-apply the 2^63 correction on the integer side */
742 h_res = new_rd_Add(dbg, irg, lower_blk, h_res, int_phi, h_res_mode);
744 /* move the call and its Proj's to the lower block */
745 set_nodes_block(call, lower_blk);
747 for (proj = get_irn_link(call); proj != NULL; proj = get_irn_link(proj))
748 set_nodes_block(proj, lower_blk);
752 resolve_call(call, l_res, h_res, irg, block);
754 ir_node *ll_to_float;
756 /* We have a Conv long long -> float here */
757 ir_node *a_l = params[BINOP_Left_Low];
758 ir_node *a_h = params[BINOP_Left_High];
759 ir_mode *fres_mode = get_type_mode(get_method_res_type(method, 0));
761 assert(! mode_is_float(get_irn_mode(a_l))
762 && ! mode_is_float(get_irn_mode(a_h)));
764 ll_to_float = new_bd_ia32_l_LLtoFloat(dbg, block, a_h, a_l, fres_mode);
/* single float result: pass NULL for the (non-existent) high word */
767 resolve_call(call, ll_to_float, NULL, irg, block);
769 panic("unexpected Conv call %+F", call);
775 /* Ia32 implementation of intrinsic mapping. */
/*
 * Creates (or reuses from the i_ents cache) the entity for an intrinsic
 * function implementing the given op, registers the matching map_* handler
 * in the global intrinsics array and returns the entity.  Unknown ops fall
 * back to def_create_intrinsic_fkt.
 * NOTE(review): the case labels and the `mapper = map_*` assignments for
 * each opcode are on lines not visible in this chunk.
 */
776 ir_entity *ia32_create_intrinsic_fkt(ir_type *method, const ir_op *op,
777 const ir_mode *imode, const ir_mode *omode,
781 ir_entity **ent = NULL;
782 i_mapper_func mapper;
/* lazily create the flexible array on first use */
785 intrinsics = NEW_ARR_F(i_record, 0);
787 switch (get_op_code(op)) {
789 ent = &i_ents[iro_Add];
793 ent = &i_ents[iro_Sub];
797 ent = &i_ents[iro_Shl];
801 ent = &i_ents[iro_Shr];
805 ent = &i_ents[iro_Shrs];
809 ent = &i_ents[iro_Mul];
813 ent = &i_ents[iro_Minus];
817 ent = &i_ents[iro_Abs];
821 ent = &i_ents[iro_Div];
825 ent = &i_ents[iro_Mod];
829 ent = &i_ents[iro_Conv];
833 fprintf(stderr, "FIXME: unhandled op for ia32 intrinsic function %s\n", get_id_str(op->name));
834 return def_create_intrinsic_fkt(method, op, imode, omode, context);
838 #define IDENT(s) new_id_from_chars(s, sizeof(s)-1)
/* entity name is the op name mangled with an "L" prefix */
840 ident *id = id_mangle(IDENT("L"), get_op_ident(op));
841 *ent = new_entity(get_glob_type(), id, method);
/* record the mapping so ia32_handle_intrinsics() can lower all such calls */
844 elt.i_call.kind = INTRINSIC_CALL;
845 elt.i_call.i_ent = *ent;
846 elt.i_call.i_mapper = mapper;
847 elt.i_call.ctx = context;
848 elt.i_call.link = NULL;
850 ARR_APP1(i_record, intrinsics, elt);