2 * Copyright (C) 1995-2008 University of Karlsruhe. All rights reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief This file implements the mapping of 64Bit intrinsic
23 * functions to code or library calls.
24 * @author Michael Beck
40 #include "ia32_new_nodes.h"
41 #include "bearch_ia32_t.h"
42 #include "gen_ia32_regalloc_if.h"
44 /** The array of all intrinsics that must be mapped. */
45 static i_record *intrinsics;
47 /** An array to cache all entities */
48 static ir_entity *i_ents[iro_MaxOpcode];
51 * Maps all intrinsic calls that the backend support
52 * and map all instructions the backend did not support
55 void ia32_handle_intrinsics(void) {
56 if (intrinsics && ARR_LEN(intrinsics) > 0) {
57 lower_intrinsics(intrinsics, ARR_LEN(intrinsics), /*part_block_used=*/1);
61 #define BINOP_Left_Low 0
62 #define BINOP_Left_High 1
63 #define BINOP_Right_Low 2
64 #define BINOP_Right_High 3
67 * Replace a call be a tuple of l_res, h_res.
69 static void resolve_call(ir_node *call, ir_node *l_res, ir_node *h_res, ir_graph *irg, ir_node *block) {
70 ir_node *jmp, *res, *in[2];
71 ir_node *bad = get_irg_bad(irg);
72 ir_node *nomem = get_irg_no_mem(irg);
77 res = new_r_Tuple(irg, block, h_res == NULL ? 1 : 2, in);
79 turn_into_tuple(call, pn_Call_max);
80 set_Tuple_pred(call, pn_Call_M_regular, nomem);
83 * We do not check here if this call really has exception and regular Proj's.
84 * new_r_Jmp might than be CSEd with the real exit jmp and then bad things happen
85 * (in movgen.c from 186.crafty for example).
86 * So be sure the newly created Jmp cannot CSE.
88 old_cse = get_opt_cse();
90 jmp = new_r_Jmp(irg, block);
93 set_Tuple_pred(call, pn_Call_X_regular, jmp);
94 set_Tuple_pred(call, pn_Call_X_except, bad);
95 set_Tuple_pred(call, pn_Call_T_result, res);
96 set_Tuple_pred(call, pn_Call_M_except, nomem);
97 set_Tuple_pred(call, pn_Call_P_value_res_base, bad);
101 * Map an Add (a_l, a_h, b_l, b_h)
103 static int map_Add(ir_node *call, void *ctx) {
104 ir_graph *irg = current_ir_graph;
105 dbg_info *dbg = get_irn_dbg_info(call);
106 ir_node *block = get_nodes_block(call);
107 ir_node **params = get_Call_param_arr(call);
108 ir_type *method = get_Call_type(call);
109 ir_node *a_l = params[BINOP_Left_Low];
110 ir_node *a_h = params[BINOP_Left_High];
111 ir_node *b_l = params[BINOP_Right_Low];
112 ir_node *b_h = params[BINOP_Right_High];
113 ir_mode *l_mode = get_type_mode(get_method_res_type(method, 0));
114 ir_mode *h_mode = get_type_mode(get_method_res_type(method, 1));
115 ir_mode *mode_flags = ia32_reg_classes[CLASS_ia32_flags].mode;
116 ir_node *add_low, *add_high, *flags;
117 ir_node *l_res, *h_res;
120 /* l_res = a_l + b_l */
121 /* h_res = a_h + b_h + carry */
123 add_low = new_rd_ia32_l_Add(dbg, irg, block, a_l, b_l, mode_T);
124 flags = new_r_Proj(irg, block, add_low, mode_flags, pn_ia32_flags);
125 add_high = new_rd_ia32_l_Adc(dbg, irg, block, a_h, b_h, flags, h_mode);
127 l_res = new_r_Proj(irg, block, add_low, l_mode, pn_ia32_res);
130 resolve_call(call, l_res, h_res, irg, block);
135 * Map a Sub (a_l, a_h, b_l, b_h)
137 static int map_Sub(ir_node *call, void *ctx)
139 ir_graph *irg = current_ir_graph;
140 dbg_info *dbg = get_irn_dbg_info(call);
141 ir_node *block = get_nodes_block(call);
142 ir_node **params = get_Call_param_arr(call);
143 ir_type *method = get_Call_type(call);
144 ir_node *a_l = params[BINOP_Left_Low];
145 ir_node *a_h = params[BINOP_Left_High];
146 ir_node *b_l = params[BINOP_Right_Low];
147 ir_node *b_h = params[BINOP_Right_High];
148 ir_mode *l_mode = get_type_mode(get_method_res_type(method, 0));
149 ir_mode *h_mode = get_type_mode(get_method_res_type(method, 1));
150 ir_mode *mode_flags = ia32_reg_classes[CLASS_ia32_flags].mode;
151 ir_node *sub_low, *sub_high, *flags;
152 ir_node *l_res, *h_res;
155 /* l_res = a_l - b_l */
156 /* h_res = a_h - b_h - carry */
158 sub_low = new_rd_ia32_l_Sub(dbg, irg, block, a_l, b_l, mode_T);
159 flags = new_r_Proj(irg, block, sub_low, mode_flags, pn_ia32_flags);
160 sub_high = new_rd_ia32_l_Sbb(dbg, irg, block, a_h, b_h, flags, h_mode);
162 l_res = new_r_Proj(irg, block, sub_low, l_mode, pn_ia32_res);
165 resolve_call(call, l_res, h_res, irg, block);
170 * Map a Shl (a_l, a_h, count)
172 static int map_Shl(ir_node *call, void *ctx) {
173 ir_graph *irg = current_ir_graph;
174 dbg_info *dbg = get_irn_dbg_info(call);
175 ir_node *block = get_nodes_block(call);
176 ir_node **params = get_Call_param_arr(call);
177 ir_type *method = get_Call_type(call);
178 ir_node *a_l = params[BINOP_Left_Low];
179 ir_node *a_h = params[BINOP_Left_High];
180 ir_node *cnt = params[BINOP_Right_Low];
181 ir_mode *l_mode = get_type_mode(get_method_res_type(method, 0));
182 ir_mode *h_mode = get_type_mode(get_method_res_type(method, 1));
184 ir_node *l_res, *h_res, *irn, *cond, *upper, *n_block, *l1, *l2, *h1, *h2, *in[2];
188 /* the shift count is a const, create better code */
189 tarval *tv = get_Const_tarval(cnt);
191 if (tarval_cmp(tv, new_tarval_from_long(32, l_mode)) & (pn_Cmp_Gt|pn_Cmp_Eq)) {
192 /* simplest case: shift only the lower bits. Note that there is no
193 need to reduce the constant here, this is done by the hardware. */
194 ir_node *conv = new_rd_Conv(dbg, irg, block, a_l, h_mode);
195 h_res = new_rd_Shl(dbg, irg, block, conv, cnt, h_mode);
196 l_res = new_rd_Const(dbg, irg, block, l_mode, get_mode_null(l_mode));
199 /* h_res = SHLD a_h, a_l, cnt */
200 h_res = new_rd_ia32_l_ShlD(dbg, irg, block, a_h, a_l, cnt, h_mode);
202 /* l_res = SHL a_l, cnt */
203 l_res = new_rd_ia32_l_ShlDep(dbg, irg, block, a_l, cnt, h_res, l_mode);
206 resolve_call(call, l_res, h_res, irg, block);
211 upper = get_nodes_block(call);
213 /* h_res = SHLD a_h, a_l, cnt */
214 h1 = new_rd_ia32_l_ShlD(dbg, irg, upper, a_h, a_l, cnt, h_mode);
216 /* l_res = SHL a_l, cnt */
217 l1 = new_rd_ia32_l_ShlDep(dbg, irg, upper, a_l, cnt, h1, l_mode);
219 c_mode = get_irn_mode(cnt);
220 irn = new_r_Const_long(irg, upper, c_mode, 32);
221 irn = new_rd_And(dbg, irg, upper, cnt, irn, c_mode);
222 irn = new_rd_Cmp(dbg, irg, upper, irn, new_r_Const(irg, upper, c_mode, get_mode_null(c_mode)));
223 irn = new_r_Proj(irg, upper, irn, mode_b, pn_Cmp_Eq);
224 cond = new_rd_Cond(dbg, irg, upper, irn);
226 in[0] = new_r_Proj(irg, upper, cond, mode_X, pn_Cond_true);
227 in[1] = new_r_Proj(irg, upper, cond, mode_X, pn_Cond_false);
229 /* the block for cnt >= 32 */
230 n_block = new_rd_Block(dbg, irg, 1, &in[1]);
231 h2 = new_rd_Conv(dbg, irg, n_block, l1, h_mode);
232 l2 = new_r_Const(irg, n_block, l_mode, get_mode_null(l_mode));
233 in[1] = new_r_Jmp(irg, n_block);
235 set_irn_in(block, 2, in);
239 l_res = new_r_Phi(irg, block, 2, in, l_mode);
240 set_Block_phis(block, l_res);
244 h_res = new_r_Phi(irg, block, 2, in, h_mode);
245 set_Phi_next(l_res, h_res);
246 set_Phi_next(h_res, NULL);
249 set_nodes_block(call, block);
250 for (irn = get_irn_link(call); irn != NULL; irn = get_irn_link(irn))
251 set_nodes_block(irn, block);
253 resolve_call(call, l_res, h_res, irg, block);
258 * Map a Shr (a_l, a_h, count)
260 static int map_Shr(ir_node *call, void *ctx) {
261 ir_graph *irg = current_ir_graph;
262 dbg_info *dbg = get_irn_dbg_info(call);
263 ir_node *block = get_nodes_block(call);
264 ir_node **params = get_Call_param_arr(call);
265 ir_type *method = get_Call_type(call);
266 ir_node *a_l = params[BINOP_Left_Low];
267 ir_node *a_h = params[BINOP_Left_High];
268 ir_node *cnt = params[BINOP_Right_Low];
269 ir_mode *l_mode = get_type_mode(get_method_res_type(method, 0));
270 ir_mode *h_mode = get_type_mode(get_method_res_type(method, 1));
272 ir_node *l_res, *h_res, *irn, *cond, *upper, *n_block, *l1, *l2, *h1, *h2, *in[2];
276 /* the shift count is a const, create better code */
277 tarval *tv = get_Const_tarval(cnt);
279 if (tarval_cmp(tv, new_tarval_from_long(32, l_mode)) & (pn_Cmp_Gt|pn_Cmp_Eq)) {
280 /* simplest case: shift only the higher bits. Note that there is no
281 need to reduce the constant here, this is done by the hardware. */
282 ir_node *conv = new_rd_Conv(dbg, irg, block, a_h, l_mode);
283 h_res = new_rd_Const(dbg, irg, block, h_mode, get_mode_null(h_mode));
284 l_res = new_rd_Shr(dbg, irg, block, conv, cnt, l_mode);
286 /* l_res = SHRD a_h:a_l, cnt */
287 l_res = new_rd_ia32_l_ShrD(dbg, irg, block, a_l, a_h, cnt, l_mode);
289 /* h_res = SHR a_h, cnt */
290 h_res = new_rd_ia32_l_ShrDep(dbg, irg, block, a_h, cnt, l_res, h_mode);
292 resolve_call(call, l_res, h_res, irg, block);
297 upper = get_nodes_block(call);
299 /* l_res = SHRD a_h:a_l, cnt */
300 l1 = new_rd_ia32_l_ShrD(dbg, irg, upper, a_l, a_h, cnt, l_mode);
302 /* h_res = SHR a_h, cnt */
303 h1 = new_rd_ia32_l_ShrDep(dbg, irg, upper, a_h, cnt, l1, h_mode);
305 c_mode = get_irn_mode(cnt);
306 irn = new_r_Const_long(irg, upper, c_mode, 32);
307 irn = new_rd_And(dbg, irg, upper, cnt, irn, c_mode);
308 irn = new_rd_Cmp(dbg, irg, upper, irn, new_r_Const(irg, upper, c_mode, get_mode_null(c_mode)));
309 irn = new_r_Proj(irg, upper, irn, mode_b, pn_Cmp_Eq);
310 cond = new_rd_Cond(dbg, irg, upper, irn);
312 in[0] = new_r_Proj(irg, upper, cond, mode_X, pn_Cond_true);
313 in[1] = new_r_Proj(irg, upper, cond, mode_X, pn_Cond_false);
315 /* the block for cnt >= 32 */
316 n_block = new_rd_Block(dbg, irg, 1, &in[1]);
317 l2 = new_rd_Conv(dbg, irg, n_block, h1, l_mode);
318 h2 = new_r_Const(irg, n_block, h_mode, get_mode_null(h_mode));
319 in[1] = new_r_Jmp(irg, n_block);
321 set_irn_in(block, 2, in);
325 l_res = new_r_Phi(irg, block, 2, in, l_mode);
326 set_Block_phis(block, l_res);
330 h_res = new_r_Phi(irg, block, 2, in, h_mode);
331 set_Phi_next(l_res, h_res);
332 set_Phi_next(h_res, NULL);
335 set_nodes_block(call, block);
336 for (irn = get_irn_link(call); irn != NULL; irn = get_irn_link(irn))
337 set_nodes_block(irn, block);
339 resolve_call(call, l_res, h_res, irg, block);
344 * Map a Shrs (a_l, a_h, count)
346 static int map_Shrs(ir_node *call, void *ctx) {
347 ir_graph *irg = current_ir_graph;
348 dbg_info *dbg = get_irn_dbg_info(call);
349 ir_node *block = get_nodes_block(call);
350 ir_node **params = get_Call_param_arr(call);
351 ir_type *method = get_Call_type(call);
352 ir_node *a_l = params[BINOP_Left_Low];
353 ir_node *a_h = params[BINOP_Left_High];
354 ir_node *cnt = params[BINOP_Right_Low];
355 ir_mode *l_mode = get_type_mode(get_method_res_type(method, 0));
356 ir_mode *h_mode = get_type_mode(get_method_res_type(method, 1));
358 ir_node *l_res, *h_res, *irn, *cond, *upper, *n_block, *l1, *l2, *h1, *h2, *in[2];
362 /* the shift count is a const, create better code */
363 tarval *tv = get_Const_tarval(cnt);
365 if (tarval_cmp(tv, new_tarval_from_long(32, l_mode)) & (pn_Cmp_Gt|pn_Cmp_Eq)) {
366 /* simplest case: shift only the higher bits. Note that there is no
367 need to reduce the constant here, this is done by the hardware. */
368 ir_node *conv = new_rd_Conv(dbg, irg, block, a_h, l_mode);
369 ir_mode *c_mode = get_irn_mode(cnt);
371 h_res = new_rd_Shrs(dbg, irg, block, a_h, new_r_Const_long(irg, block, c_mode, 31), h_mode);
372 l_res = new_rd_Shrs(dbg, irg, block, conv, cnt, l_mode);
374 /* l_res = SHRD a_h:a_l, cnt */
375 l_res = new_rd_ia32_l_ShrD(dbg, irg, block, a_l, a_h, cnt, l_mode);
377 /* h_res = SAR a_h, cnt */
378 h_res = new_rd_ia32_l_SarDep(dbg, irg, block, a_h, cnt, l_res, h_mode);
380 resolve_call(call, l_res, h_res, irg, block);
385 upper = get_nodes_block(call);
387 /* l_res = SHRD a_h:a_l, cnt */
388 l1 = new_rd_ia32_l_ShrD(dbg, irg, upper, a_l, a_h, cnt, l_mode);
390 /* h_res = SAR a_h, cnt */
391 h1 = new_rd_ia32_l_SarDep(dbg, irg, upper, a_h, cnt, l1, h_mode);
393 c_mode = get_irn_mode(cnt);
394 irn = new_r_Const_long(irg, upper, c_mode, 32);
395 irn = new_rd_And(dbg, irg, upper, cnt, irn, c_mode);
396 irn = new_rd_Cmp(dbg, irg, upper, irn, new_r_Const(irg, upper, c_mode, get_mode_null(c_mode)));
397 irn = new_r_Proj(irg, upper, irn, mode_b, pn_Cmp_Eq);
398 cond = new_rd_Cond(dbg, irg, upper, irn);
400 in[0] = new_r_Proj(irg, upper, cond, mode_X, pn_Cond_true);
401 in[1] = new_r_Proj(irg, upper, cond, mode_X, pn_Cond_false);
403 /* the block for cnt >= 32 */
404 n_block = new_rd_Block(dbg, irg, 1, &in[1]);
405 l2 = new_rd_Conv(dbg, irg, n_block, h1, l_mode);
406 h2 = new_rd_Shrs(dbg, irg, n_block, a_h, new_r_Const_long(irg, block, c_mode, 31), h_mode);
407 in[1] = new_r_Jmp(irg, n_block);
409 set_irn_in(block, 2, in);
413 l_res = new_r_Phi(irg, block, 2, in, l_mode);
414 set_Block_phis(block, l_res);
418 h_res = new_r_Phi(irg, block, 2, in, h_mode);
419 set_Phi_next(l_res, h_res);
420 set_Phi_next(h_res, NULL);
423 set_nodes_block(call, block);
424 for (irn = get_irn_link(call); irn != NULL; irn = get_irn_link(irn))
425 set_nodes_block(irn, block);
427 resolve_call(call, l_res, h_res, irg, block);
431 static int is_sign_extend(ir_node *low, ir_node *high)
438 high_r = get_Shrs_right(high);
439 if (!is_Const(high_r)) return 0;
441 shift_count = get_Const_tarval(high_r);
442 if (!tarval_is_long(shift_count)) return 0;
443 if (get_tarval_long(shift_count) != 31) return 0;
445 high_l = get_Shrs_left(high);
447 if (is_Conv(low) && get_Conv_op(low) == high_l) return 1;
448 if (is_Conv(high_l) && get_Conv_op(high_l) == low) return 1;
449 } else if (is_Const(low) && is_Const(high)) {
450 tarval *tl = get_Const_tarval(low);
451 tarval *th = get_Const_tarval(high);
453 if (tarval_is_long(th) && tarval_is_long(tl)) {
454 long l = get_tarval_long(tl);
455 long h = get_tarval_long(th);
457 return (h == 0 && l >= 0) || (h == -1 && l < 0);
465 * Map a Mul (a_l, a_h, b_l, b_h)
467 static int map_Mul(ir_node *call, void *ctx) {
468 ir_graph *irg = current_ir_graph;
469 dbg_info *dbg = get_irn_dbg_info(call);
470 ir_node *block = get_nodes_block(call);
471 ir_node **params = get_Call_param_arr(call);
472 ir_type *method = get_Call_type(call);
473 ir_node *a_l = params[BINOP_Left_Low];
474 ir_node *a_h = params[BINOP_Left_High];
475 ir_node *b_l = params[BINOP_Right_Low];
476 ir_node *b_h = params[BINOP_Right_High];
477 ir_mode *l_mode = get_type_mode(get_method_res_type(method, 0));
478 ir_mode *h_mode = get_type_mode(get_method_res_type(method, 1));
479 ir_node *l_res, *h_res, *mul, *pEDX, *add;
492 /* handle the often used case of 32x32=64 mul */
493 if (is_sign_extend(a_l, a_h) && is_sign_extend(b_l, b_h)) {
494 mul = new_rd_ia32_l_IMul(dbg, irg, block, a_l, b_l);
495 h_res = new_rd_Proj(dbg, irg, block, mul, h_mode, pn_ia32_l_Mul_EDX);
496 l_res = new_rd_Proj(dbg, irg, block, mul, l_mode, pn_ia32_l_Mul_EAX);
501 mul = new_rd_ia32_l_Mul(dbg, irg, block, a_l, b_l);
502 pEDX = new_rd_Proj(dbg, irg, block, mul, h_mode, pn_ia32_l_Mul_EDX);
503 l_res = new_rd_Proj(dbg, irg, block, mul, l_mode, pn_ia32_l_Mul_EAX);
505 b_l = new_rd_Conv(dbg, irg, block, b_l, h_mode);
506 mul = new_rd_Mul( dbg, irg, block, a_h, b_l, h_mode);
507 add = new_rd_Add( dbg, irg, block, mul, pEDX, h_mode);
508 a_l = new_rd_Conv(dbg, irg, block, a_l, h_mode);
509 mul = new_rd_Mul( dbg, irg, block, a_l, b_h, h_mode);
510 h_res = new_rd_Add( dbg, irg, block, add, mul, h_mode);
513 resolve_call(call, l_res, h_res, irg, block);
519 * Map a Minus (a_l, a_h)
521 static int map_Minus(ir_node *call, void *ctx) {
522 ir_graph *irg = current_ir_graph;
523 dbg_info *dbg = get_irn_dbg_info(call);
524 ir_node *block = get_nodes_block(call);
525 ir_node **params = get_Call_param_arr(call);
526 ir_type *method = get_Call_type(call);
527 ir_node *a_l = params[BINOP_Left_Low];
528 ir_node *a_h = params[BINOP_Left_High];
529 ir_mode *l_mode = get_type_mode(get_method_res_type(method, 0));
530 ir_mode *h_mode = get_type_mode(get_method_res_type(method, 1));
531 ir_node *l_res, *h_res, *res;
534 res = new_rd_ia32_Minus64Bit(dbg, irg, block, a_l, a_h);
535 l_res = new_r_Proj(irg, block, res, l_mode, pn_ia32_Minus64Bit_low_res);
536 h_res = new_r_Proj(irg, block, res, h_mode, pn_ia32_Minus64Bit_high_res);
538 resolve_call(call, l_res, h_res, irg, block);
544 * Map a Abs (a_l, a_h)
546 static int map_Abs(ir_node *call, void *ctx) {
547 ir_graph *irg = current_ir_graph;
548 dbg_info *dbg = get_irn_dbg_info(call);
549 ir_node *block = get_nodes_block(call);
550 ir_node **params = get_Call_param_arr(call);
551 ir_type *method = get_Call_type(call);
552 ir_node *a_l = params[BINOP_Left_Low];
553 ir_node *a_h = params[BINOP_Left_High];
554 ir_mode *l_mode = get_type_mode(get_method_res_type(method, 0));
555 ir_mode *h_mode = get_type_mode(get_method_res_type(method, 1));
556 ir_mode *mode_flags = ia32_reg_classes[CLASS_ia32_flags].mode;
557 ir_node *l_res, *h_res, *sign, *sub_l, *sub_h;
564 Code inspired by gcc output :) (although gcc doubles the
565 operation for t1 as t2 and uses t1 for operations with low part
566 and t2 for operations with high part which is actually unnecessary
567 because t1 and t2 represent the same value)
573 h_res = t3 - t1 - carry
577 /* TODO: give a hint to the backend somehow to not create a cltd here... */
578 sign = new_rd_Shrs(dbg, irg, block, a_h, new_Const_long(l_mode, 31), h_mode);
579 sign_l = new_rd_Conv(dbg, irg, block, sign, l_mode);
580 sub_l = new_rd_Eor(dbg, irg, block, a_l, sign_l, l_mode);
581 sub_h = new_rd_Eor(dbg, irg, block, a_h, sign, h_mode);
583 l_sub = new_rd_ia32_l_Sub(dbg, irg, block, sub_l, sign_l, mode_T);
584 l_res = new_r_Proj(irg, block, l_sub, l_mode, pn_ia32_res);
585 flags = new_r_Proj(irg, block, l_sub, mode_flags, pn_ia32_flags);
586 h_res = new_rd_ia32_l_Sbb(dbg, irg, block, sub_h, sign, flags, h_mode);
588 resolve_call(call, l_res, h_res, irg, block);
593 #define ID(x) new_id_from_chars(x, sizeof(x)-1)
596 * Maps a Div. Change into a library call
598 static int map_Div(ir_node *call, void *ctx) {
599 ia32_intrinsic_env_t *env = ctx;
600 ir_type *method = get_Call_type(call);
601 ir_mode *h_mode = get_type_mode(get_method_res_type(method, 1));
606 if (mode_is_signed(h_mode)) {
607 /* 64bit signed Division */
610 /* create library entity */
611 ent = env->divdi3 = new_entity(get_glob_type(), ID("__divdi3"), method);
612 set_entity_visibility(ent, visibility_external_allocated);
613 set_entity_ld_ident(ent, ID("__divdi3"));
616 /* 64bit unsigned Division */
619 /* create library entity */
620 ent = env->udivdi3 = new_entity(get_glob_type(), ID("__udivdi3"), method);
621 set_entity_visibility(ent, visibility_external_allocated);
622 set_entity_ld_ident(ent, ID("__udivdi3"));
626 ptr = get_Call_ptr(call);
627 set_SymConst_symbol(ptr, sym);
632 * Maps a Mod. Change into a library call
634 static int map_Mod(ir_node *call, void *ctx) {
635 ia32_intrinsic_env_t *env = ctx;
636 ir_type *method = get_Call_type(call);
637 ir_mode *h_mode = get_type_mode(get_method_res_type(method, 1));
642 if (mode_is_signed(h_mode)) {
643 /* 64bit signed Modulo */
646 /* create library entity */
647 ent = env->moddi3 = new_entity(get_glob_type(), ID("__moddi3"), method);
648 set_entity_visibility(ent, visibility_external_allocated);
649 set_entity_ld_ident(ent, ID("__moddi3"));
652 /* 64bit signed Modulo */
655 /* create library entity */
656 ent = env->umoddi3 = new_entity(get_glob_type(), ID("__umoddi3"), method);
657 set_entity_visibility(ent, visibility_external_allocated);
658 set_entity_ld_ident(ent, ID("__umoddi3"));
662 ptr = get_Call_ptr(call);
663 set_SymConst_symbol(ptr, sym);
670 static int map_Conv(ir_node *call, void *ctx) {
671 ir_graph *irg = current_ir_graph;
672 dbg_info *dbg = get_irn_dbg_info(call);
673 ir_node *block = get_nodes_block(call);
674 ir_node **params = get_Call_param_arr(call);
675 ir_type *method = get_Call_type(call);
676 int n = get_Call_n_params(call);
677 ir_node *l_res, *h_res;
681 ir_node *float_to_ll;
683 /* We have a Conv float -> long long here */
684 ir_node *a_f = params[0];
685 ir_mode *l_res_mode = get_type_mode(get_method_res_type(method, 0));
686 ir_mode *h_res_mode = get_type_mode(get_method_res_type(method, 1));
688 assert(mode_is_float(get_irn_mode(a_f)) && "unexpected Conv call");
690 float_to_ll = new_rd_ia32_l_FloattoLL(dbg, irg, block, a_f);
692 l_res = new_r_Proj(irg, block, float_to_ll, l_res_mode,
693 pn_ia32_l_FloattoLL_res_low);
694 h_res = new_r_Proj(irg, block, float_to_ll, h_res_mode,
695 pn_ia32_l_FloattoLL_res_high);
698 resolve_call(call, l_res, h_res, irg, block);
700 ir_node *ll_to_float;
702 /* We have a Conv long long -> float here */
703 ir_node *a_l = params[BINOP_Left_Low];
704 ir_node *a_h = params[BINOP_Left_High];
705 ir_mode *fres_mode = get_type_mode(get_method_res_type(method, 0));
707 assert(! mode_is_float(get_irn_mode(a_l))
708 && ! mode_is_float(get_irn_mode(a_h)));
710 ll_to_float = new_rd_ia32_l_LLtoFloat(dbg, irg, block, a_h, a_l,
714 resolve_call(call, ll_to_float, NULL, irg, block);
716 panic("unexpected Conv call %+F", call);
722 /* Ia32 implementation of intrinsic mapping. */
723 ir_entity *ia32_create_intrinsic_fkt(ir_type *method, const ir_op *op,
724 const ir_mode *imode, const ir_mode *omode,
728 ir_entity **ent = NULL;
729 i_mapper_func mapper;
732 intrinsics = NEW_ARR_F(i_record, 0);
734 switch (get_op_code(op)) {
736 ent = &i_ents[iro_Add];
740 ent = &i_ents[iro_Sub];
744 ent = &i_ents[iro_Shl];
748 ent = &i_ents[iro_Shr];
752 ent = &i_ents[iro_Shrs];
756 ent = &i_ents[iro_Mul];
760 ent = &i_ents[iro_Minus];
764 ent = &i_ents[iro_Abs];
768 ent = &i_ents[iro_Div];
772 ent = &i_ents[iro_Mod];
776 ent = &i_ents[iro_Conv];
780 fprintf(stderr, "FIXME: unhandled op for ia32 intrinsic function %s\n", get_id_str(op->name));
781 return def_create_intrinsic_fkt(method, op, imode, omode, context);
785 #define IDENT(s) new_id_from_chars(s, sizeof(s)-1)
787 ident *id = mangle(IDENT("L"), get_op_ident(op));
788 *ent = new_entity(get_glob_type(), id, method);
791 elt.i_call.kind = INTRINSIC_CALL;
792 elt.i_call.i_ent = *ent;
793 elt.i_call.i_mapper = mapper;
794 elt.i_call.ctx = context;
795 elt.i_call.link = NULL;
797 ARR_APP1(i_record, intrinsics, elt);