2 * Copyright (C) 1995-2008 University of Karlsruhe. All rights reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief This file implements the mapping of 64Bit intrinsic
23 * functions to code or library calls.
24 * @author Michael Beck
39 #include "ia32_new_nodes.h"
40 #include "bearch_ia32_t.h"
41 #include "gen_ia32_regalloc_if.h"
43 /** The array of all intrinsics that must be mapped. */
44 static i_record *intrinsics;
46 /** An array to cache all entities, indexed by ir opcode. */
47 static ir_entity *i_ents[iro_Last + 1];
50 * Maps all intrinsic calls that the backend supports
51 * and maps all instructions the backend did not support
/* NOTE(review): runs the generic lowerer over the registered records;
 * part_block_used=1 because some mappers (Shl/Shr/Shrs/Conv) split blocks.
 * The closing braces of this function are missing in this extraction. */
54 void ia32_handle_intrinsics(void) {
55 if (intrinsics && ARR_LEN(intrinsics) > 0) {
56 lower_intrinsics(intrinsics, ARR_LEN(intrinsics), /*part_block_used=*/1);
/* Parameter indices of a lowered 64-bit binop call:
 * (left_low, left_high, right_low, right_high). Unary ops use the
 * first two; shifts pass the count in BINOP_Right_Low. */
60 #define BINOP_Left_Low 0
61 #define BINOP_Left_High 1
62 #define BINOP_Right_Low 2
63 #define BINOP_Right_High 3
66 * Reroute edges from the pn_Call_T_result proj of a call.
68 * @param proj the pn_Call_T_result Proj
69 * @param l_res the lower 32 bit result
70 * @param h_res the upper 32 bit result or NULL
71 * @param irg the graph to replace on
73 static void reroute_result(ir_node *proj, ir_node *l_res, ir_node *h_res, ir_graph *irg) {
74 const ir_edge_t *edge, *next;
76 foreach_out_edge_safe(proj, edge, next) {
/* inner 'proj' shadows the parameter: it is the Proj selecting one result */
77 ir_node *proj = get_edge_src_irn(edge);
78 long pn = get_Proj_proj(proj);
/* NOTE(review): the 'if (pn == 0)' guard (result 0 = low word) is missing
 * from this extraction; only its body is visible here. */
81 edges_reroute(proj, l_res, irg);
82 } else if (pn == 1 && h_res != NULL) {
/* result 1 = high word, only present for 64-bit results */
83 edges_reroute(proj, h_res, irg);
85 panic("Unsupported Result-Proj from Call found");
91 * Replace a call by a tuple of l_res, h_res.
93 * @param call the call node to replace
94 * @param l_res the lower 32 bit result
95 * @param h_res the upper 32 bit result or NULL
96 * @param irg the graph to replace on
97 * @param block the block to replace on (always the call block)
99 static void resolve_call(ir_node *call, ir_node *l_res, ir_node *h_res, ir_graph *irg, ir_node *block) {
100 ir_node *jmp, *res, *in[2];
101 ir_node *bad = get_irg_bad(irg);
102 ir_node *nomem = get_irg_no_mem(irg);
/* Two strategies: with activated edges reroute each Proj of the call
 * individually; otherwise turn the call into a Tuple node. Several
 * case labels, breaks and braces are missing in this extraction. */
105 if (edges_activated(irg)) {
106 /* use rerouting to prevent some warning in the backend */
107 const ir_edge_t *edge, *next;
109 foreach_out_edge_safe(call, edge, next) {
110 ir_node *proj = get_edge_src_irn(edge);
111 pn_Call pn = get_Proj_proj(proj);
114 case pn_Call_X_regular:
116 * We do not check here if this call really has exception and regular Proj's.
117 * new_r_Jmp might than be CSEd with the real exit jmp and then bad things happen
118 * (in movgen.c from 186.crafty for example).
119 * So be sure the newly created Jmp cannot CSE.
/* temporarily disable CSE around the Jmp creation */
121 old_cse = get_opt_cse();
123 jmp = new_r_Jmp(block);
124 set_opt_cse(old_cse);
125 edges_reroute(proj, jmp, irg);
128 case pn_Call_X_except:
129 case pn_Call_P_value_res_base:
130 /* should not happen here */
131 edges_reroute(proj, bad, irg);
/* presumably the pn_Call_M case — the label is missing in this extraction */
134 /* should not happen here */
135 edges_reroute(proj, nomem, irg);
137 case pn_Call_T_result:
138 reroute_result(proj, l_res, h_res, irg);
141 panic("Wrong Proj from Call");
147 /* no edges, build Tuple */
/* NOTE(review): the code filling in[0]/in[1] with l_res/h_res (and the
 * h_res == NULL handling) is missing from this extraction. */
153 res = new_r_Tuple(block, 2, in);
156 turn_into_tuple(call, pn_Call_max);
159 * We do not check here if this call really has exception and regular Proj's.
160 * new_r_Jmp might than be CSEd with the real exit jmp and then bad things happen
161 * (in movgen.c from 186.crafty for example).
162 * So be sure the newly created Jmp cannot CSE.
164 old_cse = get_opt_cse();
166 jmp = new_r_Jmp(block);
167 set_opt_cse(old_cse);
/* wire all call outputs: memory, control flow, results */
169 set_Tuple_pred(call, pn_Call_M, nomem);
170 set_Tuple_pred(call, pn_Call_X_regular, jmp);
171 set_Tuple_pred(call, pn_Call_X_except, bad);
172 set_Tuple_pred(call, pn_Call_T_result, res);
173 set_Tuple_pred(call, pn_Call_P_value_res_base, bad);
178 * Map an Add (a_l, a_h, b_l, b_h) intrinsic call to ia32 ADD/ADC.
180 static int map_Add(ir_node *call, void *ctx) {
181 dbg_info *dbg = get_irn_dbg_info(call);
182 ir_node *block = get_nodes_block(call);
183 ir_node **params = get_Call_param_arr(call);
184 ir_type *method = get_Call_type(call);
185 ir_node *a_l = params[BINOP_Left_Low];
186 ir_node *a_h = params[BINOP_Left_High];
187 ir_node *b_l = params[BINOP_Right_Low];
188 ir_node *b_h = params[BINOP_Right_High];
189 ir_mode *l_mode = get_type_mode(get_method_res_type(method, 0));
190 ir_mode *h_mode = get_type_mode(get_method_res_type(method, 1));
191 ir_mode *mode_flags = ia32_reg_classes[CLASS_ia32_flags].mode;
192 ir_node *add_low, *add_high, *flags;
193 ir_node *l_res, *h_res;
196 /* l_res = a_l + b_l */
197 /* h_res = a_h + b_h + carry */
/* low add produces a mode_T node; the carry is taken via the flags Proj */
199 add_low = new_bd_ia32_l_Add(dbg, block, a_l, b_l, mode_T);
200 flags = new_r_Proj(block, add_low, mode_flags, pn_ia32_flags);
201 add_high = new_bd_ia32_l_Adc(dbg, block, a_h, b_h, flags, h_mode);
203 l_res = new_r_Proj(block, add_low, l_mode, pn_ia32_res);
/* NOTE(review): the 'h_res = add_high;' assignment and 'return 1;' appear
 * to be missing from this extraction. */
206 resolve_call(call, l_res, h_res, current_ir_graph, block);
211 * Map a Sub (a_l, a_h, b_l, b_h) intrinsic call to ia32 SUB/SBB.
213 static int map_Sub(ir_node *call, void *ctx)
215 dbg_info *dbg = get_irn_dbg_info(call);
216 ir_node *block = get_nodes_block(call);
217 ir_node **params = get_Call_param_arr(call);
218 ir_type *method = get_Call_type(call);
219 ir_node *a_l = params[BINOP_Left_Low];
220 ir_node *a_h = params[BINOP_Left_High];
221 ir_node *b_l = params[BINOP_Right_Low];
222 ir_node *b_h = params[BINOP_Right_High];
223 ir_mode *l_mode = get_type_mode(get_method_res_type(method, 0));
224 ir_mode *h_mode = get_type_mode(get_method_res_type(method, 1));
225 ir_mode *mode_flags = ia32_reg_classes[CLASS_ia32_flags].mode;
226 ir_node *sub_low, *sub_high, *flags;
227 ir_node *l_res, *h_res;
230 /* l_res = a_l - b_l */
231 /* h_res = a_h - b_h - carry */
/* low sub produces a mode_T node; the borrow is taken via the flags Proj */
233 sub_low = new_bd_ia32_l_Sub(dbg, block, a_l, b_l, mode_T);
234 flags = new_r_Proj(block, sub_low, mode_flags, pn_ia32_flags);
235 sub_high = new_bd_ia32_l_Sbb(dbg, block, a_h, b_h, flags, h_mode);
237 l_res = new_r_Proj( block, sub_low, l_mode, pn_ia32_res);
/* NOTE(review): the 'h_res = sub_high;' assignment and 'return 1;' appear
 * to be missing from this extraction. */
240 resolve_call(call, l_res, h_res, current_ir_graph, block);
245 * Map a Shl (a_l, a_h, count) intrinsic call.
 * Constant counts are resolved at build time; variable counts compare the
 * count against 32 at runtime and select between SHLD/SHL and the
 * "count >= 32" result (low word becomes 0) with Phis.
247 static int map_Shl(ir_node *call, void *ctx) {
248 ir_graph *irg = current_ir_graph;
249 dbg_info *dbg = get_irn_dbg_info(call);
250 ir_node *block = get_nodes_block(call);
251 ir_node **params = get_Call_param_arr(call);
252 ir_type *method = get_Call_type(call);
253 ir_node *a_l = params[BINOP_Left_Low];
254 ir_node *a_h = params[BINOP_Left_High];
255 ir_node *cnt = params[BINOP_Right_Low];
256 ir_mode *l_mode = get_type_mode(get_method_res_type(method, 0));
257 ir_mode *h_mode = get_type_mode(get_method_res_type(method, 1));
259 ir_node *l_res, *h_res, *irn, *cond, *upper, *n_block, *l1, *l2, *h1, *h2, *in[2];
/* NOTE(review): the 'if (is_Const(cnt))' guard is missing in this extraction. */
263 /* the shift count is a const, create better code */
264 tarval *tv = get_Const_tarval(cnt);
266 if (tarval_cmp(tv, new_tarval_from_long(32, l_mode)) & (pn_Cmp_Gt|pn_Cmp_Eq)) {
267 /* simplest case: shift only the lower bits. Note that there is no
268 need to reduce the constant here, this is done by the hardware. */
/* count >= 32: high word = a_l << cnt, low word = 0 */
269 ir_node *conv = new_rd_Conv(dbg, block, a_l, h_mode);
270 h_res = new_rd_Shl(dbg, block, conv, cnt, h_mode);
271 l_res = new_rd_Const(dbg, irg, get_mode_null(l_mode));
274 /* h_res = SHLD a_h, a_l, cnt */
275 h_res = new_bd_ia32_l_ShlD(dbg, block, a_h, a_l, cnt, h_mode);
277 /* l_res = SHL a_l, cnt */
278 l_res = new_bd_ia32_l_ShlDep(dbg, block, a_l, cnt, h_res, l_mode);
281 resolve_call(call, l_res, h_res, irg, block);
/* variable count: split the block (upper = original block of the call) */
286 upper = get_nodes_block(call);
288 /* h_res = SHLD a_h, a_l, cnt */
289 h1 = new_bd_ia32_l_ShlD(dbg, upper, a_h, a_l, cnt, h_mode);
291 /* l_res = SHL a_l, cnt */
292 l1 = new_bd_ia32_l_ShlDep(dbg, upper, a_l, cnt, h1, l_mode);
/* test (cnt & 32) == 0 to decide whether the count is below 32 */
294 c_mode = get_irn_mode(cnt);
295 irn = new_r_Const_long(irg, c_mode, 32);
296 irn = new_rd_And(dbg, upper, cnt, irn, c_mode);
297 irn = new_rd_Cmp(dbg, upper, irn, new_r_Const(irg, get_mode_null(c_mode)));
298 irn = new_r_Proj(upper, irn, mode_b, pn_Cmp_Eq);
299 cond = new_rd_Cond(dbg, upper, irn);
301 in[0] = new_r_Proj(upper, cond, mode_X, pn_Cond_true);
302 in[1] = new_r_Proj(upper, cond, mode_X, pn_Cond_false);
304 /* the block for cnt >= 32 */
305 n_block = new_rd_Block(dbg, irg, 1, &in[1]);
306 h2 = new_rd_Conv(dbg, n_block, l1, h_mode);
307 l2 = new_r_Const(irg, get_mode_null(l_mode));
308 in[1] = new_r_Jmp(n_block);
/* 'block' becomes the join block with two predecessors */
310 set_irn_in(block, 2, in);
/* NOTE(review): the code loading in[] with l1/l2 (and h1/h2 below) is
 * missing from this extraction. */
314 l_res = new_r_Phi(block, 2, in, l_mode);
315 set_Block_phis(block, l_res);
319 h_res = new_r_Phi(block, 2, in, h_mode);
/* maintain the Phi list for a later part_block() */
320 set_Phi_next(l_res, h_res);
321 set_Phi_next(h_res, NULL);
/* move the call and its kept Proj chain into the join block */
324 set_nodes_block(call, block);
325 for (irn = get_irn_link(call); irn != NULL; irn = get_irn_link(irn))
326 set_nodes_block(irn, block);
328 resolve_call(call, l_res, h_res, irg, block);
333 * Map a Shr (a_l, a_h, count) intrinsic call.
 * Mirror image of map_Shl: SHRD/SHR for counts < 32, otherwise the low
 * word takes a_h >> cnt and the high word becomes 0.
335 static int map_Shr(ir_node *call, void *ctx) {
336 ir_graph *irg = current_ir_graph;
337 dbg_info *dbg = get_irn_dbg_info(call);
338 ir_node *block = get_nodes_block(call);
339 ir_node **params = get_Call_param_arr(call);
340 ir_type *method = get_Call_type(call);
341 ir_node *a_l = params[BINOP_Left_Low];
342 ir_node *a_h = params[BINOP_Left_High];
343 ir_node *cnt = params[BINOP_Right_Low];
344 ir_mode *l_mode = get_type_mode(get_method_res_type(method, 0));
345 ir_mode *h_mode = get_type_mode(get_method_res_type(method, 1));
347 ir_node *l_res, *h_res, *irn, *cond, *upper, *n_block, *l1, *l2, *h1, *h2, *in[2];
/* NOTE(review): the 'if (is_Const(cnt))' guard is missing in this extraction. */
351 /* the shift count is a const, create better code */
352 tarval *tv = get_Const_tarval(cnt);
354 if (tarval_cmp(tv, new_tarval_from_long(32, l_mode)) & (pn_Cmp_Gt|pn_Cmp_Eq)) {
355 /* simplest case: shift only the higher bits. Note that there is no
356 need to reduce the constant here, this is done by the hardware. */
357 ir_node *conv = new_rd_Conv(dbg, block, a_h, l_mode);
358 h_res = new_rd_Const(dbg, irg, get_mode_null(h_mode));
359 l_res = new_rd_Shr(dbg, block, conv, cnt, l_mode);
361 /* l_res = SHRD a_h:a_l, cnt */
362 l_res = new_bd_ia32_l_ShrD(dbg, block, a_l, a_h, cnt, l_mode);
364 /* h_res = SHR a_h, cnt */
365 h_res = new_bd_ia32_l_ShrDep(dbg, block, a_h, cnt, l_res, h_mode);
367 resolve_call(call, l_res, h_res, irg, block);
/* variable count: split the block (upper = original block of the call) */
372 upper = get_nodes_block(call);
374 /* l_res = SHRD a_h:a_l, cnt */
375 l1 = new_bd_ia32_l_ShrD(dbg, upper, a_l, a_h, cnt, l_mode);
377 /* h_res = SHR a_h, cnt */
378 h1 = new_bd_ia32_l_ShrDep(dbg, upper, a_h, cnt, l1, h_mode);
/* test (cnt & 32) == 0 to decide whether the count is below 32 */
380 c_mode = get_irn_mode(cnt);
381 irn = new_r_Const_long(irg, c_mode, 32);
382 irn = new_rd_And(dbg, upper, cnt, irn, c_mode);
383 irn = new_rd_Cmp(dbg, upper, irn, new_r_Const(irg, get_mode_null(c_mode)));
384 irn = new_r_Proj(upper, irn, mode_b, pn_Cmp_Eq);
385 cond = new_rd_Cond(dbg, upper, irn);
387 in[0] = new_r_Proj(upper, cond, mode_X, pn_Cond_true);
388 in[1] = new_r_Proj(upper, cond, mode_X, pn_Cond_false);
390 /* the block for cnt >= 32 */
391 n_block = new_rd_Block(dbg, irg, 1, &in[1]);
392 l2 = new_rd_Conv(dbg, n_block, h1, l_mode);
393 h2 = new_r_Const(irg, get_mode_null(h_mode));
394 in[1] = new_r_Jmp(n_block);
/* 'block' becomes the join block with two predecessors */
396 set_irn_in(block, 2, in);
/* NOTE(review): the code loading in[] with l1/l2 (and h1/h2 below) is
 * missing from this extraction. */
400 l_res = new_r_Phi(block, 2, in, l_mode);
401 set_Block_phis(block, l_res);
405 h_res = new_r_Phi(block, 2, in, h_mode);
/* maintain the Phi list for a later part_block() */
406 set_Phi_next(l_res, h_res);
407 set_Phi_next(h_res, NULL);
/* move the call and its kept Proj chain into the join block */
410 set_nodes_block(call, block);
411 for (irn = get_irn_link(call); irn != NULL; irn = get_irn_link(irn))
412 set_nodes_block(irn, block);
414 resolve_call(call, l_res, h_res, irg, block);
419 * Map a Shrs (a_l, a_h, count) intrinsic call (arithmetic shift right).
 * Like map_Shr but the vacated high word is filled with the sign
 * (a_h >> 31) instead of zero.
421 static int map_Shrs(ir_node *call, void *ctx) {
422 ir_graph *irg = current_ir_graph;
423 dbg_info *dbg = get_irn_dbg_info(call);
424 ir_node *block = get_nodes_block(call);
425 ir_node **params = get_Call_param_arr(call);
426 ir_type *method = get_Call_type(call);
427 ir_node *a_l = params[BINOP_Left_Low];
428 ir_node *a_h = params[BINOP_Left_High];
429 ir_node *cnt = params[BINOP_Right_Low];
430 ir_mode *l_mode = get_type_mode(get_method_res_type(method, 0));
431 ir_mode *h_mode = get_type_mode(get_method_res_type(method, 1));
433 ir_node *l_res, *h_res, *irn, *cond, *upper, *n_block, *l1, *l2, *h1, *h2, *in[2];
/* NOTE(review): the 'if (is_Const(cnt))' guard is missing in this extraction. */
437 /* the shift count is a const, create better code */
438 tarval *tv = get_Const_tarval(cnt);
440 if (tarval_cmp(tv, new_tarval_from_long(32, l_mode)) & (pn_Cmp_Gt|pn_Cmp_Eq)) {
441 /* simplest case: shift only the higher bits. Note that there is no
442 need to reduce the constant here, this is done by the hardware. */
443 ir_node *conv = new_rd_Conv(dbg, block, a_h, l_mode);
444 ir_mode *c_mode = get_irn_mode(cnt);
/* high word is the sign extension of a_h: a_h >>s 31 */
446 h_res = new_rd_Shrs(dbg, block, a_h, new_r_Const_long(irg, c_mode, 31), h_mode);
447 l_res = new_rd_Shrs(dbg, block, conv, cnt, l_mode);
449 /* l_res = SHRD a_h:a_l, cnt */
450 l_res = new_bd_ia32_l_ShrD(dbg, block, a_l, a_h, cnt, l_mode);
452 /* h_res = SAR a_h, cnt */
453 h_res = new_bd_ia32_l_SarDep(dbg, block, a_h, cnt, l_res, h_mode);
455 resolve_call(call, l_res, h_res, irg, block);
/* variable count: split the block (upper = original block of the call) */
460 upper = get_nodes_block(call);
462 /* l_res = SHRD a_h:a_l, cnt */
463 l1 = new_bd_ia32_l_ShrD(dbg, upper, a_l, a_h, cnt, l_mode);
465 /* h_res = SAR a_h, cnt */
466 h1 = new_bd_ia32_l_SarDep(dbg, upper, a_h, cnt, l1, h_mode);
/* test (cnt & 32) == 0 to decide whether the count is below 32 */
468 c_mode = get_irn_mode(cnt);
469 irn = new_r_Const_long(irg, c_mode, 32);
470 irn = new_rd_And(dbg, upper, cnt, irn, c_mode);
471 irn = new_rd_Cmp(dbg, upper, irn, new_r_Const(irg, get_mode_null(c_mode)));
472 irn = new_r_Proj(upper, irn, mode_b, pn_Cmp_Eq);
473 cond = new_rd_Cond(dbg, upper, irn);
475 in[0] = new_r_Proj(upper, cond, mode_X, pn_Cond_true);
476 in[1] = new_r_Proj(upper, cond, mode_X, pn_Cond_false);
478 /* the block for cnt >= 32 */
479 n_block = new_rd_Block(dbg, irg, 1, &in[1]);
480 l2 = new_rd_Conv(dbg, n_block, h1, l_mode);
481 h2 = new_rd_Shrs(dbg, n_block, a_h, new_r_Const_long(irg, c_mode, 31), h_mode);
482 in[1] = new_r_Jmp(n_block);
/* 'block' becomes the join block with two predecessors */
484 set_irn_in(block, 2, in);
/* NOTE(review): the code loading in[] with l1/l2 (and h1/h2 below) is
 * missing from this extraction. */
488 l_res = new_r_Phi(block, 2, in, l_mode);
489 set_Block_phis(block, l_res);
493 h_res = new_r_Phi(block, 2, in, h_mode);
/* maintain the Phi list for a later part_block() */
494 set_Phi_next(l_res, h_res);
495 set_Phi_next(h_res, NULL);
/* move the call and its kept Proj chain into the join block */
498 set_nodes_block(call, block);
499 for (irn = get_irn_link(call); irn != NULL; irn = get_irn_link(irn))
500 set_nodes_block(irn, block);
502 resolve_call(call, l_res, h_res, irg, block);
507 * Checks whether node high is a sign extension of low.
 * Two recognized shapes: high == (low >>s 31) modulo Conv wrappers,
 * or both are constants forming a correctly sign-extended 64-bit value.
509 static int is_sign_extend(ir_node *low, ir_node *high)
/* NOTE(review): the enclosing 'if (is_Shrs(high))' guard and variable
 * declarations are missing from this extraction. */
516 high_r = get_Shrs_right(high);
517 if (!is_Const(high_r)) return 0;
519 shift_count = get_Const_tarval(high_r);
520 if (!tarval_is_long(shift_count)) return 0;
/* only a shift by exactly 31 replicates the sign bit of a 32-bit value */
521 if (get_tarval_long(shift_count) != 31) return 0;
523 high_l = get_Shrs_left(high);
/* accept a Conv on either side of the low/shift-operand pairing */
525 if (is_Conv(low) && get_Conv_op(low) == high_l) return 1;
526 if (is_Conv(high_l) && get_Conv_op(high_l) == low) return 1;
527 } else if (is_Const(low) && is_Const(high)) {
528 tarval *tl = get_Const_tarval(low);
529 tarval *th = get_Const_tarval(high);
531 if (tarval_is_long(th) && tarval_is_long(tl)) {
532 long l = get_tarval_long(tl);
533 long h = get_tarval_long(th);
/* high must be all-zeros for non-negative low, all-ones for negative */
535 return (h == 0 && l >= 0) || (h == -1 && l < 0);
543 * Map a Mul (a_l, a_h, b_l, b_h) intrinsic call.
 * Uses a single 32x32->64 IMul when both operands are sign extensions;
 * otherwise the full schoolbook expansion with MUL plus two cross terms.
545 static int map_Mul(ir_node *call, void *ctx) {
546 dbg_info *dbg = get_irn_dbg_info(call);
547 ir_node *block = get_nodes_block(call);
548 ir_node **params = get_Call_param_arr(call);
549 ir_type *method = get_Call_type(call);
550 ir_node *a_l = params[BINOP_Left_Low];
551 ir_node *a_h = params[BINOP_Left_High];
552 ir_node *b_l = params[BINOP_Right_Low];
553 ir_node *b_h = params[BINOP_Right_High];
554 ir_mode *l_mode = get_type_mode(get_method_res_type(method, 0));
555 ir_mode *h_mode = get_type_mode(get_method_res_type(method, 1));
556 ir_node *l_res, *h_res, *mul, *pEDX, *add;
569 /* handle the often used case of 32x32=64 mul */
570 if (is_sign_extend(a_l, a_h) && is_sign_extend(b_l, b_h)) {
571 mul = new_bd_ia32_l_IMul(dbg, block, a_l, b_l);
572 h_res = new_rd_Proj(dbg, block, mul, h_mode, pn_ia32_l_IMul_res_high);
573 l_res = new_rd_Proj(dbg, block, mul, l_mode, pn_ia32_l_IMul_res_low);
575 /* note that zero extension is handled here efficiently */
576 mul = new_bd_ia32_l_Mul(dbg, block, a_l, b_l);
577 pEDX = new_rd_Proj(dbg, block, mul, h_mode, pn_ia32_l_Mul_res_high);
578 l_res = new_rd_Proj(dbg, block, mul, l_mode, pn_ia32_l_Mul_res_low);
/* h_res = high(a_l*b_l) + a_h*b_l + a_l*b_h (mod 2^32) */
580 b_l = new_rd_Conv(dbg, block, b_l, h_mode);
581 mul = new_rd_Mul( dbg, block, a_h, b_l, h_mode);
582 add = new_rd_Add( dbg, block, mul, pEDX, h_mode);
583 a_l = new_rd_Conv(dbg, block, a_l, h_mode);
584 mul = new_rd_Mul( dbg, block, a_l, b_h, h_mode);
585 h_res = new_rd_Add( dbg, block, add, mul, h_mode);
587 resolve_call(call, l_res, h_res, current_ir_graph, block);
593 * Map a Minus (a_l, a_h) intrinsic call to the ia32 Minus64Bit node.
595 static int map_Minus(ir_node *call, void *ctx) {
596 dbg_info *dbg = get_irn_dbg_info(call);
597 ir_node *block = get_nodes_block(call);
598 ir_node **params = get_Call_param_arr(call);
599 ir_type *method = get_Call_type(call);
600 ir_node *a_l = params[BINOP_Left_Low];
601 ir_node *a_h = params[BINOP_Left_High];
602 ir_mode *l_mode = get_type_mode(get_method_res_type(method, 0));
603 ir_mode *h_mode = get_type_mode(get_method_res_type(method, 1));
604 ir_node *l_res, *h_res, *res;
/* one combined backend node yields both result halves as Projs */
607 res = new_bd_ia32_Minus64Bit(dbg, block, a_l, a_h);
608 l_res = new_r_Proj(block, res, l_mode, pn_ia32_Minus64Bit_low_res);
609 h_res = new_r_Proj(block, res, h_mode, pn_ia32_Minus64Bit_high_res);
611 resolve_call(call, l_res, h_res, current_ir_graph, block);
617 * Map an Abs (a_l, a_h) intrinsic call.
 * Branch-free 64-bit abs: xor both halves with the sign mask
 * (a_h >>s 31), then subtract the mask with borrow.
619 static int map_Abs(ir_node *call, void *ctx) {
620 dbg_info *dbg = get_irn_dbg_info(call);
621 ir_node *block = get_nodes_block(call);
622 ir_node **params = get_Call_param_arr(call);
623 ir_type *method = get_Call_type(call);
624 ir_node *a_l = params[BINOP_Left_Low];
625 ir_node *a_h = params[BINOP_Left_High];
626 ir_mode *l_mode = get_type_mode(get_method_res_type(method, 0));
627 ir_mode *h_mode = get_type_mode(get_method_res_type(method, 1));
628 ir_mode *mode_flags = ia32_reg_classes[CLASS_ia32_flags].mode;
629 ir_node *l_res, *h_res, *sign, *sub_l, *sub_h;
636 Code inspired by gcc output :) (although gcc doubles the
637 operation for t1 as t2 and uses t1 for operations with low part
638 and t2 for operations with high part which is actually unnecessary
639 because t1 and t2 represent the same value)
645 h_res = t3 - t1 - carry
649 /* TODO: give a hint to the backend somehow to not create a cltd here... */
650 sign = new_rd_Shrs(dbg, block, a_h, new_Const_long(l_mode, 31), h_mode);
651 sign_l = new_rd_Conv(dbg, block, sign, l_mode);
652 sub_l = new_rd_Eor(dbg, block, a_l, sign_l, l_mode);
653 sub_h = new_rd_Eor(dbg, block, a_h, sign, h_mode);
/* low sub in mode_T so the borrow can flow into the high Sbb via flags */
655 l_sub = new_bd_ia32_l_Sub(dbg, block, sub_l, sign_l, mode_T);
656 l_res = new_r_Proj(block, l_sub, l_mode, pn_ia32_res);
657 flags = new_r_Proj(block, l_sub, mode_flags, pn_ia32_flags);
658 h_res = new_bd_ia32_l_Sbb(dbg, block, sub_h, sign, flags, h_mode);
660 resolve_call(call, l_res, h_res, current_ir_graph, block);
/* Build an ident from a string literal (length excludes the trailing NUL). */
665 #define ID(x) new_id_from_chars(x, sizeof(x)-1)
668 * Maps a Div. Change into a library call to __divdi3/__udivdi3.
670 static int map_Div(ir_node *call, void *ctx) {
671 ia32_intrinsic_env_t *env = ctx;
672 ir_type *method = get_Call_type(call);
673 ir_mode *h_mode = get_type_mode(get_method_res_type(method, 1));
/* signedness of the high result word selects the libgcc routine */
678 if (mode_is_signed(h_mode)) {
679 /* 64bit signed Division */
/* NOTE(review): the cache check ('ent = env->divdi3; if (!ent) {') is
 * missing from this extraction; the entity is created lazily. */
682 /* create library entity */
683 ent = env->divdi3 = new_entity(get_glob_type(), ID("__divdi3"), method);
684 set_entity_visibility(ent, visibility_external_allocated);
685 set_entity_ld_ident(ent, ID("__divdi3"));
688 /* 64bit unsigned Division */
691 /* create library entity */
692 ent = env->udivdi3 = new_entity(get_glob_type(), ID("__udivdi3"), method);
693 set_entity_visibility(ent, visibility_external_allocated);
694 set_entity_ld_ident(ent, ID("__udivdi3"));
/* retarget the call's SymConst to the library entity */
698 ptr = get_Call_ptr(call);
699 set_SymConst_symbol(ptr, sym);
704 * Maps a Mod. Change into a library call to __moddi3/__umoddi3.
706 static int map_Mod(ir_node *call, void *ctx) {
707 ia32_intrinsic_env_t *env = ctx;
708 ir_type *method = get_Call_type(call);
709 ir_mode *h_mode = get_type_mode(get_method_res_type(method, 1));
/* signedness of the high result word selects the libgcc routine */
714 if (mode_is_signed(h_mode)) {
715 /* 64bit signed Modulo */
/* NOTE(review): the cache check ('ent = env->moddi3; if (!ent) {') is
 * missing from this extraction; the entity is created lazily. */
718 /* create library entity */
719 ent = env->moddi3 = new_entity(get_glob_type(), ID("__moddi3"), method);
720 set_entity_visibility(ent, visibility_external_allocated);
721 set_entity_ld_ident(ent, ID("__moddi3"));
724 /* 64bit unsigned Modulo */
727 /* create library entity */
728 ent = env->umoddi3 = new_entity(get_glob_type(), ID("__umoddi3"), method);
729 set_entity_visibility(ent, visibility_external_allocated);
730 set_entity_ld_ident(ent, ID("__umoddi3"));
/* retarget the call's SymConst to the library entity */
734 ptr = get_Call_ptr(call);
735 set_SymConst_symbol(ptr, sym);
/**
 * Maps a Conv intrinsic: float <-> 64-bit integer conversions.
 * One float parameter means float -> long long; two integer parameters
 * mean long long -> float. Unsigned float -> ll needs a range split
 * because fistp only handles the signed range.
 */
742 static int map_Conv(ir_node *call, void *ctx) {
743 ir_graph *irg = current_ir_graph;
744 dbg_info *dbg = get_irn_dbg_info(call);
745 ir_node *block = get_nodes_block(call);
746 ir_node **params = get_Call_param_arr(call);
747 ir_type *method = get_Call_type(call);
748 int n = get_Call_n_params(call);
749 ir_node *l_res, *h_res;
/* NOTE(review): the 'if (n == 1)' dispatch on parameter count is
 * missing from this extraction. */
753 ir_node *float_to_ll;
755 /* We have a Conv float -> long long here */
756 ir_node *a_f = params[0];
757 ir_mode *l_res_mode = get_type_mode(get_method_res_type(method, 0));
758 ir_mode *h_res_mode = get_type_mode(get_method_res_type(method, 1));
760 assert(mode_is_float(get_irn_mode(a_f)) && "unexpected Conv call");
762 if (mode_is_signed(h_res_mode)) {
763 /* convert from float to signed 64bit */
764 float_to_ll = new_bd_ia32_l_FloattoLL(dbg, block, a_f);
766 l_res = new_r_Proj(block, float_to_ll, l_res_mode,
767 pn_ia32_l_FloattoLL_res_low);
768 h_res = new_r_Proj(block, float_to_ll, h_res_mode,
769 pn_ia32_l_FloattoLL_res_high);
771 /* convert from float to unsigned 64bit */
/* flt_corr = 2^63: values >= 2^63 are shifted into the signed range
 * first and the high word corrected by 0x80000000 afterwards */
772 ir_mode *flt_mode = get_irn_mode(a_f);
773 tarval *flt_tv = new_tarval_from_str("9223372036854775808", 19, flt_mode);
774 ir_node *flt_corr = new_Const(flt_tv);
775 ir_node *lower_blk = block;
777 ir_node *cmp, *proj, *cond, *blk, *int_phi, *flt_phi;
/* NOTE(review): the part_block(call) that creates upper_blk/lower_blk
 * is missing from this extraction. */
781 upper_blk = get_nodes_block(call);
/* branch on a_f < 2^63 */
783 cmp = new_rd_Cmp(dbg, upper_blk, a_f, flt_corr);
784 proj = new_r_Proj(upper_blk, cmp, mode_b, pn_Cmp_Lt);
785 cond = new_rd_Cond(dbg, upper_blk, proj);
786 in[0] = new_r_Proj(upper_blk, cond, mode_X, pn_Cond_true);
787 in[1] = new_r_Proj(upper_blk, cond, mode_X, pn_Cond_false);
/* correction block for a_f >= 2^63 */
788 blk = new_r_Block(irg, 1, &in[1]);
789 in[1] = new_r_Jmp(blk);
791 set_irn_in(lower_blk, 2, in);
/* high-word correction: 0 on the in-range path, 0x80000000 otherwise */
794 in[0] = new_Const(get_mode_null(h_res_mode));
795 in[1] = new_Const_long(h_res_mode, 0x80000000);
797 int_phi = new_r_Phi(lower_blk, 2, in, h_res_mode);
/* float operand: unchanged on the in-range path, a_f - 2^63 otherwise */
800 in[1] = new_rd_Sub(dbg, upper_blk, a_f, flt_corr, flt_mode);
802 flt_phi = new_r_Phi(lower_blk, 2, in, flt_mode);
804 /* fix Phi links for next part_block() */
805 set_Block_phis(lower_blk, int_phi);
806 set_Phi_next(int_phi, flt_phi);
807 set_Phi_next(flt_phi, NULL);
809 float_to_ll = new_bd_ia32_l_FloattoLL(dbg, lower_blk, flt_phi);
811 l_res = new_r_Proj(lower_blk, float_to_ll, l_res_mode,
812 pn_ia32_l_FloattoLL_res_low);
813 h_res = new_r_Proj(lower_blk, float_to_ll, h_res_mode,
814 pn_ia32_l_FloattoLL_res_high);
/* undo the range shift in the high word */
816 h_res = new_rd_Add(dbg, lower_blk, h_res, int_phi, h_res_mode);
818 /* move the call and its Proj's to the lower block */
819 set_nodes_block(call, lower_blk);
821 for (proj = get_irn_link(call); proj != NULL; proj = get_irn_link(proj))
822 set_nodes_block(proj, lower_blk);
826 resolve_call(call, l_res, h_res, irg, block);
828 ir_node *ll_to_float;
830 /* We have a Conv long long -> float here */
831 ir_node *a_l = params[BINOP_Left_Low];
832 ir_node *a_h = params[BINOP_Left_High];
833 ir_mode *fres_mode = get_type_mode(get_method_res_type(method, 0));
835 assert(! mode_is_float(get_irn_mode(a_l))
836 && ! mode_is_float(get_irn_mode(a_h)));
838 ll_to_float = new_bd_ia32_l_LLtoFloat(dbg, block, a_h, a_l, fres_mode);
/* single float result, no high word */
841 resolve_call(call, ll_to_float, NULL, irg, block);
843 panic("unexpected Conv call %+F", call);
849 /* Ia32 implementation of intrinsic mapping: creates (and caches) the
 * entity for an intrinsic op and registers the matching mapper in the
 * 'intrinsics' record array consumed by ia32_handle_intrinsics(). */
850 ir_entity *ia32_create_intrinsic_fkt(ir_type *method, const ir_op *op,
851 const ir_mode *imode, const ir_mode *omode,
855 ir_entity **ent = NULL;
856 i_mapper_func mapper;
/* lazily allocate the flexible record array on first use */
859 intrinsics = NEW_ARR_F(i_record, 0);
/* NOTE(review): each case's 'mapper = map_*; break;' lines are missing
 * from this extraction; only the entity-cache selection is visible. */
861 switch (get_op_code(op)) {
863 ent = &i_ents[iro_Add];
867 ent = &i_ents[iro_Sub];
871 ent = &i_ents[iro_Shl];
875 ent = &i_ents[iro_Shr];
879 ent = &i_ents[iro_Shrs];
883 ent = &i_ents[iro_Mul];
887 ent = &i_ents[iro_Minus];
891 ent = &i_ents[iro_Abs];
895 ent = &i_ents[iro_Div];
899 ent = &i_ents[iro_Mod];
903 ent = &i_ents[iro_Conv];
907 fprintf(stderr, "FIXME: unhandled op for ia32 intrinsic function %s\n", get_id_str(op->name));
908 return def_create_intrinsic_fkt(method, op, imode, omode, context);
912 #define IDENT(s) new_id_from_chars(s, sizeof(s)-1)
/* create the cached entity under a mangled "L<opname>" identifier */
914 ident *id = id_mangle(IDENT("L"), get_op_ident(op));
915 *ent = new_entity(get_glob_type(), id, method);
/* register the mapping record for the lowering pass */
918 elt.i_call.kind = INTRINSIC_CALL;
919 elt.i_call.i_ent = *ent;
920 elt.i_call.i_mapper = mapper;
921 elt.i_call.ctx = context;
922 elt.i_call.link = NULL;
924 ARR_APP1(i_record, intrinsics, elt);