2 * Copyright (C) 1995-2007 University of Karlsruhe. All rights reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief This file implements the mapping of 64Bit intrinsic
23 * functions to code or library calls.
24 * @author Michael Beck
36 #include "lower_intrinsics.h"
40 #include "ia32_new_nodes.h"
41 #include "bearch_ia32_t.h"
42 #include "gen_ia32_regalloc_if.h"
44 /** The array of all intrinsics that must be mapped. */
45 static i_record *intrinsics;
47 /** An array to cache all entities */
48 static ir_entity *i_ents[iro_MaxOpcode];
51 * Maps all intrinsic calls that the backend supports
52 * and all instructions the backend does not support
55 void ia32_handle_intrinsics(void) {
56 if (intrinsics && ARR_LEN(intrinsics) > 0)
57 lower_intrinsics(intrinsics, ARR_LEN(intrinsics));
60 #define BINOP_Left_Low 0
61 #define BINOP_Left_High 1
62 #define BINOP_Right_Low 2
63 #define BINOP_Right_High 3
66 * Replace a call by a tuple of l_res, h_res.
68 static void resolve_call(ir_node *call, ir_node *l_res, ir_node *h_res, ir_graph *irg, ir_node *block) {
73 res = new_r_Tuple(irg, block, h_res == NULL ? 1 : 2, in);
75 turn_into_tuple(call, pn_Call_max);
76 set_Tuple_pred(call, pn_Call_M_regular, get_irg_no_mem(irg));
77 set_Tuple_pred(call, pn_Call_X_regular, new_r_Jmp(irg, block));
78 set_Tuple_pred(call, pn_Call_X_except, get_irg_bad(irg));
79 set_Tuple_pred(call, pn_Call_T_result, res);
80 set_Tuple_pred(call, pn_Call_M_except, get_irg_no_mem(irg));
81 set_Tuple_pred(call, pn_Call_P_value_res_base, get_irg_bad(irg));
85 * Map an Add (a_l, a_h, b_l, b_h)
87 static int map_Add(ir_node *call, void *ctx) {
88 ir_graph *irg = current_ir_graph;
89 dbg_info *dbg = get_irn_dbg_info(call);
90 ir_node *block = get_nodes_block(call);
91 ir_node **params = get_Call_param_arr(call);
92 ir_type *method = get_Call_type(call);
93 ir_node *a_l = params[BINOP_Left_Low];
94 ir_node *a_h = params[BINOP_Left_High];
95 ir_node *b_l = params[BINOP_Right_Low];
96 ir_node *b_h = params[BINOP_Right_High];
97 ir_mode *l_res_mode = get_type_mode(get_method_res_type(method, 0));
98 ir_mode *h_res_mode = get_type_mode(get_method_res_type(method, 1));
99 ir_node *l_res, *h_res, *add;
101 /* l_res = a_l + b_l */
102 /* h_res = a_h + b_h + carry */
104 add = new_rd_ia32_Add64Bit(dbg, irg, block, a_l, a_h, b_l, b_h);
105 l_res = new_r_Proj(irg, block, add, l_res_mode, pn_ia32_Add64Bit_low_res);
106 h_res = new_r_Proj(irg, block, add, h_res_mode, pn_ia32_Add64Bit_high_res);
108 resolve_call(call, l_res, h_res, irg, block);
113 * Map a Sub (a_l, a_h, b_l, b_h)
115 static int map_Sub(ir_node *call, void *ctx) {
116 ir_graph *irg = current_ir_graph;
117 dbg_info *dbg = get_irn_dbg_info(call);
118 ir_node *block = get_nodes_block(call);
119 ir_node **params = get_Call_param_arr(call);
120 ir_type *method = get_Call_type(call);
121 ir_node *a_l = params[BINOP_Left_Low];
122 ir_node *a_h = params[BINOP_Left_High];
123 ir_node *b_l = params[BINOP_Right_Low];
124 ir_node *b_h = params[BINOP_Right_High];
125 ir_mode *l_res_mode = get_type_mode(get_method_res_type(method, 0));
126 ir_mode *h_res_mode = get_type_mode(get_method_res_type(method, 1));
127 ir_node *l_res, *h_res, *res;
129 /* l_res = a_l - b_l */
130 /* h_res = a_h - b_h - carry */
132 res = new_rd_ia32_Sub64Bit(dbg, irg, block, a_l, a_h, b_l, b_h);
133 l_res = new_r_Proj(irg, block, res, l_res_mode, pn_ia32_Sub64Bit_low_res);
134 h_res = new_r_Proj(irg, block, res, h_res_mode, pn_ia32_Sub64Bit_high_res);
136 resolve_call(call, l_res, h_res, irg, block);
141 * Map a Shl (a_l, a_h, count)
143 static int map_Shl(ir_node *call, void *ctx) {
144 ir_graph *irg = current_ir_graph;
145 dbg_info *dbg = get_irn_dbg_info(call);
146 ir_node *block = get_nodes_block(call);
147 ir_node **params = get_Call_param_arr(call);
148 ir_type *method = get_Call_type(call);
149 ir_node *a_l = params[BINOP_Left_Low];
150 ir_node *a_h = params[BINOP_Left_High];
151 ir_node *cnt = params[BINOP_Right_Low];
152 ir_mode *l_res_mode = get_type_mode(get_method_res_type(method, 0));
153 ir_mode *h_res_mode = get_type_mode(get_method_res_type(method, 1));
154 ir_node *l_res, *h_res;
156 /* h_res = SHLD a_h, a_l, cnt */
157 h_res = new_rd_ia32_l_ShlD(dbg, irg, block, a_h, a_l, cnt, l_res_mode);
159 /* l_res = SHL a_l, cnt */
160 l_res = new_rd_ia32_l_Shl(dbg, irg, block, a_l, cnt, h_res_mode);
162 //add_irn_dep(l_res, h_res);
164 resolve_call(call, l_res, h_res, irg, block);
169 * Map a Shr (a_l, a_h, count)
171 static int map_Shr(ir_node *call, void *ctx) {
172 ir_graph *irg = current_ir_graph;
173 dbg_info *dbg = get_irn_dbg_info(call);
174 ir_node *block = get_nodes_block(call);
175 ir_node **params = get_Call_param_arr(call);
176 ir_type *method = get_Call_type(call);
177 ir_node *a_l = params[BINOP_Left_Low];
178 ir_node *a_h = params[BINOP_Left_High];
179 ir_node *cnt = params[BINOP_Right_Low];
180 ir_mode *l_res_mode = get_type_mode(get_method_res_type(method, 0));
181 ir_mode *h_res_mode = get_type_mode(get_method_res_type(method, 1));
182 ir_node *l_res, *h_res;
184 /* l_res = SHRD a_l, a_h, cnt */
185 l_res = new_rd_ia32_l_ShrD(dbg, irg, block, a_l, a_h, cnt, l_res_mode);
187 /* h_res = SHR a_h, cnt */
188 h_res = new_rd_ia32_l_Shr(dbg, irg, block, a_h, cnt, h_res_mode);
190 //add_irn_dep(h_res, l_res);
192 resolve_call(call, l_res, h_res, irg, block);
197 * Map a Shrs (a_l, a_h, count)
199 static int map_Shrs(ir_node *call, void *ctx) {
200 ir_graph *irg = current_ir_graph;
201 dbg_info *dbg = get_irn_dbg_info(call);
202 ir_node *block = get_nodes_block(call);
203 ir_node **params = get_Call_param_arr(call);
204 ir_type *method = get_Call_type(call);
205 ir_node *a_l = params[BINOP_Left_Low];
206 ir_node *a_h = params[BINOP_Left_High];
207 ir_node *cnt = params[BINOP_Right_Low];
208 ir_mode *l_res_mode = get_type_mode(get_method_res_type(method, 0));
209 ir_mode *h_res_mode = get_type_mode(get_method_res_type(method, 1));
210 ir_node *l_res, *h_res;
212 /* l_res = SHRD a_l, a_h, cnt */
213 l_res = new_rd_ia32_l_ShrD(dbg, irg, block, a_l, a_h, cnt, l_res_mode);
215 /* h_res = SAR a_h, cnt */
216 h_res = new_rd_ia32_l_Sar(dbg, irg, block, a_h, cnt, h_res_mode);
218 //add_irn_dep(h_res, l_res);
220 resolve_call(call, l_res, h_res, irg, block);
225 * Map a Mul (a_l, a_h, b_l, b_h)
227 static int map_Mul(ir_node *call, void *ctx) {
228 ir_graph *irg = current_ir_graph;
229 dbg_info *dbg = get_irn_dbg_info(call);
230 ir_node *block = get_nodes_block(call);
231 ir_node **params = get_Call_param_arr(call);
232 ir_type *method = get_Call_type(call);
233 ir_node *a_l = params[BINOP_Left_Low];
234 ir_node *a_h = params[BINOP_Left_High];
235 ir_node *b_l = params[BINOP_Right_Low];
236 ir_node *b_h = params[BINOP_Right_High];
237 ir_mode *l_res_mode = get_type_mode(get_method_res_type(method, 0));
238 ir_mode *h_res_mode = get_type_mode(get_method_res_type(method, 1));
239 ir_node *l_res, *h_res, *mul, *pEDX, *add;
250 mul = new_rd_ia32_l_Mul(dbg, irg, block, a_l, b_l);
251 pEDX = new_rd_Proj(dbg, irg, block, mul, l_res_mode, pn_ia32_l_Mul_EDX);
252 l_res = new_rd_Proj(dbg, irg, block, mul, l_res_mode, pn_ia32_l_Mul_EAX);
254 mul = new_rd_ia32_l_Mul(dbg, irg, block, a_h, b_l);
255 add = new_rd_ia32_l_Add(dbg, irg, block, mul, pEDX, h_res_mode);
256 mul = new_rd_ia32_l_Mul(dbg, irg, block, a_l, b_h);
257 h_res = new_rd_ia32_l_Add(dbg, irg, block, add, mul, h_res_mode);
259 resolve_call(call, l_res, h_res, irg, block);
265 * Map a Minus (a_l, a_h)
267 static int map_Minus(ir_node *call, void *ctx) {
268 ir_graph *irg = current_ir_graph;
269 dbg_info *dbg = get_irn_dbg_info(call);
270 ir_node *block = get_nodes_block(call);
271 ir_node **params = get_Call_param_arr(call);
272 ir_type *method = get_Call_type(call);
273 ir_node *a_l = params[BINOP_Left_Low];
274 ir_node *a_h = params[BINOP_Left_High];
275 ir_mode *l_res_mode = get_type_mode(get_method_res_type(method, 0));
276 ir_mode *h_res_mode = get_type_mode(get_method_res_type(method, 1));
277 ir_node *l_res, *h_res, *cnst, *res;
279 /* too bad: we need 0 in a register here */
280 cnst = new_Const_long(h_res_mode, 0);
282 /* l_res = 0 - a_l */
283 /* h_res = 0 - a_h - carry */
285 res = new_rd_ia32_Minus64Bit(dbg, irg, block, cnst, a_l, a_h);
286 l_res = new_r_Proj(irg, block, res, l_res_mode, pn_ia32_Minus64Bit_low_res);
287 h_res = new_r_Proj(irg, block, res, h_res_mode, pn_ia32_Minus64Bit_high_res);
289 resolve_call(call, l_res, h_res, irg, block);
295 * Map a Abs (a_l, a_h)
297 static int map_Abs(ir_node *call, void *ctx) {
298 ir_graph *irg = current_ir_graph;
299 dbg_info *dbg = get_irn_dbg_info(call);
300 ir_node *block = get_nodes_block(call);
301 ir_node **params = get_Call_param_arr(call);
302 ir_type *method = get_Call_type(call);
303 ir_node *a_l = params[BINOP_Left_Low];
304 ir_node *a_h = params[BINOP_Left_High];
305 ir_mode *l_res_mode = get_type_mode(get_method_res_type(method, 0));
306 ir_mode *h_res_mode = get_type_mode(get_method_res_type(method, 1));
307 ir_node *l_res, *h_res, *sign, *sub_l, *sub_h, *res;
310 Code inspired by gcc output :) (although gcc doubles the
311 operation for t1 as t2 and uses t1 for operations with low part
312 and t2 for operations with high part which is actually unnecessary
313 because t1 and t2 represent the same value)
319 h_res = t3 - t1 - carry
323 sign = new_rd_ia32_l_Sar(dbg, irg, block, a_h, new_Const_long(h_res_mode, 31), h_res_mode);
324 sub_l = new_rd_ia32_l_Xor(dbg, irg, block, a_l, sign, l_res_mode);
325 sub_h = new_rd_ia32_l_Xor(dbg, irg, block, a_h, sign, h_res_mode);
326 res = new_rd_ia32_Sub64Bit(dbg, irg, block, sub_l, sub_h, sign, sign);
327 l_res = new_r_Proj(irg, block, res, l_res_mode, pn_ia32_Sub64Bit_low_res);
328 h_res = new_r_Proj(irg, block, res, h_res_mode, pn_ia32_Sub64Bit_high_res);
330 resolve_call(call, l_res, h_res, irg, block);
338 } ia32_intrinsic_divmod_t;
341 * Maps a Div/Mod (a_l, a_h, b_l, b_h)
343 static int DivMod_mapper(ir_node *call, void *ctx, ia32_intrinsic_divmod_t dmtp) {
344 ia32_intrinsic_env_t *env = ctx;
345 ir_graph *irg = current_ir_graph;
346 dbg_info *dbg = get_irn_dbg_info(call);
347 ir_node *block = get_nodes_block(call);
348 ir_node **params = get_Call_param_arr(call);
349 ir_type *method = get_Call_type(call);
350 ir_node *a_l = params[BINOP_Left_Low];
351 ir_node *a_h = params[BINOP_Left_High];
352 ir_node *b_l = params[BINOP_Right_Low];
353 ir_node *b_h = params[BINOP_Right_High];
354 ir_mode *l_res_mode = get_type_mode(get_method_res_type(method, 0));
355 ir_mode *h_res_mode = get_type_mode(get_method_res_type(method, 1));
356 int mode_bytes = get_mode_size_bytes(ia32_reg_classes[CLASS_ia32_gp].mode);
357 ir_entity *ent_a = env->irg == irg ? env->ll_div_op1 : NULL;
358 ir_entity *ent_b = env->irg == irg ? env->ll_div_op2 : NULL;
359 ir_node *l_res, *h_res, *frame;
360 ir_node *store_l, *store_h;
361 ir_node *op_mem[2], *mem, *fa_mem, *fb_mem;
362 ir_node *fa, *fb, *fres;
364 /* allocate memory on frame to store args */
366 ent_a = env->ll_div_op1 =
367 frame_alloc_area(get_irg_frame_type(irg), 2 * mode_bytes, 16, 0);
372 ent_b = env->ll_div_op2 =
373 frame_alloc_area(get_irg_frame_type(irg), 2 * mode_bytes, 16, 0);
377 frame = get_irg_frame(irg);
379 /* store first arg */
380 store_l = new_rd_ia32_l_Store(dbg, irg, block, frame, a_l, get_irg_no_mem(irg));
381 set_ia32_frame_ent(store_l, ent_a);
382 set_ia32_use_frame(store_l);
383 set_ia32_ls_mode(store_l, get_irn_mode(a_l));
386 store_h = new_rd_ia32_l_Store(dbg, irg, block, frame, a_h, get_irg_no_mem(irg));
387 set_ia32_frame_ent(store_h, ent_a);
388 add_ia32_am_offs_int(store_h, mode_bytes);
389 set_ia32_use_frame(store_h);
390 set_ia32_ls_mode(store_h, get_irn_mode(a_h));
393 mem = new_r_Sync(irg, block, 2, op_mem);
395 /* load first arg into FPU */
396 fa = new_rd_ia32_l_vfild(dbg, irg, block, frame, mem);
397 set_ia32_frame_ent(fa, ent_a);
398 set_ia32_use_frame(fa);
399 set_ia32_ls_mode(fa, mode_D);
400 fa_mem = new_r_Proj(irg, block, fa, mode_M, pn_ia32_l_vfild_M);
401 fa = new_r_Proj(irg, block, fa, mode_E, pn_ia32_l_vfild_res);
403 /* store second arg */
404 store_l = new_rd_ia32_l_Store(dbg, irg, block, frame, b_l, get_irg_no_mem(irg));
405 set_ia32_frame_ent(store_l, ent_b);
406 set_ia32_use_frame(store_l);
407 set_ia32_ls_mode(store_l, get_irn_mode(b_l));
410 store_h = new_rd_ia32_l_Store(dbg, irg, block, frame, b_h, get_irg_no_mem(irg));
411 set_ia32_frame_ent(store_h, ent_b);
412 add_ia32_am_offs_int(store_h, mode_bytes);
413 set_ia32_use_frame(store_h);
414 set_ia32_ls_mode(store_h, get_irn_mode(b_h));
417 mem = new_r_Sync(irg, block, 2, op_mem);
419 /* load second arg into FPU */
420 fb = new_rd_ia32_l_vfild(dbg, irg, block, frame, mem);
421 set_ia32_frame_ent(fb, ent_b);
422 set_ia32_use_frame(fb);
423 set_ia32_ls_mode(fb, mode_D);
424 fb_mem = new_r_Proj(irg, block, fb, mode_M, pn_ia32_l_vfild_M);
425 fb = new_r_Proj(irg, block, fb, mode_E, pn_ia32_l_vfild_res);
430 mem = new_r_Sync(irg, block, 2, op_mem);
432 /* perform division */
434 case IA32_INTRINSIC_DIV:
435 fres = new_rd_ia32_l_vfdiv(dbg, irg, block, fa, fb);
436 fres = new_rd_Proj(dbg, irg, block, fres, mode_E, pn_ia32_l_vfdiv_res);
438 case IA32_INTRINSIC_MOD:
439 fres = new_rd_ia32_l_vfprem(dbg, irg, block, fa, fb, mode_E);
445 /* store back result, we use ent_a here */
446 fres = new_rd_ia32_l_vfist(dbg, irg, block, frame, fres, mem);
447 set_ia32_frame_ent(fres, ent_a);
448 set_ia32_use_frame(fres);
449 set_ia32_ls_mode(fres, mode_D);
452 /* load low part of the result */
453 l_res = new_rd_ia32_l_Load(dbg, irg, block, frame, mem);
454 set_ia32_frame_ent(l_res, ent_a);
455 set_ia32_use_frame(l_res);
456 set_ia32_ls_mode(l_res, l_res_mode);
457 l_res = new_r_Proj(irg, block, l_res, l_res_mode, pn_ia32_l_Load_res);
459 /* load hight part of the result */
460 h_res = new_rd_ia32_l_Load(dbg, irg, block, frame, mem);
461 set_ia32_frame_ent(h_res, ent_a);
462 add_ia32_am_offs_int(h_res, mode_bytes);
463 set_ia32_use_frame(h_res);
464 set_ia32_ls_mode(h_res, h_res_mode);
465 h_res = new_r_Proj(irg, block, h_res, h_res_mode, pn_ia32_l_Load_res);
468 resolve_call(call, l_res, h_res, irg, block);
473 static int map_Div(ir_node *call, void *ctx) {
474 return DivMod_mapper(call, ctx, IA32_INTRINSIC_DIV);
477 static int map_Mod(ir_node *call, void *ctx) {
478 return DivMod_mapper(call, ctx, IA32_INTRINSIC_MOD);
482 * Maps a Conv (a_l, a_h)
484 static int map_Conv(ir_node *call, void *ctx) {
485 ia32_intrinsic_env_t *env = ctx;
486 ir_graph *irg = current_ir_graph;
487 dbg_info *dbg = get_irn_dbg_info(call);
488 ir_node *block = get_nodes_block(call);
489 ir_node **params = get_Call_param_arr(call);
490 ir_type *method = get_Call_type(call);
491 int n = get_Call_n_params(call);
492 int gp_bytes = get_mode_size_bytes(ia32_reg_classes[CLASS_ia32_gp].mode);
494 ir_node *l_res, *h_res, *frame, *fres;
495 ir_node *store_l, *store_h;
496 ir_node *op_mem[2], *mem;
499 /* We have a Conv float -> long long here */
500 ir_node *a_f = params[0];
501 ir_mode *l_res_mode = get_type_mode(get_method_res_type(method, 0));
502 ir_mode *h_res_mode = get_type_mode(get_method_res_type(method, 1));
504 assert(mode_is_float(get_irn_mode(a_f)) && "unexpected Conv call");
506 /* allocate memory on frame to store args */
507 ent = env->irg == irg ? env->d_ll_conv : NULL;
509 ent = env->d_ll_conv = frame_alloc_area(get_irg_frame_type(irg), 2 * gp_bytes, 16, 0);
514 frame = get_irg_frame(irg);
517 Now we create a node to move the value from a XMM register into
518 x87 FPU because it is unknown here, which FPU is used.
519 This node is killed in transformation phase when not needed.
520 Otherwise it is split up into a movsd + fld
522 a_f = new_rd_ia32_l_SSEtoX87(dbg, irg, block, frame, a_f, get_irg_no_mem(irg), mode_D);
523 set_ia32_frame_ent(a_f, ent);
524 set_ia32_use_frame(a_f);
525 set_ia32_ls_mode(a_f, mode_D);
527 /* store from FPU as Int */
528 a_f = new_rd_ia32_l_vfist(dbg, irg, block, frame, a_f, get_irg_no_mem(irg));
529 set_ia32_frame_ent(a_f, ent);
530 set_ia32_use_frame(a_f);
531 set_ia32_ls_mode(a_f, mode_D);
534 /* load low part of the result */
535 l_res = new_rd_ia32_l_Load(dbg, irg, block, frame, mem);
536 set_ia32_frame_ent(l_res, ent);
537 set_ia32_use_frame(l_res);
538 set_ia32_ls_mode(l_res, l_res_mode);
539 l_res = new_r_Proj(irg, block, l_res, l_res_mode, pn_ia32_l_Load_res);
541 /* load hight part of the result */
542 h_res = new_rd_ia32_l_Load(dbg, irg, block, frame, mem);
543 set_ia32_frame_ent(h_res, ent);
544 add_ia32_am_offs_int(h_res, gp_bytes);
545 set_ia32_use_frame(h_res);
546 set_ia32_ls_mode(h_res, h_res_mode);
547 h_res = new_r_Proj(irg, block, h_res, h_res_mode, pn_ia32_l_Load_res);
550 resolve_call(call, l_res, h_res, irg, block);
553 /* We have a Conv long long -> float here */
554 ir_node *a_l = params[BINOP_Left_Low];
555 ir_node *a_h = params[BINOP_Left_High];
556 ir_mode *mode_a_l = get_irn_mode(a_l);
557 ir_mode *mode_a_h = get_irn_mode(a_h);
558 ir_mode *fres_mode = get_type_mode(get_method_res_type(method, 0));
560 assert(! mode_is_float(mode_a_l) && ! mode_is_float(mode_a_h) && "unexpected Conv call");
562 /* allocate memory on frame to store args */
563 ent = env->irg == irg ? env->ll_d_conv : NULL;
565 ent = env->ll_d_conv = frame_alloc_area(get_irg_frame_type(irg), 2 * gp_bytes, 16, 0);
570 frame = get_irg_frame(irg);
572 /* store first arg (low part) */
573 store_l = new_rd_ia32_l_Store(dbg, irg, block, frame, a_l, get_irg_no_mem(irg));
574 set_ia32_frame_ent(store_l, ent);
575 set_ia32_use_frame(store_l);
576 set_ia32_ls_mode(store_l, get_irn_mode(a_l));
579 /* store second arg (high part) */
580 store_h = new_rd_ia32_l_Store(dbg, irg, block, frame, a_h, get_irg_no_mem(irg));
581 set_ia32_frame_ent(store_h, ent);
582 add_ia32_am_offs_int(store_h, gp_bytes);
583 set_ia32_use_frame(store_h);
584 set_ia32_ls_mode(store_h, get_irn_mode(a_h));
587 mem = new_r_Sync(irg, block, 2, op_mem);
589 /* Load arg into x87 FPU (implicit convert) */
590 fres = new_rd_ia32_l_vfild(dbg, irg, block, frame, mem);
591 set_ia32_frame_ent(fres, ent);
592 set_ia32_use_frame(fres);
593 set_ia32_ls_mode(fres, mode_D);
594 mem = new_r_Proj(irg, block, fres, mode_M, pn_ia32_l_vfild_M);
595 fres = new_r_Proj(irg, block, fres, fres_mode, pn_ia32_l_vfild_res);
598 Now we create a node to move the loaded value into a XMM
599 register because it is unknown here, which FPU is used.
600 This node is killed in transformation phase when not needed.
601 Otherwise it is split up into a fst + movsd
603 fres = new_rd_ia32_l_X87toSSE(dbg, irg, block, frame, fres, mem, fres_mode);
604 set_ia32_frame_ent(fres, ent);
605 set_ia32_use_frame(fres);
606 set_ia32_ls_mode(fres, fres_mode);
609 resolve_call(call, fres, NULL, irg, block);
612 assert(0 && "unexpected Conv call");
618 /* Ia32 implementation of intrinsic mapping. */
619 ir_entity *ia32_create_intrinsic_fkt(ir_type *method, const ir_op *op,
620 const ir_mode *imode, const ir_mode *omode,
624 ir_entity **ent = NULL;
625 i_mapper_func mapper;
628 intrinsics = NEW_ARR_F(i_record, 0);
630 switch (get_op_code(op)) {
632 ent = &i_ents[iro_Add];
636 ent = &i_ents[iro_Sub];
640 ent = &i_ents[iro_Shl];
644 ent = &i_ents[iro_Shr];
648 ent = &i_ents[iro_Shrs];
652 ent = &i_ents[iro_Mul];
656 ent = &i_ents[iro_Minus];
660 ent = &i_ents[iro_Abs];
664 ent = &i_ents[iro_Div];
668 ent = &i_ents[iro_Mod];
672 ent = &i_ents[iro_Conv];
676 fprintf(stderr, "FIXME: unhandled op for ia32 intrinsic function %s\n", get_id_str(op->name));
677 return def_create_intrinsic_fkt(method, op, imode, omode, context);
681 #define IDENT(s) new_id_from_chars(s, sizeof(s)-1)
683 ident *id = mangle(IDENT("L"), get_op_ident(op));
684 *ent = new_entity(get_glob_type(), id, method);
687 elt.i_call.kind = INTRINSIC_CALL;
688 elt.i_call.i_ent = *ent;
689 elt.i_call.i_mapper = mapper;
690 elt.i_call.ctx = context;
691 elt.i_call.link = NULL;
693 ARR_APP1(i_record, intrinsics, elt);