2 * Copyright (C) 1995-2007 University of Karlsruhe. All right reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief This file implements the mapping of 64Bit intrinsic
23 * functions to code or library calls.
24 * @author Michael Beck
39 #include "ia32_new_nodes.h"
40 #include "bearch_ia32_t.h"
41 #include "gen_ia32_regalloc_if.h"
43 /** The array of all intrinsics that must be mapped. */
44 static i_record *intrinsics;
46 /** An array to cache all entities */
47 static ir_entity *i_ents[iro_MaxOpcode];
50 * Maps all intrinsic calls that the backend supports
51 * and maps all instructions the backend did not support
/**
 * Runs the intrinsic lowering pass over the 'intrinsics' array (filled by
 * ia32_create_intrinsic_fkt), replacing each recorded intrinsic call by
 * backend code or a library call.
 */
54 void ia32_handle_intrinsics(void) {
55 if (intrinsics && ARR_LEN(intrinsics) > 0)
56 lower_intrinsics(intrinsics, ARR_LEN(intrinsics));
59 #define BINOP_Left_Low 0
60 #define BINOP_Left_High 1
61 #define BINOP_Right_Low 2
62 #define BINOP_Right_High 3
65 * Replace a call by a tuple of l_res, h_res.
/**
 * Replaces an intrinsic Call node by a Tuple so that all Projs of the
 * former Call are rewired: the result Proj points to the (l_res, h_res)
 * tuple, both memory Projs point to NoMem, the regular control-flow Proj
 * to a fresh Jmp and the remaining exception/value-base Projs to Bad.
 * If h_res is NULL, only a single (low) result is produced.
 */
67 static void resolve_call(ir_node *call, ir_node *l_res, ir_node *h_res, ir_graph *irg, ir_node *block) {
/* NOTE(review): the declarations of 'res' and 'in' are on lines not visible
 * in this chunk; 'in' presumably holds { l_res, h_res } — confirm upstream. */
72 res = new_r_Tuple(irg, block, h_res == NULL ? 1 : 2, in);
74 turn_into_tuple(call, pn_Call_max);
/* the lowered code has no memory side effects of its own */
75 set_Tuple_pred(call, pn_Call_M_regular, get_irg_no_mem(irg));
/* the call can no longer raise an exception */
76 set_Tuple_pred(call, pn_Call_X_regular, new_r_Jmp(irg, block));
77 set_Tuple_pred(call, pn_Call_X_except, get_irg_bad(irg));
78 set_Tuple_pred(call, pn_Call_T_result, res);
79 set_Tuple_pred(call, pn_Call_M_except, get_irg_no_mem(irg));
80 set_Tuple_pred(call, pn_Call_P_value_res_base, get_irg_bad(irg));
84 * Map an Add (a_l, a_h, b_l, b_h)
/**
 * Maps a 64-bit Add intrinsic call (a_l, a_h, b_l, b_h) onto a single
 * ia32 Add64Bit backend node and rewires the Call to Projs of its
 * low/high results.
 *
 * @param call  the intrinsic Call node to lower
 * @param ctx   mapper context (unused here)
 */
86 static int map_Add(ir_node *call, void *ctx) {
87 ir_graph *irg = current_ir_graph;
88 dbg_info *dbg = get_irn_dbg_info(call);
89 ir_node *block = get_nodes_block(call);
90 ir_node **params = get_Call_param_arr(call);
91 ir_type *method = get_Call_type(call);
/* unpack the four word-sized call arguments */
92 ir_node *a_l = params[BINOP_Left_Low];
93 ir_node *a_h = params[BINOP_Left_High];
94 ir_node *b_l = params[BINOP_Right_Low];
95 ir_node *b_h = params[BINOP_Right_High];
/* result modes come from the two return slots of the call type */
96 ir_mode *l_res_mode = get_type_mode(get_method_res_type(method, 0));
97 ir_mode *h_res_mode = get_type_mode(get_method_res_type(method, 1));
98 ir_node *l_res, *h_res, *add;
101 /* l_res = a_l + b_l */
102 /* h_res = a_h + b_h + carry */
104 add = new_rd_ia32_Add64Bit(dbg, irg, block, a_l, a_h, b_l, b_h);
105 l_res = new_r_Proj(irg, block, add, l_res_mode, pn_ia32_Add64Bit_low_res);
106 h_res = new_r_Proj(irg, block, add, h_res_mode, pn_ia32_Add64Bit_high_res);
108 resolve_call(call, l_res, h_res, irg, block);
113 * Map a Sub (a_l, a_h, b_l, b_h)
/**
 * Maps a 64-bit Sub intrinsic call (a_l, a_h, b_l, b_h) onto a single
 * ia32 Sub64Bit backend node and rewires the Call to Projs of its
 * low/high results.
 *
 * @param call  the intrinsic Call node to lower
 * @param ctx   mapper context (unused here)
 */
115 static int map_Sub(ir_node *call, void *ctx) {
116 ir_graph *irg = current_ir_graph;
117 dbg_info *dbg = get_irn_dbg_info(call);
118 ir_node *block = get_nodes_block(call);
119 ir_node **params = get_Call_param_arr(call);
120 ir_type *method = get_Call_type(call);
121 ir_node *a_l = params[BINOP_Left_Low];
122 ir_node *a_h = params[BINOP_Left_High];
123 ir_node *b_l = params[BINOP_Right_Low];
124 ir_node *b_h = params[BINOP_Right_High];
125 ir_mode *l_res_mode = get_type_mode(get_method_res_type(method, 0));
126 ir_mode *h_res_mode = get_type_mode(get_method_res_type(method, 1));
127 ir_node *l_res, *h_res, *res;
130 /* l_res = a_l - b_l */
131 /* h_res = a_h - b_h - carry */
133 res = new_rd_ia32_Sub64Bit(dbg, irg, block, a_l, a_h, b_l, b_h);
134 l_res = new_r_Proj(irg, block, res, l_res_mode, pn_ia32_Sub64Bit_low_res);
135 h_res = new_r_Proj(irg, block, res, h_res_mode, pn_ia32_Sub64Bit_high_res);
137 resolve_call(call, l_res, h_res, irg, block);
142 * Map a Shl (a_l, a_h, count)
/**
 * Maps a 64-bit Shl intrinsic call (a_l, a_h, count) onto SHLD/SHL:
 * the high word is produced by a double-precision shift that pulls bits
 * in from the low word; the low word by a plain shift.
 *
 * @param call  the intrinsic Call node to lower
 * @param ctx   mapper context (unused here)
 */
144 static int map_Shl(ir_node *call, void *ctx) {
145 ir_graph *irg = current_ir_graph;
146 dbg_info *dbg = get_irn_dbg_info(call);
147 ir_node *block = get_nodes_block(call);
148 ir_node **params = get_Call_param_arr(call);
149 ir_type *method = get_Call_type(call);
150 ir_node *a_l = params[BINOP_Left_Low];
151 ir_node *a_h = params[BINOP_Left_High];
/* the shift count is passed in the third (low) argument slot */
152 ir_node *cnt = params[BINOP_Right_Low];
153 ir_mode *l_res_mode = get_type_mode(get_method_res_type(method, 0));
154 ir_mode *h_res_mode = get_type_mode(get_method_res_type(method, 1));
155 ir_node *l_res, *h_res;
158 /* h_res = SHLD a_h, a_l, cnt */
/* NOTE(review): h_res is built with l_res_mode and l_res with h_res_mode —
 * looks swapped; harmless if both result modes are identical, but confirm. */
159 h_res = new_rd_ia32_l_ShlD(dbg, irg, block, a_h, a_l, cnt, l_res_mode);
161 /* l_res = SHL a_l, cnt */
162 l_res = new_rd_ia32_l_Shl(dbg, irg, block, a_l, cnt, h_res_mode);
164 //add_irn_dep(l_res, h_res);
166 resolve_call(call, l_res, h_res, irg, block);
171 * Map a Shr (a_l, a_h, count)
/**
 * Maps a 64-bit unsigned Shr intrinsic call (a_l, a_h, count) onto
 * SHRD/SHR: the low word is produced by a double-precision right shift
 * that pulls bits in from the high word; the high word by a plain
 * logical shift (zero fill).
 *
 * @param call  the intrinsic Call node to lower
 * @param ctx   mapper context (unused here)
 */
173 static int map_Shr(ir_node *call, void *ctx) {
174 ir_graph *irg = current_ir_graph;
175 dbg_info *dbg = get_irn_dbg_info(call);
176 ir_node *block = get_nodes_block(call);
177 ir_node **params = get_Call_param_arr(call);
178 ir_type *method = get_Call_type(call);
179 ir_node *a_l = params[BINOP_Left_Low];
180 ir_node *a_h = params[BINOP_Left_High];
181 ir_node *cnt = params[BINOP_Right_Low];
182 ir_mode *l_res_mode = get_type_mode(get_method_res_type(method, 0));
183 ir_mode *h_res_mode = get_type_mode(get_method_res_type(method, 1));
184 ir_node *l_res, *h_res;
187 /* l_res = SHRD a_l, a_h, cnt */
188 l_res = new_rd_ia32_l_ShrD(dbg, irg, block, a_l, a_h, cnt, l_res_mode);
190 /* h_res = SHR a_h, cnt */
191 h_res = new_rd_ia32_l_Shr(dbg, irg, block, a_h, cnt, h_res_mode);
193 //add_irn_dep(h_res, l_res);
195 resolve_call(call, l_res, h_res, irg, block);
200 * Map a Shrs (a_l, a_h, count)
/**
 * Maps a 64-bit signed Shrs intrinsic call (a_l, a_h, count) onto
 * SHRD/SAR: identical to map_Shr except the high word uses an arithmetic
 * shift so the sign bit is replicated.
 *
 * @param call  the intrinsic Call node to lower
 * @param ctx   mapper context (unused here)
 */
202 static int map_Shrs(ir_node *call, void *ctx) {
203 ir_graph *irg = current_ir_graph;
204 dbg_info *dbg = get_irn_dbg_info(call);
205 ir_node *block = get_nodes_block(call);
206 ir_node **params = get_Call_param_arr(call);
207 ir_type *method = get_Call_type(call);
208 ir_node *a_l = params[BINOP_Left_Low];
209 ir_node *a_h = params[BINOP_Left_High];
210 ir_node *cnt = params[BINOP_Right_Low];
211 ir_mode *l_res_mode = get_type_mode(get_method_res_type(method, 0));
212 ir_mode *h_res_mode = get_type_mode(get_method_res_type(method, 1));
213 ir_node *l_res, *h_res;
216 /* l_res = SHRD a_l, a_h, cnt */
217 l_res = new_rd_ia32_l_ShrD(dbg, irg, block, a_l, a_h, cnt, l_res_mode);
219 /* h_res = SAR a_h, cnt */
220 h_res = new_rd_ia32_l_Sar(dbg, irg, block, a_h, cnt, h_res_mode);
222 //add_irn_dep(h_res, l_res);
224 resolve_call(call, l_res, h_res, irg, block);
229 * Map a Mul (a_l, a_h, b_l, b_h)
/**
 * Maps a 64-bit Mul intrinsic call (a_l, a_h, b_l, b_h) onto the classic
 * schoolbook decomposition:
 *   l_res = low32(a_l * b_l)
 *   h_res = a_h*b_l + a_l*b_h + high32(a_l * b_l)
 * The a_h*b_h term is dropped because it only contributes above bit 63.
 *
 * @param call  the intrinsic Call node to lower
 * @param ctx   mapper context (unused here)
 */
231 static int map_Mul(ir_node *call, void *ctx) {
232 ir_graph *irg = current_ir_graph;
233 dbg_info *dbg = get_irn_dbg_info(call);
234 ir_node *block = get_nodes_block(call);
235 ir_node **params = get_Call_param_arr(call);
236 ir_type *method = get_Call_type(call);
237 ir_node *a_l = params[BINOP_Left_Low];
238 ir_node *a_h = params[BINOP_Left_High];
239 ir_node *b_l = params[BINOP_Right_Low];
240 ir_node *b_h = params[BINOP_Right_High];
241 ir_mode *l_res_mode = get_type_mode(get_method_res_type(method, 0));
242 ir_mode *h_res_mode = get_type_mode(get_method_res_type(method, 1));
243 ir_node *l_res, *h_res, *mul, *pEDX, *add;
/* widening MUL a_l * b_l: EAX = low word, EDX = high word */
255 mul = new_rd_ia32_l_Mul(dbg, irg, block, a_l, b_l);
256 pEDX = new_rd_Proj(dbg, irg, block, mul, l_res_mode, pn_ia32_l_Mul_EDX);
257 l_res = new_rd_Proj(dbg, irg, block, mul, l_res_mode, pn_ia32_l_Mul_EAX);
/* accumulate the two cross products into the high word */
259 mul = new_rd_ia32_l_Mul(dbg, irg, block, a_h, b_l);
260 add = new_rd_ia32_l_Add(dbg, irg, block, mul, pEDX, h_res_mode);
261 mul = new_rd_ia32_l_Mul(dbg, irg, block, a_l, b_h);
262 h_res = new_rd_ia32_l_Add(dbg, irg, block, add, mul, h_res_mode);
264 resolve_call(call, l_res, h_res, irg, block);
270 * Map a Minus (a_l, a_h)
/**
 * Maps a 64-bit Minus intrinsic call (a_l, a_h) onto an ia32 Minus64Bit
 * node computing 0 - (a_h:a_l) with borrow propagation.
 *
 * @param call  the intrinsic Call node to lower
 * @param ctx   mapper context (unused here)
 */
272 static int map_Minus(ir_node *call, void *ctx) {
273 ir_graph *irg = current_ir_graph;
274 dbg_info *dbg = get_irn_dbg_info(call);
275 ir_node *block = get_nodes_block(call);
276 ir_node **params = get_Call_param_arr(call);
277 ir_type *method = get_Call_type(call);
278 ir_node *a_l = params[BINOP_Left_Low];
279 ir_node *a_h = params[BINOP_Left_High];
280 ir_mode *l_res_mode = get_type_mode(get_method_res_type(method, 0));
281 ir_mode *h_res_mode = get_type_mode(get_method_res_type(method, 1));
282 ir_node *l_res, *h_res, *cnst, *res;
285 /* too bad: we need 0 in a register here */
286 cnst = new_Const_long(h_res_mode, 0);
288 /* l_res = 0 - a_l */
289 /* h_res = 0 - a_h - carry */
291 res = new_rd_ia32_Minus64Bit(dbg, irg, block, cnst, a_l, a_h);
292 l_res = new_r_Proj(irg, block, res, l_res_mode, pn_ia32_Minus64Bit_low_res);
293 h_res = new_r_Proj(irg, block, res, h_res_mode, pn_ia32_Minus64Bit_high_res);
295 resolve_call(call, l_res, h_res, irg, block);
301 * Map a Abs (a_l, a_h)
/**
 * Maps a 64-bit Abs intrinsic call (a_l, a_h) using the branch-free
 * sign-mask trick:
 *   sign  = a_h >> 31            (all-ones if negative, zero otherwise)
 *   l/h   = (a XOR sign) - sign  (i.e. conditional two's-complement)
 *
 * @param call  the intrinsic Call node to lower
 * @param ctx   mapper context (unused here)
 */
303 static int map_Abs(ir_node *call, void *ctx) {
304 ir_graph *irg = current_ir_graph;
305 dbg_info *dbg = get_irn_dbg_info(call);
306 ir_node *block = get_nodes_block(call);
307 ir_node **params = get_Call_param_arr(call);
308 ir_type *method = get_Call_type(call);
309 ir_node *a_l = params[BINOP_Left_Low];
310 ir_node *a_h = params[BINOP_Left_High];
311 ir_mode *l_res_mode = get_type_mode(get_method_res_type(method, 0));
312 ir_mode *h_res_mode = get_type_mode(get_method_res_type(method, 1));
313 ir_node *l_res, *h_res, *sign, *sub_l, *sub_h, *res;
317 Code inspired by gcc output :) (although gcc doubles the
318 operation for t1 as t2 and uses t1 for operations with low part
319 and t2 for operations with high part which is actually unnecessary
320 because t1 and t2 represent the same value)
326 h_res = t3 - t1 - carry
/* arithmetic shift by 31 replicates the sign bit into a full-word mask */
330 sign = new_rd_ia32_l_Sar(dbg, irg, block, a_h, new_Const_long(h_res_mode, 31), h_res_mode);
331 sub_l = new_rd_ia32_l_Xor(dbg, irg, block, a_l, sign, l_res_mode);
332 sub_h = new_rd_ia32_l_Xor(dbg, irg, block, a_h, sign, h_res_mode);
/* 64-bit subtract of the sign mask finishes the conditional negation */
333 res = new_rd_ia32_Sub64Bit(dbg, irg, block, sub_l, sub_h, sign, sign);
334 l_res = new_r_Proj(irg, block, res, l_res_mode, pn_ia32_Sub64Bit_low_res);
335 h_res = new_r_Proj(irg, block, res, h_res_mode, pn_ia32_Sub64Bit_high_res);
337 resolve_call(call, l_res, h_res, irg, block);
345 } ia32_intrinsic_divmod_t;
348 * Maps a Div/Mod (a_l, a_h, b_l, b_h)
/**
 * Maps a 64-bit Div or Mod intrinsic call (a_l, a_h, b_l, b_h) by going
 * through the x87 FPU: both operands are spilled to frame slots, loaded as
 * doubles (vfild), divided (vfdiv) or remaindered (vfprem), the result is
 * stored back as a 64-bit integer (vfist) and reloaded as two 32-bit words.
 * The frame entities are cached per-irg in the mapper environment.
 *
 * NOTE(review): several scaffolding lines (the if-guards around the
 * frame_alloc_area calls, the op_mem[] assignments feeding the Syncs, and
 * the switch statement around the DIV/MOD cases) are not visible in this
 * chunk of the file.
 *
 * @param call  the intrinsic Call node to lower
 * @param ctx   the ia32_intrinsic_env_t holding cached frame entities
 * @param dmtp  selects IA32_INTRINSIC_DIV or IA32_INTRINSIC_MOD
 */
350 static int DivMod_mapper(ir_node *call, void *ctx, ia32_intrinsic_divmod_t dmtp) {
351 ia32_intrinsic_env_t *env = ctx;
352 ir_graph *irg = current_ir_graph;
353 dbg_info *dbg = get_irn_dbg_info(call);
354 ir_node *block = get_nodes_block(call);
355 ir_node **params = get_Call_param_arr(call);
356 ir_type *method = get_Call_type(call);
357 ir_node *a_l = params[BINOP_Left_Low];
358 ir_node *a_h = params[BINOP_Left_High];
359 ir_node *b_l = params[BINOP_Right_Low];
360 ir_node *b_h = params[BINOP_Right_High];
361 ir_mode *l_res_mode = get_type_mode(get_method_res_type(method, 0));
362 ir_mode *h_res_mode = get_type_mode(get_method_res_type(method, 1));
363 int mode_bytes = get_mode_size_bytes(ia32_reg_classes[CLASS_ia32_gp].mode);
/* reuse cached frame entities only when they belong to this irg */
364 ir_entity *ent_a = env->irg == irg ? env->ll_div_op1 : NULL;
365 ir_entity *ent_b = env->irg == irg ? env->ll_div_op2 : NULL;
366 ir_node *l_res, *h_res, *frame;
367 ir_node *store_l, *store_h;
368 ir_node *op_mem[2], *mem, *fa_mem, *fb_mem;
369 ir_node *fa, *fb, *fres;
371 /* allocate memory on frame to store args */
373 ent_a = env->ll_div_op1 =
374 frame_alloc_area(get_irg_frame_type(irg), 2 * mode_bytes, 16, 0);
379 ent_b = env->ll_div_op2 =
380 frame_alloc_area(get_irg_frame_type(irg), 2 * mode_bytes, 16, 0);
384 frame = get_irg_frame(irg);
386 /* store first arg */
387 store_l = new_rd_ia32_l_Store(dbg, irg, block, frame, a_l, get_irg_no_mem(irg));
388 set_ia32_frame_ent(store_l, ent_a);
389 set_ia32_use_frame(store_l);
390 set_ia32_ls_mode(store_l, get_irn_mode(a_l));
/* high word goes one gp-word above the low word in the same entity */
393 store_h = new_rd_ia32_l_Store(dbg, irg, block, frame, a_h, get_irg_no_mem(irg));
394 set_ia32_frame_ent(store_h, ent_a);
395 add_ia32_am_offs_int(store_h, mode_bytes);
396 set_ia32_use_frame(store_h);
397 set_ia32_ls_mode(store_h, get_irn_mode(a_h));
/* op_mem presumably holds the two store memory Projs — assignments not visible here */
400 mem = new_r_Sync(irg, block, 2, op_mem);
402 /* load first arg into FPU */
403 fa = new_rd_ia32_l_vfild(dbg, irg, block, frame, mem);
404 set_ia32_frame_ent(fa, ent_a);
405 set_ia32_use_frame(fa);
406 set_ia32_ls_mode(fa, mode_D);
407 fa_mem = new_r_Proj(irg, block, fa, mode_M, pn_ia32_l_vfild_M);
408 fa = new_r_Proj(irg, block, fa, mode_E, pn_ia32_l_vfild_res);
410 /* store second arg */
411 store_l = new_rd_ia32_l_Store(dbg, irg, block, frame, b_l, get_irg_no_mem(irg));
412 set_ia32_frame_ent(store_l, ent_b);
413 set_ia32_use_frame(store_l);
414 set_ia32_ls_mode(store_l, get_irn_mode(b_l));
417 store_h = new_rd_ia32_l_Store(dbg, irg, block, frame, b_h, get_irg_no_mem(irg));
418 set_ia32_frame_ent(store_h, ent_b);
419 add_ia32_am_offs_int(store_h, mode_bytes);
420 set_ia32_use_frame(store_h);
421 set_ia32_ls_mode(store_h, get_irn_mode(b_h));
424 mem = new_r_Sync(irg, block, 2, op_mem);
426 /* load second arg into FPU */
427 fb = new_rd_ia32_l_vfild(dbg, irg, block, frame, mem);
428 set_ia32_frame_ent(fb, ent_b);
429 set_ia32_use_frame(fb);
430 set_ia32_ls_mode(fb, mode_D);
431 fb_mem = new_r_Proj(irg, block, fb, mode_M, pn_ia32_l_vfild_M);
432 fb = new_r_Proj(irg, block, fb, mode_E, pn_ia32_l_vfild_res);
/* join the two load memories before the arithmetic */
437 mem = new_r_Sync(irg, block, 2, op_mem);
439 /* perform division */
441 case IA32_INTRINSIC_DIV:
442 fres = new_rd_ia32_l_vfdiv(dbg, irg, block, fa, fb);
443 fres = new_rd_Proj(dbg, irg, block, fres, mode_E, pn_ia32_l_vfdiv_res);
445 case IA32_INTRINSIC_MOD:
446 fres = new_rd_ia32_l_vfprem(dbg, irg, block, fa, fb, mode_E);
452 /* store back result, we use ent_a here */
453 fres = new_rd_ia32_l_vfist(dbg, irg, block, frame, fres, mem);
454 set_ia32_frame_ent(fres, ent_a);
455 set_ia32_use_frame(fres);
456 set_ia32_ls_mode(fres, mode_D);
459 /* load low part of the result */
460 l_res = new_rd_ia32_l_Load(dbg, irg, block, frame, mem);
461 set_ia32_frame_ent(l_res, ent_a);
462 set_ia32_use_frame(l_res);
463 set_ia32_ls_mode(l_res, l_res_mode);
464 l_res = new_r_Proj(irg, block, l_res, l_res_mode, pn_ia32_l_Load_res);
466 /* load high part of the result */
467 h_res = new_rd_ia32_l_Load(dbg, irg, block, frame, mem);
468 set_ia32_frame_ent(h_res, ent_a);
469 add_ia32_am_offs_int(h_res, mode_bytes);
470 set_ia32_use_frame(h_res);
471 set_ia32_ls_mode(h_res, h_res_mode);
472 h_res = new_r_Proj(irg, block, h_res, h_res_mode, pn_ia32_l_Load_res);
475 resolve_call(call, l_res, h_res, irg, block);
/** Maps a 64-bit Div intrinsic by delegating to DivMod_mapper with the DIV selector. */
480 static int map_Div(ir_node *call, void *ctx) {
481 return DivMod_mapper(call, ctx, IA32_INTRINSIC_DIV);
/** Maps a 64-bit Mod intrinsic by delegating to DivMod_mapper with the MOD selector. */
484 static int map_Mod(ir_node *call, void *ctx) {
485 return DivMod_mapper(call, ctx, IA32_INTRINSIC_MOD);
489 * Maps a Conv (a_l, a_h)
/**
 * Maps a Conv intrinsic in either direction between float and long long,
 * going through frame slots and the x87 FPU:
 *  - float -> long long: move the value into the x87 FPU (SSEtoX87 glue
 *    node), store it as a 64-bit integer (vfist), reload as two 32-bit words;
 *  - long long -> float: store both words to the frame, load as double
 *    (vfild, converting implicitly), then move into an XMM register
 *    (X87toSSE glue node).
 * The direction is presumably chosen by the number/modes of call
 * parameters; the branching lines are not visible in this chunk, nor are
 * the declarations of 'ent' and the op_mem[] assignments.
 *
 * @param call  the intrinsic Call node to lower
 * @param ctx   the ia32_intrinsic_env_t holding cached frame entities
 */
491 static int map_Conv(ir_node *call, void *ctx) {
492 ia32_intrinsic_env_t *env = ctx;
493 ir_graph *irg = current_ir_graph;
494 dbg_info *dbg = get_irn_dbg_info(call);
495 ir_node *block = get_nodes_block(call);
496 ir_node **params = get_Call_param_arr(call);
497 ir_type *method = get_Call_type(call);
498 int n = get_Call_n_params(call);
499 int gp_bytes = get_mode_size_bytes(ia32_reg_classes[CLASS_ia32_gp].mode);
501 ir_node *l_res, *h_res, *frame, *fres;
502 ir_node *store_l, *store_h;
503 ir_node *op_mem[2], *mem;
506 /* We have a Conv float -> long long here */
507 ir_node *a_f = params[0];
508 ir_mode *l_res_mode = get_type_mode(get_method_res_type(method, 0));
509 ir_mode *h_res_mode = get_type_mode(get_method_res_type(method, 1));
511 assert(mode_is_float(get_irn_mode(a_f)) && "unexpected Conv call");
513 /* allocate memory on frame to store args */
/* reuse the cached double->ll conversion slot only for this irg */
514 ent = env->irg == irg ? env->d_ll_conv : NULL;
516 ent = env->d_ll_conv = frame_alloc_area(get_irg_frame_type(irg), 2 * gp_bytes, 16, 0);
521 frame = get_irg_frame(irg);
524 Now we create a node to move the value from a XMM register into
525 x87 FPU because it is unknown here, which FPU is used.
526 This node is killed in transformation phase when not needed.
527 Otherwise it is split up into a movsd + fld
529 a_f = new_rd_ia32_l_SSEtoX87(dbg, irg, block, frame, a_f, get_irg_no_mem(irg), mode_D);
530 set_ia32_frame_ent(a_f, ent);
531 set_ia32_use_frame(a_f);
532 set_ia32_ls_mode(a_f, mode_D);
534 /* store from FPU as Int */
535 a_f = new_rd_ia32_l_vfist(dbg, irg, block, frame, a_f, get_irg_no_mem(irg));
536 set_ia32_frame_ent(a_f, ent);
537 set_ia32_use_frame(a_f);
538 set_ia32_ls_mode(a_f, mode_D);
541 /* load low part of the result */
542 l_res = new_rd_ia32_l_Load(dbg, irg, block, frame, mem);
543 set_ia32_frame_ent(l_res, ent);
544 set_ia32_use_frame(l_res);
545 set_ia32_ls_mode(l_res, l_res_mode);
546 l_res = new_r_Proj(irg, block, l_res, l_res_mode, pn_ia32_l_Load_res);
548 /* load high part of the result */
549 h_res = new_rd_ia32_l_Load(dbg, irg, block, frame, mem);
550 set_ia32_frame_ent(h_res, ent);
551 add_ia32_am_offs_int(h_res, gp_bytes);
552 set_ia32_use_frame(h_res);
553 set_ia32_ls_mode(h_res, h_res_mode);
554 h_res = new_r_Proj(irg, block, h_res, h_res_mode, pn_ia32_l_Load_res);
557 resolve_call(call, l_res, h_res, irg, block);
560 /* We have a Conv long long -> float here */
561 ir_node *a_l = params[BINOP_Left_Low];
562 ir_node *a_h = params[BINOP_Left_High];
563 ir_mode *mode_a_l = get_irn_mode(a_l);
564 ir_mode *mode_a_h = get_irn_mode(a_h);
565 ir_mode *fres_mode = get_type_mode(get_method_res_type(method, 0));
567 assert(! mode_is_float(mode_a_l) && ! mode_is_float(mode_a_h) && "unexpected Conv call");
569 /* allocate memory on frame to store args */
570 ent = env->irg == irg ? env->ll_d_conv : NULL;
572 ent = env->ll_d_conv = frame_alloc_area(get_irg_frame_type(irg), 2 * gp_bytes, 16, 0);
577 frame = get_irg_frame(irg);
579 /* store first arg (low part) */
580 store_l = new_rd_ia32_l_Store(dbg, irg, block, frame, a_l, get_irg_no_mem(irg));
581 set_ia32_frame_ent(store_l, ent);
582 set_ia32_use_frame(store_l);
583 set_ia32_ls_mode(store_l, get_irn_mode(a_l));
586 /* store second arg (high part) */
587 store_h = new_rd_ia32_l_Store(dbg, irg, block, frame, a_h, get_irg_no_mem(irg));
588 set_ia32_frame_ent(store_h, ent);
589 add_ia32_am_offs_int(store_h, gp_bytes);
590 set_ia32_use_frame(store_h);
591 set_ia32_ls_mode(store_h, get_irn_mode(a_h));
/* op_mem presumably holds the two store memory Projs — assignments not visible here */
594 mem = new_r_Sync(irg, block, 2, op_mem);
596 /* Load arg into x87 FPU (implicit convert) */
597 fres = new_rd_ia32_l_vfild(dbg, irg, block, frame, mem);
598 set_ia32_frame_ent(fres, ent);
599 set_ia32_use_frame(fres);
600 set_ia32_ls_mode(fres, mode_D);
601 mem = new_r_Proj(irg, block, fres, mode_M, pn_ia32_l_vfild_M);
602 fres = new_r_Proj(irg, block, fres, fres_mode, pn_ia32_l_vfild_res);
605 Now we create a node to move the loaded value into a XMM
606 register because it is unknown here, which FPU is used.
607 This node is killed in transformation phase when not needed.
608 Otherwise it is split up into a fst + movsd
610 fres = new_rd_ia32_l_X87toSSE(dbg, irg, block, frame, fres, mem, fres_mode);
611 set_ia32_frame_ent(fres, ent);
612 set_ia32_use_frame(fres);
613 set_ia32_ls_mode(fres, fres_mode);
/* only a single float result — no high word */
616 resolve_call(call, fres, NULL, irg, block);
619 assert(0 && "unexpected Conv call");
625 /* Ia32 implementation of intrinsic mapping. */
626 ir_entity *ia32_create_intrinsic_fkt(ir_type *method, const ir_op *op,
627 const ir_mode *imode, const ir_mode *omode,
631 ir_entity **ent = NULL;
632 i_mapper_func mapper;
635 intrinsics = NEW_ARR_F(i_record, 0);
637 switch (get_op_code(op)) {
639 ent = &i_ents[iro_Add];
643 ent = &i_ents[iro_Sub];
647 ent = &i_ents[iro_Shl];
651 ent = &i_ents[iro_Shr];
655 ent = &i_ents[iro_Shrs];
659 ent = &i_ents[iro_Mul];
663 ent = &i_ents[iro_Minus];
667 ent = &i_ents[iro_Abs];
671 ent = &i_ents[iro_Div];
675 ent = &i_ents[iro_Mod];
679 ent = &i_ents[iro_Conv];
683 fprintf(stderr, "FIXME: unhandled op for ia32 intrinsic function %s\n", get_id_str(op->name));
684 return def_create_intrinsic_fkt(method, op, imode, omode, context);
688 #define IDENT(s) new_id_from_chars(s, sizeof(s)-1)
690 ident *id = mangle(IDENT("L"), get_op_ident(op));
691 *ent = new_entity(get_glob_type(), id, method);
694 elt.i_call.kind = INTRINSIC_CALL;
695 elt.i_call.i_ent = *ent;
696 elt.i_call.i_mapper = mapper;
697 elt.i_call.ctx = context;
698 elt.i_call.link = NULL;
700 ARR_APP1(i_record, intrinsics, elt);