* Copyright (C) 1995-2007 University of Karlsruhe. All rights reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief This file implements the mapping of 64Bit intrinsic
23 * functions to code or library calls.
24 * @author Michael Beck
39 #include "ia32_new_nodes.h"
40 #include "bearch_ia32_t.h"
41 #include "gen_ia32_regalloc_if.h"
43 /** The array of all intrinsics that must be mapped. */
44 static i_record *intrinsics;
46 /** An array to cache all entities */
47 static ir_entity *i_ents[iro_MaxOpcode];
50 * Maps all intrinsic calls that the backend support
51 * and map all instructions the backend did not support
54 void ia32_handle_intrinsics(void) {
55 if (intrinsics && ARR_LEN(intrinsics) > 0)
56 lower_intrinsics(intrinsics, ARR_LEN(intrinsics));
59 #define BINOP_Left_Low 0
60 #define BINOP_Left_High 1
61 #define BINOP_Right_Low 2
62 #define BINOP_Right_High 3
65 * Replace a call be a tuple of l_res, h_res.
67 static void resolve_call(ir_node *call, ir_node *l_res, ir_node *h_res, ir_graph *irg, ir_node *block) {
72 res = new_r_Tuple(irg, block, h_res == NULL ? 1 : 2, in);
74 turn_into_tuple(call, pn_Call_max);
75 set_Tuple_pred(call, pn_Call_M_regular, get_irg_no_mem(irg));
76 set_Tuple_pred(call, pn_Call_X_regular, new_r_Jmp(irg, block));
77 set_Tuple_pred(call, pn_Call_X_except, get_irg_bad(irg));
78 set_Tuple_pred(call, pn_Call_T_result, res);
79 set_Tuple_pred(call, pn_Call_M_except, get_irg_no_mem(irg));
80 set_Tuple_pred(call, pn_Call_P_value_res_base, get_irg_bad(irg));
84 * Map an Add (a_l, a_h, b_l, b_h)
86 static int map_Add(ir_node *call, void *ctx) {
87 ir_graph *irg = current_ir_graph;
88 dbg_info *dbg = get_irn_dbg_info(call);
89 ir_node *block = get_nodes_block(call);
90 ir_node **params = get_Call_param_arr(call);
91 ir_type *method = get_Call_type(call);
92 ir_node *a_l = params[BINOP_Left_Low];
93 ir_node *a_h = params[BINOP_Left_High];
94 ir_node *b_l = params[BINOP_Right_Low];
95 ir_node *b_h = params[BINOP_Right_High];
96 ir_mode *l_res_mode = get_type_mode(get_method_res_type(method, 0));
97 ir_mode *h_res_mode = get_type_mode(get_method_res_type(method, 1));
98 ir_node *l_res, *h_res, *add;
100 /* l_res = a_l + b_l */
101 /* h_res = a_h + b_h + carry */
103 add = new_rd_ia32_Add64Bit(dbg, irg, block, a_l, a_h, b_l, b_h);
104 l_res = new_r_Proj(irg, block, add, l_res_mode, pn_ia32_Add64Bit_low_res);
105 h_res = new_r_Proj(irg, block, add, h_res_mode, pn_ia32_Add64Bit_high_res);
107 resolve_call(call, l_res, h_res, irg, block);
112 * Map a Sub (a_l, a_h, b_l, b_h)
114 static int map_Sub(ir_node *call, void *ctx) {
115 ir_graph *irg = current_ir_graph;
116 dbg_info *dbg = get_irn_dbg_info(call);
117 ir_node *block = get_nodes_block(call);
118 ir_node **params = get_Call_param_arr(call);
119 ir_type *method = get_Call_type(call);
120 ir_node *a_l = params[BINOP_Left_Low];
121 ir_node *a_h = params[BINOP_Left_High];
122 ir_node *b_l = params[BINOP_Right_Low];
123 ir_node *b_h = params[BINOP_Right_High];
124 ir_mode *l_res_mode = get_type_mode(get_method_res_type(method, 0));
125 ir_mode *h_res_mode = get_type_mode(get_method_res_type(method, 1));
126 ir_node *l_res, *h_res, *res;
128 /* l_res = a_l - b_l */
129 /* h_res = a_h - b_h - carry */
131 res = new_rd_ia32_Sub64Bit(dbg, irg, block, a_l, a_h, b_l, b_h);
132 l_res = new_r_Proj(irg, block, res, l_res_mode, pn_ia32_Sub64Bit_low_res);
133 h_res = new_r_Proj(irg, block, res, h_res_mode, pn_ia32_Sub64Bit_high_res);
135 resolve_call(call, l_res, h_res, irg, block);
140 * Map a Shl (a_l, a_h, count)
142 static int map_Shl(ir_node *call, void *ctx) {
143 ir_graph *irg = current_ir_graph;
144 dbg_info *dbg = get_irn_dbg_info(call);
145 ir_node *block = get_nodes_block(call);
146 ir_node **params = get_Call_param_arr(call);
147 ir_type *method = get_Call_type(call);
148 ir_node *a_l = params[BINOP_Left_Low];
149 ir_node *a_h = params[BINOP_Left_High];
150 ir_node *cnt = params[BINOP_Right_Low];
151 ir_mode *l_res_mode = get_type_mode(get_method_res_type(method, 0));
152 ir_mode *h_res_mode = get_type_mode(get_method_res_type(method, 1));
153 ir_node *l_res, *h_res;
155 /* h_res = SHLD a_h, a_l, cnt */
156 h_res = new_rd_ia32_l_ShlD(dbg, irg, block, a_h, a_l, cnt, l_res_mode);
158 /* l_res = SHL a_l, cnt */
159 l_res = new_rd_ia32_l_Shl(dbg, irg, block, a_l, cnt, h_res_mode);
161 //add_irn_dep(l_res, h_res);
163 resolve_call(call, l_res, h_res, irg, block);
168 * Map a Shr (a_l, a_h, count)
170 static int map_Shr(ir_node *call, void *ctx) {
171 ir_graph *irg = current_ir_graph;
172 dbg_info *dbg = get_irn_dbg_info(call);
173 ir_node *block = get_nodes_block(call);
174 ir_node **params = get_Call_param_arr(call);
175 ir_type *method = get_Call_type(call);
176 ir_node *a_l = params[BINOP_Left_Low];
177 ir_node *a_h = params[BINOP_Left_High];
178 ir_node *cnt = params[BINOP_Right_Low];
179 ir_mode *l_res_mode = get_type_mode(get_method_res_type(method, 0));
180 ir_mode *h_res_mode = get_type_mode(get_method_res_type(method, 1));
181 ir_node *l_res, *h_res;
183 /* l_res = SHRD a_l, a_h, cnt */
184 l_res = new_rd_ia32_l_ShrD(dbg, irg, block, a_l, a_h, cnt, l_res_mode);
186 /* h_res = SHR a_h, cnt */
187 h_res = new_rd_ia32_l_Shr(dbg, irg, block, a_h, cnt, h_res_mode);
189 //add_irn_dep(h_res, l_res);
191 resolve_call(call, l_res, h_res, irg, block);
196 * Map a Shrs (a_l, a_h, count)
198 static int map_Shrs(ir_node *call, void *ctx) {
199 ir_graph *irg = current_ir_graph;
200 dbg_info *dbg = get_irn_dbg_info(call);
201 ir_node *block = get_nodes_block(call);
202 ir_node **params = get_Call_param_arr(call);
203 ir_type *method = get_Call_type(call);
204 ir_node *a_l = params[BINOP_Left_Low];
205 ir_node *a_h = params[BINOP_Left_High];
206 ir_node *cnt = params[BINOP_Right_Low];
207 ir_mode *l_res_mode = get_type_mode(get_method_res_type(method, 0));
208 ir_mode *h_res_mode = get_type_mode(get_method_res_type(method, 1));
209 ir_node *l_res, *h_res;
211 /* l_res = SHRD a_l, a_h, cnt */
212 l_res = new_rd_ia32_l_ShrD(dbg, irg, block, a_l, a_h, cnt, l_res_mode);
214 /* h_res = SAR a_h, cnt */
215 h_res = new_rd_ia32_l_Sar(dbg, irg, block, a_h, cnt, h_res_mode);
217 //add_irn_dep(h_res, l_res);
219 resolve_call(call, l_res, h_res, irg, block);
224 * Map a Mul (a_l, a_h, b_l, b_h)
226 static int map_Mul(ir_node *call, void *ctx) {
227 ir_graph *irg = current_ir_graph;
228 dbg_info *dbg = get_irn_dbg_info(call);
229 ir_node *block = get_nodes_block(call);
230 ir_node **params = get_Call_param_arr(call);
231 ir_type *method = get_Call_type(call);
232 ir_node *a_l = params[BINOP_Left_Low];
233 ir_node *a_h = params[BINOP_Left_High];
234 ir_node *b_l = params[BINOP_Right_Low];
235 ir_node *b_h = params[BINOP_Right_High];
236 ir_mode *l_res_mode = get_type_mode(get_method_res_type(method, 0));
237 ir_mode *h_res_mode = get_type_mode(get_method_res_type(method, 1));
238 ir_node *l_res, *h_res, *mul, *pEDX, *add;
249 mul = new_rd_ia32_l_Mul(dbg, irg, block, a_l, b_l);
250 pEDX = new_rd_Proj(dbg, irg, block, mul, l_res_mode, pn_ia32_l_Mul_EDX);
251 l_res = new_rd_Proj(dbg, irg, block, mul, l_res_mode, pn_ia32_l_Mul_EAX);
253 mul = new_rd_ia32_l_Mul(dbg, irg, block, a_h, b_l);
254 add = new_rd_ia32_l_Add(dbg, irg, block, mul, pEDX, h_res_mode);
255 mul = new_rd_ia32_l_Mul(dbg, irg, block, a_l, b_h);
256 h_res = new_rd_ia32_l_Add(dbg, irg, block, add, mul, h_res_mode);
258 resolve_call(call, l_res, h_res, irg, block);
264 * Map a Minus (a_l, a_h)
266 static int map_Minus(ir_node *call, void *ctx) {
267 ir_graph *irg = current_ir_graph;
268 dbg_info *dbg = get_irn_dbg_info(call);
269 ir_node *block = get_nodes_block(call);
270 ir_node **params = get_Call_param_arr(call);
271 ir_type *method = get_Call_type(call);
272 ir_node *a_l = params[BINOP_Left_Low];
273 ir_node *a_h = params[BINOP_Left_High];
274 ir_mode *l_res_mode = get_type_mode(get_method_res_type(method, 0));
275 ir_mode *h_res_mode = get_type_mode(get_method_res_type(method, 1));
276 ir_node *l_res, *h_res, *cnst, *res;
278 /* too bad: we need 0 in a register here */
279 cnst = new_Const_long(h_res_mode, 0);
281 /* l_res = 0 - a_l */
282 /* h_res = 0 - a_h - carry */
284 res = new_rd_ia32_Minus64Bit(dbg, irg, block, cnst, a_l, a_h);
285 l_res = new_r_Proj(irg, block, res, l_res_mode, pn_ia32_Minus64Bit_low_res);
286 h_res = new_r_Proj(irg, block, res, h_res_mode, pn_ia32_Minus64Bit_high_res);
288 resolve_call(call, l_res, h_res, irg, block);
294 * Map a Abs (a_l, a_h)
296 static int map_Abs(ir_node *call, void *ctx) {
297 ir_graph *irg = current_ir_graph;
298 dbg_info *dbg = get_irn_dbg_info(call);
299 ir_node *block = get_nodes_block(call);
300 ir_node **params = get_Call_param_arr(call);
301 ir_type *method = get_Call_type(call);
302 ir_node *a_l = params[BINOP_Left_Low];
303 ir_node *a_h = params[BINOP_Left_High];
304 ir_mode *l_res_mode = get_type_mode(get_method_res_type(method, 0));
305 ir_mode *h_res_mode = get_type_mode(get_method_res_type(method, 1));
306 ir_node *l_res, *h_res, *sign, *sub_l, *sub_h, *res;
309 Code inspired by gcc output :) (although gcc doubles the
310 operation for t1 as t2 and uses t1 for operations with low part
311 and t2 for operations with high part which is actually unnecessary
312 because t1 and t2 represent the same value)
318 h_res = t3 - t1 - carry
322 sign = new_rd_ia32_l_Sar(dbg, irg, block, a_h, new_Const_long(h_res_mode, 31), h_res_mode);
323 sub_l = new_rd_ia32_l_Xor(dbg, irg, block, a_l, sign, l_res_mode);
324 sub_h = new_rd_ia32_l_Xor(dbg, irg, block, a_h, sign, h_res_mode);
325 res = new_rd_ia32_Sub64Bit(dbg, irg, block, sub_l, sub_h, sign, sign);
326 l_res = new_r_Proj(irg, block, res, l_res_mode, pn_ia32_Sub64Bit_low_res);
327 h_res = new_r_Proj(irg, block, res, h_res_mode, pn_ia32_Sub64Bit_high_res);
329 resolve_call(call, l_res, h_res, irg, block);
337 } ia32_intrinsic_divmod_t;
340 * Maps a Div/Mod (a_l, a_h, b_l, b_h)
342 static int DivMod_mapper(ir_node *call, void *ctx, ia32_intrinsic_divmod_t dmtp) {
343 ia32_intrinsic_env_t *env = ctx;
344 ir_graph *irg = current_ir_graph;
345 dbg_info *dbg = get_irn_dbg_info(call);
346 ir_node *block = get_nodes_block(call);
347 ir_node **params = get_Call_param_arr(call);
348 ir_type *method = get_Call_type(call);
349 ir_node *a_l = params[BINOP_Left_Low];
350 ir_node *a_h = params[BINOP_Left_High];
351 ir_node *b_l = params[BINOP_Right_Low];
352 ir_node *b_h = params[BINOP_Right_High];
353 ir_mode *l_res_mode = get_type_mode(get_method_res_type(method, 0));
354 ir_mode *h_res_mode = get_type_mode(get_method_res_type(method, 1));
355 int mode_bytes = get_mode_size_bytes(ia32_reg_classes[CLASS_ia32_gp].mode);
356 ir_entity *ent_a = env->irg == irg ? env->ll_div_op1 : NULL;
357 ir_entity *ent_b = env->irg == irg ? env->ll_div_op2 : NULL;
358 ir_node *l_res, *h_res, *frame;
359 ir_node *store_l, *store_h;
360 ir_node *op_mem[2], *mem, *fa_mem, *fb_mem;
361 ir_node *fa, *fb, *fres;
363 /* allocate memory on frame to store args */
365 ent_a = env->ll_div_op1 =
366 frame_alloc_area(get_irg_frame_type(irg), 2 * mode_bytes, 16, 0);
371 ent_b = env->ll_div_op2 =
372 frame_alloc_area(get_irg_frame_type(irg), 2 * mode_bytes, 16, 0);
376 frame = get_irg_frame(irg);
378 /* store first arg */
379 store_l = new_rd_ia32_l_Store(dbg, irg, block, frame, a_l, get_irg_no_mem(irg));
380 set_ia32_frame_ent(store_l, ent_a);
381 set_ia32_use_frame(store_l);
382 set_ia32_ls_mode(store_l, get_irn_mode(a_l));
385 store_h = new_rd_ia32_l_Store(dbg, irg, block, frame, a_h, get_irg_no_mem(irg));
386 set_ia32_frame_ent(store_h, ent_a);
387 add_ia32_am_offs_int(store_h, mode_bytes);
388 set_ia32_use_frame(store_h);
389 set_ia32_ls_mode(store_h, get_irn_mode(a_h));
392 mem = new_r_Sync(irg, block, 2, op_mem);
394 /* load first arg into FPU */
395 fa = new_rd_ia32_l_vfild(dbg, irg, block, frame, mem);
396 set_ia32_frame_ent(fa, ent_a);
397 set_ia32_use_frame(fa);
398 set_ia32_ls_mode(fa, mode_D);
399 fa_mem = new_r_Proj(irg, block, fa, mode_M, pn_ia32_l_vfild_M);
400 fa = new_r_Proj(irg, block, fa, mode_E, pn_ia32_l_vfild_res);
402 /* store second arg */
403 store_l = new_rd_ia32_l_Store(dbg, irg, block, frame, b_l, get_irg_no_mem(irg));
404 set_ia32_frame_ent(store_l, ent_b);
405 set_ia32_use_frame(store_l);
406 set_ia32_ls_mode(store_l, get_irn_mode(b_l));
409 store_h = new_rd_ia32_l_Store(dbg, irg, block, frame, b_h, get_irg_no_mem(irg));
410 set_ia32_frame_ent(store_h, ent_b);
411 add_ia32_am_offs_int(store_h, mode_bytes);
412 set_ia32_use_frame(store_h);
413 set_ia32_ls_mode(store_h, get_irn_mode(b_h));
416 mem = new_r_Sync(irg, block, 2, op_mem);
418 /* load second arg into FPU */
419 fb = new_rd_ia32_l_vfild(dbg, irg, block, frame, mem);
420 set_ia32_frame_ent(fb, ent_b);
421 set_ia32_use_frame(fb);
422 set_ia32_ls_mode(fb, mode_D);
423 fb_mem = new_r_Proj(irg, block, fb, mode_M, pn_ia32_l_vfild_M);
424 fb = new_r_Proj(irg, block, fb, mode_E, pn_ia32_l_vfild_res);
429 mem = new_r_Sync(irg, block, 2, op_mem);
431 /* perform division */
433 case IA32_INTRINSIC_DIV:
434 fres = new_rd_ia32_l_vfdiv(dbg, irg, block, fa, fb);
435 fres = new_rd_Proj(dbg, irg, block, fres, mode_E, pn_ia32_l_vfdiv_res);
437 case IA32_INTRINSIC_MOD:
438 fres = new_rd_ia32_l_vfprem(dbg, irg, block, fa, fb, mode_E);
444 /* store back result, we use ent_a here */
445 fres = new_rd_ia32_l_vfist(dbg, irg, block, frame, fres, mem);
446 set_ia32_frame_ent(fres, ent_a);
447 set_ia32_use_frame(fres);
448 set_ia32_ls_mode(fres, mode_D);
451 /* load low part of the result */
452 l_res = new_rd_ia32_l_Load(dbg, irg, block, frame, mem);
453 set_ia32_frame_ent(l_res, ent_a);
454 set_ia32_use_frame(l_res);
455 set_ia32_ls_mode(l_res, l_res_mode);
456 l_res = new_r_Proj(irg, block, l_res, l_res_mode, pn_ia32_l_Load_res);
458 /* load hight part of the result */
459 h_res = new_rd_ia32_l_Load(dbg, irg, block, frame, mem);
460 set_ia32_frame_ent(h_res, ent_a);
461 add_ia32_am_offs_int(h_res, mode_bytes);
462 set_ia32_use_frame(h_res);
463 set_ia32_ls_mode(h_res, h_res_mode);
464 h_res = new_r_Proj(irg, block, h_res, h_res_mode, pn_ia32_l_Load_res);
467 resolve_call(call, l_res, h_res, irg, block);
472 static int map_Div(ir_node *call, void *ctx) {
473 return DivMod_mapper(call, ctx, IA32_INTRINSIC_DIV);
476 static int map_Mod(ir_node *call, void *ctx) {
477 return DivMod_mapper(call, ctx, IA32_INTRINSIC_MOD);
481 * Maps a Conv (a_l, a_h)
483 static int map_Conv(ir_node *call, void *ctx) {
484 ia32_intrinsic_env_t *env = ctx;
485 ir_graph *irg = current_ir_graph;
486 dbg_info *dbg = get_irn_dbg_info(call);
487 ir_node *block = get_nodes_block(call);
488 ir_node **params = get_Call_param_arr(call);
489 ir_type *method = get_Call_type(call);
490 int n = get_Call_n_params(call);
491 int gp_bytes = get_mode_size_bytes(ia32_reg_classes[CLASS_ia32_gp].mode);
493 ir_node *l_res, *h_res, *frame, *fres;
494 ir_node *store_l, *store_h;
495 ir_node *op_mem[2], *mem;
498 /* We have a Conv float -> long long here */
499 ir_node *a_f = params[0];
500 ir_mode *l_res_mode = get_type_mode(get_method_res_type(method, 0));
501 ir_mode *h_res_mode = get_type_mode(get_method_res_type(method, 1));
503 assert(mode_is_float(get_irn_mode(a_f)) && "unexpected Conv call");
505 /* allocate memory on frame to store args */
506 ent = env->irg == irg ? env->d_ll_conv : NULL;
508 ent = env->d_ll_conv = frame_alloc_area(get_irg_frame_type(irg), 2 * gp_bytes, 16, 0);
513 frame = get_irg_frame(irg);
516 Now we create a node to move the value from a XMM register into
517 x87 FPU because it is unknown here, which FPU is used.
518 This node is killed in transformation phase when not needed.
519 Otherwise it is split up into a movsd + fld
521 a_f = new_rd_ia32_l_SSEtoX87(dbg, irg, block, frame, a_f, get_irg_no_mem(irg), mode_D);
522 set_ia32_frame_ent(a_f, ent);
523 set_ia32_use_frame(a_f);
524 set_ia32_ls_mode(a_f, mode_D);
526 /* store from FPU as Int */
527 a_f = new_rd_ia32_l_vfist(dbg, irg, block, frame, a_f, get_irg_no_mem(irg));
528 set_ia32_frame_ent(a_f, ent);
529 set_ia32_use_frame(a_f);
530 set_ia32_ls_mode(a_f, mode_D);
533 /* load low part of the result */
534 l_res = new_rd_ia32_l_Load(dbg, irg, block, frame, mem);
535 set_ia32_frame_ent(l_res, ent);
536 set_ia32_use_frame(l_res);
537 set_ia32_ls_mode(l_res, l_res_mode);
538 l_res = new_r_Proj(irg, block, l_res, l_res_mode, pn_ia32_l_Load_res);
540 /* load hight part of the result */
541 h_res = new_rd_ia32_l_Load(dbg, irg, block, frame, mem);
542 set_ia32_frame_ent(h_res, ent);
543 add_ia32_am_offs_int(h_res, gp_bytes);
544 set_ia32_use_frame(h_res);
545 set_ia32_ls_mode(h_res, h_res_mode);
546 h_res = new_r_Proj(irg, block, h_res, h_res_mode, pn_ia32_l_Load_res);
549 resolve_call(call, l_res, h_res, irg, block);
552 /* We have a Conv long long -> float here */
553 ir_node *a_l = params[BINOP_Left_Low];
554 ir_node *a_h = params[BINOP_Left_High];
555 ir_mode *mode_a_l = get_irn_mode(a_l);
556 ir_mode *mode_a_h = get_irn_mode(a_h);
557 ir_mode *fres_mode = get_type_mode(get_method_res_type(method, 0));
559 assert(! mode_is_float(mode_a_l) && ! mode_is_float(mode_a_h) && "unexpected Conv call");
561 /* allocate memory on frame to store args */
562 ent = env->irg == irg ? env->ll_d_conv : NULL;
564 ent = env->ll_d_conv = frame_alloc_area(get_irg_frame_type(irg), 2 * gp_bytes, 16, 0);
569 frame = get_irg_frame(irg);
571 /* store first arg (low part) */
572 store_l = new_rd_ia32_l_Store(dbg, irg, block, frame, a_l, get_irg_no_mem(irg));
573 set_ia32_frame_ent(store_l, ent);
574 set_ia32_use_frame(store_l);
575 set_ia32_ls_mode(store_l, get_irn_mode(a_l));
578 /* store second arg (high part) */
579 store_h = new_rd_ia32_l_Store(dbg, irg, block, frame, a_h, get_irg_no_mem(irg));
580 set_ia32_frame_ent(store_h, ent);
581 add_ia32_am_offs_int(store_h, gp_bytes);
582 set_ia32_use_frame(store_h);
583 set_ia32_ls_mode(store_h, get_irn_mode(a_h));
586 mem = new_r_Sync(irg, block, 2, op_mem);
588 /* Load arg into x87 FPU (implicit convert) */
589 fres = new_rd_ia32_l_vfild(dbg, irg, block, frame, mem);
590 set_ia32_frame_ent(fres, ent);
591 set_ia32_use_frame(fres);
592 set_ia32_ls_mode(fres, mode_D);
593 mem = new_r_Proj(irg, block, fres, mode_M, pn_ia32_l_vfild_M);
594 fres = new_r_Proj(irg, block, fres, fres_mode, pn_ia32_l_vfild_res);
597 Now we create a node to move the loaded value into a XMM
598 register because it is unknown here, which FPU is used.
599 This node is killed in transformation phase when not needed.
600 Otherwise it is split up into a fst + movsd
602 fres = new_rd_ia32_l_X87toSSE(dbg, irg, block, frame, fres, mem, fres_mode);
603 set_ia32_frame_ent(fres, ent);
604 set_ia32_use_frame(fres);
605 set_ia32_ls_mode(fres, fres_mode);
608 resolve_call(call, fres, NULL, irg, block);
611 assert(0 && "unexpected Conv call");
617 /* Ia32 implementation of intrinsic mapping. */
618 ir_entity *ia32_create_intrinsic_fkt(ir_type *method, const ir_op *op,
619 const ir_mode *imode, const ir_mode *omode,
623 ir_entity **ent = NULL;
624 i_mapper_func mapper;
627 intrinsics = NEW_ARR_F(i_record, 0);
629 switch (get_op_code(op)) {
631 ent = &i_ents[iro_Add];
635 ent = &i_ents[iro_Sub];
639 ent = &i_ents[iro_Shl];
643 ent = &i_ents[iro_Shr];
647 ent = &i_ents[iro_Shrs];
651 ent = &i_ents[iro_Mul];
655 ent = &i_ents[iro_Minus];
659 ent = &i_ents[iro_Abs];
663 ent = &i_ents[iro_Div];
667 ent = &i_ents[iro_Mod];
671 ent = &i_ents[iro_Conv];
675 fprintf(stderr, "FIXME: unhandled op for ia32 intrinsic function %s\n", get_id_str(op->name));
676 return def_create_intrinsic_fkt(method, op, imode, omode, context);
680 #define IDENT(s) new_id_from_chars(s, sizeof(s)-1)
682 ident *id = mangle(IDENT("L"), get_op_ident(op));
683 *ent = new_entity(get_glob_type(), id, method);
686 elt.i_call.kind = INTRINSIC_CALL;
687 elt.i_call.i_ent = *ent;
688 elt.i_call.i_mapper = mapper;
689 elt.i_call.ctx = context;
690 elt.i_call.link = NULL;
692 ARR_APP1(i_record, intrinsics, elt);