2 * Copyright (C) 1995-2007 University of Karlsruhe. All right reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief This file implements the mapping of 64Bit intrinsic
23 * functions to code or library calls.
24 * @author Michael Beck
36 #include "lower_intrinsics.h"
41 #include "ia32_new_nodes.h"
42 #include "bearch_ia32_t.h"
43 #include "gen_ia32_regalloc_if.h"
45 /** The array of all intrinsics that must be mapped. */
/* NOTE(review): allocated lazily as a flexible array via NEW_ARR_F and
 * appended to with ARR_APP1 in ia32_create_intrinsic_fkt below. */
46 static i_record *intrinsics;
48 /** An array to cache all entities */
/* One cached entity per Firm opcode, so each intrinsic entity is created
 * only once per opcode (see ia32_create_intrinsic_fkt). */
49 static ir_entity *i_ents[iro_MaxOpcode];
52 * Maps all intrinsic calls that the backend supports
53 * and maps all instructions the backend does not support
/*
 * Runs the generic intrinsic lowering pass over all records collected in
 * the module-level 'intrinsics' array. A no-op when no intrinsic was
 * registered.
 * NOTE(review): the closing brace of this function is elided in this excerpt.
 */
56 void ia32_handle_intrinsics(void) {
57 if (intrinsics && ARR_LEN(intrinsics) > 0)
58 lower_intrinsics(intrinsics, ARR_LEN(intrinsics));
/* Parameter indices of a lowered 64-bit binary operation call:
 * the left operand is split into (low, high) followed by the right
 * operand split the same way. Unary ops use only the first two. */
61 #define BINOP_Left_Low 0
62 #define BINOP_Left_High 1
63 #define BINOP_Right_Low 2
64 #define BINOP_Right_High 3
67 * Replace a call by a tuple of l_res, h_res.
/*
 * Turns the given Call node into a Tuple so that all Projs hanging off the
 * call are rerouted: memory becomes NoMem, regular control flow becomes a
 * fresh Jmp, the exception path is dead (Bad), and the result Proj points
 * at a Tuple of (l_res [, h_res]).
 *
 * NOTE(review): the declarations of 'res' and the 'in[]' array filled with
 * l_res/h_res are elided in this excerpt (original lines ~70-75).
 * When h_res is NULL only a 1-entry tuple is built (single result).
 */
69 static void resolve_call(ir_node *call, ir_node *l_res, ir_node *h_res, ir_graph *irg, ir_node *block) {
74 res = new_r_Tuple(irg, block, h_res == NULL ? 1 : 2, in);
76 turn_into_tuple(call, pn_Call_max);
77 set_Tuple_pred(call, pn_Call_M_regular, get_irg_no_mem(irg));
78 set_Tuple_pred(call, pn_Call_X_regular, new_r_Jmp(irg, block));
/* the lowered operation cannot raise an exception */
79 set_Tuple_pred(call, pn_Call_X_except, get_irg_bad(irg));
80 set_Tuple_pred(call, pn_Call_T_result, res);
81 set_Tuple_pred(call, pn_Call_M_except, get_irg_no_mem(irg));
82 set_Tuple_pred(call, pn_Call_P_value_res_base, get_irg_bad(irg));
86 * Map an Add (a_l, a_h, b_l, b_h)
/*
 * Replaces a 64-bit add intrinsic call by an ia32 Add64Bit pseudo node and
 * Projs for the low and high result word.
 * NOTE(review): the trailing "return 1;" and closing brace are elided in
 * this excerpt — TODO confirm against the full source.
 */
88 static int map_Add(ir_node *call, void *ctx) {
89 ir_graph *irg = current_ir_graph;
90 dbg_info *dbg = get_irn_dbg_info(call);
91 ir_node *block = get_nodes_block(call);
92 ir_node **params = get_Call_param_arr(call);
93 ir_type *method = get_Call_type(call);
94 ir_node *a_l = params[BINOP_Left_Low];
95 ir_node *a_h = params[BINOP_Left_High];
96 ir_node *b_l = params[BINOP_Right_Low];
97 ir_node *b_h = params[BINOP_Right_High];
98 ir_mode *l_res_mode = get_type_mode(get_method_res_type(method, 0));
99 ir_mode *h_res_mode = get_type_mode(get_method_res_type(method, 1));
100 ir_node *l_res, *h_res, *add;
102 /* l_res = a_l + b_l */
103 /* h_res = a_h + b_h + carry */
105 add = new_rd_ia32_Add64Bit(dbg, irg, block, a_l, a_h, b_l, b_h);
106 l_res = new_r_Proj(irg, block, add, l_res_mode, pn_ia32_Add64Bit_low_res);
107 h_res = new_r_Proj(irg, block, add, h_res_mode, pn_ia32_Add64Bit_high_res);
109 resolve_call(call, l_res, h_res, irg, block);
114 * Map a Sub (a_l, a_h, b_l, b_h)
/*
 * Replaces a 64-bit subtract intrinsic call by an ia32 Sub64Bit pseudo node
 * and Projs for the low and high result word.
 * NOTE(review): trailing "return 1;" and closing brace elided in this excerpt.
 */
116 static int map_Sub(ir_node *call, void *ctx) {
117 ir_graph *irg = current_ir_graph;
118 dbg_info *dbg = get_irn_dbg_info(call);
119 ir_node *block = get_nodes_block(call);
120 ir_node **params = get_Call_param_arr(call);
121 ir_type *method = get_Call_type(call);
122 ir_node *a_l = params[BINOP_Left_Low];
123 ir_node *a_h = params[BINOP_Left_High];
124 ir_node *b_l = params[BINOP_Right_Low];
125 ir_node *b_h = params[BINOP_Right_High];
126 ir_mode *l_res_mode = get_type_mode(get_method_res_type(method, 0));
127 ir_mode *h_res_mode = get_type_mode(get_method_res_type(method, 1));
128 ir_node *l_res, *h_res, *res;
130 /* l_res = a_l - b_l */
131 /* h_res = a_h - b_h - carry */
133 res = new_rd_ia32_Sub64Bit(dbg, irg, block, a_l, a_h, b_l, b_h);
134 l_res = new_r_Proj(irg, block, res, l_res_mode, pn_ia32_Sub64Bit_low_res);
135 h_res = new_r_Proj(irg, block, res, h_res_mode, pn_ia32_Sub64Bit_high_res);
137 resolve_call(call, l_res, h_res, irg, block);
142 * Map a Shl (a_l, a_h, count)
/*
 * Replaces a 64-bit left-shift intrinsic by an SHLD/SHL pair: SHLD shifts
 * bits from the low word into the high word, SHL produces the low word.
 * NOTE(review): this models shifts of count < 32; the handling of
 * count >= 32 is presumably done inside the l_ShlD/l_Shl lowering —
 * TODO confirm. Trailing "return 1;" and closing brace elided here.
 */
144 static int map_Shl(ir_node *call, void *ctx) {
145 ir_graph *irg = current_ir_graph;
146 dbg_info *dbg = get_irn_dbg_info(call);
147 ir_node *block = get_nodes_block(call);
148 ir_node **params = get_Call_param_arr(call);
149 ir_type *method = get_Call_type(call);
150 ir_node *a_l = params[BINOP_Left_Low];
151 ir_node *a_h = params[BINOP_Left_High];
152 ir_node *cnt = params[BINOP_Right_Low];
153 ir_mode *l_res_mode = get_type_mode(get_method_res_type(method, 0));
154 ir_mode *h_res_mode = get_type_mode(get_method_res_type(method, 1));
155 ir_node *l_res, *h_res;
157 /* h_res = SHLD a_h, a_l, cnt */
158 h_res = new_rd_ia32_l_ShlD(dbg, irg, block, a_h, a_l, cnt, l_res_mode);
160 /* l_res = SHL a_l, cnt */
161 l_res = new_rd_ia32_l_Shl(dbg, irg, block, a_l, cnt, h_res_mode);
163 //add_irn_dep(l_res, h_res);
165 resolve_call(call, l_res, h_res, irg, block);
170 * Map a Shr (a_l, a_h, count)
/*
 * Replaces a 64-bit unsigned right-shift intrinsic by an SHRD/SHR pair:
 * SHRD shifts bits from the high word into the low word, SHR zero-fills
 * the high word.
 * NOTE(review): trailing "return 1;" and closing brace elided here.
 */
172 static int map_Shr(ir_node *call, void *ctx) {
173 ir_graph *irg = current_ir_graph;
174 dbg_info *dbg = get_irn_dbg_info(call);
175 ir_node *block = get_nodes_block(call);
176 ir_node **params = get_Call_param_arr(call);
177 ir_type *method = get_Call_type(call);
178 ir_node *a_l = params[BINOP_Left_Low];
179 ir_node *a_h = params[BINOP_Left_High];
180 ir_node *cnt = params[BINOP_Right_Low];
181 ir_mode *l_res_mode = get_type_mode(get_method_res_type(method, 0));
182 ir_mode *h_res_mode = get_type_mode(get_method_res_type(method, 1));
183 ir_node *l_res, *h_res;
185 /* l_res = SHRD a_l, a_h, cnt */
186 l_res = new_rd_ia32_l_ShrD(dbg, irg, block, a_l, a_h, cnt, l_res_mode);
188 /* h_res = SHR a_h, cnt */
189 h_res = new_rd_ia32_l_Shr(dbg, irg, block, a_h, cnt, h_res_mode);
191 //add_irn_dep(h_res, l_res);
193 resolve_call(call, l_res, h_res, irg, block);
198 * Map a Shrs (a_l, a_h, count)
/*
 * Replaces a 64-bit arithmetic (signed) right-shift intrinsic by an
 * SHRD/SAR pair: identical to map_Shr except the high word uses SAR so
 * the sign bit is replicated instead of zero-filled.
 * NOTE(review): trailing "return 1;" and closing brace elided here.
 */
200 static int map_Shrs(ir_node *call, void *ctx) {
201 ir_graph *irg = current_ir_graph;
202 dbg_info *dbg = get_irn_dbg_info(call);
203 ir_node *block = get_nodes_block(call);
204 ir_node **params = get_Call_param_arr(call);
205 ir_type *method = get_Call_type(call);
206 ir_node *a_l = params[BINOP_Left_Low];
207 ir_node *a_h = params[BINOP_Left_High];
208 ir_node *cnt = params[BINOP_Right_Low];
209 ir_mode *l_res_mode = get_type_mode(get_method_res_type(method, 0));
210 ir_mode *h_res_mode = get_type_mode(get_method_res_type(method, 1));
211 ir_node *l_res, *h_res;
213 /* l_res = SHRD a_l, a_h, cnt */
214 l_res = new_rd_ia32_l_ShrD(dbg, irg, block, a_l, a_h, cnt, l_res_mode);
216 /* h_res = SAR a_h, cnt */
217 h_res = new_rd_ia32_l_Sar(dbg, irg, block, a_h, cnt, h_res_mode);
219 //add_irn_dep(h_res, l_res);
221 resolve_call(call, l_res, h_res, irg, block);
226 * Map a Mul (a_l, a_h, b_l, b_h)
/*
 * Replaces a 64-bit multiply intrinsic by the schoolbook decomposition
 * using three 32x32 multiplies:
 *   l_res =  low32(a_l * b_l)
 *   h_res = high32(a_l * b_l) + a_h * b_l + a_l * b_h
 * The a_h*b_h partial product only contributes to bits >= 64 and is
 * therefore dropped.
 * NOTE(review): an explanatory comment block and the trailing
 * "return 1;"/closing brace are elided in this excerpt.
 */
228 static int map_Mul(ir_node *call, void *ctx) {
229 ir_graph *irg = current_ir_graph;
230 dbg_info *dbg = get_irn_dbg_info(call);
231 ir_node *block = get_nodes_block(call);
232 ir_node **params = get_Call_param_arr(call);
233 ir_type *method = get_Call_type(call);
234 ir_node *a_l = params[BINOP_Left_Low];
235 ir_node *a_h = params[BINOP_Left_High];
236 ir_node *b_l = params[BINOP_Right_Low];
237 ir_node *b_h = params[BINOP_Right_High];
238 ir_mode *l_res_mode = get_type_mode(get_method_res_type(method, 0));
239 ir_mode *h_res_mode = get_type_mode(get_method_res_type(method, 1));
240 ir_node *l_res, *h_res, *mul, *pEDX, *add;
/* a_l * b_l: widening multiply, EDX holds the high half, EAX the low half */
251 mul = new_rd_ia32_l_Mul(dbg, irg, block, a_l, b_l);
252 pEDX = new_rd_Proj(dbg, irg, block, mul, l_res_mode, pn_ia32_l_Mul_EDX);
253 l_res = new_rd_Proj(dbg, irg, block, mul, l_res_mode, pn_ia32_l_Mul_EAX);
/* accumulate the two cross products into the high word */
255 mul = new_rd_ia32_l_Mul(dbg, irg, block, a_h, b_l);
256 add = new_rd_ia32_l_Add(dbg, irg, block, mul, pEDX, h_res_mode);
257 mul = new_rd_ia32_l_Mul(dbg, irg, block, a_l, b_h);
258 h_res = new_rd_ia32_l_Add(dbg, irg, block, add, mul, h_res_mode);
260 resolve_call(call, l_res, h_res, irg, block);
266 * Map a Minus (a_l, a_h)
/*
 * Replaces a 64-bit negate intrinsic by an ia32 Minus64Bit pseudo node
 * computing 0 - (a_h:a_l) with borrow propagation.
 * NOTE(review): trailing "return 1;" and closing brace elided here.
 */
268 static int map_Minus(ir_node *call, void *ctx) {
269 ir_graph *irg = current_ir_graph;
270 dbg_info *dbg = get_irn_dbg_info(call);
271 ir_node *block = get_nodes_block(call);
272 ir_node **params = get_Call_param_arr(call);
273 ir_type *method = get_Call_type(call);
274 ir_node *a_l = params[BINOP_Left_Low];
275 ir_node *a_h = params[BINOP_Left_High];
276 ir_mode *l_res_mode = get_type_mode(get_method_res_type(method, 0));
277 ir_mode *h_res_mode = get_type_mode(get_method_res_type(method, 1));
278 ir_node *l_res, *h_res, *cnst, *res;
280 /* too bad: we need 0 in a register here */
281 cnst = new_Const_long(h_res_mode, 0);
283 /* l_res = 0 - a_l */
284 /* h_res = 0 - a_h - carry */
286 res = new_rd_ia32_Minus64Bit(dbg, irg, block, cnst, a_l, a_h);
287 l_res = new_r_Proj(irg, block, res, l_res_mode, pn_ia32_Minus64Bit_low_res);
288 h_res = new_r_Proj(irg, block, res, h_res_mode, pn_ia32_Minus64Bit_high_res);
290 resolve_call(call, l_res, h_res, irg, block);
296 * Map a Abs (a_l, a_h)
/*
 * Replaces a 64-bit absolute-value intrinsic with the branch-free idiom
 *   sign  = a_h >> 31            (arithmetic shift: all-ones iff a < 0)
 *   abs   = (a ^ sign) - sign    (two's complement: negates iff a < 0)
 * carried out across both 32-bit halves via Sub64Bit.
 * NOTE(review): part of the original explanatory comment and the trailing
 * "return 1;"/closing brace are elided in this excerpt.
 */
298 static int map_Abs(ir_node *call, void *ctx) {
299 ir_graph *irg = current_ir_graph;
300 dbg_info *dbg = get_irn_dbg_info(call);
301 ir_node *block = get_nodes_block(call);
302 ir_node **params = get_Call_param_arr(call);
303 ir_type *method = get_Call_type(call);
304 ir_node *a_l = params[BINOP_Left_Low];
305 ir_node *a_h = params[BINOP_Left_High];
306 ir_mode *l_res_mode = get_type_mode(get_method_res_type(method, 0));
307 ir_mode *h_res_mode = get_type_mode(get_method_res_type(method, 1));
308 ir_node *l_res, *h_res, *sign, *sub_l, *sub_h, *res;
311 Code inspired by gcc output :) (although gcc doubles the
312 operation for t1 as t2 and uses t1 for operations with low part
313 and t2 for operations with high part which is actually unnecessary
314 because t1 and t2 represent the same value)
320 h_res = t3 - t1 - carry
/* sign-mask of the whole 64-bit value: 0 or 0xFFFFFFFF */
324 sign = new_rd_ia32_l_Sar(dbg, irg, block, a_h, new_Const_long(h_res_mode, 31), h_res_mode);
/* conditional one's complement of both halves */
325 sub_l = new_rd_ia32_l_Xor(dbg, irg, block, a_l, sign, l_res_mode);
326 sub_h = new_rd_ia32_l_Xor(dbg, irg, block, a_h, sign, h_res_mode);
/* subtract the mask: adds 1 exactly when the value was negative */
327 res = new_rd_ia32_Sub64Bit(dbg, irg, block, sub_l, sub_h, sign, sign);
328 l_res = new_r_Proj(irg, block, res, l_res_mode, pn_ia32_Sub64Bit_low_res);
329 h_res = new_r_Proj(irg, block, res, h_res_mode, pn_ia32_Sub64Bit_high_res);
331 resolve_call(call, l_res, h_res, irg, block);
339 } ia32_intrinsic_divmod_t;
342 * Maps a Div/Mod (a_l, a_h, b_l, b_h)
/*
 * Common worker for map_Div/map_Mod: implements 64-bit division/modulo by
 * spilling both operands to the frame, loading them into the x87 FPU
 * (vfild does an implicit int64 -> float conversion), computing
 * fdiv or fprem there, and storing the integer result back (vfist) so the
 * low and high 32-bit words can be reloaded into GP registers.
 *
 * 'dmtp' selects division vs. remainder. The frame areas for the two
 * operands are cached per-graph in the ia32_intrinsic_env_t.
 *
 * NOTE(review): several lines are elided in this excerpt: the NULL-guards
 * around the frame_alloc_area calls, the op_mem[] element assignments that
 * feed the Sync nodes, the mem Proj after the vfist store, the switch(dmtp)
 * header with its break statements, and the trailing "return 1;"/closing
 * brace — TODO confirm against the full source.
 */
344 static int DivMod_mapper(ir_node *call, void *ctx, ia32_intrinsic_divmod_t dmtp) {
345 ia32_intrinsic_env_t *env = ctx;
346 ir_graph *irg = current_ir_graph;
347 dbg_info *dbg = get_irn_dbg_info(call);
348 ir_node *block = get_nodes_block(call);
349 ir_node **params = get_Call_param_arr(call);
350 ir_type *method = get_Call_type(call);
351 ir_node *a_l = params[BINOP_Left_Low];
352 ir_node *a_h = params[BINOP_Left_High];
353 ir_node *b_l = params[BINOP_Right_Low];
354 ir_node *b_h = params[BINOP_Right_High];
355 ir_mode *l_res_mode = get_type_mode(get_method_res_type(method, 0));
356 ir_mode *h_res_mode = get_type_mode(get_method_res_type(method, 1));
357 int mode_bytes = get_mode_size_bytes(ia32_reg_classes[CLASS_ia32_gp].mode);
/* reuse cached frame entities only when they belong to this graph */
358 ir_entity *ent_a = env->irg == irg ? env->ll_div_op1 : NULL;
359 ir_entity *ent_b = env->irg == irg ? env->ll_div_op2 : NULL;
360 ir_node *l_res, *h_res, *frame;
361 ir_node *store_l, *store_h;
362 ir_node *op_mem[2], *mem, *fa_mem, *fb_mem;
363 ir_node *fa, *fb, *fres;
365 /* allocate memory on frame to store args */
/* NOTE(review): guard "if (! ent_a)" presumably elided here */
367 ent_a = env->ll_div_op1 =
368 frame_alloc_area(get_irg_frame_type(irg), 2 * mode_bytes, 16, 0);
/* NOTE(review): guard "if (! ent_b)" presumably elided here */
373 ent_b = env->ll_div_op2 =
374 frame_alloc_area(get_irg_frame_type(irg), 2 * mode_bytes, 16, 0);
378 frame = get_irg_frame(irg);
380 /* store first arg */
381 store_l = new_rd_ia32_l_Store(dbg, irg, block, frame, a_l, get_irg_no_mem(irg));
382 set_ia32_frame_ent(store_l, ent_a);
383 set_ia32_use_frame(store_l);
384 set_ia32_ls_mode(store_l, get_irn_mode(a_l));
/* high word goes one GP-register width above the low word */
387 store_h = new_rd_ia32_l_Store(dbg, irg, block, frame, a_h, get_irg_no_mem(irg));
388 set_ia32_frame_ent(store_h, ent_a);
389 add_ia32_am_offs_int(store_h, mode_bytes);
390 set_ia32_use_frame(store_h);
391 set_ia32_ls_mode(store_h, get_irn_mode(a_h));
/* NOTE(review): op_mem[0]/op_mem[1] assignments from the two stores elided */
394 mem = new_r_Sync(irg, block, 2, op_mem);
396 /* load first arg into FPU */
397 fa = new_rd_ia32_l_vfild(dbg, irg, block, frame, mem);
398 set_ia32_frame_ent(fa, ent_a);
399 set_ia32_use_frame(fa);
400 set_ia32_ls_mode(fa, mode_D);
401 fa_mem = new_r_Proj(irg, block, fa, mode_M, pn_ia32_l_vfild_M);
402 fa = new_r_Proj(irg, block, fa, mode_E, pn_ia32_l_vfild_res);
404 /* store second arg */
405 store_l = new_rd_ia32_l_Store(dbg, irg, block, frame, b_l, get_irg_no_mem(irg));
406 set_ia32_frame_ent(store_l, ent_b);
407 set_ia32_use_frame(store_l);
408 set_ia32_ls_mode(store_l, get_irn_mode(b_l));
411 store_h = new_rd_ia32_l_Store(dbg, irg, block, frame, b_h, get_irg_no_mem(irg));
412 set_ia32_frame_ent(store_h, ent_b);
413 add_ia32_am_offs_int(store_h, mode_bytes);
414 set_ia32_use_frame(store_h);
415 set_ia32_ls_mode(store_h, get_irn_mode(b_h));
/* NOTE(review): op_mem[] assignments elided before this Sync as well */
418 mem = new_r_Sync(irg, block, 2, op_mem);
420 /* load second arg into FPU */
421 fb = new_rd_ia32_l_vfild(dbg, irg, block, frame, mem);
422 set_ia32_frame_ent(fb, ent_b);
423 set_ia32_use_frame(fb);
424 set_ia32_ls_mode(fb, mode_D);
425 fb_mem = new_r_Proj(irg, block, fb, mode_M, pn_ia32_l_vfild_M);
426 fb = new_r_Proj(irg, block, fb, mode_E, pn_ia32_l_vfild_res);
/* join the memory Projs of both loads (presumably fa_mem/fb_mem) */
431 mem = new_r_Sync(irg, block, 2, op_mem);
433 /* perform division */
/* NOTE(review): "switch (dmtp) {" header and break statements elided */
435 case IA32_INTRINSIC_DIV:
436 fres = new_rd_ia32_l_vfdiv(dbg, irg, block, fa, fb);
437 fres = new_rd_Proj(dbg, irg, block, fres, mode_E, pn_ia32_l_vfdiv_res);
439 case IA32_INTRINSIC_MOD:
/* fprem computes the partial remainder on the x87 stack */
440 fres = new_rd_ia32_l_vfprem(dbg, irg, block, fa, fb, mode_E);
446 /* store back result, we use ent_a here */
447 fres = new_rd_ia32_l_vfist(dbg, irg, block, frame, fres, mem);
448 set_ia32_frame_ent(fres, ent_a);
449 set_ia32_use_frame(fres);
450 set_ia32_ls_mode(fres, mode_D);
/* NOTE(review): "mem = fres;" (memory result of the vfist) presumably elided */
453 /* load low part of the result */
454 l_res = new_rd_ia32_l_Load(dbg, irg, block, frame, mem);
455 set_ia32_frame_ent(l_res, ent_a);
456 set_ia32_use_frame(l_res);
457 set_ia32_ls_mode(l_res, l_res_mode);
458 l_res = new_r_Proj(irg, block, l_res, l_res_mode, pn_ia32_l_Load_res);
460 /* load hight part of the result */
461 h_res = new_rd_ia32_l_Load(dbg, irg, block, frame, mem);
462 set_ia32_frame_ent(h_res, ent_a);
463 add_ia32_am_offs_int(h_res, mode_bytes);
464 set_ia32_use_frame(h_res);
465 set_ia32_ls_mode(h_res, h_res_mode);
466 h_res = new_r_Proj(irg, block, h_res, h_res_mode, pn_ia32_l_Load_res);
469 resolve_call(call, l_res, h_res, irg, block);
/** Maps a 64-bit Div intrinsic; thin wrapper around DivMod_mapper.
 * NOTE(review): closing brace elided in this excerpt. */
474 static int map_Div(ir_node *call, void *ctx) {
475 return DivMod_mapper(call, ctx, IA32_INTRINSIC_DIV);
/** Maps a 64-bit Mod intrinsic; thin wrapper around DivMod_mapper.
 * NOTE(review): closing brace elided in this excerpt. */
478 static int map_Mod(ir_node *call, void *ctx) {
479 return DivMod_mapper(call, ctx, IA32_INTRINSIC_MOD);
483 * Maps a Conv (a_l, a_h)
/*
 * Maps a Conv intrinsic between 64-bit integer and floating point.
 * Dispatches on the number of call parameters:
 *  - one float parameter:  float -> long long; route the value through the
 *    x87 FPU and vfist it to the frame, then reload low/high GP words.
 *  - two int parameters:   long long -> float; store both words to the
 *    frame and vfild them (implicit int64 -> float conversion).
 *
 * NOTE(review): elided in this excerpt: the declarations of 'ent'/the
 * parameter-count branch headers ("if (n == 1) {" / "} else if (n == 2) {"
 * / "} else {"), the NULL-guards around frame_alloc_area, several memory
 * Proj assignments ('mem' is used at original line 536 without a visible
 * definition in this view), op_mem[] element assignments, and the
 * "return 1;"/closing braces — TODO confirm against the full source.
 */
485 static int map_Conv(ir_node *call, void *ctx) {
486 ia32_intrinsic_env_t *env = ctx;
487 ir_graph *irg = current_ir_graph;
488 dbg_info *dbg = get_irn_dbg_info(call);
489 ir_node *block = get_nodes_block(call);
490 ir_node **params = get_Call_param_arr(call);
491 ir_type *method = get_Call_type(call);
492 int n = get_Call_n_params(call);
493 int gp_bytes = get_mode_size_bytes(ia32_reg_classes[CLASS_ia32_gp].mode);
495 ir_node *l_res, *h_res, *frame, *fres;
496 ir_node *store_l, *store_h;
497 ir_node *op_mem[2], *mem;
500 /* We have a Conv float -> long long here */
501 ir_node *a_f = params[0];
502 ir_mode *l_res_mode = get_type_mode(get_method_res_type(method, 0));
503 ir_mode *h_res_mode = get_type_mode(get_method_res_type(method, 1));
505 assert(mode_is_float(get_irn_mode(a_f)) && "unexpected Conv call");
507 /* allocate memory on frame to store args */
/* reuse the cached frame entity only when it belongs to this graph */
508 ent = env->irg == irg ? env->d_ll_conv : NULL;
/* NOTE(review): guard "if (! ent)" presumably elided here */
510 ent = env->d_ll_conv = frame_alloc_area(get_irg_frame_type(irg), 2 * gp_bytes, 16, 0);
515 frame = get_irg_frame(irg);
518 Now we create a node to move the value from a XMM register into
519 x87 FPU because it is unknown here, which FPU is used.
520 This node is killed in transformation phase when not needed.
521 Otherwise it is split up into a movsd + fld
523 a_f = new_rd_ia32_l_SSEtoX87(dbg, irg, block, frame, a_f, get_irg_no_mem(irg), mode_D);
524 set_ia32_frame_ent(a_f, ent);
525 set_ia32_use_frame(a_f);
526 set_ia32_ls_mode(a_f, mode_D);
528 /* store from FPU as Int */
529 a_f = new_rd_ia32_l_vfist(dbg, irg, block, frame, a_f, get_irg_no_mem(irg));
530 set_ia32_frame_ent(a_f, ent);
531 set_ia32_use_frame(a_f);
532 set_ia32_ls_mode(a_f, mode_D);
/* NOTE(review): "mem = a_f;" presumably elided here — 'mem' must carry the
 * vfist store before the reloads below */
535 /* load low part of the result */
536 l_res = new_rd_ia32_l_Load(dbg, irg, block, frame, mem);
537 set_ia32_frame_ent(l_res, ent);
538 set_ia32_use_frame(l_res);
539 set_ia32_ls_mode(l_res, l_res_mode);
540 l_res = new_r_Proj(irg, block, l_res, l_res_mode, pn_ia32_l_Load_res);
542 /* load hight part of the result */
543 h_res = new_rd_ia32_l_Load(dbg, irg, block, frame, mem);
544 set_ia32_frame_ent(h_res, ent);
545 add_ia32_am_offs_int(h_res, gp_bytes);
546 set_ia32_use_frame(h_res);
547 set_ia32_ls_mode(h_res, h_res_mode);
548 h_res = new_r_Proj(irg, block, h_res, h_res_mode, pn_ia32_l_Load_res);
551 resolve_call(call, l_res, h_res, irg, block);
554 /* We have a Conv long long -> float here */
555 ir_node *a_l = params[BINOP_Left_Low];
556 ir_node *a_h = params[BINOP_Left_High];
557 ir_mode *mode_a_l = get_irn_mode(a_l);
558 ir_mode *mode_a_h = get_irn_mode(a_h);
559 ir_mode *fres_mode = get_type_mode(get_method_res_type(method, 0));
561 assert(! mode_is_float(mode_a_l) && ! mode_is_float(mode_a_h) && "unexpected Conv call");
563 /* allocate memory on frame to store args */
564 ent = env->irg == irg ? env->ll_d_conv : NULL;
/* NOTE(review): guard "if (! ent)" presumably elided here */
566 ent = env->ll_d_conv = frame_alloc_area(get_irg_frame_type(irg), 2 * gp_bytes, 16, 0);
571 frame = get_irg_frame(irg);
573 /* store first arg (low part) */
574 store_l = new_rd_ia32_l_Store(dbg, irg, block, frame, a_l, get_irg_no_mem(irg));
575 set_ia32_frame_ent(store_l, ent);
576 set_ia32_use_frame(store_l);
577 set_ia32_ls_mode(store_l, get_irn_mode(a_l));
580 /* store second arg (high part) */
581 store_h = new_rd_ia32_l_Store(dbg, irg, block, frame, a_h, get_irg_no_mem(irg));
582 set_ia32_frame_ent(store_h, ent);
583 add_ia32_am_offs_int(store_h, gp_bytes);
584 set_ia32_use_frame(store_h);
585 set_ia32_ls_mode(store_h, get_irn_mode(a_h));
/* NOTE(review): op_mem[0]/op_mem[1] assignments from the two stores elided */
588 mem = new_r_Sync(irg, block, 2, op_mem);
590 /* Load arg into x87 FPU (implicit convert) */
591 fres = new_rd_ia32_l_vfild(dbg, irg, block, frame, mem);
592 set_ia32_frame_ent(fres, ent);
593 set_ia32_use_frame(fres);
594 set_ia32_ls_mode(fres, mode_D);
595 mem = new_r_Proj(irg, block, fres, mode_M, pn_ia32_l_vfild_M);
596 fres = new_r_Proj(irg, block, fres, fres_mode, pn_ia32_l_vfild_res);
599 Now we create a node to move the loaded value into a XMM
600 register because it is unknown here, which FPU is used.
601 This node is killed in transformation phase when not needed.
602 Otherwise it is split up into a fst + movsd
604 fres = new_rd_ia32_l_X87toSSE(dbg, irg, block, frame, fres, mem, fres_mode);
605 set_ia32_frame_ent(fres, ent);
606 set_ia32_use_frame(fres);
607 set_ia32_ls_mode(fres, fres_mode);
/* single (float) result: pass NULL for the high word */
610 resolve_call(call, fres, NULL, irg, block);
/* unreachable for well-formed intrinsic calls */
613 assert(0 && "unexpected Conv call");
619 /* Ia32 implementation of intrinsic mapping. */
/*
 * Creates (or returns the cached) entity for an intrinsic replacing 'op'
 * on 64-bit operands, and registers an i_record so that
 * ia32_handle_intrinsics() will later lower the generated calls with the
 * matching map_* function. Unhandled opcodes fall back to the default
 * intrinsic factory.
 *
 * NOTE(review): heavily elided in this excerpt: the final parameter
 * (presumably "void *context" — it is used below), the lazy-init guard
 * before NEW_ARR_F, the "case iro_*:" labels with their
 * "mapper = map_*; break;" statements, the guard around entity creation
 * (entities are cached in i_ents), the declaration of 'elt', and the
 * trailing "return *ent;"/closing brace — TODO confirm.
 */
620 ir_entity *ia32_create_intrinsic_fkt(ir_type *method, const ir_op *op,
621 const ir_mode *imode, const ir_mode *omode,
625 ir_entity **ent = NULL;
626 i_mapper_func mapper;
/* lazily create the global record array on first use */
629 intrinsics = NEW_ARR_F(i_record, 0);
631 switch (get_op_code(op)) {
/* one cached entity slot per supported opcode */
633 ent = &i_ents[iro_Add];
637 ent = &i_ents[iro_Sub];
641 ent = &i_ents[iro_Shl];
645 ent = &i_ents[iro_Shr];
649 ent = &i_ents[iro_Shrs];
653 ent = &i_ents[iro_Mul];
657 ent = &i_ents[iro_Minus];
661 ent = &i_ents[iro_Abs];
665 ent = &i_ents[iro_Div];
669 ent = &i_ents[iro_Mod];
673 ent = &i_ents[iro_Conv];
/* anything else: warn and delegate to the generic implementation */
677 fprintf(stderr, "FIXME: unhandled op for ia32 intrinsic function %s\n", get_id_str(op->name));
678 return def_create_intrinsic_fkt(method, op, imode, omode, context);
682 #define IDENT(s) new_id_from_chars(s, sizeof(s)-1)
/* mangle a global name like "L<opname>" for the intrinsic entity */
684 ident *id = mangle(IDENT("L"), get_op_ident(op));
685 *ent = new_entity(get_glob_type(), id, method);
/* register the call-intrinsic record for the lowering pass */
688 elt.i_call.kind = INTRINSIC_CALL;
689 elt.i_call.i_ent = *ent;
690 elt.i_call.i_mapper = mapper;
691 elt.i_call.ctx = context;
692 elt.i_call.link = NULL;
694 ARR_APP1(i_record, intrinsics, elt);