2 * This file implements the mapping of 64Bit intrinsic functions to
3 * code or library calls.
17 #include "lower_intrinsics.h"
22 #include "ia32_new_nodes.h"
23 #include "bearch_ia32_t.h"
24 #include "gen_ia32_regalloc_if.h"
/** The array of all intrinsics that must be mapped. */
static i_record *intrinsics;

/** An array to cache the created entities, one slot per ir opcode. */
static entity *i_ents[iro_MaxOpcode];
33 * Maps all intrinsic calls that the backend support
34 * and map all instructions the backend did not support
37 void ia32_handle_intrinsics(void) {
38 if (intrinsics && ARR_LEN(intrinsics) > 0)
39 lower_intrinsics(intrinsics, ARR_LEN(intrinsics));
/* Positions of the 32bit halves inside the parameter array of a
   lowered 64bit binary operation. */
#define BINOP_Left_Low 0
#define BINOP_Left_High 1
#define BINOP_Right_Low 2
#define BINOP_Right_High 3
47 static void resolve_call(ir_node *call, ir_node *l_res, ir_node *h_res, ir_graph *irg, ir_node *block) {
52 res = new_r_Tuple(irg, block, h_res == NULL ? 1 : 2, in);
54 turn_into_tuple(call, pn_Call_max);
55 set_Tuple_pred(call, pn_Call_M_regular, get_irg_no_mem(irg));
56 set_Tuple_pred(call, pn_Call_X_except, get_irg_bad(irg));
57 set_Tuple_pred(call, pn_Call_T_result, res);
58 set_Tuple_pred(call, pn_Call_M_except, get_irg_no_mem(irg));
59 set_Tuple_pred(call, pn_Call_P_value_res_base, get_irg_bad(irg));
63 * Map an Add (a_l, a_h, b_l, b_h)
65 static int map_Add(ir_node *call, void *ctx) {
66 ir_graph *irg = current_ir_graph;
67 dbg_info *dbg = get_irn_dbg_info(call);
68 ir_node *block = get_nodes_block(call);
69 ir_node **params = get_Call_param_arr(call);
70 ir_type *method = get_Call_type(call);
71 ir_node *a_l = params[BINOP_Left_Low];
72 ir_node *a_h = params[BINOP_Left_High];
73 ir_node *b_l = params[BINOP_Right_Low];
74 ir_node *b_h = params[BINOP_Right_High];
75 ir_mode *l_res_mode = get_type_mode(get_method_res_type(method, 0));
76 ir_mode *h_res_mode = get_type_mode(get_method_res_type(method, 1));
77 ir_node *l_res, *h_res, *add;
79 /* l_res = a_l + b_l */
80 /* h_res = a_h + b_h + carry */
82 add = new_rd_ia32_Add64Bit(dbg, irg, block, a_l, a_h, b_l, b_h);
83 l_res = new_r_Proj(irg, block, add, l_res_mode, pn_ia32_Add64Bit_low_res);
84 h_res = new_r_Proj(irg, block, add, h_res_mode, pn_ia32_Add64Bit_high_res);
86 resolve_call(call, l_res, h_res, irg, block);
91 * Map a Sub (a_l, a_h, b_l, b_h)
93 static int map_Sub(ir_node *call, void *ctx) {
94 ir_graph *irg = current_ir_graph;
95 dbg_info *dbg = get_irn_dbg_info(call);
96 ir_node *block = get_nodes_block(call);
97 ir_node **params = get_Call_param_arr(call);
98 ir_type *method = get_Call_type(call);
99 ir_node *a_l = params[BINOP_Left_Low];
100 ir_node *a_h = params[BINOP_Left_High];
101 ir_node *b_l = params[BINOP_Right_Low];
102 ir_node *b_h = params[BINOP_Right_High];
103 ir_mode *l_res_mode = get_type_mode(get_method_res_type(method, 0));
104 ir_mode *h_res_mode = get_type_mode(get_method_res_type(method, 1));
105 ir_node *l_res, *h_res, *res;
107 /* l_res = a_l - b_l */
108 /* h_res = a_h - b_h - carry */
110 res = new_rd_ia32_Sub64Bit(dbg, irg, block, a_l, a_h, b_l, b_h);
111 l_res = new_r_Proj(irg, block, res, l_res_mode, pn_ia32_Sub64Bit_low_res);
112 h_res = new_r_Proj(irg, block, res, h_res_mode, pn_ia32_Sub64Bit_high_res);
114 resolve_call(call, l_res, h_res, irg, block);
119 * Map a Shl (a_l, a_h, count)
121 static int map_Shl(ir_node *call, void *ctx) {
122 ir_graph *irg = current_ir_graph;
123 dbg_info *dbg = get_irn_dbg_info(call);
124 ir_node *block = get_nodes_block(call);
125 ir_node **params = get_Call_param_arr(call);
126 ir_type *method = get_Call_type(call);
127 ir_node *a_l = params[BINOP_Left_Low];
128 ir_node *a_h = params[BINOP_Left_High];
129 ir_node *cnt = params[BINOP_Right_Low];
130 ir_mode *l_res_mode = get_type_mode(get_method_res_type(method, 0));
131 ir_mode *h_res_mode = get_type_mode(get_method_res_type(method, 1));
132 ir_node *l_res, *h_res;
134 /* h_res = SHLD a_h, a_l, cnt */
135 l_res = new_rd_ia32_l_ShlD(dbg, irg, block, a_h, a_l, cnt, l_res_mode);
137 /* l_res = SHL a_l, cnt */
138 h_res = new_rd_ia32_l_Shl(dbg, irg, block, a_l, cnt, h_res_mode);
140 add_irn_dep(h_res, l_res);
142 resolve_call(call, l_res, h_res, irg, block);
147 * Map a Shr (a_l, a_h, count)
149 static int map_Shr(ir_node *call, void *ctx) {
150 ir_graph *irg = current_ir_graph;
151 dbg_info *dbg = get_irn_dbg_info(call);
152 ir_node *block = get_nodes_block(call);
153 ir_node **params = get_Call_param_arr(call);
154 ir_type *method = get_Call_type(call);
155 ir_node *a_l = params[BINOP_Left_Low];
156 ir_node *a_h = params[BINOP_Left_High];
157 ir_node *cnt = params[BINOP_Right_Low];
158 ir_mode *l_res_mode = get_type_mode(get_method_res_type(method, 0));
159 ir_mode *h_res_mode = get_type_mode(get_method_res_type(method, 1));
160 ir_node *l_res, *h_res;
162 /* l_res = SHRD a_l, a_h, cnt */
163 l_res = new_rd_ia32_l_ShrD(dbg, irg, block, a_l, a_h, cnt, l_res_mode);
165 /* h_res = SHR a_h, cnt */
166 h_res = new_rd_ia32_l_Shr(dbg, irg, block, a_h, cnt, h_res_mode);
168 add_irn_dep(h_res, l_res);
170 resolve_call(call, l_res, h_res, irg, block);
175 * Map a Shrs (a_l, a_h, count)
177 static int map_Shrs(ir_node *call, void *ctx) {
178 ir_graph *irg = current_ir_graph;
179 dbg_info *dbg = get_irn_dbg_info(call);
180 ir_node *block = get_nodes_block(call);
181 ir_node **params = get_Call_param_arr(call);
182 ir_type *method = get_Call_type(call);
183 ir_node *a_l = params[BINOP_Left_Low];
184 ir_node *a_h = params[BINOP_Left_High];
185 ir_node *cnt = params[BINOP_Right_Low];
186 ir_mode *l_res_mode = get_type_mode(get_method_res_type(method, 0));
187 ir_mode *h_res_mode = get_type_mode(get_method_res_type(method, 1));
188 ir_node *l_res, *h_res;
190 /* l_res = SHRD a_l, a_h, cnt */
191 l_res = new_rd_ia32_l_ShrD(dbg, irg, block, a_l, a_h, cnt, l_res_mode);
193 /* h_res = SAR a_h, cnt */
194 h_res = new_rd_ia32_l_Shrs(dbg, irg, block, a_h, cnt, h_res_mode);
196 add_irn_dep(h_res, l_res);
198 resolve_call(call, l_res, h_res, irg, block);
203 * Map a Mul (a_l, a_h, b_l, b_h)
205 static int map_Mul(ir_node *call, void *ctx) {
206 ir_graph *irg = current_ir_graph;
207 dbg_info *dbg = get_irn_dbg_info(call);
208 ir_node *block = get_nodes_block(call);
209 ir_node **params = get_Call_param_arr(call);
210 ir_type *method = get_Call_type(call);
211 ir_node *a_l = params[BINOP_Left_Low];
212 ir_node *a_h = params[BINOP_Left_High];
213 ir_node *b_l = params[BINOP_Right_Low];
214 ir_node *b_h = params[BINOP_Right_High];
215 ir_mode *l_res_mode = get_type_mode(get_method_res_type(method, 0));
216 ir_mode *h_res_mode = get_type_mode(get_method_res_type(method, 1));
217 ir_node *l_res, *h_res, *mul, *pEDX, *add;
228 mul = new_rd_ia32_l_MulS(dbg, irg, block, a_l, b_l);
229 set_ia32_res_mode(mul, l_res_mode);
230 pEDX = new_rd_Proj(dbg, irg, block, mul, l_res_mode, pn_ia32_l_MulS_EDX);
231 l_res = new_rd_Proj(dbg, irg, block, mul, l_res_mode, pn_ia32_l_MulS_EAX);
233 mul = new_rd_ia32_l_Mul(dbg, irg, block, a_h, b_l, h_res_mode);
234 add = new_rd_ia32_l_Add(dbg, irg, block, mul, pEDX, h_res_mode);
235 mul = new_rd_ia32_l_Mul(dbg, irg, block, a_l, b_h, h_res_mode);
236 h_res = new_rd_ia32_l_Add(dbg, irg, block, add, mul, h_res_mode);
238 resolve_call(call, l_res, h_res, irg, block);
244 * Map a Minus (a_l, a_h)
246 static int map_Minus(ir_node *call, void *ctx) {
247 ir_graph *irg = current_ir_graph;
248 dbg_info *dbg = get_irn_dbg_info(call);
249 ir_node *block = get_nodes_block(call);
250 ir_node **params = get_Call_param_arr(call);
251 ir_type *method = get_Call_type(call);
252 ir_node *a_l = params[BINOP_Left_Low];
253 ir_node *a_h = params[BINOP_Left_High];
254 ir_mode *l_res_mode = get_type_mode(get_method_res_type(method, 0));
255 ir_mode *h_res_mode = get_type_mode(get_method_res_type(method, 1));
256 ir_node *l_res, *h_res, *cnst, *res;
258 /* too bad: we need 0 in a register here */
259 cnst = new_Const_long(h_res_mode, 0);
261 /* l_res = 0 - a_l */
262 /* h_res = 0 - a_h - carry */
264 res = new_rd_ia32_Minus64Bit(dbg, irg, block, cnst, a_l, a_h);
265 l_res = new_r_Proj(irg, block, res, l_res_mode, pn_ia32_Minus64Bit_low_res);
266 h_res = new_r_Proj(irg, block, res, h_res_mode, pn_ia32_Minus64Bit_high_res);
268 resolve_call(call, l_res, h_res, irg, block);
274 * Map a Abs (a_l, a_h)
276 static int map_Abs(ir_node *call, void *ctx) {
277 ir_graph *irg = current_ir_graph;
278 dbg_info *dbg = get_irn_dbg_info(call);
279 ir_node *block = get_nodes_block(call);
280 ir_node **params = get_Call_param_arr(call);
281 ir_type *method = get_Call_type(call);
282 ir_node *a_l = params[BINOP_Left_Low];
283 ir_node *a_h = params[BINOP_Left_High];
284 ir_mode *l_res_mode = get_type_mode(get_method_res_type(method, 0));
285 ir_mode *h_res_mode = get_type_mode(get_method_res_type(method, 1));
286 ir_node *l_res, *h_res, *sign, *sub_l, *sub_h, *res;
289 Code inspired by gcc output :) (although gcc doubles the
290 operation for t1 as t2 and uses t1 for operations with low part
291 and t2 for operations with high part which is actually unnecessary
292 because t1 and t2 represent the same value)
298 h_res = t3 - t1 - carry
302 sign = new_rd_ia32_l_Shrs(dbg, irg, block, a_h, new_Const_long(h_res_mode, 31), h_res_mode);
303 sub_l = new_rd_ia32_l_Eor(dbg, irg, block, a_l, sign, l_res_mode);
304 sub_h = new_rd_ia32_l_Eor(dbg, irg, block, a_h, sign, h_res_mode);
305 res = new_rd_ia32_Sub64Bit(dbg, irg, block, sub_l, sub_h, sign, sign);
306 l_res = new_r_Proj(irg, block, res, l_res_mode, pn_ia32_Sub64Bit_low_res);
307 h_res = new_r_Proj(irg, block, res, h_res_mode, pn_ia32_Sub64Bit_high_res);
309 resolve_call(call, l_res, h_res, irg, block);
/**
 * Maps a Div/Mod of two 64bit values (a_l, a_h, b_l, b_h).
 *
 * Lowering strategy: spill both 64bit operands to the stack frame,
 * load them into the x87 FPU as doubles, compute the quotient (and for
 * Mod: mod = a - b * (a / b)), store the result back to the frame as a
 * 64bit integer (fist) and reload it as two 32bit halves.
 *
 * @param call      the intrinsic Call node
 * @param ctx       the ia32_intrinsic_env_t caching the frame entities
 * @param need_mod  non-zero to compute the remainder instead of the quotient
 */
static int DivMod_mapper(ir_node *call, void *ctx, int need_mod) {
	ia32_intrinsic_env_t *env = ctx;
	ir_graph *irg        = current_ir_graph;
	dbg_info *dbg        = get_irn_dbg_info(call);
	ir_node  *block      = get_nodes_block(call);
	ir_node  **params    = get_Call_param_arr(call);
	ir_type  *method     = get_Call_type(call);
	ir_node  *a_l        = params[BINOP_Left_Low];
	ir_node  *a_h        = params[BINOP_Left_High];
	ir_node  *b_l        = params[BINOP_Right_Low];
	ir_node  *b_h        = params[BINOP_Right_High];
	ir_mode  *l_res_mode = get_type_mode(get_method_res_type(method, 0));
	ir_mode  *h_res_mode = get_type_mode(get_method_res_type(method, 1));
	int      mode_bytes  = get_mode_size_bytes(ia32_reg_classes[CLASS_ia32_gp].mode);
	/* re-use the cached frame entities when still in the same graph */
	entity   *ent_a      = env->irg == irg ? env->ll_div_op1 : NULL;
	entity   *ent_b      = env->irg == irg ? env->ll_div_op2 : NULL;
	ir_node  *l_res, *h_res, *frame;
	ir_node  *store_l, *store_h;
	ir_node  *op_mem[2], *mem, *fa_mem, *fb_mem;
	ir_node  *fa, *fb, *fres;

	/* allocate memory on frame to store args */
	/* NOTE(review): the `if (! ent_a)` / `if (! ent_b)` guards around
	   these allocations appear truncated in this excerpt — confirm. */
	ent_a = env->ll_div_op1 =
		frame_alloc_area(get_irg_frame_type(irg), 2 * mode_bytes, 16, 0);
	ent_b = env->ll_div_op2 =
		frame_alloc_area(get_irg_frame_type(irg), 2 * mode_bytes, 16, 0);
	frame = get_irg_frame(irg);

	/* store first arg: low word at offset 0 of ent_a ... */
	store_l = new_rd_ia32_l_Store(dbg, irg, block, frame, a_l, get_irg_no_mem(irg));
	set_ia32_frame_ent(store_l, ent_a);
	set_ia32_use_frame(store_l);
	set_ia32_ls_mode(store_l, get_irn_mode(a_l));
	op_mem[0] = new_r_Proj(irg, block, store_l, mode_M, pn_ia32_l_Store_M);

	/* ... high word at offset mode_bytes */
	store_h = new_rd_ia32_l_Store(dbg, irg, block, frame, a_h, get_irg_no_mem(irg));
	set_ia32_frame_ent(store_h, ent_a);
	add_ia32_am_offs_int(store_h, mode_bytes);
	set_ia32_use_frame(store_h);
	set_ia32_ls_mode(store_h, get_irn_mode(a_h));
	op_mem[1] = new_r_Proj(irg, block, store_h, mode_M, pn_ia32_l_Store_M);

	/* join both store memories before the FPU load */
	mem = new_r_Sync(irg, block, 2, op_mem);

	/* load first arg into FPU (as a double) */
	fa = new_rd_ia32_l_vfild(dbg, irg, block, frame, mem);
	set_ia32_frame_ent(fa, ent_a);
	set_ia32_use_frame(fa);
	set_ia32_ls_mode(fa, mode_D);
	fa_mem = new_r_Proj(irg, block, fa, mode_M, pn_ia32_l_vfild_M);
	fa = new_r_Proj(irg, block, fa, mode_D, pn_ia32_l_vfild_res);

	/* store second arg the same way, into ent_b */
	store_l = new_rd_ia32_l_Store(dbg, irg, block, frame, b_l, get_irg_no_mem(irg));
	set_ia32_frame_ent(store_l, ent_b);
	set_ia32_use_frame(store_l);
	set_ia32_ls_mode(store_l, get_irn_mode(b_l));
	op_mem[0] = new_r_Proj(irg, block, store_l, mode_M, pn_ia32_l_Store_M);

	store_h = new_rd_ia32_l_Store(dbg, irg, block, frame, b_h, get_irg_no_mem(irg));
	set_ia32_frame_ent(store_h, ent_b);
	add_ia32_am_offs_int(store_h, mode_bytes);
	set_ia32_use_frame(store_h);
	set_ia32_ls_mode(store_h, get_irn_mode(b_h));
	op_mem[1] = new_r_Proj(irg, block, store_h, mode_M, pn_ia32_l_Store_M);

	mem = new_r_Sync(irg, block, 2, op_mem);

	/* load second arg into FPU */
	fb = new_rd_ia32_l_vfild(dbg, irg, block, frame, mem);
	set_ia32_frame_ent(fb, ent_b);
	set_ia32_use_frame(fb);
	set_ia32_ls_mode(fb, mode_D);
	fb_mem = new_r_Proj(irg, block, fb, mode_M, pn_ia32_l_vfild_M);
	fb = new_r_Proj(irg, block, fb, mode_D, pn_ia32_l_vfild_res);

	/* NOTE(review): op_mem presumably holds fa_mem/fb_mem at this point;
	   the assignments look truncated in this excerpt — confirm. */
	mem = new_r_Sync(irg, block, 2, op_mem);

	/* perform division: fres = fa / fb */
	fres = new_rd_ia32_l_vfdiv(dbg, irg, block, fa, fb, mode_D);

	/* we need modulo: mod = a - b * res */
	/* NOTE(review): the `if (need_mod)` guard around these two statements
	   appears truncated in this excerpt — confirm. */
	fres = new_rd_ia32_l_vfmul(dbg, irg, block, fb, fres, mode_D);
	fres = new_rd_ia32_l_vfsub(dbg, irg, block, fa, fres, mode_D);

	/* store back result as a 64bit integer, we re-use ent_a here */
	fres = new_rd_ia32_l_vfist(dbg, irg, block, frame, fres, mem);
	set_ia32_frame_ent(fres, ent_a);
	set_ia32_use_frame(fres);
	set_ia32_ls_mode(fres, mode_D);
	mem = new_r_Proj(irg, block, fres, mode_M, pn_ia32_l_vfist_M);

	/* load low part of the result */
	l_res = new_rd_ia32_l_Load(dbg, irg, block, frame, mem);
	set_ia32_frame_ent(l_res, ent_a);
	set_ia32_use_frame(l_res);
	set_ia32_ls_mode(l_res, l_res_mode);
	l_res = new_r_Proj(irg, block, l_res, l_res_mode, pn_ia32_l_Load_res);

	/* load high part of the result */
	h_res = new_rd_ia32_l_Load(dbg, irg, block, frame, mem);
	set_ia32_frame_ent(h_res, ent_a);
	add_ia32_am_offs_int(h_res, mode_bytes);
	set_ia32_use_frame(h_res);
	set_ia32_ls_mode(h_res, h_res_mode);
	h_res = new_r_Proj(irg, block, h_res, h_res_mode, pn_ia32_l_Load_res);

	resolve_call(call, l_res, h_res, irg, block);
445 static int map_Div(ir_node *call, void *ctx) {
446 return DivMod_mapper(call, ctx, 0);
449 static int map_Mod(ir_node *call, void *ctx) {
450 return DivMod_mapper(call, ctx, 1);
/**
 * Maps a Conv between a 64bit integer and a float value.
 *
 * Two directions, distinguished by the number of Call parameters:
 * one parameter means float -> long long, two parameters mean
 * long long -> float. Both go through a frame spill slot and the
 * x87 FPU (fist/fild do the actual 64bit conversion).
 */
static int map_Conv(ir_node *call, void *ctx) {
	ia32_intrinsic_env_t *env = ctx;
	ir_graph  *irg      = current_ir_graph;
	dbg_info  *dbg      = get_irn_dbg_info(call);
	ir_node   *block    = get_nodes_block(call);
	ir_node   **params  = get_Call_param_arr(call);
	ir_type   *method   = get_Call_type(call);
	int       n         = get_Call_n_params(call);
	int       gp_bytes  = get_mode_size_bytes(ia32_reg_classes[CLASS_ia32_gp].mode);
	ir_node   *l_res, *h_res, *frame, *fres;
	ir_node   *store_l, *store_h;
	ir_node   *op_mem[2], *mem;

	/* We have a Conv float -> long long here */
	ir_node *a_f        = params[0];
	ir_mode *l_res_mode = get_type_mode(get_method_res_type(method, 0));
	ir_mode *h_res_mode = get_type_mode(get_method_res_type(method, 1));

	assert(mode_is_float(get_irn_mode(a_f)) && "unexpected Conv call");

	/* allocate memory on frame to store args; re-use the cached entity
	   when still in the same graph */
	ent = env->irg == irg ? env->d_ll_conv : NULL;
	ent = env->d_ll_conv = frame_alloc_area(get_irg_frame_type(irg), 2 * gp_bytes, 16, 0);
	frame = get_irg_frame(irg);

	/*
	   Now we create a node to move the value from a XMM register into
	   x87 FPU because it is unknown here, which FPU is used.
	   This node is killed in transformation phase when not needed.
	   Otherwise it is split up into a movsd + fld
	*/
	a_f = new_rd_ia32_l_SSEtoX87(dbg, irg, block, frame, a_f, get_irg_no_mem(irg), mode_D);
	set_ia32_frame_ent(a_f, ent);
	set_ia32_use_frame(a_f);
	set_ia32_ls_mode(a_f, mode_D);

	/* store from FPU as a 64bit Int (fist) */
	a_f = new_rd_ia32_l_vfist(dbg, irg, block, frame, a_f, get_irg_no_mem(irg));
	set_ia32_frame_ent(a_f, ent);
	set_ia32_use_frame(a_f);
	set_ia32_ls_mode(a_f, mode_D);
	mem = new_r_Proj(irg, block, a_f, mode_M, pn_ia32_l_vfist_M);

	/* load low part of the result */
	l_res = new_rd_ia32_l_Load(dbg, irg, block, frame, mem);
	set_ia32_frame_ent(l_res, ent);
	set_ia32_use_frame(l_res);
	set_ia32_ls_mode(l_res, l_res_mode);
	l_res = new_r_Proj(irg, block, l_res, l_res_mode, pn_ia32_l_Load_res);

	/* load high part of the result */
	h_res = new_rd_ia32_l_Load(dbg, irg, block, frame, mem);
	set_ia32_frame_ent(h_res, ent);
	add_ia32_am_offs_int(h_res, gp_bytes);
	set_ia32_use_frame(h_res);
	set_ia32_ls_mode(h_res, h_res_mode);
	h_res = new_r_Proj(irg, block, h_res, h_res_mode, pn_ia32_l_Load_res);

	resolve_call(call, l_res, h_res, irg, block);

	/* We have a Conv long long -> float here */
	ir_node *a_l       = params[BINOP_Left_Low];
	ir_node *a_h       = params[BINOP_Left_High];
	ir_mode *mode_a_l  = get_irn_mode(a_l);
	ir_mode *mode_a_h  = get_irn_mode(a_h);
	ir_mode *fres_mode = get_type_mode(get_method_res_type(method, 0));

	assert(! mode_is_float(mode_a_l) && ! mode_is_float(mode_a_h) && "unexpected Conv call");

	/* allocate memory on frame to store args; re-use the cached entity
	   when still in the same graph */
	ent = env->irg == irg ? env->ll_d_conv : NULL;
	ent = env->ll_d_conv = frame_alloc_area(get_irg_frame_type(irg), 2 * gp_bytes, 16, 0);
	frame = get_irg_frame(irg);

	/* store first arg (low part) */
	store_l = new_rd_ia32_l_Store(dbg, irg, block, frame, a_l, get_irg_no_mem(irg));
	set_ia32_frame_ent(store_l, ent);
	set_ia32_use_frame(store_l);
	set_ia32_ls_mode(store_l, get_irn_mode(a_l));
	op_mem[0] = new_r_Proj(irg, block, store_l, mode_M, pn_ia32_l_Store_M);

	/* store second arg (high part) at offset gp_bytes */
	store_h = new_rd_ia32_l_Store(dbg, irg, block, frame, a_h, get_irg_no_mem(irg));
	set_ia32_frame_ent(store_h, ent);
	add_ia32_am_offs_int(store_h, gp_bytes);
	set_ia32_use_frame(store_h);
	set_ia32_ls_mode(store_h, get_irn_mode(a_h));
	op_mem[1] = new_r_Proj(irg, block, store_h, mode_M, pn_ia32_l_Store_M);

	/* join both store memories before the FPU load */
	mem = new_r_Sync(irg, block, 2, op_mem);

	/* Load arg into x87 FPU (implicit convert) */
	fres = new_rd_ia32_l_vfild(dbg, irg, block, frame, mem);
	set_ia32_frame_ent(fres, ent);
	set_ia32_use_frame(fres);
	set_ia32_ls_mode(fres, mode_D);
	mem = new_r_Proj(irg, block, fres, mode_M, pn_ia32_l_vfild_M);
	fres = new_r_Proj(irg, block, fres, fres_mode, pn_ia32_l_vfild_res);

	/*
	   Now we create a node to move the loaded value into a XMM
	   register because it is unknown here, which FPU is used.
	   This node is killed in transformation phase when not needed.
	   Otherwise it is split up into a fst + movsd
	*/
	fres = new_rd_ia32_l_X87toSSE(dbg, irg, block, frame, fres, mem, fres_mode);
	set_ia32_frame_ent(fres, ent);
	set_ia32_use_frame(fres);
	set_ia32_ls_mode(fres, fres_mode);

	/* single float result: no high part */
	resolve_call(call, fres, NULL, irg, block);

	assert(0 && "unexpected Conv call");
/**
 * Ia32 implementation of intrinsic mapping: creates (and caches in
 * i_ents) the entity representing the library function for the given
 * op and registers the matching mapper function in the intrinsics
 * array processed later by ia32_handle_intrinsics().
 */
entity *ia32_create_intrinsic_fkt(ir_type *method, const ir_op *op,
                                  const ir_mode *imode, const ir_mode *omode,
	i_mapper_func mapper;

	intrinsics = NEW_ARR_F(i_record, 0);

	/* select the entity cache slot (and mapper) for this opcode */
	switch (get_op_code(op)) {
		ent = &i_ents[iro_Add];
		ent = &i_ents[iro_Sub];
		ent = &i_ents[iro_Shl];
		ent = &i_ents[iro_Shr];
		ent = &i_ents[iro_Shrs];
		ent = &i_ents[iro_Mul];
		ent = &i_ents[iro_Minus];
		ent = &i_ents[iro_Abs];
		ent = &i_ents[iro_Div];
		ent = &i_ents[iro_Mod];
		ent = &i_ents[iro_Conv];
		/* unsupported op: fall back to the default implementation */
		fprintf(stderr, "FIXME: unhandled op for ia32 intrinsic function %s\n", get_id_str(op->name));
		return def_create_intrinsic_fkt(method, op, imode, omode, context);

#define IDENT(s) new_id_from_chars(s, sizeof(s)-1)

	/* create the entity on first use, named "L" + op name */
	ident *id = mangle(IDENT("L"), get_op_ident(op));
	*ent = new_entity(get_glob_type(), id, method);

	/* register an intrinsic-call record so the call gets lowered */
	elt.i_call.kind = INTRINSIC_CALL;
	elt.i_call.i_ent = *ent;
	elt.i_call.i_mapper = mapper;
	elt.i_call.ctx = context;
	elt.i_call.link = NULL;

	ARR_APP1(i_record, intrinsics, elt);