2 * This file implements the mapping of 64Bit intrinsic functions to
3 * code or library calls.
17 #include "lower_intrinsics.h"
22 #include "ia32_new_nodes.h"
23 #include "bearch_ia32_t.h"
24 #include "gen_ia32_regalloc_if.h"
26 /** The array of all intrinsics that must be mapped. */
27 static i_record *intrinsics;
29 /** An array to cache all entities */
30 static ir_entity *i_ents[iro_MaxOpcode];
33 * Maps all intrinsic calls that the backend support
34 * and map all instructions the backend did not support
37 void ia32_handle_intrinsics(void) {
38 if (intrinsics && ARR_LEN(intrinsics) > 0)
39 lower_intrinsics(intrinsics, ARR_LEN(intrinsics));
42 #define BINOP_Left_Low 0
43 #define BINOP_Left_High 1
44 #define BINOP_Right_Low 2
45 #define BINOP_Right_High 3
47 static void resolve_call(ir_node *call, ir_node *l_res, ir_node *h_res, ir_graph *irg, ir_node *block) {
52 res = new_r_Tuple(irg, block, h_res == NULL ? 1 : 2, in);
54 turn_into_tuple(call, pn_Call_max);
55 set_Tuple_pred(call, pn_Call_M_regular, get_irg_no_mem(irg));
56 set_Tuple_pred(call, pn_Call_X_except, get_irg_bad(irg));
57 set_Tuple_pred(call, pn_Call_T_result, res);
58 set_Tuple_pred(call, pn_Call_M_except, get_irg_no_mem(irg));
59 set_Tuple_pred(call, pn_Call_P_value_res_base, get_irg_bad(irg));
63 * Map an Add (a_l, a_h, b_l, b_h)
65 static int map_Add(ir_node *call, void *ctx) {
66 ir_graph *irg = current_ir_graph;
67 dbg_info *dbg = get_irn_dbg_info(call);
68 ir_node *block = get_nodes_block(call);
69 ir_node **params = get_Call_param_arr(call);
70 ir_type *method = get_Call_type(call);
71 ir_node *a_l = params[BINOP_Left_Low];
72 ir_node *a_h = params[BINOP_Left_High];
73 ir_node *b_l = params[BINOP_Right_Low];
74 ir_node *b_h = params[BINOP_Right_High];
75 ir_mode *l_res_mode = get_type_mode(get_method_res_type(method, 0));
76 ir_mode *h_res_mode = get_type_mode(get_method_res_type(method, 1));
77 ir_node *l_res, *h_res, *add;
79 /* l_res = a_l + b_l */
80 /* h_res = a_h + b_h + carry */
82 add = new_rd_ia32_Add64Bit(dbg, irg, block, a_l, a_h, b_l, b_h);
83 l_res = new_r_Proj(irg, block, add, l_res_mode, pn_ia32_Add64Bit_low_res);
84 h_res = new_r_Proj(irg, block, add, h_res_mode, pn_ia32_Add64Bit_high_res);
86 resolve_call(call, l_res, h_res, irg, block);
91 * Map a Sub (a_l, a_h, b_l, b_h)
93 static int map_Sub(ir_node *call, void *ctx) {
94 ir_graph *irg = current_ir_graph;
95 dbg_info *dbg = get_irn_dbg_info(call);
96 ir_node *block = get_nodes_block(call);
97 ir_node **params = get_Call_param_arr(call);
98 ir_type *method = get_Call_type(call);
99 ir_node *a_l = params[BINOP_Left_Low];
100 ir_node *a_h = params[BINOP_Left_High];
101 ir_node *b_l = params[BINOP_Right_Low];
102 ir_node *b_h = params[BINOP_Right_High];
103 ir_mode *l_res_mode = get_type_mode(get_method_res_type(method, 0));
104 ir_mode *h_res_mode = get_type_mode(get_method_res_type(method, 1));
105 ir_node *l_res, *h_res, *res;
107 /* l_res = a_l - b_l */
108 /* h_res = a_h - b_h - carry */
110 res = new_rd_ia32_Sub64Bit(dbg, irg, block, a_l, a_h, b_l, b_h);
111 l_res = new_r_Proj(irg, block, res, l_res_mode, pn_ia32_Sub64Bit_low_res);
112 h_res = new_r_Proj(irg, block, res, h_res_mode, pn_ia32_Sub64Bit_high_res);
114 resolve_call(call, l_res, h_res, irg, block);
119 * Map a Shl (a_l, a_h, count)
121 static int map_Shl(ir_node *call, void *ctx) {
122 ir_graph *irg = current_ir_graph;
123 dbg_info *dbg = get_irn_dbg_info(call);
124 ir_node *block = get_nodes_block(call);
125 ir_node **params = get_Call_param_arr(call);
126 ir_type *method = get_Call_type(call);
127 ir_node *a_l = params[BINOP_Left_Low];
128 ir_node *a_h = params[BINOP_Left_High];
129 ir_node *cnt = params[BINOP_Right_Low];
130 ir_mode *l_res_mode = get_type_mode(get_method_res_type(method, 0));
131 ir_mode *h_res_mode = get_type_mode(get_method_res_type(method, 1));
132 ir_node *l_res, *h_res;
134 /* h_res = SHLD a_h, a_l, cnt */
135 h_res = new_rd_ia32_l_ShlD(dbg, irg, block, a_h, a_l, cnt, l_res_mode);
137 /* l_res = SHL a_l, cnt */
138 l_res = new_rd_ia32_l_Shl(dbg, irg, block, a_l, cnt, h_res_mode);
140 add_irn_dep(l_res, h_res);
142 resolve_call(call, l_res, h_res, irg, block);
147 * Map a Shr (a_l, a_h, count)
149 static int map_Shr(ir_node *call, void *ctx) {
150 ir_graph *irg = current_ir_graph;
151 dbg_info *dbg = get_irn_dbg_info(call);
152 ir_node *block = get_nodes_block(call);
153 ir_node **params = get_Call_param_arr(call);
154 ir_type *method = get_Call_type(call);
155 ir_node *a_l = params[BINOP_Left_Low];
156 ir_node *a_h = params[BINOP_Left_High];
157 ir_node *cnt = params[BINOP_Right_Low];
158 ir_mode *l_res_mode = get_type_mode(get_method_res_type(method, 0));
159 ir_mode *h_res_mode = get_type_mode(get_method_res_type(method, 1));
160 ir_node *l_res, *h_res;
162 /* l_res = SHRD a_l, a_h, cnt */
163 l_res = new_rd_ia32_l_ShrD(dbg, irg, block, a_l, a_h, cnt, l_res_mode);
165 /* h_res = SHR a_h, cnt */
166 h_res = new_rd_ia32_l_Shr(dbg, irg, block, a_h, cnt, h_res_mode);
168 add_irn_dep(h_res, l_res);
170 resolve_call(call, l_res, h_res, irg, block);
175 * Map a Shrs (a_l, a_h, count)
177 static int map_Shrs(ir_node *call, void *ctx) {
178 ir_graph *irg = current_ir_graph;
179 dbg_info *dbg = get_irn_dbg_info(call);
180 ir_node *block = get_nodes_block(call);
181 ir_node **params = get_Call_param_arr(call);
182 ir_type *method = get_Call_type(call);
183 ir_node *a_l = params[BINOP_Left_Low];
184 ir_node *a_h = params[BINOP_Left_High];
185 ir_node *cnt = params[BINOP_Right_Low];
186 ir_mode *l_res_mode = get_type_mode(get_method_res_type(method, 0));
187 ir_mode *h_res_mode = get_type_mode(get_method_res_type(method, 1));
188 ir_node *l_res, *h_res;
190 /* l_res = SHRD a_l, a_h, cnt */
191 l_res = new_rd_ia32_l_ShrD(dbg, irg, block, a_l, a_h, cnt, l_res_mode);
193 /* h_res = SAR a_h, cnt */
194 h_res = new_rd_ia32_l_Shrs(dbg, irg, block, a_h, cnt, h_res_mode);
196 add_irn_dep(h_res, l_res);
198 resolve_call(call, l_res, h_res, irg, block);
203 * Map a Mul (a_l, a_h, b_l, b_h)
205 static int map_Mul(ir_node *call, void *ctx) {
206 ir_graph *irg = current_ir_graph;
207 dbg_info *dbg = get_irn_dbg_info(call);
208 ir_node *block = get_nodes_block(call);
209 ir_node **params = get_Call_param_arr(call);
210 ir_type *method = get_Call_type(call);
211 ir_node *a_l = params[BINOP_Left_Low];
212 ir_node *a_h = params[BINOP_Left_High];
213 ir_node *b_l = params[BINOP_Right_Low];
214 ir_node *b_h = params[BINOP_Right_High];
215 ir_mode *l_res_mode = get_type_mode(get_method_res_type(method, 0));
216 ir_mode *h_res_mode = get_type_mode(get_method_res_type(method, 1));
217 ir_node *l_res, *h_res, *mul, *pEDX, *add;
228 mul = new_rd_ia32_l_MulS(dbg, irg, block, a_l, b_l);
229 set_ia32_res_mode(mul, l_res_mode);
230 pEDX = new_rd_Proj(dbg, irg, block, mul, l_res_mode, pn_ia32_l_MulS_EDX);
231 l_res = new_rd_Proj(dbg, irg, block, mul, l_res_mode, pn_ia32_l_MulS_EAX);
233 mul = new_rd_ia32_l_Mul(dbg, irg, block, a_h, b_l, h_res_mode);
234 add = new_rd_ia32_l_Add(dbg, irg, block, mul, pEDX, h_res_mode);
235 mul = new_rd_ia32_l_Mul(dbg, irg, block, a_l, b_h, h_res_mode);
236 h_res = new_rd_ia32_l_Add(dbg, irg, block, add, mul, h_res_mode);
238 resolve_call(call, l_res, h_res, irg, block);
244 * Map a Minus (a_l, a_h)
246 static int map_Minus(ir_node *call, void *ctx) {
247 ir_graph *irg = current_ir_graph;
248 dbg_info *dbg = get_irn_dbg_info(call);
249 ir_node *block = get_nodes_block(call);
250 ir_node **params = get_Call_param_arr(call);
251 ir_type *method = get_Call_type(call);
252 ir_node *a_l = params[BINOP_Left_Low];
253 ir_node *a_h = params[BINOP_Left_High];
254 ir_mode *l_res_mode = get_type_mode(get_method_res_type(method, 0));
255 ir_mode *h_res_mode = get_type_mode(get_method_res_type(method, 1));
256 ir_node *l_res, *h_res, *cnst, *res;
258 /* too bad: we need 0 in a register here */
259 cnst = new_Const_long(h_res_mode, 0);
261 /* l_res = 0 - a_l */
262 /* h_res = 0 - a_h - carry */
264 res = new_rd_ia32_Minus64Bit(dbg, irg, block, cnst, a_l, a_h);
265 l_res = new_r_Proj(irg, block, res, l_res_mode, pn_ia32_Minus64Bit_low_res);
266 h_res = new_r_Proj(irg, block, res, h_res_mode, pn_ia32_Minus64Bit_high_res);
268 resolve_call(call, l_res, h_res, irg, block);
274 * Map a Abs (a_l, a_h)
276 static int map_Abs(ir_node *call, void *ctx) {
277 ir_graph *irg = current_ir_graph;
278 dbg_info *dbg = get_irn_dbg_info(call);
279 ir_node *block = get_nodes_block(call);
280 ir_node **params = get_Call_param_arr(call);
281 ir_type *method = get_Call_type(call);
282 ir_node *a_l = params[BINOP_Left_Low];
283 ir_node *a_h = params[BINOP_Left_High];
284 ir_mode *l_res_mode = get_type_mode(get_method_res_type(method, 0));
285 ir_mode *h_res_mode = get_type_mode(get_method_res_type(method, 1));
286 ir_node *l_res, *h_res, *sign, *sub_l, *sub_h, *res;
289 Code inspired by gcc output :) (although gcc doubles the
290 operation for t1 as t2 and uses t1 for operations with low part
291 and t2 for operations with high part which is actually unnecessary
292 because t1 and t2 represent the same value)
298 h_res = t3 - t1 - carry
302 sign = new_rd_ia32_l_Shrs(dbg, irg, block, a_h, new_Const_long(h_res_mode, 31), h_res_mode);
303 sub_l = new_rd_ia32_l_Eor(dbg, irg, block, a_l, sign, l_res_mode);
304 sub_h = new_rd_ia32_l_Eor(dbg, irg, block, a_h, sign, h_res_mode);
305 res = new_rd_ia32_Sub64Bit(dbg, irg, block, sub_l, sub_h, sign, sign);
306 l_res = new_r_Proj(irg, block, res, l_res_mode, pn_ia32_Sub64Bit_low_res);
307 h_res = new_r_Proj(irg, block, res, h_res_mode, pn_ia32_Sub64Bit_high_res);
309 resolve_call(call, l_res, h_res, irg, block);
317 } ia32_intrinsic_divmod_t;
320 * Maps a Div/Mod (a_l, a_h, b_l, b_h)
/**
 * Maps a 64bit Div/Mod: (a_l, a_h) / (b_l, b_h) resp. the remainder.
 *
 * Strategy (visible below): both operands are spilled to scratch areas
 * on the stack frame, loaded into the x87 FPU as 64bit integers
 * (vfild), divided (vfdiv) resp. remaindered (vfprem), the result is
 * stored back as a 64bit integer (vfist) and reloaded as two 32bit
 * halves.
 *
 * NOTE(review): this excerpt is missing several structural lines
 * (marked below); compare with the upstream file before editing.
 *
 * @param call  the intrinsic Call node
 * @param ctx   the ia32_intrinsic_env_t caching per-irg frame entities
 * @param dmtp  IA32_INTRINSIC_DIV or IA32_INTRINSIC_MOD
 */
static int DivMod_mapper(ir_node *call, void *ctx, ia32_intrinsic_divmod_t dmtp) {
	ia32_intrinsic_env_t *env = ctx;
	ir_graph *irg = current_ir_graph;
	dbg_info *dbg = get_irn_dbg_info(call);
	ir_node *block = get_nodes_block(call);
	ir_node **params = get_Call_param_arr(call);
	ir_type *method = get_Call_type(call);
	ir_node *a_l = params[BINOP_Left_Low];
	ir_node *a_h = params[BINOP_Left_High];
	ir_node *b_l = params[BINOP_Right_Low];
	ir_node *b_h = params[BINOP_Right_High];
	ir_mode *l_res_mode = get_type_mode(get_method_res_type(method, 0));
	ir_mode *h_res_mode = get_type_mode(get_method_res_type(method, 1));
	int mode_bytes = get_mode_size_bytes(ia32_reg_classes[CLASS_ia32_gp].mode);
	/* cached frame entities are only valid if they belong to this irg */
	ir_entity *ent_a = env->irg == irg ? env->ll_div_op1 : NULL;
	ir_entity *ent_b = env->irg == irg ? env->ll_div_op2 : NULL;
	ir_node *l_res, *h_res, *frame;
	ir_node *store_l, *store_h;
	ir_node *op_mem[2], *mem, *fa_mem, *fb_mem;
	ir_node *fa, *fb, *fres;

	/* allocate memory on frame to store args */
	/* NOTE(review): the "if (! ent_a) {" / "if (! ent_b) {" guards around
	   these lazy allocations are not visible in this excerpt -- confirm */
		ent_a = env->ll_div_op1 =
			frame_alloc_area(get_irg_frame_type(irg), 2 * mode_bytes, 16, 0);
		ent_b = env->ll_div_op2 =
			frame_alloc_area(get_irg_frame_type(irg), 2 * mode_bytes, 16, 0);

	frame = get_irg_frame(irg);

	/* store first arg */
	store_l = new_rd_ia32_l_Store(dbg, irg, block, frame, a_l, get_irg_no_mem(irg));
	set_ia32_frame_ent(store_l, ent_a);
	set_ia32_use_frame(store_l);
	set_ia32_ls_mode(store_l, get_irn_mode(a_l));
	op_mem[0] = new_r_Proj(irg, block, store_l, mode_M, pn_ia32_l_Store_M);

	/* high half goes one gp-register width above the low half */
	store_h = new_rd_ia32_l_Store(dbg, irg, block, frame, a_h, get_irg_no_mem(irg));
	set_ia32_frame_ent(store_h, ent_a);
	add_ia32_am_offs_int(store_h, mode_bytes);
	set_ia32_use_frame(store_h);
	set_ia32_ls_mode(store_h, get_irn_mode(a_h));
	op_mem[1] = new_r_Proj(irg, block, store_h, mode_M, pn_ia32_l_Store_M);

	/* join both store memories before the load */
	mem = new_r_Sync(irg, block, 2, op_mem);

	/* load first arg into FPU */
	fa = new_rd_ia32_l_vfild(dbg, irg, block, frame, mem);
	set_ia32_frame_ent(fa, ent_a);
	set_ia32_use_frame(fa);
	set_ia32_ls_mode(fa, mode_D);
	fa_mem = new_r_Proj(irg, block, fa, mode_M, pn_ia32_l_vfild_M);
	fa = new_r_Proj(irg, block, fa, mode_D, pn_ia32_l_vfild_res);

	/* store second arg */
	store_l = new_rd_ia32_l_Store(dbg, irg, block, frame, b_l, get_irg_no_mem(irg));
	set_ia32_frame_ent(store_l, ent_b);
	set_ia32_use_frame(store_l);
	set_ia32_ls_mode(store_l, get_irn_mode(b_l));
	op_mem[0] = new_r_Proj(irg, block, store_l, mode_M, pn_ia32_l_Store_M);

	store_h = new_rd_ia32_l_Store(dbg, irg, block, frame, b_h, get_irg_no_mem(irg));
	set_ia32_frame_ent(store_h, ent_b);
	add_ia32_am_offs_int(store_h, mode_bytes);
	set_ia32_use_frame(store_h);
	set_ia32_ls_mode(store_h, get_irn_mode(b_h));
	op_mem[1] = new_r_Proj(irg, block, store_h, mode_M, pn_ia32_l_Store_M);

	mem = new_r_Sync(irg, block, 2, op_mem);

	/* load second arg into FPU */
	fb = new_rd_ia32_l_vfild(dbg, irg, block, frame, mem);
	set_ia32_frame_ent(fb, ent_b);
	set_ia32_use_frame(fb);
	set_ia32_ls_mode(fb, mode_D);
	fb_mem = new_r_Proj(irg, block, fb, mode_M, pn_ia32_l_vfild_M);
	fb = new_r_Proj(irg, block, fb, mode_D, pn_ia32_l_vfild_res);

	/* NOTE(review): lines assigning op_mem[0]/op_mem[1] (presumably
	   fa_mem and fb_mem) before this Sync are not visible -- confirm */
	mem = new_r_Sync(irg, block, 2, op_mem);

	/* perform division */
	/* NOTE(review): the enclosing "switch (dmtp) {", the break statements
	   and the closing brace are not visible in this excerpt -- confirm */
	case IA32_INTRINSIC_DIV:
		fres = new_rd_ia32_l_vfdiv(dbg, irg, block, fa, fb, mode_D);
	case IA32_INTRINSIC_MOD:
		fres = new_rd_ia32_l_vfprem(dbg, irg, block, fa, fb, mode_D);

	/* store back result, we use ent_a here */
	fres = new_rd_ia32_l_vfist(dbg, irg, block, frame, fres, mem);
	set_ia32_frame_ent(fres, ent_a);
	set_ia32_use_frame(fres);
	set_ia32_ls_mode(fres, mode_D);
	mem = new_r_Proj(irg, block, fres, mode_M, pn_ia32_l_vfist_M);

	/* load low part of the result */
	l_res = new_rd_ia32_l_Load(dbg, irg, block, frame, mem);
	set_ia32_frame_ent(l_res, ent_a);
	set_ia32_use_frame(l_res);
	set_ia32_ls_mode(l_res, l_res_mode);
	l_res = new_r_Proj(irg, block, l_res, l_res_mode, pn_ia32_l_Load_res);

	/* load hight part of the result */
	h_res = new_rd_ia32_l_Load(dbg, irg, block, frame, mem);
	set_ia32_frame_ent(h_res, ent_a);
	add_ia32_am_offs_int(h_res, mode_bytes);
	set_ia32_use_frame(h_res);
	set_ia32_ls_mode(h_res, h_res_mode);
	h_res = new_r_Proj(irg, block, h_res, h_res_mode, pn_ia32_l_Load_res);

	resolve_call(call, l_res, h_res, irg, block);
452 static int map_Div(ir_node *call, void *ctx) {
453 return DivMod_mapper(call, ctx, IA32_INTRINSIC_DIV);
456 static int map_Mod(ir_node *call, void *ctx) {
457 return DivMod_mapper(call, ctx, IA32_INTRINSIC_MOD);
461 * Maps a Conv (a_l, a_h)
/**
 * Maps a Conv intrinsic between a 64bit integer and a float type.
 *
 * Two directions, distinguished by the number of Call parameters:
 *  - one parameter (a float): float -> long long, done by spilling the
 *    value to the frame, converting to x87 if needed, storing it with
 *    fist and reloading the two 32bit halves;
 *  - two parameters (low/high words): long long -> float, done by
 *    storing the halves to the frame, loading with fild (implicit
 *    conversion) and moving back to SSE if needed.
 *
 * NOTE(review): the "if (n == 1) { ... } else if ... { ... } else"
 * branch headers and the "ir_entity *ent;" declaration are not visible
 * in this excerpt -- confirm against upstream before editing.
 *
 * @param call  the intrinsic Call node
 * @param ctx   the ia32_intrinsic_env_t caching per-irg frame entities
 */
static int map_Conv(ir_node *call, void *ctx) {
	ia32_intrinsic_env_t *env = ctx;
	ir_graph *irg = current_ir_graph;
	dbg_info *dbg = get_irn_dbg_info(call);
	ir_node *block = get_nodes_block(call);
	ir_node **params = get_Call_param_arr(call);
	ir_type *method = get_Call_type(call);
	int n = get_Call_n_params(call);
	int gp_bytes = get_mode_size_bytes(ia32_reg_classes[CLASS_ia32_gp].mode);
	ir_node *l_res, *h_res, *frame, *fres;
	ir_node *store_l, *store_h;
	ir_node *op_mem[2], *mem;

		/* We have a Conv float -> long long here */
		ir_node *a_f = params[0];
		ir_mode *l_res_mode = get_type_mode(get_method_res_type(method, 0));
		ir_mode *h_res_mode = get_type_mode(get_method_res_type(method, 1));

		assert(mode_is_float(get_irn_mode(a_f)) && "unexpected Conv call");

		/* allocate memory on frame to store args */
		/* reuse the cached entity only if it belongs to this irg */
		ent = env->irg == irg ? env->d_ll_conv : NULL;
		/* NOTE(review): the "if (! ent) {" guard around this lazy
		   allocation is not visible in this excerpt -- confirm */
			ent = env->d_ll_conv = frame_alloc_area(get_irg_frame_type(irg), 2 * gp_bytes, 16, 0);

		frame = get_irg_frame(irg);

		/*
			Now we create a node to move the value from a XMM register into
			x87 FPU because it is unknown here, which FPU is used.
			This node is killed in transformation phase when not needed.
			Otherwise it is split up into a movsd + fld
		*/
		a_f = new_rd_ia32_l_SSEtoX87(dbg, irg, block, frame, a_f, get_irg_no_mem(irg), mode_D);
		set_ia32_frame_ent(a_f, ent);
		set_ia32_use_frame(a_f);
		set_ia32_ls_mode(a_f, mode_D);

		/* store from FPU as Int */
		a_f = new_rd_ia32_l_vfist(dbg, irg, block, frame, a_f, get_irg_no_mem(irg));
		set_ia32_frame_ent(a_f, ent);
		set_ia32_use_frame(a_f);
		set_ia32_ls_mode(a_f, mode_D);
		mem = new_r_Proj(irg, block, a_f, mode_M, pn_ia32_l_vfist_M);

		/* load low part of the result */
		l_res = new_rd_ia32_l_Load(dbg, irg, block, frame, mem);
		set_ia32_frame_ent(l_res, ent);
		set_ia32_use_frame(l_res);
		set_ia32_ls_mode(l_res, l_res_mode);
		l_res = new_r_Proj(irg, block, l_res, l_res_mode, pn_ia32_l_Load_res);

		/* load hight part of the result */
		h_res = new_rd_ia32_l_Load(dbg, irg, block, frame, mem);
		set_ia32_frame_ent(h_res, ent);
		add_ia32_am_offs_int(h_res, gp_bytes);
		set_ia32_use_frame(h_res);
		set_ia32_ls_mode(h_res, h_res_mode);
		h_res = new_r_Proj(irg, block, h_res, h_res_mode, pn_ia32_l_Load_res);

		resolve_call(call, l_res, h_res, irg, block);

		/* We have a Conv long long -> float here */
		ir_node *a_l = params[BINOP_Left_Low];
		ir_node *a_h = params[BINOP_Left_High];
		ir_mode *mode_a_l = get_irn_mode(a_l);
		ir_mode *mode_a_h = get_irn_mode(a_h);
		ir_mode *fres_mode = get_type_mode(get_method_res_type(method, 0));

		assert(! mode_is_float(mode_a_l) && ! mode_is_float(mode_a_h) && "unexpected Conv call");

		/* allocate memory on frame to store args */
		ent = env->irg == irg ? env->ll_d_conv : NULL;
		/* NOTE(review): same lazy-allocation guard missing here */
			ent = env->ll_d_conv = frame_alloc_area(get_irg_frame_type(irg), 2 * gp_bytes, 16, 0);

		frame = get_irg_frame(irg);

		/* store first arg (low part) */
		store_l = new_rd_ia32_l_Store(dbg, irg, block, frame, a_l, get_irg_no_mem(irg));
		set_ia32_frame_ent(store_l, ent);
		set_ia32_use_frame(store_l);
		set_ia32_ls_mode(store_l, get_irn_mode(a_l));
		op_mem[0] = new_r_Proj(irg, block, store_l, mode_M, pn_ia32_l_Store_M);

		/* store second arg (high part) */
		store_h = new_rd_ia32_l_Store(dbg, irg, block, frame, a_h, get_irg_no_mem(irg));
		set_ia32_frame_ent(store_h, ent);
		add_ia32_am_offs_int(store_h, gp_bytes);
		set_ia32_use_frame(store_h);
		set_ia32_ls_mode(store_h, get_irn_mode(a_h));
		op_mem[1] = new_r_Proj(irg, block, store_h, mode_M, pn_ia32_l_Store_M);

		/* join both store memories before the load */
		mem = new_r_Sync(irg, block, 2, op_mem);

		/* Load arg into x87 FPU (implicit convert) */
		fres = new_rd_ia32_l_vfild(dbg, irg, block, frame, mem);
		set_ia32_frame_ent(fres, ent);
		set_ia32_use_frame(fres);
		set_ia32_ls_mode(fres, mode_D);
		mem = new_r_Proj(irg, block, fres, mode_M, pn_ia32_l_vfild_M);
		fres = new_r_Proj(irg, block, fres, fres_mode, pn_ia32_l_vfild_res);

		/*
			Now we create a node to move the loaded value into a XMM
			register because it is unknown here, which FPU is used.
			This node is killed in transformation phase when not needed.
			Otherwise it is split up into a fst + movsd
		*/
		fres = new_rd_ia32_l_X87toSSE(dbg, irg, block, frame, fres, mem, fres_mode);
		set_ia32_frame_ent(fres, ent);
		set_ia32_use_frame(fres);
		set_ia32_ls_mode(fres, fres_mode);

		/* single float result, no high half */
		resolve_call(call, fres, NULL, irg, block);

	/* NOTE(review): this assert belongs to the final "else" branch for an
	   unexpected parameter count -- branch header not visible here */
	assert(0 && "unexpected Conv call");
/* Ia32 implementation of intrinsic mapping. */
/*
 * Creates (and caches in i_ents) a global entity standing for the
 * 64bit intrinsic implementing the given op, and appends an
 * INTRINSIC_CALL record wiring that entity to the matching map_*
 * function above.  Unhandled opcodes fall back to the default
 * intrinsic creator.
 *
 * NOTE(review): this excerpt is cut: the trailing parameters
 * ("void *context)"), the "i_record elt;" declaration, the lazy-init
 * guard around NEW_ARR_F, the "case iro_*:" labels with their
 * "mapper = map_*;" assignments and breaks, and the final
 * "return *ent;" are not visible -- confirm against upstream.
 */
ir_entity *ia32_create_intrinsic_fkt(ir_type *method, const ir_op *op,
	const ir_mode *imode, const ir_mode *omode,
	ir_entity **ent = NULL;
	i_mapper_func mapper;

	/* lazily create the global record array */
		intrinsics = NEW_ARR_F(i_record, 0);

	/* pick the entity cache slot for the opcode; the corresponding
	   mapper assignments are on lines not shown here */
	switch (get_op_code(op)) {
		ent = &i_ents[iro_Add];
		ent = &i_ents[iro_Sub];
		ent = &i_ents[iro_Shl];
		ent = &i_ents[iro_Shr];
		ent = &i_ents[iro_Shrs];
		ent = &i_ents[iro_Mul];
		ent = &i_ents[iro_Minus];
		ent = &i_ents[iro_Abs];
		ent = &i_ents[iro_Div];
		ent = &i_ents[iro_Mod];
		ent = &i_ents[iro_Conv];
		/* default: fall back to the generic intrinsic creator */
		fprintf(stderr, "FIXME: unhandled op for ia32 intrinsic function %s\n", get_id_str(op->name));
		return def_create_intrinsic_fkt(method, op, imode, omode, context);

#define IDENT(s) new_id_from_chars(s, sizeof(s)-1)

	/* create the entity on first use, name mangled as "L" + op name */
	ident *id = mangle(IDENT("L"), get_op_ident(op));
	*ent = new_entity(get_glob_type(), id, method);

	/* register the mapping record for the lowering pass */
	elt.i_call.kind = INTRINSIC_CALL;
	elt.i_call.i_ent = *ent;
	elt.i_call.i_mapper = mapper;
	elt.i_call.ctx = context;
	elt.i_call.link = NULL;

	ARR_APP1(i_record, intrinsics, elt);