2 * Copyright (C) 1995-2007 University of Karlsruhe. All rights reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
21 * This file implements the mapping of 64Bit intrinsic functions to
22 * code or library calls.
23 * @author Michael Beck
36 #include "lower_intrinsics.h"
41 #include "ia32_new_nodes.h"
42 #include "bearch_ia32_t.h"
43 #include "gen_ia32_regalloc_if.h"
45 /** The array of all intrinsics that must be mapped. */
46 static i_record *intrinsics;
48 /** An array to cache all entities */
49 static ir_entity *i_ents[iro_MaxOpcode];
52 * Maps all intrinsic calls that the backend support
53 * and map all instructions the backend did not support
/*
 * Run the generic intrinsics lowering over all intrinsic records that
 * ia32_create_intrinsic_fkt() collected in the `intrinsics` flexible
 * array.  No-op when the array was never created or is empty.
 */
56 void ia32_handle_intrinsics(void) {
57 if (intrinsics && ARR_LEN(intrinsics) > 0)
58 lower_intrinsics(intrinsics, ARR_LEN(intrinsics));
61 #define BINOP_Left_Low 0
62 #define BINOP_Left_High 1
63 #define BINOP_Right_Low 2
64 #define BINOP_Right_High 3
/*
 * Replace an intrinsic Call node by a Tuple so that all Projs hanging off
 * the Call are redirected to the newly computed values:
 *  - the result output becomes a Tuple of 1 (l_res only) or 2 (l_res, h_res)
 *    entries,
 *  - both memory outputs become NoMem (the replacement code has no visible
 *    memory effect here),
 *  - the exception and value-result-base outputs become Bad (the lowered
 *    code cannot raise exceptions).
 * NOTE(review): the declaration and filling of the `in[]` result array is
 * not visible in this excerpt — presumably in[0]=l_res, in[1]=h_res; verify
 * against the full file.
 */
66 static void resolve_call(ir_node *call, ir_node *l_res, ir_node *h_res, ir_graph *irg, ir_node *block) {
71 res = new_r_Tuple(irg, block, h_res == NULL ? 1 : 2, in);
73 turn_into_tuple(call, pn_Call_max);
74 set_Tuple_pred(call, pn_Call_M_regular, get_irg_no_mem(irg));
75 set_Tuple_pred(call, pn_Call_X_except, get_irg_bad(irg));
76 set_Tuple_pred(call, pn_Call_T_result, res);
77 set_Tuple_pred(call, pn_Call_M_except, get_irg_no_mem(irg));
78 set_Tuple_pred(call, pn_Call_P_value_res_base, get_irg_bad(irg));
82 * Map an Add (a_l, a_h, b_l, b_h)
/*
 * Map a 64-bit Add intrinsic call onto the ia32 Add64Bit pseudo node.
 * The four call parameters are the 32-bit halves (a_l, a_h, b_l, b_h);
 * the two result modes are taken from the call's method type.  The low
 * and high 32-bit results are extracted from Add64Bit via Projs and the
 * Call node is dissolved through resolve_call().
 */
84 static int map_Add(ir_node *call, void *ctx) {
85 ir_graph *irg = current_ir_graph;
86 dbg_info *dbg = get_irn_dbg_info(call);
87 ir_node *block = get_nodes_block(call);
88 ir_node **params = get_Call_param_arr(call);
89 ir_type *method = get_Call_type(call);
90 ir_node *a_l = params[BINOP_Left_Low];
91 ir_node *a_h = params[BINOP_Left_High];
92 ir_node *b_l = params[BINOP_Right_Low];
93 ir_node *b_h = params[BINOP_Right_High];
94 ir_mode *l_res_mode = get_type_mode(get_method_res_type(method, 0));
95 ir_mode *h_res_mode = get_type_mode(get_method_res_type(method, 1));
96 ir_node *l_res, *h_res, *add;
98 /* l_res = a_l + b_l */
99 /* h_res = a_h + b_h + carry */
101 add = new_rd_ia32_Add64Bit(dbg, irg, block, a_l, a_h, b_l, b_h);
102 l_res = new_r_Proj(irg, block, add, l_res_mode, pn_ia32_Add64Bit_low_res);
103 h_res = new_r_Proj(irg, block, add, h_res_mode, pn_ia32_Add64Bit_high_res);
105 resolve_call(call, l_res, h_res, irg, block);
110 * Map a Sub (a_l, a_h, b_l, b_h)
/*
 * Map a 64-bit Sub intrinsic call onto the ia32 Sub64Bit pseudo node.
 * Mirrors map_Add(): parameters are the 32-bit halves of both operands,
 * the low/high results come out of Sub64Bit via Projs, and the Call is
 * dissolved with resolve_call().
 */
112 static int map_Sub(ir_node *call, void *ctx) {
113 ir_graph *irg = current_ir_graph;
114 dbg_info *dbg = get_irn_dbg_info(call);
115 ir_node *block = get_nodes_block(call);
116 ir_node **params = get_Call_param_arr(call);
117 ir_type *method = get_Call_type(call);
118 ir_node *a_l = params[BINOP_Left_Low];
119 ir_node *a_h = params[BINOP_Left_High];
120 ir_node *b_l = params[BINOP_Right_Low];
121 ir_node *b_h = params[BINOP_Right_High];
122 ir_mode *l_res_mode = get_type_mode(get_method_res_type(method, 0));
123 ir_mode *h_res_mode = get_type_mode(get_method_res_type(method, 1));
124 ir_node *l_res, *h_res, *res;
126 /* l_res = a_l - b_l */
127 /* h_res = a_h - b_h - carry */
129 res = new_rd_ia32_Sub64Bit(dbg, irg, block, a_l, a_h, b_l, b_h);
130 l_res = new_r_Proj(irg, block, res, l_res_mode, pn_ia32_Sub64Bit_low_res);
131 h_res = new_r_Proj(irg, block, res, h_res_mode, pn_ia32_Sub64Bit_high_res);
133 resolve_call(call, l_res, h_res, irg, block);
138 * Map a Shl (a_l, a_h, count)
/*
 * Map a 64-bit Shl intrinsic call (a_l, a_h, count).
 * High word: SHLD shifts a_h left, filling from a_l.
 * Low word:  plain SHL of a_l.
 * NOTE(review): x86 SHLD/SHL only use the low 5 bits of the count; whether
 * counts >= 32 need extra handling here, or are guaranteed not to occur,
 * is not visible in this excerpt — confirm against the backend contract.
 */
140 static int map_Shl(ir_node *call, void *ctx) {
141 ir_graph *irg = current_ir_graph;
142 dbg_info *dbg = get_irn_dbg_info(call);
143 ir_node *block = get_nodes_block(call);
144 ir_node **params = get_Call_param_arr(call);
145 ir_type *method = get_Call_type(call);
146 ir_node *a_l = params[BINOP_Left_Low];
147 ir_node *a_h = params[BINOP_Left_High];
148 ir_node *cnt = params[BINOP_Right_Low];
149 ir_mode *l_res_mode = get_type_mode(get_method_res_type(method, 0));
150 ir_mode *h_res_mode = get_type_mode(get_method_res_type(method, 1));
151 ir_node *l_res, *h_res;
153 /* h_res = SHLD a_h, a_l, cnt */
154 h_res = new_rd_ia32_l_ShlD(dbg, irg, block, a_h, a_l, cnt, l_res_mode);
156 /* l_res = SHL a_l, cnt */
157 l_res = new_rd_ia32_l_Shl(dbg, irg, block, a_l, cnt, h_res_mode);
159 //add_irn_dep(l_res, h_res);
161 resolve_call(call, l_res, h_res, irg, block);
166 * Map a Shr (a_l, a_h, count)
/*
 * Map a 64-bit unsigned Shr intrinsic call (a_l, a_h, count).
 * Low word:  SHRD shifts a_l right, filling from a_h.
 * High word: plain (logical) SHR of a_h.
 */
168 static int map_Shr(ir_node *call, void *ctx) {
169 ir_graph *irg = current_ir_graph;
170 dbg_info *dbg = get_irn_dbg_info(call);
171 ir_node *block = get_nodes_block(call);
172 ir_node **params = get_Call_param_arr(call);
173 ir_type *method = get_Call_type(call);
174 ir_node *a_l = params[BINOP_Left_Low];
175 ir_node *a_h = params[BINOP_Left_High];
176 ir_node *cnt = params[BINOP_Right_Low];
177 ir_mode *l_res_mode = get_type_mode(get_method_res_type(method, 0));
178 ir_mode *h_res_mode = get_type_mode(get_method_res_type(method, 1));
179 ir_node *l_res, *h_res;
181 /* l_res = SHRD a_l, a_h, cnt */
182 l_res = new_rd_ia32_l_ShrD(dbg, irg, block, a_l, a_h, cnt, l_res_mode);
184 /* h_res = SHR a_h, cnt */
185 h_res = new_rd_ia32_l_Shr(dbg, irg, block, a_h, cnt, h_res_mode);
187 //add_irn_dep(h_res, l_res);
189 resolve_call(call, l_res, h_res, irg, block);
194 * Map a Shrs (a_l, a_h, count)
/*
 * Map a 64-bit signed Shrs intrinsic call (a_l, a_h, count).
 * Identical to map_Shr() except the high word uses SAR (arithmetic shift)
 * so the sign bit of a_h is replicated.
 */
196 static int map_Shrs(ir_node *call, void *ctx) {
197 ir_graph *irg = current_ir_graph;
198 dbg_info *dbg = get_irn_dbg_info(call);
199 ir_node *block = get_nodes_block(call);
200 ir_node **params = get_Call_param_arr(call);
201 ir_type *method = get_Call_type(call);
202 ir_node *a_l = params[BINOP_Left_Low];
203 ir_node *a_h = params[BINOP_Left_High];
204 ir_node *cnt = params[BINOP_Right_Low];
205 ir_mode *l_res_mode = get_type_mode(get_method_res_type(method, 0));
206 ir_mode *h_res_mode = get_type_mode(get_method_res_type(method, 1));
207 ir_node *l_res, *h_res;
209 /* l_res = SHRD a_l, a_h, cnt */
210 l_res = new_rd_ia32_l_ShrD(dbg, irg, block, a_l, a_h, cnt, l_res_mode);
212 /* h_res = SAR a_h, cnt */
213 h_res = new_rd_ia32_l_Sar(dbg, irg, block, a_h, cnt, h_res_mode);
215 //add_irn_dep(h_res, l_res);
217 resolve_call(call, l_res, h_res, irg, block);
222 * Map a Mul (a_l, a_h, b_l, b_h)
/*
 * Map a 64-bit Mul intrinsic call (a_l, a_h, b_l, b_h) using the
 * schoolbook decomposition with 32x32->64 multiplies:
 *   l_res = low32 (a_l * b_l)
 *   h_res = high32(a_l * b_l) + a_h * b_l + a_l * b_h
 * The a_h*b_h cross product only affects bits above 64 and is dropped.
 * l_Mul delivers its 64-bit product in EDX:EAX, accessed via Projs.
 */
224 static int map_Mul(ir_node *call, void *ctx) {
225 ir_graph *irg = current_ir_graph;
226 dbg_info *dbg = get_irn_dbg_info(call);
227 ir_node *block = get_nodes_block(call);
228 ir_node **params = get_Call_param_arr(call);
229 ir_type *method = get_Call_type(call);
230 ir_node *a_l = params[BINOP_Left_Low];
231 ir_node *a_h = params[BINOP_Left_High];
232 ir_node *b_l = params[BINOP_Right_Low];
233 ir_node *b_h = params[BINOP_Right_High];
234 ir_mode *l_res_mode = get_type_mode(get_method_res_type(method, 0));
235 ir_mode *h_res_mode = get_type_mode(get_method_res_type(method, 1));
236 ir_node *l_res, *h_res, *mul, *pEDX, *add;
/* a_l * b_l: EAX is the low result word, EDX the high one */
247 mul = new_rd_ia32_l_Mul(dbg, irg, block, a_l, b_l);
248 pEDX = new_rd_Proj(dbg, irg, block, mul, l_res_mode, pn_ia32_l_Mul_EDX);
249 l_res = new_rd_Proj(dbg, irg, block, mul, l_res_mode, pn_ia32_l_Mul_EAX);
/* add both cross products into the high word */
251 mul = new_rd_ia32_l_Mul(dbg, irg, block, a_h, b_l);
252 add = new_rd_ia32_l_Add(dbg, irg, block, mul, pEDX, h_res_mode);
253 mul = new_rd_ia32_l_Mul(dbg, irg, block, a_l, b_h);
254 h_res = new_rd_ia32_l_Add(dbg, irg, block, add, mul, h_res_mode);
256 resolve_call(call, l_res, h_res, irg, block);
262 * Map a Minus (a_l, a_h)
/*
 * Map a 64-bit Minus intrinsic call (a_l, a_h) onto the ia32 Minus64Bit
 * pseudo node, computed as 0 - a.  A zero constant has to be materialized
 * in a register first because Minus64Bit takes it as an operand.
 */
264 static int map_Minus(ir_node *call, void *ctx) {
265 ir_graph *irg = current_ir_graph;
266 dbg_info *dbg = get_irn_dbg_info(call);
267 ir_node *block = get_nodes_block(call);
268 ir_node **params = get_Call_param_arr(call);
269 ir_type *method = get_Call_type(call);
270 ir_node *a_l = params[BINOP_Left_Low];
271 ir_node *a_h = params[BINOP_Left_High];
272 ir_mode *l_res_mode = get_type_mode(get_method_res_type(method, 0));
273 ir_mode *h_res_mode = get_type_mode(get_method_res_type(method, 1));
274 ir_node *l_res, *h_res, *cnst, *res;
276 /* too bad: we need 0 in a register here */
277 cnst = new_Const_long(h_res_mode, 0);
279 /* l_res = 0 - a_l */
280 /* h_res = 0 - a_h - carry */
282 res = new_rd_ia32_Minus64Bit(dbg, irg, block, cnst, a_l, a_h);
283 l_res = new_r_Proj(irg, block, res, l_res_mode, pn_ia32_Minus64Bit_low_res);
284 h_res = new_r_Proj(irg, block, res, h_res_mode, pn_ia32_Minus64Bit_high_res);
286 resolve_call(call, l_res, h_res, irg, block);
292 * Map a Abs (a_l, a_h)
/*
 * Map a 64-bit Abs intrinsic call (a_l, a_h) branch-free:
 *   sign  = a_h >>s 31          (all-ones if negative, zero otherwise)
 *   sub_x = a_x ^ sign          (conditional one's complement)
 *   res   = (sub_l, sub_h) - (sign, sign)   via Sub64Bit
 * i.e. abs(a) = (a ^ sign) - sign, the classic two's-complement trick.
 */
294 static int map_Abs(ir_node *call, void *ctx) {
295 ir_graph *irg = current_ir_graph;
296 dbg_info *dbg = get_irn_dbg_info(call);
297 ir_node *block = get_nodes_block(call);
298 ir_node **params = get_Call_param_arr(call);
299 ir_type *method = get_Call_type(call);
300 ir_node *a_l = params[BINOP_Left_Low];
301 ir_node *a_h = params[BINOP_Left_High];
302 ir_mode *l_res_mode = get_type_mode(get_method_res_type(method, 0));
303 ir_mode *h_res_mode = get_type_mode(get_method_res_type(method, 1));
304 ir_node *l_res, *h_res, *sign, *sub_l, *sub_h, *res;
307 Code inspired by gcc output :) (although gcc doubles the
308 operation for t1 as t2 and uses t1 for operations with low part
309 and t2 for operations with high part which is actually unnecessary
310 because t1 and t2 represent the same value)
316 h_res = t3 - t1 - carry
/* SAR by 31 broadcasts the sign bit of the high word */
320 sign = new_rd_ia32_l_Sar(dbg, irg, block, a_h, new_Const_long(h_res_mode, 31), h_res_mode);
321 sub_l = new_rd_ia32_l_Xor(dbg, irg, block, a_l, sign, l_res_mode);
322 sub_h = new_rd_ia32_l_Xor(dbg, irg, block, a_h, sign, h_res_mode);
323 res = new_rd_ia32_Sub64Bit(dbg, irg, block, sub_l, sub_h, sign, sign);
324 l_res = new_r_Proj(irg, block, res, l_res_mode, pn_ia32_Sub64Bit_low_res);
325 h_res = new_r_Proj(irg, block, res, h_res_mode, pn_ia32_Sub64Bit_high_res);
327 resolve_call(call, l_res, h_res, irg, block);
335 } ia32_intrinsic_divmod_t;
338 * Maps a Div/Mod (a_l, a_h, b_l, b_h)
/*
 * Common lowering for 64-bit Div and Mod intrinsic calls via the x87 FPU:
 *   1. spill both 64-bit operands to two 16-byte-aligned frame areas
 *      (two 32-bit stores each, synchronized with a Sync node),
 *   2. fild both operands (integer load converts to x87 extended),
 *   3. fdiv (IA32_INTRINSIC_DIV) or fprem (IA32_INTRINSIC_MOD),
 *   4. fist the result back to the first frame area,
 *   5. reload low and high 32-bit words with two integer Loads.
 * The frame entities are cached per-irg in the env (ll_div_op1/op2) and
 * allocated lazily on first use.
 * NOTE(review): the filling of op_mem[] before each Sync and the switch
 * statement around the division are not fully visible in this excerpt.
 */
340 static int DivMod_mapper(ir_node *call, void *ctx, ia32_intrinsic_divmod_t dmtp) {
341 ia32_intrinsic_env_t *env = ctx;
342 ir_graph *irg = current_ir_graph;
343 dbg_info *dbg = get_irn_dbg_info(call);
344 ir_node *block = get_nodes_block(call);
345 ir_node **params = get_Call_param_arr(call);
346 ir_type *method = get_Call_type(call);
347 ir_node *a_l = params[BINOP_Left_Low];
348 ir_node *a_h = params[BINOP_Left_High];
349 ir_node *b_l = params[BINOP_Right_Low];
350 ir_node *b_h = params[BINOP_Right_High];
351 ir_mode *l_res_mode = get_type_mode(get_method_res_type(method, 0));
352 ir_mode *h_res_mode = get_type_mode(get_method_res_type(method, 1));
353 int mode_bytes = get_mode_size_bytes(ia32_reg_classes[CLASS_ia32_gp].mode);
/* reuse cached frame entities when we are still in the same irg */
354 ir_entity *ent_a = env->irg == irg ? env->ll_div_op1 : NULL;
355 ir_entity *ent_b = env->irg == irg ? env->ll_div_op2 : NULL;
356 ir_node *l_res, *h_res, *frame;
357 ir_node *store_l, *store_h;
358 ir_node *op_mem[2], *mem, *fa_mem, *fb_mem;
359 ir_node *fa, *fb, *fres;
361 /* allocate memory on frame to store args */
363 ent_a = env->ll_div_op1 =
364 frame_alloc_area(get_irg_frame_type(irg), 2 * mode_bytes, 16, 0);
369 ent_b = env->ll_div_op2 =
370 frame_alloc_area(get_irg_frame_type(irg), 2 * mode_bytes, 16, 0);
374 frame = get_irg_frame(irg);
376 /* store first arg */
377 store_l = new_rd_ia32_l_Store(dbg, irg, block, frame, a_l, get_irg_no_mem(irg));
378 set_ia32_frame_ent(store_l, ent_a);
379 set_ia32_use_frame(store_l);
380 set_ia32_ls_mode(store_l, get_irn_mode(a_l));
/* high word goes mode_bytes above the low word in the same entity */
383 store_h = new_rd_ia32_l_Store(dbg, irg, block, frame, a_h, get_irg_no_mem(irg));
384 set_ia32_frame_ent(store_h, ent_a);
385 add_ia32_am_offs_int(store_h, mode_bytes);
386 set_ia32_use_frame(store_h);
387 set_ia32_ls_mode(store_h, get_irn_mode(a_h));
/* join both store memories so the fild sees them completed */
390 mem = new_r_Sync(irg, block, 2, op_mem);
392 /* load first arg into FPU */
393 fa = new_rd_ia32_l_vfild(dbg, irg, block, frame, mem);
394 set_ia32_frame_ent(fa, ent_a);
395 set_ia32_use_frame(fa);
396 set_ia32_ls_mode(fa, mode_D);
397 fa_mem = new_r_Proj(irg, block, fa, mode_M, pn_ia32_l_vfild_M);
398 fa = new_r_Proj(irg, block, fa, mode_E, pn_ia32_l_vfild_res);
400 /* store second arg */
401 store_l = new_rd_ia32_l_Store(dbg, irg, block, frame, b_l, get_irg_no_mem(irg));
402 set_ia32_frame_ent(store_l, ent_b);
403 set_ia32_use_frame(store_l);
404 set_ia32_ls_mode(store_l, get_irn_mode(b_l));
407 store_h = new_rd_ia32_l_Store(dbg, irg, block, frame, b_h, get_irg_no_mem(irg));
408 set_ia32_frame_ent(store_h, ent_b);
409 add_ia32_am_offs_int(store_h, mode_bytes);
410 set_ia32_use_frame(store_h);
411 set_ia32_ls_mode(store_h, get_irn_mode(b_h));
414 mem = new_r_Sync(irg, block, 2, op_mem);
416 /* load second arg into FPU */
417 fb = new_rd_ia32_l_vfild(dbg, irg, block, frame, mem);
418 set_ia32_frame_ent(fb, ent_b);
419 set_ia32_use_frame(fb);
420 set_ia32_ls_mode(fb, mode_D);
421 fb_mem = new_r_Proj(irg, block, fb, mode_M, pn_ia32_l_vfild_M);
422 fb = new_r_Proj(irg, block, fb, mode_E, pn_ia32_l_vfild_res);
427 mem = new_r_Sync(irg, block, 2, op_mem);
429 /* perform division */
431 case IA32_INTRINSIC_DIV:
432 fres = new_rd_ia32_l_vfdiv(dbg, irg, block, fa, fb);
433 fres = new_rd_Proj(dbg, irg, block, fres, mode_E, pn_ia32_l_vfdiv_res);
435 case IA32_INTRINSIC_MOD:
436 fres = new_rd_ia32_l_vfprem(dbg, irg, block, fa, fb, mode_E);
442 /* store back result, we use ent_a here */
443 fres = new_rd_ia32_l_vfist(dbg, irg, block, frame, fres, mem);
444 set_ia32_frame_ent(fres, ent_a);
445 set_ia32_use_frame(fres);
446 set_ia32_ls_mode(fres, mode_D);
449 /* load low part of the result */
450 l_res = new_rd_ia32_l_Load(dbg, irg, block, frame, mem);
451 set_ia32_frame_ent(l_res, ent_a);
452 set_ia32_use_frame(l_res);
453 set_ia32_ls_mode(l_res, l_res_mode);
454 l_res = new_r_Proj(irg, block, l_res, l_res_mode, pn_ia32_l_Load_res);
456 /* load hight part of the result */
457 h_res = new_rd_ia32_l_Load(dbg, irg, block, frame, mem);
458 set_ia32_frame_ent(h_res, ent_a);
459 add_ia32_am_offs_int(h_res, mode_bytes);
460 set_ia32_use_frame(h_res);
461 set_ia32_ls_mode(h_res, h_res_mode);
462 h_res = new_r_Proj(irg, block, h_res, h_res_mode, pn_ia32_l_Load_res);
465 resolve_call(call, l_res, h_res, irg, block);
/* Map a 64-bit Div intrinsic call — thin wrapper around DivMod_mapper(). */
470 static int map_Div(ir_node *call, void *ctx) {
471 return DivMod_mapper(call, ctx, IA32_INTRINSIC_DIV);
/* Map a 64-bit Mod intrinsic call — thin wrapper around DivMod_mapper(). */
474 static int map_Mod(ir_node *call, void *ctx) {
475 return DivMod_mapper(call, ctx, IA32_INTRINSIC_MOD);
479 * Maps a Conv (a_l, a_h)
/*
 * Map a Conv intrinsic call involving a 64-bit integer, in two variants
 * distinguished by the (not fully visible) parameter-count check:
 *
 *  float -> long long:  move the value into the x87 FPU (l_SSEtoX87 node,
 *  killed later if the x87 unit is used anyway), fist it to a frame area,
 *  then reload the low and high 32-bit words with integer Loads.
 *
 *  long long -> float:  store both 32-bit halves to a frame area, fild
 *  them (implicit conversion to x87 extended), then move the result
 *  toward an XMM register (l_X87toSSE node, likewise killed when not
 *  needed).
 *
 * Both directions cache their frame entity in the env (d_ll_conv /
 * ll_d_conv) per irg.  Any other parameter count asserts.
 */
481 static int map_Conv(ir_node *call, void *ctx) {
482 ia32_intrinsic_env_t *env = ctx;
483 ir_graph *irg = current_ir_graph;
484 dbg_info *dbg = get_irn_dbg_info(call);
485 ir_node *block = get_nodes_block(call);
486 ir_node **params = get_Call_param_arr(call);
487 ir_type *method = get_Call_type(call);
488 int n = get_Call_n_params(call);
489 int gp_bytes = get_mode_size_bytes(ia32_reg_classes[CLASS_ia32_gp].mode);
491 ir_node *l_res, *h_res, *frame, *fres;
492 ir_node *store_l, *store_h;
493 ir_node *op_mem[2], *mem;
496 /* We have a Conv float -> long long here */
497 ir_node *a_f = params[0];
498 ir_mode *l_res_mode = get_type_mode(get_method_res_type(method, 0));
499 ir_mode *h_res_mode = get_type_mode(get_method_res_type(method, 1));
501 assert(mode_is_float(get_irn_mode(a_f)) && "unexpected Conv call");
503 /* allocate memory on frame to store args */
504 ent = env->irg == irg ? env->d_ll_conv : NULL;
506 ent = env->d_ll_conv = frame_alloc_area(get_irg_frame_type(irg), 2 * gp_bytes, 16, 0);
511 frame = get_irg_frame(irg);
514 Now we create a node to move the value from a XMM register into
515 x87 FPU because it is unknown here, which FPU is used.
516 This node is killed in transformation phase when not needed.
517 Otherwise it is split up into a movsd + fld
519 a_f = new_rd_ia32_l_SSEtoX87(dbg, irg, block, frame, a_f, get_irg_no_mem(irg), mode_D);
520 set_ia32_frame_ent(a_f, ent);
521 set_ia32_use_frame(a_f);
522 set_ia32_ls_mode(a_f, mode_D);
524 /* store from FPU as Int */
525 a_f = new_rd_ia32_l_vfist(dbg, irg, block, frame, a_f, get_irg_no_mem(irg));
526 set_ia32_frame_ent(a_f, ent);
527 set_ia32_use_frame(a_f);
528 set_ia32_ls_mode(a_f, mode_D);
531 /* load low part of the result */
532 l_res = new_rd_ia32_l_Load(dbg, irg, block, frame, mem);
533 set_ia32_frame_ent(l_res, ent);
534 set_ia32_use_frame(l_res);
535 set_ia32_ls_mode(l_res, l_res_mode);
536 l_res = new_r_Proj(irg, block, l_res, l_res_mode, pn_ia32_l_Load_res);
538 /* load hight part of the result */
539 h_res = new_rd_ia32_l_Load(dbg, irg, block, frame, mem);
540 set_ia32_frame_ent(h_res, ent);
541 add_ia32_am_offs_int(h_res, gp_bytes);
542 set_ia32_use_frame(h_res);
543 set_ia32_ls_mode(h_res, h_res_mode);
544 h_res = new_r_Proj(irg, block, h_res, h_res_mode, pn_ia32_l_Load_res);
547 resolve_call(call, l_res, h_res, irg, block);
550 /* We have a Conv long long -> float here */
551 ir_node *a_l = params[BINOP_Left_Low];
552 ir_node *a_h = params[BINOP_Left_High];
553 ir_mode *mode_a_l = get_irn_mode(a_l);
554 ir_mode *mode_a_h = get_irn_mode(a_h);
555 ir_mode *fres_mode = get_type_mode(get_method_res_type(method, 0));
557 assert(! mode_is_float(mode_a_l) && ! mode_is_float(mode_a_h) && "unexpected Conv call");
559 /* allocate memory on frame to store args */
560 ent = env->irg == irg ? env->ll_d_conv : NULL;
562 ent = env->ll_d_conv = frame_alloc_area(get_irg_frame_type(irg), 2 * gp_bytes, 16, 0);
567 frame = get_irg_frame(irg);
569 /* store first arg (low part) */
570 store_l = new_rd_ia32_l_Store(dbg, irg, block, frame, a_l, get_irg_no_mem(irg));
571 set_ia32_frame_ent(store_l, ent);
572 set_ia32_use_frame(store_l);
573 set_ia32_ls_mode(store_l, get_irn_mode(a_l));
576 /* store second arg (high part) */
577 store_h = new_rd_ia32_l_Store(dbg, irg, block, frame, a_h, get_irg_no_mem(irg));
578 set_ia32_frame_ent(store_h, ent);
579 add_ia32_am_offs_int(store_h, gp_bytes);
580 set_ia32_use_frame(store_h);
581 set_ia32_ls_mode(store_h, get_irn_mode(a_h));
/* both halves must be in memory before the fild reads them */
584 mem = new_r_Sync(irg, block, 2, op_mem);
586 /* Load arg into x87 FPU (implicit convert) */
587 fres = new_rd_ia32_l_vfild(dbg, irg, block, frame, mem);
588 set_ia32_frame_ent(fres, ent);
589 set_ia32_use_frame(fres);
590 set_ia32_ls_mode(fres, mode_D);
591 mem = new_r_Proj(irg, block, fres, mode_M, pn_ia32_l_vfild_M);
592 fres = new_r_Proj(irg, block, fres, fres_mode, pn_ia32_l_vfild_res);
595 Now we create a node to move the loaded value into a XMM
596 register because it is unknown here, which FPU is used.
597 This node is killed in transformation phase when not needed.
598 Otherwise it is split up into a fst + movsd
600 fres = new_rd_ia32_l_X87toSSE(dbg, irg, block, frame, fres, mem, fres_mode);
601 set_ia32_frame_ent(fres, ent);
602 set_ia32_use_frame(fres);
603 set_ia32_ls_mode(fres, fres_mode);
/* scalar float result: only one result value, no high part */
606 resolve_call(call, fres, NULL, irg, block);
609 assert(0 && "unexpected Conv call");
615 /* Ia32 implementation of intrinsic mapping. */
/*
 * Create (or reuse) the entity standing for one intrinsic function and
 * register an i_record for it so ia32_handle_intrinsics() can lower the
 * calls later.  Entities are cached per opcode in i_ents[]; unsupported
 * opcodes fall back to def_create_intrinsic_fkt().
 * NOTE(review): the case labels and the `mapper = map_*` assignments of
 * the switch are not visible in this excerpt.
 */
616 ir_entity *ia32_create_intrinsic_fkt(ir_type *method, const ir_op *op,
617 const ir_mode *imode, const ir_mode *omode,
621 ir_entity **ent = NULL;
622 i_mapper_func mapper;
/* lazily create the global record array on first use */
625 intrinsics = NEW_ARR_F(i_record, 0);
627 switch (get_op_code(op)) {
629 ent = &i_ents[iro_Add];
633 ent = &i_ents[iro_Sub];
637 ent = &i_ents[iro_Shl];
641 ent = &i_ents[iro_Shr];
645 ent = &i_ents[iro_Shrs];
649 ent = &i_ents[iro_Mul];
653 ent = &i_ents[iro_Minus];
657 ent = &i_ents[iro_Abs];
661 ent = &i_ents[iro_Div];
665 ent = &i_ents[iro_Mod];
669 ent = &i_ents[iro_Conv];
673 fprintf(stderr, "FIXME: unhandled op for ia32 intrinsic function %s\n", get_id_str(op->name));
674 return def_create_intrinsic_fkt(method, op, imode, omode, context);
678 #define IDENT(s) new_id_from_chars(s, sizeof(s)-1)
/* entity name is the mangled opcode name, entity lives in the global type */
680 ident *id = mangle(IDENT("L"), get_op_ident(op));
681 *ent = new_entity(get_glob_type(), id, method);
/* record entity + mapper so the lowering pass can find this call */
684 elt.i_call.kind = INTRINSIC_CALL;
685 elt.i_call.i_ent = *ent;
686 elt.i_call.i_mapper = mapper;
687 elt.i_call.ctx = context;
688 elt.i_call.link = NULL;
690 ARR_APP1(i_record, intrinsics, elt);