 * This file implements the mapping of 64-bit intrinsic functions onto
 * generated code or library calls.
17 #include "lower_intrinsics.h"
22 #include "ia32_new_nodes.h"
23 #include "bearch_ia32_t.h"
24 #include "gen_ia32_regalloc_if.h"
/** The array of all intrinsics that must be mapped. */
static i_record *intrinsics;
/** A cache of the entities created per opcode, indexed by the mapped opcode. */
static entity *i_ents[iro_MaxOpcode];
 * Maps all intrinsic calls that the backend supports
 * and lowers all instructions the backend does not support.
void ia32_handle_intrinsics(void) {
	/* run the generic lowering on all records registered so far */
	if (intrinsics && ARR_LEN(intrinsics) > 0)
		lower_intrinsics(intrinsics, ARR_LEN(intrinsics));
/* Parameter indices of a lowered 64-bit binop Call: (a_l, a_h, b_l, b_h). */
#define BINOP_Left_Low 0
#define BINOP_Left_High 1
#define BINOP_Right_Low 2
#define BINOP_Right_High 3
/**
 * Replace the intrinsic Call node by a Tuple feeding the lowered results
 * back to the users of the Call.
 *
 * @param call   the Call node to replace
 * @param l_res  the low word of the result
 * @param h_res  the high word of the result, NULL for one-word results
 * @param irg    the graph the call belongs to
 * @param block  the block the lowered code lives in
 */
static void resolve_call(ir_node *call, ir_node *l_res, ir_node *h_res, ir_graph *irg, ir_node *block) {
	/* bundle the result value(s); a single entry if there is no high part */
	res = new_r_Tuple(irg, block, h_res == NULL ? 1 : 2, in);

	turn_into_tuple(call, pn_Call_max);
	/* the lowered code neither touches memory nor raises exceptions */
	set_Tuple_pred(call, pn_Call_M_regular, get_irg_no_mem(irg));
	set_Tuple_pred(call, pn_Call_X_except, get_irg_bad(irg));
	set_Tuple_pred(call, pn_Call_T_result, res);
	set_Tuple_pred(call, pn_Call_M_except, get_irg_no_mem(irg));
	set_Tuple_pred(call, pn_Call_P_value_res_base, get_irg_bad(irg));
63 * Map an Add (a_l, a_h, b_l, b_h)
static int map_Add(ir_node *call, void *ctx) {
	ir_graph *irg = current_ir_graph;
	dbg_info *dbg = get_irn_dbg_info(call);
	ir_node *block = get_nodes_block(call);
	ir_node **params = get_Call_param_arr(call);
	ir_type *method = get_Call_type(call);
	ir_node *a_l = params[BINOP_Left_Low];
	ir_node *a_h = params[BINOP_Left_High];
	ir_node *b_l = params[BINOP_Right_Low];
	ir_node *b_h = params[BINOP_Right_High];
	/* result modes of the two 64-bit halves come from the call's type */
	ir_mode *l_res_mode = get_type_mode(get_method_res_type(method, 0));
	ir_mode *h_res_mode = get_type_mode(get_method_res_type(method, 1));
	ir_node *l_res, *h_res;

	/* l_res = a_l + b_l */
	l_res = new_rd_ia32_l_Add(dbg, irg, block, a_l, b_l, l_res_mode);

	/* h_res = a_h + b_h + carry: AddC consumes the carry of the low Add */
	h_res = new_rd_ia32_l_AddC(dbg, irg, block, a_h, b_h, h_res_mode);

	resolve_call(call, l_res, h_res, irg, block);
90 * Map a Sub (a_l, a_h, b_l, b_h)
static int map_Sub(ir_node *call, void *ctx) {
	ir_graph *irg = current_ir_graph;
	dbg_info *dbg = get_irn_dbg_info(call);
	ir_node *block = get_nodes_block(call);
	ir_node **params = get_Call_param_arr(call);
	ir_type *method = get_Call_type(call);
	ir_node *a_l = params[BINOP_Left_Low];
	ir_node *a_h = params[BINOP_Left_High];
	ir_node *b_l = params[BINOP_Right_Low];
	ir_node *b_h = params[BINOP_Right_High];
	/* result modes of the two 64-bit halves come from the call's type */
	ir_mode *l_res_mode = get_type_mode(get_method_res_type(method, 0));
	ir_mode *h_res_mode = get_type_mode(get_method_res_type(method, 1));
	ir_node *l_res, *h_res;

	/* l_res = a_l - b_l */
	l_res = new_rd_ia32_l_Sub(dbg, irg, block, a_l, b_l, l_res_mode);

	/* h_res = a_h - b_h - carry: SubC consumes the borrow of the low Sub */
	h_res = new_rd_ia32_l_SubC(dbg, irg, block, a_h, b_h, h_res_mode);

	resolve_call(call, l_res, h_res, irg, block);
118 * Map a Shl (a_l, a_h, count)
120 static int map_Shl(ir_node *call, void *ctx) {
121 ir_graph *irg = current_ir_graph;
122 dbg_info *dbg = get_irn_dbg_info(call);
123 ir_node *block = get_nodes_block(call);
124 ir_node **params = get_Call_param_arr(call);
125 ir_type *method = get_Call_type(call);
126 ir_node *a_l = params[BINOP_Left_Low];
127 ir_node *a_h = params[BINOP_Left_High];
128 ir_node *cnt = params[BINOP_Right_Low];
129 ir_mode *l_res_mode = get_type_mode(get_method_res_type(method, 0));
130 ir_mode *h_res_mode = get_type_mode(get_method_res_type(method, 1));
131 ir_node *l_res, *h_res;
133 /* h_res = SHLD a_h, a_l, cnt */
134 l_res = new_rd_ia32_l_ShlD(dbg, irg, block, a_h, a_l, cnt, l_res_mode);
136 /* l_res = SHL a_l, cnt */
137 h_res = new_rd_ia32_l_Shl(dbg, irg, block, a_l, cnt, h_res_mode);
138 add_irn_dep(h_res, l_res);
140 resolve_call(call, l_res, h_res, irg, block);
146 * Map a Shr (a_l, a_h, count)
static int map_Shr(ir_node *call, void *ctx) {
	ir_graph *irg = current_ir_graph;
	dbg_info *dbg = get_irn_dbg_info(call);
	ir_node *block = get_nodes_block(call);
	ir_node **params = get_Call_param_arr(call);
	ir_type *method = get_Call_type(call);
	ir_node *a_l = params[BINOP_Left_Low];
	ir_node *a_h = params[BINOP_Left_High];
	ir_node *cnt = params[BINOP_Right_Low];
	/* result modes of the two 64-bit halves come from the call's type */
	ir_mode *l_res_mode = get_type_mode(get_method_res_type(method, 0));
	ir_mode *h_res_mode = get_type_mode(get_method_res_type(method, 1));
	ir_node *l_res, *h_res;

	/* l_res = SHRD a_l, a_h, cnt: low word receives bits shifted in from a_h */
	l_res = new_rd_ia32_l_ShrD(dbg, irg, block, a_l, a_h, cnt, l_res_mode);

	/* h_res = SHR a_h, cnt (logical: zero fill) */
	h_res = new_rd_ia32_l_Shr(dbg, irg, block, a_h, cnt, h_res_mode);
	/* scheduling edge: force the SHRD to be built before the SHR */
	add_irn_dep(h_res, l_res);

	resolve_call(call, l_res, h_res, irg, block);
174 * Map a Shrs (a_l, a_h, count)
static int map_Shrs(ir_node *call, void *ctx) {
	ir_graph *irg = current_ir_graph;
	dbg_info *dbg = get_irn_dbg_info(call);
	ir_node *block = get_nodes_block(call);
	ir_node **params = get_Call_param_arr(call);
	ir_type *method = get_Call_type(call);
	ir_node *a_l = params[BINOP_Left_Low];
	ir_node *a_h = params[BINOP_Left_High];
	ir_node *cnt = params[BINOP_Right_Low];
	/* result modes of the two 64-bit halves come from the call's type */
	ir_mode *l_res_mode = get_type_mode(get_method_res_type(method, 0));
	ir_mode *h_res_mode = get_type_mode(get_method_res_type(method, 1));
	ir_node *l_res, *h_res;

	/* l_res = SHRD a_l, a_h, cnt: low word receives bits shifted in from a_h */
	l_res = new_rd_ia32_l_ShrD(dbg, irg, block, a_l, a_h, cnt, l_res_mode);

	/* h_res = SAR a_h, cnt (arithmetic: sign fill) */
	h_res = new_rd_ia32_l_Shrs(dbg, irg, block, a_h, cnt, h_res_mode);
	/* scheduling edge: force the SHRD to be built before the SAR */
	add_irn_dep(h_res, l_res);

	resolve_call(call, l_res, h_res, irg, block);
202 * Map a Mul (a_l, a_h, b_l, b_h)
static int map_Mul(ir_node *call, void *ctx) {
	ir_graph *irg = current_ir_graph;
	dbg_info *dbg = get_irn_dbg_info(call);
	ir_node *block = get_nodes_block(call);
	ir_node **params = get_Call_param_arr(call);
	ir_type *method = get_Call_type(call);
	ir_node *a_l = params[BINOP_Left_Low];
	ir_node *a_h = params[BINOP_Left_High];
	ir_node *b_l = params[BINOP_Right_Low];
	ir_node *b_h = params[BINOP_Right_High];
	/* result modes of the two 64-bit halves come from the call's type */
	ir_mode *l_res_mode = get_type_mode(get_method_res_type(method, 0));
	ir_mode *h_res_mode = get_type_mode(get_method_res_type(method, 1));
	ir_node *l_res, *h_res, *mul, *pEDX, *add;

	/* widening 32x32 multiply: MulS yields EDX:EAX; EAX is the low result */
	mul = new_rd_ia32_l_MulS(dbg, irg, block, a_l, b_l);
	pEDX = new_rd_Proj(dbg, irg, block, mul, l_res_mode, pn_ia32_l_MulS_EDX);
	l_res = new_rd_Proj(dbg, irg, block, mul, l_res_mode, pn_ia32_l_MulS_EAX);

	/* h_res = high(a_l * b_l) + a_h * b_l + a_l * b_h */
	mul = new_rd_ia32_l_Mul(dbg, irg, block, a_h, b_l, h_res_mode);
	add = new_rd_ia32_l_Add(dbg, irg, block, mul, pEDX, h_res_mode);
	mul = new_rd_ia32_l_Mul(dbg, irg, block, a_l, b_h, h_res_mode);
	h_res = new_rd_ia32_l_Add(dbg, irg, block, add, mul, h_res_mode);

	resolve_call(call, l_res, h_res, irg, block);
242 * Map a Minus (a_l, a_h)
static int map_Minus(ir_node *call, void *ctx) {
	ir_graph *irg = current_ir_graph;
	dbg_info *dbg = get_irn_dbg_info(call);
	ir_node *block = get_nodes_block(call);
	ir_node **params = get_Call_param_arr(call);
	ir_type *method = get_Call_type(call);
	ir_node *a_l = params[BINOP_Left_Low];
	ir_node *a_h = params[BINOP_Left_High];
	/* result modes of the two 64-bit halves come from the call's type */
	ir_mode *l_res_mode = get_type_mode(get_method_res_type(method, 0));
	ir_mode *h_res_mode = get_type_mode(get_method_res_type(method, 1));
	ir_node *l_res, *h_res, *cnst;

	/* l_res = 0 - a_l */
	l_res = new_rd_ia32_l_Minus(dbg, irg, block, a_l, l_res_mode);

	/* h_res = 0 - a_h - carry */

	/* too bad: we need 0 in a register here */
	cnst = new_Const_long(h_res_mode, 0);
	h_res = new_rd_ia32_l_SubC(dbg, irg, block, cnst, a_h, h_res_mode);
	/* scheduling edge: keep the Minus (which produces the carry) before SubC */
	add_irn_dep(h_res, l_res);

	resolve_call(call, l_res, h_res, irg, block);
272 * Map a Abs (a_l, a_h)
274 static int map_Abs(ir_node *call, void *ctx) {
275 ir_graph *irg = current_ir_graph;
276 dbg_info *dbg = get_irn_dbg_info(call);
277 ir_node *block = get_nodes_block(call);
278 ir_node **params = get_Call_param_arr(call);
279 ir_type *method = get_Call_type(call);
280 ir_node *a_l = params[BINOP_Left_Low];
281 ir_node *a_h = params[BINOP_Left_High];
282 ir_mode *l_res_mode = get_type_mode(get_method_res_type(method, 0));
283 ir_mode *h_res_mode = get_type_mode(get_method_res_type(method, 1));
284 ir_node *l_res, *h_res, *sign, *sub_l, *sub_h;
287 Code inspired by gcc output :) (although gcc doubles the
288 operation for t1 as t2 and uses t1 for operations with low part
289 and t2 for operations with high part which is actually unnecessary
290 because t1 and t2 represent the same value)
296 h_res = t3 - t1 - carry
300 sign = new_rd_ia32_l_Shrs(dbg, irg, block, a_h, new_Const_long(h_res_mode, 31), h_res_mode);
301 sub_l = new_rd_ia32_l_Eor(dbg, irg, block, a_l, sign, l_res_mode);
302 sub_h = new_rd_ia32_l_Eor(dbg, irg, block, a_h, sign, h_res_mode);
303 l_res = new_rd_ia32_l_Sub(dbg, irg, block, sub_l, sign, l_res_mode);
304 h_res = new_rd_ia32_l_SubC(dbg, irg, block, sub_h, sign, l_res_mode);
305 add_irn_dep(h_res, l_res);
307 resolve_call(call, l_res, h_res, irg, block);
313 * Maps a Div/Mod (a_l, a_h, b_l, b_h)
static int DivMod_mapper(ir_node *call, void *ctx, int need_mod) {
	/* Lowers a 64-bit Div/Mod: both operands are spilled to the frame,
	   loaded into the x87 FPU (vfild), divided there, and the result
	   is stored back as a 64-bit integer (vfist).
	   need_mod != 0 requests Mod, computed as a - b * (a / b). */
	ia32_intrinsic_env_t *env = ctx;
	ir_graph *irg = current_ir_graph;
	dbg_info *dbg = get_irn_dbg_info(call);
	ir_node *block = get_nodes_block(call);
	ir_node **params = get_Call_param_arr(call);
	ir_type *method = get_Call_type(call);
	ir_node *a_l = params[BINOP_Left_Low];
	ir_node *a_h = params[BINOP_Left_High];
	ir_node *b_l = params[BINOP_Right_Low];
	ir_node *b_h = params[BINOP_Right_High];
	ir_mode *l_res_mode = get_type_mode(get_method_res_type(method, 0));
	ir_mode *h_res_mode = get_type_mode(get_method_res_type(method, 1));
	int mode_bytes = get_mode_size_bytes(ia32_reg_classes[CLASS_ia32_gp].mode);
	/* frame entities used as spill slots for the two operands;
	   presumably created lazily and cached in env — see guards below */
	entity *ent_a = env->ll_div_op1;
	entity *ent_b = env->ll_div_op2;
	ir_node *l_res, *h_res, *frame;
	ir_node *store_l, *store_h;
	ir_node *op_mem[2], *mem, *fa_mem, *fb_mem;
	ir_node *fa, *fb, *fres;

	/* allocate memory on frame to store args */
	ent_a = env->ll_div_op1 =
		frame_alloc_area(get_irg_frame_type(irg), 2 * mode_bytes, 16, 0);
	ent_b = env->ll_div_op2 =
		frame_alloc_area(get_irg_frame_type(irg), 2 * mode_bytes, 16, 0);

	/* buf holds the byte offset of the high word within a spill slot */
	snprintf(buf, sizeof(buf), "%d", mode_bytes);
	frame = get_irg_frame(irg);

	/* store first arg: low word at offset 0 */
	store_l = new_rd_ia32_l_Store(dbg, irg, block, frame, a_l, get_irg_no_mem(irg));
	set_ia32_frame_ent(store_l, ent_a);
	set_ia32_use_frame(store_l);
	set_ia32_ls_mode(store_l, get_irn_mode(a_l));
	op_mem[0] = new_r_Proj(irg, block, store_l, mode_M, pn_ia32_l_Store_M);

	/* high word at offset mode_bytes */
	store_h = new_rd_ia32_l_Store(dbg, irg, block, frame, a_h, get_irg_no_mem(irg));
	set_ia32_frame_ent(store_h, ent_a);
	add_ia32_am_offs(store_h, buf);
	set_ia32_use_frame(store_h);
	set_ia32_ls_mode(store_h, get_irn_mode(a_h));
	op_mem[1] = new_r_Proj(irg, block, store_h, mode_M, pn_ia32_l_Store_M);

	/* join the two store memories */
	mem = new_r_Sync(irg, block, 2, op_mem);

	/* load first arg into FPU */
	fa = new_rd_ia32_l_vfild(dbg, irg, block, frame, mem);
	set_ia32_frame_ent(fa, ent_a);
	set_ia32_use_frame(fa);
	set_ia32_ls_mode(fa, mode_D);
	fa_mem = new_r_Proj(irg, block, fa, mode_M, pn_ia32_l_vfild_M);
	fa = new_r_Proj(irg, block, fa, mode_D, pn_ia32_l_vfild_res);

	/* store second arg */
	store_l = new_rd_ia32_l_Store(dbg, irg, block, frame, b_l, get_irg_no_mem(irg));
	set_ia32_frame_ent(store_l, ent_b);
	set_ia32_use_frame(store_l);
	set_ia32_ls_mode(store_l, get_irn_mode(b_l));
	op_mem[0] = new_r_Proj(irg, block, store_l, mode_M, pn_ia32_l_Store_M);

	store_h = new_rd_ia32_l_Store(dbg, irg, block, frame, b_h, get_irg_no_mem(irg));
	set_ia32_frame_ent(store_h, ent_b);
	add_ia32_am_offs(store_h, buf);
	set_ia32_use_frame(store_h);
	set_ia32_ls_mode(store_h, get_irn_mode(b_h));
	op_mem[1] = new_r_Proj(irg, block, store_h, mode_M, pn_ia32_l_Store_M);

	mem = new_r_Sync(irg, block, 2, op_mem);

	/* load second arg into FPU */
	fb = new_rd_ia32_l_vfild(dbg, irg, block, frame, mem);
	set_ia32_frame_ent(fb, ent_b);
	set_ia32_use_frame(fb);
	set_ia32_ls_mode(fb, mode_D);
	fb_mem = new_r_Proj(irg, block, fb, mode_M, pn_ia32_l_vfild_M);
	fb = new_r_Proj(irg, block, fb, mode_D, pn_ia32_l_vfild_res);

	/* join the two load memories */
	mem = new_r_Sync(irg, block, 2, op_mem);

	/* perform division */
	fres = new_rd_ia32_l_vfdiv(dbg, irg, block, fa, fb, mode_D);

	/* we need modulo: mod = a - b * res */
	fres = new_rd_ia32_l_vfmul(dbg, irg, block, fb, fres, mode_D);
	fres = new_rd_ia32_l_vfsub(dbg, irg, block, fa, fres, mode_D);

	/* store back result, we use ent_a here */
	fres = new_rd_ia32_l_vfist(dbg, irg, block, frame, fres, mem);
	set_ia32_frame_ent(fres, ent_a);
	set_ia32_use_frame(fres);
	set_ia32_ls_mode(fres, mode_D);
	mem = new_r_Proj(irg, block, fres, mode_M, pn_ia32_l_vfist_M);

	/* load low part of the result */
	l_res = new_rd_ia32_l_Load(dbg, irg, block, frame, mem);
	set_ia32_frame_ent(l_res, ent_a);
	set_ia32_use_frame(l_res);
	set_ia32_ls_mode(l_res, l_res_mode);
	l_res = new_r_Proj(irg, block, l_res, l_res_mode, pn_ia32_l_Load_res);

	/* load high part of the result */
	h_res = new_rd_ia32_l_Load(dbg, irg, block, frame, mem);
	set_ia32_frame_ent(h_res, ent_a);
	add_ia32_am_offs(h_res, buf);
	set_ia32_use_frame(h_res);
	set_ia32_ls_mode(h_res, h_res_mode);
	h_res = new_r_Proj(irg, block, h_res, h_res_mode, pn_ia32_l_Load_res);

	resolve_call(call, l_res, h_res, irg, block);
static int map_Div(ir_node *call, void *ctx) {
	/* need_mod = 0: plain 64-bit division */
	return DivMod_mapper(call, ctx, 0);
static int map_Mod(ir_node *call, void *ctx) {
	/* need_mod = 1: compute the 64-bit modulo */
	return DivMod_mapper(call, ctx, 1);
452 * Maps a Conv (a_l, a_h)
static int map_Conv(ir_node *call, void *ctx) {
	/* Lowers a Conv between a 64-bit integer and a float type, going
	   through a frame spill slot and the x87 FPU in both directions.
	   The direction is distinguished by the number of call parameters. */
	ia32_intrinsic_env_t *env = ctx;
	ir_graph *irg = current_ir_graph;
	dbg_info *dbg = get_irn_dbg_info(call);
	ir_node *block = get_nodes_block(call);
	ir_node **params = get_Call_param_arr(call);
	ir_type *method = get_Call_type(call);
	int n = get_Call_n_params(call);
	int gp_bytes = get_mode_size_bytes(ia32_reg_classes[CLASS_ia32_gp].mode);
	ir_node *l_res, *h_res, *frame, *fres;
	ir_node *store_l, *store_h;
	ir_node *op_mem[2], *mem;

	/* We have a Conv float -> long long here */
	ir_node *a_f = params[0];
	ir_mode *l_res_mode = get_type_mode(get_method_res_type(method, 0));
	ir_mode *h_res_mode = get_type_mode(get_method_res_type(method, 1));

	assert(mode_is_float(get_irn_mode(a_f)) && "unexpected Conv call");

	/* allocate memory on frame to store args (entity cached in env) */
	ent = env->d_ll_conv;
	ent = env->d_ll_conv = frame_alloc_area(get_irg_frame_type(irg), 2 * gp_bytes, 16, 0);

	/* buf holds the byte offset of the high word within the spill slot */
	snprintf(buf, sizeof(buf), "%d", gp_bytes);
	frame = get_irg_frame(irg);

	   Now we create a node to move the value from a XMM register into
	   x87 FPU because it is unknown here, which FPU is used.
	   This node is killed in transformation phase when not needed.
	   Otherwise it is split up into a movsd + fld
	a_f = new_rd_ia32_l_SSEtoX87(dbg, irg, block, frame, a_f, get_irg_no_mem(irg), mode_D);
	set_ia32_frame_ent(a_f, ent);
	set_ia32_use_frame(a_f);
	set_ia32_ls_mode(a_f, mode_D);

	/* store from FPU as Int */
	a_f = new_rd_ia32_l_vfist(dbg, irg, block, frame, a_f, get_irg_no_mem(irg));
	set_ia32_frame_ent(a_f, ent);
	set_ia32_use_frame(a_f);
	set_ia32_ls_mode(a_f, mode_D);
	mem = new_r_Proj(irg, block, a_f, mode_M, pn_ia32_l_vfist_M);

	/* load low part of the result */
	l_res = new_rd_ia32_l_Load(dbg, irg, block, frame, mem);
	set_ia32_frame_ent(l_res, ent);
	set_ia32_use_frame(l_res);
	set_ia32_ls_mode(l_res, l_res_mode);
	l_res = new_r_Proj(irg, block, l_res, l_res_mode, pn_ia32_l_Load_res);

	/* load high part of the result */
	h_res = new_rd_ia32_l_Load(dbg, irg, block, frame, mem);
	set_ia32_frame_ent(h_res, ent);
	add_ia32_am_offs(h_res, buf);
	set_ia32_use_frame(h_res);
	set_ia32_ls_mode(h_res, h_res_mode);
	h_res = new_r_Proj(irg, block, h_res, h_res_mode, pn_ia32_l_Load_res);

	resolve_call(call, l_res, h_res, irg, block);

	/* We have a Conv long long -> float here */
	ir_node *a_l = params[BINOP_Left_Low];
	ir_node *a_h = params[BINOP_Left_High];
	ir_mode *mode_a_l = get_irn_mode(a_l);
	ir_mode *mode_a_h = get_irn_mode(a_h);
	ir_mode *fres_mode = get_type_mode(get_method_res_type(method, 0));

	assert(! mode_is_float(mode_a_l) && ! mode_is_float(mode_a_h) && "unexpected Conv call");

	/* allocate memory on frame to store args (entity cached in env) */
	ent = env->ll_d_conv;
	ent = env->ll_d_conv = frame_alloc_area(get_irg_frame_type(irg), 2 * gp_bytes, 16, 0);

	/* buf holds the byte offset of the high word within the spill slot */
	snprintf(buf, sizeof(buf), "%d", gp_bytes);
	frame = get_irg_frame(irg);

	/* store first arg (low part) */
	store_l = new_rd_ia32_l_Store(dbg, irg, block, frame, a_l, get_irg_no_mem(irg));
	set_ia32_frame_ent(store_l, ent);
	set_ia32_use_frame(store_l);
	set_ia32_ls_mode(store_l, get_irn_mode(a_l));
	op_mem[0] = new_r_Proj(irg, block, store_l, mode_M, pn_ia32_l_Store_M);

	/* store second arg (high part) */
	store_h = new_rd_ia32_l_Store(dbg, irg, block, frame, a_h, get_irg_no_mem(irg));
	set_ia32_frame_ent(store_h, ent);
	add_ia32_am_offs(store_h, buf);
	set_ia32_use_frame(store_h);
	set_ia32_ls_mode(store_h, get_irn_mode(a_h));
	op_mem[1] = new_r_Proj(irg, block, store_h, mode_M, pn_ia32_l_Store_M);

	/* join the two store memories */
	mem = new_r_Sync(irg, block, 2, op_mem);

	/* Load arg into x87 FPU (implicit convert) */
	fres = new_rd_ia32_l_vfild(dbg, irg, block, frame, mem);
	set_ia32_frame_ent(fres, ent);
	set_ia32_use_frame(fres);
	set_ia32_ls_mode(fres, mode_D);
	mem = new_r_Proj(irg, block, fres, mode_M, pn_ia32_l_vfild_M);
	fres = new_r_Proj(irg, block, fres, fres_mode, pn_ia32_l_vfild_res);

	   Now we create a node to move the loaded value into a XMM
	   register because it is unknown here, which FPU is used.
	   This node is killed in transformation phase when not needed.
	   Otherwise it is split up into a fst + movsd
	fres = new_rd_ia32_l_X87toSSE(dbg, irg, block, frame, fres, mem, fres_mode);
	set_ia32_frame_ent(fres, ent);
	set_ia32_use_frame(fres);
	set_ia32_ls_mode(fres, fres_mode);

	/* single float result: no high part */
	resolve_call(call, fres, NULL, irg, block);

	assert(0 && "unexpected Conv call");
/**
 * Ia32 implementation of intrinsic mapping: selects the mapper function
 * for the given opcode, creates (and caches) an entity for the lowered
 * operation and registers a record for ia32_handle_intrinsics().
 */
entity *ia32_create_intrinsic_fkt(ir_type *method, const ir_op *op,
                                  const ir_mode *imode, const ir_mode *omode,
	i_mapper_func mapper;
	intrinsics = NEW_ARR_F(i_record, 0);
	/* select the entity cache slot and mapper for this opcode */
	switch (get_op_code(op)) {
		ent = &i_ents[iro_Add];
		ent = &i_ents[iro_Sub];
		ent = &i_ents[iro_Shl];
		ent = &i_ents[iro_Shr];
		ent = &i_ents[iro_Shrs];
		ent = &i_ents[iro_Mul];
		ent = &i_ents[iro_Minus];
		ent = &i_ents[iro_Abs];
		ent = &i_ents[iro_Div];
		ent = &i_ents[iro_Mod];
		ent = &i_ents[iro_Conv];
		fprintf(stderr, "FIXME: unhandled op for ia32 intrinsic function %s\n", get_id_str(op->name));
		return def_create_intrinsic_fkt(method, op, imode, omode, context);
#define IDENT(s) new_id_from_chars(s, sizeof(s)-1)
	ident *id = mangle(IDENT("L"), get_op_ident(op));
	*ent = new_entity(get_glob_type(), id, method);
	/* fill in and register the intrinsics record */
	elt.i_call.kind = INTRINSIC_CALL;
	elt.i_call.i_ent = *ent;
	elt.i_call.i_mapper = mapper;
	elt.i_call.ctx = context;
	elt.i_call.link = NULL;

	ARR_APP1(i_record, intrinsics, elt);