2 * This file implements the mapping of 64Bit intrinsic functions to
3 * code or library calls.
17 #include "lower_intrinsics.h"
22 #include "ia32_new_nodes.h"
23 #include "bearch_ia32_t.h"
24 #include "gen_ia32_regalloc_if.h"
26 /** The array of all intrinsics that must be mapped. */
27 static i_record *intrinsics;
29 /** An array to cache all entities */
30 static entity *i_ents[iro_MaxOpcode];
/**
 * Maps all intrinsic calls that the backend supports
 * and maps all instructions the backend does not support.
 */
void ia32_handle_intrinsics(void) {
    /* run the generic intrinsic lowering over every recorded mapping */
    if (intrinsics && ARR_LEN(intrinsics) > 0)
        lower_intrinsics(intrinsics, ARR_LEN(intrinsics));
42 #define BINOP_Left_Low 0
43 #define BINOP_Left_High 1
44 #define BINOP_Right_Low 2
45 #define BINOP_Right_High 3
/**
 * Replaces an intrinsic Call by a Tuple so all Projs hanging off the
 * Call are redirected to the lowered result node(s).
 *
 * @param call   the intrinsic Call node to replace
 * @param l_res  node computing the low word of the result
 * @param h_res  node computing the high word, or NULL for a single result
 * @param irg    the graph holding the call
 * @param block  the block of the call
 *
 * NOTE(review): the declarations of 'res' and the 'in' array are not
 * visible in this chunk; presumably in[] holds l_res (and h_res).
 */
static void resolve_call(ir_node *call, ir_node *l_res, ir_node *h_res, ir_graph *irg, ir_node *block) {
    /* one result entry for l_res alone, two when a high part exists */
    res = new_r_Tuple(irg, block, h_res == NULL ? 1 : 2, in);

    /* the lowered code has no memory effects and can never raise,
       so memory is passed through and the exception exit is dead */
    turn_into_tuple(call, pn_Call_max);
    set_Tuple_pred(call, pn_Call_M_regular, get_irg_no_mem(irg));
    set_Tuple_pred(call, pn_Call_X_except, get_irg_bad(irg));
    set_Tuple_pred(call, pn_Call_T_result, res);
    set_Tuple_pred(call, pn_Call_M_except, get_irg_no_mem(irg));
    set_Tuple_pred(call, pn_Call_P_value_res_base, get_irg_bad(irg));
/**
 * Map an Add (a_l, a_h, b_l, b_h): 64bit addition, lowered to an add
 * of the low words followed by an add-with-carry of the high words.
 */
static int map_Add(ir_node *call, void *ctx) {
    ir_graph *irg        = current_ir_graph;
    dbg_info *dbg        = get_irn_dbg_info(call);
    ir_node  *block      = get_nodes_block(call);
    ir_node  **params    = get_Call_param_arr(call);
    ir_type  *method     = get_Call_type(call);
    ir_node  *a_l        = params[BINOP_Left_Low];
    ir_node  *a_h        = params[BINOP_Left_High];
    ir_node  *b_l        = params[BINOP_Right_Low];
    ir_node  *b_h        = params[BINOP_Right_High];
    ir_mode  *l_res_mode = get_type_mode(get_method_res_type(method, 0));
    ir_mode  *h_res_mode = get_type_mode(get_method_res_type(method, 1));
    ir_node  *l_res, *h_res;

    /* l_res = a_l + b_l */
    l_res = new_rd_ia32_l_Add(dbg, irg, block, a_l, b_l, l_res_mode);

    /* h_res = a_h + b_h + carry: l_AddC consumes the carry of the Add above */
    h_res = new_rd_ia32_l_AddC(dbg, irg, block, a_h, b_h, h_res_mode);

    resolve_call(call, l_res, h_res, irg, block);
/**
 * Map a Sub (a_l, a_h, b_l, b_h): 64bit subtraction, lowered to a sub
 * of the low words followed by a subtract-with-borrow of the high words.
 */
static int map_Sub(ir_node *call, void *ctx) {
    ir_graph *irg        = current_ir_graph;
    dbg_info *dbg        = get_irn_dbg_info(call);
    ir_node  *block      = get_nodes_block(call);
    ir_node  **params    = get_Call_param_arr(call);
    ir_type  *method     = get_Call_type(call);
    ir_node  *a_l        = params[BINOP_Left_Low];
    ir_node  *a_h        = params[BINOP_Left_High];
    ir_node  *b_l        = params[BINOP_Right_Low];
    ir_node  *b_h        = params[BINOP_Right_High];
    ir_mode  *l_res_mode = get_type_mode(get_method_res_type(method, 0));
    ir_mode  *h_res_mode = get_type_mode(get_method_res_type(method, 1));
    ir_node  *l_res, *h_res;

    /* l_res = a_l - b_l */
    l_res = new_rd_ia32_l_Sub(dbg, irg, block, a_l, b_l, l_res_mode);

    /* h_res = a_h - b_h - carry: l_SubC consumes the borrow of the Sub above */
    h_res = new_rd_ia32_l_SubC(dbg, irg, block, a_h, b_h, h_res_mode);

    resolve_call(call, l_res, h_res, irg, block);
118 * Map a Shl (a_l, a_h, count)
120 static int map_Shl(ir_node *call, void *ctx) {
121 ir_graph *irg = current_ir_graph;
122 dbg_info *dbg = get_irn_dbg_info(call);
123 ir_node *block = get_nodes_block(call);
124 ir_node **params = get_Call_param_arr(call);
125 ir_type *method = get_Call_type(call);
126 ir_node *a_l = params[BINOP_Left_Low];
127 ir_node *a_h = params[BINOP_Left_High];
128 ir_node *cnt = params[BINOP_Right_Low];
129 ir_mode *l_res_mode = get_type_mode(get_method_res_type(method, 0));
130 ir_mode *h_res_mode = get_type_mode(get_method_res_type(method, 1));
131 ir_node *l_res, *h_res;
133 /* h_res = SHLD a_h, a_l, cnt */
134 l_res = new_rd_ia32_l_ShlD(dbg, irg, block, a_h, a_l, cnt, l_res_mode);
136 /* l_res = SHL a_l, cnt */
137 h_res = new_rd_ia32_l_Shl(dbg, irg, block, a_l, cnt, h_res_mode);
139 resolve_call(call, l_res, h_res, irg, block);
/**
 * Map a Shr (a_l, a_h, count): 64bit unsigned right shift.
 * The low word combines bits from both input words (SHRD); the high
 * word is a plain logical shift (SHR) filling with zeros.
 */
static int map_Shr(ir_node *call, void *ctx) {
    ir_graph *irg        = current_ir_graph;
    dbg_info *dbg        = get_irn_dbg_info(call);
    ir_node  *block      = get_nodes_block(call);
    ir_node  **params    = get_Call_param_arr(call);
    ir_type  *method     = get_Call_type(call);
    ir_node  *a_l        = params[BINOP_Left_Low];
    ir_node  *a_h        = params[BINOP_Left_High];
    ir_node  *cnt        = params[BINOP_Right_Low];
    ir_mode  *l_res_mode = get_type_mode(get_method_res_type(method, 0));
    ir_mode  *h_res_mode = get_type_mode(get_method_res_type(method, 1));
    ir_node  *l_res, *h_res;

    /* l_res = SHRD a_l, a_h, cnt */
    l_res = new_rd_ia32_l_ShrD(dbg, irg, block, a_l, a_h, cnt, l_res_mode);

    /* h_res = SHR a_h, cnt */
    h_res = new_rd_ia32_l_Shr(dbg, irg, block, a_h, cnt, h_res_mode);

    resolve_call(call, l_res, h_res, irg, block);
/**
 * Map a Shrs (a_l, a_h, count): 64bit arithmetic right shift.
 * Identical to map_Shr except the high word uses SAR, replicating
 * the sign bit instead of shifting in zeros.
 */
static int map_Shrs(ir_node *call, void *ctx) {
    ir_graph *irg        = current_ir_graph;
    dbg_info *dbg        = get_irn_dbg_info(call);
    ir_node  *block      = get_nodes_block(call);
    ir_node  **params    = get_Call_param_arr(call);
    ir_type  *method     = get_Call_type(call);
    ir_node  *a_l        = params[BINOP_Left_Low];
    ir_node  *a_h        = params[BINOP_Left_High];
    ir_node  *cnt        = params[BINOP_Right_Low];
    ir_mode  *l_res_mode = get_type_mode(get_method_res_type(method, 0));
    ir_mode  *h_res_mode = get_type_mode(get_method_res_type(method, 1));
    ir_node  *l_res, *h_res;

    /* l_res = SHRD a_l, a_h, cnt */
    l_res = new_rd_ia32_l_ShrD(dbg, irg, block, a_l, a_h, cnt, l_res_mode);

    /* h_res = SAR a_h, cnt */
    h_res = new_rd_ia32_l_Shrs(dbg, irg, block, a_h, cnt, h_res_mode);

    resolve_call(call, l_res, h_res, irg, block);
/**
 * Map a Mul (a_l, a_h, b_l, b_h): 64bit multiplication.
 *
 * Uses the schoolbook decomposition
 *   a*b = a_l*b_l + 2^32 * (a_h*b_l + a_l*b_h)
 * (the a_h*b_h term overflows out of 64 bits and is dropped).
 */
static int map_Mul(ir_node *call, void *ctx) {
    ir_graph *irg        = current_ir_graph;
    dbg_info *dbg        = get_irn_dbg_info(call);
    ir_node  *block      = get_nodes_block(call);
    ir_node  **params    = get_Call_param_arr(call);
    ir_type  *method     = get_Call_type(call);
    ir_node  *a_l        = params[BINOP_Left_Low];
    ir_node  *a_h        = params[BINOP_Left_High];
    ir_node  *b_l        = params[BINOP_Right_Low];
    ir_node  *b_h        = params[BINOP_Right_High];
    ir_mode  *l_res_mode = get_type_mode(get_method_res_type(method, 0));
    ir_mode  *h_res_mode = get_type_mode(get_method_res_type(method, 1));
    ir_node  *l_res, *h_res, *mul, *pEDX, *add;

    /* l_MulS is the widening 32x32->64 MUL: EAX receives the low word,
       EDX the high word of a_l*b_l */
    mul   = new_rd_ia32_l_MulS(dbg, irg, block, a_l, b_l);
    pEDX  = new_rd_Proj(dbg, irg, block, mul, l_res_mode, pn_ia32_l_MulS_EDX);
    l_res = new_rd_Proj(dbg, irg, block, mul, l_res_mode, pn_ia32_l_MulS_EAX);

    /* h_res = a_h*b_l + a_l*b_h + EDX (high word of a_l*b_l) */
    mul   = new_rd_ia32_l_Mul(dbg, irg, block, a_h, b_l, h_res_mode);
    add   = new_rd_ia32_l_Add(dbg, irg, block, mul, pEDX, h_res_mode);
    mul   = new_rd_ia32_l_Mul(dbg, irg, block, a_l, b_h, h_res_mode);
    h_res = new_rd_ia32_l_Add(dbg, irg, block, add, mul, h_res_mode);

    resolve_call(call, l_res, h_res, irg, block);
/**
 * Map a Minus (a_l, a_h): 64bit negation, computed as 0 - a.
 */
static int map_Minus(ir_node *call, void *ctx) {
    ir_graph *irg        = current_ir_graph;
    dbg_info *dbg        = get_irn_dbg_info(call);
    ir_node  *block      = get_nodes_block(call);
    ir_node  **params    = get_Call_param_arr(call);
    ir_type  *method     = get_Call_type(call);
    ir_node  *a_l        = params[BINOP_Left_Low];
    ir_node  *a_h        = params[BINOP_Left_High];
    ir_mode  *l_res_mode = get_type_mode(get_method_res_type(method, 0));
    ir_mode  *h_res_mode = get_type_mode(get_method_res_type(method, 1));
    ir_node  *l_res, *h_res, *cnst;

    /* l_res = 0 - a_l (NEG also sets the borrow for the high part) */
    l_res = new_rd_ia32_l_Minus(dbg, irg, block, a_l, l_res_mode);

    /* h_res = 0 - a_h - carry */

    /* too bad: we need 0 in a register here */
    cnst = new_Const_long(h_res_mode, 0);
    h_res = new_rd_ia32_l_SubC(dbg, irg, block, cnst, a_h, h_res_mode);

    resolve_call(call, l_res, h_res, irg, block);
268 * Map a Abs (a_l, a_h)
270 static int map_Abs(ir_node *call, void *ctx) {
271 ir_graph *irg = current_ir_graph;
272 dbg_info *dbg = get_irn_dbg_info(call);
273 ir_node *block = get_nodes_block(call);
274 ir_node **params = get_Call_param_arr(call);
275 ir_type *method = get_Call_type(call);
276 ir_node *a_l = params[BINOP_Left_Low];
277 ir_node *a_h = params[BINOP_Left_High];
278 ir_mode *l_res_mode = get_type_mode(get_method_res_type(method, 0));
279 ir_mode *h_res_mode = get_type_mode(get_method_res_type(method, 1));
280 ir_node *l_res, *h_res, *sign, *sub_l, *sub_h;
283 Code inspired by gcc output :) (although gcc doubles the
284 operation for t1 as t2 and uses t1 for operations with low part
285 and t2 for operations with high part which is actually unnecessary
286 because t1 and t2 represent the same value)
292 h_res = t3 - t1 - carry
296 sign = new_rd_ia32_l_Shrs(dbg, irg, block, a_h, new_Const_long(h_res_mode, 31), h_res_mode);
297 sub_l = new_rd_ia32_l_Eor(dbg, irg, block, a_l, sign, l_res_mode);
298 sub_h = new_rd_ia32_l_Eor(dbg, irg, block, a_h, sign, h_res_mode);
299 l_res = new_rd_ia32_l_Sub(dbg, irg, block, sub_l, sign, l_res_mode);
300 h_res = new_rd_ia32_l_SubC(dbg, irg, block, sub_h, sign, l_res_mode);
302 resolve_call(call, l_res, h_res, irg, block);
/**
 * Maps a Div/Mod (a_l, a_h, b_l, b_h): 64bit division/modulo.
 *
 * Strategy: spill both operands to the frame, load them into the x87
 * FPU as doubles, divide there, store the result back as a 64bit
 * integer (fist) and reload it as two 32bit words.
 *
 * @param need_mod  non-zero to compute a mod b instead of a / b
 *
 * NOTE(review): the 'buf' declaration and the guards around the frame
 * entity allocation and the need_mod test are not visible in this chunk.
 */
static int DivMod_mapper(ir_node *call, void *ctx, int need_mod) {
    ia32_intrinsic_env_t *env = ctx;
    ir_graph *irg        = current_ir_graph;
    dbg_info *dbg        = get_irn_dbg_info(call);
    ir_node  *block      = get_nodes_block(call);
    ir_node  **params    = get_Call_param_arr(call);
    ir_type  *method     = get_Call_type(call);
    ir_node  *a_l        = params[BINOP_Left_Low];
    ir_node  *a_h        = params[BINOP_Left_High];
    ir_node  *b_l        = params[BINOP_Right_Low];
    ir_node  *b_h        = params[BINOP_Right_High];
    ir_mode  *l_res_mode = get_type_mode(get_method_res_type(method, 0));
    ir_mode  *h_res_mode = get_type_mode(get_method_res_type(method, 1));
    int      mode_bytes  = get_mode_size_bytes(ia32_reg_classes[CLASS_ia32_gp].mode);
    entity   *ent_a      = env->ll_div_op1;
    entity   *ent_b      = env->ll_div_op2;
    ir_node  *l_res, *h_res, *frame;
    ir_node  *store_l, *store_h;
    ir_node  *op_mem[2], *mem, *fa_mem, *fb_mem;
    ir_node  *fa, *fb, *fres;

    /* allocate memory on frame to store args (cached in env; the
       NULL-checks guarding these allocations are elided in this view) */
    ent_a = env->ll_div_op1 =
        frame_alloc_area(get_irg_frame_type(irg), 2 * mode_bytes, 16, 0);
    ent_b = env->ll_div_op2 =
        frame_alloc_area(get_irg_frame_type(irg), 2 * mode_bytes, 16, 0);

    /* byte offset of the high word inside each frame slot */
    snprintf(buf, sizeof(buf), "%d", mode_bytes);
    frame = get_irg_frame(irg);

    /* store first arg */
    store_l = new_rd_ia32_l_Store(dbg, irg, block, frame, a_l, get_irg_no_mem(irg));
    set_ia32_frame_ent(store_l, ent_a);
    set_ia32_use_frame(store_l);
    set_ia32_ls_mode(store_l, get_irn_mode(a_l));
    op_mem[0] = new_r_Proj(irg, block, store_l, mode_M, pn_ia32_l_Store_M);

    store_h = new_rd_ia32_l_Store(dbg, irg, block, frame, a_h, get_irg_no_mem(irg));
    set_ia32_frame_ent(store_h, ent_a);
    add_ia32_am_offs(store_h, buf);
    set_ia32_use_frame(store_h);
    set_ia32_ls_mode(store_h, get_irn_mode(a_h));
    op_mem[1] = new_r_Proj(irg, block, store_h, mode_M, pn_ia32_l_Store_M);

    /* both halves must be written before the FPU load below */
    mem = new_r_Sync(irg, block, 2, op_mem);

    /* load first arg into FPU */
    fa = new_rd_ia32_l_vfild(dbg, irg, block, frame, mem);
    set_ia32_frame_ent(fa, ent_a);
    set_ia32_use_frame(fa);
    set_ia32_ls_mode(fa, mode_D);
    fa_mem = new_r_Proj(irg, block, fa, mode_M, pn_ia32_l_vfild_M);
    fa = new_r_Proj(irg, block, fa, mode_D, pn_ia32_l_vfild_res);

    /* store second arg */
    store_l = new_rd_ia32_l_Store(dbg, irg, block, frame, b_l, get_irg_no_mem(irg));
    set_ia32_frame_ent(store_l, ent_b);
    set_ia32_use_frame(store_l);
    set_ia32_ls_mode(store_l, get_irn_mode(b_l));
    op_mem[0] = new_r_Proj(irg, block, store_l, mode_M, pn_ia32_l_Store_M);

    store_h = new_rd_ia32_l_Store(dbg, irg, block, frame, b_h, get_irg_no_mem(irg));
    set_ia32_frame_ent(store_h, ent_b);
    add_ia32_am_offs(store_h, buf);
    set_ia32_use_frame(store_h);
    set_ia32_ls_mode(store_h, get_irn_mode(b_h));
    op_mem[1] = new_r_Proj(irg, block, store_h, mode_M, pn_ia32_l_Store_M);

    mem = new_r_Sync(irg, block, 2, op_mem);

    /* load second arg into FPU */
    fb = new_rd_ia32_l_vfild(dbg, irg, block, frame, mem);
    set_ia32_frame_ent(fb, ent_b);
    set_ia32_use_frame(fb);
    set_ia32_ls_mode(fb, mode_D);
    fb_mem = new_r_Proj(irg, block, fb, mode_M, pn_ia32_l_vfild_M);
    fb = new_r_Proj(irg, block, fb, mode_D, pn_ia32_l_vfild_res);

    /* NOTE(review): op_mem[] presumably receives fa_mem/fb_mem here —
       the assignments are not visible in this chunk */
    mem = new_r_Sync(irg, block, 2, op_mem);

    /* perform division */
    fres = new_rd_ia32_l_vfdiv(dbg, irg, block, fa, fb, mode_D);

    /* we need modulo: mod = a - b * res
       (the need_mod guard is not visible in this chunk) */
    fres = new_rd_ia32_l_vfmul(dbg, irg, block, fb, fres, mode_D);
    fres = new_rd_ia32_l_vfsub(dbg, irg, block, fa, fres, mode_D);

    /* store back result, we use ent_a here */
    fres = new_rd_ia32_l_vfist(dbg, irg, block, frame, fres, mem);
    set_ia32_frame_ent(fres, ent_a);
    set_ia32_use_frame(fres);
    set_ia32_ls_mode(fres, mode_D);
    mem = new_r_Proj(irg, block, fres, mode_M, pn_ia32_l_vfist_M);

    /* load low part of the result */
    l_res = new_rd_ia32_l_Load(dbg, irg, block, frame, mem);
    set_ia32_frame_ent(l_res, ent_a);
    set_ia32_use_frame(l_res);
    set_ia32_ls_mode(l_res, l_res_mode);
    l_res = new_r_Proj(irg, block, l_res, l_res_mode, pn_ia32_l_Load_res);

    /* load high part of the result */
    h_res = new_rd_ia32_l_Load(dbg, irg, block, frame, mem);
    set_ia32_frame_ent(h_res, ent_a);
    add_ia32_am_offs(h_res, buf);
    set_ia32_use_frame(h_res);
    set_ia32_ls_mode(h_res, h_res_mode);
    h_res = new_r_Proj(irg, block, h_res, h_res_mode, pn_ia32_l_Load_res);

    resolve_call(call, l_res, h_res, irg, block);
/** Map a 64bit Div: lowered via FPU division (see DivMod_mapper). */
static int map_Div(ir_node *call, void *ctx) {
    return DivMod_mapper(call, ctx, 0);
/** Map a 64bit Mod: lowered via FPU division plus mod = a - b*res. */
static int map_Mod(ir_node *call, void *ctx) {
    return DivMod_mapper(call, ctx, 1);
/**
 * Maps a Conv: either float -> 64bit int (one float parameter) or
 * 64bit int -> float (a_l, a_h parameters), both routed through the
 * frame and the x87 FPU.
 *
 * NOTE(review): the 'entity *ent' declaration, the if/else branch
 * guards (presumably selecting on n, the parameter count) and the
 * guards around the frame entity allocations are not visible in
 * this chunk.
 */
static int map_Conv(ir_node *call, void *ctx) {
    ia32_intrinsic_env_t *env = ctx;
    ir_graph *irg      = current_ir_graph;
    dbg_info *dbg      = get_irn_dbg_info(call);
    ir_node  *block    = get_nodes_block(call);
    ir_node  **params  = get_Call_param_arr(call);
    ir_type  *method   = get_Call_type(call);
    int      n         = get_Call_n_params(call);
    int      gp_bytes  = get_mode_size_bytes(ia32_reg_classes[CLASS_ia32_gp].mode);
    ir_node  *l_res, *h_res, *frame, *fres;
    ir_node  *store_l, *store_h;
    ir_node  *op_mem[2], *mem;

    /* --- case: Conv float -> long long (branch guard elided) --- */
    /* We have a Conv float -> long long here */
    ir_node *a_f        = params[0];
    ir_mode *l_res_mode = get_type_mode(get_method_res_type(method, 0));
    ir_mode *h_res_mode = get_type_mode(get_method_res_type(method, 1));

    assert(mode_is_float(get_irn_mode(a_f)) && "unexpected Conv call");

    /* allocate memory on frame to store args (cached in env) */
    ent = env->d_ll_conv;
    ent = env->d_ll_conv = frame_alloc_area(get_irg_frame_type(irg), 2 * gp_bytes, 16, 0);

    /* byte offset of the high word inside the frame slot */
    snprintf(buf, sizeof(buf), "%d", gp_bytes);
    frame = get_irg_frame(irg);

    /*
        Now we create a node to move the value from a XMM register into
        x87 FPU because it is unknown here, which FPU is used.
        This node is killed in transformation phase when not needed.
        Otherwise it is split up into a movsd + fld.
    */
    a_f = new_rd_ia32_l_SSEtoX87(dbg, irg, block, frame, a_f, get_irg_no_mem(irg), mode_D);
    set_ia32_frame_ent(a_f, ent);
    set_ia32_use_frame(a_f);
    set_ia32_ls_mode(a_f, mode_D);

    /* store from FPU as Int */
    a_f = new_rd_ia32_l_vfist(dbg, irg, block, frame, a_f, get_irg_no_mem(irg));
    set_ia32_frame_ent(a_f, ent);
    set_ia32_use_frame(a_f);
    set_ia32_ls_mode(a_f, mode_D);
    mem = new_r_Proj(irg, block, a_f, mode_M, pn_ia32_l_vfist_M);

    /* load low part of the result */
    l_res = new_rd_ia32_l_Load(dbg, irg, block, frame, mem);
    set_ia32_frame_ent(l_res, ent);
    set_ia32_use_frame(l_res);
    set_ia32_ls_mode(l_res, l_res_mode);
    l_res = new_r_Proj(irg, block, l_res, l_res_mode, pn_ia32_l_Load_res);

    /* load high part of the result */
    h_res = new_rd_ia32_l_Load(dbg, irg, block, frame, mem);
    set_ia32_frame_ent(h_res, ent);
    add_ia32_am_offs(h_res, buf);
    set_ia32_use_frame(h_res);
    set_ia32_ls_mode(h_res, h_res_mode);
    h_res = new_r_Proj(irg, block, h_res, h_res_mode, pn_ia32_l_Load_res);

    resolve_call(call, l_res, h_res, irg, block);

    /* --- case: Conv long long -> float (branch guard elided) --- */
    /* We have a Conv long long -> float here */
    ir_node *a_l       = params[BINOP_Left_Low];
    ir_node *a_h       = params[BINOP_Left_High];
    ir_mode *mode_a_l  = get_irn_mode(a_l);
    ir_mode *mode_a_h  = get_irn_mode(a_h);
    ir_mode *fres_mode = get_type_mode(get_method_res_type(method, 0));

    assert(! mode_is_float(mode_a_l) && ! mode_is_float(mode_a_h) && "unexpected Conv call");

    /* allocate memory on frame to store args (cached in env) */
    ent = env->ll_d_conv;
    ent = env->ll_d_conv = frame_alloc_area(get_irg_frame_type(irg), 2 * gp_bytes, 16, 0);

    snprintf(buf, sizeof(buf), "%d", gp_bytes);
    frame = get_irg_frame(irg);

    /* store first arg (low part) */
    store_l = new_rd_ia32_l_Store(dbg, irg, block, frame, a_l, get_irg_no_mem(irg));
    set_ia32_frame_ent(store_l, ent);
    set_ia32_use_frame(store_l);
    set_ia32_ls_mode(store_l, get_irn_mode(a_l));
    op_mem[0] = new_r_Proj(irg, block, store_l, mode_M, pn_ia32_l_Store_M);

    /* store second arg (high part) */
    store_h = new_rd_ia32_l_Store(dbg, irg, block, frame, a_h, get_irg_no_mem(irg));
    set_ia32_frame_ent(store_h, ent);
    add_ia32_am_offs(store_h, buf);
    set_ia32_use_frame(store_h);
    set_ia32_ls_mode(store_h, get_irn_mode(a_h));
    op_mem[1] = new_r_Proj(irg, block, store_h, mode_M, pn_ia32_l_Store_M);

    /* both words must be written before the FPU load */
    mem = new_r_Sync(irg, block, 2, op_mem);

    /* Load arg into x87 FPU (implicit convert) */
    fres = new_rd_ia32_l_vfild(dbg, irg, block, frame, mem);
    set_ia32_frame_ent(fres, ent);
    set_ia32_use_frame(fres);
    set_ia32_ls_mode(fres, mode_D);
    mem = new_r_Proj(irg, block, fres, mode_M, pn_ia32_l_vfild_M);
    fres = new_r_Proj(irg, block, fres, fres_mode, pn_ia32_l_vfild_res);

    /*
        Now we create a node to move the loaded value into a XMM
        register because it is unknown here, which FPU is used.
        This node is killed in transformation phase when not needed.
        Otherwise it is split up into a fst + movsd.
    */
    fres = new_rd_ia32_l_X87toSSE(dbg, irg, block, frame, fres, mem, fres_mode);
    set_ia32_frame_ent(fres, ent);
    set_ia32_use_frame(fres);
    set_ia32_ls_mode(fres, fres_mode);

    /* single float result, no high part */
    resolve_call(call, fres, NULL, irg, block);

    /* unreachable for well-formed input (else-branch, guard elided) */
    assert(0 && "unexpected Conv call");
/*
 * Ia32 implementation of intrinsic mapping: creates (and caches in
 * i_ents) the entity for a 64bit opcode and appends an i_record with
 * the matching mapper function to the intrinsics array.
 *
 * NOTE(review): the case labels, the 'mapper = map_XXX; break;'
 * statements, the declarations of 'ent' and 'elt', the trailing
 * 'void *context' parameter and the guards around lazy initialization
 * are not visible in this chunk.
 */
entity *ia32_create_intrinsic_fkt(ir_type *method, const ir_op *op,
    const ir_mode *imode, const ir_mode *omode,
    i_mapper_func mapper;

    /* lazily create the global array of intrinsic records */
    intrinsics = NEW_ARR_F(i_record, 0);

    /* select the entity cache slot (and, in elided lines, the mapper)
       for the opcode */
    switch (get_op_code(op)) {
    ent = &i_ents[iro_Add];
    ent = &i_ents[iro_Sub];
    ent = &i_ents[iro_Shl];
    ent = &i_ents[iro_Shr];
    ent = &i_ents[iro_Shrs];
    ent = &i_ents[iro_Mul];
    ent = &i_ents[iro_Minus];
    ent = &i_ents[iro_Abs];
    ent = &i_ents[iro_Div];
    ent = &i_ents[iro_Mod];
    ent = &i_ents[iro_Conv];
    /* unknown opcode: fall back to the default intrinsic creator */
    fprintf(stderr, "FIXME: unhandled op for ia32 intrinsic function %s\n", get_id_str(op->name));
    return def_create_intrinsic_fkt(method, op, imode, omode, context);

#define IDENT(s) new_id_from_chars(s, sizeof(s)-1)
    /* create the entity on first use, name mangled as "L<opname>" */
    ident *id = mangle(IDENT("L"), get_op_ident(op));
    *ent = new_entity(get_glob_type(), id, method);

    /* record the mapping so ia32_handle_intrinsics() lowers the calls */
    elt.i_call.kind     = INTRINSIC_CALL;
    elt.i_call.i_ent    = *ent;
    elt.i_call.i_mapper = mapper;
    elt.i_call.ctx      = context;
    elt.i_call.link     = NULL;

    ARR_APP1(i_record, intrinsics, elt);