/*
 * This file implements the mapping of 64Bit intrinsic
 * functions to code or library calls.
 */
#include <assert.h>
#include <stdio.h>

#include "lower_intrinsics.h"

#include "ia32_new_nodes.h"
#include "bearch_ia32_t.h"
#include "gen_ia32_regalloc_if.h"
26 /** The array of all intrinsics that must be mapped. */
27 static i_record *intrinsics;
29 /** An array to cache all entities */
30 static ir_entity *i_ents[iro_MaxOpcode];
33 * Maps all intrinsic calls that the backend support
34 * and map all instructions the backend did not support
37 void ia32_handle_intrinsics(void) {
38 if (intrinsics && ARR_LEN(intrinsics) > 0)
39 lower_intrinsics(intrinsics, ARR_LEN(intrinsics));
42 #define BINOP_Left_Low 0
43 #define BINOP_Left_High 1
44 #define BINOP_Right_Low 2
45 #define BINOP_Right_High 3
47 static void resolve_call(ir_node *call, ir_node *l_res, ir_node *h_res, ir_graph *irg, ir_node *block) {
52 res = new_r_Tuple(irg, block, h_res == NULL ? 1 : 2, in);
54 turn_into_tuple(call, pn_Call_max);
55 set_Tuple_pred(call, pn_Call_M_regular, get_irg_no_mem(irg));
56 set_Tuple_pred(call, pn_Call_X_except, get_irg_bad(irg));
57 set_Tuple_pred(call, pn_Call_T_result, res);
58 set_Tuple_pred(call, pn_Call_M_except, get_irg_no_mem(irg));
59 set_Tuple_pred(call, pn_Call_P_value_res_base, get_irg_bad(irg));
63 * Map an Add (a_l, a_h, b_l, b_h)
65 static int map_Add(ir_node *call, void *ctx) {
66 ir_graph *irg = current_ir_graph;
67 dbg_info *dbg = get_irn_dbg_info(call);
68 ir_node *block = get_nodes_block(call);
69 ir_node **params = get_Call_param_arr(call);
70 ir_type *method = get_Call_type(call);
71 ir_node *a_l = params[BINOP_Left_Low];
72 ir_node *a_h = params[BINOP_Left_High];
73 ir_node *b_l = params[BINOP_Right_Low];
74 ir_node *b_h = params[BINOP_Right_High];
75 ir_mode *l_res_mode = get_type_mode(get_method_res_type(method, 0));
76 ir_mode *h_res_mode = get_type_mode(get_method_res_type(method, 1));
77 ir_node *l_res, *h_res, *add;
79 /* l_res = a_l + b_l */
80 /* h_res = a_h + b_h + carry */
82 add = new_rd_ia32_Add64Bit(dbg, irg, block, a_l, a_h, b_l, b_h);
83 l_res = new_r_Proj(irg, block, add, l_res_mode, pn_ia32_Add64Bit_low_res);
84 h_res = new_r_Proj(irg, block, add, h_res_mode, pn_ia32_Add64Bit_high_res);
86 resolve_call(call, l_res, h_res, irg, block);
91 * Map a Sub (a_l, a_h, b_l, b_h)
93 static int map_Sub(ir_node *call, void *ctx) {
94 ir_graph *irg = current_ir_graph;
95 dbg_info *dbg = get_irn_dbg_info(call);
96 ir_node *block = get_nodes_block(call);
97 ir_node **params = get_Call_param_arr(call);
98 ir_type *method = get_Call_type(call);
99 ir_node *a_l = params[BINOP_Left_Low];
100 ir_node *a_h = params[BINOP_Left_High];
101 ir_node *b_l = params[BINOP_Right_Low];
102 ir_node *b_h = params[BINOP_Right_High];
103 ir_mode *l_res_mode = get_type_mode(get_method_res_type(method, 0));
104 ir_mode *h_res_mode = get_type_mode(get_method_res_type(method, 1));
105 ir_node *l_res, *h_res, *res;
107 /* l_res = a_l - b_l */
108 /* h_res = a_h - b_h - carry */
110 res = new_rd_ia32_Sub64Bit(dbg, irg, block, a_l, a_h, b_l, b_h);
111 l_res = new_r_Proj(irg, block, res, l_res_mode, pn_ia32_Sub64Bit_low_res);
112 h_res = new_r_Proj(irg, block, res, h_res_mode, pn_ia32_Sub64Bit_high_res);
114 resolve_call(call, l_res, h_res, irg, block);
119 * Map a Shl (a_l, a_h, count)
121 static int map_Shl(ir_node *call, void *ctx) {
122 ir_graph *irg = current_ir_graph;
123 dbg_info *dbg = get_irn_dbg_info(call);
124 ir_node *block = get_nodes_block(call);
125 ir_node **params = get_Call_param_arr(call);
126 ir_type *method = get_Call_type(call);
127 ir_node *a_l = params[BINOP_Left_Low];
128 ir_node *a_h = params[BINOP_Left_High];
129 ir_node *cnt = params[BINOP_Right_Low];
130 ir_mode *l_res_mode = get_type_mode(get_method_res_type(method, 0));
131 ir_mode *h_res_mode = get_type_mode(get_method_res_type(method, 1));
132 ir_node *l_res, *h_res;
134 /* h_res = SHLD a_h, a_l, cnt */
135 h_res = new_rd_ia32_l_ShlD(dbg, irg, block, a_h, a_l, cnt, l_res_mode);
137 /* l_res = SHL a_l, cnt */
138 l_res = new_rd_ia32_l_Shl(dbg, irg, block, a_l, cnt, h_res_mode);
140 //add_irn_dep(l_res, h_res);
142 resolve_call(call, l_res, h_res, irg, block);
147 * Map a Shr (a_l, a_h, count)
149 static int map_Shr(ir_node *call, void *ctx) {
150 ir_graph *irg = current_ir_graph;
151 dbg_info *dbg = get_irn_dbg_info(call);
152 ir_node *block = get_nodes_block(call);
153 ir_node **params = get_Call_param_arr(call);
154 ir_type *method = get_Call_type(call);
155 ir_node *a_l = params[BINOP_Left_Low];
156 ir_node *a_h = params[BINOP_Left_High];
157 ir_node *cnt = params[BINOP_Right_Low];
158 ir_mode *l_res_mode = get_type_mode(get_method_res_type(method, 0));
159 ir_mode *h_res_mode = get_type_mode(get_method_res_type(method, 1));
160 ir_node *l_res, *h_res;
162 /* l_res = SHRD a_l, a_h, cnt */
163 l_res = new_rd_ia32_l_ShrD(dbg, irg, block, a_l, a_h, cnt, l_res_mode);
165 /* h_res = SHR a_h, cnt */
166 h_res = new_rd_ia32_l_Shr(dbg, irg, block, a_h, cnt, h_res_mode);
168 //add_irn_dep(h_res, l_res);
170 resolve_call(call, l_res, h_res, irg, block);
175 * Map a Shrs (a_l, a_h, count)
177 static int map_Shrs(ir_node *call, void *ctx) {
178 ir_graph *irg = current_ir_graph;
179 dbg_info *dbg = get_irn_dbg_info(call);
180 ir_node *block = get_nodes_block(call);
181 ir_node **params = get_Call_param_arr(call);
182 ir_type *method = get_Call_type(call);
183 ir_node *a_l = params[BINOP_Left_Low];
184 ir_node *a_h = params[BINOP_Left_High];
185 ir_node *cnt = params[BINOP_Right_Low];
186 ir_mode *l_res_mode = get_type_mode(get_method_res_type(method, 0));
187 ir_mode *h_res_mode = get_type_mode(get_method_res_type(method, 1));
188 ir_node *l_res, *h_res;
190 /* l_res = SHRD a_l, a_h, cnt */
191 l_res = new_rd_ia32_l_ShrD(dbg, irg, block, a_l, a_h, cnt, l_res_mode);
193 /* h_res = SAR a_h, cnt */
194 h_res = new_rd_ia32_l_Sar(dbg, irg, block, a_h, cnt, h_res_mode);
196 //add_irn_dep(h_res, l_res);
198 resolve_call(call, l_res, h_res, irg, block);
203 * Map a Mul (a_l, a_h, b_l, b_h)
205 static int map_Mul(ir_node *call, void *ctx) {
206 ir_graph *irg = current_ir_graph;
207 dbg_info *dbg = get_irn_dbg_info(call);
208 ir_node *block = get_nodes_block(call);
209 ir_node **params = get_Call_param_arr(call);
210 ir_type *method = get_Call_type(call);
211 ir_node *a_l = params[BINOP_Left_Low];
212 ir_node *a_h = params[BINOP_Left_High];
213 ir_node *b_l = params[BINOP_Right_Low];
214 ir_node *b_h = params[BINOP_Right_High];
215 ir_mode *l_res_mode = get_type_mode(get_method_res_type(method, 0));
216 ir_mode *h_res_mode = get_type_mode(get_method_res_type(method, 1));
217 ir_node *l_res, *h_res, *mul, *pEDX, *add;
228 mul = new_rd_ia32_l_Mul(dbg, irg, block, a_l, b_l);
229 pEDX = new_rd_Proj(dbg, irg, block, mul, l_res_mode, pn_ia32_l_Mul_EDX);
230 l_res = new_rd_Proj(dbg, irg, block, mul, l_res_mode, pn_ia32_l_Mul_EAX);
232 mul = new_rd_ia32_l_Mul(dbg, irg, block, a_h, b_l);
233 add = new_rd_ia32_l_Add(dbg, irg, block, mul, pEDX, h_res_mode);
234 mul = new_rd_ia32_l_Mul(dbg, irg, block, a_l, b_h);
235 h_res = new_rd_ia32_l_Add(dbg, irg, block, add, mul, h_res_mode);
237 resolve_call(call, l_res, h_res, irg, block);
243 * Map a Minus (a_l, a_h)
245 static int map_Minus(ir_node *call, void *ctx) {
246 ir_graph *irg = current_ir_graph;
247 dbg_info *dbg = get_irn_dbg_info(call);
248 ir_node *block = get_nodes_block(call);
249 ir_node **params = get_Call_param_arr(call);
250 ir_type *method = get_Call_type(call);
251 ir_node *a_l = params[BINOP_Left_Low];
252 ir_node *a_h = params[BINOP_Left_High];
253 ir_mode *l_res_mode = get_type_mode(get_method_res_type(method, 0));
254 ir_mode *h_res_mode = get_type_mode(get_method_res_type(method, 1));
255 ir_node *l_res, *h_res, *cnst, *res;
257 /* too bad: we need 0 in a register here */
258 cnst = new_Const_long(h_res_mode, 0);
260 /* l_res = 0 - a_l */
261 /* h_res = 0 - a_h - carry */
263 res = new_rd_ia32_Minus64Bit(dbg, irg, block, cnst, a_l, a_h);
264 l_res = new_r_Proj(irg, block, res, l_res_mode, pn_ia32_Minus64Bit_low_res);
265 h_res = new_r_Proj(irg, block, res, h_res_mode, pn_ia32_Minus64Bit_high_res);
267 resolve_call(call, l_res, h_res, irg, block);
273 * Map a Abs (a_l, a_h)
275 static int map_Abs(ir_node *call, void *ctx) {
276 ir_graph *irg = current_ir_graph;
277 dbg_info *dbg = get_irn_dbg_info(call);
278 ir_node *block = get_nodes_block(call);
279 ir_node **params = get_Call_param_arr(call);
280 ir_type *method = get_Call_type(call);
281 ir_node *a_l = params[BINOP_Left_Low];
282 ir_node *a_h = params[BINOP_Left_High];
283 ir_mode *l_res_mode = get_type_mode(get_method_res_type(method, 0));
284 ir_mode *h_res_mode = get_type_mode(get_method_res_type(method, 1));
285 ir_node *l_res, *h_res, *sign, *sub_l, *sub_h, *res;
288 Code inspired by gcc output :) (although gcc doubles the
289 operation for t1 as t2 and uses t1 for operations with low part
290 and t2 for operations with high part which is actually unnecessary
291 because t1 and t2 represent the same value)
297 h_res = t3 - t1 - carry
301 sign = new_rd_ia32_l_Sar(dbg, irg, block, a_h, new_Const_long(h_res_mode, 31), h_res_mode);
302 sub_l = new_rd_ia32_l_Xor(dbg, irg, block, a_l, sign, l_res_mode);
303 sub_h = new_rd_ia32_l_Xor(dbg, irg, block, a_h, sign, h_res_mode);
304 res = new_rd_ia32_Sub64Bit(dbg, irg, block, sub_l, sub_h, sign, sign);
305 l_res = new_r_Proj(irg, block, res, l_res_mode, pn_ia32_Sub64Bit_low_res);
306 h_res = new_r_Proj(irg, block, res, h_res_mode, pn_ia32_Sub64Bit_high_res);
308 resolve_call(call, l_res, h_res, irg, block);
316 } ia32_intrinsic_divmod_t;
319 * Maps a Div/Mod (a_l, a_h, b_l, b_h)
321 static int DivMod_mapper(ir_node *call, void *ctx, ia32_intrinsic_divmod_t dmtp) {
322 ia32_intrinsic_env_t *env = ctx;
323 ir_graph *irg = current_ir_graph;
324 dbg_info *dbg = get_irn_dbg_info(call);
325 ir_node *block = get_nodes_block(call);
326 ir_node **params = get_Call_param_arr(call);
327 ir_type *method = get_Call_type(call);
328 ir_node *a_l = params[BINOP_Left_Low];
329 ir_node *a_h = params[BINOP_Left_High];
330 ir_node *b_l = params[BINOP_Right_Low];
331 ir_node *b_h = params[BINOP_Right_High];
332 ir_mode *l_res_mode = get_type_mode(get_method_res_type(method, 0));
333 ir_mode *h_res_mode = get_type_mode(get_method_res_type(method, 1));
334 int mode_bytes = get_mode_size_bytes(ia32_reg_classes[CLASS_ia32_gp].mode);
335 ir_entity *ent_a = env->irg == irg ? env->ll_div_op1 : NULL;
336 ir_entity *ent_b = env->irg == irg ? env->ll_div_op2 : NULL;
337 ir_node *l_res, *h_res, *frame;
338 ir_node *store_l, *store_h;
339 ir_node *op_mem[2], *mem, *fa_mem, *fb_mem;
340 ir_node *fa, *fb, *fres;
342 /* allocate memory on frame to store args */
344 ent_a = env->ll_div_op1 =
345 frame_alloc_area(get_irg_frame_type(irg), 2 * mode_bytes, 16, 0);
350 ent_b = env->ll_div_op2 =
351 frame_alloc_area(get_irg_frame_type(irg), 2 * mode_bytes, 16, 0);
355 frame = get_irg_frame(irg);
357 /* store first arg */
358 store_l = new_rd_ia32_l_Store(dbg, irg, block, frame, a_l, get_irg_no_mem(irg));
359 set_ia32_frame_ent(store_l, ent_a);
360 set_ia32_use_frame(store_l);
361 set_ia32_ls_mode(store_l, get_irn_mode(a_l));
364 store_h = new_rd_ia32_l_Store(dbg, irg, block, frame, a_h, get_irg_no_mem(irg));
365 set_ia32_frame_ent(store_h, ent_a);
366 add_ia32_am_offs_int(store_h, mode_bytes);
367 set_ia32_use_frame(store_h);
368 set_ia32_ls_mode(store_h, get_irn_mode(a_h));
371 mem = new_r_Sync(irg, block, 2, op_mem);
373 /* load first arg into FPU */
374 fa = new_rd_ia32_l_vfild(dbg, irg, block, frame, mem);
375 set_ia32_frame_ent(fa, ent_a);
376 set_ia32_use_frame(fa);
377 set_ia32_ls_mode(fa, mode_D);
378 fa_mem = new_r_Proj(irg, block, fa, mode_M, pn_ia32_l_vfild_M);
379 fa = new_r_Proj(irg, block, fa, mode_E, pn_ia32_l_vfild_res);
381 /* store second arg */
382 store_l = new_rd_ia32_l_Store(dbg, irg, block, frame, b_l, get_irg_no_mem(irg));
383 set_ia32_frame_ent(store_l, ent_b);
384 set_ia32_use_frame(store_l);
385 set_ia32_ls_mode(store_l, get_irn_mode(b_l));
388 store_h = new_rd_ia32_l_Store(dbg, irg, block, frame, b_h, get_irg_no_mem(irg));
389 set_ia32_frame_ent(store_h, ent_b);
390 add_ia32_am_offs_int(store_h, mode_bytes);
391 set_ia32_use_frame(store_h);
392 set_ia32_ls_mode(store_h, get_irn_mode(b_h));
395 mem = new_r_Sync(irg, block, 2, op_mem);
397 /* load second arg into FPU */
398 fb = new_rd_ia32_l_vfild(dbg, irg, block, frame, mem);
399 set_ia32_frame_ent(fb, ent_b);
400 set_ia32_use_frame(fb);
401 set_ia32_ls_mode(fb, mode_D);
402 fb_mem = new_r_Proj(irg, block, fb, mode_M, pn_ia32_l_vfild_M);
403 fb = new_r_Proj(irg, block, fb, mode_E, pn_ia32_l_vfild_res);
408 mem = new_r_Sync(irg, block, 2, op_mem);
410 /* perform division */
412 case IA32_INTRINSIC_DIV:
413 fres = new_rd_ia32_l_vfdiv(dbg, irg, block, fa, fb);
414 fres = new_rd_Proj(dbg, irg, block, fres, mode_E, pn_ia32_l_vfdiv_res);
416 case IA32_INTRINSIC_MOD:
417 fres = new_rd_ia32_l_vfprem(dbg, irg, block, fa, fb, mode_E);
423 /* store back result, we use ent_a here */
424 fres = new_rd_ia32_l_vfist(dbg, irg, block, frame, fres, mem);
425 set_ia32_frame_ent(fres, ent_a);
426 set_ia32_use_frame(fres);
427 set_ia32_ls_mode(fres, mode_D);
430 /* load low part of the result */
431 l_res = new_rd_ia32_l_Load(dbg, irg, block, frame, mem);
432 set_ia32_frame_ent(l_res, ent_a);
433 set_ia32_use_frame(l_res);
434 set_ia32_ls_mode(l_res, l_res_mode);
435 l_res = new_r_Proj(irg, block, l_res, l_res_mode, pn_ia32_l_Load_res);
437 /* load hight part of the result */
438 h_res = new_rd_ia32_l_Load(dbg, irg, block, frame, mem);
439 set_ia32_frame_ent(h_res, ent_a);
440 add_ia32_am_offs_int(h_res, mode_bytes);
441 set_ia32_use_frame(h_res);
442 set_ia32_ls_mode(h_res, h_res_mode);
443 h_res = new_r_Proj(irg, block, h_res, h_res_mode, pn_ia32_l_Load_res);
446 resolve_call(call, l_res, h_res, irg, block);
451 static int map_Div(ir_node *call, void *ctx) {
452 return DivMod_mapper(call, ctx, IA32_INTRINSIC_DIV);
455 static int map_Mod(ir_node *call, void *ctx) {
456 return DivMod_mapper(call, ctx, IA32_INTRINSIC_MOD);
460 * Maps a Conv (a_l, a_h)
462 static int map_Conv(ir_node *call, void *ctx) {
463 ia32_intrinsic_env_t *env = ctx;
464 ir_graph *irg = current_ir_graph;
465 dbg_info *dbg = get_irn_dbg_info(call);
466 ir_node *block = get_nodes_block(call);
467 ir_node **params = get_Call_param_arr(call);
468 ir_type *method = get_Call_type(call);
469 int n = get_Call_n_params(call);
470 int gp_bytes = get_mode_size_bytes(ia32_reg_classes[CLASS_ia32_gp].mode);
472 ir_node *l_res, *h_res, *frame, *fres;
473 ir_node *store_l, *store_h;
474 ir_node *op_mem[2], *mem;
477 /* We have a Conv float -> long long here */
478 ir_node *a_f = params[0];
479 ir_mode *l_res_mode = get_type_mode(get_method_res_type(method, 0));
480 ir_mode *h_res_mode = get_type_mode(get_method_res_type(method, 1));
482 assert(mode_is_float(get_irn_mode(a_f)) && "unexpected Conv call");
484 /* allocate memory on frame to store args */
485 ent = env->irg == irg ? env->d_ll_conv : NULL;
487 ent = env->d_ll_conv = frame_alloc_area(get_irg_frame_type(irg), 2 * gp_bytes, 16, 0);
492 frame = get_irg_frame(irg);
495 Now we create a node to move the value from a XMM register into
496 x87 FPU because it is unknown here, which FPU is used.
497 This node is killed in transformation phase when not needed.
498 Otherwise it is split up into a movsd + fld
500 a_f = new_rd_ia32_l_SSEtoX87(dbg, irg, block, frame, a_f, get_irg_no_mem(irg), mode_D);
501 set_ia32_frame_ent(a_f, ent);
502 set_ia32_use_frame(a_f);
503 set_ia32_ls_mode(a_f, mode_D);
505 /* store from FPU as Int */
506 a_f = new_rd_ia32_l_vfist(dbg, irg, block, frame, a_f, get_irg_no_mem(irg));
507 set_ia32_frame_ent(a_f, ent);
508 set_ia32_use_frame(a_f);
509 set_ia32_ls_mode(a_f, mode_D);
512 /* load low part of the result */
513 l_res = new_rd_ia32_l_Load(dbg, irg, block, frame, mem);
514 set_ia32_frame_ent(l_res, ent);
515 set_ia32_use_frame(l_res);
516 set_ia32_ls_mode(l_res, l_res_mode);
517 l_res = new_r_Proj(irg, block, l_res, l_res_mode, pn_ia32_l_Load_res);
519 /* load hight part of the result */
520 h_res = new_rd_ia32_l_Load(dbg, irg, block, frame, mem);
521 set_ia32_frame_ent(h_res, ent);
522 add_ia32_am_offs_int(h_res, gp_bytes);
523 set_ia32_use_frame(h_res);
524 set_ia32_ls_mode(h_res, h_res_mode);
525 h_res = new_r_Proj(irg, block, h_res, h_res_mode, pn_ia32_l_Load_res);
528 resolve_call(call, l_res, h_res, irg, block);
531 /* We have a Conv long long -> float here */
532 ir_node *a_l = params[BINOP_Left_Low];
533 ir_node *a_h = params[BINOP_Left_High];
534 ir_mode *mode_a_l = get_irn_mode(a_l);
535 ir_mode *mode_a_h = get_irn_mode(a_h);
536 ir_mode *fres_mode = get_type_mode(get_method_res_type(method, 0));
538 assert(! mode_is_float(mode_a_l) && ! mode_is_float(mode_a_h) && "unexpected Conv call");
540 /* allocate memory on frame to store args */
541 ent = env->irg == irg ? env->ll_d_conv : NULL;
543 ent = env->ll_d_conv = frame_alloc_area(get_irg_frame_type(irg), 2 * gp_bytes, 16, 0);
548 frame = get_irg_frame(irg);
550 /* store first arg (low part) */
551 store_l = new_rd_ia32_l_Store(dbg, irg, block, frame, a_l, get_irg_no_mem(irg));
552 set_ia32_frame_ent(store_l, ent);
553 set_ia32_use_frame(store_l);
554 set_ia32_ls_mode(store_l, get_irn_mode(a_l));
557 /* store second arg (high part) */
558 store_h = new_rd_ia32_l_Store(dbg, irg, block, frame, a_h, get_irg_no_mem(irg));
559 set_ia32_frame_ent(store_h, ent);
560 add_ia32_am_offs_int(store_h, gp_bytes);
561 set_ia32_use_frame(store_h);
562 set_ia32_ls_mode(store_h, get_irn_mode(a_h));
565 mem = new_r_Sync(irg, block, 2, op_mem);
567 /* Load arg into x87 FPU (implicit convert) */
568 fres = new_rd_ia32_l_vfild(dbg, irg, block, frame, mem);
569 set_ia32_frame_ent(fres, ent);
570 set_ia32_use_frame(fres);
571 set_ia32_ls_mode(fres, mode_D);
572 mem = new_r_Proj(irg, block, fres, mode_M, pn_ia32_l_vfild_M);
573 fres = new_r_Proj(irg, block, fres, fres_mode, pn_ia32_l_vfild_res);
576 Now we create a node to move the loaded value into a XMM
577 register because it is unknown here, which FPU is used.
578 This node is killed in transformation phase when not needed.
579 Otherwise it is split up into a fst + movsd
581 fres = new_rd_ia32_l_X87toSSE(dbg, irg, block, frame, fres, mem, fres_mode);
582 set_ia32_frame_ent(fres, ent);
583 set_ia32_use_frame(fres);
584 set_ia32_ls_mode(fres, fres_mode);
587 resolve_call(call, fres, NULL, irg, block);
590 assert(0 && "unexpected Conv call");
596 /* Ia32 implementation of intrinsic mapping. */
597 ir_entity *ia32_create_intrinsic_fkt(ir_type *method, const ir_op *op,
598 const ir_mode *imode, const ir_mode *omode,
602 ir_entity **ent = NULL;
603 i_mapper_func mapper;
606 intrinsics = NEW_ARR_F(i_record, 0);
608 switch (get_op_code(op)) {
610 ent = &i_ents[iro_Add];
614 ent = &i_ents[iro_Sub];
618 ent = &i_ents[iro_Shl];
622 ent = &i_ents[iro_Shr];
626 ent = &i_ents[iro_Shrs];
630 ent = &i_ents[iro_Mul];
634 ent = &i_ents[iro_Minus];
638 ent = &i_ents[iro_Abs];
642 ent = &i_ents[iro_Div];
646 ent = &i_ents[iro_Mod];
650 ent = &i_ents[iro_Conv];
654 fprintf(stderr, "FIXME: unhandled op for ia32 intrinsic function %s\n", get_id_str(op->name));
655 return def_create_intrinsic_fkt(method, op, imode, omode, context);
659 #define IDENT(s) new_id_from_chars(s, sizeof(s)-1)
661 ident *id = mangle(IDENT("L"), get_op_ident(op));
662 *ent = new_entity(get_glob_type(), id, method);
665 elt.i_call.kind = INTRINSIC_CALL;
666 elt.i_call.i_ent = *ent;
667 elt.i_call.i_mapper = mapper;
668 elt.i_call.ctx = context;
669 elt.i_call.link = NULL;
671 ARR_APP1(i_record, intrinsics, elt);