X-Git-Url: http://nsz.repo.hu/git/?a=blobdiff_plain;f=ir%2Fbe%2Fia32%2Fia32_intrinsics.c;h=52b0e95f33c0c345177b073d2edd21536e9a0cb2;hb=a1a465eb2b3f54027b29f829423fffd0396937f4;hp=aca46d7c436dfeb4041ba2f53f886b3cbe6ff1a1;hpb=b4c59a03fb7871ecb8248636d62dc4f20612b5cf;p=libfirm diff --git a/ir/be/ia32/ia32_intrinsics.c b/ir/be/ia32/ia32_intrinsics.c index aca46d7c4..52b0e95f3 100644 --- a/ir/be/ia32/ia32_intrinsics.c +++ b/ir/be/ia32/ia32_intrinsics.c @@ -20,6 +20,8 @@ #include "array.h" #include "ia32_new_nodes.h" +#include "bearch_ia32_t.h" +#include "gen_ia32_regalloc_if.h" /** The array of all intrinsics that must be mapped. */ static i_record *intrinsics; @@ -47,13 +49,13 @@ static void resolve_call(ir_node *call, ir_node *l_res, ir_node *h_res, ir_graph in[0] = l_res; in[1] = h_res; - res = new_r_Tuple(irg, block, 2, in); + res = new_r_Tuple(irg, block, h_res == NULL ? 1 : 2, in); turn_into_tuple(call, pn_Call_max); set_Tuple_pred(call, pn_Call_M_regular, get_irg_no_mem(irg)); set_Tuple_pred(call, pn_Call_X_except, get_irg_bad(irg)); set_Tuple_pred(call, pn_Call_T_result, res); - set_Tuple_pred(call, pn_Call_M_except, get_irg_bad(irg)); + set_Tuple_pred(call, pn_Call_M_except, get_irg_no_mem(irg)); set_Tuple_pred(call, pn_Call_P_value_res_base, get_irg_bad(irg)); } @@ -112,6 +114,478 @@ static int map_Sub(ir_node *call, void *ctx) { return 1; } +/** + * Map a Shl (a_l, a_h, count) + */ +static int map_Shl(ir_node *call, void *ctx) { + ir_graph *irg = current_ir_graph; + dbg_info *dbg = get_irn_dbg_info(call); + ir_node *block = get_nodes_block(call); + ir_node **params = get_Call_param_arr(call); + ir_type *method = get_Call_type(call); + ir_node *a_l = params[BINOP_Left_Low]; + ir_node *a_h = params[BINOP_Left_High]; + ir_node *cnt = params[BINOP_Right_Low]; + ir_mode *l_res_mode = get_type_mode(get_method_res_type(method, 0)); + ir_mode *h_res_mode = get_type_mode(get_method_res_type(method, 1)); + ir_node *l_res, *h_res; + + /* h_res = SHLD a_h, a_l, cnt */ + l_res = new_rd_ia32_l_ShlD(dbg, irg, block, a_h, a_l, cnt, l_res_mode); + + /* l_res = SHL a_l, cnt */ + h_res = new_rd_ia32_l_Shl(dbg, irg, block, a_l, cnt, h_res_mode); + add_irn_dep(h_res, l_res); + + resolve_call(call, l_res, h_res, irg, block); + + return 1; +} + +/** + * Map a Shr (a_l, a_h, count) + */ +static int map_Shr(ir_node *call, void *ctx) { + ir_graph *irg = current_ir_graph; + dbg_info *dbg = get_irn_dbg_info(call); + ir_node *block = get_nodes_block(call); + ir_node **params = get_Call_param_arr(call); + ir_type *method = get_Call_type(call); + ir_node *a_l = params[BINOP_Left_Low]; + ir_node *a_h = params[BINOP_Left_High]; + ir_node *cnt = params[BINOP_Right_Low]; + ir_mode *l_res_mode = get_type_mode(get_method_res_type(method, 0)); + ir_mode *h_res_mode = get_type_mode(get_method_res_type(method, 1)); + ir_node *l_res, *h_res; + + /* l_res = SHRD a_l, a_h, cnt */ + l_res = new_rd_ia32_l_ShrD(dbg, irg, block, a_l, a_h, cnt, l_res_mode); + + /* h_res = SHR a_h, cnt */ + h_res = new_rd_ia32_l_Shr(dbg, irg, block, a_h, cnt, h_res_mode); + add_irn_dep(h_res, l_res); + + resolve_call(call, l_res, h_res, irg, block); + + return 1; +} + +/** + * Map a Shrs (a_l, a_h, count) + */ +static int map_Shrs(ir_node *call, void *ctx) { + ir_graph *irg = current_ir_graph; + dbg_info *dbg = get_irn_dbg_info(call); + ir_node *block = get_nodes_block(call); + ir_node **params = get_Call_param_arr(call); + ir_type *method = get_Call_type(call); + ir_node *a_l = params[BINOP_Left_Low]; + ir_node *a_h = params[BINOP_Left_High]; + 
ir_node *cnt = params[BINOP_Right_Low]; + ir_mode *l_res_mode = get_type_mode(get_method_res_type(method, 0)); + ir_mode *h_res_mode = get_type_mode(get_method_res_type(method, 1)); + ir_node *l_res, *h_res; + + /* l_res = SHRD a_l, a_h, cnt */ + l_res = new_rd_ia32_l_ShrD(dbg, irg, block, a_l, a_h, cnt, l_res_mode); + + /* h_res = SAR a_h, cnt */ + h_res = new_rd_ia32_l_Shrs(dbg, irg, block, a_h, cnt, h_res_mode); + add_irn_dep(h_res, l_res); + + resolve_call(call, l_res, h_res, irg, block); + + return 1; +} + +/** + * Map a Mul (a_l, a_h, b_l, b_h) + */ +static int map_Mul(ir_node *call, void *ctx) { + ir_graph *irg = current_ir_graph; + dbg_info *dbg = get_irn_dbg_info(call); + ir_node *block = get_nodes_block(call); + ir_node **params = get_Call_param_arr(call); + ir_type *method = get_Call_type(call); + ir_node *a_l = params[BINOP_Left_Low]; + ir_node *a_h = params[BINOP_Left_High]; + ir_node *b_l = params[BINOP_Right_Low]; + ir_node *b_h = params[BINOP_Right_High]; + ir_mode *l_res_mode = get_type_mode(get_method_res_type(method, 0)); + ir_mode *h_res_mode = get_type_mode(get_method_res_type(method, 1)); + ir_node *l_res, *h_res, *mul, *pEDX, *add; + + /* + EDX:EAX = a_l * b_l + l_res = EAX + + t1 = b_l * a_h + t2 = t1 + EDX + t3 = a_l * b_h + h_res = t2 + t3 + */ + mul = new_rd_ia32_l_MulS(dbg, irg, block, a_l, b_l); + pEDX = new_rd_Proj(dbg, irg, block, mul, l_res_mode, pn_ia32_l_MulS_EDX); + l_res = new_rd_Proj(dbg, irg, block, mul, l_res_mode, pn_ia32_l_MulS_EAX); + + mul = new_rd_ia32_l_Mul(dbg, irg, block, a_h, b_l, h_res_mode); + add = new_rd_ia32_l_Add(dbg, irg, block, mul, pEDX, h_res_mode); + mul = new_rd_ia32_l_Mul(dbg, irg, block, a_l, b_h, h_res_mode); + h_res = new_rd_ia32_l_Add(dbg, irg, block, add, mul, h_res_mode); + + resolve_call(call, l_res, h_res, irg, block); + + return 1; +} + +/** + * Map a Minus (a_l, a_h) + */ +static int map_Minus(ir_node *call, void *ctx) { + ir_graph *irg = current_ir_graph; + dbg_info *dbg = get_irn_dbg_info(call); + ir_node *block = get_nodes_block(call); + ir_node **params = get_Call_param_arr(call); + ir_type *method = get_Call_type(call); + ir_node *a_l = params[BINOP_Left_Low]; + ir_node *a_h = params[BINOP_Left_High]; + ir_mode *l_res_mode = get_type_mode(get_method_res_type(method, 0)); + ir_mode *h_res_mode = get_type_mode(get_method_res_type(method, 1)); + ir_node *l_res, *h_res, *cnst; + + /* l_res = 0 - a_l */ + l_res = new_rd_ia32_l_Minus(dbg, irg, block, a_l, l_res_mode); + + /* h_res = 0 - a_h - carry */ + + /* too bad: we need 0 in a register here */ + cnst = new_Const_long(h_res_mode, 0); + h_res = new_rd_ia32_l_SubC(dbg, irg, block, cnst, a_h, h_res_mode); + add_irn_dep(h_res, l_res); + + resolve_call(call, l_res, h_res, irg, block); + + return 1; +} + +/** + * Map a Abs (a_l, a_h) + */ +static int map_Abs(ir_node *call, void *ctx) { + ir_graph *irg = current_ir_graph; + dbg_info *dbg = get_irn_dbg_info(call); + ir_node *block = get_nodes_block(call); + ir_node **params = get_Call_param_arr(call); + ir_type *method = get_Call_type(call); + ir_node *a_l = params[BINOP_Left_Low]; + ir_node *a_h = params[BINOP_Left_High]; + ir_mode *l_res_mode = get_type_mode(get_method_res_type(method, 0)); + ir_mode *h_res_mode = get_type_mode(get_method_res_type(method, 1)); + ir_node *l_res, *h_res, *sign, *sub_l, *sub_h; + + /* + Code inspired by gcc output :) (although gcc doubles the + operation for t1 as t2 and uses t1 for operations with low part + and t2 for operations with high part which is actually unnecessary + because t1 and 
t2 represent the same value) + + t1 = SHRS a_h, 31 + t2 = a_l ^ t1 + t3 = a_h ^ t1 + l_res = t2 - t1 + h_res = t3 - t1 - carry + + */ + + sign = new_rd_ia32_l_Shrs(dbg, irg, block, a_h, new_Const_long(h_res_mode, 31), h_res_mode); + sub_l = new_rd_ia32_l_Eor(dbg, irg, block, a_l, sign, l_res_mode); + sub_h = new_rd_ia32_l_Eor(dbg, irg, block, a_h, sign, h_res_mode); + l_res = new_rd_ia32_l_Sub(dbg, irg, block, sub_l, sign, l_res_mode); + h_res = new_rd_ia32_l_SubC(dbg, irg, block, sub_h, sign, l_res_mode); + add_irn_dep(h_res, l_res); + + resolve_call(call, l_res, h_res, irg, block); + + return 1; +} + +/** + * Maps a Div/Mod (a_l, a_h, b_l, b_h) + */ +static int DivMod_mapper(ir_node *call, void *ctx, int need_mod) { + ia32_intrinsic_env_t *env = ctx; + ir_graph *irg = current_ir_graph; + dbg_info *dbg = get_irn_dbg_info(call); + ir_node *block = get_nodes_block(call); + ir_node **params = get_Call_param_arr(call); + ir_type *method = get_Call_type(call); + ir_node *a_l = params[BINOP_Left_Low]; + ir_node *a_h = params[BINOP_Left_High]; + ir_node *b_l = params[BINOP_Right_Low]; + ir_node *b_h = params[BINOP_Right_High]; + ir_mode *l_res_mode = get_type_mode(get_method_res_type(method, 0)); + ir_mode *h_res_mode = get_type_mode(get_method_res_type(method, 1)); + int mode_bytes = get_mode_size_bytes(ia32_reg_classes[CLASS_ia32_gp].mode); + entity *ent_a = env->ll_div_op1; + entity *ent_b = env->ll_div_op2; + ir_node *l_res, *h_res, *frame; + ir_node *store_l, *store_h; + ir_node *op_mem[2], *mem, *fa_mem, *fb_mem; + ir_node *fa, *fb, *fres; + char buf[3]; + + /* allocate memory on frame to store args */ + + if (! ent_a) { + ent_a = env->ll_div_op1 = + frame_alloc_area(get_irg_frame_type(irg), 2 * mode_bytes, 16, 0); + } + + if (! ent_b) { + ent_b = env->ll_div_op2 = + frame_alloc_area(get_irg_frame_type(irg), 2 * mode_bytes, 16, 0); + } + + snprintf(buf, sizeof(buf), "%d", mode_bytes); + frame = get_irg_frame(irg); + + /* store first arg */ + store_l = new_rd_ia32_l_Store(dbg, irg, block, frame, a_l, get_irg_no_mem(irg)); + set_ia32_frame_ent(store_l, ent_a); + set_ia32_use_frame(store_l); + set_ia32_ls_mode(store_l, get_irn_mode(a_l)); + op_mem[0] = new_r_Proj(irg, block, store_l, mode_M, pn_ia32_l_Store_M); + + store_h = new_rd_ia32_l_Store(dbg, irg, block, frame, a_h, get_irg_no_mem(irg)); + set_ia32_frame_ent(store_h, ent_a); + add_ia32_am_offs(store_h, buf); + set_ia32_use_frame(store_h); + set_ia32_ls_mode(store_h, get_irn_mode(a_h)); + op_mem[1] = new_r_Proj(irg, block, store_h, mode_M, pn_ia32_l_Store_M); + + mem = new_r_Sync(irg, block, 2, op_mem); + + /* load first arg into FPU */ + fa = new_rd_ia32_l_vfild(dbg, irg, block, frame, mem); + set_ia32_frame_ent(fa, ent_a); + set_ia32_use_frame(fa); + set_ia32_ls_mode(fa, mode_D); + fa_mem = new_r_Proj(irg, block, fa, mode_M, pn_ia32_l_vfild_M); + fa = new_r_Proj(irg, block, fa, mode_D, pn_ia32_l_vfild_res); + + /* store second arg */ + store_l = new_rd_ia32_l_Store(dbg, irg, block, frame, b_l, get_irg_no_mem(irg)); + set_ia32_frame_ent(store_l, ent_b); + set_ia32_use_frame(store_l); + set_ia32_ls_mode(store_l, get_irn_mode(b_l)); + op_mem[0] = new_r_Proj(irg, block, store_l, mode_M, pn_ia32_l_Store_M); + + store_h = new_rd_ia32_l_Store(dbg, irg, block, frame, b_h, get_irg_no_mem(irg)); + set_ia32_frame_ent(store_h, ent_b); + add_ia32_am_offs(store_h, buf); + set_ia32_use_frame(store_h); + set_ia32_ls_mode(store_h, get_irn_mode(b_h)); + op_mem[1] = new_r_Proj(irg, block, store_h, mode_M, pn_ia32_l_Store_M); + + mem = new_r_Sync(irg, 
block, 2, op_mem); + + /* load second arg into FPU */ + fb = new_rd_ia32_l_vfild(dbg, irg, block, frame, mem); + set_ia32_frame_ent(fb, ent_b); + set_ia32_use_frame(fb); + set_ia32_ls_mode(fb, mode_D); + fb_mem = new_r_Proj(irg, block, fb, mode_M, pn_ia32_l_vfild_M); + fb = new_r_Proj(irg, block, fb, mode_D, pn_ia32_l_vfild_res); + + op_mem[0] = fa_mem; + op_mem[1] = fb_mem; + + mem = new_r_Sync(irg, block, 2, op_mem); + + /* perform division */ + fres = new_rd_ia32_l_vfdiv(dbg, irg, block, fa, fb, mode_D); + + if (need_mod) { + /* we need modulo: mod = a - b * res */ + + fres = new_rd_ia32_l_vfmul(dbg, irg, block, fb, fres, mode_D); + fres = new_rd_ia32_l_vfsub(dbg, irg, block, fa, fres, mode_D); + } + + /* store back result, we use ent_a here */ + fres = new_rd_ia32_l_vfist(dbg, irg, block, frame, fres, mem); + set_ia32_frame_ent(fres, ent_a); + set_ia32_use_frame(fres); + set_ia32_ls_mode(fres, mode_D); + mem = new_r_Proj(irg, block, fres, mode_M, pn_ia32_l_vfist_M); + + /* load low part of the result */ + l_res = new_rd_ia32_l_Load(dbg, irg, block, frame, mem); + set_ia32_frame_ent(l_res, ent_a); + set_ia32_use_frame(l_res); + set_ia32_ls_mode(l_res, l_res_mode); + l_res = new_r_Proj(irg, block, l_res, l_res_mode, pn_ia32_l_Load_res); + + /* load hight part of the result */ + h_res = new_rd_ia32_l_Load(dbg, irg, block, frame, mem); + set_ia32_frame_ent(h_res, ent_a); + add_ia32_am_offs(h_res, buf); + set_ia32_use_frame(h_res); + set_ia32_ls_mode(h_res, h_res_mode); + h_res = new_r_Proj(irg, block, h_res, h_res_mode, pn_ia32_l_Load_res); + + /* lower the call */ + resolve_call(call, l_res, h_res, irg, block); + + return 1; +} + +static int map_Div(ir_node *call, void *ctx) { + return DivMod_mapper(call, ctx, 0); +} + +static int map_Mod(ir_node *call, void *ctx) { + return DivMod_mapper(call, ctx, 1); +} + +/** + * Maps a Conv (a_l, a_h) + */ +static int map_Conv(ir_node *call, void *ctx) { + ia32_intrinsic_env_t *env = ctx; + ir_graph *irg = current_ir_graph; + dbg_info *dbg = get_irn_dbg_info(call); + ir_node *block = get_nodes_block(call); + ir_node **params = get_Call_param_arr(call); + ir_type *method = get_Call_type(call); + int n = get_Call_n_params(call); + int gp_bytes = get_mode_size_bytes(ia32_reg_classes[CLASS_ia32_gp].mode); + entity *ent; + ir_node *l_res, *h_res, *frame, *fres; + ir_node *store_l, *store_h; + ir_node *op_mem[2], *mem; + char buf[3]; + + if (n == 1) { + /* We have a Conv float -> long long here */ + ir_node *a_f = params[0]; + ir_mode *l_res_mode = get_type_mode(get_method_res_type(method, 0)); + ir_mode *h_res_mode = get_type_mode(get_method_res_type(method, 1)); + + assert(mode_is_float(get_irn_mode(a_f)) && "unexpected Conv call"); + + /* allocate memory on frame to store args */ + ent = env->d_ll_conv; + if (! ent) { + ent = env->d_ll_conv = frame_alloc_area(get_irg_frame_type(irg), 2 * gp_bytes, 16, 0); + } + + /* Store arg */ + snprintf(buf, sizeof(buf), "%d", gp_bytes); + frame = get_irg_frame(irg); + + /* + Now we create a node to move the value from a XMM register into + x87 FPU because it is unknown here, which FPU is used. + This node is killed in transformation phase when not needed. 
+ Otherwise it is split up into a movsd + fld + */ + a_f = new_rd_ia32_l_SSEtoX87(dbg, irg, block, frame, a_f, get_irg_no_mem(irg), mode_D); + set_ia32_frame_ent(a_f, ent); + set_ia32_use_frame(a_f); + set_ia32_ls_mode(a_f, mode_D); + + /* store from FPU as Int */ + a_f = new_rd_ia32_l_vfist(dbg, irg, block, frame, a_f, get_irg_no_mem(irg)); + set_ia32_frame_ent(a_f, ent); + set_ia32_use_frame(a_f); + set_ia32_ls_mode(a_f, mode_D); + mem = new_r_Proj(irg, block, a_f, mode_M, pn_ia32_l_vfist_M); + + /* load low part of the result */ + l_res = new_rd_ia32_l_Load(dbg, irg, block, frame, mem); + set_ia32_frame_ent(l_res, ent); + set_ia32_use_frame(l_res); + set_ia32_ls_mode(l_res, l_res_mode); + l_res = new_r_Proj(irg, block, l_res, l_res_mode, pn_ia32_l_Load_res); + + /* load hight part of the result */ + h_res = new_rd_ia32_l_Load(dbg, irg, block, frame, mem); + set_ia32_frame_ent(h_res, ent); + add_ia32_am_offs(h_res, buf); + set_ia32_use_frame(h_res); + set_ia32_ls_mode(h_res, h_res_mode); + h_res = new_r_Proj(irg, block, h_res, h_res_mode, pn_ia32_l_Load_res); + + /* lower the call */ + resolve_call(call, l_res, h_res, irg, block); + } + else if (n == 2) { + /* We have a Conv long long -> float here */ + ir_node *a_l = params[BINOP_Left_Low]; + ir_node *a_h = params[BINOP_Left_High]; + ir_mode *mode_a_l = get_irn_mode(a_l); + ir_mode *mode_a_h = get_irn_mode(a_h); + ir_mode *fres_mode = get_type_mode(get_method_res_type(method, 0)); + + assert(! mode_is_float(mode_a_l) && ! mode_is_float(mode_a_h) && "unexpected Conv call"); + + /* allocate memory on frame to store args */ + ent = env->ll_d_conv; + if (! ent) { + ent = env->ll_d_conv = frame_alloc_area(get_irg_frame_type(irg), 2 * gp_bytes, 16, 0); + } + + /* Store arg */ + snprintf(buf, sizeof(buf), "%d", gp_bytes); + frame = get_irg_frame(irg); + + /* store first arg (low part) */ + store_l = new_rd_ia32_l_Store(dbg, irg, block, frame, a_l, get_irg_no_mem(irg)); + set_ia32_frame_ent(store_l, ent); + set_ia32_use_frame(store_l); + set_ia32_ls_mode(store_l, get_irn_mode(a_l)); + op_mem[0] = new_r_Proj(irg, block, store_l, mode_M, pn_ia32_l_Store_M); + + /* store second arg (high part) */ + store_h = new_rd_ia32_l_Store(dbg, irg, block, frame, a_h, get_irg_no_mem(irg)); + set_ia32_frame_ent(store_h, ent); + add_ia32_am_offs(store_h, buf); + set_ia32_use_frame(store_h); + set_ia32_ls_mode(store_h, get_irn_mode(a_h)); + op_mem[1] = new_r_Proj(irg, block, store_h, mode_M, pn_ia32_l_Store_M); + + mem = new_r_Sync(irg, block, 2, op_mem); + + /* Load arg into x87 FPU (implicit convert) */ + fres = new_rd_ia32_l_vfild(dbg, irg, block, frame, mem); + set_ia32_frame_ent(fres, ent); + set_ia32_use_frame(fres); + set_ia32_ls_mode(fres, mode_D); + mem = new_r_Proj(irg, block, fres, mode_M, pn_ia32_l_vfild_M); + fres = new_r_Proj(irg, block, fres, fres_mode, pn_ia32_l_vfild_res); + + /* + Now we create a node to move the loaded value into a XMM + register because it is unknown here, which FPU is used. + This node is killed in transformation phase when not needed. + Otherwise it is split up into a fst + movsd + */ + fres = new_rd_ia32_l_X87toSSE(dbg, irg, block, frame, fres, mem, fres_mode); + set_ia32_frame_ent(fres, ent); + set_ia32_use_frame(fres); + set_ia32_ls_mode(fres, fres_mode); + + /* lower the call */ + resolve_call(call, fres, NULL, irg, block); + } + else { + assert(0 && "unexpected Conv call"); + } + + return 1; +} + /* Ia32 implementation of intrinsic mapping. 
*/ entity *ia32_create_intrinsic_fkt(ir_type *method, const ir_op *op, const ir_mode *imode, const ir_mode *omode, @@ -133,8 +607,44 @@ entity *ia32_create_intrinsic_fkt(ir_type *method, const ir_op *op, ent = &i_ents[iro_Sub]; mapper = map_Sub; break; + case iro_Shl: + ent = &i_ents[iro_Shl]; + mapper = map_Shl; + break; + case iro_Shr: + ent = &i_ents[iro_Shr]; + mapper = map_Shr; + break; + case iro_Shrs: + ent = &i_ents[iro_Shrs]; + mapper = map_Shrs; + break; + case iro_Mul: + ent = &i_ents[iro_Mul]; + mapper = map_Mul; + break; + case iro_Minus: + ent = &i_ents[iro_Minus]; + mapper = map_Minus; + break; + case iro_Abs: + ent = &i_ents[iro_Abs]; + mapper = map_Abs; + break; + case iro_Div: + ent = &i_ents[iro_Div]; + mapper = map_Div; + break; + case iro_Mod: + ent = &i_ents[iro_Mod]; + mapper = map_Mod; + break; + case iro_Conv: + ent = &i_ents[iro_Conv]; + mapper = map_Conv; + break; default: - ir_fprintf(stderr, "FIXME: unhandled op for ia32 intrinsic function %+O\n", op); + fprintf(stderr, "FIXME: unhandled op for ia32 intrinsic function %s\n", get_id_str(op->name)); return def_create_intrinsic_fkt(method, op, imode, omode, context); }
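
The mapper functions added above all follow the usual 32-bit-half decompositions for 64-bit arithmetic on ia32; the comment blocks in map_Mul and map_Abs spell the formulas out. As a standalone sketch of that arithmetic (plain C99 with illustrative names, independent of libFIRM and not part of the patch itself), the following program checks those low-word/high-word formulas against native 64-bit operations. The uint64_t intermediate plays the role of the EDX:EAX pair produced by l_MulS, and the explicit borrow corresponds to the l_SubC node used in map_Abs.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* 32-bit-half multiply, mirroring the comment in map_Mul:
 *   EDX:EAX = a_l * b_l                  (full 32x32->64 product)
 *   h_res   = a_h*b_l + EDX + a_l*b_h    (each term taken mod 2^32)
 * The uint64_t product stands in for the EDX:EAX pair of l_MulS. */
static void mul64_halves(uint32_t a_l, uint32_t a_h, uint32_t b_l, uint32_t b_h,
                         uint32_t *l_res, uint32_t *h_res)
{
    uint64_t p   = (uint64_t)a_l * b_l;     /* EDX:EAX */
    uint32_t edx = (uint32_t)(p >> 32);
    *l_res = (uint32_t)p;                   /* EAX */
    *h_res = a_h * b_l + edx + a_l * b_h;   /* wraps mod 2^32, as on ia32 */
}

/* 32-bit-half abs, mirroring the comment in map_Abs:
 *   t1 = a_h >>s 31; t2 = a_l ^ t1; t3 = a_h ^ t1
 *   l_res = t2 - t1; h_res = t3 - t1 - borrow
 * The explicit borrow is what the l_SubC node expresses. */
static void abs64_halves(uint32_t a_l, uint32_t a_h,
                         uint32_t *l_res, uint32_t *h_res)
{
    uint32_t t1 = (uint32_t)((int32_t)a_h >> 31);  /* arithmetic shift: 0 or 0xffffffff */
    uint32_t t2 = a_l ^ t1;
    uint32_t t3 = a_h ^ t1;
    uint32_t borrow = t2 < t1;                     /* borrow of t2 - t1 */
    *l_res = t2 - t1;
    *h_res = t3 - t1 - borrow;
}

int main(void)
{
    const int64_t samples[] = {
        0, 1, -1, 42, -42, 123456789012345LL, -987654321098765LL,
        INT64_MAX, INT64_MIN + 1
    };
    const unsigned n = sizeof(samples) / sizeof(samples[0]);

    for (unsigned i = 0; i < n; ++i) {
        uint64_t a = (uint64_t)samples[i];
        uint32_t l, h;

        abs64_halves((uint32_t)a, (uint32_t)(a >> 32), &l, &h);
        assert((((uint64_t)h << 32) | l) ==
               (uint64_t)(samples[i] < 0 ? -samples[i] : samples[i]));

        for (unsigned j = 0; j < n; ++j) {
            uint64_t b = (uint64_t)samples[j];

            mul64_halves((uint32_t)a, (uint32_t)(a >> 32),
                         (uint32_t)b, (uint32_t)(b >> 32), &l, &h);
            assert((((uint64_t)h << 32) | l) == a * b);  /* product mod 2^64 */
        }
    }
    puts("half-word decompositions match native 64-bit arithmetic");
    return 0;
}

Compiled with any C99 compiler (for example gcc -std=c99), the asserts hold for the sampled values, including the mod-2^32 wrap-arounds that the generated ia32 code relies on.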