X-Git-Url: http://nsz.repo.hu/git/?a=blobdiff_plain;f=ir%2Flower%2Flower_intrinsics.c;h=69bfc6fa855dc184be1fb6512187f7093cad6325;hb=d16b1e9e4d47b1f467067aadb36d58fc79b37788;hp=22bd191fd342233f8b61d5e0d5e8b3f11ea237d7;hpb=77402d90eda5171ba1377a3fb79cfcc161bc9d25;p=libfirm

diff --git a/ir/lower/lower_intrinsics.c b/ir/lower/lower_intrinsics.c
index 22bd191fd..69bfc6fa8 100644
--- a/ir/lower/lower_intrinsics.c
+++ b/ir/lower/lower_intrinsics.c
@@ -20,66 +20,101 @@
 #include <config.h>
 #endif

+#include "irop_t.h"
 #include "irprog_t.h"
 #include "irnode_t.h"
 #include "irgwalk.h"
 #include "ircons.h"
 #include "irgmod.h"
+#include "irgopt.h"
+#include "trouts.h"
 #include "lower_intrinsics.h"
 #include "pmap.h"

 /** Walker environment */
 typedef struct _walker_env {
-  pmap           *map;                /**< the intrinsic map. */
-  unsigned       nr_of_intrinsics;    /**< statistics */
+  pmap           *c_map;              /**< The intrinsic call map. */
+  unsigned       nr_of_intrinsics;    /**< statistics */
+  i_instr_record **i_map;             /**< The intrinsic instruction map. */
 } walker_env_t;

 /**
- * walker: do the call mapping
+ * walker: call all mapper functions
  */
-static void call_mapper(ir_node *call, void *env) {
+static void call_mapper(ir_node *node, void *env) {
   walker_env_t *wenv = env;
-  ir_node *symconst;
-  pmap_entry *p;
-  const i_record *r;
-  entity *ent;
+  ir_op *op = get_irn_op(node);

-  if (get_irn_op(call) != op_Call)
-    return;
+  if (op == op_Call) {
+    ir_node *symconst;
+    pmap_entry *p;
+    const i_call_record *r;
+    entity *ent;

-  symconst = get_Call_ptr(call);
-  if (get_irn_op(symconst) != op_SymConst ||
-      get_SymConst_kind(symconst) != symconst_addr_ent)
-    return;
+    symconst = get_Call_ptr(node);
+    if (get_irn_op(symconst) != op_SymConst ||
+        get_SymConst_kind(symconst) != symconst_addr_ent)
+      return;

-  ent = get_SymConst_entity(symconst);
-  p   = pmap_find(wenv->map, ent);
+    ent = get_SymConst_entity(symconst);
+    p   = pmap_find(wenv->c_map, ent);

-  if (p) {
-    r = p->value;
-    wenv->nr_of_intrinsics += r->i_mapper(call, r->ctx) ? 1 : 0;
+    if (p) {
+      r = p->value;
+      wenv->nr_of_intrinsics += r->i_mapper(node, r->ctx) ? 1 : 0;
+    }
+  }
+  else {
+    if (0 <= op->code && op->code < ARR_LEN(wenv->i_map)) {
+      const i_instr_record *r = wenv->i_map[op->code];
+      /* run all possible mappers until one succeeds */
+      while (r) {
+        if (r->i_mapper(node, r->ctx)) {
+          ++wenv->nr_of_intrinsics;
+          break;
+        }
+        r = r->link;
+      }
+    }
+  }
 }
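As context for the call branch above: a call record's i_mapper receives the matched Call node and the record's ctx, and returns non-zero iff it rewrote the node. The following is a minimal sketch of such a mapper, not part of the patch; it assumes the classic libfirm constructors new_r_Abs and new_r_Tuple and the pn_Call_max enum value, and a hypothetical abs() entity bound elsewhere.

/* Sketch: rewrite a call to some abs() entity into an Abs node.
   The Call is turned into a Tuple so existing Projs stay valid. */
static int my_abs_mapper(ir_node *call, void *ctx) {
  ir_graph *irg   = current_ir_graph;
  ir_node  *block = get_nodes_block(call);
  ir_node  *mem   = get_Call_mem(call);
  ir_node  *op    = get_Call_param(call, 0);
  ir_node  *irn   = new_r_Abs(irg, block, op, get_irn_mode(op));
  int      i;
  (void)ctx;

  /* wrap the value in a Tuple so Proj(result, 0) still selects it */
  irn = new_r_Tuple(irg, block, 1, &irn);

  turn_into_tuple(call, pn_Call_max);
  for (i = 0; i < pn_Call_max; ++i)
    set_Tuple_pred(call, i, new_r_Bad(irg));
  set_Tuple_pred(call, pn_Call_M_regular, mem);  /* memory is untouched */
  set_Tuple_pred(call, pn_Call_T_result, irn);   /* Abs cannot raise, so X_except stays Bad */
  return 1;  /* node was replaced */
}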
/* Go through all graphs and map calls to intrinsic functions. */
-unsigned lower_intrinsic_calls(const i_record *list, int length) {
-  int i;
-  ir_graph *irg;
-  pmap *map = pmap_create_ex(length);
-  walker_env_t wenv;
-  unsigned nr_of_intrinsics = 0;
+unsigned lower_intrinsics(i_record *list, int length) {
+  int i, n_ops = get_irp_n_opcodes();
+  ir_graph *irg;
+  pmap *c_map = pmap_create_ex(length);
+  i_instr_record **i_map;
+  unsigned nr_of_intrinsics = 0;
+  walker_env_t wenv;
+
+  /* allocate and clear the opcode-indexed instruction map */
+  NEW_ARR_A(i_instr_record *, i_map, n_ops);
+  memset(i_map, 0, sizeof(*i_map) * n_ops);

   /* fill the maps for faster lookup */
-  for (i = length - 1; i >= 0; --i)
-    pmap_insert(map, list[i].i_ent, (void *)&list[i]);
+  for (i = length - 1; i >= 0; --i) {
+    if (list[i].i_call.kind == INTRINSIC_CALL) {
+      pmap_insert(c_map, list[i].i_call.i_ent, (void *)&list[i].i_call);
+    }
+    else {
+      ir_op *op = list[i].i_instr.op;
+      assert(0 <= op->code && op->code < ARR_LEN(i_map));

-  wenv.map = map;
+      list[i].i_instr.link = i_map[op->code];
+      i_map[op->code] = &list[i].i_instr;
+    }
+  }
+
+  wenv.c_map = c_map;
+  wenv.i_map = i_map;

   for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
     irg = get_irp_irg(i);
     wenv.nr_of_intrinsics = 0;
-    irg_walk_graph(irg, NULL, call_mapper, map);
+    irg_walk_graph(irg, NULL, call_mapper, &wenv);

     if (wenv.nr_of_intrinsics) {
       /* changes detected */
@@ -88,12 +123,19 @@ unsigned lower_intrinsic_calls(const i_record *list, int length) {

       /* exception control flow might have changed */
       set_irg_doms_inconsistent(irg);
+      set_irg_extblk_inconsistent(irg);
       set_irg_loopinfo_inconsistent(irg);

+      /* calls might have been removed/added */
+      set_trouts_inconsistent();
+
+      /* optimize the graph: Tuple nodes might have been created */
+      local_optimize_graph(irg);
+
       nr_of_intrinsics += wenv.nr_of_intrinsics;
     }
   }
-  pmap_destroy(map);
+  pmap_destroy(c_map);

   return nr_of_intrinsics;
 }
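For illustration, here is how a driver might register both record kinds with the new entry point. This is a hedged sketch, not code from the patch: it assumes lower_intrinsics.h declares i_record as a union of i_call_record and i_instr_record with i_mapper and ctx members (as the lookup code above implies), and that INTRINSIC_INSTR is the instruction counterpart of the INTRINSIC_CALL kind tested above. abs_ent, rt_div, and my_abs_mapper (from the sketch above) are placeholders.

/* thin adapter: the record table stores generic (ir_node *, void *)
   mappers, so wrap i_mapper_RuntimeCall instead of casting it */
static int div_rt_mapper(ir_node *node, void *ctx) {
  return i_mapper_RuntimeCall(node, ctx);
}

static unsigned lower_my_intrinsics(entity *abs_ent, runtime_rt *rt_div) {
  i_record records[2];

  /* map calls to the abs() entity via a call record */
  records[0].i_call.kind     = INTRINSIC_CALL;
  records[0].i_call.i_ent    = abs_ent;
  records[0].i_call.i_mapper = my_abs_mapper;
  records[0].i_call.ctx      = NULL;

  /* map every Div node to a runtime call via an instruction record;
     INTRINSIC_INSTR is assumed to be the non-call record kind */
  records[1].i_instr.kind     = INTRINSIC_INSTR;
  records[1].i_instr.op       = op_Div;
  records[1].i_instr.i_mapper = div_rt_mapper;
  records[1].i_instr.ctx      = rt_div;

  return lower_intrinsics(records, 2);
}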
@@ -142,3 +184,126 @@ int i_mapper_Alloca(ir_node *call, void *ctx) {

   return 1;
 }
+
+#define LMAX(a, b) ((a) > (b) ? (a) : (b))
+
+/* A mapper for mapping unsupported instructions to runtime calls. */
+int i_mapper_RuntimeCall(ir_node *node, runtime_rt *rt) {
+  int i, j, arity, first, n_param, n_res;
+  long n_proj;
+  ir_type *mtp;
+  ir_node *mem, *bl, *call, *addr, *res_proj;
+  ir_node **in;
+  ir_graph *irg;
+  symconst_symbol sym;
+
+  /* check if the result mode matches */
+  if (get_irn_mode(node) != rt->mode)
+    return 0;
+
+  arity = get_irn_arity(node);
+  if (arity <= 0)
+    return 0;
+
+  mtp     = get_entity_type(rt->ent);
+  n_param = get_method_n_params(mtp);
+  irg     = current_ir_graph;
+
+  mem = get_irn_n(node, 0);
+  if (get_irn_mode(mem) != mode_M) {
+    mem   = new_r_NoMem(irg);
+    first = 0;
+  }
+  else
+    first = 1;
+
+  /* check if the modes of the predecessors match the parameter modes */
+  if (arity - first != n_param)
+    return 0;
+
+  for (i = first, j = 0; i < arity; ++i, ++j) {
+    ir_type *param_tp = get_method_param_type(mtp, j);
+    ir_node *pred     = get_irn_n(node, i);
+
+    if (get_type_mode(param_tp) != get_irn_mode(pred))
+      return 0;
+  }
+
+  n_res = get_method_n_ress(mtp);
+
+  /* step 0: calculate the number of needed Projs */
+  n_proj = 0;
+  n_proj = LMAX(n_proj, rt->mem_proj_nr + 1);
+  n_proj = LMAX(n_proj, rt->exc_proj_nr + 1);
+  n_proj = LMAX(n_proj, rt->exc_mem_proj_nr + 1);
+  n_proj = LMAX(n_proj, rt->res_proj_nr + 1);
+
+  if (n_proj > 0) {
+    if (rt->mode != mode_T)  /* must be mode_T */
+      return 0;
+  }
+  else {
+    if (n_res > 0)
+      /* must match */
+      if (get_type_mode(get_method_res_type(mtp, 0)) != rt->mode)
+        return 0;
+  }
+
+  /* ok: at this point both the arity and the parameter modes match */
+  bl = get_nodes_block(node);
+
+  in = NULL;
+  if (n_param > 0) {
+    NEW_ARR_A(ir_node *, in, n_param);
+    for (i = 0; i < n_param; ++i)
+      in[i] = get_irn_n(node, first + i);
+  }
+
+  /* step 1: create the call */
+  sym.entity_p = rt->ent;
+  addr = new_r_SymConst(irg, bl, sym, symconst_addr_ent);
+  call = new_rd_Call(get_irn_dbg_info(node), irg, bl, mem, addr, n_param, in, mtp);
+  set_irn_pinned(call, get_irn_pinned(node));
+
+  if (n_res > 0)
+    res_proj = new_r_Proj(irg, bl, call, mode_T, pn_Call_T_result);
+
+  if (n_proj > 0) {
+    n_proj += n_res - 1;
+
+    /* we are ready */
+    turn_into_tuple(node, n_proj);
+
+    for (i = 0; i < n_proj; ++i)
+      set_Tuple_pred(node, i, new_r_Bad(irg));
+    if (rt->mem_proj_nr >= 0)
+      set_Tuple_pred(node, rt->mem_proj_nr, new_r_Proj(irg, bl, call, mode_M, pn_Call_M_regular));
+    if (get_irn_op(mem) != op_NoMem) {
+      /* Exceptions can only be handled with real memory */
+      if (rt->exc_proj_nr >= 0)
+        set_Tuple_pred(node, rt->exc_proj_nr, new_r_Proj(irg, bl, call, mode_X, pn_Call_X_except));
+      if (rt->exc_mem_proj_nr >= 0)
+        set_Tuple_pred(node, rt->exc_mem_proj_nr, new_r_Proj(irg, bl, call, mode_M, pn_Call_M_except));
+    }
+
+    if (rt->res_proj_nr >= 0)
+      for (i = 0; i < n_res; ++i)
+        set_Tuple_pred(node, rt->res_proj_nr + i,
+          new_r_Proj(irg, bl, res_proj, get_type_mode(get_method_res_type(mtp, i)), i));
+    return 1;
+  }
+  else {
+    /* only one return value supported */
+    if (n_res > 0) {
+      ir_mode *mode = get_type_mode(get_method_res_type(mtp, 0));
+
+      res_proj = new_r_Proj(irg, bl, call, mode_T, pn_Call_T_result);
+      res_proj = new_r_Proj(irg, bl, res_proj, mode, 0);
+
+      exchange(node, res_proj);
+      return 1;
+    }
+  }
+  /* should not happen */
+  return 0;
+}
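To close the loop on the Div example from the registration sketch above, a runtime_rt descriptor might be filled in as follows. Again a sketch under assumptions: ll_div_ent stands for an externally created method entity (e.g. a long long division routine such as __divdi3), and the pn_Div_* values are libfirm's standard Div Proj numbers; -1 marks Projs the lowered node does not provide.

static void setup_div_rt(runtime_rt *rt, entity *ll_div_ent) {
  rt->ent             = ll_div_ent;       /* the runtime routine to call */
  rt->mode            = mode_T;           /* Div delivers its results via Projs */
  rt->mem_proj_nr     = pn_Div_M;         /* position of the memory Proj */
  rt->exc_proj_nr     = pn_Div_X_except;  /* position of the exception Proj */
  rt->exc_mem_proj_nr = -1;               /* Div has no separate exception memory */
  rt->res_proj_nr     = pn_Div_res;       /* first (and only) data result */
}

With such a descriptor, i_mapper_RuntimeCall first verifies that the node's mode and operand modes match the entity's signature, then rebuilds the Div as a Call to ll_div_ent and re-wires the existing Projs through the Tuple it leaves behind.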