/*
 * Copyright (C) 1995-2007 University of Karlsruhe.  All rights reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 */

/*
 * File name:   ir/lower/lower_intrinsics.c
 * Purpose:     lowering of Calls of intrinsic functions
 * Author:      Michael Beck
 * Copyright:   (c) 1998-2005 Universität Karlsruhe
 */

#include <assert.h>
#include <string.h>

#include "lower_intrinsics.h"
#include "irnode_t.h"
#include "irprog_t.h"
#include "irgwalk.h"
#include "ircons.h"
#include "irgmod.h"
#include "irgopt.h"
#include "trouts.h"
#include "pmap.h"
#include "array.h"

/** Walker environment. */
typedef struct _walker_env {
  pmap           *c_map;            /**< The intrinsic call map. */
  unsigned       nr_of_intrinsics;  /**< Statistics: number of lowered intrinsics. */
  i_instr_record **i_map;           /**< The intrinsic instruction map. */
} walker_env_t;

/**
 * Walker: calls all mapper functions.
 */
static void call_mapper(ir_node *node, void *env) {
  walker_env_t *wenv = env;
  ir_op *op = get_irn_op(node);

  if (op == op_Call) {
    ir_node *symconst;
    pmap_entry *p;
    const i_call_record *r;
    ir_entity *ent;

    symconst = get_Call_ptr(node);
    if (get_irn_op(symconst) != op_SymConst ||
        get_SymConst_kind(symconst) != symconst_addr_ent)
      return;

    ent = get_SymConst_entity(symconst);
    p   = pmap_find(wenv->c_map, ent);
    if (p == NULL)
      return;

    r = p->value;
    wenv->nr_of_intrinsics += r->i_mapper(node, r->ctx) ? 1 : 0;
  }
  else if (0 <= op->code && op->code < ARR_LEN(wenv->i_map)) {
    const i_instr_record *r = wenv->i_map[op->code];

    /* run all mappers registered for this opcode until one succeeds */
    while (r != NULL) {
      if (r->i_mapper(node, r->ctx)) {
        ++wenv->nr_of_intrinsics;
        break;
      }
      r = r->link;
    }
  }
}

/* Go through all graphs and map calls to intrinsic functions. */
unsigned lower_intrinsics(i_record *list, int length) {
  int i, n_ops = get_irp_n_opcodes();
  ir_graph *irg;
  pmap *c_map = pmap_create_ex(length);
  i_instr_record **i_map;
  unsigned nr_of_intrinsics = 0;
  walker_env_t wenv;

  /* allocate the opcode-indexed instruction map on the stack and clear it */
  NEW_ARR_A(i_instr_record *, i_map, n_ops);
  memset((void *)i_map, 0, sizeof(*i_map) * n_ops);

  /* fill the maps for faster search */
  for (i = length - 1; i >= 0; --i) {
    if (list[i].i_call.kind == INTRINSIC_CALL) {
      pmap_insert(c_map, list[i].i_call.i_ent, (void *)&list[i].i_call);
    }
    else {
      /* INTRINSIC_INSTR: chain all records registered for the same opcode */
      ir_op *op = list[i].i_instr.op;
      assert(0 <= op->code && op->code < ARR_LEN(i_map));

      list[i].i_instr.link = i_map[op->code];
      i_map[op->code] = &list[i].i_instr;
    }
  }

  wenv.c_map = c_map;
  wenv.i_map = i_map;

  /* process all graphs */
  for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
    irg = get_irp_irg(i);

    wenv.nr_of_intrinsics = 0;
    irg_walk_graph(irg, NULL, call_mapper, &wenv);

    if (wenv.nr_of_intrinsics) {
      /* changes detected: invalidate analysis information */
      set_irg_outs_inconsistent(irg);
      set_irg_callee_info_state(irg, irg_callee_info_inconsistent);

      /* exception control flow might have changed */
      set_irg_doms_inconsistent(irg);
      set_irg_extblk_inconsistent(irg);
      set_irg_loopinfo_inconsistent(irg);

      /* calls might be removed/added */
      set_trouts_inconsistent();

      /* optimize: Tuples might have been created */
      local_optimize_graph(irg);

      nr_of_intrinsics += wenv.nr_of_intrinsics;
    }
  }
  pmap_destroy(c_map);

  return nr_of_intrinsics;
}
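
/*
 * Usage sketch (illustrative only, not part of this file): "abs_ent" is a
 * hypothetical ir_entity for "int abs(int)" that the frontend must have
 * created. Registering the Abs mapper below as an intrinsic call and
 * lowering all graphs could then look like this:
 *
 *   i_record rec;
 *
 *   rec.i_call.kind     = INTRINSIC_CALL;
 *   rec.i_call.i_ent    = abs_ent;
 *   rec.i_call.i_mapper = i_mapper_Abs;
 *   rec.i_call.ctx      = NULL;
 *   (void)lower_intrinsics(&rec, 1);
 */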

/* A mapper for the integer abs() function. */
int i_mapper_Abs(ir_node *call, void *ctx) {
  ir_node *mem   = get_Call_mem(call);
  ir_node *block = get_nodes_block(call);
  ir_node *op    = get_Call_param(call, 0);
  ir_node *irn;
  dbg_info *dbg  = get_irn_dbg_info(call);

  irn = new_rd_Abs(dbg, current_ir_graph, block, op, get_irn_mode(op));
  irn = new_Tuple(1, &irn);

  turn_into_tuple(call, pn_Call_max);
  set_Tuple_pred(call, pn_Call_M_regular, mem);
  set_Tuple_pred(call, pn_Call_X_except, new_Bad());
  set_Tuple_pred(call, pn_Call_T_result, irn);
  set_Tuple_pred(call, pn_Call_M_except, mem);
  set_Tuple_pred(call, pn_Call_P_value_res_base, new_Bad());
  return 1;
}
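
/*
 * Informal before/after sketch of the replacement done above (an
 * illustration, not generated output):
 *
 *   before:  call = Call(abs, a)
 *   after:   call = Tuple(M_regular: mem, X_except: Bad,
 *                         T_result: Tuple(Abs(a)), M_except: mem, ...)
 *
 * All Projs hanging off the former Call keep their positions; the
 * Tuple/Proj chains are folded away by local_optimize_graph() afterwards.
 */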

/* A mapper for the alloca() function. */
int i_mapper_Alloca(ir_node *call, void *ctx) {
  ir_node *mem   = get_Call_mem(call);
  ir_node *block = get_nodes_block(call);
  ir_node *op    = get_Call_param(call, 0);
  ir_node *irn, *exc;
  dbg_info *dbg  = get_irn_dbg_info(call);

  irn = new_rd_Alloc(dbg, current_ir_graph, block, mem, op, firm_unknown_type, stack_alloc);
  mem = new_Proj(irn, mode_M, pn_Alloc_M);
  exc = new_Proj(irn, mode_X, pn_Alloc_X_except);
  irn = new_Proj(irn, get_modeP_data(), pn_Alloc_res);
  irn = new_Tuple(1, &irn);

  turn_into_tuple(call, pn_Call_max);
  set_Tuple_pred(call, pn_Call_M_regular, mem);
  set_Tuple_pred(call, pn_Call_X_except, exc);
  set_Tuple_pred(call, pn_Call_T_result, irn);
  set_Tuple_pred(call, pn_Call_M_except, mem);
  set_Tuple_pred(call, pn_Call_P_value_res_base, new_Bad());
  return 1;
}

#define LMAX(a, b) ((a) > (b) ? (a) : (b))

/* A mapper for mapping unsupported instructions to runtime calls. */
int i_mapper_RuntimeCall(ir_node *node, runtime_rt *rt) {
  int i, j, arity, first, n_param, n_res;
  long n_proj;
  ir_type *mtp;
  ir_node *mem, *bl, *call, *addr, *res_proj;
  ir_node **in;
  ir_graph *irg;
  symconst_symbol sym;

  /* check if the result mode matches */
  if (get_irn_mode(node) != rt->mode)
    return 0;

  arity = get_irn_arity(node);
  if (arity <= 0)
    return 0;

  mtp     = get_entity_type(rt->ent);
  n_param = get_method_n_params(mtp);
  irg     = current_ir_graph;

  /* if the first predecessor is no memory, the node has no memory input */
  mem = get_irn_n(node, 0);
  if (get_irn_mode(mem) != mode_M) {
    mem   = new_r_NoMem(irg);
    first = 0;
  }
  else
    first = 1;

  /* check if the modes of the predecessors match the parameter modes */
  if (arity - first != n_param)
    return 0;

  for (i = first, j = 0; i < arity; ++i, ++j) {
    ir_type *param_tp = get_method_param_type(mtp, j);
    ir_node *pred     = get_irn_n(node, i);

    if (get_type_mode(param_tp) != get_irn_mode(pred))
      return 0;
  }

  n_res = get_method_n_ress(mtp);

  /* step 0: calculate the number of needed Projs */
  n_proj = 0;
  n_proj = LMAX(n_proj, rt->mem_proj_nr + 1);
  n_proj = LMAX(n_proj, rt->exc_proj_nr + 1);
  n_proj = LMAX(n_proj, rt->exc_mem_proj_nr + 1);
  n_proj = LMAX(n_proj, rt->res_proj_nr + 1);

  if (n_proj > 0) {
    if (rt->mode != mode_T) /* if Projs are requested, the node must produce a tuple */
      return 0;
  }
  else if (n_res > 0) {
    /* the single result mode must match */
    if (get_type_mode(get_method_res_type(mtp, 0)) != rt->mode)
      return 0;
  }

  /* at this point the predecessor count and all parameter modes match */
  bl = get_nodes_block(node);

  in = NULL;
  if (n_param > 0) {
    NEW_ARR_A(ir_node *, in, n_param);
    for (i = 0; i < n_param; ++i)
      in[i] = get_irn_n(node, first + i);
  }

  /* step 1: create the Call replacing the node */
  sym.entity_p = rt->ent;
  addr = new_r_SymConst(irg, bl, sym, symconst_addr_ent);
  call = new_rd_Call(get_irn_dbg_info(node), irg, bl, mem, addr, n_param, in, mtp);
  set_irn_pinned(call, get_irn_pinned(node));

  if (n_res > 0)
    res_proj = new_r_Proj(irg, bl, call, mode_T, pn_Call_T_result);
  else
    res_proj = NULL;

  if (n_proj > 0) {
    n_proj += n_res - 1;

    /* step 2: turn the node into a Tuple and reroute its Projs */
    turn_into_tuple(node, n_proj);

    for (i = 0; i < n_proj; ++i)
      set_Tuple_pred(node, i, new_r_Bad(irg));
    if (rt->mem_proj_nr >= 0)
      set_Tuple_pred(node, rt->mem_proj_nr, new_r_Proj(irg, bl, call, mode_M, pn_Call_M_regular));
    if (get_irn_op(mem) != op_NoMem) {
      /* exceptions can only be handled with real memory */
      if (rt->exc_proj_nr >= 0)
        set_Tuple_pred(node, rt->exc_proj_nr, new_r_Proj(irg, bl, call, mode_X, pn_Call_X_except));
      if (rt->exc_mem_proj_nr >= 0)
        set_Tuple_pred(node, rt->exc_mem_proj_nr, new_r_Proj(irg, bl, call, mode_M, pn_Call_M_except));
    }

    if (rt->res_proj_nr >= 0)
      for (i = 0; i < n_res; ++i)
        set_Tuple_pred(node, rt->res_proj_nr + i,
          new_r_Proj(irg, bl, res_proj, get_type_mode(get_method_res_type(mtp, i)), i));
    return 1;
  }
  else if (n_res > 0) {
    /* only one return value supported */
    ir_mode *mode = get_type_mode(get_method_res_type(mtp, 0));

    res_proj = new_r_Proj(irg, bl, call, mode_T, pn_Call_T_result);
    res_proj = new_r_Proj(irg, bl, res_proj, mode, 0);

    exchange(node, res_proj);
    return 1;
  }
  /* should not happen */
  assert(0);
  return 0;
}
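
/*
 * Usage sketch (illustrative only, not part of this file): map 64-bit Div
 * nodes onto a runtime routine. "divdi3_ent" is a hypothetical ir_entity
 * for something like __divdi3 that the backend must have created:
 *
 *   runtime_rt rt;
 *   i_record   rec;
 *
 *   rt.ent             = divdi3_ent;
 *   rt.mode            = mode_T;           // a Div node produces a tuple
 *   rt.mem_proj_nr     = pn_Div_M;         // reroute the memory Proj
 *   rt.exc_proj_nr     = pn_Div_X_except;  // and the exception Proj
 *   rt.exc_mem_proj_nr = -1;               // Div has no exception memory Proj
 *   rt.res_proj_nr     = pn_Div_res;       // position of the result Proj
 *
 *   rec.i_instr.kind     = INTRINSIC_INSTR;
 *   rec.i_instr.op       = op_Div;
 *   rec.i_instr.i_mapper = (i_mapper_func)i_mapper_RuntimeCall;
 *   rec.i_instr.ctx      = &rt;
 *   (void)lower_intrinsics(&rec, 1);
 */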