/*
 * Copyright (C) 1995-2007 University of Karlsruhe. All rights reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 */

/**
 * @file
 * @brief  Lowering of Calls of intrinsic functions
 * @author Michael Beck
 */

#include <string.h>
#include <assert.h>

#include "lower_intrinsics.h"
/* headers assumed from the calls used below; exact names may differ between
 * libFirm versions */
#include "irop_t.h"
#include "irnode_t.h"
#include "irprog_t.h"
#include "ircons.h"
#include "irgmod.h"
#include "irgwalk.h"
#include "irgopt.h"
#include "trouts.h"
#include "pmap.h"
#include "array.h"

/** Walker environment. */
typedef struct _walker_env {
	pmap           *c_map;            /**< The intrinsic call map. */
	unsigned       nr_of_intrinsics;  /**< statistics */
	i_instr_record **i_map;           /**< The intrinsic instruction map. */
} walker_env_t;

/**
 * Walker: call all mapper functions.
 */
static void call_mapper(ir_node *node, void *env) {
	walker_env_t *wenv = env;
	ir_op *op = get_irn_op(node);

	if (op == op_Call) {
		pmap_entry *p;
		const i_call_record *r;
		ir_node *symconst = get_Call_ptr(node);

		if (get_irn_op(symconst) != op_SymConst ||
		    get_SymConst_kind(symconst) != symconst_addr_ent)
			return;

		p = pmap_find(wenv->c_map, get_SymConst_entity(symconst));
		if (p == NULL)
			return;
		r = p->value;
		wenv->nr_of_intrinsics += r->i_mapper(node, r->ctx) ? 1 : 0;
	} else if (0 <= op->code && op->code < ARR_LEN(wenv->i_map)) {
		const i_instr_record *r;

		/* run all possible mappers for this opcode until one succeeds */
		for (r = wenv->i_map[op->code]; r != NULL; r = r->link) {
			if (r->i_mapper(node, r->ctx)) {
				++wenv->nr_of_intrinsics;
				break;
			}
		}
	}
}
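
/*
 * Illustrative sketch (not part of the original file): the contract of an
 * i_mapper_func as invoked by call_mapper() above. A mapper inspects the
 * node, replaces it if it is responsible for it, and returns non-zero exactly
 * when it changed the graph, so the intrinsics counter stays accurate. All
 * names below are made up for illustration.
 *
 *   static int my_noop_mapper(ir_node *call, void *ctx) {
 *       (void) ctx;
 *       if (get_Call_n_params(call) != 1)
 *           return 0;  // not responsible: leave the Call untouched
 *       // ... build replacement nodes and rewire the Call here ...
 *       return 1;      // graph changed: counted as a lowered intrinsic
 *   }
 */
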
/* Go through all graphs and map calls to intrinsic functions. */
unsigned lower_intrinsics(i_record *list, int length) {
	int            i, n_ops = get_irp_n_opcodes();
	ir_graph       *irg;
	pmap           *c_map = pmap_create_ex(length);
	i_instr_record **i_map;
	unsigned       nr_of_intrinsics = 0;
	walker_env_t   wenv;

	/* we use the ir_op generic pointers here */
	NEW_ARR_A(const i_instr_record *, i_map, n_ops);
	memset((void *)i_map, 0, sizeof(*i_map) * n_ops);

	/* fill a map for faster search */
	for (i = length - 1; i >= 0; --i) {
		if (list[i].i_call.kind == INTRINSIC_CALL) {
			pmap_insert(c_map, list[i].i_call.i_ent, (void *)&list[i].i_call);
		} else {
			ir_op *op = list[i].i_instr.op;
			assert(0 <= op->code && op->code < ARR_LEN(i_map));

			/* chain all records for the same opcode */
			list[i].i_instr.link = i_map[op->code];
			i_map[op->code]      = &list[i].i_instr;
		}
	}

	wenv.c_map = c_map;
	wenv.i_map = i_map;

	for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
		irg = get_irp_irg(i);

		wenv.nr_of_intrinsics = 0;
		irg_walk_graph(irg, NULL, call_mapper, &wenv);

		if (wenv.nr_of_intrinsics) {
			/* changes detected: invalidate the analysis information */
			set_irg_outs_inconsistent(irg);
			set_irg_callee_info_state(irg, irg_callee_info_inconsistent);

			/* exception control flow might have changed */
			set_irg_doms_inconsistent(irg);
			set_irg_extblk_inconsistent(irg);
			set_irg_loopinfo_inconsistent(irg);

			/* calls might be removed/added */
			set_trouts_inconsistent();

			/* optimize it, Tuples might have been created */
			local_optimize_graph(irg);

			nr_of_intrinsics += wenv.nr_of_intrinsics;
		}
	}
	pmap_destroy(c_map);

	return nr_of_intrinsics;
}
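
/*
 * Usage sketch (illustrative, not part of the original file): register
 * i_mapper_Abs for a previously created entity and run the lowering.
 * "abs_ent" is a hypothetical ir_entity for a declared abs() function; the
 * i_call fields used here are the ones read by lower_intrinsics() above.
 *
 *   i_record rec;
 *
 *   rec.i_call.kind     = INTRINSIC_CALL;
 *   rec.i_call.i_ent    = abs_ent;        // hypothetical entity
 *   rec.i_call.i_mapper = i_mapper_Abs;
 *   rec.i_call.ctx      = NULL;
 *
 *   unsigned n = lower_intrinsics(&rec, 1);  // n = number of mapped calls
 */
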
/* A mapper for the integer abs. */
int i_mapper_Abs(ir_node *call, void *ctx) {
	ir_node  *mem   = get_Call_mem(call);
	ir_node  *block = get_nodes_block(call);
	ir_node  *op    = get_Call_param(call, 0);
	ir_node  *irn;
	dbg_info *dbg   = get_irn_dbg_info(call);
	(void) ctx;

	irn = new_rd_Abs(dbg, current_ir_graph, block, op, get_irn_mode(op));
	irn = new_Tuple(1, &irn);

	turn_into_tuple(call, pn_Call_max);
	set_Tuple_pred(call, pn_Call_M_regular, mem);
	set_Tuple_pred(call, pn_Call_X_except, new_Bad());
	set_Tuple_pred(call, pn_Call_T_result, irn);
	set_Tuple_pred(call, pn_Call_M_except, mem);
	set_Tuple_pred(call, pn_Call_P_value_res_base, new_Bad());
	return 1;
}
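
/*
 * Note: turn_into_tuple() re-uses the Call node itself as a Tuple, so every
 * existing Proj of the Call now selects one of the predecessors set above.
 * Since Abs cannot raise an exception, X_except is wired to Bad and both
 * memory outputs simply pass the incoming memory through; the
 * local_optimize_graph() run in lower_intrinsics() later folds these
 * Proj(Tuple) pairs away.
 */
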
/* A mapper for the alloca() function. */
int i_mapper_Alloca(ir_node *call, void *ctx) {
	ir_node  *mem   = get_Call_mem(call);
	ir_node  *block = get_nodes_block(call);
	ir_node  *op    = get_Call_param(call, 0);
	ir_node  *irn, *exc;
	dbg_info *dbg   = get_irn_dbg_info(call);
	(void) ctx;

	irn = new_rd_Alloc(dbg, current_ir_graph, block, mem, op, firm_unknown_type, stack_alloc);
	mem = new_Proj(irn, mode_M, pn_Alloc_M);
	exc = new_Proj(irn, mode_X, pn_Alloc_X_except);
	irn = new_Proj(irn, get_modeP_data(), pn_Alloc_res);
	irn = new_Tuple(1, &irn);

	turn_into_tuple(call, pn_Call_max);
	set_Tuple_pred(call, pn_Call_M_regular, mem);
	set_Tuple_pred(call, pn_Call_X_except, exc);
	set_Tuple_pred(call, pn_Call_T_result, irn);
	set_Tuple_pred(call, pn_Call_M_except, mem);
	set_Tuple_pred(call, pn_Call_P_value_res_base, new_Bad());
	return 1;
}
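
/*
 * Unlike Abs above, an Alloc node has a real exception output, so here the
 * Call's X_except slot is wired to the Alloc's own exception Proj instead of
 * Bad, and the Alloc's memory result replaces the incoming memory.
 */
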
#define LMAX(a, b) ((a) > (b) ? (a) : (b))

/* A mapper for mapping unsupported instructions to runtime calls. */
int i_mapper_RuntimeCall(ir_node *node, runtime_rt *rt) {
	int             i, j, arity, first, n_param, n_res;
	long            n_proj;
	ir_type         *mtp;
	ir_node         *mem, *bl, *call, *addr, *res_proj;
	ir_node         **in;
	ir_graph        *irg;
	symconst_symbol sym;

	/* check if the result mode matches */
	if (get_irn_mode(node) != rt->mode)
		return 0;

	arity = get_irn_arity(node);
	if (arity <= 0)
		return 0;

	mtp     = get_entity_type(rt->ent);
	n_param = get_method_n_params(mtp);
	irg     = current_ir_graph;

	mem = get_irn_n(node, 0);
	if (get_irn_mode(mem) != mode_M) {
		/* no memory input: all inputs of the node are parameters */
		mem   = new_r_NoMem(irg);
		first = 0;
	} else
		first = 1;

	/* check if the modes of the predecessors match the parameter modes */
	if (arity - first != n_param)
		return 0;

	for (i = first, j = 0; i < arity; ++i, ++j) {
		ir_type *param_tp = get_method_param_type(mtp, j);
		ir_node *pred     = get_irn_n(node, i);

		if (get_type_mode(param_tp) != get_irn_mode(pred))
			return 0;
	}

	n_res = get_method_n_ress(mtp);

	/* step 0: calculate the number of needed Projs */
	n_proj = 0;
	n_proj = LMAX(n_proj, rt->mem_proj_nr + 1);
	n_proj = LMAX(n_proj, rt->exc_proj_nr + 1);
	n_proj = LMAX(n_proj, rt->exc_mem_proj_nr + 1);
	n_proj = LMAX(n_proj, rt->res_proj_nr + 1);

	if (n_proj > 0) {
		if (rt->mode != mode_T) /* must be mode_T */
			return 0;
	} else {
		if (n_res > 0)
			/* the single result mode must match */
			if (get_type_mode(get_method_res_type(mtp, 0)) != rt->mode)
				return 0;
	}

	/* when we get here, the number of predecessors matches, as do the parameter modes */
	bl = get_nodes_block(node);

	in = NULL;
	if (n_param > 0) {
		NEW_ARR_A(ir_node *, in, n_param);
		for (i = 0; i < n_param; ++i)
			in[i] = get_irn_n(node, first + i);
	}

	/* step 1: create the Call */
	sym.entity_p = rt->ent;
	addr = new_r_SymConst(irg, bl, sym, symconst_addr_ent);
	call = new_rd_Call(get_irn_dbg_info(node), irg, bl, mem, addr, n_param, in, mtp);
	set_irn_pinned(call, get_irn_pinned(node));

	if (n_res > 0)
		res_proj = new_r_Proj(irg, bl, call, mode_T, pn_Call_T_result);
	else
		res_proj = NULL;
	if (n_proj > 0) {
		/* step 2: turn the node into a Tuple; reserve room for all n_res results */
		if (n_res > 0)
			n_proj += n_res - 1;
		turn_into_tuple(node, n_proj);

		for (i = 0; i < n_proj; ++i)
			set_Tuple_pred(node, i, new_r_Bad(irg));
		if (rt->mem_proj_nr >= 0)
			set_Tuple_pred(node, rt->mem_proj_nr, new_r_Proj(irg, bl, call, mode_M, pn_Call_M_regular));
		if (get_irn_op(mem) != op_NoMem) {
			/* Exceptions can only be handled with real memory */
			if (rt->exc_proj_nr >= 0)
				set_Tuple_pred(node, rt->exc_proj_nr, new_r_Proj(irg, bl, call, mode_X, pn_Call_X_except));
			if (rt->exc_mem_proj_nr >= 0)
				set_Tuple_pred(node, rt->exc_mem_proj_nr, new_r_Proj(irg, bl, call, mode_M, pn_Call_M_except));
		}
		if (rt->res_proj_nr >= 0)
			for (i = 0; i < n_res; ++i)
				set_Tuple_pred(node, rt->res_proj_nr + i,
					new_r_Proj(irg, bl, res_proj, get_type_mode(get_method_res_type(mtp, i)), i));
		return 1;
	} else if (n_res > 0) {
		/* only one return value supported */
		ir_mode *mode = get_type_mode(get_method_res_type(mtp, 0));

		res_proj = new_r_Proj(irg, bl, call, mode_T, pn_Call_T_result);
		res_proj = new_r_Proj(irg, bl, res_proj, mode, 0);

		exchange(node, res_proj);
		return 1;
	}
	/* should not happen */
	return 0;
}
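
/*
 * Configuration sketch (illustrative, not part of the original file): map an
 * unsupported mode_T node, e.g. a 64-bit Div, to a runtime helper. "div64_ent"
 * is a hypothetical ir_entity for a helper like __divdi3; the slot numbers
 * reuse the Div node's pn_Div_* Proj numbers so its existing Projs keep
 * selecting the right Tuple entries.
 *
 *   runtime_rt rt;
 *
 *   rt.ent             = div64_ent;        // hypothetical runtime entity
 *   rt.mode            = mode_T;           // the mapped Div node is mode_T
 *   rt.mem_proj_nr     = pn_Div_M;         // Tuple slot for the memory result
 *   rt.exc_proj_nr     = pn_Div_X_except;  // Tuple slot for the exception X
 *   rt.exc_mem_proj_nr = -1;               // no separate exception memory
 *   rt.res_proj_nr     = pn_Div_res;       // first (and only) result slot
 *
 *   // i_mapper_RuntimeCall(div_node, &rt) then replaces the Div by a Call
 *   // to div64_ent and fills the Tuple slots listed above.
 */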