2 * Copyright (C) 1995-2007 University of Karlsruhe. All rights reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Lower some High-level constructs, moved from the firmlower.
23 * @author Boris Boesler, Goetz Lindenmaier, Michael Beck
42 * Lower a Sel node. Do not touch Sels accessing entities on the frame type.
44 static void lower_sel(ir_node *sel) {
/* Lowers one Sel node: replaces it by explicit address arithmetic
 * (Add/Mul of the base pointer with entity offsets / scaled indices),
 * or by a SymConst for global entities, or leaves it alone when the
 * backend must handle it (frame/value-param entities).
 * NOTE(review): this listing is incomplete -- gaps in the embedded line
 * numbers show that several original lines (declarations of ent, dbg,
 * tv, sym, add, i, dim, last_size; early returns; else keywords and
 * closing braces) were dropped. Code below left byte-identical. */
45 ir_graph *irg = current_ir_graph;
47 ir_node *newn, *cnst, *index, *ptr, *bl;
49 ir_mode *basemode, *mode, *mode_Int;
50 ir_type *basetyp, *owner;
55 ent = get_Sel_entity(sel);
56 owner = get_entity_owner(ent);
58 /* Do not lower frame type access: must be lowered by the backend. */
59 if (is_frame_type(owner))
63 * Cannot handle value param entities here.
64 * Must be lowered by the backend.
66 if (is_value_param_type(owner))
69 ptr = get_Sel_ptr(sel);
70 dbg = get_irn_dbg_info(sel);
71 mode = get_irn_mode(sel);
/* signed integer mode matching the reference mode: used for all address
 * arithmetic constructed below */
73 mode_Int = get_reference_mode_signed_eq(mode);
75 /* TLS access, must be handled by the linker */
76 if (get_tls_type() == owner) {
80 bl = get_nodes_block(sel);
/* thread-local entity: base pointer + symbolic entity address */
82 cnst = new_rd_SymConst(dbg, irg, bl, sym, symconst_addr_ent);
83 newn = new_rd_Add(dbg, irg, bl, ptr, cnst, mode);
/* entity offsets are used from here on, so layouts must be fixed */
87 assert(get_type_state(get_entity_owner(ent)) == layout_fixed);
88 assert(get_type_state(get_entity_type(ent)) == layout_fixed);
90 bl = get_nodes_block(sel);
91 if (0 < get_Sel_n_indexs(sel)) {
/* indexed access: compute the element address from the indices */
93 basetyp = get_entity_type(ent);
94 if (is_Primitive_type(basetyp))
95 basemode = get_type_mode(basetyp);
97 basemode = mode_P_data;
99 assert(basemode && "no mode for lowering Sel");
100 assert((get_mode_size_bytes(basemode) != -1) && "can not deal with unorthodox modes");
101 index = get_Sel_index(sel, 0);
103 if (is_Array_type(owner)) {
105 ir_type *arr_ty = owner;
106 int dims = get_array_n_dimensions(arr_ty);
/* per-dimension order map; alloca is bounded by the number of
 * array dimensions (presumably small -- TODO confirm) */
107 int *map = alloca(sizeof(int) * dims);
110 assert(dims == get_Sel_n_indexs(sel)
111 && "array dimension must match number of indices of Sel node");
113 for (i = 0; i < dims; i++) {
114 int order = get_array_order(arr_ty, i);
116 assert(order < dims &&
117 "order of a dimension must be smaller than the arrays dim");
120 newn = get_Sel_ptr(sel);
122 /* Size of the array element */
123 tv = new_tarval_from_long(get_type_size_bytes(basetyp), mode_Int);
124 last_size = new_rd_Const(dbg, irg, get_irg_start_block(irg), mode_Int, tv);
127 * We compute the offset part of dimension d_i recursively
128 * with the offset part of dimension d_{i-1}
130 * off_0 = sizeof(array_element_type);
131 * off_i = (u_i - l_i) * off_{i-1} ; i >= 1
133 * whereas u_i is the upper bound of the current dimension
134 * and l_i the lower bound of the current dimension.
136 for (i = dims - 1; i >= 0; i--) {
138 ir_node *lb, *ub, *elms, *n, *ind;
141 lb = get_array_lower_bound(arr_ty, dim);
142 ub = get_array_upper_bound(arr_ty, dim);
144 assert(irg == current_ir_graph);
/* known (non-Unknown) bounds are copied and converted to mode_Int */
145 if (get_irn_op(lb) != op_Unknown)
146 lb = new_rd_Conv(dbg, irg, bl, copy_const_value(get_irn_dbg_info(sel), lb), mode_Int);
150 if (get_irn_op(ub) != op_Unknown)
151 ub = new_rd_Conv(dbg, irg, bl, copy_const_value(get_irn_dbg_info(sel), ub), mode_Int);
156 * If the array has more than one dimension, lower and upper
157 * bounds have to be set in the non-last dimension.
160 assert(lb && "lower bound has to be set in multi-dim array");
/* NOTE(review): the assert below repeats lb although its message talks
 * about the upper bound -- it should almost certainly test ub instead. */
161 assert(lb && "upper bound has to be set in multi-dim array");
163 /* Elements in one Dimension */
164 elms = new_rd_Sub(dbg, irg, bl, ub, lb, mode_Int);
167 ind = new_rd_Conv(dbg, irg, bl, get_Sel_index(sel, dim), mode_Int);
170 * Normalize index, if lower bound is set, also assume
174 ind = new_rd_Sub(dbg, irg, bl, ind, lb, mode_Int);
/* offset contribution of this dimension ... */
176 n = new_rd_Mul(dbg, irg, bl, ind, last_size, mode_Int);
/* ... and accumulated element size for the next (outer) dimension */
182 last_size = new_rd_Mul(dbg, irg, bl, last_size, elms, mode_Int);
184 newn = new_rd_Add(dbg, irg, bl, newn, n, mode);
/* non-array indexed access: address = ptr + index * sizeof(element) */
188 ir_mode *idx_mode = get_irn_mode(index);
189 tarval *tv = new_tarval_from_long(get_mode_size_bytes(basemode), idx_mode);
191 newn = new_rd_Add(dbg, irg, bl, get_Sel_ptr(sel),
192 new_rd_Mul(dbg, irg, bl, index,
193 new_r_Const(irg, get_irg_start_block(irg), idx_mode, tv),
197 } else if (is_Method_type(get_entity_type(ent)) &&
198 is_Class_type(owner) &&
199 (owner != get_glob_type()) &&
200 (!is_frame_type(owner))) {
202 ir_mode *ent_mode = get_type_mode(get_entity_type(ent));
204 /* We need an additional load when accessing methods from a dispatch table. */
205 tv = new_tarval_from_long(get_entity_offset(ent), mode_Int);
206 cnst = new_rd_Const(dbg, irg, get_irg_start_block(irg), mode_Int, tv);
207 add = new_rd_Add(dbg, irg, bl, get_Sel_ptr(sel), cnst, mode);
208 #ifdef DO_CACHEOPT /* cacheopt version */
209 newn = new_rd_Load(dbg, irg, bl, get_Sel_mem(sel), sel, ent_mode);
210 cacheopt_map_addrs_register_node(newn);
211 set_Load_ptr(newn, add);
212 #else /* normal code */
213 newn = new_rd_Load(dbg, irg, bl, get_Sel_mem(sel), add, ent_mode);
215 newn = new_r_Proj(irg, bl, newn, ent_mode, pn_Load_res);
217 } else if (get_entity_owner(ent) != get_glob_type()) {
218 /* replace Sel by add(obj, const(ent.offset)) */
219 assert(!(get_entity_allocation(ent) == allocation_static &&
220 (get_entity_n_overwrites(ent) == 0 && get_entity_n_overwrittenby(ent) == 0)));
221 tv = new_tarval_from_long(get_entity_offset(ent), mode_Int);
222 cnst = new_r_Const(irg, get_irg_start_block(irg), mode_Int, tv);
223 newn = new_rd_Add(dbg, irg, bl, get_Sel_ptr(sel), cnst, mode);
/* global entity: replace the Sel by its symbolic address */
226 newn = new_rd_SymConst_addr_ent(NULL, current_ir_graph, ent, firm_unknown_type);
236 * Lower all possible SymConst nodes.
238 static void lower_symconst(ir_node *symc) {
/* Replaces a SymConst node by an equivalent Const node where the value
 * (type size/alignment, entity offset, enumeration value) is known.
 * symconst_addr_name/addr_ent are left for the back end.
 * NOTE(review): listing is incomplete -- declarations of tp, mode, tv,
 * newn, ent, ec and the break; statements between cases were dropped
 * from this excerpt. Code below left byte-identical. */
246 switch (get_SymConst_kind(symc)) {
247 case symconst_type_tag:
/* type tags are not lowered here -- unconditional failure */
248 assert(!"SymConst kind symconst_type_tag not implemented");
250 case symconst_type_size:
251 /* rewrite the SymConst node by a Const node */
252 tp = get_SymConst_type(symc);
253 assert(get_type_state(tp) == layout_fixed);
254 mode = get_irn_mode(symc);
255 tv = new_tarval_from_long(get_type_size_bytes(tp), mode);
256 newn = new_r_Const(current_ir_graph,
257 get_irg_start_block(current_ir_graph),
258 get_irn_mode(symc), tv);
262 exchange(symc, newn);
264 case symconst_type_align:
265 /* rewrite the SymConst node by a Const node */
266 tp = get_SymConst_type(symc);
267 assert(get_type_state(tp) == layout_fixed);
268 mode = get_irn_mode(symc);
269 tv = new_tarval_from_long(get_type_alignment_bytes(tp), mode);
270 newn = new_r_Const(current_ir_graph,
271 get_irg_start_block(current_ir_graph),
276 exchange(symc, newn);
278 case symconst_addr_name:
279 /* do not rewrite - pass info to back end */
281 case symconst_addr_ent:
284 case symconst_ofs_ent:
285 /* rewrite the SymConst node by a Const node */
286 ent = get_SymConst_entity(symc);
287 assert(get_type_state(get_entity_type(ent)) == layout_fixed);
288 mode = get_irn_mode(symc);
289 tv = new_tarval_from_long(get_entity_offset(ent), mode);
290 newn = new_r_Const(current_ir_graph,
291 get_irg_start_block(current_ir_graph),
296 exchange(symc, newn);
298 case symconst_enum_const:
299 /* rewrite the SymConst node by a Const node */
300 ec = get_SymConst_enum(symc);
301 assert(get_type_state(get_enumeration_owner(ec)) == layout_fixed);
302 tv = get_enumeration_value(ec);
303 newn = new_r_Const(current_ir_graph,
304 get_irg_start_block(current_ir_graph),
305 get_irn_mode(symc), tv);
309 exchange(symc, newn);
316 assert(!"unknown SymConst kind");
319 } /* lower_symconst */
322 * Checks whether a size is an integral size
324 * @param size the size in bits
326 static int is_integral_size(int size) {
/* Returns non-zero when the bit size can be loaded/stored directly by a
 * plain memory access (used by the bitfield lowering as a fast-path
 * test). NOTE(review): the actual return expression was dropped from
 * this excerpt -- presumably a power-of-two / >= byte-size check; verify
 * against the full source. */
330 /* must be at least byte size */
332 } /* is_integral_size */
335 * lower bitfield load access.
337 * @param proj the Proj(result) node
338 * @param load the Load node
340 static void lower_bitfields_loads(ir_node *proj, ir_node *load) {
/* Rewrites a Proj(Load) of a bitfield entity into a word-sized Load
 * followed by explicit shift/mask arithmetic that extracts the field.
 * NOTE(review): listing is incomplete -- declarations of ent, bf_type,
 * db, early returns and several closing braces were dropped from this
 * excerpt. Code below left byte-identical. */
341 ir_node *sel = get_Load_ptr(load);
342 ir_node *block, *n_proj, *res, *ptr;
345 ir_mode *bf_mode, *mode;
346 int offset, bit_offset, bits, bf_bits, old_cse;
/* only Loads through a Sel can be bitfield accesses */
349 if (get_irn_op(sel) != op_Sel)
352 ent = get_Sel_entity(sel);
353 bf_type = get_entity_type(ent);
355 /* We have a bitfield access, if either a bit offset is given, or
356 the size is not integral. */
357 bf_mode = get_type_mode(bf_type);
361 mode = get_irn_mode(proj);
362 block = get_nodes_block(proj);
363 bf_bits = get_mode_size_bits(bf_mode);
364 bit_offset = get_entity_offset_bits_remainder(ent);
365 if (bit_offset == 0 && is_integral_size(bf_bits)) {
/* byte-aligned, integral-sized field: a plain (possibly converted)
 * load suffices, no shifting needed */
366 if (mode != bf_mode) {
367 /* we have an integral size and can replace the load by a load
369 set_Load_mode(load, bf_mode);
370 db = get_irn_dbg_info(load);
371 res = new_rd_Proj(get_irn_dbg_info(proj), current_ir_graph, block, load, bf_mode, pn_Load_res);
372 res = new_rd_Conv(db, current_ir_graph, block, res, mode);
379 bits = get_mode_size_bits(mode);
380 offset = get_entity_offset(ent);
383 * ok, here we are: now convert the Proj_mode_bf(Load) into And(Shr(Proj_mode(Load)) for unsigned
384 * and Shr(Shl(Proj_mode(load)) for signed
387 /* abandon bitfield sel */
388 ptr = get_Sel_ptr(sel);
389 db = get_irn_dbg_info(sel);
390 ptr = new_rd_Add(db, current_ir_graph, block, ptr, new_Const_long(mode_Is, offset), get_irn_mode(ptr));
392 set_Load_ptr(load, ptr);
393 set_Load_mode(load, mode);
396 /* create new proj, switch off CSE or we may get the old one back */
397 old_cse = get_opt_cse();
399 res = n_proj = new_r_Proj(current_ir_graph, block, load, mode, pn_Load_res);
400 set_opt_cse(old_cse);
402 if (mode_is_signed(mode)) { /* signed */
/* shift left to drop high bits, arithmetic shift right to
 * sign-extend the field into the full word */
403 int shift_count_up = bits - (bf_bits + bit_offset);
404 int shift_count_down = bits - bf_bits;
406 if (shift_count_up) {
407 res = new_r_Shl(current_ir_graph, block, res,
408 new_r_Const(current_ir_graph, block, mode_Iu, new_tarval_from_long(shift_count_up, mode_Iu)), mode);
410 if (shift_count_down) {
411 res = new_r_Shrs(current_ir_graph, block, res,
412 new_r_Const(current_ir_graph, block, mode_Iu, new_tarval_from_long(shift_count_down, mode_Iu)), mode);
414 } else { /* unsigned */
/* logical shift right, then mask off bits above the field width */
415 int shift_count_down = bit_offset;
416 unsigned mask = ((unsigned)-1) >> (bits - bf_bits);
418 if (shift_count_down) {
419 res = new_r_Shr(current_ir_graph, block, res,
420 new_r_Const(current_ir_graph, block, mode_Iu, new_tarval_from_long(shift_count_down, mode_Iu)), mode);
422 if (bits != bf_bits) {
423 res = new_r_And(current_ir_graph, block, res,
424 new_r_Const(current_ir_graph, block, mode, new_tarval_from_long(mask, mode)), mode);
429 } /* lower_bitfields_loads */
432 * lower bitfield store access.
434 * @todo: It adds a load which may produce an exception!
436 static void lower_bitfields_stores(ir_node *store) {
/* Rewrites a Store to a bitfield entity into a read-modify-write:
 * load the containing word, mask out the field, Or in the shifted and
 * masked new value, store the word back.
 * NOTE(review): listing is incomplete -- declarations of ent, bf_type,
 * db, the computation of neg_mask, early returns and closing braces
 * were dropped from this excerpt. Code below left byte-identical. */
437 ir_node *sel = get_Store_ptr(store);
438 ir_node *ptr, *value;
441 ir_mode *bf_mode, *mode;
442 ir_node *mem, *irn, *block;
443 unsigned mask, neg_mask;
444 int bf_bits, bits_mask, offset, bit_offset;
447 /* check bitfield access */
448 if (get_irn_op(sel) != op_Sel)
451 ent = get_Sel_entity(sel);
452 bf_type = get_entity_type(ent);
454 /* We have a bitfield access, if either a bit offset is given, or
455 the size is not integral. */
456 bf_mode = get_type_mode(bf_type);
460 bf_bits = get_mode_size_bits(bf_mode);
461 bit_offset = get_entity_offset_bits_remainder(ent);
/* byte-aligned, integral-sized field needs no read-modify-write */
462 if (bit_offset == 0 && is_integral_size(bf_bits))
465 value = get_Store_value(store);
466 mode = get_irn_mode(value);
469 * ok, here we are: now convert the Store(Sel(), value) into Or(And(Load(Sel),c), And(Value,c))
471 mem = get_Store_mem(store);
472 block = get_nodes_block(store);
473 bit_offset = get_entity_offset_bits_remainder(ent);
474 offset = get_entity_offset(ent);
/* mask covers the field bits (at offset 0); neg_mask (complement,
 * computed on an elided line) covers everything else */
476 bits_mask = get_mode_size_bits(mode) - bf_bits;
477 mask = ((unsigned)-1) >> bits_mask;
481 /* abandon bitfield sel */
482 ptr = get_Sel_ptr(sel);
483 db = get_irn_dbg_info(sel);
484 ptr = new_rd_Add(db, current_ir_graph, block, ptr, new_Const_long(mode_Is, offset), get_irn_mode(ptr));
487 /* there are some bits, normal case */
488 irn = new_r_Load(current_ir_graph, block, mem, ptr, mode);
489 mem = new_r_Proj(current_ir_graph, block, irn, mode_M, pn_Load_M);
490 irn = new_r_Proj(current_ir_graph, block, irn, mode, pn_Load_res);
/* clear the target field in the loaded word */
492 irn = new_r_And(current_ir_graph, block, irn,
493 new_r_Const(current_ir_graph, block, mode, new_tarval_from_long(neg_mask, mode)), mode);
/* shift the new value into field position, then mask it to width */
495 if (bit_offset > 0) {
496 value = new_r_Shl(current_ir_graph, block, value,
497 new_r_Const(current_ir_graph, block, mode_Iu, new_tarval_from_long(bit_offset, mode_Iu)), mode);
500 value = new_r_And(current_ir_graph, block, value,
501 new_r_Const(current_ir_graph, block, mode, new_tarval_from_long(mask, mode)), mode);
503 value = new_r_Or(current_ir_graph, block, value, irn, mode);
/* redirect the Store to the rebuilt word and the Load's memory */
506 set_Store_mem(store, mem);
507 set_Store_value(store, value);
508 set_Store_ptr(store, ptr);
509 } /* lower_bitfields_stores */
512 * lowers IR-nodes, called from walker
514 static void lower_irnode(ir_node *irn, void *env) {
/* Walker callback: dispatches on the node opcode. The case labels
 * (presumably iro_Sel -> lower_sel, iro_SymConst -> lower_symconst --
 * TODO confirm) were dropped from this excerpt. */
516 switch (get_irn_opcode(irn)) {
529 * Walker: lowers IR-nodes for bitfield access
531 static void lower_bf_access(ir_node *irn, void *env) {
/* Walker callback: lowers bitfield accesses. Proj(Load, pn_Load_res)
 * goes to lower_bitfields_loads, Stores to lower_bitfields_stores.
 * NOTE(review): the case labels, break;s and closing braces of the
 * switch were dropped from this excerpt. */
533 switch (get_irn_opcode(irn)) {
536 long proj = get_Proj_proj(irn);
537 ir_node *pred = get_Proj_pred(irn);
538 ir_op *op = get_irn_op(pred);
540 if ((proj == pn_Load_res) && (op == op_Load))
541 lower_bitfields_loads(irn, pred);
545 lower_bitfields_stores(irn);
551 } /* lower_bf_access */
554 * Replaces SymConsts by a real constant if possible.
555 * Replace Sel nodes by address computation. Also resolves array access.
556 * Handle Bitfields by adding And/Or calculations.
558 void lower_highlevel(void) {
/* Entry point of the pass: walks every graph twice -- first lowering
 * bitfield accesses (which still need intact Sel nodes), then lowering
 * the remaining Sel/SymConst nodes -- and marks each graph as low.
 * NOTE(review): the declarations of i and n were dropped from this
 * excerpt. */
561 n = get_irp_n_irgs();
562 for (i = 0; i < n; ++i) {
563 ir_graph *irg = get_irp_irg(i);
565 /* First step: lower bitfield access: must be run as long as Sels still exist. */
566 irg_walk_graph(irg, NULL, lower_bf_access, NULL);
568 /* Finally: lower SymConst-Size and Sel nodes. */
569 irg_walk_graph(irg, NULL, lower_irnode, NULL);
571 set_irg_phase_low(irg);
573 } /* lower_highlevel */