/*
 * Copyright (C) 1995-2008 University of Karlsruhe.  All rights reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 */

/**
 * @brief   Lower some High-level constructs, moved from the firmlower.
 * @author  Boris Boesler, Goetz Lindenmaier, Michael Beck
 */
/**
 * Lower a Sel node. Do not touch Sels accessing entities on the frame type.
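 *
 * For instance (per the non-array case below), a Sel of an entity at byte
 * offset 8 within its owner becomes Add(ptr, Const(8)).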
 */
static void lower_sel(ir_node *sel)
{
	ir_graph  *irg = current_ir_graph;
	ir_entity *ent;
	ir_node   *newn, *cnst, *index, *ptr, *bl;
	ir_tarval *tv;
	ir_mode   *basemode, *mode, *mode_Int;
	ir_type   *basetyp, *owner;
	dbg_info  *dbg;
	/* Do not lower frame type/global offset table access: must be lowered by the backend. */
	ptr = get_Sel_ptr(sel);
	if (ptr == get_irg_frame(current_ir_graph))
		return;
	ent   = get_Sel_entity(sel);
	owner = get_entity_owner(ent);
	/*
	 * Cannot handle value param entities or frame type entities here.
	 * Must be lowered by the backend.
	 */
	if (is_value_param_type(owner) || is_frame_type(owner))
		return;
	dbg  = get_irn_dbg_info(sel);
	mode = get_irn_mode(sel);

	mode_Int = get_reference_mode_signed_eq(mode);
	/* TLS access, must be handled by the linker */
	if (get_tls_type() == owner) {
		symconst_symbol sym;

		sym.entity_p = ent;
		bl = get_nodes_block(sel);

		cnst = new_rd_SymConst(dbg, irg, mode, sym, symconst_addr_ent);
		newn = new_rd_Add(dbg, bl, ptr, cnst, mode);
		exchange(sel, newn);
		return;
	}
	assert(get_type_state(get_entity_owner(ent)) == layout_fixed);
	assert(get_type_state(get_entity_type(ent)) == layout_fixed);

	bl = get_nodes_block(sel);
	if (0 < get_Sel_n_indexs(sel)) {
		basetyp = get_entity_type(ent);
		if (is_Primitive_type(basetyp))
			basemode = get_type_mode(basetyp);
		else
			basemode = mode_P_data;

		assert(basemode && "no mode for lowering Sel");
		assert((get_mode_size_bits(basemode) % 8 == 0) && "cannot deal with unorthodox modes");
		index = get_Sel_index(sel, 0);
		if (is_Array_type(owner)) {
			ir_type *arr_ty = owner;
			int      dims   = get_array_n_dimensions(arr_ty);
			int     *map    = ALLOCAN(int, dims);
			ir_node *last_size;
			int      i;

			assert(dims == get_Sel_n_indexs(sel)
			       && "array dimension must match number of indices of Sel node");
			for (i = 0; i < dims; i++) {
				int order = get_array_order(arr_ty, i);

				assert(order < dims &&
				       "order of a dimension must be smaller than the array's number of dimensions");
				map[order] = i;
			}
			newn = get_Sel_ptr(sel);

			/* Size of the array element */
			tv = new_tarval_from_long(get_type_size_bytes(basetyp), mode_Int);
			last_size = new_rd_Const(dbg, irg, tv);
			/*
			 * We compute the offset part of dimension d_i recursively
			 * with the offset part of dimension d_{i-1}:
			 *
			 *     off_0 = sizeof(array_element_type);
			 *     off_i = (u_i - l_i) * off_{i-1}  ; i >= 1
			 *
			 * where u_i is the upper bound and l_i the lower bound of the
			 * current dimension.
			 */
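			/*
			 * For example, a row-major int a[3][5] with 4-byte elements
			 * and lower bounds 0 gives off_0 = 4 and
			 * off_1 = (5 - 0) * 4 = 20, so a[i][j] is addressed as
			 * base + i*20 + j*4.
			 */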
			for (i = dims - 1; i >= 0; i--) {
				int dim = map[i];
				ir_node *lb, *ub, *elms, *n, *ind;

				elms = NULL;
				lb = get_array_lower_bound(arr_ty, dim);
				ub = get_array_upper_bound(arr_ty, dim);
				assert(irg == current_ir_graph);
				if (! is_Unknown(lb))
					lb = new_rd_Conv(dbg, bl, copy_const_value(get_irn_dbg_info(sel), lb, bl), mode_Int);
				else
					lb = NULL;

				if (! is_Unknown(ub))
					ub = new_rd_Conv(dbg, bl, copy_const_value(get_irn_dbg_info(sel), ub, bl), mode_Int);
				else
					ub = NULL;
				/*
				 * If the array has more than one dimension, lower and upper
				 * bounds have to be set in all but the last dimension.
				 */
				if (i > 0) {
					assert(lb != NULL && "lower bound has to be set in multi-dim array");
					assert(ub != NULL && "upper bound has to be set in multi-dim array");

					/* elements in one dimension */
					elms = new_rd_Sub(dbg, bl, ub, lb, mode_Int);
				}
				ind = new_rd_Conv(dbg, bl, get_Sel_index(sel, dim), mode_Int);

				/*
				 * Normalize the index: if a lower bound is set, subtract it;
				 * otherwise a lower bound of 0 is assumed.
				 */
				if (lb != NULL)
					ind = new_rd_Sub(dbg, bl, ind, lb, mode_Int);
				n = new_rd_Mul(dbg, bl, ind, last_size, mode_Int);

				/* see the offset formula above */
				if (i > 0)
					last_size = new_rd_Mul(dbg, bl, last_size, elms, mode_Int);

				newn = new_rd_Add(dbg, bl, newn, n, mode);
			}
		} else {
			/* no array type */
			ir_mode   *idx_mode = get_irn_mode(index);
			ir_tarval *tv       = new_tarval_from_long(get_mode_size_bytes(basemode), idx_mode);

			newn = new_rd_Add(dbg, bl, get_Sel_ptr(sel),
			                  new_rd_Mul(dbg, bl, index,
			                             new_r_Const(irg, tv),
			                             idx_mode),
			                  mode);
		}
	} else if (is_Method_type(get_entity_type(ent)) &&
	           is_Class_type(owner) &&
	           (owner != get_glob_type()) &&
	           (!is_frame_type(owner))) {
		ir_node *add;
		ir_mode *ent_mode = get_type_mode(get_entity_type(ent));

		/* We need an additional load when accessing methods from a dispatch table. */
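		/* (The Sel denotes a slot in the dispatch table; the method address
		 * itself must be fetched from that slot: Load(Add(ptr, Const(offset))).) */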
		tv   = new_tarval_from_long(get_entity_offset(ent), mode_Int);
		cnst = new_rd_Const(dbg, irg, tv);
		add  = new_rd_Add(dbg, bl, get_Sel_ptr(sel), cnst, mode);
		newn = new_rd_Load(dbg, bl, get_Sel_mem(sel), add, ent_mode, cons_none);
		newn = new_r_Proj(newn, ent_mode, pn_Load_res);
	} else if (get_entity_owner(ent) != get_glob_type()) {
		int offset;

		/* replace Sel by add(obj, const(ent.offset)) */
		newn   = get_Sel_ptr(sel);
		offset = get_entity_offset(ent);
		if (offset != 0) {
			ir_mode *mode_UInt = get_reference_mode_unsigned_eq(mode);

			tv   = new_tarval_from_long(offset, mode_UInt);
			cnst = new_r_Const(irg, tv);
			newn = new_rd_Add(dbg, bl, newn, cnst, mode);
		}
	} else {
		/* global access */
		newn = new_rd_SymConst_addr_ent(NULL, irg, mode, ent);
	}

	exchange(sel, newn);
} /* lower_sel */
/**
 * Lower all possible SymConst nodes.
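 *
 * (As the cases below show: type sizes and alignments, entity offsets and
 * enumeration constants collapse to Const nodes once layouts are fixed;
 * address SymConsts are left in place.)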
 */
static void lower_symconst(ir_node *symc)
{
	ir_node       *newn;
	ir_type       *tp;
	ir_entity     *ent;
	ir_tarval     *tv;
	ir_enum_const *ec;
	ir_mode       *mode;
	ir_graph      *irg;
	switch (get_SymConst_kind(symc)) {
	case symconst_type_tag:
		assert(!"SymConst kind symconst_type_tag not implemented");
		break;

	case symconst_type_size:
		/* rewrite the SymConst node by a Const node */
		irg  = get_irn_irg(symc);
		tp   = get_SymConst_type(symc);
		assert(get_type_state(tp) == layout_fixed);
		mode = get_irn_mode(symc);
		newn = new_r_Const_long(irg, mode, get_type_size_bytes(tp));
		exchange(symc, newn);
		break;
	case symconst_type_align:
		/* rewrite the SymConst node by a Const node */
		irg  = get_irn_irg(symc);
		tp   = get_SymConst_type(symc);
		assert(get_type_state(tp) == layout_fixed);
		mode = get_irn_mode(symc);
		newn = new_r_Const_long(irg, mode, get_type_alignment_bytes(tp));
		exchange(symc, newn);
		break;
	case symconst_addr_ent:
		/* leave as is */
		break;
	case symconst_ofs_ent:
		/* rewrite the SymConst node by a Const node */
		irg  = get_irn_irg(symc);
		ent  = get_SymConst_entity(symc);
		assert(get_type_state(get_entity_type(ent)) == layout_fixed);
		mode = get_irn_mode(symc);
		newn = new_r_Const_long(irg, mode, get_entity_offset(ent));
		exchange(symc, newn);
		break;
	case symconst_enum_const:
		/* rewrite the SymConst node by a Const node */
		irg  = get_irn_irg(symc);
		ec   = get_SymConst_enum(symc);
		assert(get_type_state(get_enumeration_owner(ec)) == layout_fixed);
		tv   = get_enumeration_value(ec);
		newn = new_r_Const(irg, tv);
		exchange(symc, newn);
		break;
	default:
		assert(!"unknown SymConst kind");
		break;
	}
} /* lower_symconst */
/**
 * Checks whether a size is an integral size, i.e. a power of two
 * of at least byte width.
 *
 * @param size  the size in bits
 */
static int is_integral_size(int size)
{
	/* must be a power of two */
	if (size & (size - 1))
		return 0;
	/* must be at least byte size */
	return size >= 8;
} /* is_integral_size */
/**
 * Lower a bitfield load access.
 *
 * @param proj  the Proj(result) node
 * @param load  the Load node
 */
static void lower_bitfields_loads(ir_node *proj, ir_node *load)
{
	ir_node *sel = get_Load_ptr(load);
	ir_node *block, *n_proj, *res, *ptr;
	ir_graph *irg;
	ir_entity *ent;
	ir_type *bf_type;
	ir_mode *bf_mode, *mode;
	int offset, bit_offset, bits, bf_bits, old_cse;
	dbg_info *db;

	if (!is_Sel(sel))
		return;
	ent     = get_Sel_entity(sel);
	bf_type = get_entity_type(ent);

	/* must be a bitfield type */
	if (!is_Primitive_type(bf_type) || get_primitive_base_type(bf_type) == NULL)
		return;

	/* We have a bitfield access if either a bit offset is given or
	   the size is not integral. */
	bf_mode = get_type_mode(bf_type);
	if (! mode_is_int(bf_mode))
		return;
	mode       = get_irn_mode(proj);
	block      = get_nodes_block(proj);
	bf_bits    = get_mode_size_bits(bf_mode);
	bit_offset = get_entity_offset_bits_remainder(ent);

	if (bit_offset == 0 && is_integral_size(bf_bits) && bf_mode == get_Load_mode(load))
		return;
	bits   = get_mode_size_bits(mode);
	offset = get_entity_offset(ent);
	/*
	 * ok, here we are: convert the Proj_mode_bf(Load) into
	 * And(Shr(Proj_mode(Load))) for unsigned fields and
	 * Shrs(Shl(Proj_mode(Load))) for signed fields.
	 */
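	/*
	 * Example (5-bit field at bit offset 3 in a 32-bit word):
	 *   signed:   Shl by 32 - (5 + 3) = 24, then Shrs by 32 - 5 = 27;
	 *   unsigned: Shr by 3, then And with 0xFFFFFFFF >> 27 = 0x1F.
	 */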
	/* abandon bitfield sel */
	irg = get_irn_irg(sel);
	ptr = get_Sel_ptr(sel);
	db  = get_irn_dbg_info(sel);
	ptr = new_rd_Add(db, block, ptr, new_r_Const_long(irg, mode_Is, offset), get_irn_mode(ptr));

	set_Load_ptr(load, ptr);
	set_Load_mode(load, mode);
	/* create new proj, switch off CSE or we may get the old one back */
	old_cse = get_opt_cse();
	set_opt_cse(0);
	res = n_proj = new_r_Proj(load, mode, pn_Load_res);
	set_opt_cse(old_cse);
	if (mode_is_signed(mode)) { /* signed */
		int shift_count_up   = bits - (bf_bits + bit_offset);
		int shift_count_down = bits - bf_bits;

		if (shift_count_up) {
			res = new_r_Shl(block, res, new_r_Const_long(irg, mode_Iu, shift_count_up), mode);
		}
		if (shift_count_down) {
			res = new_r_Shrs(block, res, new_r_Const_long(irg, mode_Iu, shift_count_down), mode);
		}
	} else { /* unsigned */
		int shift_count_down = bit_offset;
		unsigned mask = ((unsigned)-1) >> (bits - bf_bits);

		if (shift_count_down) {
			res = new_r_Shr(block, res, new_r_Const_long(irg, mode_Iu, shift_count_down), mode);
		}
		if (bits != bf_bits) {
			res = new_r_And(block, res, new_r_Const_long(irg, mode, mask), mode);
		}
	}

	exchange(proj, res);
} /* lower_bitfields_loads */
/**
 * Lower a bitfield store access.
 *
 * @todo: It adds a load which may produce an exception!
 */
static void lower_bitfields_stores(ir_node *store)
{
	ir_node *sel = get_Store_ptr(store);
	ir_node *ptr, *value;
	ir_entity *ent;
	ir_type *bf_type;
	ir_mode *bf_mode, *mode;
	ir_node *mem, *irn, *block;
	ir_graph *irg;
	unsigned mask, neg_mask;
	int bf_bits, bits_mask, offset, bit_offset;
	dbg_info *db;
	/* check bitfield access */
	if (!is_Sel(sel))
		return;

	ent     = get_Sel_entity(sel);
	bf_type = get_entity_type(ent);

	/* must be a bitfield type */
	if (!is_Primitive_type(bf_type) || get_primitive_base_type(bf_type) == NULL)
		return;

	/* We have a bitfield access if either a bit offset is given or
	   the size is not integral. */
	bf_mode = get_type_mode(bf_type);
	if (! mode_is_int(bf_mode))
		return;
	value = get_Store_value(store);
	mode  = get_irn_mode(value);
	block = get_nodes_block(store);

	bf_bits    = get_mode_size_bits(bf_mode);
	bit_offset = get_entity_offset_bits_remainder(ent);

	if (bit_offset == 0 && is_integral_size(bf_bits) && bf_mode == get_irn_mode(value))
		return;
	/*
	 * ok, here we are: convert the Store(Sel(), value) into
	 * Or(And(Load(Sel), neg_mask), And(Shl(value, bit_offset), mask))
	 */
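	/*
	 * Example (5-bit field at bit offset 3 in a 32-bit cell):
	 *   mask = (0xFFFFFFFF >> 27) << 3 = 0xF8 and neg_mask = ~0xF8, so the
	 *   new cell value is (old & neg_mask) | ((value << 3) & mask).
	 */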
	mem    = get_Store_mem(store);
	offset = get_entity_offset(ent);

	bits_mask = get_mode_size_bits(mode) - bf_bits;
	mask      = ((unsigned)-1) >> bits_mask;
	mask    <<= bit_offset;
	neg_mask  = ~mask;
	/* abandon bitfield sel */
	irg = get_irn_irg(sel);
	ptr = get_Sel_ptr(sel);
	db  = get_irn_dbg_info(sel);
	ptr = new_rd_Add(db, block, ptr, new_r_Const_long(irg, mode_Is, offset), get_irn_mode(ptr));
	/* there are some bits, normal case */
	irn = new_r_Load(block, mem, ptr, mode, cons_none);
	mem = new_r_Proj(irn, mode_M, pn_Load_M);
	irn = new_r_Proj(irn, mode, pn_Load_res);

	irn = new_r_And(block, irn, new_r_Const_long(irg, mode, neg_mask), mode);

	if (bit_offset > 0) {
		value = new_r_Shl(block, value, new_r_Const_long(irg, mode_Iu, bit_offset), mode);
	}

	value = new_r_And(block, value, new_r_Const_long(irg, mode, mask), mode);

	value = new_r_Or(block, value, irn, mode);
	set_Store_mem(store, mem);
	set_Store_value(store, value);
	set_Store_ptr(store, ptr);
} /* lower_bitfields_stores */
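
/*
 * In plain C the rewrite above is a read-modify-write; a sketch, assuming an
 * unsigned 32-bit cell:
 *
 *     uint32_t old = *cell;
 *     *cell = (old & neg_mask) | (((uint32_t)value << bit_offset) & mask);
 *
 * The extra load is what the @todo above warns about: it may trap.
 */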
/**
 * Lowers unaligned Loads.
 */
static void lower_unaligned_Load(ir_node *load)
{
	(void) load;
	/* NYI */
}

/**
 * Lowers unaligned Stores.
 */
static void lower_unaligned_Store(ir_node *store)
{
	(void) store;
	/* NYI */
}
/**
 * Lowers IR nodes, called from the walker.
 */
static void lower_irnode(ir_node *irn, void *env)
{
	switch (get_irn_opcode(irn)) {
	case iro_Sel:
		lower_sel(irn);
		break;
	case iro_SymConst:
		lower_symconst(irn);
		break;
	case iro_Load:
		if (env != NULL && get_Load_align(irn) == align_non_aligned)
			lower_unaligned_Load(irn);
		break;
	case iro_Store:
		if (env != NULL && get_Store_align(irn) == align_non_aligned)
			lower_unaligned_Store(irn);
		break;
	case iro_Cast:
		exchange(irn, get_Cast_op(irn));
		break;
	default:
		break;
	}
} /* lower_irnode */
/**
 * Walker: lowers IR nodes for bitfield access.
 */
static void lower_bf_access(ir_node *irn, void *env)
{
	(void) env;
	switch (get_irn_opcode(irn)) {
	case iro_Proj:
	{
		long     proj = get_Proj_proj(irn);
		ir_node *pred = get_Proj_pred(irn);

		if (proj == pn_Load_res && is_Load(pred))
			lower_bitfields_loads(irn, pred);
		break;
	}
	case iro_Store:
		lower_bitfields_stores(irn);
		break;

	default:
		break;
	}
} /* lower_bf_access */
/*
 * Replaces SymConsts by a real constant if possible.
 * Replaces Sel nodes by address computation. Also resolves array access.
 * Handles bitfields by adding And/Or calculations.
 */
void lower_highlevel_graph(ir_graph *irg, int lower_bitfields)
{
	if (lower_bitfields) {
		/* First step: lower bitfield access; must be run as long as Sels
		   still exist. */
		irg_walk_graph(irg, NULL, lower_bf_access, NULL);
	}

	/* Finally: lower SymConst-Size and Sel nodes, Casts, unaligned Load/Stores. */
	irg_walk_graph(irg, NULL, lower_irnode, NULL);
	set_irg_outs_inconsistent(irg);
} /* lower_highlevel_graph */
typedef struct pass_t {
	ir_graph_pass_t pass;
	int             lower_bitfields;
} pass_t;
/**
 * Wrapper for running lower_highlevel_graph() as an ir_graph pass.
 */
static int lower_highlevel_graph_wrapper(ir_graph *irg, void *context)
{
	pass_t *pass = (pass_t*)context;

	lower_highlevel_graph(irg, pass->lower_bitfields);
	return 0;
} /* lower_highlevel_graph_wrapper */
ir_graph_pass_t *lower_highlevel_graph_pass(const char *name, int lower_bitfields)
{
	pass_t *pass = XMALLOCZ(pass_t);

	pass->lower_bitfields = lower_bitfields;
	return def_graph_pass_constructor(
		&pass->pass, name ? name : "lower_hl", lower_highlevel_graph_wrapper);
} /* lower_highlevel_graph_pass */
/*
 * Does the same as lower_highlevel() for all nodes on the const code irg.
 */
void lower_const_code(void)
{
	walk_const_code(NULL, lower_irnode, NULL);
} /* lower_const_code */
ir_prog_pass_t *lower_const_code_pass(const char *name)
{
	return def_prog_pass(name ? name : "lower_const_code", lower_const_code);
}
/*
 * Replaces SymConsts by a real constant if possible.
 * Replaces Sel nodes by address computation. Also resolves array access.
 * Handles bitfields by adding And/Or calculations.
 */
void lower_highlevel(int lower_bitfields)
{
	int i, n;

	n = get_irp_n_irgs();
	for (i = 0; i < n; ++i) {
		ir_graph *irg = get_irp_irg(i);
		lower_highlevel_graph(irg, lower_bitfields);
	}
	lower_const_code();
} /* lower_highlevel */