 * Copyright (C) 1995-2008 University of Karlsruhe.  All rights reserved.
 * This file is part of libFirm.
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License
 * Agreement provided with the Software.
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * @brief   Lower some high-level constructs, moved from the firmlower.
 * @author  Boris Boesler, Goetz Lindenmaier, Michael Beck
 * Lower a Sel node. Do not touch Sels accessing entities on the frame type.
static void lower_sel(ir_node *sel)
    ir_graph  *irg   = get_irn_irg(sel);
    ir_entity *ent   = get_Sel_entity(sel);
    ir_type   *owner = get_entity_owner(ent);
    dbg_info  *dbg   = get_irn_dbg_info(sel);
    ir_mode   *mode  = get_irn_mode(sel);
    ir_node   *bl    = get_nodes_block(sel);
    /* we can only replace Sels when the layout of the owner type is decided. */
    if (get_type_state(owner) != layout_fixed)

    if (0 < get_Sel_n_indexs(sel)) {
        ir_type *basetyp = get_entity_type(ent);
        if (is_Primitive_type(basetyp))
            basemode = get_type_mode(basetyp);
            basemode = mode_P_data;
        assert(basemode && "no mode for lowering Sel");
        assert((get_mode_size_bits(basemode) % 8 == 0) && "cannot deal with unorthodox modes");
        index = get_Sel_index(sel, 0);
        if (is_Array_type(owner)) {
            ir_type *arr_ty = owner;
            size_t   dims   = get_array_n_dimensions(arr_ty);
            size_t  *map    = ALLOCAN(size_t, dims);
            ir_mode *mode_Int = get_reference_mode_signed_eq(mode);
            assert(dims == (size_t)get_Sel_n_indexs(sel)
                   && "array dimension must match number of indices of Sel node");
            for (i = 0; i < dims; i++) {
                size_t order = get_array_order(arr_ty, i);
                assert(order < dims &&
                       "order of a dimension must be smaller than the array's dimension count");
            newn = get_Sel_ptr(sel);
            /* Size of the array element */
            tv = new_tarval_from_long(get_type_size_bytes(basetyp), mode_Int);
            last_size = new_rd_Const(dbg, irg, tv);
             * We compute the offset part of dimension d_i recursively
             * with the offset part of dimension d_{i-1}:
             *     off_0 = sizeof(array_element_type);
             *     off_i = (u_i - l_i) * off_{i-1}   ; i >= 1
             * where u_i is the upper bound of the current dimension
             * and l_i the lower bound of the current dimension.
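             *
             * (Worked example, not from the original source, assuming zero
             * lower bounds: for a C-style array  int a[4][5]  with 4-byte
             * elements, off_0 = 4 and off_1 = (5 - 0) * 4 = 20, so the
             * address of a[i][j] is computed as  base + j*4 + i*20.)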
            for (i = dims; i > 0;) {
                size_t dim = map[--i];
                ir_node *lb, *ub, *elms, *n, *ind;
                lb = get_array_lower_bound(arr_ty, dim);
                ub = get_array_upper_bound(arr_ty, dim);
                assert(irg == current_ir_graph);
                if (! is_Unknown(lb))
                    lb = new_rd_Conv(dbg, bl, copy_const_value(get_irn_dbg_info(sel), lb, bl), mode_Int);
                if (! is_Unknown(ub))
                    ub = new_rd_Conv(dbg, bl, copy_const_value(get_irn_dbg_info(sel), ub, bl), mode_Int);
                 * If the array has more than one dimension, lower and upper
                 * bounds have to be set in all non-last dimensions.
                    assert(lb != NULL && "lower bound has to be set in multi-dim array");
                    assert(ub != NULL && "upper bound has to be set in multi-dim array");
                    /* Elements in one Dimension */
                    elms = new_rd_Sub(dbg, bl, ub, lb, mode_Int);
                ind = new_rd_Conv(dbg, bl, get_Sel_index(sel, dim), mode_Int);
                 * Normalize index if the lower bound is set, also assume
                    ind = new_rd_Sub(dbg, bl, ind, lb, mode_Int);
                n = new_rd_Mul(dbg, bl, ind, last_size, mode_Int);
                    last_size = new_rd_Mul(dbg, bl, last_size, elms, mode_Int);
                newn = new_rd_Add(dbg, bl, newn, n, mode);
            ir_mode   *idx_mode = get_irn_mode(index);
            ir_tarval *tv       = new_tarval_from_long(get_mode_size_bytes(basemode), idx_mode);
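            /* e.g. (illustration only): with a 4-byte basemode, an indexed
             * access ptr[i] lowers to Add(ptr, Mul(i, Const 4)), i.e. plain
             * pointer arithmetic */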
            newn = new_rd_Add(dbg, bl, get_Sel_ptr(sel),
                              new_rd_Mul(dbg, bl, index,
                                         new_r_Const(irg, tv),
    } else if (is_Method_type(get_entity_type(ent)) && is_Class_type(owner)) {
        /* We need an additional load when accessing methods from a dispatch
         * Matze TODO: Is this really still used? At least liboo does its own
         * lowering of Method-Sels...
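         *
         * (Illustration only, not from the original source: a method entity
         * placed at byte offset 16 in the dispatch table turns the Sel into
         * a Load from Add(obj_ptr, Const 16), see below.)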
        ir_mode   *ent_mode = get_type_mode(get_entity_type(ent));
        int        offset   = get_entity_offset(ent);
        ir_mode   *mode_Int = get_reference_mode_signed_eq(mode);
        ir_tarval *tv       = new_tarval_from_long(offset, mode_Int);
        ir_node   *cnst     = new_rd_Const(dbg, irg, tv);
        ir_node   *add      = new_rd_Add(dbg, bl, get_Sel_ptr(sel), cnst, mode);
        ir_node   *mem      = get_Sel_mem(sel);
        newn = new_rd_Load(dbg, bl, mem, add, ent_mode, cons_none);
        newn = new_r_Proj(newn, ent_mode, pn_Load_res);
        int offset = get_entity_offset(ent);
        /* replace Sel by add(obj, const(ent.offset)) */
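        /* e.g. (illustration) an entity placed at byte offset 8 becomes
         * Add(ptr, Const 8) */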
        newn = get_Sel_ptr(sel);
            ir_mode   *mode_UInt = get_reference_mode_unsigned_eq(mode);
            ir_tarval *tv        = new_tarval_from_long(offset, mode_UInt);
            ir_node   *cnst      = new_r_Const(irg, tv);
            newn = new_rd_Add(dbg, bl, newn, cnst, mode);
 * Lower all possible SymConst nodes.
static void lower_symconst(ir_node *symc)
    switch (get_SymConst_kind(symc)) {
    case symconst_type_tag:
        assert(!"SymConst kind symconst_type_tag not implemented");
    case symconst_type_size:
        /* rewrite the SymConst node by a Const node */
        irg  = get_irn_irg(symc);
        tp   = get_SymConst_type(symc);
        assert(get_type_state(tp) == layout_fixed);
        mode = get_irn_mode(symc);
        newn = new_r_Const_long(irg, mode, get_type_size_bytes(tp));
        exchange(symc, newn);
    case symconst_type_align:
        /* rewrite the SymConst node by a Const node */
        irg  = get_irn_irg(symc);
        tp   = get_SymConst_type(symc);
        assert(get_type_state(tp) == layout_fixed);
        mode = get_irn_mode(symc);
        newn = new_r_Const_long(irg, mode, get_type_alignment_bytes(tp));
        exchange(symc, newn);
    case symconst_addr_ent:
    case symconst_ofs_ent:
        /* rewrite the SymConst node by a Const node */
        irg  = get_irn_irg(symc);
        ent  = get_SymConst_entity(symc);
        assert(get_type_state(get_entity_type(ent)) == layout_fixed);
        mode = get_irn_mode(symc);
        newn = new_r_Const_long(irg, mode, get_entity_offset(ent));
        exchange(symc, newn);
    case symconst_enum_const:
        /* rewrite the SymConst node by a Const node */
        irg  = get_irn_irg(symc);
        ec   = get_SymConst_enum(symc);
        assert(get_type_state(get_enumeration_owner(ec)) == layout_fixed);
        tv   = get_enumeration_value(ec);
        newn = new_r_Const(irg, tv);
        exchange(symc, newn);
        assert(!"unknown SymConst kind");
}  /* lower_symconst */
 * Checks whether a size is an integral size.
 * @param size  the size in bits
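 *              (e.g. sizes such as 8, 16 or 32 bits are integral,
 *              a 13-bit bitfield size is not)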
static int is_integral_size(int size)
    /* must be at least byte size */
}  /* is_integral_size */
 * Lower a bitfield load access.
 * @param proj  the Proj(result) node
 * @param load  the Load node
static void lower_bitfields_loads(ir_node *proj, ir_node *load)
    ir_node *sel = get_Load_ptr(load);
    ir_node *block, *res, *ptr;
    ir_mode *bf_mode, *mode;
    int offset, bit_offset, bits, bf_bits, old_cse;
    ent     = get_Sel_entity(sel);
    bf_type = get_entity_type(ent);
    /* must be a bitfield type */
    if (!is_Primitive_type(bf_type) || get_primitive_base_type(bf_type) == NULL)
    /* We have a bitfield access if either a bit offset is given or
       the size is not integral. */
    bf_mode = get_type_mode(bf_type);
    mode       = get_irn_mode(proj);
    block      = get_nodes_block(proj);
    bf_bits    = get_mode_size_bits(bf_mode);
    bit_offset = get_entity_offset_bits_remainder(ent);
    if (bit_offset == 0 && is_integral_size(bf_bits) && bf_mode == get_Load_mode(load))
    bits   = get_mode_size_bits(mode);
    offset = get_entity_offset(ent);
     * ok, here we are: now convert the Proj_mode_bf(Load) into And(Shr(Proj_mode(Load))) for unsigned
     * and Shrs(Shl(Proj_mode(Load))) for signed
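     *
     * (Worked example, not from the original source: a 5-bit field at bit
     * offset 3 in a 32-bit word is extracted as Shrs(Shl(x, 24), 27) when
     * signed, and as And(Shr(x, 3), 0x1f) when unsigned.)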
    /* abandon bitfield sel */
    irg = get_irn_irg(sel);
    ptr = get_Sel_ptr(sel);
    db  = get_irn_dbg_info(sel);
    ptr = new_rd_Add(db, block, ptr, new_r_Const_long(irg, mode_Is, offset), get_irn_mode(ptr));
    set_Load_ptr(load, ptr);
    set_Load_mode(load, mode);
    /* create new proj, switch off CSE or we may get the old one back */
    old_cse = get_opt_cse();
    res = new_r_Proj(load, mode, pn_Load_res);
    set_opt_cse(old_cse);
    if (mode_is_signed(mode)) { /* signed */
        int shift_count_up   = bits - (bf_bits + bit_offset);
        int shift_count_down = bits - bf_bits;
        if (shift_count_up) {
            res = new_r_Shl(block, res, new_r_Const_long(irg, mode_Iu, shift_count_up), mode);
        if (shift_count_down) {
            res = new_r_Shrs(block, res, new_r_Const_long(irg, mode_Iu, shift_count_down), mode);
    } else { /* unsigned */
        int shift_count_down = bit_offset;
        unsigned mask = ((unsigned)-1) >> (bits - bf_bits);
        if (shift_count_down) {
            res = new_r_Shr(block, res, new_r_Const_long(irg, mode_Iu, shift_count_down), mode);
        if (bits != bf_bits) {
            res = new_r_And(block, res, new_r_Const_long(irg, mode, mask), mode);
}  /* lower_bitfields_loads */
 * Lower a bitfield store access.
 * @todo It adds a Load which may produce an exception!
static void lower_bitfields_stores(ir_node *store)
    ir_node *sel = get_Store_ptr(store);
    ir_node *ptr, *value;
    ir_mode *bf_mode, *mode;
    ir_node *mem, *irn, *block;
    unsigned mask, neg_mask;
    int bf_bits, bits_mask, offset, bit_offset;
    /* check bitfield access */
    ent     = get_Sel_entity(sel);
    bf_type = get_entity_type(ent);
    /* must be a bitfield type */
    if (!is_Primitive_type(bf_type) || get_primitive_base_type(bf_type) == NULL)
    /* We have a bitfield access if either a bit offset is given or
       the size is not integral. */
    bf_mode = get_type_mode(bf_type);
    value = get_Store_value(store);
    mode  = get_irn_mode(value);
    block = get_nodes_block(store);
    bf_bits    = get_mode_size_bits(bf_mode);
    bit_offset = get_entity_offset_bits_remainder(ent);
    if (bit_offset == 0 && is_integral_size(bf_bits) && bf_mode == get_irn_mode(value))
     * ok, here we are: now convert the Store(Sel(), value) into Or(And(Load(Sel), c), And(value, c))
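     *
     * (Worked example, not from the original source, assuming the mask is
     * shifted to the field position: for a 5-bit field at bit offset 3 the
     * field mask is 0x1f << 3 = 0xf8, so the stored word becomes
     * Or(And(old, ~0xf8), And(Shl(value, 3), 0xf8)).)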
    mem    = get_Store_mem(store);
    offset = get_entity_offset(ent);
    bits_mask = get_mode_size_bits(mode) - bf_bits;
    mask      = ((unsigned)-1) >> bits_mask;
    /* abandon bitfield sel */
    irg = get_irn_irg(sel);
    ptr = get_Sel_ptr(sel);
    db  = get_irn_dbg_info(sel);
    ptr = new_rd_Add(db, block, ptr, new_r_Const_long(irg, mode_Is, offset), get_irn_mode(ptr));
    /* there are some bits, normal case */
    irn = new_r_Load(block, mem, ptr, mode, cons_none);
    mem = new_r_Proj(irn, mode_M, pn_Load_M);
    irn = new_r_Proj(irn, mode, pn_Load_res);
    irn = new_r_And(block, irn, new_r_Const_long(irg, mode, neg_mask), mode);
    if (bit_offset > 0) {
        value = new_r_Shl(block, value, new_r_Const_long(irg, mode_Iu, bit_offset), mode);
    value = new_r_And(block, value, new_r_Const_long(irg, mode, mask), mode);
    value = new_r_Or(block, value, irn, mode);
    set_Store_mem(store, mem);
    set_Store_value(store, value);
    set_Store_ptr(store, ptr);
}  /* lower_bitfields_stores */
 * Lowers IR nodes; called from the walker.
static void lower_irnode(ir_node *irn, void *env)
    switch (get_irn_opcode(irn)) {
        exchange(irn, get_Cast_op(irn));
 * Walker: lowers IR nodes for bitfield access.
static void lower_bf_access(ir_node *irn, void *env)
    switch (get_irn_opcode(irn)) {
        long     proj = get_Proj_proj(irn);
        ir_node *pred = get_Proj_pred(irn);
        if (proj == pn_Load_res && is_Load(pred))
            lower_bitfields_loads(irn, pred);
        lower_bitfields_stores(irn);
 * Replaces SymConsts by a real constant if possible.
 * Replaces Sel nodes by address computation.  Also resolves array access.
 * Handles bitfields by adding And/Or calculations.
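 *
 * (Usage sketch, not from the original source: call lower_highlevel_graph(irg)
 * for a single graph, use lower_highlevel_graph_pass() to obtain it as an
 * ir_graph pass, or call lower_highlevel() to lower all graphs of the program.)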
void lower_highlevel_graph(ir_graph *irg)
    if (is_irg_state(irg, IR_GRAPH_STATE_IMPLICIT_BITFIELD_MASKING)) {
        /* First step: lower bitfield access: must be run as long as Sels still
        irg_walk_graph(irg, NULL, lower_bf_access, NULL);
    /* Finally: lower SymConst-Size and Sel nodes, Casts, unaligned Load/Stores. */
    irg_walk_graph(irg, NULL, lower_irnode, NULL);
typedef struct pass_t {
    ir_graph_pass_t pass;
 * Wrapper for running lower_highlevel_graph() as an ir_graph pass.
static int lower_highlevel_graph_wrapper(ir_graph *irg, void *context)
    lower_highlevel_graph(irg);
}  /* lower_highlevel_graph_wrapper */
ir_graph_pass_t *lower_highlevel_graph_pass(const char *name)
    pass_t *pass = XMALLOCZ(pass_t);
    return def_graph_pass_constructor(
        &pass->pass, name ? name : "lower_hl", lower_highlevel_graph_wrapper);
}  /* lower_highlevel_graph_pass */
 * Does the same as lower_highlevel() for all nodes on the const code irg.
void lower_const_code(void)
    walk_const_code(NULL, lower_irnode, NULL);
}  /* lower_const_code */
ir_prog_pass_t *lower_const_code_pass(const char *name)
    return def_prog_pass(name ? name : "lower_const_code", lower_const_code);
 * Replaces SymConsts by a real constant if possible.
 * Replaces Sel nodes by address computation.  Also resolves array access.
 * Handles bitfields by adding And/Or calculations.
void lower_highlevel(void)
    n = get_irp_n_irgs();
    for (i = 0; i < n; ++i) {
        ir_graph *irg = get_irp_irg(i);
        lower_highlevel_graph(irg);
}  /* lower_highlevel */