2 * Copyright (C) 1995-2008 University of Karlsruhe. All right reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
22 * @brief Lower some High-level constructs, moved from the firmlower.
23 * @author Boris Boesler, Goetz Lindenmaier, Michael Beck
40 * Lower a Sel node. Do not touch Sels accessing entities on the frame type.
42 static void lower_sel(ir_node *sel) {
43 ir_graph *irg = current_ir_graph;
45 ir_node *newn, *cnst, *index, *ptr, *bl;
47 ir_mode *basemode, *mode, *mode_Int;
48 ir_type *basetyp, *owner;
53 /* Do not lower frame type/global offset table access: must be lowered by the backend. */
54 ptr = get_Sel_ptr(sel);
55 if (ptr == get_irg_frame(current_ir_graph))
58 ent = get_Sel_entity(sel);
59 owner = get_entity_owner(ent);
62 * Cannot handle value param entities or frame type entities here.
63 * Must be lowered by the backend.
65 if (is_value_param_type(owner) || is_frame_type(owner))
68 dbg = get_irn_dbg_info(sel);
69 mode = get_irn_mode(sel);
71 mode_Int = get_reference_mode_signed_eq(mode);
73 /* TLS access, must be handled by the linker */
74 if (get_tls_type() == owner) {
78 bl = get_nodes_block(sel);
80 cnst = new_rd_SymConst(dbg, irg, bl, mode, sym, symconst_addr_ent);
81 newn = new_rd_Add(dbg, irg, bl, ptr, cnst, mode);
85 assert(get_type_state(get_entity_owner(ent)) == layout_fixed);
86 assert(get_type_state(get_entity_type(ent)) == layout_fixed);
88 bl = get_nodes_block(sel);
89 if (0 < get_Sel_n_indexs(sel)) {
91 basetyp = get_entity_type(ent);
92 if (is_Primitive_type(basetyp))
93 basemode = get_type_mode(basetyp);
95 basemode = mode_P_data;
97 assert(basemode && "no mode for lowering Sel");
98 assert((get_mode_size_bits(basemode) % 8 == 0) && "can not deal with unorthodox modes");
99 index = get_Sel_index(sel, 0);
101 if (is_Array_type(owner)) {
102 ir_type *arr_ty = owner;
103 int dims = get_array_n_dimensions(arr_ty);
104 int *map = ALLOCAN(int, dims);
108 assert(dims == get_Sel_n_indexs(sel)
109 && "array dimension must match number of indices of Sel node");
111 for (i = 0; i < dims; i++) {
112 int order = get_array_order(arr_ty, i);
114 assert(order < dims &&
115 "order of a dimension must be smaller than the arrays dim");
118 newn = get_Sel_ptr(sel);
120 /* Size of the array element */
121 tv = new_tarval_from_long(get_type_size_bytes(basetyp), mode_Int);
122 last_size = new_rd_Const(dbg, irg, tv);
125 * We compute the offset part of dimension d_i recursively
126 * with the the offset part of dimension d_{i-1}
128 * off_0 = sizeof(array_element_type);
129 * off_i = (u_i - l_i) * off_{i-1} ; i >= 1
131 * whereas u_i is the upper bound of the current dimension
132 * and l_i the lower bound of the current dimension.
134 for (i = dims - 1; i >= 0; i--) {
136 ir_node *lb, *ub, *elms, *n, *ind;
139 lb = get_array_lower_bound(arr_ty, dim);
140 ub = get_array_upper_bound(arr_ty, dim);
142 assert(irg == current_ir_graph);
143 if (! is_Unknown(lb))
144 lb = new_rd_Conv(dbg, irg, bl, copy_const_value(get_irn_dbg_info(sel), lb), mode_Int, 0);
148 if (! is_Unknown(ub))
149 ub = new_rd_Conv(dbg, irg, bl, copy_const_value(get_irn_dbg_info(sel), ub), mode_Int, 0);
154 * If the array has more than one dimension, lower and upper
155 * bounds have to be set in the non-last dimension.
158 assert(lb != NULL && "lower bound has to be set in multi-dim array");
159 assert(ub != NULL && "upper bound has to be set in multi-dim array");
161 /* Elements in one Dimension */
162 elms = new_rd_Sub(dbg, irg, bl, ub, lb, mode_Int);
165 ind = new_rd_Conv(dbg, irg, bl, get_Sel_index(sel, dim), mode_Int, 0);
168 * Normalize index, id lower bound is set, also assume
172 ind = new_rd_Sub(dbg, irg, bl, ind, lb, mode_Int);
174 n = new_rd_Mul(dbg, irg, bl, ind, last_size, mode_Int);
180 last_size = new_rd_Mul(dbg, irg, bl, last_size, elms, mode_Int);
182 newn = new_rd_Add(dbg, irg, bl, newn, n, mode);
186 ir_mode *idx_mode = get_irn_mode(index);
187 tarval *tv = new_tarval_from_long(get_mode_size_bytes(basemode), idx_mode);
189 newn = new_rd_Add(dbg, irg, bl, get_Sel_ptr(sel),
190 new_rd_Mul(dbg, irg, bl, index,
191 new_r_Const(irg, tv),
195 } else if (is_Method_type(get_entity_type(ent)) &&
196 is_Class_type(owner) &&
197 (owner != get_glob_type()) &&
198 (!is_frame_type(owner))) {
200 ir_mode *ent_mode = get_type_mode(get_entity_type(ent));
202 /* We need an additional load when accessing methods from a dispatch table. */
203 tv = new_tarval_from_long(get_entity_offset(ent), mode_Int);
204 cnst = new_rd_Const(dbg, irg, tv);
205 add = new_rd_Add(dbg, irg, bl, get_Sel_ptr(sel), cnst, mode);
206 #ifdef DO_CACHEOPT /* cacheopt version */
207 newn = new_rd_Load(dbg, irg, bl, get_Sel_mem(sel), sel, ent_mode, 0);
208 cacheopt_map_addrs_register_node(newn);
209 set_Load_ptr(newn, add);
210 #else /* normal code */
211 newn = new_rd_Load(dbg, irg, bl, get_Sel_mem(sel), add, ent_mode, 0);
213 newn = new_r_Proj(irg, bl, newn, ent_mode, pn_Load_res);
215 } else if (get_entity_owner(ent) != get_glob_type()) {
218 /* replace Sel by add(obj, const(ent.offset)) */
219 assert(!(get_entity_allocation(ent) == allocation_static &&
220 (get_entity_n_overwrites(ent) == 0 && get_entity_n_overwrittenby(ent) == 0)));
221 newn = get_Sel_ptr(sel);
222 offset = get_entity_offset(ent);
224 ir_mode *mode_UInt = get_reference_mode_unsigned_eq(mode);
226 tv = new_tarval_from_long(offset, mode_UInt);
227 cnst = new_r_Const(irg, tv);
228 newn = new_rd_Add(dbg, irg, bl, newn, cnst, mode);
232 newn = new_rd_SymConst_addr_ent(NULL, current_ir_graph, mode, ent, firm_unknown_type);
/**
 * Lower all possible SymConst nodes.
 *
 * Replaces type-size, type-alignment, entity-offset and enumeration
 * SymConsts by plain Const nodes; address SymConsts are kept for the
 * back end.
 *
 * NOTE(review): some lines (break statements, declarations) appear to be
 * elided in this excerpt.
 *
 * @param symc  the SymConst node to lower
 */
static void lower_symconst(ir_node *symc) {
	switch (get_SymConst_kind(symc)) {
	case symconst_type_tag:
		/* type tags are not supported by this lowering */
		assert(!"SymConst kind symconst_type_tag not implemented");
	case symconst_type_size:
		/* rewrite the SymConst node by a Const node holding the type size */
		tp   = get_SymConst_type(symc);
		assert(get_type_state(tp) == layout_fixed);
		mode = get_irn_mode(symc);
		newn = new_Const_long(mode, get_type_size_bytes(tp));
		exchange(symc, newn);
	case symconst_type_align:
		/* rewrite the SymConst node by a Const node holding the alignment */
		tp   = get_SymConst_type(symc);
		assert(get_type_state(tp) == layout_fixed);
		mode = get_irn_mode(symc);
		newn = new_Const_long(mode, get_type_alignment_bytes(tp));
		exchange(symc, newn);
	case symconst_addr_name:
		/* do not rewrite - pass info to back end */
	case symconst_addr_ent:
	case symconst_ofs_ent:
		/* rewrite the SymConst node by a Const node holding the entity offset */
		ent  = get_SymConst_entity(symc);
		assert(get_type_state(get_entity_type(ent)) == layout_fixed);
		mode = get_irn_mode(symc);
		newn = new_Const_long(mode, get_entity_offset(ent));
		exchange(symc, newn);
	case symconst_enum_const:
		/* rewrite the SymConst node by a Const node holding the enum value */
		ec   = get_SymConst_enum(symc);
		assert(get_type_state(get_enumeration_owner(ec)) == layout_fixed);
		tv   = get_enumeration_value(ec);
		newn = new_Const(tv);
		exchange(symc, newn);
		assert(!"unknown SymConst kind");
} /* lower_symconst */
/**
 * Checks whether a size is an integral size, i.e. a power of two
 * of at least byte size (8, 16, 32, ... bits).
 *
 * @param size  the size in bits
 *
 * @return non-zero if the size is integral, zero otherwise
 */
static int is_integral_size(int size) {
	/* must be a power of two */
	if (size & (size - 1))
		return 0;
	/* must be at least byte size */
	return size >= 8;
} /* is_integral_size */
/**
 * Lower a bitfield load access: rewrite the Proj(Load) of a bitfield
 * entity into shift/mask arithmetic on a full-width load.
 *
 * NOTE(review): some lines (early returns, closing braces) appear to be
 * elided in this excerpt.
 *
 * @param proj  the Proj(result) node
 * @param load  the Load node
 */
static void lower_bitfields_loads(ir_node *proj, ir_node *load) {
	ir_node *sel = get_Load_ptr(load);
	ir_node *block, *n_proj, *res, *ptr;
	ir_mode *bf_mode, *mode;
	int offset, bit_offset, bits, bf_bits, old_cse;

	ent     = get_Sel_entity(sel);
	bf_type = get_entity_type(ent);

	/* must be a bitfield type */
	if (!is_Primitive_type(bf_type) || get_primitive_base_type(bf_type) == NULL)

	/* We have a bitfield access, if either a bit offset is given, or
	   the size is not integral. */
	bf_mode = get_type_mode(bf_type);

	mode       = get_irn_mode(proj);
	block      = get_nodes_block(proj);
	bf_bits    = get_mode_size_bits(bf_mode);
	bit_offset = get_entity_offset_bits_remainder(ent);

	/* aligned, integral-sized field loaded in its own mode: nothing to do */
	if (bit_offset == 0 && is_integral_size(bf_bits) && bf_mode == get_Load_mode(load))

	bits   = get_mode_size_bits(mode);
	offset = get_entity_offset(ent);

	/*
	 * ok, here we are: now convert the Proj_mode_bf(Load) into And(Shr(Proj_mode(Load)))
	 * for unsigned and Shrs(Shl(Proj_mode(Load))) for signed
	 */

	/* abandon bitfield sel: address the containing unit directly */
	ptr = get_Sel_ptr(sel);
	db  = get_irn_dbg_info(sel);
	ptr = new_rd_Add(db, current_ir_graph, block, ptr, new_Const_long(mode_Is, offset), get_irn_mode(ptr));

	set_Load_ptr(load, ptr);
	set_Load_mode(load, mode);

	/* create new proj, switch off CSE or we may get the old one back */
	old_cse = get_opt_cse();
	res = n_proj = new_r_Proj(current_ir_graph, block, load, mode, pn_Load_res);
	set_opt_cse(old_cse);

	if (mode_is_signed(mode)) { /* signed: Shl to the top, Shrs back down to sign-extend */
		int shift_count_up   = bits - (bf_bits + bit_offset);
		int shift_count_down = bits - bf_bits;

		if (shift_count_up) {
			res = new_r_Shl(current_ir_graph, block, res,
			                new_Const_long(mode_Iu, shift_count_up), mode);
		if (shift_count_down) {
			res = new_r_Shrs(current_ir_graph, block, res,
			                 new_Const_long(mode_Iu, shift_count_down), mode);
	} else { /* unsigned: Shr down, then mask off the upper bits */
		int shift_count_down = bit_offset;
		unsigned mask = ((unsigned)-1) >> (bits - bf_bits);

		if (shift_count_down) {
			res = new_r_Shr(current_ir_graph, block, res,
			                new_Const_long(mode_Iu, shift_count_down), mode);
		if (bits != bf_bits) {
			res = new_r_And(current_ir_graph, block, res,
			                new_Const_long(mode, mask), mode);
} /* lower_bitfields_loads */
/**
 * Lower a bitfield store access: rewrite the Store into a
 * load/mask/merge/store (read-modify-write) sequence.
 *
 * NOTE(review): some lines (early returns, closing braces, the neg_mask
 * computation) appear to be elided in this excerpt.
 *
 * @todo: It adds a load which may produce an exception!
 *
 * @param store  the Store node to lower
 */
static void lower_bitfields_stores(ir_node *store) {
	ir_node *sel = get_Store_ptr(store);
	ir_node *ptr, *value;
	ir_mode *bf_mode, *mode;
	ir_node *mem, *irn, *block;
	unsigned mask, neg_mask;
	int bf_bits, bits_mask, offset, bit_offset;

	/* check bitfield access */
	ent     = get_Sel_entity(sel);
	bf_type = get_entity_type(ent);

	/* must be a bitfield type */
	if (!is_Primitive_type(bf_type) || get_primitive_base_type(bf_type) == NULL)

	/* We have a bitfield access, if either a bit offset is given, or
	   the size is not integral. */
	bf_mode = get_type_mode(bf_type);

	value = get_Store_value(store);
	mode  = get_irn_mode(value);
	block = get_nodes_block(store);

	bf_bits    = get_mode_size_bits(bf_mode);
	bit_offset = get_entity_offset_bits_remainder(ent);

	/* aligned, integral-sized field stored in its own mode: nothing to do */
	if (bit_offset == 0 && is_integral_size(bf_bits) && bf_mode == get_irn_mode(value))

	/*
	 * ok, here we are: now convert the Store(Sel(), value) into Or(And(Load(Sel),c), And(Value,c))
	 */
	mem    = get_Store_mem(store);
	offset = get_entity_offset(ent);

	/* mask covers the low bf_bits bits of the value mode */
	bits_mask = get_mode_size_bits(mode) - bf_bits;
	mask = ((unsigned)-1) >> bits_mask;

	/* abandon bitfield sel: address the containing unit directly */
	ptr = get_Sel_ptr(sel);
	db  = get_irn_dbg_info(sel);
	ptr = new_rd_Add(db, current_ir_graph, block, ptr, new_Const_long(mode_Is, offset), get_irn_mode(ptr));

	/* there are some bits, normal case: load, mask out the field, merge */
	irn  = new_r_Load(current_ir_graph, block, mem, ptr, mode, 0);
	mem  = new_r_Proj(current_ir_graph, block, irn, mode_M, pn_Load_M);
	irn  = new_r_Proj(current_ir_graph, block, irn, mode, pn_Load_res);

	/* clear the field's bits in the loaded word */
	irn = new_r_And(current_ir_graph, block, irn,
	                new_Const_long(mode, neg_mask), mode);

	/* shift the new value into field position */
	if (bit_offset > 0) {
		value = new_r_Shl(current_ir_graph, block, value,
		                  new_Const_long(mode_Iu, bit_offset), mode);

	/* truncate the new value to the field width */
	value = new_r_And(current_ir_graph, block, value,
	                  new_Const_long(mode, mask), mode);

	value = new_r_Or(current_ir_graph, block, value, irn, mode);

	set_Store_mem(store, mem);
	set_Store_value(store, value);
	set_Store_ptr(store, ptr);
} /* lower_bitfields_stores */
/**
 * Lowers unaligned Loads.
 *
 * NOTE(review): the body is elided in this excerpt; only the signature
 * is visible.
 */
static void lower_unaligned_Load(ir_node *load) {
/**
 * Lowers unaligned Stores.
 *
 * NOTE(review): the body is elided in this excerpt; only the signature
 * is visible.
 */
static void lower_unaligned_Store(ir_node *store) {
/**
 * Lowers IR-nodes, called from walker.
 *
 * Dispatches on the opcode: unaligned Loads/Stores are lowered when an
 * environment is given, Casts are removed by forwarding their operand.
 *
 * NOTE(review): the switch's case labels appear to be elided in this
 * excerpt.
 *
 * @param irn  the node to lower
 * @param env  walker environment; non-NULL enables unaligned access lowering
 */
static void lower_irnode(ir_node *irn, void *env) {
	switch (get_irn_opcode(irn)) {
		if (env != NULL && get_Load_align(irn) == align_non_aligned)
			lower_unaligned_Load(irn);
		if (env != NULL && get_Store_align(irn) == align_non_aligned)
			lower_unaligned_Store(irn);
		/* Casts are pure reinterpretations: replace by their operand */
		exchange(irn, get_Cast_op(irn));
/**
 * Walker: lowers IR-nodes for bitfield access.
 *
 * Handles Proj(Load) results and Stores; other opcodes are ignored.
 *
 * NOTE(review): the switch's case labels appear to be elided in this
 * excerpt.
 *
 * @param irn  the node to inspect
 * @param env  walker environment (unused here)
 */
static void lower_bf_access(ir_node *irn, void *env) {
	switch (get_irn_opcode(irn)) {
		long proj     = get_Proj_proj(irn);
		ir_node *pred = get_Proj_pred(irn);

		/* only the data result of a Load is a bitfield read */
		if (proj == pn_Load_res && is_Load(pred))
			lower_bitfields_loads(irn, pred);
		lower_bitfields_stores(irn);
} /* lower_bf_access */
/**
 * Replaces SymConsts by a real constant if possible.
 * Replace Sel nodes by address computation. Also resolves array access.
 * Handle Bitfields by added And/Or calculations.
 *
 * @param irg              the graph to lower
 * @param lower_bitfields  non-zero to also lower bitfield accesses
 */
void lower_highlevel_graph(ir_graph *irg, int lower_bitfields) {
	if (lower_bitfields) {
		/* First step: lower bitfield access: must be run as long as Sels
		   still exist. */
		irg_walk_graph(irg, NULL, lower_bf_access, NULL);

	/* Finally: lower SymConst-Size and Sel nodes, Casts, unaligned Load/Stores. */
	irg_walk_graph(irg, NULL, lower_irnode, NULL);
} /* lower_highlevel_graph */
/**
 * Does the same as lower_highlevel() for all nodes on the const code irg.
 */
void lower_const_code(void) {
	walk_const_code(NULL, lower_irnode, NULL);
} /* lower_const_code */
/**
 * Replaces SymConsts by a real constant if possible.
 * Replace Sel nodes by address computation. Also resolves array access.
 * Handle Bitfields by added And/Or calculations.
 *
 * Applies lower_highlevel_graph() to every graph in the program.
 *
 * NOTE(review): the declarations of i and n appear to be elided in this
 * excerpt.
 *
 * @param lower_bitfields  non-zero to also lower bitfield accesses
 */
void lower_highlevel(int lower_bitfields) {
	n = get_irp_n_irgs();
	for (i = 0; i < n; ++i) {
		ir_graph *irg = get_irp_irg(i);
		lower_highlevel_graph(irg, lower_bitfields);
} /* lower_highlevel */