/*
 * Copyright (C) 1995-2008 University of Karlsruhe.  All rights reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 */

/**
 * @file
 * @brief   Implements several optimizations for ARM.
 * @author  Michael Beck
 */
33 #include "bepeephole.h"
36 #include "arm_optimize.h"
37 #include "gen_arm_regalloc_if.h"
38 #include "gen_arm_new_nodes.h"
/** Current backend code-generator environment; presumably assigned from
 *  new_cg inside arm_peephole_optimization() — the assignment itself is
 *  not visible in this view of the file (TODO confirm). */
40 static arm_code_gen_t *cg;
/**
 * Execute an ARM rotate-left on a 32-bit value.
 *
 * @param v    the value to rotate
 * @param rol  rotate amount; reduced modulo 32
 * @return v rotated left by rol bits
 *
 * The complementary right shift is masked with & 31 so that rol == 0
 * does not evaluate the undefined expression "v >> 32" (C11 6.5.7:
 * shifting by >= the width of the type is undefined behavior).
 * For rol == 0 this yields (v << 0) | (v >> 0) == v, as intended.
 */
static unsigned arm_rol(unsigned v, unsigned rol) {
	rol &= 31;
	return (v << rol) | (v >> ((32 - rol) & 31));
}
/*
 * NOTE(review): this view of the file is garbled -- the embedded original
 * line numbers jump (48, 50, 54, 56, 58, 65, 74, ...), so the opening
 * brace, the declarations of shf/initial, and the surrounding loop header
 * are missing here. Comments below describe only what is visible.
 *
 * Decomposes a 32-bit word into a series of 8-bit values plus rotate
 * amounts (ARM data-processing immediates); results land in result->values[]
 * and result->shifts[], with result->ops counting the entries -- TODO
 * confirm against the full file.
 */
48 * construct 8bit values and rot amounts for a value.
50 void arm_gen_vals_from_word(unsigned int value, arm_vals *result)
/* Start from an all-zero result (ops = 0, empty value/shift arrays). */
54 memset(result, 0, sizeof(*result));
56 /* special case: we prefer shift amount 0 */
58 result->values[0] = value;
/* Rotate left by 8 and mask to inspect the next 24-bit window. */
65 unsigned v = arm_rol(value, 8) & 0xFFFFFF;
/* Adjust the accumulated shift, keeping it within 0..31. */
74 shf = (initial + shf - 8) & 0x1F;
75 result->values[result->ops] = v;
76 result->shifts[result->ops] = shf;
/* Remove the bits just covered by this 8-bit chunk from the remainder. */
79 value ^= arm_rol(v, shf) >> initial;
/**
 * Encode an immediate together with its shifter operand: the 8-bit
 * immediate occupies bits 0..7 and half of the (even) rotate amount
 * occupies bits 8..11, matching the ARM data-processing immediate field.
 *
 * @param shift      rotate amount (must be even)
 * @param immediate  8-bit immediate payload
 * @return the combined encoding
 */
unsigned int arm_encode_imm_w_shift(unsigned int shift, unsigned int immediate) {
	unsigned int rot_field = (shift >> 1) << 8;
	return rot_field | immediate;
}
/**
 * Decode an immediate with shifter operand: extract the 8-bit payload
 * from bits 0..7 and rotate it left by the amount stored (halved) in
 * bits 8..11. Inverse of arm_encode_imm_w_shift().
 *
 * @param imm_value  the encoded immediate
 * @return the decoded 32-bit value
 */
unsigned int arm_decode_imm_w_shift(long imm_value) {
	unsigned int encoded = (unsigned int)imm_value;
	unsigned int payload = encoded & 0xFF;
	unsigned int rot     = (encoded & ~0xFF) >> 7;
	/* inline rotate-left, matching the file's arm_rol() helper */
	return (payload << rot) | (payload >> (32 - rot));
}
106 * Returns non.zero if the given offset can be directly encoded into an ARM instruction.
108 static int allowed_arm_immediate(int offset, arm_vals *result) {
109 arm_gen_vals_from_word(offset, result);
110 return result->ops <= 1;
/*
 * NOTE(review): garbled view -- embedded line numbers jump (116, 119, 122,
 * 123, 125, 126, 131, 134, ...), so the declarations of v/irg/block, the
 * sign/offset normalization, the early return after the allowed-immediate
 * check, and closing braces are missing here. Comments describe only the
 * visible statements.
 */
114 * Fix an be_IncSP node if the offset gets too big
116 static void peephole_be_IncSP(ir_node *node) {
119 int offset, cnt, align, sign = 1;
122 /* first optimize incsp->incsp combinations */
123 node = be_peephole_IncSP_IncSP(node);
125 offset = be_get_IncSP_offset(node);
126 /* can be transformed into Add OR Sub */
/* If one immediate suffices, the IncSP can keep its single offset
 * (presumably followed by an early return in the missing lines). */
131 if (allowed_arm_immediate(offset, &v))
/* First chunk stays on the original IncSP node. */
134 be_set_IncSP_offset(node, (int)arm_rol(v.values[0], v.shifts[0]) * sign);
136 irg = current_ir_graph;
137 block = get_nodes_block(node);
138 align = be_get_IncSP_align(node);
/* Emit one extra IncSP per remaining 8-bit/rotate chunk, chained after
 * the original node in the schedule. */
139 for (cnt = 1; cnt < v.ops; ++cnt) {
140 int value = (int)arm_rol(v.values[cnt], v.shifts[cnt]);
141 ir_node *next = be_new_IncSP(&arm_gp_regs[REG_SP], irg, block, node, value * sign, align);
142 sched_add_after(node, next);
/*
 * NOTE(review): garbled view -- the declarations of ptr/cnt, the loop-body
 * update (presumably "ptr = next;"), the final "return ptr;" and closing
 * braces are missing (line numbers jump 150, 152, 153, 157, 161, 165).
 *
 * Materializes frame + offset as a chain of Add-immediate nodes in R12,
 * one Add per 8-bit/rotate chunk in v, all scheduled before node.
 */
148 * creates the address by Adds
150 static ir_node *gen_ptr_add(ir_node *node, ir_node *frame, arm_vals *v)
152 dbg_info *dbg = get_irn_dbg_info(node);
153 ir_node *block = get_nodes_block(node);
/* First chunk: Add frame + encoded immediate, result in scratch R12. */
157 ptr = new_bd_arm_Add_i(dbg, block, frame, mode_Iu, arm_encode_imm_w_shift(v->shifts[0], v->values[0]));
158 arch_set_irn_register(ptr, &arm_gp_regs[REG_R12]);
159 sched_add_before(node, ptr);
/* Remaining chunks: keep adding into R12. */
161 for (cnt = 1; cnt < v->ops; ++cnt) {
162 long value = arm_encode_imm_w_shift(v->shifts[cnt], v->values[cnt]);
163 ir_node *next = new_bd_arm_Add_i(dbg, block, ptr, mode_Iu, value);
164 arch_set_irn_register(next, &arm_gp_regs[REG_R12]);
165 sched_add_before(node, next);
/*
 * NOTE(review): garbled view -- mirror image of gen_ptr_add with Sub nodes;
 * the ptr/cnt declarations, the loop-body "ptr = next;" update, the final
 * "return ptr;" and closing braces are missing (line numbers jump 174, 176,
 * 177, 181, 185, 189).
 *
 * Materializes frame - offset as a chain of Sub-immediate nodes in R12,
 * one Sub per 8-bit/rotate chunk in v, all scheduled before node.
 */
172 * creates the address by Subs
174 static ir_node *gen_ptr_sub(ir_node *node, ir_node *frame, arm_vals *v)
176 dbg_info *dbg = get_irn_dbg_info(node);
177 ir_node *block = get_nodes_block(node);
/* First chunk: Sub frame - encoded immediate, result in scratch R12. */
181 ptr = new_bd_arm_Sub_i(dbg, block, frame, mode_Iu, arm_encode_imm_w_shift(v->shifts[0], v->values[0]));
182 arch_set_irn_register(ptr, &arm_gp_regs[REG_R12]);
183 sched_add_before(node, ptr);
/* Remaining chunks: keep subtracting from R12. */
185 for (cnt = 1; cnt < v->ops; ++cnt) {
186 long value = arm_encode_imm_w_shift(v->shifts[cnt], v->values[cnt]);
187 ir_node *next = new_bd_arm_Sub_i(dbg, block, ptr, mode_Iu, value);
188 arch_set_irn_register(next, &arm_gp_regs[REG_R12]);
189 sched_add_before(node, next);
/*
 * NOTE(review): garbled view -- the declarations of v/mode/irg/dbg, the
 * early return when the offset is directly encodable, the sign handling
 * that selects use_add, and several braces are missing (line numbers jump
 * 201, 207, 214, 216, 218, 221, ...). Comments describe visible code only.
 *
 * Rewrites a be_Spill whose frame-entity offset does not fit an ARM
 * immediate: the address is built in R12 via Add/Sub chains, then the
 * spill becomes an explicit Store (fpaStf for FPA floats).
 */
196 * Fix an be_Spill node if the offset gets too big
198 static void peephole_be_Spill(ir_node *node) {
199 ir_entity *ent = be_get_frame_entity(node);
200 int use_add = 1, offset = get_entity_offset(ent);
201 ir_node *block, *ptr, *frame, *value, *store;
/* Directly encodable offsets need no rewrite (early return presumed
 * in the missing lines -- TODO confirm). */
207 if (allowed_arm_immediate(offset, &v))
214 frame = be_get_Spill_frame(node);
/* Build frame +/- offset in R12, depending on the offset's sign. */
216 ptr = gen_ptr_add(node, frame, &v);
218 ptr = gen_ptr_sub(node, frame, &v);
221 value = be_get_Spill_val(node);
222 mode = get_irn_mode(value);
223 irg = current_ir_graph;
224 dbg = get_irn_dbg_info(node);
225 block = get_nodes_block(node);
227 if (mode_is_float(mode)) {
228 if (USE_FPA(cg->isa)) {
229 /* transform into fpaStf */
230 store = new_bd_arm_fpaStf(dbg, block, ptr, value, get_irg_no_mem(irg), mode);
231 sched_add_before(node, store);
/* Non-FPA float spills are unsupported. */
233 panic("peephole_be_Spill: spill not supported for this mode");
235 } else if (mode_is_dataM(mode)) {
236 /* transform into Store */;
237 store = new_bd_arm_Store(dbg, block, ptr, value, get_irg_no_mem(irg));
238 sched_add_before(node, store);
240 panic("peephole_be_Spill: spill not supported for this mode");
/* Replace the be_Spill by the freshly built store. */
243 be_peephole_exchange(node, store);
/*
 * NOTE(review): garbled view -- the declarations of v/mode/irg/dbg, the
 * early return for directly encodable offsets, the sign handling, and
 * several braces are missing (line numbers jump 252, 257, 259, 266, ...).
 *
 * Rewrites a be_Reload whose frame-entity offset does not fit an ARM
 * immediate: the address is built in R12 via Add/Sub chains, then the
 * reload becomes an explicit Load whose result Proj inherits the
 * original node's register.
 *
 * NOTE(review): both panic messages below say "peephole_be_Spill" and the
 * comment at original line 291 says "Store" -- copy/paste leftovers from
 * peephole_be_Spill; worth fixing in the full file.
 */
247 * Fix an be_Reload node if the offset gets too big
249 static void peephole_be_Reload(ir_node *node) {
250 ir_entity *ent = be_get_frame_entity(node);
251 int use_add = 1, offset = get_entity_offset(ent);
252 ir_node *block, *ptr, *frame, *load, *mem, *proj;
257 const arch_register_t *reg;
/* Directly encodable offsets need no rewrite (early return presumed
 * in the missing lines -- TODO confirm). */
259 if (allowed_arm_immediate(offset, &v))
266 frame = be_get_Reload_frame(node);
/* Build frame +/- offset in R12, depending on the offset's sign. */
268 ptr = gen_ptr_add(node, frame, &v);
270 ptr = gen_ptr_sub(node, frame, &v);
/* The Proj of the new Load must land in the Reload's register. */
273 reg = arch_get_irn_register(node);
274 mem = be_get_Reload_mem(node);
275 mode = get_irn_mode(node);
276 irg = current_ir_graph;
277 dbg = get_irn_dbg_info(node);
278 block = get_nodes_block(node);
280 if (mode_is_float(mode)) {
281 if (USE_FPA(cg->isa)) {
282 /* transform into fpaLdf */
283 load = new_bd_arm_fpaLdf(dbg, block, ptr, mem, mode);
284 sched_add_before(node, load);
285 proj = new_rd_Proj(dbg, irg, block, load, mode, pn_arm_fpaLdf_res);
286 arch_set_irn_register(proj, reg);
288 panic("peephole_be_Spill: spill not supported for this mode");
290 } else if (mode_is_dataM(mode)) {
291 /* transform into Store */;
292 load = new_bd_arm_Load(dbg, block, ptr, mem);
293 sched_add_before(node, load);
294 proj = new_rd_Proj(dbg, irg, block, load, mode_Iu, pn_arm_Load_res);
295 arch_set_irn_register(proj, reg);
297 panic("peephole_be_Spill: spill not supported for this mode");
/* Replace the be_Reload by the Proj of the freshly built load. */
300 be_peephole_exchange(node, proj);
/*
 * NOTE(review): the closing brace is missing from this garbled view
 * (line numbers jump from 308 past 309).
 */
304 * Register a peephole optimization function.
/* Each opcode may carry at most one generic handler; the assert guards
 * against double registration. */
306 static void register_peephole_optimisation(ir_op *op, peephole_opt_func func) {
307 assert(op->ops.generic == NULL);
308 op->ops.generic = (op_func)func;
/*
 * NOTE(review): garbled view -- the opening brace and the assignment of
 * the file-scope cg (presumably "cg = new_cg;", needed since line 322
 * reads cg->birg) are missing (line numbers jump 312 -> 316).
 *
 * Entry point: installs the ARM peephole handlers for be_IncSP,
 * be_Spill and be_Reload, then runs the generic peephole driver.
 */
311 /* Perform peephole-optimizations. */
312 void arm_peephole_optimization(arm_code_gen_t *new_cg)
316 /* register peephole optimizations */
317 clear_irp_opcodes_generic_func();
318 register_peephole_optimisation(op_be_IncSP, peephole_be_IncSP);
319 register_peephole_optimisation(op_be_Spill, peephole_be_Spill);
320 register_peephole_optimisation(op_be_Reload, peephole_be_Reload);
322 be_peephole_opt(cg->birg);