2 * Copyright (C) 1995-2008 University of Karlsruhe. All right reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
* WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
22 * @brief Implements several optimizations for ARM.
23 * @author Michael Beck
33 #include "bepeephole.h"
36 #include "arm_optimize.h"
37 #include "gen_arm_regalloc_if.h"
38 #include "gen_arm_new_nodes.h"
40 static arm_code_gen_t *cg;
/**
 * Execute ARM ROL: rotate the 32-bit value v left by rol bits.
 *
 * @param v    the value to rotate
 * @param rol  rotate amount in bits, taken modulo 32
 * @return v rotated left by rol bits
 *
 * The plain expression (v << rol) | (v >> (32 - rol)) is undefined
 * behavior for rol == 0 (right shift by 32); callers do pass a rotate
 * amount of 0 (the "prefer shift amount 0" special case), so that case
 * is guarded explicitly.
 */
static unsigned arm_rol(unsigned v, unsigned rol) {
	rol &= 31;
	if (rol == 0)
		return v;
	return (v << rol) | (v >> (32 - rol));
}
/**
 * construct 8bit values and rot amounts for a value.
 *
 * Decomposes a 32-bit word into (8-bit value, rotate amount) pairs as
 * used by ARM data-processing immediates, filling result->values[] and
 * result->shifts[] and counting the pairs in result->ops.
 *
 * NOTE(review): this copy of the file is missing lines from this
 * function (the enclosing loop, the declarations of `initial` and
 * `shf`, the result->ops increment, the non-preferred-case return path
 * and the closing brace) — restore from upstream before building.
 */
void arm_gen_vals_from_word(unsigned int value, arm_vals *result)
/* start from an all-zero result */
memset(result, 0, sizeof(*result));
/* special case: we prefer shift amount 0 */
result->values[0] = value;
/* rotate left by 8 and keep the low 24 bits — presumably isolates the
 * bits beyond the current 8-bit window; TODO confirm against upstream */
unsigned v = arm_rol(value, 8) & 0xFFFFFF;
/* normalize the rotate amount into the range 0..31 */
shf = (initial + shf - 8) & 0x1F;
result->values[result->ops] = v;
result->shifts[result->ops] = shf;
/* XOR presumably clears the bits of `value` just covered by this
 * (value, shift) pair — TODO confirm against upstream */
value ^= arm_rol(v, shf) >> initial;
/**
 * Encodes an immediate with shifter operand.
 *
 * The rotate amount (always even) is stored halved in bits 8..11; the
 * 8-bit immediate occupies bits 0..7.
 *
 * @param shift      the (even) rotate amount in bits
 * @param immediate  the 8-bit immediate value
 * @return the combined 12-bit encoding
 *
 * (This copy of the file lost the function's closing brace; restored.)
 */
unsigned int arm_encode_imm_w_shift(unsigned int shift, unsigned int immediate) {
	return immediate | ((shift >> 1) << 8);
}
/**
 * Decode an immediate with shifter operand.
 *
 * Inverse of arm_encode_imm_w_shift(): bits 8..11 hold half the rotate
 * amount, bits 0..7 the 8-bit immediate. The decoded value is the
 * immediate rotated LEFT by the stored amount (matching arm_rol and the
 * shift amounts produced by arm_gen_vals_from_word).
 *
 * @param imm_value  the encoded immediate-with-shift word
 * @return the decoded 32-bit value
 *
 * (This copy of the file lost the function's closing brace; restored.)
 */
unsigned int arm_decode_imm_w_shift(long imm_value) {
	unsigned l   = (unsigned)imm_value;
	/* (l & ~0xFF) >> 7 == ((l >> 8) << 1) for 12-bit encodings:
	 * the halved rotate field doubled back into a bit count */
	unsigned rol = (l & ~0xFF) >> 7;

	return arm_rol(l & 0xFF, rol);
}
106 * Returns non.zero if the given offset can be directly encoded into an ARM instruction.
108 static int allowed_arm_immediate(int offset, arm_vals *result) {
109 arm_gen_vals_from_word(offset, result);
110 return result->ops <= 1;
/**
 * Fix an IncSP node if the offset gets too big: when the offset is not
 * encodable as one ARM immediate, keep the first (value, shift) pair on
 * the node and append further IncSP nodes for the remaining pairs.
 *
 * NOTE(review): this copy of the file is missing lines from this
 * function (the declarations of `v`, `irg` and `block`, the
 * return/sign handling around the immediate check, and closing
 * braces) — restore from upstream before building.
 */
static void peephole_be_IncSP(ir_node *node) {
int offset, cnt, align, sign = 1;
/* first optimize incsp->incsp combinations */
node = be_peephole_IncSP_IncSP(node);
offset = be_get_IncSP_offset(node);
/* can be transformed into Add OR Sub */
/* a single (value, shift) pair fits the instruction directly */
if (allowed_arm_immediate(offset, &v))
/* the first pair stays on the original node */
be_set_IncSP_offset(node, (int)arm_rol(v.values[0], v.shifts[0]) * sign);
irg = current_ir_graph;
block = get_nodes_block(node);
align = be_get_IncSP_align(node);
/* materialize each remaining pair as an extra IncSP scheduled after node */
for (cnt = 1; cnt < v.ops; ++cnt) {
int value = (int)arm_rol(v.values[cnt], v.shifts[cnt]);
ir_node *next = be_new_IncSP(&arm_gp_regs[REG_SP], irg, block, node, value * sign, align);
sched_add_after(node, next);
/**
 * creates the address by Adds: computes frame + imm0 (+ imm1 ...) with
 * one arm_Add_i per (value, shift) pair in v, each intermediate result
 * placed in R12 and scheduled before `node`.
 *
 * NOTE(review): this copy of the file is missing lines from this
 * function (the declarations of `ptr` and `cnt`, the `ptr = next;`
 * update in the loop, the `return ptr;` and closing braces) — restore
 * from upstream before building.
 */
static ir_node *gen_ptr_add(ir_node *node, ir_node *frame, arm_vals *v)
ir_graph *irg = current_ir_graph;
dbg_info *dbg = get_irn_dbg_info(node);
ir_node *block = get_nodes_block(node);
/* first Add: frame plus the first encoded immediate */
ptr = new_rd_arm_Add_i(dbg, irg, block, frame, mode_Iu, arm_encode_imm_w_shift(v->shifts[0], v->values[0]));
arch_set_irn_register(ptr, &arm_gp_regs[REG_R12]);
sched_add_before(node, ptr);
/* chain one further Add per remaining (value, shift) pair */
for (cnt = 1; cnt < v->ops; ++cnt) {
long value = arm_encode_imm_w_shift(v->shifts[cnt], v->values[cnt]);
ir_node *next = new_rd_arm_Add_i(dbg, irg, block, ptr, mode_Iu, value);
arch_set_irn_register(next, &arm_gp_regs[REG_R12]);
sched_add_before(node, next);
/**
 * creates the address by Subs: computes frame - imm0 (- imm1 ...) with
 * one arm_Sub_i per (value, shift) pair in v, each intermediate result
 * placed in R12 and scheduled before `node`. Mirror of gen_ptr_add for
 * negative offsets.
 *
 * NOTE(review): this copy of the file is missing lines from this
 * function (the declarations of `ptr` and `cnt`, the `ptr = next;`
 * update in the loop, the `return ptr;` and closing braces) — restore
 * from upstream before building.
 */
static ir_node *gen_ptr_sub(ir_node *node, ir_node *frame, arm_vals *v)
ir_graph *irg = current_ir_graph;
dbg_info *dbg = get_irn_dbg_info(node);
ir_node *block = get_nodes_block(node);
/* first Sub: frame minus the first encoded immediate */
ptr = new_rd_arm_Sub_i(dbg, irg, block, frame, mode_Iu, arm_encode_imm_w_shift(v->shifts[0], v->values[0]));
arch_set_irn_register(ptr, &arm_gp_regs[REG_R12]);
sched_add_before(node, ptr);
/* chain one further Sub per remaining (value, shift) pair */
for (cnt = 1; cnt < v->ops; ++cnt) {
long value = arm_encode_imm_w_shift(v->shifts[cnt], v->values[cnt]);
ir_node *next = new_rd_arm_Sub_i(dbg, irg, block, ptr, mode_Iu, value);
arch_set_irn_register(next, &arm_gp_regs[REG_R12]);
sched_add_before(node, next);
/**
 * Fix a be_Spill node if the offset gets too big: when the frame
 * entity's offset does not fit one ARM immediate, build the address in
 * R12 via an Add/Sub chain and replace the Spill by an explicit store
 * (fpaStf for FPA float modes, arm_Store for data modes; otherwise panic).
 *
 * NOTE(review): this copy of the file is missing lines from this
 * function (the declarations of `v`, `mode`, `irg` and `dbg`, the
 * negative-offset / use_add selection, `else` keywords and closing
 * braces) — restore from upstream before building.
 */
static void peephole_be_Spill(ir_node *node) {
ir_entity *ent = be_get_frame_entity(node);
int use_add = 1, offset = get_entity_offset(ent);
ir_node *block, *ptr, *frame, *value, *store;
/* offsets encodable as a single immediate need no fixup */
if (allowed_arm_immediate(offset, &v))
frame = be_get_Spill_frame(node);
/* build frame + offset (or frame - offset) into R12 */
ptr = gen_ptr_add(node, frame, &v);
ptr = gen_ptr_sub(node, frame, &v);
value = be_get_Spill_val(node);
mode = get_irn_mode(value);
irg = current_ir_graph;
dbg = get_irn_dbg_info(node);
block = get_nodes_block(node);
if (mode_is_float(mode)) {
if (USE_FPA(cg->isa)) {
/* transform into fpaStf */
store = new_rd_arm_fpaStf(dbg, irg, block, ptr, value, get_irg_no_mem(irg), mode);
sched_add_before(node, store);
panic("peephole_be_Spill: spill not supported for this mode");
} else if (mode_is_dataM(mode)) {
/* transform into Store */;
store = new_rd_arm_Store(dbg, irg, block, ptr, value, get_irg_no_mem(irg));
sched_add_before(node, store);
panic("peephole_be_Spill: spill not supported for this mode");
/* let the peephole framework replace the Spill by the new store */
be_peephole_exchange(node, store);
/**
 * Fix a be_Reload node if the offset gets too big: when the frame
 * entity's offset does not fit one ARM immediate, build the address in
 * R12 via an Add/Sub chain and replace the Reload by an explicit load
 * plus result Proj (fpaLdf for FPA float modes, arm_Load for data
 * modes; otherwise panic).
 *
 * NOTE(review): this copy of the file is missing lines from this
 * function (the declarations of `v`, `mode`, `irg` and `dbg`, the
 * negative-offset / use_add selection, `else` keywords and closing
 * braces). Also the panic messages below say "peephole_be_Spill" —
 * copy-paste from the Spill handler; consider fixing upstream.
 */
static void peephole_be_Reload(ir_node *node) {
ir_entity *ent = be_get_frame_entity(node);
int use_add = 1, offset = get_entity_offset(ent);
ir_node *block, *ptr, *frame, *load, *mem, *proj;
const arch_register_t *reg;
/* offsets encodable as a single immediate need no fixup */
if (allowed_arm_immediate(offset, &v))
frame = be_get_Reload_frame(node);
/* build frame + offset (or frame - offset) into R12 */
ptr = gen_ptr_add(node, frame, &v);
ptr = gen_ptr_sub(node, frame, &v);
/* the reloaded value must land in the register the Reload produced */
reg = arch_get_irn_register(node);
mem = be_get_Reload_mem(node);
mode = get_irn_mode(node);
irg = current_ir_graph;
dbg = get_irn_dbg_info(node);
block = get_nodes_block(node);
if (mode_is_float(mode)) {
if (USE_FPA(cg->isa)) {
/* transform into fpaLdf */
load = new_rd_arm_fpaLdf(dbg, irg, block, ptr, mem, mode);
sched_add_before(node, load);
proj = new_rd_Proj(dbg, irg, block, load, mode, pn_arm_fpaLdf_res);
arch_set_irn_register(proj, reg);
panic("peephole_be_Spill: spill not supported for this mode");
} else if (mode_is_dataM(mode)) {
/* transform into Load */;
load = new_rd_arm_Load(dbg, irg, block, ptr, mem);
sched_add_before(node, load);
proj = new_rd_Proj(dbg, irg, block, load, mode_Iu, pn_arm_Load_res);
arch_set_irn_register(proj, reg);
panic("peephole_be_Spill: spill not supported for this mode");
/* let the peephole framework replace the Reload by the load's Proj */
be_peephole_exchange(node, proj);
306 * Register a peephole optimization function.
308 static void register_peephole_optimisation(ir_op *op, peephole_opt_func func) {
309 assert(op->ops.generic == NULL);
310 op->ops.generic = (op_func)func;
313 /* Perform peephole-optimizations. */
314 void arm_peephole_optimization(arm_code_gen_t *new_cg)
318 /* register peephole optimizations */
319 clear_irp_opcodes_generic_func();
320 register_peephole_optimisation(op_be_IncSP, peephole_be_IncSP);
321 register_peephole_optimisation(op_be_Spill, peephole_be_Spill);
322 register_peephole_optimisation(op_be_Reload, peephole_be_Reload);
324 be_peephole_opt(cg->birg);