/*
 * Copyright (C) 1995-2008 University of Karlsruhe.  All right reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License.
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 */

/**
 * @file
 * @brief   Helper functions for handling ABI constraints in the code
 *          selection phase.
 * @author  Matthias Braun
 */
29 #include "beabihelper.h"
37 typedef struct reg_flag_t {
38 const arch_register_t *reg; /**< register at an input position.
39 may be NULL in case of memory input */
40 arch_irn_flags_t flags;
/**
 * A register state mapping keeps track of the symbol values (=firm nodes)
 * of registers. This is useful when constructing straight-line code,
 * such as the function prolog or epilog in some architectures.
 */
48 typedef struct register_state_mapping_t {
49 ir_node **value_map; /**< mapping of state indices to values */
50 int **reg_index_map; /**< mapping of regclass,regnum to an index
52 reg_flag_t *regs; /**< registers (and memory values) that form a
54 ir_node *last_barrier;
55 } register_state_mapping_t;
57 struct beabi_helper_env_t {
59 register_state_mapping_t prolog;
60 register_state_mapping_t epilog;
63 static void prepare_rsm(register_state_mapping_t *rsm,
64 const arch_env_t *arch_env)
66 unsigned n_reg_classes = arch_env_get_n_reg_class(arch_env);
68 reg_flag_t memory = { NULL, 0 };
70 rsm->regs = NEW_ARR_F(reg_flag_t, 0);
71 /* memory input at 0 */
72 ARR_APP1(reg_flag_t, rsm->regs, memory);
74 rsm->value_map = NULL;
75 rsm->reg_index_map = XMALLOCN(int*, n_reg_classes);
76 for (c = 0; c < n_reg_classes; ++c) {
77 const arch_register_class_t *cls = arch_env_get_reg_class(arch_env, c);
78 unsigned n_regs = arch_register_class_n_regs(cls);
81 rsm->reg_index_map[c] = XMALLOCN(int, n_regs);
82 for (r = 0; r < n_regs; ++r) {
83 rsm->reg_index_map[c][r] = -1;
88 static void free_rsm(register_state_mapping_t *rsm, const arch_env_t *arch_env)
90 unsigned n_reg_classes = arch_env_get_n_reg_class(arch_env);
93 for (c = 0; c < n_reg_classes; ++c) {
94 free(rsm->reg_index_map[c]);
97 free(rsm->reg_index_map);
98 if (rsm->value_map != NULL)
99 DEL_ARR_F(rsm->value_map);
100 DEL_ARR_F(rsm->regs);
103 rsm->reg_index_map = NULL;
104 rsm->value_map = NULL;
107 static void rsm_clear_regs(register_state_mapping_t *rsm,
108 const arch_env_t *arch_env)
110 unsigned n_reg_classes = arch_env_get_n_reg_class(arch_env);
112 reg_flag_t memory = { NULL, 0 };
114 for (c = 0; c < n_reg_classes; ++c) {
115 const arch_register_class_t *cls = arch_env_get_reg_class(arch_env, c);
116 unsigned n_regs = arch_register_class_n_regs(cls);
119 for (r = 0; r < n_regs; ++r) {
120 rsm->reg_index_map[c][r] = -1;
123 ARR_RESIZE(reg_flag_t, rsm->regs, 0);
124 ARR_APP1(reg_flag_t, rsm->regs, memory);
126 if (rsm->value_map != NULL) {
127 DEL_ARR_F(rsm->value_map);
128 rsm->value_map = NULL;
132 static void rsm_add_reg(register_state_mapping_t *rsm,
133 const arch_register_t *reg, arch_irn_flags_t flags)
135 int input_idx = ARR_LEN(rsm->regs);
136 int cls_idx = reg->reg_class->index;
137 int reg_idx = reg->index;
138 reg_flag_t regflag = { reg, flags };
140 /* we must not have used get_value yet */
141 assert(rsm->reg_index_map[cls_idx][reg_idx] == -1);
142 rsm->reg_index_map[cls_idx][reg_idx] = input_idx;
143 ARR_APP1(reg_flag_t, rsm->regs, regflag);
147 static ir_node *rsm_get_value(register_state_mapping_t *rsm, int index)
149 assert(index < ARR_LEN(rsm->value_map));
150 return rsm->value_map[index];
153 static ir_node *rsm_get_reg_value(register_state_mapping_t *rsm,
154 const arch_register_t *reg)
156 int cls_idx = reg->reg_class->index;
157 int reg_idx = reg->index;
158 int input_idx = rsm->reg_index_map[cls_idx][reg_idx];
160 return rsm_get_value(rsm, input_idx);
163 static void rsm_set_value(register_state_mapping_t *rsm, int index,
166 assert(index < ARR_LEN(rsm->value_map));
167 rsm->value_map[index] = value;
170 static void rsm_set_reg_value(register_state_mapping_t *rsm,
171 const arch_register_t *reg, ir_node *value)
173 int cls_idx = reg->reg_class->index;
174 int reg_idx = reg->index;
175 int input_idx = rsm->reg_index_map[cls_idx][reg_idx];
176 rsm_set_value(rsm, input_idx, value);
179 static ir_node *rsm_create_barrier(register_state_mapping_t *rsm,
182 int n_barrier_outs = ARR_LEN(rsm->regs);
183 ir_node **in = rsm->value_map;
187 assert(ARR_LEN(rsm->value_map) == n_barrier_outs);
189 barrier = be_new_Barrier(block, n_barrier_outs, in);
191 for (o = 0; o < n_barrier_outs; ++o) {
192 const reg_flag_t *regflag = &rsm->regs[o];
193 const arch_register_t *reg = regflag->reg;
196 arch_set_out_register_req(barrier, o, arch_no_register_req);
197 proj = new_r_Proj(barrier, mode_M, o);
199 be_set_constr_single_reg_in(barrier, o, reg, 0);
200 be_set_constr_single_reg_out(barrier, o, reg, regflag->flags);
201 proj = new_r_Proj(barrier, reg->reg_class->mode, o);
203 rsm->value_map[o] = proj;
206 rsm->last_barrier = barrier;
215 beabi_helper_env_t *be_abihelper_prepare(ir_graph *irg)
217 const arch_env_t *arch_env = be_get_irg_arch_env(irg);
218 beabi_helper_env_t *env = XMALLOCZ(beabi_helper_env_t);
221 prepare_rsm(&env->prolog, arch_env);
222 prepare_rsm(&env->epilog, arch_env);
227 void be_abihelper_finish(beabi_helper_env_t *env)
229 const arch_env_t *arch_env = be_get_irg_arch_env(env->irg);
231 free_rsm(&env->prolog, arch_env);
232 if (env->epilog.reg_index_map != NULL) {
233 free_rsm(&env->epilog, arch_env);
238 void be_prolog_add_reg(beabi_helper_env_t *env, const arch_register_t *reg,
239 arch_irn_flags_t flags)
241 rsm_add_reg(&env->prolog, reg, flags);
244 ir_node *be_prolog_create_start(beabi_helper_env_t *env, dbg_info *dbgi,
247 int n_start_outs = ARR_LEN(env->prolog.regs);
248 ir_node *start = be_new_Start(dbgi, block, n_start_outs);
251 assert(env->prolog.value_map == NULL);
252 env->prolog.value_map = NEW_ARR_F(ir_node*, n_start_outs);
254 for (o = 0; o < n_start_outs; ++o) {
255 const reg_flag_t *regflag = &env->prolog.regs[o];
256 const arch_register_t *reg = regflag->reg;
259 arch_set_out_register_req(start, o, arch_no_register_req);
260 proj = new_r_Proj(start, mode_M, o);
262 be_set_constr_single_reg_out(start, o, regflag->reg,
264 arch_irn_set_register(start, o, regflag->reg);
265 proj = new_r_Proj(start, reg->reg_class->mode, o);
267 env->prolog.value_map[o] = proj;
270 /* start node should really be the first thing constructed */
271 assert(env->prolog.last_barrier == NULL);
272 env->prolog.last_barrier = start;
277 ir_node *be_prolog_create_barrier(beabi_helper_env_t *env, ir_node *block)
279 return rsm_create_barrier(&env->prolog, block);
282 ir_node *be_prolog_get_reg_value(beabi_helper_env_t *env,
283 const arch_register_t *reg)
285 return rsm_get_reg_value(&env->prolog, reg);
288 ir_node *be_prolog_get_memory(beabi_helper_env_t *env)
290 return rsm_get_value(&env->prolog, 0);
293 void be_prolog_set_reg_value(beabi_helper_env_t *env,
294 const arch_register_t *reg, ir_node *value)
296 rsm_set_reg_value(&env->prolog, reg, value);
299 void be_prolog_set_memory(beabi_helper_env_t *env, ir_node *value)
301 rsm_set_value(&env->prolog, 0, value);
306 void be_epilog_begin(beabi_helper_env_t *env)
308 const arch_env_t *arch_env = be_get_irg_arch_env(env->irg);
309 rsm_clear_regs(&env->epilog, arch_env);
310 env->epilog.value_map = NEW_ARR_F(ir_node*, 1);
311 env->epilog.value_map[0] = NULL;
314 void be_epilog_add_reg(beabi_helper_env_t *env, const arch_register_t *reg,
315 arch_irn_flags_t flags, ir_node *value)
317 rsm_add_reg(&env->epilog, reg, flags);
318 ARR_APP1(ir_node*, env->epilog.value_map, value);
321 void be_epilog_set_reg_value(beabi_helper_env_t *env,
322 const arch_register_t *reg, ir_node *value)
324 rsm_set_reg_value(&env->epilog, reg, value);
327 void be_epilog_set_memory(beabi_helper_env_t *env, ir_node *value)
329 rsm_set_value(&env->epilog, 0, value);
332 ir_node *be_epilog_get_reg_value(beabi_helper_env_t *env,
333 const arch_register_t *reg)
335 return rsm_get_reg_value(&env->epilog, reg);
338 ir_node *be_epilog_get_memory(beabi_helper_env_t *env)
340 return rsm_get_value(&env->epilog, 0);
343 ir_node *be_epilog_create_barrier(beabi_helper_env_t *env, ir_node *block)
345 return rsm_create_barrier(&env->epilog, block);
348 ir_node *be_epilog_create_return(beabi_helper_env_t *env, dbg_info *dbgi,
351 int n_return_in = ARR_LEN(env->epilog.regs);
352 ir_node **in = env->epilog.value_map;
353 int n_res = 1; /* TODO */
354 unsigned pop = 0; /* TODO */
358 assert(ARR_LEN(env->epilog.value_map) == n_return_in);
360 ret = be_new_Return(dbgi, get_irn_irg(block), block, n_res, pop,
362 for (i = 0; i < n_return_in; ++i) {
363 const reg_flag_t *regflag = &env->epilog.regs[i];
364 const arch_register_t *reg = regflag->reg;
366 be_set_constr_single_reg_in(ret, i, reg, 0);
370 rsm_clear_regs(&env->epilog, be_get_irg_arch_env(env->irg));
371 env->epilog.last_barrier = NULL;
376 static void add_missing_keep_walker(ir_node *node, void *data)
379 unsigned found_projs = 0;
380 const ir_edge_t *edge;
381 ir_mode *mode = get_irn_mode(node);
387 n_outs = arch_irn_get_n_outs(node);
391 assert(n_outs < (int) sizeof(unsigned) * 8);
392 foreach_out_edge(node, edge) {
393 ir_node *node = get_edge_src_irn(edge);
396 /* The node could be kept */
397 if (is_End(node) || is_Anchor(node))
400 if (get_irn_mode(node) == mode_M)
403 pn = get_Proj_proj(node);
405 found_projs |= 1 << pn;
409 /* are keeps missing? */
411 for (i = 0; i < n_outs; ++i) {
414 const arch_register_req_t *req;
415 const arch_register_class_t *cls;
417 if (found_projs & (1 << i)) {
421 req = arch_get_out_register_req(node, i);
427 block = get_nodes_block(node);
428 in[0] = new_r_Proj(node, arch_register_class_mode(cls), i);
429 if (last_keep != NULL) {
430 be_Keep_add_node(last_keep, cls, in[0]);
432 last_keep = be_new_Keep(block, 1, in);
433 if (sched_is_scheduled(node)) {
434 sched_add_after(node, last_keep);
440 void be_add_missing_keeps(ir_graph *irg)
442 irg_walk_graph(irg, add_missing_keep_walker, NULL, NULL);