/*
 * Copyright (C) 1995-2008 University of Karlsruhe.  All rights reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 */

/**
 * @file
 * @brief       Helper functions for handling ABI constraints in the code
 *              selection phase.
 * @author      Matthias Braun
 */
#include "beabihelper.h"
#include "array.h"
#include "bearch.h"
#include "benode.h"
#include "besched.h"
#include "ircons.h"
#include "iredges.h"
#include "irgwalk.h"
#include "irnode_t.h"
#include "irphase_t.h"
#include "heights.h"
#include "raw_bitset.h"
typedef struct reg_flag_t {
	const arch_register_t *reg;   /**< register at an input position.
	                                   may be NULL in case of memory input */
	arch_register_req_type_t flags;
} reg_flag_t;
/**
 * A register state mapping keeps track of the symbol values (=firm nodes)
 * assigned to registers. This is useful when constructing straight-line code
 * like the function prolog or epilog in some architectures.
 */
typedef struct register_state_mapping_t {
	ir_node   **value_map;     /**< mapping of state indices to values */
	int       **reg_index_map; /**< mapping of regclass,regnum to an index
	                                in the value_map */
	reg_flag_t *regs;          /**< registers (and memory values) that form a
	                                state */
	ir_node    *last_barrier;
} register_state_mapping_t;
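/* Editor's illustration (not original code): after rsm_add_reg() has added a
 * register r of class c at state index i, the tables relate like this:
 *
 *     regs[0]                    = { NULL, none }   <- memory, always index 0
 *     regs[i]                    = { r, flags }
 *     reg_index_map[c][r->index] = i
 *     value_map[i]               = current value of r (once values exist)
 */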
struct beabi_helper_env_t {
	ir_graph                *irg;
	register_state_mapping_t prolog;
	register_state_mapping_t epilog;
	ir_phase                *stack_order;
};
static void prepare_rsm(register_state_mapping_t *rsm,
                        const arch_env_t *arch_env)
{
	unsigned   n_reg_classes = arch_env->n_register_classes;
	unsigned   c;
	reg_flag_t memory = { NULL, arch_register_req_type_none };

	rsm->regs = NEW_ARR_F(reg_flag_t, 0);
	/* memory input at 0 */
	ARR_APP1(reg_flag_t, rsm->regs, memory);

	rsm->value_map     = NULL;
	rsm->reg_index_map = XMALLOCN(int*, n_reg_classes);
	for (c = 0; c < n_reg_classes; ++c) {
		const arch_register_class_t *cls    = &arch_env->register_classes[c];
		unsigned                     n_regs = arch_register_class_n_regs(cls);
		unsigned                     r;

		rsm->reg_index_map[c] = XMALLOCN(int, n_regs);
		for (r = 0; r < n_regs; ++r) {
			rsm->reg_index_map[c][r] = -1;
		}
	}
}
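/* Editor's note: the -1 written above is a sentinel meaning "register not
 * part of the state yet"; the assert in rsm_add_reg() below relies on it to
 * guarantee each register is added at most once. */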
static void free_rsm(register_state_mapping_t *rsm, const arch_env_t *arch_env)
{
	unsigned n_reg_classes = arch_env->n_register_classes;
	unsigned c;

	for (c = 0; c < n_reg_classes; ++c) {
		free(rsm->reg_index_map[c]);
	}
	free(rsm->reg_index_map);
	if (rsm->value_map != NULL)
		DEL_ARR_F(rsm->value_map);
	DEL_ARR_F(rsm->regs);

	rsm->reg_index_map = NULL;
	rsm->value_map     = NULL;
}
static void rsm_clear_regs(register_state_mapping_t *rsm,
                           const arch_env_t *arch_env)
{
	unsigned   n_reg_classes = arch_env->n_register_classes;
	unsigned   c;
	reg_flag_t memory = { NULL, arch_register_req_type_none };

	for (c = 0; c < n_reg_classes; ++c) {
		const arch_register_class_t *cls    = &arch_env->register_classes[c];
		unsigned                     n_regs = arch_register_class_n_regs(cls);
		unsigned                     r;

		for (r = 0; r < n_regs; ++r) {
			rsm->reg_index_map[c][r] = -1;
		}
	}
	ARR_RESIZE(reg_flag_t, rsm->regs, 0);
	ARR_APP1(reg_flag_t, rsm->regs, memory);

	if (rsm->value_map != NULL) {
		DEL_ARR_F(rsm->value_map);
		rsm->value_map = NULL;
	}
}
static int rsm_add_reg(register_state_mapping_t *rsm,
                       const arch_register_t *reg,
                       arch_register_req_type_t flags)
{
	int        input_idx = ARR_LEN(rsm->regs);
	int        cls_idx   = reg->reg_class->index;
	int        reg_idx   = reg->index;
	reg_flag_t regflag   = { reg, flags };

	/* we must not have used get_value yet */
	assert(rsm->reg_index_map[cls_idx][reg_idx] == -1);
	rsm->reg_index_map[cls_idx][reg_idx] = input_idx;
	ARR_APP1(reg_flag_t, rsm->regs, regflag);

	if (rsm->value_map != NULL) {
		ARR_APP1(ir_node*, rsm->value_map, NULL);
		assert(ARR_LEN(rsm->value_map) == ARR_LEN(rsm->regs));
	}
	return input_idx;
}
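/* Editor's illustration: the index returned by rsm_add_reg() is exactly the
 * one that rsm_get_reg_value()/rsm_set_reg_value() later recover through
 * reg_index_map, so a state value can be addressed either positionally or by
 * register:
 *
 *     int idx = rsm_add_reg(rsm, reg, arch_register_req_type_none);
 *     rsm_set_value(rsm, idx, value);
 *     assert(rsm_get_reg_value(rsm, reg) == value);
 */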
static ir_node *rsm_get_value(register_state_mapping_t *rsm, int index)
{
	assert(0 <= index && index < ARR_LEN(rsm->value_map));
	return rsm->value_map[index];
}
static ir_node *rsm_get_reg_value(register_state_mapping_t *rsm,
                                  const arch_register_t *reg)
{
	int cls_idx   = reg->reg_class->index;
	int reg_idx   = reg->index;
	int input_idx = rsm->reg_index_map[cls_idx][reg_idx];

	return rsm_get_value(rsm, input_idx);
}
static void rsm_set_value(register_state_mapping_t *rsm, int index,
                          ir_node *value)
{
	assert(0 <= index && index < ARR_LEN(rsm->value_map));
	rsm->value_map[index] = value;
}
static void rsm_set_reg_value(register_state_mapping_t *rsm,
                              const arch_register_t *reg, ir_node *value)
{
	int cls_idx   = reg->reg_class->index;
	int reg_idx   = reg->index;
	int input_idx = rsm->reg_index_map[cls_idx][reg_idx];
	rsm_set_value(rsm, input_idx, value);
}
static ir_node *rsm_create_barrier(register_state_mapping_t *rsm,
                                   ir_node *block)
{
	int       n_barrier_outs = ARR_LEN(rsm->regs);
	ir_node **in             = rsm->value_map;
	ir_node  *barrier;
	int       o;

	assert(ARR_LEN(rsm->value_map) == n_barrier_outs);
	barrier = be_new_Barrier(block, n_barrier_outs, in);
	for (o = 0; o < n_barrier_outs; ++o) {
		const reg_flag_t      *regflag = &rsm->regs[o];
		const arch_register_t *reg     = regflag->reg;
		ir_node               *proj;
		if (reg == NULL) {
			arch_set_out_register_req(barrier, o, arch_no_register_req);
			proj = new_r_Proj(barrier, mode_M, o);
		} else {
			be_set_constr_single_reg_in(barrier, o, reg,
			                            arch_register_req_type_none);
			be_set_constr_single_reg_out(barrier, o, reg, regflag->flags);
			proj = new_r_Proj(barrier, reg->reg_class->mode, o);
		}
		rsm->value_map[o] = proj;
	}
	rsm->last_barrier = barrier;
	return barrier;
}
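/* Editor's sketch of the barrier contract: every state value is routed
 * through the Barrier and replaced by the corresponding Proj, so later code
 * must use the post-barrier values and cannot be reordered across it:
 *
 *     mem, v(r0), v(r1), ...  ->  Barrier  ->  Proj#0, Proj#1, Proj#2, ...
 *
 * Afterwards value_map[o] holds the Proj, not the original value. */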
beabi_helper_env_t *be_abihelper_prepare(ir_graph *irg)
{
	const arch_env_t   *arch_env = be_get_irg_arch_env(irg);
	beabi_helper_env_t *env      = XMALLOCZ(beabi_helper_env_t);

	env->irg = irg;
	prepare_rsm(&env->prolog, arch_env);
	prepare_rsm(&env->epilog, arch_env);

	return env;
}
void be_abihelper_finish(beabi_helper_env_t *env)
{
	const arch_env_t *arch_env = be_get_irg_arch_env(env->irg);

	free_rsm(&env->prolog, arch_env);
	if (env->epilog.reg_index_map != NULL) {
		free_rsm(&env->epilog, arch_env);
	}
	free(env);
}
void be_prolog_add_reg(beabi_helper_env_t *env, const arch_register_t *reg,
                       arch_register_req_type_t flags)
{
	rsm_add_reg(&env->prolog, reg, flags);
}
ir_node *be_prolog_create_start(beabi_helper_env_t *env, dbg_info *dbgi,
                                ir_node *block)
{
	int      n_start_outs = ARR_LEN(env->prolog.regs);
	ir_node *start        = be_new_Start(dbgi, block, n_start_outs);
	int      o;

	assert(env->prolog.value_map == NULL);
	env->prolog.value_map = NEW_ARR_F(ir_node*, n_start_outs);

	for (o = 0; o < n_start_outs; ++o) {
		const reg_flag_t      *regflag = &env->prolog.regs[o];
		const arch_register_t *reg     = regflag->reg;
		ir_node               *proj;
		if (reg == NULL) {
			arch_set_out_register_req(start, o, arch_no_register_req);
			proj = new_r_Proj(start, mode_M, o);
		} else {
			be_set_constr_single_reg_out(start, o, regflag->reg,
			                             regflag->flags);
			arch_irn_set_register(start, o, regflag->reg);
			proj = new_r_Proj(start, reg->reg_class->mode, o);
		}
		env->prolog.value_map[o] = proj;
	}

	/* start node should really be the first thing constructed */
	assert(env->prolog.last_barrier == NULL);
	env->prolog.last_barrier = start;
	return start;
}
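/* Usage sketch (editor's addition, not original code): a backend prolog
 * typically registers the ABI-relevant registers and then materializes the
 * Start node. "sp_reg" is a hypothetical stack-pointer register. */
#if 0
static ir_node *example_prolog(beabi_helper_env_t *env, ir_node *block,
                               const arch_register_t *sp_reg)
{
	ir_node *initial_sp;
	be_prolog_add_reg(env, sp_reg, arch_register_req_type_none);
	be_prolog_create_start(env, NULL, block);
	/* fetch the values produced by the Start node for further construction */
	initial_sp = be_prolog_get_reg_value(env, sp_reg);
	return initial_sp;
}
#endif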
ir_node *be_prolog_create_barrier(beabi_helper_env_t *env, ir_node *block)
{
	return rsm_create_barrier(&env->prolog, block);
}
ir_node *be_prolog_get_reg_value(beabi_helper_env_t *env,
                                 const arch_register_t *reg)
{
	return rsm_get_reg_value(&env->prolog, reg);
}

ir_node *be_prolog_get_memory(beabi_helper_env_t *env)
{
	return rsm_get_value(&env->prolog, 0);
}
void be_prolog_set_reg_value(beabi_helper_env_t *env,
                             const arch_register_t *reg, ir_node *value)
{
	rsm_set_reg_value(&env->prolog, reg, value);
}

void be_prolog_set_memory(beabi_helper_env_t *env, ir_node *value)
{
	rsm_set_value(&env->prolog, 0, value);
}
void be_epilog_begin(beabi_helper_env_t *env)
{
	const arch_env_t *arch_env = be_get_irg_arch_env(env->irg);
	rsm_clear_regs(&env->epilog, arch_env);
	env->epilog.value_map    = NEW_ARR_F(ir_node*, 1);
	env->epilog.value_map[0] = NULL;
}
void be_epilog_add_reg(beabi_helper_env_t *env, const arch_register_t *reg,
                       arch_register_req_type_t flags, ir_node *value)
{
	int index = rsm_add_reg(&env->epilog, reg, flags);
	rsm_set_value(&env->epilog, index, value);
}
void be_epilog_set_reg_value(beabi_helper_env_t *env,
                             const arch_register_t *reg, ir_node *value)
{
	rsm_set_reg_value(&env->epilog, reg, value);
}

void be_epilog_set_memory(beabi_helper_env_t *env, ir_node *value)
{
	rsm_set_value(&env->epilog, 0, value);
}

ir_node *be_epilog_get_reg_value(beabi_helper_env_t *env,
                                 const arch_register_t *reg)
{
	return rsm_get_reg_value(&env->epilog, reg);
}

ir_node *be_epilog_get_memory(beabi_helper_env_t *env)
{
	return rsm_get_value(&env->epilog, 0);
}

ir_node *be_epilog_create_barrier(beabi_helper_env_t *env, ir_node *block)
{
	return rsm_create_barrier(&env->epilog, block);
}
ir_node *be_epilog_create_return(beabi_helper_env_t *env, dbg_info *dbgi,
                                 ir_node *block)
{
	int       n_return_in = ARR_LEN(env->epilog.regs);
	ir_node **in          = env->epilog.value_map;
	int       n_res       = 1; /* TODO */
	unsigned  pop         = 0; /* TODO */
	ir_node  *ret;
	int       i;

	assert(ARR_LEN(env->epilog.value_map) == n_return_in);
	ret = be_new_Return(dbgi, get_irn_irg(block), block, n_res, pop,
	                    n_return_in, in);
	for (i = 0; i < n_return_in; ++i) {
		const reg_flag_t      *regflag = &env->epilog.regs[i];
		const arch_register_t *reg     = regflag->reg;
		if (reg != NULL) {
			be_set_constr_single_reg_in(ret, i, reg,
			                            arch_register_req_type_none);
		}
	}

	rsm_clear_regs(&env->epilog, be_get_irg_arch_env(env->irg));
	env->epilog.last_barrier = NULL;
	return ret;
}
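/* Usage sketch (editor's addition, not original code): the matching epilog
 * sequence. "sp_reg", "mem" and "sp" are hypothetical placeholders for the
 * target's stack pointer and the current memory/stack values. */
#if 0
static ir_node *example_epilog(beabi_helper_env_t *env, ir_node *block,
                               const arch_register_t *sp_reg,
                               ir_node *mem, ir_node *sp)
{
	be_epilog_begin(env);
	be_epilog_set_memory(env, mem);
	be_epilog_add_reg(env, sp_reg, arch_register_req_type_none, sp);
	return be_epilog_create_return(env, NULL, block);
}
#endif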
static void add_missing_keep_walker(ir_node *node, void *data)
{
	int              n_outs, i;
	unsigned        *found_projs;
	const ir_edge_t *edge;
	ir_mode         *mode = get_irn_mode(node);
	ir_node         *last_keep = NULL;
	(void) data;

	if (mode != mode_T)
		return;
	n_outs = arch_irn_get_n_outs(node);
	if (n_outs <= 0)
		return;

	rbitset_alloca(found_projs, n_outs);
	foreach_out_edge(node, edge) {
		ir_node *succ = get_edge_src_irn(edge);
		ir_mode *mode = get_irn_mode(succ);
		int      pn;

		/* The node could be kept */
		if (is_End(succ) || is_Anchor(succ))
			continue;
		if (mode == mode_M || mode == mode_X)
			continue;
		pn = get_Proj_proj(succ);
		rbitset_set(found_projs, pn);
	}

	/* are keeps missing? */
	for (i = 0; i < n_outs; ++i) {
		ir_node                     *block;
		ir_node                     *in[1];
		const arch_register_req_t   *req;
		const arch_register_class_t *cls;

		if (rbitset_is_set(found_projs, i)) {
			continue;
		}
		req = arch_get_out_register_req(node, i);
		cls = req->cls;
		if (cls == NULL || (cls->flags & arch_register_class_flag_manual_ra)) {
			continue;
		}

		block = get_nodes_block(node);
		in[0] = new_r_Proj(node, arch_register_class_mode(cls), i);
		if (last_keep != NULL) {
			be_Keep_add_node(last_keep, cls, in[0]);
		} else {
			last_keep = be_new_Keep(block, 1, in);
			if (sched_is_scheduled(node)) {
				sched_add_after(node, last_keep);
			}
		}
	}
}
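/* Editor's note with a concrete case: a two-result node such as a combined
 * div/mod where only the quotient has a Proj would otherwise let the register
 * allocator treat the remainder's register as free. The walker above attaches
 * a be_Keep to every unused register output so that register stays blocked
 * until after the node. */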
void be_add_missing_keeps(ir_graph *irg)
{
	irg_walk_graph(irg, add_missing_keep_walker, NULL, NULL);
}
/** prepend the node to its block's list of stack-modifying nodes
 *  (the list is threaded through the irn links) */
static void collect_node(ir_node *node)
{
	ir_node *block = get_nodes_block(node);
	ir_node *old   = (ir_node*)get_irn_link(block);

	set_irn_link(node, old);
	set_irn_link(block, node);
}
static void link_ops_in_block_walker(ir_node *node, void *data)
{
	(void) data;
	switch (get_irn_opcode(node)) {
	case iro_Return:
	case iro_Call:
		collect_node(node);
		break;
	case iro_Alloc:
		/* all non-stack alloc nodes should be lowered before the backend */
		assert(get_Alloc_where(node) == stack_alloc);
		collect_node(node);
		break;
	case iro_Free:
		assert(get_Free_where(node) == stack_alloc);
		collect_node(node);
		break;
	case iro_Builtin:
		if (get_Builtin_kind(node) == ir_bk_return_address) {
			ir_node   *param = get_Builtin_param(node, 0);
			ir_tarval *tv    = get_Const_tarval(param); /* must be Const */
			long       value = get_tarval_long(tv);
			if (value > 0) {
				/* we need esp for the climbframe algo */
				collect_node(node);
			}
		}
		break;
	default:
		break;
	}
}
static ir_heights_t *heights;
/**
 * Check if a node is somehow data dependent on another one.
 * Both nodes must be in the same basic block.
 * @param n1  The first node.
 * @param n2  The second node.
 * @return    1 if n1 is data dependent (transitively) on n2, 0 if not.
 */
static int dependent_on(const ir_node *n1, const ir_node *n2)
{
	assert(get_nodes_block(n1) == get_nodes_block(n2));

	return heights_reachable_in_block(heights, n1, n2);
}
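/* Editor's illustration: with a = Load(...) and b = Add(a, 1) in the same
 * block, dependent_on(b, a) is 1 (b transitively uses a) while
 * dependent_on(a, b) is 0. */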
static int cmp_call_dependency(const void *c1, const void *c2)
{
	const ir_node *n1 = *(const ir_node **) c1;
	const ir_node *n2 = *(const ir_node **) c2;

	/*
	 * Classical qsort() comparison function behavior:
	 *  0 if both elements are equal
	 *  1 if the second is "smaller" than the first
	 * -1 if the first is "smaller" than the second
	 */
	if (dependent_on(n1, n2))
		return -1;
	if (dependent_on(n2, n1))
		return 1;

	/* The nodes have no depth order, but we need a total order because qsort()
	 * is not stable. */
	return get_irn_idx(n2) - get_irn_idx(n1);
}
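/* Editor's worked example: if y is data dependent on x, then
 * cmp_call_dependency(&y, &x) returns -1 and qsort() places y before x; for
 * two independent nodes the node-index difference keeps the order total (and
 * deterministic). */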
/**
 * Sort the stack-modifying nodes of a block by their data dependencies and
 * record, for each node, its predecessor in that order.
 */
static void process_ops_in_block(ir_node *block, void *data)
{
	ir_phase *phase   = (ir_phase*)data;
	unsigned  n;
	unsigned  n_nodes = 0;
	ir_node  *node;
	ir_node **nodes;

	for (node = (ir_node*)get_irn_link(block); node != NULL;
	     node = (ir_node*)get_irn_link(node)) {
		++n_nodes;
	}
	if (n_nodes == 0)
		return;

	nodes = XMALLOCN(ir_node*, n_nodes);
	n     = 0;
	for (node = (ir_node*)get_irn_link(block); node != NULL;
	     node = (ir_node*)get_irn_link(node)) {
		nodes[n++] = node;
	}
	assert(n == n_nodes);

	/* order nodes according to their data dependencies */
	qsort(nodes, n_nodes, sizeof(nodes[0]), cmp_call_dependency);

	for (n = n_nodes-1; n > 0; --n) {
		ir_node *node = nodes[n];
		ir_node *pred = nodes[n-1];

		phase_set_irn_data(phase, node, pred);
	}
	free(nodes);
}
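/* Editor's illustration: for a block whose sorted stack nodes are nodes[0],
 * nodes[1], ..., the phase data forms the chain
 *     nodes[0] <- nodes[1] <- ... <- nodes[n_nodes-1]
 * which be_get_stack_pred() below follows one step at a time. */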
void be_collect_stacknodes(beabi_helper_env_t *env)
{
	ir_graph *irg = env->irg;
	irg_walk_graph(irg, firm_clear_link, link_ops_in_block_walker, NULL);

	assert(env->stack_order == NULL);
	env->stack_order = new_phase(irg, phase_irn_init_default);

	heights = heights_new(irg);
	irg_block_walk_graph(irg, NULL, process_ops_in_block, env->stack_order);
	heights_free(heights);
}
ir_node *be_get_stack_pred(const beabi_helper_env_t *env, const ir_node *node)
{
	return (ir_node*)phase_get_irn_data(env->stack_order, node);
}
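/* Usage sketch (editor's addition, not original code): walking a block's
 * stack-node chain backwards, starting from some collected node "n". */
#if 0
static void example_walk_stack_chain(const beabi_helper_env_t *env,
                                     ir_node *n)
{
	while (n != NULL) {
		/* ... inspect the stack-modifying node ... */
		n = be_get_stack_pred(env, n);
	}
}
#endif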