/*
 * This file is part of libFirm.
 * Copyright (C) 2012 University of Karlsruhe.
 */

/**
 * @file
 * @brief  Helper functions for handling ABI constraints in the code
 * @author Matthias Braun
 */
14 #include "beabihelper.h"
22 #include "irnodemap.h"
27 * An entry in the register state map.
29 typedef struct reg_flag_t {
30 const arch_register_t *reg; /**< register at an input position.
31 may be NULL in case of memory input */
32 arch_register_req_type_t flags; /**< requirement flags for this register. */
36 * A register state mapping keeps track of the symbol values (=firm nodes)
37 * to registers. This is useful when constructing straight line code
38 * like the function prolog or epilog in some architectures.
40 typedef struct register_state_mapping_t {
41 ir_node **value_map; /**< mapping of state indices to values */
42 size_t **reg_index_map; /**< mapping of regclass,regnum to an index
44 reg_flag_t *regs; /**< registers (and memory values) that form a
46 } register_state_mapping_t;
49 * The environment for all helper functions.
51 struct beabi_helper_env_t {
52 ir_graph *irg; /**< the graph we operate on */
53 register_state_mapping_t prolog; /**< the register state map for the prolog */
54 register_state_mapping_t epilog; /**< the register state map for the epilog */
58 * Create a new empty register state map for the given
61 * @param rsm the register state map to be initialized
62 * @param arch_env the architecture environment
64 * After this call, the register map is initialized to empty.
66 static void prepare_rsm(register_state_mapping_t *rsm,
67 const arch_env_t *arch_env)
69 unsigned n_reg_classes = arch_env->n_register_classes;
71 reg_flag_t memory = { NULL, arch_register_req_type_none };
73 rsm->regs = NEW_ARR_F(reg_flag_t, 0);
74 /* memory input at 0 */
75 ARR_APP1(reg_flag_t, rsm->regs, memory);
77 rsm->value_map = NULL;
78 rsm->reg_index_map = XMALLOCN(size_t*, n_reg_classes);
79 for (c = 0; c < n_reg_classes; ++c) {
80 const arch_register_class_t *cls = &arch_env->register_classes[c];
81 unsigned n_regs = arch_register_class_n_regs(cls);
84 rsm->reg_index_map[c] = XMALLOCN(size_t, n_regs);
85 for (r = 0; r < n_regs; ++r) {
86 rsm->reg_index_map[c][r] = (size_t)-1;
92 * Destroy a register state map for the given
95 * @param rsm the register state map to be destroyed
96 * @param arch_env the architecture environment
98 * After this call, the register map is initialized to empty.
100 static void free_rsm(register_state_mapping_t *rsm, const arch_env_t *arch_env)
102 unsigned n_reg_classes = arch_env->n_register_classes;
105 for (c = 0; c < n_reg_classes; ++c) {
106 free(rsm->reg_index_map[c]);
109 free(rsm->reg_index_map);
110 if (rsm->value_map != NULL)
111 DEL_ARR_F(rsm->value_map);
112 DEL_ARR_F(rsm->regs);
115 rsm->reg_index_map = NULL;
116 rsm->value_map = NULL;
120 * Remove all registers from a register state map.
122 * @param rsm the register state map to be destroyed
123 * @param arch_env the architecture environment
125 static void rsm_clear_regs(register_state_mapping_t *rsm,
126 const arch_env_t *arch_env)
128 unsigned n_reg_classes = arch_env->n_register_classes;
130 reg_flag_t memory = { NULL, arch_register_req_type_none };
132 for (c = 0; c < n_reg_classes; ++c) {
133 const arch_register_class_t *cls = &arch_env->register_classes[c];
134 unsigned n_regs = arch_register_class_n_regs(cls);
137 for (r = 0; r < n_regs; ++r) {
138 rsm->reg_index_map[c][r] = (size_t)-1;
141 ARR_RESIZE(reg_flag_t, rsm->regs, 0);
142 ARR_APP1(reg_flag_t, rsm->regs, memory);
144 if (rsm->value_map != NULL) {
145 DEL_ARR_F(rsm->value_map);
146 rsm->value_map = NULL;
151 * Add a register and its constraint flags to a register state map
152 * and return its index inside the map.
154 static size_t rsm_add_reg(register_state_mapping_t *rsm,
155 const arch_register_t *reg,
156 arch_register_req_type_t flags)
158 size_t input_idx = ARR_LEN(rsm->regs);
159 int cls_idx = reg->reg_class->index;
160 int reg_idx = reg->index;
161 reg_flag_t regflag = { reg, flags };
163 /* we must not have used get_value yet */
164 assert(rsm->reg_index_map[cls_idx][reg_idx] == (size_t)-1);
165 rsm->reg_index_map[cls_idx][reg_idx] = input_idx;
166 ARR_APP1(reg_flag_t, rsm->regs, regflag);
168 if (rsm->value_map != NULL) {
169 ARR_APP1(ir_node*, rsm->value_map, NULL);
170 assert(ARR_LEN(rsm->value_map) == ARR_LEN(rsm->regs));
176 * Retrieve the ir_node stored at the given index in the register state map.
178 static ir_node *rsm_get_value(register_state_mapping_t *rsm, size_t index)
180 assert(index < ARR_LEN(rsm->value_map));
181 return rsm->value_map[index];
185 * Retrieve the ir_node occupying the given register in the register state map.
187 static ir_node *rsm_get_reg_value(register_state_mapping_t *rsm,
188 const arch_register_t *reg)
190 int cls_idx = reg->reg_class->index;
191 int reg_idx = reg->index;
192 size_t input_idx = rsm->reg_index_map[cls_idx][reg_idx];
194 return rsm_get_value(rsm, input_idx);
198 * Enter a ir_node at the given index in the register state map.
200 static void rsm_set_value(register_state_mapping_t *rsm, size_t index,
203 assert(index < ARR_LEN(rsm->value_map));
204 rsm->value_map[index] = value;
208 * Enter a ir_node at the given register in the register state map.
210 static void rsm_set_reg_value(register_state_mapping_t *rsm,
211 const arch_register_t *reg, ir_node *value)
213 int cls_idx = reg->reg_class->index;
214 int reg_idx = reg->index;
215 size_t input_idx = rsm->reg_index_map[cls_idx][reg_idx];
216 rsm_set_value(rsm, input_idx, value);
220 beabi_helper_env_t *be_abihelper_prepare(ir_graph *irg)
222 const arch_env_t *arch_env = be_get_irg_arch_env(irg);
223 beabi_helper_env_t *env = XMALLOCZ(beabi_helper_env_t);
226 prepare_rsm(&env->prolog, arch_env);
227 prepare_rsm(&env->epilog, arch_env);
232 void be_abihelper_finish(beabi_helper_env_t *env)
234 const arch_env_t *arch_env = be_get_irg_arch_env(env->irg);
236 free_rsm(&env->prolog, arch_env);
237 if (env->epilog.reg_index_map != NULL) {
238 free_rsm(&env->epilog, arch_env);
243 void be_prolog_add_reg(beabi_helper_env_t *env, const arch_register_t *reg,
244 arch_register_req_type_t flags)
246 rsm_add_reg(&env->prolog, reg, flags);
249 ir_node *be_prolog_create_start(beabi_helper_env_t *env, dbg_info *dbgi,
252 int n_start_outs = ARR_LEN(env->prolog.regs);
253 ir_node *start = be_new_Start(dbgi, block, n_start_outs);
256 assert(env->prolog.value_map == NULL);
257 env->prolog.value_map = NEW_ARR_F(ir_node*, n_start_outs);
259 for (o = 0; o < n_start_outs; ++o) {
260 const reg_flag_t *regflag = &env->prolog.regs[o];
261 const arch_register_t *reg = regflag->reg;
264 arch_set_irn_register_req_out(start, o, arch_no_register_req);
265 proj = new_r_Proj(start, mode_M, o);
267 be_set_constr_single_reg_out(start, o, regflag->reg,
269 arch_set_irn_register_out(start, o, regflag->reg);
270 proj = new_r_Proj(start, reg->reg_class->mode, o);
272 env->prolog.value_map[o] = proj;
278 ir_node *be_prolog_get_reg_value(beabi_helper_env_t *env,
279 const arch_register_t *reg)
281 return rsm_get_reg_value(&env->prolog, reg);
284 ir_node *be_prolog_get_memory(beabi_helper_env_t *env)
286 return rsm_get_value(&env->prolog, 0);
289 void be_prolog_set_reg_value(beabi_helper_env_t *env,
290 const arch_register_t *reg, ir_node *value)
292 rsm_set_reg_value(&env->prolog, reg, value);
295 void be_prolog_set_memory(beabi_helper_env_t *env, ir_node *value)
297 rsm_set_value(&env->prolog, 0, value);
302 void be_epilog_begin(beabi_helper_env_t *env)
304 const arch_env_t *arch_env = be_get_irg_arch_env(env->irg);
305 rsm_clear_regs(&env->epilog, arch_env);
306 env->epilog.value_map = NEW_ARR_F(ir_node*, 1);
307 env->epilog.value_map[0] = NULL;
310 void be_epilog_add_reg(beabi_helper_env_t *env, const arch_register_t *reg,
311 arch_register_req_type_t flags, ir_node *value)
313 size_t index = rsm_add_reg(&env->epilog, reg, flags);
314 rsm_set_value(&env->epilog, index, value);
317 void be_epilog_set_reg_value(beabi_helper_env_t *env,
318 const arch_register_t *reg, ir_node *value)
320 rsm_set_reg_value(&env->epilog, reg, value);
323 void be_epilog_set_memory(beabi_helper_env_t *env, ir_node *value)
325 rsm_set_value(&env->epilog, 0, value);
328 ir_node *be_epilog_get_reg_value(beabi_helper_env_t *env,
329 const arch_register_t *reg)
331 return rsm_get_reg_value(&env->epilog, reg);
334 ir_node *be_epilog_get_memory(beabi_helper_env_t *env)
336 return rsm_get_value(&env->epilog, 0);
339 ir_node *be_epilog_create_return(beabi_helper_env_t *env, dbg_info *dbgi,
342 size_t n_return_in = ARR_LEN(env->epilog.regs);
343 ir_node **in = env->epilog.value_map;
344 int n_res = 1; /* TODO */
345 unsigned pop = 0; /* TODO */
348 assert(ARR_LEN(env->epilog.value_map) == n_return_in);
350 ir_node *const ret = be_new_Return(dbgi, block, n_res, pop, n_return_in, in);
351 for (i = 0; i < n_return_in; ++i) {
352 const reg_flag_t *regflag = &env->epilog.regs[i];
353 const arch_register_t *reg = regflag->reg;
355 be_set_constr_single_reg_in(ret, i, reg,
356 arch_register_req_type_none);
360 rsm_clear_regs(&env->epilog, be_get_irg_arch_env(env->irg));
366 * Tests whether a node has a real user and is not just kept by the End or
369 static bool has_real_user(const ir_node *node)
371 foreach_out_edge(node, edge) {
372 ir_node *user = get_edge_src_irn(edge);
373 if (!is_End(user) && !is_Anchor(user))
379 static ir_node *add_to_keep(ir_node *last_keep,
380 const arch_register_class_t *cls, ir_node *node)
382 if (last_keep != NULL) {
383 be_Keep_add_node(last_keep, cls, node);
385 ir_node *in[1] = { node };
386 ir_node *block = get_nodes_block(node);
388 last_keep = be_new_Keep(block, 1, in);
390 schedpoint = skip_Proj(node);
391 if (sched_is_scheduled(schedpoint)) {
392 sched_add_after(schedpoint, last_keep);
398 void be_add_missing_keeps_node(ir_node *node)
401 ir_mode *mode = get_irn_mode(node);
404 if (mode != mode_T) {
405 if (!has_real_user(node)) {
406 const arch_register_req_t *req = arch_get_irn_register_req(node);
407 const arch_register_class_t *cls = req->cls;
409 || (cls->flags & arch_register_class_flag_manual_ra)) {
413 add_to_keep(NULL, cls, node);
418 n_outs = arch_get_irn_n_outs(node);
422 unsigned *const found_projs = rbitset_alloca(n_outs);
423 ir_node **const existing_projs = ALLOCANZ(ir_node*, n_outs);
424 foreach_out_edge(node, edge) {
425 ir_node *succ = get_edge_src_irn(edge);
426 ir_mode *mode = get_irn_mode(succ);
429 /* The node could be kept */
430 if (is_End(succ) || is_Anchor(succ))
432 if (mode == mode_M || mode == mode_X)
434 pn = get_Proj_proj(succ);
435 existing_projs[pn] = succ;
436 if (!has_real_user(succ))
440 rbitset_set(found_projs, pn);
443 /* are keeps missing? */
445 for (i = 0; i < n_outs; ++i) {
447 const arch_register_req_t *req;
448 const arch_register_class_t *cls;
450 if (rbitset_is_set(found_projs, i)) {
454 req = arch_get_irn_register_req_out(node, i);
456 if (cls == NULL || (cls->flags & arch_register_class_flag_manual_ra)) {
460 value = existing_projs[i];
462 value = new_r_Proj(node, arch_register_class_mode(cls), i);
463 last_keep = add_to_keep(last_keep, cls, value);
467 static void add_missing_keep_walker(ir_node *node, void *data)
470 be_add_missing_keeps_node(node);
473 void be_add_missing_keeps(ir_graph *irg)
475 irg_walk_graph(irg, add_missing_keep_walker, NULL, NULL);
480 * Link the node into its block list as a new head.
482 static void collect_node(ir_node *node)
484 ir_node *block = get_nodes_block(node);
485 ir_node *old = (ir_node*)get_irn_link(block);
487 set_irn_link(node, old);
488 set_irn_link(block, node);
492 * Post-walker: link all nodes that probably access the stack into lists of their block.
494 static void link_ops_in_block_walker(ir_node *node, void *data)
498 switch (get_irn_opcode(node)) {
504 /** all non-stack alloc nodes should be lowered before the backend */
505 assert(get_Alloc_where(node) == stack_alloc);
509 assert(get_Free_where(node) == stack_alloc);
513 if (get_Builtin_kind(node) == ir_bk_return_address) {
514 ir_node *param = get_Builtin_param(node, 0);
515 ir_tarval *tv = get_Const_tarval(param); /* must be Const */
516 long value = get_tarval_long(tv);
518 /* not the return address of the current function:
519 * we need the stack pointer for the frame climbing */
529 static ir_heights_t *heights;
532 * Check if a node is somehow data dependent on another one.
533 * both nodes must be in the same basic block.
534 * @param n1 The first node.
535 * @param n2 The second node.
536 * @return 1, if n1 is data dependent (transitively) on n2, 0 if not.
538 static int dependent_on(const ir_node *n1, const ir_node *n2)
540 assert(get_nodes_block(n1) == get_nodes_block(n2));
542 return heights_reachable_in_block(heights, n1, n2);
546 * Classical qsort() comparison function behavior:
548 * 0 if both elements are equal, no node depend on the other
549 * +1 if first depends on second (first is greater)
550 * -1 if second depends on first (second is greater)
552 static int cmp_call_dependency(const void *c1, const void *c2)
554 const ir_node *n1 = *(const ir_node **) c1;
555 const ir_node *n2 = *(const ir_node **) c2;
558 if (dependent_on(n1, n2))
561 if (dependent_on(n2, n1))
564 /* The nodes have no depth order, but we need a total order because qsort()
567 * Additionally, we need to respect transitive dependencies. Consider a
568 * Call a depending on Call b and an independent Call c.
569 * We MUST NOT order c > a and b > c. */
570 h1 = get_irn_height(heights, n1);
571 h2 = get_irn_height(heights, n2);
572 if (h1 < h2) return 1;
573 if (h1 > h2) return -1;
574 /* Same height, so use a random (but stable) order */
575 return get_irn_idx(n2) - get_irn_idx(n1);
579 * Block-walker: sorts dependencies and remember them into a phase
581 static void process_ops_in_block(ir_node *block, void *data)
583 ir_nodemap *map = (ir_nodemap*)data;
590 for (node = (ir_node*)get_irn_link(block); node != NULL;
591 node = (ir_node*)get_irn_link(node)) {
598 nodes = XMALLOCN(ir_node*, n_nodes);
600 for (node = (ir_node*)get_irn_link(block); node != NULL;
601 node = (ir_node*)get_irn_link(node)) {
604 assert(n == n_nodes);
606 /* order nodes according to their data dependencies */
607 qsort(nodes, n_nodes, sizeof(nodes[0]), cmp_call_dependency);
609 /* remember the calculated dependency into a phase */
610 for (n = n_nodes-1; n > 0; --n) {
611 ir_node *node = nodes[n];
612 ir_node *pred = nodes[n-1];
614 ir_nodemap_insert(map, node, pred);
621 struct be_stackorder_t {
622 ir_nodemap stack_order; /**< a phase to handle stack dependencies. */
625 be_stackorder_t *be_collect_stacknodes(ir_graph *irg)
627 be_stackorder_t *env = XMALLOCZ(be_stackorder_t);
629 ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK);
631 /* collect all potential^stack accessing nodes */
632 irg_walk_graph(irg, firm_clear_link, link_ops_in_block_walker, NULL);
634 ir_nodemap_init(&env->stack_order, irg);
636 /* use heights to create a total order for those nodes: this order is stored
637 * in the created phase */
638 heights = heights_new(irg);
639 irg_block_walk_graph(irg, NULL, process_ops_in_block, &env->stack_order);
640 heights_free(heights);
642 ir_free_resources(irg, IR_RESOURCE_IRN_LINK);
647 ir_node *be_get_stack_pred(const be_stackorder_t *env, const ir_node *node)
649 return ir_nodemap_get(ir_node, &env->stack_order, node);
652 void be_free_stackorder(be_stackorder_t *env)
654 ir_nodemap_destroy(&env->stack_order);
658 static void create_stores_for_type(ir_graph *irg, ir_type *type)
660 size_t n = get_compound_n_members(type);
661 ir_node *frame = get_irg_frame(irg);
662 ir_node *initial_mem = get_irg_initial_mem(irg);
663 ir_node *mem = initial_mem;
664 ir_node *first_store = NULL;
665 ir_node *start_block = get_irg_start_block(irg);
666 ir_node *args = get_irg_args(irg);
669 /* all parameter entities left in the frame type require stores.
670 * (The ones passed on the stack have been moved to the arg type) */
671 for (i = 0; i < n; ++i) {
672 ir_entity *entity = get_compound_member(type, i);
675 if (!is_parameter_entity(entity))
678 arg = get_entity_parameter_number(entity);
679 if (arg == IR_VA_START_PARAMETER_NUMBER)
682 addr = new_r_Sel(start_block, mem, frame, 0, NULL, entity);
683 if (entity->attr.parameter.doubleword_low_mode != NULL) {
684 ir_mode *mode = entity->attr.parameter.doubleword_low_mode;
685 ir_node *val0 = new_r_Proj(args, mode, arg);
686 ir_node *val1 = new_r_Proj(args, mode, arg+1);
687 ir_node *store0 = new_r_Store(start_block, mem, addr, val0,
689 ir_node *mem0 = new_r_Proj(store0, mode_M, pn_Store_M);
690 size_t offset = get_mode_size_bits(mode)/8;
691 ir_mode *addr_mode = get_irn_mode(addr);
692 ir_node *cnst = new_r_Const_long(irg, addr_mode, offset);
693 ir_node *next_addr = new_r_Add(start_block, addr, cnst, addr_mode);
694 ir_node *store1 = new_r_Store(start_block, mem0, next_addr, val1,
696 mem = new_r_Proj(store1, mode_M, pn_Store_M);
697 if (first_store == NULL)
698 first_store = store0;
700 ir_type *tp = get_entity_type(entity);
701 ir_mode *mode = is_compound_type(tp) ? mode_P : get_type_mode(tp);
702 ir_node *val = new_r_Proj(args, mode, arg);
703 ir_node *store = new_r_Store(start_block, mem, addr, val, cons_none);
704 mem = new_r_Proj(store, mode_M, pn_Store_M);
705 if (first_store == NULL)
710 if (mem != initial_mem)
711 edges_reroute_except(initial_mem, mem, first_store);
714 void be_add_parameter_entity_stores(ir_graph *irg)
716 ir_type *frame_type = get_irg_frame_type(irg);
717 be_stack_layout_t *layout = be_get_irg_stack_layout(irg);
718 ir_type *between_type = layout->between_type;
720 create_stores_for_type(irg, frame_type);
721 if (between_type != NULL) {
722 create_stores_for_type(irg, between_type);