2 * Copyright (C) 1995-2008 University of Karlsruhe. All rights reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief modifies schedule so flags dependencies are respected.
23 * @author Matthias Braun, Christoph Mallon
24 * @version $Id: besched.h 14693 2007-06-21 15:35:49Z beck $
26 * Fixup schedule to respect flag constraints by moving and rematerialisation of
29 * Flags are modeled as register classes with ignore registers. However to avoid
30 * bloating the graph, only flag-consumer -> producer dependencies are
31 * explicitly modeled in the graph. Nodes that just change the flags are only
32 * marked with the arch_irn_flags_modify_flags flag.
34 * Flags are usually a limited resource that can't (or at least shouldn't) be
35 * spilled. So in some situations (for example 2 adc-nodes that use the flags of
36 * a single add node on x86) operations have to be repeated to work correctly.
44 #include "iredges_t.h"
51 #include "besched_t.h"
/* Module-local state, initialized by be_sched_fix_flags(). */

/** The register class used to model flag values. */
static const arch_register_class_t *flag_class = NULL;
/** The flags register: the first register of flag_class (see be_sched_fix_flags). */
static const arch_register_t *flags_reg = NULL;
/** Callback used to rematerialize a flag producer; default_remat is the fallback. */
static func_rematerialize remat = NULL;
/**
 * Default rematerialization strategy: create an exact copy of @p node,
 * place it in the block of @p after and schedule it directly after
 * @p after.
 *
 * NOTE(review): this view of the source is truncated — the return
 * statement is not visible; presumably the copy is returned. Confirm
 * against the complete file.
 *
 * @param node   the flag-producing node to duplicate
 * @param after  the node the copy is scheduled after
 */
static ir_node *default_remat(ir_node *node, ir_node *after)
	ir_node *block, *copy;

	/* the copy must live where its new user lives */
	block = get_nodes_block(after);

	copy = exact_copy(node);
	set_nodes_block(copy, block);
	sched_add_after(after, copy);
/**
 * Tests whether node @p node can legally be moved to a position directly
 * after node @p after (only works for nodes in the same block).
 *
 * The move is legal iff every user of @p node — looking through mode_T
 * result Projs, and treating Phi/End users as usages at block end — is
 * scheduled after @p after.
 *
 * NOTE(review): this view of the source is truncated — several return/
 * continue statements and closing braces are missing.
 */
static int can_move(ir_node *node, ir_node *after)
	const ir_edge_t *edge;
	/* only meaningful when both nodes share a block */
	assert(get_nodes_block(node) == get_nodes_block(after));

	/* TODO respect dep edges */
	assert(get_irn_n_edges_kind(node, EDGE_KIND_DEP) == 0);

	/* all users have to be scheduled after the 'after' node */
	foreach_out_edge(node, edge) {
		ir_node *out = get_edge_src_irn(edge);
		/* presumably this branch handles mode_T results, whose real users
		 * are the users of the Proj nodes — confirm with full source */
		const ir_edge_t *edge2;
		assert(get_irn_n_edges_kind(out, EDGE_KIND_DEP) == 0);
		foreach_out_edge(out, edge2) {
			ir_node *out2 = get_edge_src_irn(edge2);
			/* Phi or End represents a usage at block end. */
			if(is_Phi(out2) || is_End(out2))
			/* presumably a Sync case: look one level further */
			const ir_edge_t *edge3;
			foreach_out_edge(out2, edge3) {
				ir_node *out3 = get_edge_src_irn(edge3);
				/* Phi or End represents a usage at block end. */
				if(is_Phi(out3) || is_End(out3))
				/* no nested Syncs expected */
				assert(!is_Sync(out3));
				/* a user scheduled at or before 'after' forbids the move */
				if(sched_get_time_step(out3) <= sched_get_time_step(after)) {
			} else if(sched_get_time_step(out2) <= sched_get_time_step(after)) {
		/* phi represents a usage at block end */
		/* direct user scheduled at or before 'after' forbids the move */
		if(sched_get_time_step(out) <= sched_get_time_step(after)) {
/**
 * Makes the flag value produced by @p flags_needed available again after
 * @p node: either by moving @p flags_needed down in the schedule (cheap
 * case, only possible within one block when all users permit it) or by
 * rematerializing it via the remat callback and rewiring the consumers in
 * the @p flag_consumers linked list to the new value.
 *
 * NOTE(review): this view of the source is truncated — declarations of
 * copy/value/n/i, the consumer-list walk and closing braces are partly
 * missing.
 *
 * @param flags_needed    the flag-producing node
 * @param node            position: the flags must be live again after this node
 * @param flag_consumers  head of the consumer list (linked via irn_link)
 * @param pn              Proj number of the flag result if the producer is mode_T
 * @param lv              liveness information to keep up to date
 */
static void rematerialize_or_move(ir_node *flags_needed, ir_node *node,
                                  ir_node *flag_consumers, int pn, be_lv_t *lv)
	/* cheap case: simply move the producer down in its schedule */
	if(!is_Block(node) &&
			get_nodes_block(flags_needed) == get_nodes_block(node) &&
			can_move(flags_needed, node)) {
		sched_remove(flags_needed);
		sched_add_after(node, flags_needed);
		/* No need to update liveness, because the node stays in the same block */

	/* expensive case: repeat the computation after 'node' */
	copy = remat(flags_needed, node);

	/* mode_T producer: the consumers use a Proj of the copy */
	if(get_irn_mode(copy) == mode_T) {
		ir_node *block = get_nodes_block(copy);
		ir_mode *mode = flag_class->mode;
		value = new_rd_Proj(NULL, block, copy, mode, pn);

	/* rewire every consumer input that referenced the old producer */
	int arity = get_irn_arity(n);
	for(i = 0; i < arity; ++i) {
		ir_node *in = get_irn_n(n, i);
		if(in == flags_needed) {
			set_irn_n(n, i, value);

	/* No need to introduce the copy, because it only lives in this block, but
	 * we have to update the liveness of all operands */
	if (is_Block(node) ||
			get_nodes_block(node) != get_nodes_block(flags_needed)) {
		for (i = get_irn_arity(copy) - 1; i >= 0; --i) {
			be_liveness_update(lv, get_irn_n(copy, i));
/**
 * Returns whether @p node destroys the flag value. A node counts as
 * flag-modifying if it carries the modify_flags arch flag itself or — for
 * Keep nodes — if any of its operands does.
 *
 * NOTE(review): this view of the source is truncated — the return
 * statements and closing braces are missing.
 */
static int is_modify_flags(ir_node *node) {
	if (arch_irn_is(node, modify_flags))
	/* only Keep nodes get the transitive operand check below */
	if(!be_is_Keep(node))
	arity = get_irn_arity(node);
	for(i = 0; i < arity; ++i) {
		ir_node *in = get_irn_n(node, i);
		if (arch_irn_is(in, modify_flags))
/**
 * Walks up the schedule of @p block (reverse order) and makes sure there
 * are no flag-destroying nodes between a flag-consumer -> flag-producer
 * chain. Fixes problematic situations by moving and/or rematerialisation
 * of the flag-producers.
 * (This can be extended in the future to do some register allocation on
 * targets like ppc32 where we conceptually have 8 flag registers.)
 *
 * NOTE(review): this view of the source is truncated — declarations of
 * node/arity/i/pn, several continue/closing-brace lines and the else arm
 * linking consumers are partly missing.
 *
 * @param block  the block whose schedule is fixed
 * @param env    the be_lv_t liveness info (passed through to
 *               rematerialize_or_move)
 */
static void fix_flags_walker(ir_node *block, void *env)
	/* current live flag producer while walking upwards, NULL if none */
	ir_node *flags_needed = NULL;
	/* consumers of flags_needed, linked through their irn_link field */
	ir_node *flag_consumers = NULL;

	sched_foreach_reverse(block, node) {
		ir_node *new_flags_needed = NULL;

		/* reached the producer itself: the chain is satisfied */
		if(node == flags_needed) {
			flag_consumers = NULL;

		/* test whether node destroys the flags */
		if(flags_needed != NULL && is_modify_flags(node)) {
			/* the producer must be repeated/moved below this clobber */
			rematerialize_or_move(flags_needed, node, flag_consumers, pn, env);
			flag_consumers = NULL;

		/* test whether the current node needs flags */
		arity = get_irn_arity(node);
		for(i = 0; i < arity; ++i) {
			const arch_register_class_t *cls = arch_get_irn_reg_class(node, i);
			if(cls == flag_class) {
				/* at most one flag input per node is supported */
				assert(new_flags_needed == NULL);
				new_flags_needed = get_irn_n(node, i);

		if(new_flags_needed == NULL)

		/* spiller can't (correctly) remat flag consumers at the moment */
		assert(!arch_irn_is(node, rematerializable));

		/* consumer of a different producer than the current chain? */
		if(skip_Proj(new_flags_needed) != flags_needed) {
			if(flags_needed != NULL) {
				/* rematerialize node */
				rematerialize_or_move(flags_needed, node, flag_consumers, pn, env);
				flag_consumers = NULL;

			/* start a new chain for this producer */
			flags_needed = new_flags_needed;
			arch_set_irn_register(flags_needed, flags_reg);
			/* track the producer itself plus the Proj number of its result */
			if(is_Proj(flags_needed)) {
				pn = get_Proj_proj(flags_needed);
				flags_needed = get_Proj_pred(flags_needed);
			flag_consumers = node;
			set_irn_link(flag_consumers, NULL);
			assert(arch_irn_is(flags_needed, rematerializable));
			/* link all consumers in a list */
			set_irn_link(node, flag_consumers);
			flag_consumers = node;

	/* producer lives in a predecessor block: remat at block begin */
	if(flags_needed != NULL) {
		assert(get_nodes_block(flags_needed) != block);
		rematerialize_or_move(flags_needed, node, flag_consumers, pn, env);
		flag_consumers = NULL;

	assert(flags_needed == NULL);
	assert(flag_consumers == NULL);
/**
 * Public entry point: fixes the schedule of all blocks in @p birg so that
 * flag dependencies are respected (see fix_flags_walker).
 *
 * @param birg        the backend graph to fix
 * @param flag_cls    the register class modelling the flags
 * @param remat_func  rematerialization callback; NOTE(review): the guard
 *                    selecting between remat_func and default_remat is not
 *                    visible in this truncated view — confirm that
 *                    default_remat is only used when remat_func is NULL.
 */
void be_sched_fix_flags(be_irg_t *birg, const arch_register_class_t *flag_cls,
                        func_rematerialize remat_func)
	ir_graph *irg = be_get_birg_irg(birg);

	/* publish the configuration in the module-static state */
	flag_class = flag_cls;
	flags_reg  = & flag_class->regs[0];
	remat      = &default_remat;

	/* fix_flags_walker uses irn_link to chain flag consumers */
	ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK);
	irg_block_walk_graph(irg, fix_flags_walker, NULL, birg->lv);
	ir_free_resources(irg, IR_RESOURCE_IRN_LINK);

	/* rematerialisation may have left the original producers unused */
	be_remove_dead_nodes_from_schedule(birg);