/*
 * Copyright (C) 1995-2007 University of Karlsruhe.  All right reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License.
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 */
/**
 * @file
 * @brief       Modifies the schedule so that flag dependencies are respected.
 * @author      Matthias Braun, Christoph Mallon
 * @version     $Id$
 *
 * Fixup schedule to respect flag constraints by moving and rematerialisation
 * of the flag producers.
 *
 * Flags are modeled as register classes with ignore registers. However to avoid
 * bloating the graph, only flag-consumer -> producer dependencies are
 * explicitly modeled in the graph. Nodes that just change the flags are only
 * marked with the arch_irn_flags_modify_flags flag.
 *
 * Flags are usually a limited resource that can't (or at least shouldn't) be
 * spilled. So in some situations (for example 2 adc-nodes that use the flags of
 * a single add node on x86) operations have to be repeated to work correctly.
 */
46 #include "iredges_t.h"
53 #include "besched_t.h"
55 static const arch_env_t *arch_env = NULL;
56 static const arch_register_class_t *flag_class = NULL;
57 static const arch_register_t *flags_reg = NULL;
58 static func_rematerialize remat = NULL;
60 static ir_node *default_remat(ir_node *node, ir_node *after)
62 ir_node *copy = exact_copy(node);
63 sched_add_after(after, copy);
69 * tests wether we can legally move node node after node after
70 * (only works for nodes in same block)
72 static int can_move(ir_node *node, ir_node *after)
74 const ir_edge_t *edge;
75 assert(get_nodes_block(node) == get_nodes_block(after));
77 /* TODO respect dep edges */
78 assert(get_irn_n_edges_kind(node, EDGE_KIND_DEP) == 0);
80 /** all users have to be after the after node */
81 foreach_out_edge(node, edge) {
82 ir_node *out = get_edge_src_irn(edge);
84 const ir_edge_t *edge2;
85 assert(get_irn_n_edges_kind(out, EDGE_KIND_DEP) == 0);
86 foreach_out_edge(out, edge2) {
87 ir_node *out2 = get_edge_src_irn(edge2);
88 /* phi represents a usage at block end */
91 if(sched_get_time_step(out2) <= sched_get_time_step(after)) {
96 /* phi represents a usage at block end */
99 if(sched_get_time_step(out) <= sched_get_time_step(after)) {
105 ir_fprintf(stderr, "Can move node %+F after node %+F\n", node, after);
109 static void rematerialize_or_move(ir_node *flags_needed, ir_node *node,
110 ir_node *flag_consumers, int pn)
116 if(can_move(flags_needed, node)) {
118 sched_remove(flags_needed);
119 sched_add_after(node, flags_needed);
123 copy = remat(flags_needed, node);
125 if(get_irn_mode(copy) == mode_T) {
126 ir_node *block = get_nodes_block(copy);
127 ir_mode *mode = flag_class->mode;
128 value = new_rd_Proj(NULL, current_ir_graph, block,
137 int arity = get_irn_arity(n);
138 for(i = 0; i < arity; ++i) {
139 ir_node *in = get_irn_n(n, i);
141 if(in == flags_needed) {
142 set_irn_n(n, i, value);
151 * walks up the schedule and makes sure there are no flag-destroying nodes
152 * between a flag-consumer -> flag-producer chain. Fixes problematic situations
153 * by moving and/or rematerialisation of the flag-producers.
154 * (This can be extended in the future to do some register allocation on targets
155 * like ppc32 where we conceptually have 8 flag registers)
157 static void fix_flags_walker(ir_node *block, void *env)
160 ir_node *flags_needed = NULL;
161 ir_node *flag_consumers = NULL;
165 sched_foreach_reverse(block, node) {
167 ir_node *new_flags_needed = NULL;
169 if(node == flags_needed) {
172 flag_consumers = NULL;
175 /* test wether node destroys the flags */
176 if(flags_needed != NULL && arch_irn_is(arch_env, node, modify_flags)) {
178 rematerialize_or_move(flags_needed, node, flag_consumers, pn);
180 flag_consumers = NULL;
183 /* test wether the current node needs flags */
184 arity = get_irn_arity(node);
185 for(i = 0; i < arity; ++i) {
186 //ir_node *in = get_irn_n(node, i);
187 const arch_register_class_t *cls
188 = arch_get_irn_reg_class(arch_env, node, i);
189 if(cls == flag_class) {
190 assert(new_flags_needed == NULL);
191 new_flags_needed = get_irn_n(node, i);
195 if(new_flags_needed == NULL)
198 if(new_flags_needed != flags_needed) {
199 if(flags_needed != NULL) {
200 /* rematerialize node */
201 rematerialize_or_move(flags_needed, node, flag_consumers, pn);
203 flag_consumers = NULL;
206 if(get_nodes_block(new_flags_needed) != block) {
207 panic("remat across blocks not implemented yet");
209 flag_consumers = NULL;
211 flags_needed = new_flags_needed;
212 arch_set_irn_register(arch_env, flags_needed, flags_reg);
213 if(is_Proj(flags_needed)) {
214 pn = get_Proj_proj(flags_needed);
215 flags_needed = get_Proj_pred(flags_needed);
217 flag_consumers = node;
218 set_irn_link(flag_consumers, NULL);
219 assert(arch_irn_is(arch_env, flags_needed, rematerializable));
222 /* link all consumers in a list */
223 set_irn_link(flag_consumers, node);
224 flag_consumers = node;
228 assert(flags_needed == NULL);
229 assert(flag_consumers == NULL);
232 void be_sched_fix_flags(be_irg_t *birg, const arch_register_class_t *flag_cls,
233 func_rematerialize remat_func)
235 ir_graph *irg = be_get_birg_irg(birg);
237 arch_env = be_get_birg_arch_env(birg);
238 flag_class = flag_cls;
239 flags_reg = & flag_class->regs[0];
242 remat = &default_remat;
244 set_using_irn_link(irg);
245 irg_block_walk_graph(irg, fix_flags_walker, NULL, NULL);
246 clear_using_irn_link(irg);