2 * Copyright (C) 1995-2008 University of Karlsruhe. All right reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Primitive list scheduling with different node selectors.
23 * @author Sebastian Hack
41 #include "iredges_t.h"
46 #include "irprintf_t.h"
55 #include "belistsched.h"
61 #include "lc_opts_enum.h"
63 DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL);
/**
 * Per-node bookkeeping kept by the list scheduler.
 */
68 typedef struct sched_irn_t {
69 unsigned num_not_sched_user; /**< The number of not yet scheduled users of this node */
70 unsigned already_sched : 1; /**< Set once this node has been placed into the schedule */
/**
 * Scheduling environment shared by all blocks of one graph.
 */
76 typedef struct sched_env_t {
77 sched_irn_t *sched_info; /**< scheduling info per node, indexed by node index */
78 const list_sched_selector_t *selector; /**< The node selector in use. */
79 void *selector_env; /**< Opaque per-graph state handed to the selector callbacks. */
/**
 * Environment for scheduling a single basic block.
 */
85 typedef struct block_sched_env_t {
86 sched_irn_t *sched_info; /**< scheduling info per node, copied from the global scheduler object */
87 ir_nodeset_t cands; /**< the set of candidates (nodes currently ready to schedule) */
88 ir_node *block; /**< the block currently being scheduled */
89 sched_env_t *sched_env; /**< the global scheduler environment */
90 ir_nodeset_t live; /**< simple liveness set maintained during scheduling */
91 const list_sched_selector_t *selector; /**< shortcut to sched_env->selector */
92 void *selector_block_env; /**< per-block state returned by selector->init_block() */
/**
 * Returns non-zero if the node has already been scheduled.
 * @param env the block scheduling environment
 * @param n   the node to query
 */
98 static inline int is_already_scheduled(block_sched_env_t *env, ir_node *n)
100 unsigned const idx = get_irn_idx(n);
102 assert(idx < ARR_LEN(env->sched_info));
103 return env->sched_info[idx].already_sched;
/**
 * Marks the node as already scheduled.
 * @param env the block scheduling environment
 * @param n   the node to mark
 */
109 static inline void set_already_scheduled(block_sched_env_t *env, ir_node *n)
111 unsigned const idx = get_irn_idx(n);
113 assert(idx < ARR_LEN(env->sched_info));
114 env->sched_info[idx].already_sched = 1;
/* Forward declaration, used by make_ready() below. */
117 static void add_to_sched(block_sched_env_t *env, ir_node *irn);

/**
 * Try to put a node into the ready set (the scheduling candidates).
 * @param env The block scheduler environment.
 * @param pred The previously scheduled node.
 * @param irn The node to make ready.
 * @return 1, if the node could be made ready, 0 else.
 */
126 static inline int make_ready(block_sched_env_t *env, ir_node *pred, ir_node *irn)
130 /* Blocks cannot be scheduled; nodes without any users need no slot. */
131 if (is_Block(irn) || get_irn_n_edges(irn) == 0)
/*
 * Check if the given node lives in a different block than the one
 * currently being scheduled.  If so, do not make the node ready here.
 */
138 if (env->block != get_nodes_block(irn))
/* Walk all data inputs and additional dependencies of irn. */
141 for (i = 0, n = get_irn_ins_or_deps(irn); i < n; ++i) {
142 ir_node *op = get_irn_in_or_dep(irn, i);
144 /* if irn is an End we have keep-alives and op might be a block, skip that */
150 /* If the operand is local to the scheduled block and not yet
151 * scheduled, this node cannot be made ready, so exit. */
152 if (! is_already_scheduled(env, op) && get_nodes_block(op) == env->block)
/* Nodes that never appear in a schedule are emitted right away. */
156 if (! to_appear_in_schedule(irn)) {
157 add_to_sched(env, irn);
158 DB((dbg, LEVEL_3, "\tmaking immediately available: %+F\n", irn));
/* Otherwise the node becomes a scheduling candidate. */
160 ir_nodeset_insert(&env->cands, irn);
162 /* Notify the selector about the now-ready node. */
163 if (env->selector->node_ready)
164 env->selector->node_ready(env->selector_block_env, irn, pred);
166 DB((dbg, LEVEL_2, "\tmaking ready: %+F\n", irn));
/**
 * Try to make all users of a node ready.
 * A user can only be made ready if all of its operands have already been
 * scheduled; this is checked by make_ready().
 * @param env The block schedule environment.
 * @param irn The node whose users (successors) are to be made ready.
 */
179 static void make_users_ready(block_sched_env_t *env, ir_node *irn)
181 const ir_edge_t *edge;
183 /* make all data users ready */
184 foreach_out_edge(irn, edge) {
185 ir_node *user = get_edge_src_irn(edge);
188 make_ready(env, irn, user);
191 /* and the dependency users (EDGE_KIND_DEP) as well */
192 foreach_out_edge_kind(irn, edge, EDGE_KIND_DEP) {
193 ir_node *user = get_edge_src_irn(edge);
196 make_ready(env, irn, user);
/**
 * Returns the number of users of @p n that have not been scheduled yet.
 */
203 static inline int get_irn_not_sched_user(block_sched_env_t *env, ir_node *n)
205 unsigned const idx = get_irn_idx(n);
207 assert(idx < ARR_LEN(env->sched_info));
208 return env->sched_info[idx].num_not_sched_user;
/**
 * Sets the number of not yet scheduled users of @p n to @p num.
 */
214 static inline void set_irn_not_sched_user(block_sched_env_t *env, ir_node *n, int num)
216 unsigned const idx = get_irn_idx(n);
218 assert(idx < ARR_LEN(env->sched_info));
219 env->sched_info[idx].num_not_sched_user = num;
/**
 * Adds @p num (may be negative) to the number of not yet scheduled users
 * of @p n and returns the resulting count.
 */
225 static inline int add_irn_not_sched_user(block_sched_env_t *env, ir_node *n, int num)
227 unsigned const idx = get_irn_idx(n);
229 assert(idx < ARR_LEN(env->sched_info));
230 env->sched_info[idx].num_not_sched_user += num;
231 return env->sched_info[idx].num_not_sched_user;
/**
 * Returns the number of users of a node that have a datab mode
 * (see mode_is_datab()).  For mode_T nodes the users of the Proj
 * nodes are counted instead, recursing into nested mode_T Projs.
 */
237 static int get_num_successors(ir_node *irn)
240 const ir_edge_t *edge;
242 if (get_irn_mode(irn) == mode_T) {
243 /* for mode_T nodes: count the users of all Projs */
244 foreach_out_edge(irn, edge) {
245 ir_node *proj = get_edge_src_irn(edge);
246 ir_mode *mode = get_irn_mode(proj);
/* nested mode_T Projs are counted recursively */
248 if (mode == mode_T) {
249 sum += get_num_successors(proj);
250 } else if (mode_is_datab(mode)) {
251 sum += get_irn_n_edges(proj);
256 /* do not count keep-alive edges (users that are the End node) */
257 foreach_out_edge(irn, edge) {
258 if (get_irn_opcode(get_edge_src_irn(edge)) != iro_End)
/**
 * Adds @p irn to the live set, tells all of its inputs that one more of
 * their users has been scheduled, and counts irn's not yet scheduled users.
 */
270 static void update_sched_liveness(block_sched_env_t *env, ir_node *irn)
/* Visit all data inputs and dependencies of irn. */
278 for (i = get_irn_ins_or_deps(irn) - 1; i >= 0; --i) {
279 ir_node *in = get_irn_in_or_dep(irn, i);
281 /* if in is a proj: update predecessor */
284 /* if in is still in the live set: reduce number of users by one */
285 if (ir_nodeset_contains(&env->live, in)) {
286 if (add_irn_not_sched_user(env, in, -1) <= 0)
287 ir_nodeset_remove(&env->live, in);
/*
 * get_num_successors() returns the number of ALL users, including users
 * in other blocks.  Since each block is scheduled separately, the
 * liveness info of those foreign users is never updated, so they keep
 * up the register pressure — which is the desired behaviour.
 */
297 i = get_num_successors(irn);
299 set_irn_not_sched_user(env, irn, i);
300 ir_nodeset_insert(&env->live, irn);
/**
 * Append an instruction to the schedule of the current block.
 * (Note: unlike an older version of this comment claimed, the function
 * returns nothing — it is void.)
 * @param env The block scheduling environment.
 * @param irn The node to add to the schedule.
 */
310 static void add_to_sched(block_sched_env_t *env, ir_node *irn)
312 /* If the node consumes/produces data, it is appended to the schedule
313 * list, otherwise, it is not put into the list */
314 if (to_appear_in_schedule(irn)) {
315 update_sched_liveness(env, irn);
316 sched_add_before(env->block, irn);
318 DBG((dbg, LEVEL_2, "\tadding %+F\n", irn));
320 /* Remove the node from the ready set */
321 ir_nodeset_remove(&env->cands, irn);
324 /* notify the selector about the finally selected node. */
325 if (env->selector->node_selected)
326 env->selector->node_selected(env->selector_block_env, irn);
328 /* Mark the node as scheduled. */
329 set_already_scheduled(env, irn);
/* Scheduling irn may have made some of its users ready. */
331 make_users_ready(env, irn);
/**
 * Perform list scheduling on a single block.
 *
 * Note that the caller must have computed a linked list of the nodes in
 * the block using the link field before calling this function.
 * Also the out-edges must have been computed.
 *
 * @param block   The block node.
 * @param env_ptr Scheduling environment (a sched_env_t*, passed as void*
 *                because this is an irg_block_walk_graph() callback).
 */
345 static void list_sched_block(ir_node *block, void *env_ptr)
347 sched_env_t *env = (sched_env_t*)env_ptr;
348 const list_sched_selector_t *selector = env->selector;
350 block_sched_env_t be;
351 const ir_edge_t *edge;
355 /* Initialize the block's list head that will hold the schedule. */
356 sched_init_block(block);
358 /* Initialize the block scheduling environment */
359 be.sched_info = env->sched_info;
/* Size the sets by the block's out-edge count as a capacity estimate. */
361 ir_nodeset_init_size(&be.cands, get_irn_n_edges(block));
362 ir_nodeset_init_size(&be.live, get_irn_n_edges(block));
363 be.selector = selector;
366 DBG((dbg, LEVEL_1, "scheduling %+F\n", block));
/* Let the selector set up its per-block state, if it wants any. */
368 if (selector->init_block)
369 be.selector_block_env = selector->init_block(env->selector_env, block);
371 /* Then one can add all nodes that are ready to the set. */
372 foreach_out_edge(block, edge) {
373 ir_node *irn = get_edge_src_irn(edge);
374 unsigned code = get_irn_opcode(irn);
377 if (code == iro_End) {
378 /* Skip the end node because of keep-alive edges. */
382 users = get_irn_n_edges(irn);
385 else if (users == 1) { /* ignore nodes that are only held by the anchor */
386 const ir_edge_t *edge = get_irn_out_edge_first_kind(irn, EDGE_KIND_NORMAL);
387 ir_node *user = get_edge_src_irn(edge);
/*
 * Phi functions are scheduled immediately, since they only
 * transfer data flow from the predecessors to this block.
 */
397 add_to_sched(&be, irn);
398 } else if (be_is_Start(irn)) {
399 /* The start node will be scheduled as the first node */
400 add_to_sched(&be, irn);
/* Other nodes must have all operands in other blocks to be made ready. */
406 /* Check, if the operands of a node are not local to this block */
407 for (j = 0, m = get_irn_ins_or_deps(irn); j < m; ++j) {
408 ir_node *operand = get_irn_in_or_dep(irn, j);
410 if (get_nodes_block(operand) == block) {
414 /* live-in values increase register pressure */
415 ir_nodeset_insert(&be.live, operand);
419 /* Make the node ready, if all operands live in a foreign block */
421 DBG((dbg, LEVEL_2, "\timmediately ready: %+F\n", irn));
422 make_ready(&be, NULL, irn);
427 /* Iterate until no candidates remain. */
428 while (ir_nodeset_size(&be.cands) > 0) {
429 ir_nodeset_iterator_t iter;
431 /* Keeps must be scheduled immediately */
432 foreach_ir_nodeset(&be.cands, irn, iter) {
433 if (be_is_Keep(irn) || be_is_CopyKeep(irn) || is_Sync(irn)) {
/* Otherwise let the selector pick the next node from the candidates. */
440 irn = be.selector->select(be.selector_block_env, &be.cands, &be.live);
443 DB((dbg, LEVEL_2, "\tpicked node %+F\n", irn));
445 /* Add the node to the schedule. */
446 add_to_sched(&be, irn);
448 /* remove the scheduled node from the ready list. */
449 ir_nodeset_remove(&be.cands, irn);
/* Let the selector tear down its per-block state. */
452 if (selector->finish_block)
453 selector->finish_block(be.selector_block_env);
455 ir_nodeset_destroy(&be.cands);
456 ir_nodeset_destroy(&be.live);
/**
 * List-schedule a whole graph: walks all blocks and schedules each one
 * separately via list_sched_block().
 * @param irg      the graph to schedule
 * @param selector the node selector implementation to use
 */
460 void be_list_sched_graph(ir_graph *irg, const list_sched_selector_t *selector)
466 /* Matze: This is very slow, we should avoid it to improve backend speed,
467 * we just have to make sure that we have no dangling out-edges at this
471 /* Assure, that we have no dangling out-edges to deleted stuff */
472 edges_deactivate(irg);
/* One sched_irn_t slot per node index in the graph. */
476 num_nodes = get_irg_last_idx(irg);
478 /* initialize environment for list scheduler */
479 memset(&env, 0, sizeof(env));
480 env.selector = selector;
481 env.sched_info = NEW_ARR_F(sched_irn_t, num_nodes);
483 memset(env.sched_info, 0, num_nodes * sizeof(env.sched_info[0]));
/* Give the selector a chance to set up per-graph state. */
485 if (selector->init_graph != NULL)
486 env.selector_env = selector->init_graph(irg);
488 /* Schedule each single block. */
489 irg_block_walk_graph(irg, list_sched_block, NULL, &env);
491 if (selector->finish_graph != NULL)
492 selector->finish_graph(env.selector_env);
494 DEL_ARR_F(env.sched_info);
497 BE_REGISTER_MODULE_CONSTRUCTOR(be_init_listsched);
498 void be_init_listsched(void)
500 FIRM_DBG_REGISTER(dbg, "firm.be.sched");