* @brief Backend IRG modification routines.
* @author Sebastian Hack, Daniel Grund, Matthias Braun, Christian Wuerdig
* @date 04.05.2005
- * @version $Id$
*
* This file contains the following IRG modifications for be routines:
* - insertion of Perm nodes
* - empty block elimination
* - a simple dead node elimination (set inputs of unreachable nodes to BAD)
*/
-#ifdef HAVE_CONFIG_H
#include "config.h"
-#endif
#include <stdlib.h>
#include "irgraph_t.h"
#include "irgopt.h"
#include "irgmod.h"
-#include "irprintf_t.h"
+#include "irprintf.h"
#include "irgwalk.h"
#include "be_t.h"
#include "bechordal_t.h"
-#include "bearch_t.h"
-#include "besched_t.h"
+#include "bearch.h"
+#include "besched.h"
#include "belive_t.h"
-#include "benode_t.h"
+#include "benode.h"
#include "beutil.h"
#include "beinsn_t.h"
#include "bessaconstr.h"
-#include "beirg_t.h"
+#include "beirg.h"
#include "beirgmod.h"
#include "bemodule.h"
DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
+static int cmp_node_nr(const void *a, const void *b)
+{
+ ir_node **p1 = (ir_node**)a;
+ ir_node **p2 = (ir_node**)b;
+ long n1 = get_irn_node_nr(*p1);
+ long n2 = get_irn_node_nr(*p2);
+ return (n1>n2) - (n1<n2);
+}
+
/*
___ _ ____
|_ _|_ __ ___ ___ _ __| |_ | _ \ ___ _ __ _ __ ___
*/
-ir_node *insert_Perm_after(be_irg_t *birg,
- const arch_register_class_t *cls,
+ir_node *insert_Perm_before(ir_graph *irg, const arch_register_class_t *cls,
ir_node *pos)
{
- const arch_env_t *arch_env = birg->main_env->arch_env;
- be_lv_t *lv = birg->lv;
- ir_node *bl = is_Block(pos) ? pos : get_nodes_block(pos);
- ir_graph *irg = get_irn_irg(bl);
- ir_nodeset_t live;
- ir_nodeset_iterator_t iter;
-
- ir_node *curr, *irn, *perm, **nodes;
+ be_lv_t *lv = be_get_irg_liveness(irg);
+ ir_nodeset_t live;
+
+ ir_node *perm, **nodes;
size_t i, n;
- DBG((dbg, LEVEL_1, "Insert Perm after: %+F\n", pos));
+ DBG((dbg, LEVEL_1, "Insert Perm before: %+F\n", pos));
ir_nodeset_init(&live);
- be_liveness_nodes_live_at(lv, arch_env, cls, pos, &live);
+ be_liveness_nodes_live_before(lv, cls, pos, &live);
n = ir_nodeset_size(&live);
- if(n == 0) {
+ if (n == 0) {
ir_nodeset_destroy(&live);
return NULL;
}
i++;
}
ir_nodeset_destroy(&live);
+ /* make the input order deterministic */
+ qsort(nodes, n, sizeof(nodes[0]), cmp_node_nr);
- perm = be_new_Perm(cls, irg, bl, n, nodes);
- sched_add_after(pos, perm);
+ ir_node *const bl = get_nodes_block(pos);
+ perm = be_new_Perm(cls, bl, n, nodes);
+ sched_add_before(pos, perm);
free(nodes);
- curr = perm;
for (i = 0; i < n; ++i) {
ir_node *perm_op = get_irn_n(perm, i);
- const arch_register_t *reg = arch_get_irn_register(arch_env, perm_op);
be_ssa_construction_env_t senv;
ir_mode *mode = get_irn_mode(perm_op);
- ir_node *proj = new_r_Proj(irg, bl, perm, mode, i);
- arch_set_irn_register(arch_env, proj, reg);
-
- curr = proj;
+ ir_node *proj = new_r_Proj(perm, mode, i);
- be_ssa_construction_init(&senv, birg);
+ be_ssa_construction_init(&senv, irg);
be_ssa_construction_add_copy(&senv, perm_op);
be_ssa_construction_add_copy(&senv, proj);
be_ssa_construction_fix_users(&senv, perm_op);
*/
static void remove_empty_block(ir_node *block)
{
- const ir_edge_t *edge, *next;
- int i, arity;
- ir_node *node;
- ir_node *pred;
- ir_node *succ_block;
- ir_node *jump = NULL;
+ int i;
+ int arity;
+ ir_node *pred;
+ ir_node *succ_block;
+ ir_node *jump = NULL;
+ ir_graph *irg = get_irn_irg(block);
+ ir_entity *entity;
if (irn_visited_else_mark(block))
return;
goto check_preds;
sched_foreach(block, node) {
- if (! is_Jmp(node))
+ if (! is_Jmp(node)
+ && !(arch_get_irn_flags(node) & arch_irn_flags_simple_jump))
goto check_preds;
if (jump != NULL) {
/* we should never have 2 jumps in a block */
if (jump == NULL)
goto check_preds;
+ entity = get_Block_entity(block);
pred = get_Block_cfgpred(block, 0);
succ_block = NULL;
- foreach_out_edge_safe(jump, edge, next) {
+ foreach_out_edge_safe(jump, edge) {
int pos = get_edge_src_pos(edge);
assert(succ_block == NULL);
succ_block = get_edge_src_irn(edge);
+ if (get_Block_entity(succ_block) != NULL && entity != NULL) {
+			/*
+			 * Currently we can add only one label for a block.
+			 * Therefore we cannot combine them if both blocks already have one.
+			 */
+ goto check_preds;
+ }
set_irn_n(succ_block, pos, pred);
}
+ if (entity != NULL) {
+ /* move the label to the successor block */
+ set_Block_entity(succ_block, entity);
+ }
+
	/* there can be some non-scheduled Pin nodes left in the block; they are
	 * dealt with below (Pins get killed, a Sync must not appear here) */
- foreach_out_edge_safe(block, edge, next) {
- node = get_edge_src_irn(edge);
+ foreach_out_edge_safe(block, edge) {
+ ir_node *const node = get_edge_src_irn(edge);
- if(node == jump)
- continue;
- if (is_Block(node)) {
- /* a Block->Block edge: This should be the MacroBlock
- edge, ignore it. */
- assert(get_Block_MacroBlock(node) == block && "Wrong Block->Block edge");
+ if (node == jump)
continue;
- }
+ /* we simply kill Pins, because there are some strange interactions
+ * between jump threading, which produce PhiMs with Pins, we simply
+ * kill the pins here, everything is scheduled anyway */
if (is_Pin(node)) {
- set_nodes_block(node, succ_block);
+ exchange(node, get_Pin_op(node));
continue;
}
if (is_Sync(node)) {
panic("Unexpected node %+F in block %+F with empty schedule", node, block);
}
- set_Block_cfgpred(block, 0, new_Bad());
+ set_Block_cfgpred(block, 0, new_r_Bad(irg, mode_X));
kill_node(jump);
blocks_removed = 1;
check_preds:
arity = get_Block_n_cfgpreds(block);
- for(i = 0; i < arity; ++i) {
+ for (i = 0; i < arity; ++i) {
ir_node *pred = get_Block_cfgpred_block(block, i);
remove_empty_block(pred);
}
remove_empty_block(get_irg_end_block(irg));
end = get_irg_end(irg);
arity = get_irn_arity(end);
- for(i = 0; i < arity; ++i) {
+ for (i = 0; i < arity; ++i) {
ir_node *pred = get_irn_n(end, i);
- if(!is_Block(pred))
+ if (!is_Block(pred))
continue;
remove_empty_block(pred);
}
if (blocks_removed) {
/* invalidate analysis info */
- set_irg_doms_inconsistent(irg);
- set_irg_extblk_inconsistent(irg);
- set_irg_outs_inconsistent(irg);
- set_irg_loopinfo_inconsistent(irg);
+ clear_irg_properties(irg, IR_GRAPH_PROPERTY_CONSISTENT_DOMINANCE);
}
return blocks_removed;
}
+//---------------------------------------------------------------------------
+
/**
 * Environment shared by the mark and sweep walkers of
 * be_remove_dead_nodes_from_schedule().
 */
typedef struct remove_dead_nodes_env_t_ {
	bitset_t *reachable; /**< one bit per node index; set iff the graph walk reached the node */
	ir_graph *irg;       /**< the graph being processed (not read by the walkers themselves) */
	be_lv_t  *lv;        /**< liveness info to keep up to date while deleting; may be NULL */
} remove_dead_nodes_env_t;
+
+/**
+ * Post-walker: remember all visited nodes in a bitset.
+ */
+static void mark_dead_nodes_walker(ir_node *node, void *data)
+{
+ remove_dead_nodes_env_t *env = (remove_dead_nodes_env_t*) data;
+ bitset_set(env->reachable, get_irn_idx(node));
+}
+
+/**
+ * Post-block-walker:
+ * Walk through the schedule of every block and remove all dead nodes from it.
+ */
+static void remove_dead_nodes_walker(ir_node *block, void *data)
+{
+ remove_dead_nodes_env_t *env = (remove_dead_nodes_env_t*) data;
+ ir_node *node, *next;
+
+ for (node = sched_first(block); ! sched_is_end(node); node = next) {
+ /* get next node now, as after calling sched_remove it will be invalid */
+ next = sched_next(node);
+
+ if (bitset_is_set(env->reachable, get_irn_idx(node)))
+ continue;
+
+ if (env->lv != NULL)
+ be_liveness_remove(env->lv, node);
+ sched_remove(node);
+
+ /* kill projs */
+ if (get_irn_mode(node) == mode_T) {
+ foreach_out_edge_safe(node, edge) {
+ ir_node *proj = get_edge_src_irn(edge);
+ if (!is_Proj(proj))
+ continue;
+ if (env->lv != NULL)
+ be_liveness_remove(env->lv, proj);
+ kill_node(proj);
+ }
+ }
+ kill_node(node);
+ }
+}
+
+void be_remove_dead_nodes_from_schedule(ir_graph *irg)
+{
+ remove_dead_nodes_env_t env;
+ env.reachable = bitset_alloca(get_irg_last_idx(irg));
+ env.lv = be_get_irg_liveness(irg);
+ env.irg = irg;
+
+ // mark all reachable nodes
+ irg_walk_graph(irg, mark_dead_nodes_walker, NULL, &env);
+
+ // walk schedule and remove non-marked nodes
+ irg_block_walk_graph(irg, remove_dead_nodes_walker, NULL, &env);
+}
+
+BE_REGISTER_MODULE_CONSTRUCTOR(be_init_irgmod)
/**
 * Module constructor: registers the debug channel used by the DBG()
 * statements in this file.
 */
void be_init_irgmod(void)
{
	FIRM_DBG_REGISTER(dbg, "firm.be.irgmod");
}
-
-BE_REGISTER_MODULE_CONSTRUCTOR(be_init_irgmod);