* @author Christoph Mallon
* @version $Id: $
*/
-#ifdef HAVE_CONFIG_H
#include "config.h"
-#endif
#include "iroptimize.h"
-#include "array.h"
+#include "array_t.h"
#include "debug.h"
#include "ircons.h"
#include "irgraph.h"
#include "irdump.h"
#include "irflag_t.h"
#include "irprintf.h"
+#include "irpass.h"
+#if 0
#define OPTIMISE_LOAD_AFTER_LOAD
}
+static int in_cmp(void const* va, void const* vb)
+{
+ ir_node const* const a = *(ir_node const*const*)va;
+ ir_node const* const b = *(ir_node const*const*)vb;
+ return get_irn_idx(a) - get_irn_idx(b);
+}
+
+
static ir_node* GenerateSync(ir_graph* irg, ir_node* block, ir_nodeset_t* after_set)
{
size_t set_size = ir_nodeset_size(after_set);
for (i = 0; i < set_size; i++) {
in[i] = ir_nodeset_iterator_next(&iter);
}
+ qsort(in, set_size, sizeof(*in), in_cmp);
return new_r_Sync(irg, block, set_size, in);
}
}
if (block != last_block) {
DB((dbg, LEVEL_3, "===> Changing block from %+F to %+F\n", last_block, block));
block_change = 1;
- if (Block_not_block_visited(block)) {
+ if (!Block_block_visited(block)) {
mark_Block_block_visited(block);
} else {
DB((dbg, LEVEL_2, "===> Hit already visited block at %+F\n", node));
ir_node* unknown;
DB((dbg, LEVEL_3, "===> The predecessor was not finished yet\n"));
- assert(!Block_not_block_visited(pred_block));
+ assert(Block_block_visited(pred_block));
unknown = new_r_Unknown(irg, mode_M);
for (i = 0; i < count_addrs; i++) {
size_t npreds = get_Block_n_cfgpreds(end_block);
size_t i;
- unfinished_phis = xmalloc(sizeof(*unfinished_phis) * count_addrs);
+ unfinished_phis = XMALLOCN(ir_node, count_addrs);
for (i = 0; i < count_addrs; i++) {
unfinished_phis[i] = NULL;
}
#endif
+#if 0
static void AddSyncPreds(ir_nodeset_t* preds, ir_node* sync)
{
size_t n = get_Sync_n_preds(sync);
}
}
-#if 0
static void NormaliseSync(ir_node* node, void* env)
{
ir_nodeset_t preds;
static void parallelise_load(parallelise_info *pi, ir_node *irn)
{
+ /* There is no point in investigating the same subgraph twice */
+ if (ir_nodeset_contains(&pi->user_mem, irn))
+ return;
+
//ir_fprintf(stderr, "considering %+F\n", irn);
if (get_nodes_block(irn) == pi->origin_block) {
if (is_Proj(irn)) {
static void parallelise_store(parallelise_info *pi, ir_node *irn)
{
+ /* There is no point in investigating the same subgraph twice */
+ if (ir_nodeset_contains(&pi->user_mem, irn))
+ return;
+
//ir_fprintf(stderr, "considering %+F\n", irn);
if (get_nodes_block(irn) == pi->origin_block) {
if (is_Proj(irn)) {
}
n = ir_nodeset_size(&pi.user_mem);
- if (n != 0) { /* nothing happend otherwise */
+ if (n != 0) { /* nothing happened otherwise */
ir_graph *irg = current_ir_graph;
ir_node *sync;
ir_node **in;
in[i++] = p;
}
assert(i == n);
- sync = new_r_Sync(irg, block, n, in);
+ sync = new_r_Sync(block, n, in);
exchange(proj, sync);
assert(pn_Load_M == pn_Store_M);
- proj = new_r_Proj(irg, block, mem_op, mode_M, pn_Load_M);
+ proj = new_r_Proj(block, mem_op, mode_M, pn_Load_M);
set_Sync_pred(sync, 0, proj);
n = ir_nodeset_size(&pi.this_mem);
in[i++] = p;
}
assert(i == n);
- sync = new_r_Sync(irg, block, n, in);
+ sync = new_r_Sync(block, n, in);
}
set_memop_mem(mem_op, sync);
}
}
-void opt_ldst2(ir_graph *irg)
+void opt_sync(ir_graph *irg)
{
- assure_irg_address_taken_computed(irg);
- assure_irp_globals_address_taken_computed();
+ //assure_irg_entity_usage_computed(irg);
+ //assure_irp_globals_entity_usage_computed();
irg_walk_graph(irg, NULL, walker, NULL);
- //optimize_graph_df(irg);
+ //optimize_graph_df(irg);
//irg_walk_graph(irg, NormaliseSync, NULL, NULL);
}
+
+ir_graph_pass_t *opt_sync_pass(const char *name)
+{
+ return def_graph_pass(name ? name : "opt_sync", opt_sync);
+}