* edge.
*/
#define foreach_out_edge_kind(irn, edge, kind) \
- for(edge = get_irn_out_edge_first_kind(irn, kind); edge; edge = get_irn_out_edge_next(irn, edge))
+ for (ir_edge_t const *edge = get_irn_out_edge_first_kind(irn, kind); edge; edge = get_irn_out_edge_next(irn, edge))
/**
 * A convenience iteration macro over all out edges of a node, which is safe
 * against alteration of the edge set during iteration.
*
* @param irn The node.
* @param edge An ir_edge_t pointer which shall be set to the current edge.
- * @param ne The next edge, enables alteration safe edge processing.
* @param kind The kind of the edge.
*/
-#define foreach_out_edge_kind_safe(irn, edge, ne, kind) \
- for((edge) = (get_irn_out_edge_first_kind(irn, kind)), (ne) = ((edge) ? (get_irn_out_edge_next(irn, edge)) : NULL); \
- edge; (edge) = (ne), (ne) = ((edge) ? (get_irn_out_edge_next(irn, edge)) : NULL))
+#define foreach_out_edge_kind_safe(irn, edge, kind) \
+ for (ir_edge_t const *edge = get_irn_out_edge_first_kind((irn), (kind)), *edge##__next; \
+ edge ? edge##__next = get_irn_out_edge_next((irn), edge), 1 : 0; \
+ edge = edge##__next)
/**
* Convenience macro for normal out edges.
*/
-#define foreach_out_edge(irn, edge) foreach_out_edge_kind(irn, edge, EDGE_KIND_NORMAL)
+#define foreach_out_edge(irn, edge) foreach_out_edge_kind(irn, edge, EDGE_KIND_NORMAL)
/**
* Convenience macro for normal out edges.
*/
-#define foreach_out_edge_safe(irn, edge, tmp) foreach_out_edge_kind_safe(irn, edge, tmp, EDGE_KIND_NORMAL)
+#define foreach_out_edge_safe(irn, edge) foreach_out_edge_kind_safe(irn, edge, EDGE_KIND_NORMAL)
/**
* A convenience iteration macro for all control flow edges.
*/
-#define foreach_block_succ(bl, edge) foreach_out_edge_kind(bl, edge, EDGE_KIND_BLOCK)
+#define foreach_block_succ(bl, edge) foreach_out_edge_kind(bl, edge, EDGE_KIND_BLOCK)
/**
* Returns the source node of an edge.
static void irg_cfg_succ_grow_succs(void *self, void *node, struct obstack *obst)
{
ir_node *bl = (ir_node*) node;
- const ir_edge_t *edge;
(void) self;
foreach_block_succ(bl, edge) {
static ir_node **compute_df(ir_node *blk, ir_dom_front_info_t *info)
{
ir_node *c;
- const ir_edge_t *edge;
ir_node **df_list = NEW_ARR_F(ir_node *, 0);
ir_node **df;
size_t len;
*/
static double get_cf_probability(ir_node *bb, int pos, double loop_weight)
{
- double sum = 0.0;
- double cur = 1.0;
- double inv_loop_weight = 1./loop_weight;
- const ir_node *pred = get_Block_cfgpred_block(bb, pos);
- const ir_loop *pred_loop;
- int pred_depth;
- const ir_edge_t *edge;
- const ir_loop *loop;
- int depth;
- int d;
+ double sum = 0.0;
+ double cur = 1.0;
+ double inv_loop_weight = 1./loop_weight;
+ const ir_node *pred = get_Block_cfgpred_block(bb, pos);
+ const ir_loop *pred_loop;
+ int pred_depth;
+ const ir_loop *loop;
+ int depth;
+ int d;
if (is_Bad(pred))
return 0;
{
irn_height_t *ih = get_height_data(h, irn);
- const ir_edge_t *edge;
-
/* bail out if we already visited that node. */
if (ih->visited >= h->visited)
return ih->height;
static unsigned compute_heights_in_block(ir_node *bl, ir_heights_t *h)
{
- int max_height = -1;
- const ir_edge_t *edge;
+ int max_height = -1;
h->visited++;
unsigned heights_recompute_block(ir_heights_t *h, ir_node *block)
{
ir_graph *irg = get_irn_irg(block);
- const ir_edge_t *edge;
assure_edges(irg);
*/
static void handle_case(ir_node *block, ir_node *switchn, long pn, env_t *env)
{
- ir_node *c = NULL;
- ir_node *selector = get_Switch_selector(switchn);
- const ir_edge_t *edge;
- const ir_edge_t *next;
+ ir_node *c = NULL;
+ ir_node *selector = get_Switch_selector(switchn);
	/* we can't do useful things with the default label */
if (pn == pn_Switch_default)
return;
- foreach_out_edge_safe(selector, edge, next) {
+ foreach_out_edge_safe(selector, edge) {
ir_node *succ = get_edge_src_irn(edge);
int pos = get_edge_src_pos(edge);
ir_node *blk = get_effective_use_block(succ, pos);
{
ir_node *cond, *old, *other_blk = NULL, *con = NULL;
ir_node *c_b = NULL, *c_o = NULL;
- const ir_edge_t *edge, *next;
- foreach_out_edge_safe(selector, edge, next) {
+ foreach_out_edge_safe(selector, edge) {
ir_node *user = get_edge_src_irn(edge);
int pos = get_edge_src_pos(edge);
ir_node *user_blk = get_effective_use_block(user, pos);
ir_node *right = get_Cmp_right(cmp);
ir_node *cond_block;
ir_op *op;
- const ir_edge_t *edge, *next;
/* Beware of Bads */
if (is_Bad(left) || is_Bad(right))
*/
if (rel == ir_relation_equal) {
cond_block = get_Block_cfgpred_block(block, 0);
- foreach_out_edge_safe(left, edge, next) {
+ foreach_out_edge_safe(left, edge) {
ir_node *user = get_edge_src_irn(edge);
int pos = get_edge_src_pos(edge);
ir_node *blk = get_effective_use_block(user, pos);
* left == Const and we found a movable user of left in a
* dominator of the Cond block
*/
- const ir_edge_t *user_edge;
- const ir_edge_t *user_next;
- foreach_out_edge_safe(user, user_edge, user_next) {
+ foreach_out_edge_safe(user, user_edge) {
ir_node *usr_of_usr = get_edge_src_irn(user_edge);
int npos = get_edge_src_pos(user_edge);
ir_node *user_blk = get_effective_use_block(usr_of_usr, npos);
} else { /* not ir_relation_equal cases */
ir_node *c = NULL;
- foreach_out_edge_safe(left, edge, next) {
+ foreach_out_edge_safe(left, edge) {
ir_node *succ = get_edge_src_irn(edge);
int pos = get_edge_src_pos(edge);
ir_node *blk = get_effective_use_block(succ, pos);
ir_node *rc = NULL;
rel = get_inversed_relation(rel);
- foreach_out_edge_safe(right, edge, next) {
+ foreach_out_edge_safe(right, edge) {
ir_node *succ = get_edge_src_irn(edge);
int pos;
ir_node *blk;
*/
static void insert_non_null(ir_node *ptr, ir_node *block, env_t *env)
{
- const ir_edge_t *edge, *next;
- ir_node *c = NULL;
+ ir_node *c = NULL;
- foreach_out_edge_safe(ptr, edge, next) {
+ foreach_out_edge_safe(ptr, edge) {
ir_node *succ = get_edge_src_irn(edge);
int pos;
ir_node *blk;
const ir_node *bl = (const ir_node*) dfs_get_post_num_node(lv->dfs, i);
bl_info_t *bi = get_block_info(lv, bl);
- const ir_edge_t *edge;
-
bitset_set(bi->red_reachable, bi->id);
foreach_block_succ (bl, edge) {
ir_node *succ = get_edge_src_irn(edge);
/* iterate over them ... */
bitset_foreach(tmp, elm) {
bl_info_t *si = lv->map[elm];
- const ir_edge_t *edge;
/* and find back edge targets which are not reduced reachable from bl */
foreach_block_succ (si->block, edge) {
const ir_node *bl = (const ir_node*) dfs_get_post_num_node(lv->dfs, i);
bl_info_t *bi = get_block_info(lv, bl);
- const ir_edge_t *edge;
-
if (!bitset_is_set(lv->back_edge_tgt, bi->id)) {
foreach_block_succ (bl, edge) {
ir_node *succ = get_edge_src_irn(edge);
* the algorithm is simple. Just check for uses not inside this block.
*/
if (def_bl == bl) {
- const ir_edge_t *edge;
-
stat_ev("lv_chk_def_block");
DBG((lv->dbg, LEVEL_2, "lv check same block %+F in %+F\n", var, bl));
foreach_out_edge (var, edge) {
size_t i;
unsigned min_dom, max_dom;
- const ir_edge_t *edge;
/* if the block has no DFS info, it cannot be reached.
* This can happen in functions with endless loops.
*/
static void emit_amd64_Jcc(const ir_node *irn)
{
- const ir_edge_t *edge;
- const ir_node *proj_true = NULL;
- const ir_node *proj_false = NULL;
- const ir_node *block;
- const ir_node *next_block;
- const char *suffix;
- const amd64_attr_t *attr = get_amd64_attr_const(irn);
- ir_relation relation = attr->ext.relation;
- ir_node *op1 = get_irn_n(irn, 0);
- const amd64_attr_t *cmp_attr = get_amd64_attr_const(op1);
- bool is_signed = !cmp_attr->data.cmp_unsigned;
+ const ir_node *proj_true = NULL;
+ const ir_node *proj_false = NULL;
+ const ir_node *block;
+ const ir_node *next_block;
+ const char *suffix;
+ const amd64_attr_t *attr = get_amd64_attr_const(irn);
+ ir_relation relation = attr->ext.relation;
+ ir_node *op1 = get_irn_n(irn, 0);
+ const amd64_attr_t *cmp_attr = get_amd64_attr_const(op1);
+ bool is_signed = !cmp_attr->data.cmp_unsigned;
assert(is_amd64_Cmp(op1));
*/
static void emit_arm_B(const ir_node *irn)
{
- const ir_edge_t *edge;
const ir_node *proj_true = NULL;
const ir_node *proj_false = NULL;
const ir_node *block;
int cnt;
int sign = 1;
arm_vals v;
- const ir_edge_t *edge;
- const ir_edge_t *next;
/* first optimize incsp->incsp combinations */
node = be_peephole_IncSP_IncSP(node);
/* reattach IncSP users */
last = node;
node = sched_next(first);
- foreach_out_edge_safe(first, edge, next) {
+ foreach_out_edge_safe(first, edge) {
ir_node *user = get_edge_src_irn(edge);
int pos = get_edge_src_pos(edge);
if (user == node)
ir_node **in;
ir_node **res_projs;
int n_reg_results = 0;
- const ir_edge_t *edge;
int *reg_param_idxs;
int *stack_param_idx;
int i, n;
res_projs = ALLOCANZ(ir_node*, n_res);
foreach_out_edge(irn, edge) {
- const ir_edge_t *res_edge;
- ir_node *irn = get_edge_src_irn(edge);
+ ir_node *irn = get_edge_src_irn(edge);
if (!is_Proj(irn) || get_Proj_proj(irn) != pn_Call_T_result)
continue;
ir_type *type = get_Alloc_type(alloc);
dbg_info *dbg;
- const ir_edge_t *edge;
ir_node *new_alloc;
ir_node *count;
ir_node *size;
/* merge start block with successor if possible */
{
- const ir_edge_t *edge;
foreach_out_edge(jmp, edge) {
ir_node *succ = get_edge_src_irn(edge);
if (!is_Block(succ))
ir_node *start_bl;
ir_node **args;
ir_node *arg_tuple;
- const ir_edge_t *edge;
ir_type *arg_type, *bet_type;
lower_frame_sels_env_t ctx;
*/
static bool has_real_user(const ir_node *node)
{
- const ir_edge_t *edge;
foreach_out_edge(node, edge) {
ir_node *user = get_edge_src_irn(edge);
if (!is_End(user) && !is_Anchor(user))
void be_add_missing_keeps_node(ir_node *node)
{
- int n_outs, i;
- unsigned *found_projs;
- const ir_edge_t *edge;
- ir_mode *mode = get_irn_mode(node);
- ir_node *last_keep;
- ir_node **existing_projs;
+ int n_outs, i;
+ unsigned *found_projs;
+ ir_mode *mode = get_irn_mode(node);
+ ir_node *last_keep;
+ ir_node **existing_projs;
if (mode != mode_T) {
if (!has_real_user(node)) {
#define be_foreach_definition_(node, cls, value, code) \
do { \
if (get_irn_mode(node) == mode_T) { \
- const ir_edge_t *edge_; \
foreach_out_edge(node, edge_) { \
const arch_register_req_t *req_; \
value = get_edge_src_irn(edge_); \
ir_node *block = entry->block;
ir_node *succ = NULL;
blocksched_entry_t *succ_entry;
- const ir_edge_t *edge;
- double best_succ_execfreq;
+ double best_succ_execfreq;
if (irn_visited_else_mark(block))
return;
pmap *partners;
int i, n_alloc;
size_t col;
- const ir_edge_t *edge;
ir_node *perm = NULL;
//int match_res, cost;
be_chordal_env_t *env = alloc_env->chordal_env;
DBG((dbg, LEVEL_2, "\tlive: %B\n", live));
if (get_irn_mode(irn) == mode_T) {
- const ir_edge_t *edge;
-
foreach_out_edge(irn, edge) {
ir_node *proj = get_edge_src_irn(edge);
ir_node *pre_process_constraints(be_chordal_env_t *env, be_insn_t **the_insn)
{
- be_insn_t *insn = *the_insn;
- ir_node *perm = NULL;
- bitset_t *out_constr = bitset_alloca(env->cls->n_regs);
- const ir_edge_t *edge;
- int i;
+ be_insn_t *insn = *the_insn;
+ ir_node *perm = NULL;
+ bitset_t *out_constr = bitset_alloca(env->cls->n_regs);
+ int i;
assert(insn->has_constraints && "only do this for constrained nodes");
static color_t *reg_to_color(const draw_chordal_env_t *env,
ir_node *rel_bl, ir_node *irn, color_t *color)
{
- int phi_arg = 0;
- const ir_edge_t *edge;
+ int phi_arg = 0;
(void) env;
(void) rel_bl;
*/
static void memory_operand_walker(ir_node *irn, void *env)
{
- const ir_edge_t *edge, *ne;
- ir_node *block;
- ir_node *spill;
+ ir_node *block;
+ ir_node *spill;
(void)env;
spill = be_get_Reload_mem(irn);
block = get_nodes_block(irn);
- foreach_out_edge_safe(irn, edge, ne) {
+ foreach_out_edge_safe(irn, edge) {
ir_node *src = get_edge_src_irn(edge);
int pos = get_edge_src_pos(edge);
*/
static bool can_move(ir_node *node, ir_node *after)
{
- const ir_edge_t *edge;
ir_node *node_block = get_nodes_block(node);
assert(node_block == get_nodes_block(after));
foreach_out_edge(node, edge) {
ir_node *out = get_edge_src_irn(edge);
if (is_Proj(out)) {
- const ir_edge_t *edge2;
assert(get_irn_n_edges_kind(out, EDGE_KIND_DEP) == 0);
foreach_out_edge(out, edge2) {
ir_node *out2 = get_edge_src_irn(edge2);
if (is_Phi(out2) || is_End(out2))
continue;
if (is_Sync(out2)) {
- const ir_edge_t *edge3;
foreach_out_edge(out2, edge3) {
ir_node *out3 = get_edge_src_irn(edge3);
/* Phi or End represents a usage at block end. */
void be_emit_jump_table(const ir_node *node, const ir_switch_table *table,
ir_entity *entity, get_cfop_target_func get_cfop_target)
{
- unsigned n_outs = arch_get_irn_n_outs(node);
- const ir_node **targets = XMALLOCNZ(const ir_node*, n_outs);
- size_t n_entries = ir_switch_table_get_n_entries(table);
- unsigned long length = 0;
- size_t e;
- const ir_edge_t *edge;
- unsigned i;
- const ir_node **labels;
+ unsigned n_outs = arch_get_irn_n_outs(node);
+ const ir_node **targets = XMALLOCNZ(const ir_node*, n_outs);
+ size_t n_entries = ir_switch_table_get_n_entries(table);
+ unsigned long length = 0;
+ size_t e;
+ unsigned i;
+ const ir_node **labels;
/* go over all proj's and collect their jump targets */
foreach_out_edge(node, edge) {
insn->irn = irn;
insn->next_insn = sched_next(irn);
if (get_irn_mode(irn) == mode_T) {
- const ir_edge_t *edge;
ir_node *p;
/* This instruction might create more than one def. These are handled
/* If there is no dominance relation, they do not interfere. */
if(a2b) {
- const ir_edge_t *edge;
ir_node *bb = get_nodes_block(b);
/*
const ir_node *where)
{
const be_lv_t *lv = be_get_irg_liveness(irg);
- const ir_edge_t *edge;
/* the node must strictly dominate the location, else it cannot be live there. */
if (!_value_dominates(irn, where) || irn == where)
const ir_node *where)
{
const be_lv_t *lv = be_get_irg_liveness(irg);
- const ir_edge_t *edge;
if (!_value_dominates(irn, where))
return 0;
*/
static void remove_empty_block(ir_node *block)
{
- const ir_edge_t *edge;
- const ir_edge_t *next;
- int i;
- int arity;
- ir_node *node;
- ir_node *pred;
- ir_node *succ_block;
- ir_node *jump = NULL;
- ir_graph *irg = get_irn_irg(block);
- ir_entity *entity;
+ int i;
+ int arity;
+ ir_node *node;
+ ir_node *pred;
+ ir_node *succ_block;
+ ir_node *jump = NULL;
+ ir_graph *irg = get_irn_irg(block);
+ ir_entity *entity;
if (irn_visited_else_mark(block))
return;
entity = get_Block_entity(block);
pred = get_Block_cfgpred(block, 0);
succ_block = NULL;
- foreach_out_edge_safe(jump, edge, next) {
+ foreach_out_edge_safe(jump, edge) {
int pos = get_edge_src_pos(edge);
assert(succ_block == NULL);
/* there can be some non-scheduled Pin nodes left in the block, move them
* to the succ block (Pin) or pred block (Sync) */
- foreach_out_edge_safe(block, edge, next) {
+ foreach_out_edge_safe(block, edge) {
node = get_edge_src_irn(edge);
if (node == jump)
/* kill projs */
if (get_irn_mode(node) == mode_T) {
- const ir_edge_t *edge;
- const ir_edge_t *next_edge;
- foreach_out_edge_safe(node, edge, next_edge) {
+ foreach_out_edge_safe(node, edge) {
ir_node *proj = get_edge_src_irn(edge);
if (!is_Proj(proj))
continue;
static void selected(block_sched_env_t *env, ir_node *node)
{
- const ir_edge_t *edge;
-
/* notify the selector about the finally selected node. */
if (env->selector->node_selected)
env->selector->node_selected(env->selector_block_env, node);
const list_sched_selector_t *selector = env->selector;
block_sched_env_t be;
- const ir_edge_t *edge;
ir_nodeset_t *cands = &be.cands;
/* Initialize the block's list head that will hold the schedule. */
*/
static void liveness_for_node(ir_node *irn)
{
- const ir_edge_t *edge;
ir_node *def_block;
bitset_clear_all(re.visited);
assert(!is_Phi(node) && "liveness_transfer produces invalid results for phi nodes");
if (get_irn_mode(node) == mode_T) {
- const ir_edge_t *edge;
-
foreach_out_edge(node, edge) {
ir_node *proj = get_edge_src_irn(edge);
* NOTE: This works with auto-magic. If we insert the new copy/exchange
* nodes after this node, everything should be ok. */
ir_node * sched_point = sched_prev(irn);
- const ir_edge_t * edge;
- const ir_edge_t * next;
int n;
int i;
/* build the list of register pairs (in, out) */
n = 0;
- foreach_out_edge_safe(irn, edge, next) {
+ foreach_out_edge_safe(irn, edge) {
ir_node *const out = get_edge_src_irn(edge);
long const pn = get_Proj_proj(out);
ir_node *const in = get_irn_n(irn, pn);
ir_mode *mode = get_irn_mode(irn);
if (mode == mode_T) {
- const ir_edge_t *edge;
-
foreach_out_edge(irn, edge) {
ir_node *proj = get_edge_src_irn(edge);
int i, n;
/* get some Proj and find out the register class of that Proj. */
- const ir_edge_t *edge = get_irn_out_edge_first_kind(perm, EDGE_KIND_NORMAL);
- ir_node *one_proj = get_edge_src_irn(edge);
+ ir_node *one_proj = get_edge_src_irn(get_irn_out_edge_first_kind(perm, EDGE_KIND_NORMAL));
const arch_register_class_t *cls = arch_get_irn_reg_class(one_proj);
assert(is_Proj(one_proj));
int i = get_start_reg_index(irg, reg);
ir_node *start = get_irg_start(irg);
ir_mode *mode = arch_register_class_mode(arch_register_get_class(reg));
- const ir_edge_t *edge;
foreach_out_edge(start, edge) {
ir_node *proj = get_edge_src_irn(edge);
ir_nodeset_iterator_t iter;
if (get_irn_mode(irn) == mode_T) {
- const ir_edge_t *edge;
foreach_out_edge(irn, edge) {
ir_node *proj = get_edge_src_irn(edge);
if (!arch_irn_consider_in_reg_alloc(cls, proj))
bipartite_t *bp = bipartite_new(cls->n_regs, cls->n_regs);
/* add all proj after a perm to clique */
- const ir_edge_t *edge;
foreach_out_edge(irn, edge) {
ir_node *proj = get_edge_src_irn(edge);
#else
/* order nodes for perfect elimination order */
if (get_irn_mode(irn) == mode_T) {
- bool allHaveIFEdges = true;
- const ir_edge_t *edge;
-
+ bool allHaveIFEdges = true;
foreach_out_edge(irn, edge) {
ir_node *proj = get_edge_src_irn(edge);
if (!arch_irn_consider_in_reg_alloc(cls, proj))
{
/* clear values defined */
if (get_irn_mode(node) == mode_T) {
- const ir_edge_t *edge;
foreach_out_edge(node, edge) {
ir_node *proj = get_edge_src_irn(edge);
clear_reg_value(proj);
*/
bool be_has_only_one_user(ir_node *node)
{
- int n = get_irn_n_edges(node);
- int n_users;
- const ir_edge_t *edge;
+ int n = get_irn_n_edges(node);
+ int n_users;
if (n <= 1)
return 1;
ir_graph *irg = get_irn_irg(bl);
int res = 0;
- const ir_edge_t *edge;
-
foreach_out_edge(irn, edge) {
ir_node *user = get_edge_src_irn(edge);
unsigned visited_nr = get_irg_visited(irg) + 1;
{
int res = 0;
if (get_irn_mode(irn) == mode_T) {
- const ir_edge_t *edge;
-
foreach_out_edge(irn, edge)
res += get_result_hops_sum(env, get_edge_src_irn(edge));
}
static int get_num_successors(ir_node *irn)
{
int sum = 0;
- const ir_edge_t *edge;
if (get_irn_mode(irn) == mode_T) {
/* for mode_T nodes: count the users of all Projs */
if (get_irn_mode(irn) == mode_T) {
/* mode_T nodes: num out regs == num Projs with mode datab */
- const ir_edge_t *edge;
foreach_out_edge(irn, edge) {
ir_node *proj = get_edge_src_irn(edge);
if (mode_is_datab(get_irn_mode(proj)))
*/
static int is_root(ir_node *root, ir_node *block)
{
- const ir_edge_t *edge;
-
foreach_out_edge(root, edge) {
ir_node *succ = get_edge_src_irn(edge);
ir_node *root = NULL, *preord = NULL;
ir_node *curr, *irn;
int cur_pos;
- const ir_edge_t *edge;
/* First step: Find the root set. */
foreach_out_edge(block, edge) {
static double get_spill_costs(ir_node *node)
{
- const ir_edge_t *edge;
- ir_node *spill_place = skip_Proj(node);
- double costs = be_get_spill_costs(spill_env, node,
- spill_place);
+ ir_node *spill_place = skip_Proj(node);
+ double costs = be_get_spill_costs(spill_env, node, spill_place);
foreach_out_edge(node, edge) {
ir_node *use = get_edge_src_irn(edge);
*/
static void spill_node(ir_node *node)
{
- const ir_edge_t *edge;
-
DBG((dbg, LEVEL_3, "\tspilling %+F\n", node));
foreach_out_edge(node, edge) {
/* If there is no dominance relation, they do not interfere. */
if ((a2b | b2a) > 0) {
- const ir_edge_t *edge;
ir_node *bb;
/*
foreach_out_edge(a, edge) {
const ir_node *user = get_edge_src_irn(edge);
if (is_Sync(user)) {
- const ir_edge_t *edge2;
foreach_out_edge(user, edge2) {
const ir_node *user2 = get_edge_src_irn(edge2);
assert(!is_Sync(user2));
stat_ev_tim_push();
for (i = 0; i < nodes_len; ++i) {
- const ir_edge_t *edge, *next;
ir_node *value = nodes[i];
DBG((dbg, LEVEL_3, "\tfixing users of %+F\n", value));
introduce_definition(env, value);
- foreach_out_edge_safe(value, edge, next) {
+ foreach_out_edge_safe(value, edge) {
ir_node *use = get_edge_src_irn(edge);
if (env->ignore_uses != NULL &&
/* record state changes by the node */
if (get_irn_mode(node) == mode_T) {
- const ir_edge_t *edge;
-
foreach_out_edge(node, edge) {
const arch_register_t *reg;
ir_node *proj = get_edge_src_irn(edge);
ir_node *node;
unsigned timestep;
unsigned next_use_step;
- const ir_edge_t *edge;
assert(skip_from_uses == 0 || skip_from_uses == 1);
if (skip_from_uses) {
*/
ir_node *be_get_Proj_for_pn(const ir_node *irn, long pn)
{
- const ir_edge_t *edge;
- ir_node *proj;
+ ir_node *proj;
assert(get_irn_mode(irn) == mode_T && "need mode_T");
foreach_out_edge(irn, edge) {
*/
static int my_values_interfere(const ir_node *a, const ir_node *b)
{
- const ir_edge_t *edge;
ir_node *bb;
int a2b = value_dominates(a, b);
int b2a = value_dominates(b, a);
int arity;
if (get_irn_mode(node) == mode_T) {
- const ir_edge_t *edge;
foreach_out_edge(node, edge) {
ir_node *def = get_edge_src_irn(edge);
value_def(def);
/* rewire mem-proj */
if (get_irn_mode(node) == mode_T) {
- const ir_edge_t *edge;
foreach_out_edge(node, edge) {
ir_node *out = get_edge_src_irn(edge);
if (get_irn_mode(out) == mode_M) {
*/
static void transform_MemPerm(ir_node *node)
{
- ir_node *block = get_nodes_block(node);
- ir_graph *irg = get_irn_irg(node);
- ir_node *sp = be_get_initial_reg_value(irg, &ia32_registers[REG_ESP]);
- int arity = be_get_MemPerm_entity_arity(node);
- ir_node **pops = ALLOCAN(ir_node*, arity);
- ir_node *in[1];
- ir_node *keep;
- int i;
- const ir_edge_t *edge;
- const ir_edge_t *next;
+ ir_node *block = get_nodes_block(node);
+ ir_graph *irg = get_irn_irg(node);
+ ir_node *sp = be_get_initial_reg_value(irg, &ia32_registers[REG_ESP]);
+ int arity = be_get_MemPerm_entity_arity(node);
+ ir_node **pops = ALLOCAN(ir_node*, arity);
+ ir_node *in[1];
+ ir_node *keep;
+ int i;
/* create Pushs */
for (i = 0; i < arity; ++i) {
sched_add_before(node, keep);
/* exchange memprojs */
- foreach_out_edge_safe(node, edge, next) {
+ foreach_out_edge_safe(node, edge) {
ir_node *proj = get_edge_src_irn(edge);
int p = get_Proj_proj(proj);
*/
static int value_last_used_here(be_lv_t *lv, ir_node *here, ir_node *value)
{
- ir_node *block = get_nodes_block(here);
- const ir_edge_t *edge;
+ ir_node *block = get_nodes_block(here);
/* If the value is live end it is for sure it does not die here */
if (be_is_live_end(lv, block, value)) return 0;
*/
static ir_node *get_proj(const ir_node *node, long proj)
{
- const ir_edge_t *edge;
- ir_node *src;
+ ir_node *src;
assert(get_irn_mode(node) == mode_T && "expected mode_T node");
res = new_bd_ia32_xAdd(dbgi, block, noreg, noreg, nomem, res, in1);
set_ia32_ls_mode(res, get_ia32_ls_mode(irn));
} else {
- ir_node *flags_proj = NULL;
- ir_node *carry;
- const ir_edge_t *edge;
+ ir_node *flags_proj = NULL;
+ ir_node *carry;
if (get_irn_mode(irn) == mode_T) {
/* collect the Proj uses */
*/
static void reroute_result(ir_node *resproj, ir_node *l_res, ir_node *h_res)
{
- const ir_edge_t *edge, *next;
-
- foreach_out_edge_safe(resproj, edge, next) {
+ foreach_out_edge_safe(resproj, edge) {
ir_node *proj = get_edge_src_irn(edge);
long pn = get_Proj_proj(proj);
if (edges_activated(irg)) {
/* use rerouting to prevent some warning in the backend */
- const ir_edge_t *edge, *next;
-
- foreach_out_edge_safe(call, edge, next) {
+ foreach_out_edge_safe(call, edge) {
ir_node *proj = get_edge_src_irn(edge);
pn_Call pn = (pn_Call)get_Proj_proj(proj);
int ins_permuted;
ir_node *test;
arch_register_t const *reg;
- ir_edge_t const *edge;
- ir_edge_t const *tmp;
if (get_ia32_op_type(node) != ia32_Normal)
return;
reg = arch_get_irn_register_out(node, pn_ia32_Cmp_eflags);
arch_set_irn_register_out(test, pn_ia32_Test_eflags, reg);
- foreach_out_edge_safe(node, edge, tmp) {
+ foreach_out_edge_safe(node, edge) {
ir_node *const user = get_edge_src_irn(edge);
if (is_Proj(user))
ir_mode *flags_mode;
ir_mode *op_mode;
ir_node *schedpoint;
- const ir_edge_t *edge;
produces_flag_t produced;
if (get_nodes_block(left) != block)
*/
static void peephole_IncSP_Store_to_push(ir_node *irn)
{
- int i;
- int maxslot;
- int inc_ofs;
- ir_node *node;
- ir_node *stores[MAXPUSH_OPTIMIZE];
- ir_node *block;
- ir_graph *irg;
- ir_node *curr_sp;
- ir_mode *spmode;
- ir_node *first_push = NULL;
- ir_edge_t const *edge;
- ir_edge_t const *next;
+ int i;
+ int maxslot;
+ int inc_ofs;
+ ir_node *node;
+ ir_node *stores[MAXPUSH_OPTIMIZE];
+ ir_node *block;
+ ir_graph *irg;
+ ir_node *curr_sp;
+ ir_mode *spmode;
+ ir_node *first_push = NULL;
memset(stores, 0, sizeof(stores));
mem_proj = new_r_Proj(push, mode_M, pn_ia32_Push_M);
/* rewire Store Projs */
- foreach_out_edge_safe(store, edge, next) {
+ foreach_out_edge_safe(store, edge) {
ir_node *proj = get_edge_src_irn(edge);
if (!is_Proj(proj))
continue;
inc_ofs -= 4;
}
- foreach_out_edge_safe(irn, edge, next) {
+ foreach_out_edge_safe(irn, edge) {
ir_node *const src = get_edge_src_irn(edge);
int const pos = get_edge_src_pos(edge);
for (++i; i <= maxslot; ++i) {
ir_node *load = loads[i];
ir_node *mem, *pop;
- const ir_edge_t *edge, *tmp;
const arch_register_t *reg;
mem = get_irn_n(load, n_ia32_mem);
sched_add_before(irn, pop);
/* rewire now */
- foreach_out_edge_safe(load, edge, tmp) {
+ foreach_out_edge_safe(load, edge) {
ir_node *proj = get_edge_src_irn(edge);
set_Proj_pred(proj, pop);
static inline void try_kill(ir_node *node)
{
if (get_irn_mode(node) == mode_T) {
- const ir_edge_t *edge, *next;
- foreach_out_edge_safe(node, edge, next) {
+ foreach_out_edge_safe(node, edge) {
ir_node *proj = get_edge_src_irn(edge);
try_kill(proj);
}
for (j = get_method_n_ress(mtp) - 1; j >= 0; --j) {
ir_type *res_tp = get_method_res_type(mtp, j);
ir_node *res, *new_res;
- const ir_edge_t *edge, *next;
ir_mode *res_mode;
if (! is_atomic_type(res_tp)) {
new_res = NULL;
/* now patch the users */
- foreach_out_edge_safe(res, edge, next) {
+ foreach_out_edge_safe(res, edge) {
ir_node *succ = get_edge_src_irn(edge);
/* ignore Keeps */
if (mode == mode_T) {
/* patch all Proj's */
- const ir_edge_t *edge;
-
foreach_out_edge(n, edge) {
ir_node *proj = get_edge_src_irn(edge);
if (is_Proj(proj)) {
*/
static ir_node *get_irn_Proj_for_mode(ir_node *n, ir_mode *m)
{
- const ir_edge_t *edge;
-
assert(get_irn_mode(n) == mode_T && "Need mode_T node");
foreach_out_edge(n, edge) {
const arch_register_class_t *cls = &ia32_reg_classes[CLASS_ia32_vfp];
if (get_irn_mode(irn) == mode_T) {
- const ir_edge_t *edge;
-
foreach_out_edge(irn, edge) {
ir_node *proj = get_edge_src_irn(edge);
*/
static void collect_and_rewire_users(ir_node *store, ir_node *old_val, ir_node *new_val)
{
- const ir_edge_t *edge, *ne;
-
- foreach_out_edge_safe(old_val, edge, ne) {
+ foreach_out_edge_safe(old_val, edge) {
ir_node *user = get_edge_src_irn(edge);
if (! user || user == store)
*/
static ir_node *get_call_result_proj(ir_node *call)
{
- const ir_edge_t *edge;
-
/* search the result proj */
foreach_out_edge(call, edge) {
ir_node *proj = get_edge_src_irn(edge);
*/
static int sim_Perm(x87_state *state, ir_node *irn)
{
- int i, n;
- ir_node *pred = get_irn_n(irn, 0);
- int *stack_pos;
- const ir_edge_t *edge;
+ int i, n;
+ ir_node *pred = get_irn_n(irn, 0);
+ int *stack_pos;
/* handle only floating point Perms */
if (! mode_is_float(get_irn_mode(pred)))
ir_node *n, *next;
blk_state *bl_state = x87_get_bl_state(sim, block);
x87_state *state = bl_state->begin;
- const ir_edge_t *edge;
ir_node *start_block;
assert(state != NULL);
static void emit_sparc_branch(const ir_node *node, get_cc_func get_cc)
{
const sparc_jmp_cond_attr_t *attr = get_sparc_jmp_cond_attr_const(node);
- ir_relation relation = attr->relation;
- const ir_node *proj_true = NULL;
- const ir_node *proj_false = NULL;
- const ir_edge_t *edge;
- const ir_node *block;
- const ir_node *next_block;
+ ir_relation relation = attr->relation;
+ const ir_node *proj_true = NULL;
+ const ir_node *proj_false = NULL;
+ const ir_node *block;
+ const ir_node *next_block;
foreach_out_edge(node, edge) {
ir_node *proj = get_edge_src_irn(edge);
static void process_bias(ir_node *block, bool sp_relative, int bias,
int free_bytes)
{
- const ir_edge_t *edge;
- ir_node *irn;
+ ir_node *irn;
mark_Block_block_visited(block);
char *b = buf;
size_t l;
size_t len = sizeof(buf);
- const ir_edge_t *edge;
foreach_out_edge(node, edge) {
ir_node *n = get_edge_src_irn(edge);
void instrument_initcall(ir_graph *irg, ir_entity *ent)
{
- const ir_edge_t *edge;
- ir_node *initial_exec;
- ir_node *initial_mem;
- ir_node *start_block;
- ir_node *adr, *call, *new_mem;
- ir_node *first_block = NULL;
+ ir_node *initial_exec;
+ ir_node *initial_mem;
+ ir_node *start_block;
+ ir_node *adr, *call, *new_mem;
+ ir_node *first_block = NULL;
int i, idx, need_new_block;
symconst_symbol sym;
*/
static void dump_ir_edges(ir_node *node, void *env)
{
- int i = 0;
- FILE *F = (FILE*)env;
- const ir_edge_t *edge;
+ int i = 0;
+ FILE *F = (FILE*)env;
foreach_out_edge(node, edge) {
ir_node *succ = get_edge_src_irn(edge);
edges_notify_edge_kind(src, pos, bl_tgt, bl_old, EDGE_KIND_BLOCK, irg);
} else if (get_irn_mode(src) == mode_X && old_tgt != NULL && is_Block(old_tgt)) {
/* moving a jump node from one block to another */
- const ir_edge_t *edge;
- const ir_edge_t *next;
- foreach_out_edge_kind_safe(old_tgt, edge, next, EDGE_KIND_BLOCK) {
+ foreach_out_edge_kind_safe(old_tgt, edge, EDGE_KIND_BLOCK) {
ir_node *succ = get_edge_src_irn(edge);
int succ_pos = get_edge_src_pos(edge);
ir_node *block_pred = get_Block_cfgpred(succ, succ_pos);
void edges_reroute_except(ir_node *from, ir_node *to, ir_node *exception)
{
- const ir_edge_t *edge;
- const ir_edge_t *next;
- foreach_out_edge_safe(from, edge, next) {
+ foreach_out_edge_safe(from, edge) {
ir_node *src = get_edge_src_irn(edge);
if (src == exception)
continue;
static void verify_list_presence(ir_node *irn, void *data)
{
- build_walker *w = (build_walker*)data;
- const ir_edge_t *e;
+ build_walker *w = (build_walker*)data;
bitset_set(w->reachable, get_irn_idx(irn));
static void irg_walk_edges2(ir_node *node, irg_walk_func *pre,
irg_walk_func *post, void *env)
{
- const ir_edge_t *edge, *next;
-
if (irn_visited_else_mark(node))
return;
if (pre != NULL)
pre(node, env);
- foreach_out_edge_kind_safe(node, edge, next, EDGE_KIND_NORMAL) {
+ foreach_out_edge_kind_safe(node, edge, EDGE_KIND_NORMAL) {
/* find the corresponding successor block. */
ir_node *pred = get_edge_src_irn(edge);
irg_walk_edges2(pred, pre, post, env);
static void irg_block_edges_walk2(ir_node *bl, irg_walk_func *pre,
irg_walk_func *post, void *env)
{
- const ir_edge_t *edge, *next;
-
if (!Block_block_visited(bl)) {
mark_Block_block_visited(bl);
if (pre)
pre(bl, env);
- foreach_out_edge_kind_safe(bl, edge, next, EDGE_KIND_BLOCK) {
+ foreach_out_edge_kind_safe(bl, edge, EDGE_KIND_BLOCK) {
/* find the corresponding successor block. */
ir_node *pred = get_edge_src_irn(edge);
irg_block_edges_walk2(pred, pre, post, env);
static void move_projs(const ir_node *node, ir_node *to_bl)
{
- const ir_edge_t *edge;
-
if (get_irn_mode(node) != mode_T)
return;
ir_node *part_block_edges(ir_node *node)
{
- ir_graph *irg = get_irn_irg(node);
- ir_node *old_block = get_nodes_block(node);
- ir_node *new_block = new_r_Block(irg,
- get_Block_n_cfgpreds(old_block),
- get_Block_cfgpred_arr(old_block));
- const ir_edge_t *edge;
- const ir_edge_t *next;
+ ir_graph *irg = get_irn_irg(node);
+ ir_node *old_block = get_nodes_block(node);
+ ir_node *new_block = new_r_Block(irg, get_Block_n_cfgpreds(old_block), get_Block_cfgpred_arr(old_block));
/* old_block has no predecessors anymore for now */
set_irn_in(old_block, 0, NULL);
move_edges(node, old_block, new_block);
/* move Phi nodes to new_block */
- foreach_out_edge_safe(old_block, edge, next) {
+ foreach_out_edge_safe(old_block, edge) {
ir_node *phi = get_edge_src_irn(edge);
if (!is_Phi(phi))
continue;
*/
static void enqueue_users(ir_node *n, pdeq *waitq)
{
- const ir_edge_t *edge;
-
foreach_out_edge(n, edge) {
- ir_node *succ = get_edge_src_irn(edge);
- const ir_edge_t *edge2;
+ ir_node *succ = get_edge_src_irn(edge);
enqueue_node(succ, waitq);
ir_graph *irg = get_irn_irg(block);
ir_node *end = get_irg_end(irg);
- const ir_edge_t *edge;
foreach_block_succ(block, edge) {
- const ir_edge_t *edge2;
ir_node *succ_block = get_edge_src_irn(edge);
enqueue_node(succ_block, waitq);
foreach_out_edge(succ_block, edge2) {
{
int i, arity = get_irn_arity(block);
ir_node **in;
- const ir_edge_t *edge;
assert(nr < arity);
ir_node *block = get_nodes_block(node);
ir_cons_flags volatility = get_Load_volatility(node) == volatility_is_volatile
? cons_volatile : cons_none;
- const ir_edge_t *edge;
- const ir_edge_t *next;
if (env->params->little_endian) {
low = adr;
proj_m = new_r_Proj(low, mode_M, pn_Load_M);
high = new_rd_Load(dbg, block, proj_m, high, mode, volatility);
- foreach_out_edge_safe(node, edge, next) {
+ foreach_out_edge_safe(node, edge) {
ir_node *proj = get_edge_src_irn(edge);
if (!is_Proj(proj))
continue;
const lower64_entry_t *entry = get_node_entry(value);
ir_cons_flags volatility = get_Store_volatility(node) == volatility_is_volatile
? cons_volatile : cons_none;
- const ir_edge_t *edge;
- const ir_edge_t *next;
(void) mode;
assert(entry);
proj_m = new_r_Proj(low, mode_M, pn_Store_M);
high = new_rd_Store(dbg, block, proj_m, high, entry->high_word, volatility);
- foreach_out_edge_safe(node, edge, next) {
+ foreach_out_edge_safe(node, edge) {
ir_node *proj = get_edge_src_irn(edge);
if (!is_Proj(proj))
continue;
*/
static void lower_Div(ir_node *node, ir_mode *mode)
{
- ir_node *left = get_Div_left(node);
- ir_node *right = get_Div_right(node);
- ir_node *block = get_nodes_block(node);
- dbg_info *dbgi = get_irn_dbg_info(node);
- ir_type *mtp = mode_is_signed(mode) ? binop_tp_s : binop_tp_u;
- ir_mode *opmode = get_irn_op_mode(node);
- ir_node *addr
- = get_intrinsic_address(mtp, get_irn_op(node), opmode, opmode);
- ir_node *in[4];
- ir_node *call;
- ir_node *resproj;
- const ir_edge_t *edge;
- const ir_edge_t *next;
+ ir_node *left = get_Div_left(node);
+ ir_node *right = get_Div_right(node);
+ ir_node *block = get_nodes_block(node);
+ dbg_info *dbgi = get_irn_dbg_info(node);
+ ir_type *mtp = mode_is_signed(mode) ? binop_tp_s : binop_tp_u;
+ ir_mode *opmode = get_irn_op_mode(node);
+ ir_node *addr = get_intrinsic_address(mtp, get_irn_op(node), opmode, opmode);
+ ir_node *in[4];
+ ir_node *call;
+ ir_node *resproj;
if (env->params->little_endian) {
in[0] = get_lowered_low(left);
resproj = new_r_Proj(call, mode_T, pn_Call_T_result);
set_irn_pinned(call, get_irn_pinned(node));
- foreach_out_edge_safe(node, edge, next) {
+ foreach_out_edge_safe(node, edge) {
ir_node *proj = get_edge_src_irn(edge);
if (!is_Proj(proj))
continue;
*/
static void lower_Mod(ir_node *node, ir_mode *mode)
{
- ir_node *left = get_Mod_left(node);
- ir_node *right = get_Mod_right(node);
- dbg_info *dbgi = get_irn_dbg_info(node);
- ir_node *block = get_nodes_block(node);
- ir_type *mtp = mode_is_signed(mode) ? binop_tp_s : binop_tp_u;
- ir_mode *opmode = get_irn_op_mode(node);
- ir_node *addr
- = get_intrinsic_address(mtp, get_irn_op(node), opmode, opmode);
- ir_node *in[4];
- ir_node *call;
- ir_node *resproj;
- const ir_edge_t *edge;
- const ir_edge_t *next;
+ ir_node *left = get_Mod_left(node);
+ ir_node *right = get_Mod_right(node);
+ dbg_info *dbgi = get_irn_dbg_info(node);
+ ir_node *block = get_nodes_block(node);
+ ir_type *mtp = mode_is_signed(mode) ? binop_tp_s : binop_tp_u;
+ ir_mode *opmode = get_irn_op_mode(node);
+ ir_node *addr = get_intrinsic_address(mtp, get_irn_op(node), opmode, opmode);
+ ir_node *in[4];
+ ir_node *call;
+ ir_node *resproj;
if (env->params->little_endian) {
in[0] = get_lowered_low(left);
resproj = new_r_Proj(call, mode_T, pn_Call_T_result);
set_irn_pinned(call, get_irn_pinned(node));
- foreach_out_edge_safe(node, edge, next) {
+ foreach_out_edge_safe(node, edge) {
ir_node *proj = get_edge_src_irn(edge);
if (!is_Proj(proj))
continue;
/* move its Projs */
if (get_irn_mode(node) == mode_T) {
- const ir_edge_t *edge;
foreach_out_edge(node, edge) {
ir_node *proj = get_edge_src_irn(edge);
if (!is_Proj(proj))
int n_cfgpreds = get_Block_n_cfgpreds(old_block);
ir_node **cfgpreds = get_Block_cfgpred_arr(old_block);
ir_node *new_block = new_r_Block(irg, n_cfgpreds, cfgpreds);
- const ir_edge_t *edge;
- const ir_edge_t *next;
/* old_block has no predecessors anymore for now */
set_irn_in(old_block, 0, NULL);
move(node, old_block, new_block);
/* move Phi nodes to new_block */
- foreach_out_edge_safe(old_block, edge, next) {
+ foreach_out_edge_safe(old_block, edge) {
ir_node *phi = get_edge_src_irn(edge);
if (!is_Phi(phi))
continue;
ir_relation relation;
ir_graph *irg;
dbg_info *dbg;
- const ir_edge_t *edge;
- const ir_edge_t *next;
(void) high_mode;
rentry = get_node_entry(right);
/* all right, build the code */
- foreach_out_edge_safe(node, edge, next) {
+ foreach_out_edge_safe(node, edge) {
ir_node *proj = get_edge_src_irn(edge);
long proj_nr;
if (!is_Proj(proj))
ir_node *args;
long *new_projs;
size_t i, j, n_params;
- const ir_edge_t *edge;
- const ir_edge_t *next;
(void) high_mode;
/* if type link is NULL then the type was not lowered, hence no changes
return;
/* fix all Proj's and create new ones */
- foreach_out_edge_safe(args, edge, next) {
+ foreach_out_edge_safe(args, edge) {
ir_node *proj = get_edge_src_irn(edge);
ir_mode *mode = get_irn_mode(proj);
ir_mode *mode_l = env->low_unsigned;
size_t p;
long *res_numbers = NULL;
ir_node *resproj;
- const ir_edge_t *edge;
- const ir_edge_t *next;
(void) mode;
n_params = get_method_n_params(tp);
return;
/* fix the results */
- foreach_out_edge_safe(resproj, edge, next) {
+ foreach_out_edge_safe(resproj, edge) {
ir_node *proj = get_edge_src_irn(edge);
ir_mode *proj_mode = get_irn_mode(proj);
ir_mode *mode_l = env->low_unsigned;
ir_asm_constraint *new_outputs
= ALLOCAN(ir_asm_constraint, n_outs+n_64bit_outs);
ir_node *new_asm;
- const ir_edge_t *edge;
- const ir_edge_t *next;
for (i = 0; i < n_outs; ++i) {
const ir_asm_constraint *constraint = &output_constraints[i];
new_n_outs, new_outputs, n_clobber, clobbers,
asm_text);
- foreach_out_edge_safe(asmn, edge, next) {
+ foreach_out_edge_safe(asmn, edge) {
ir_node *proj = get_edge_src_irn(edge);
ir_mode *proj_mode = get_irn_mode(proj);
long pn;
ir_node *mem = get_Builtin_mem(builtin);
const lower64_entry_t *entry = get_node_entry(operand);
ir_mode *mode_high = get_irn_mode(entry->high_word);
- const ir_edge_t *edge;
- const ir_edge_t *next;
ir_node *res_high;
ir_node *res_low;
}
/* search result Proj */
- foreach_out_edge_safe(builtin, edge, next) {
+ foreach_out_edge_safe(builtin, edge) {
ir_node *proj = get_edge_src_irn(edge);
if (!is_Proj(proj))
continue;
*/
static void lower_Div(ir_node *n)
{
- ir_node *symconst;
- ir_node *block = get_nodes_block(n);
- ir_node *call_result = NULL;
- dbg_info *dbgi = get_irn_dbg_info(n);
- ir_graph *irg = get_irn_irg(n);
- ir_node *left = get_Div_left(n);
- ir_mode *mode = get_Div_resmode(n);
- ir_node *right = get_Div_right(n);
- const ir_edge_t *edge;
- const ir_edge_t *next;
+ ir_node *symconst;
+ ir_node *block = get_nodes_block(n);
+ ir_node *call_result = NULL;
+ dbg_info *dbgi = get_irn_dbg_info(n);
+ ir_graph *irg = get_irn_irg(n);
+ ir_node *left = get_Div_left(n);
+ ir_mode *mode = get_Div_resmode(n);
+ ir_node *right = get_Div_right(n);
if (! mode_is_float(mode))
return;
set_irn_pinned(call, get_irn_pinned(n));
- foreach_out_edge_safe(n, edge, next) {
+ foreach_out_edge_safe(n, edge) {
ir_node *proj = get_edge_src_irn(edge);
if (! is_Proj(proj))
continue;
/* When the state of a control flow node changes, not only queue its
* successor blocks, but also the Phis in these blocks, because the Phis
* must reconsider this input path. */
- ir_edge_t const* e;
foreach_out_edge(n, e) {
ir_node* const src = get_edge_src_irn(e);
pdeq_putr(q, src);
}
}
} else {
- ir_edge_t const* e;
foreach_out_edge(n, e) {
ir_node* const src = get_edge_src_irn(e);
if (get_irn_mode(src) == mode_T) {
*/
static bool is_stored(const ir_node *n)
{
- const ir_edge_t *edge;
- const ir_node *ptr;
+ const ir_node *ptr;
foreach_out_edge(n, edge) {
const ir_node *succ = get_edge_src_irn(edge);
{
ir_graph *irg;
ir_mode *mode;
- const ir_edge_t *edge;
- const ir_edge_t *next;
/* no need to do anything */
if (orig_val == second_val)
ssa_second_def = second_val;
/* Only fix the users of the first, i.e. the original node */
- foreach_out_edge_safe(orig_val, edge, next) {
+ foreach_out_edge_safe(orig_val, edge) {
ir_node *user = get_edge_src_irn(edge);
int j = get_edge_src_pos(edge);
ir_node *user_block = get_nodes_block(user);
static void copy_and_fix(const jumpthreading_env_t *env, ir_node *block,
ir_node *copy_block, int j)
{
- const ir_edge_t *edge;
-
/* Look at all nodes in the cond_block and copy them into pred */
foreach_out_edge(block, edge) {
ir_node *node = get_edge_src_irn(edge);
* mode_bs which can't be handled in all backends. Instead we duplicate
* the node and move it to its users */
if (mode == mode_b) {
- const ir_edge_t *edge, *next;
ir_node *pred;
int pn;
pred = get_Proj_pred(node);
pn = get_Proj_proj(node);
- foreach_out_edge_safe(node, edge, next) {
+ foreach_out_edge_safe(node, edge) {
ir_node *cmp_copy;
ir_node *user = get_edge_src_irn(edge);
int pos = get_edge_src_pos(edge);
ir_node *cond;
ir_node *copy_block;
int selector_evaluated;
- const ir_edge_t *edge, *next;
ir_graph *irg;
ir_node *badX;
int cnst_pos;
cnst_pos = env.cnst_pos;
/* shorten Phis */
- foreach_out_edge_safe(env.cnst_pred, edge, next) {
+ foreach_out_edge_safe(env.cnst_pred, edge) {
ir_node *node = get_edge_src_irn(edge);
if (is_Phi(node)) {
/* Find the loops head/the blocks with cfpred outside of the loop */
if (is_Block(node)) {
- const ir_edge_t *edge;
unsigned outs_n = 0;
/* Count innerloop branches */
{
ir_graph *irg;
ir_mode *mode;
- const ir_edge_t *edge;
- const ir_edge_t *next;
assert(orig_block && orig_val && second_block && second_val &&
"no parameter of construct_ssa may be NULL");
ssa_second_def = second_val;
/* Only fix the users of the first, i.e. the original node */
- foreach_out_edge_safe(orig_val, edge, next) {
+ foreach_out_edge_safe(orig_val, edge) {
ir_node *user = get_edge_src_irn(edge);
int j = get_edge_src_pos(edge);
ir_node *user_block = get_nodes_block(user);
*/
static void find_condition_chain(ir_node *block)
{
- const ir_edge_t *edge;
bool mark = false;
bool has_be = false;
bool jmp_only = true;
scc *pscc = e->pscc;
for (irn = pscc->head; irn != NULL; irn = e->next) {
- const ir_edge_t *edge;
-
foreach_out_edge(irn, edge) {
ir_node *user = get_edge_src_irn(edge);
node_entry *ne = get_irn_ne(user, env);