case pn_Start_P_frame_base:
return be_prolog_get_reg_value(abihelper, sp_reg);
-
- case pn_Start_max:
- break;
}
panic("unexpected start proj: %ld\n", proj);
}
case pn_Call_X_regular:
case pn_Call_X_except:
case pn_Call_T_result:
- case pn_Call_max:
break;
}
panic("Unexpected Call proj %ld\n", pn);
*/
typedef enum {
pn_be_Call_M_regular = pn_Call_M, /**< The memory result of a be_Call. */
- pn_be_Call_sp = pn_Call_max,
+ pn_be_Call_sp = pn_Call_max+1,
pn_be_Call_first_res /**< The first result proj number of a
be_Call. */
} pn_be_Call;
res = new_r_Tuple(block, 2, in);
}
- turn_into_tuple(call, pn_Call_max);
/*
* Beware:
* We do not check here if this call really has exception and regular Proj's.
jmp = new_r_Jmp(block);
set_opt_cse(old_cse);
+ turn_into_tuple(call, pn_Call_max+1);
set_Tuple_pred(call, pn_Call_M, nomem);
set_Tuple_pred(call, pn_Call_X_regular, jmp);
set_Tuple_pred(call, pn_Call_X_except, new_r_Bad(irg, mode_X));
return new_rd_Proj(dbgi, new_pred, mode_X, pn_ia32_Load_X_except);
case pn_Load_X_regular:
return new_rd_Proj(dbgi, new_pred, mode_X, pn_ia32_Load_X_regular);
- case pn_Load_max:
- break;
}
} else if (is_ia32_Conv_I2I(new_pred) ||
is_ia32_Conv_I2I8Bit(new_pred)) {
return new_rd_Proj(dbgi, new_pred, mode_X, pn_ia32_xLoad_X_except);
case pn_Load_X_regular:
return new_rd_Proj(dbgi, new_pred, mode_X, pn_ia32_xLoad_X_regular);
- case pn_Load_max:
- break;
}
} else if (is_ia32_vfld(new_pred)) {
switch ((pn_Load)proj) {
return new_rd_Proj(dbgi, new_pred, mode_X, pn_ia32_vfld_X_except);
case pn_Load_X_regular:
return new_rd_Proj(dbgi, new_pred, mode_X, pn_ia32_vfld_X_regular);
- case pn_Load_max:
- break;
}
} else {
/* can happen for ProJMs when source address mode happened for the
return new_rd_Proj(dbgi, new_pred, mode_X, pn_ia32_Div_X_except);
case pn_Div_X_regular:
return new_rd_Proj(dbgi, new_pred, mode_X, pn_ia32_Div_X_regular);
- case pn_Div_max:
- break;
}
panic("No idea how to transform proj->Div");
return new_rd_Proj(dbgi, new_pred, mode_X, pn_ia32_Div_X_except);
case pn_Mod_X_regular:
return new_rd_Proj(dbgi, new_pred, mode_X, pn_ia32_Div_X_regular);
- case pn_Mod_max:
- break;
}
panic("No idea how to transform proj->Mod");
}
return new_r_Bad(get_irn_irg(block), mode_T);
case pn_Start_P_frame_base:
return get_frame_base();
- case pn_Start_max:
- break;
}
panic("Unexpected start proj: %ld\n", pn);
}
case pn_Call_X_regular:
case pn_Call_X_except:
case pn_Call_T_result:
- case pn_Call_max:
break;
}
panic("Unexpected Call proj %ld\n", pn);
/* some constants fixing the positions of nodes predecessors
   in the in array */
/* Offsets are expressed via the n_*_max input numbers (last valid input),
   so each offset is "number of fixed inputs" = n_*_max+1.  This keeps the
   constants correct if fixed inputs are ever added or removed. */
#define CALL_PARAM_OFFSET     (n_Call_max+1)
#define BUILTIN_PARAM_OFFSET  (n_Builtin_max+1)
#define SEL_INDEX_OFFSET      (n_Sel_max+1)
#define RETURN_RESULT_OFFSET  (n_Return_max+1) /* mem is not a result */
#define END_KEEPALIVE_OFFSET  0
static const char *relation_names [] = {
/** Returns a pointer to the array of parameter predecessors of a Builtin
 *  node.  NOTE(review): the extra +1 presumably skips the block entry at
 *  in[0] of the raw in-array — confirm against get_irn_in's contract. */
ir_node **get_Builtin_param_arr(ir_node *node)
{
	assert(is_Builtin(node));
	return &get_irn_in(node)[BUILTIN_PARAM_OFFSET + 1];
}
int get_Builtin_n_params(const ir_node *node)
{
assert(is_Builtin(node));
- return (get_irn_arity(node) - BUILDIN_PARAM_OFFSET);
+ return (get_irn_arity(node) - BUILTIN_PARAM_OFFSET);
}
ir_node *get_Builtin_param(const ir_node *node, int pos)
{
assert(is_Builtin(node));
- return get_irn_n(node, pos + BUILDIN_PARAM_OFFSET);
+ return get_irn_n(node, pos + BUILTIN_PARAM_OFFSET);
}
/** Sets the pos-th parameter predecessor of a Builtin node
 *  (pos is 0-based, relative to the first parameter input). */
void set_Builtin_param(ir_node *node, int pos, ir_node *param)
{
	assert(is_Builtin(node));
	set_irn_n(node, pos + BUILTIN_PARAM_OFFSET, param);
}
/* Returns a human readable string for the ir_builtin_kind. */
/* skip a potential Pin */
mem = skip_Pin(mem);
- turn_into_tuple(n, pn_Div_max);
+ turn_into_tuple(n, pn_Div_max+1);
set_Tuple_pred(n, pn_Div_M, mem);
set_Tuple_pred(n, pn_Div_X_regular, new_r_Jmp(blk));
set_Tuple_pred(n, pn_Div_X_except, new_r_Bad(irg, mode_X));
/* skip a potential Pin */
mem = skip_Pin(mem);
- turn_into_tuple(n, pn_Mod_max);
+ turn_into_tuple(n, pn_Mod_max+1);
set_Tuple_pred(n, pn_Mod_M, mem);
set_Tuple_pred(n, pn_Mod_X_regular, new_r_Jmp(blk));
set_Tuple_pred(n, pn_Mod_X_except, new_r_Bad(irg, mode_X));
Replace it by a tuple (Bad, Jmp) or (Jmp, Bad) */
ir_node *blk = get_nodes_block(n);
jmp = new_r_Jmp(blk);
- turn_into_tuple(n, pn_Cond_max);
+ turn_into_tuple(n, pn_Cond_max+1);
if (ta == tarval_b_true) {
set_Tuple_pred(n, pn_Cond_false, new_r_Bad(irg, mode_X));
set_Tuple_pred(n, pn_Cond_true, jmp);
ir_node *bad = new_r_Bad(irg, mode_X);
ir_mode *mode = get_Load_mode(n);
ir_node *res = new_r_Proj(pred_load, mode, pn_Load_res);
- ir_node *in[pn_Load_max] = { mem, jmp, bad, res };
+ ir_node *in[pn_Load_max+1] = { mem, jmp, bad, res };
ir_node *tuple = new_r_Tuple(block, ARRAY_SIZE(in), in);
return tuple;
}
ir_graph *irg = get_irn_irg(n);
ir_node *bad = new_r_Bad(irg, mode_X);
ir_node *res = value;
- ir_node *in[pn_Load_max] = { mem, jmp, bad, res };
+ ir_node *in[pn_Load_max+1] = { mem, jmp, bad, res };
ir_node *tuple = new_r_Tuple(block, ARRAY_SIZE(in), in);
return tuple;
}
blk = get_nodes_block(p);
/* get rid of the CopyB */
- turn_into_tuple(p, pn_CopyB_max);
+ turn_into_tuple(p, pn_CopyB_max+1);
set_Tuple_pred(p, pn_CopyB_M, mem);
set_Tuple_pred(p, pn_CopyB_X_regular, new_r_Jmp(blk));
set_Tuple_pred(p, pn_CopyB_X_except, new_r_Bad(irg, mode_X));
mode_bytes /= 2;
}
- turn_into_tuple(irn, pn_CopyB_max);
+ turn_into_tuple(irn, pn_CopyB_max+1);
set_Tuple_pred(irn, pn_CopyB_M, mem);
set_Tuple_pred(irn, pn_CopyB_X_regular, new_r_Bad(irg, mode_X));
set_Tuple_pred(irn, pn_CopyB_X_except, new_r_Bad(irg, mode_X));
}
irn = new_r_Tuple(block, 1, &irn);
- turn_into_tuple(call, pn_Call_max);
+ turn_into_tuple(call, pn_Call_max+1);
set_Tuple_pred(call, pn_Call_M, mem);
set_Tuple_pred(call, pn_Call_X_regular, reg_jmp);
set_Tuple_pred(call, pn_Call_X_except, exc_jmp);
mem = get_Alloc_mem(alloc);
blk = get_nodes_block(alloc);
- turn_into_tuple(alloc, pn_Alloc_max);
+ turn_into_tuple(alloc, pn_Alloc_max+1);
set_Tuple_pred(alloc, pn_Alloc_M, mem);
set_Tuple_pred(alloc, pn_Alloc_X_regular, new_r_Jmp(blk));
set_Tuple_pred(alloc, pn_Alloc_X_except, new_r_Bad(irg, mode_X));
sel = new_rd_simpleSel(dbg, get_nodes_block(alloc), get_irg_no_mem(irg), get_irg_frame(irg), ent);
mem = get_Alloc_mem(alloc);
- turn_into_tuple(alloc, pn_Alloc_max);
+ turn_into_tuple(alloc, pn_Alloc_max+1);
set_Tuple_pred(alloc, pn_Alloc_M, mem);
set_Tuple_pred(alloc, pn_Alloc_X_regular, new_r_Jmp(blk));
set_Tuple_pred(alloc, pn_Alloc_X_except, new_r_Bad(irg, mode_X));
mem = get_Call_mem(call);
blk = get_nodes_block(call);
- turn_into_tuple(call, pn_Call_max);
+ turn_into_tuple(call, pn_Call_max+1);
set_Tuple_pred(call, pn_Call_M, mem);
set_Tuple_pred(call, pn_Call_X_regular, new_r_Jmp(blk));
set_Tuple_pred(call, pn_Call_X_except, new_r_Bad(irg, mode_X));
/** A Load/Store info. */
typedef struct ldst_info_t {
- ir_node *projs[MAX_PROJ]; /**< list of Proj's of this node */
+ ir_node *projs[MAX_PROJ+1]; /**< list of Proj's of this node */
ir_node *exc_block; /**< the exception block if available */
int exc_idx; /**< predecessor index in the exception block */
unsigned visited; /**< visited counter for breaking loops */
{
ir_node *pre_call;
ir_node *post_call, *post_bl;
- ir_node *in[pn_Start_max];
+ ir_node *in[pn_Start_max+1];
ir_node *end, *end_bl, *block;
ir_node **res_pred;
ir_node **cf_pred;
in[pn_Start_X_initial_exec] = new_r_Jmp(post_bl);
in[pn_Start_P_frame_base] = get_irg_frame(irg);
in[pn_Start_T_args] = new_r_Tuple(post_bl, n_params, args_in);
- pre_call = new_r_Tuple(post_bl, pn_Start_max, in);
+ pre_call = new_r_Tuple(post_bl, pn_Start_max+1, in);
post_call = call;
/* --
/* build a Tuple for all results of the method.
* add Phi node if there was more than one Return. */
- turn_into_tuple(post_call, pn_Call_max);
+ turn_into_tuple(post_call, pn_Call_max+1);
/* First the Memory-Phi */
n_mem_phi = 0;
for (i = 0; i < arity; i++) {
memop_t *next; /**< links to the next memory op in the block in forward order. */
memop_t *prev; /**< links to the previous memory op in the block in forward order. */
unsigned flags; /**< memop flags */
- ir_node *projs[MAX_PROJ]; /**< Projs of this memory op */
+ ir_node *projs[MAX_PROJ+1]; /**< Projs of this memory op */
};
/**
val = new_rd_Conv(get_irn_dbg_info(node), block, val, mode);
mem = get_Load_mem(node);
- turn_into_tuple(node, pn_Load_max);
+ turn_into_tuple(node, pn_Load_max+1);
set_Tuple_pred(node, pn_Load_M, mem);
set_Tuple_pred(node, pn_Load_res, val);
set_Tuple_pred(node, pn_Load_X_regular, new_r_Jmp(block));
set_value(vnum, val);
mem = get_Store_mem(node);
- turn_into_tuple(node, pn_Store_max);
+ turn_into_tuple(node, pn_Store_max+1);
set_Tuple_pred(node, pn_Store_M, mem);
set_Tuple_pred(node, pn_Store_X_regular, new_r_Jmp(block));
set_Tuple_pred(node, pn_Store_X_except, new_r_Bad(irg, mode_X));
/* create a new tuple for the return values */
tuple = new_r_Tuple(block, env->n_ress, in);
- turn_into_tuple(call, pn_Call_max);
+ turn_into_tuple(call, pn_Call_max+1);
set_Tuple_pred(call, pn_Call_M, mem);
set_Tuple_pred(call, pn_Call_X_regular, jmp);
set_Tuple_pred(call, pn_Call_X_except, new_r_Bad(irg, mode_X));
{%- for input in node.ins %}
n_{{node.name}}_{{input[0]}},
{%- endfor %}
+ n_{{node.name}}_max = n_{{node.name}}_{{node.ins[-1][0]}}
} n_{{node.name}};
{% endif %}
{% if node.outs %}
pn_{{node.name}}_{{out[0]}}
{%- if out.__len__() > 2 %} = {{out[2]}}{% endif %}, /**< {{out[1]}} */
{% endfor -%}
- pn_{{node.name}}_max
+ pn_{{node.name}}_max = pn_{{node.name}}_{{node.outs[-1][0]}}
} pn_{{node.name}};
{% endif %}
{%- endfor %}