cols = the_row->cols;
min = 0;
max = the_row->n_cols;
- c = (max+min)/2;
+ c = max/2;
while (min < max) {
int idx = cols[c].col_idx;
if (idx < col)
tp = get_method_res_type(method_type, 0);
mode = get_type_mode(tp);
- /* FIXME: No floating point yet */
- /* be_abi_call_res_reg(abi, 0,
- mode_is_float(mode) ? &amd64_fp_regs[REG_F0] : &amd64_registers[REG_R0], ABI_CONTEXT_BOTH); */
+ if (mode_is_float(mode))
+ panic("float not supported yet");
be_abi_call_res_reg(abi, 0,
&amd64_registers[REG_RAX], ABI_CONTEXT_BOTH);
for (i = 0; i < n_res; ++i) {
ir_node *proj = res_projs[i];
be_abi_call_arg_t *arg = get_call_arg(call, 1, i, 0);
- long pn = i + pn_be_Call_first_res;
/* returns values on stack not supported yet */
assert(arg->in_reg);
unspeakable Proj_T from the Call. Therefore, all real argument
Proj numbers must be increased by pn_be_Call_first_res
*/
- pn = i + pn_be_Call_first_res;
+ long pn = i + pn_be_Call_first_res;
if (proj == NULL) {
ir_type *res_type = get_method_res_type(call_tp, i);
{
struct obstack *obst = env->obst;
be_operand_t o;
- be_insn_t *insn;
int i, n;
int pre_colored = 0;
- insn = OALLOCZ(obst, be_insn_t);
+ be_insn_t *insn = OALLOCZ(obst, be_insn_t);
insn->irn = irn;
insn->next_insn = sched_next(irn);
if (interferes)
continue;
- node_idx = uf_union(congruence_classes, node_idx, op_idx);
+ uf_union(congruence_classes, node_idx, op_idx);
DB((dbg, LEVEL_3, "Merge %+F and %+F congruence classes\n",
node, op));
/* one should_be_same is enough... */
if (is_Proj(curr))
return 0;
+#if 0
/* predecessors Proj's must be skipped */
if (is_Proj(pred))
pred = get_Proj_pred(pred);
-#if 0
if (env->selector->latency)
return env->selector->latency(env->selector_env, pred, pred_cycle, curr, curr_cycle);
#endif
* Register allocation is copied from the former phi
* arguments to the projs (new phi arguments).
*/
- insert_after = perm;
foreach_set(arg_set, perm_proj_t, pp) {
ir_node *proj = new_r_Proj(perm, get_irn_mode(pp->arg), pp->pos);
pp->proj = proj;
assert(arch_get_irn_register(pp->arg));
arch_set_irn_register(proj, arch_get_irn_register(pp->arg));
- insert_after = proj;
DBG((dbg, LEVEL_2, "Copy register assignment %s from %+F to %+F\n", arch_get_irn_register(pp->arg)->name, pp->arg, pp->proj));
}
}
spill.ent = spillent;
- res = set_insert(spill_t, env->spills, &spill, sizeof(spill), hash);
+ (void)set_insert(spill_t, env->spills, &spill, sizeof(spill), hash);
int arity = be_get_MemPerm_entity_arity(memperm);
for (int i = 0; i < arity; ++i) {
}
spill.ent = ent;
- res = set_insert(spill_t, env->spills, &spill, sizeof(spill), hash);
+ (void)set_insert(spill_t, env->spills, &spill, sizeof(spill), hash);
/* is 1 of the arguments a spill? */
int arity = get_irn_arity(node);
foreach_set(env->spills, spill_t, spill) {
spills[s++] = spill;
}
+ assert(s == spillcount);
for (int i = 0; i < spillcount; ++i) {
spill_t *sp1 = spills[i];
ir_node *incsp;
curr_sp = new_r_Proj(push, mode_gp, pn_ia32_Push_stack);
- mem = new_r_Proj(push, mode_M, pn_ia32_Push_M);
arch_set_irn_register(curr_sp, sp);
sched_add_after(start, push);
p = new_r_Add(block, p, one, mode);
st = new_r_Store(block, mem, p, callee, cons_none);
mem = new_r_Proj(st, mode_M, pn_Store_M);
- p = new_r_Add(block, p, four, mode);
return mem;
}
*/
static void peephole_ia32_Lea(ir_node *node)
{
- ir_graph *irg;
ir_node *base;
ir_node *index;
const arch_register_t *base_reg;
dbg_info *dbgi;
ir_node *block;
ir_node *res;
- ir_node *noreg;
- ir_node *nomem;
assert(is_ia32_Lea(node));
make_add:
dbgi = get_irn_dbg_info(node);
block = get_nodes_block(node);
- irg = get_irn_irg(node);
- noreg = ia32_new_NoReg_gp(irg);
- nomem = get_irg_no_mem(irg);
+ ir_graph *irg = get_irn_irg(node);
+ ir_node *noreg = ia32_new_NoReg_gp(irg);
+ ir_node *nomem = get_irg_no_mem(irg);
res = new_bd_ia32_Add(dbgi, block, noreg, noreg, nomem, op1, op2);
arch_set_irn_register(res, out_reg);
set_ia32_commutative(res);
make_shl:
dbgi = get_irn_dbg_info(node);
block = get_nodes_block(node);
- irg = get_irn_irg(node);
- noreg = ia32_new_NoReg_gp(irg);
- nomem = get_irg_no_mem(irg);
res = new_bd_ia32_Shl(dbgi, block, op1, op2);
arch_set_irn_register(res, out_reg);
goto exchange;
*/
static int sim_unop(x87_state *state, ir_node *n, ir_op *op)
{
- int op1_idx;
x87_simulator *sim = state->sim;
const arch_register_t *op1 = x87_get_irn_register(get_irn_n(n, 0));
const arch_register_t *out = x87_get_irn_register(n);
DB((dbg, LEVEL_1, ">>> %+F -> %s\n", n, out->name));
DEBUG_ONLY(vfp_dump_live(live);)
- op1_idx = x87_on_stack(state, arch_register_get_index(op1));
+ int op1_idx = x87_on_stack(state, arch_register_get_index(op1));
if (is_vfp_live(arch_register_get_index(op1), live)) {
/* push the operand here */
x87_create_fpush(state, n, op1_idx, 0);
op1_idx = 0;
- }
- else {
+ } else {
/* operand is dead, bring it to tos */
if (op1_idx != 0) {
x87_create_fxch(state, n, op1_idx);
- op1_idx = 0;
}
}
if (!std_sched_param_init) {
res = sched_getparam(pid, &std_sched_param);
+ if (res != 0)
+ return res;
std_sched_param_init = 1;
}
*/
static ir_tarval *condensed_to_value(mul_env *env, unsigned char *R, int r)
{
- ir_tarval *res, *tv;
- int i, j;
-
- j = 0;
- tv = get_mode_one(env->mode);
- res = NULL;
- for (i = 0; i < r; ++i) {
- j = R[i];
+ ir_tarval *tv = get_mode_one(env->mode);
+ ir_tarval *res = NULL;
+ for (int i = 0; i < r; ++i) {
+ int j = R[i];
if (j) {
ir_tarval *t = new_tarval_from_long(j, mode_Iu);
tv = tarval_shl(tv, t);
}
if (occ->flag_plus)
- strncat(buf, add, sizeof(buf)-1);
+ strncat(buf, add, sizeof(buf)-strlen(buf)-1);
return lc_arg_append(app, occ, buf, strlen(buf));
#undef A
We can call optimize_in_place_2(), as global cse has no effect on blocks.
*/
irn_verify_irg(block, irg);
- block = optimize_in_place_2(block);
+ optimize_in_place_2(block);
}
ir_node *new_d_Const_long(dbg_info *db, ir_mode *mode, long value)
(unsigned long) first,
(unsigned long) first,
(unsigned long) i-1);
- loop_node_started = false;
}
}
*/
static void verify_edge_counter(ir_node *irn, void *env)
{
- build_walker *w = (build_walker*)env;
- bitset_t *bs;
- int list_cnt;
- int ref_cnt;
- int edge_cnt;
- const struct list_head *head;
- const struct list_head *pos;
- ir_graph *irg;
-
+ build_walker *w = (build_walker*)env;
if (IGNORE_NODE(irn))
return;
- bs = (bitset_t*)get_irn_link(irn);
- list_cnt = 0;
- ref_cnt = 0;
- edge_cnt = get_irn_edge_info(irn, EDGE_KIND_NORMAL)->out_count;
- head = &get_irn_edge_info(irn, EDGE_KIND_NORMAL)->outs_head;
+ bitset_t *bs = (bitset_t*)get_irn_link(irn);
+ int list_cnt = 0;
+ int edge_cnt = get_irn_edge_info(irn, EDGE_KIND_NORMAL)->out_count;
+ const struct list_head *head
+ = &get_irn_edge_info(irn, EDGE_KIND_NORMAL)->outs_head;
/* We can iterate safely here, list heads have already been verified. */
+ const struct list_head *pos;
list_for_each(pos, head) {
++list_cnt;
}
/* check all nodes that reference us and count edges that point number
* of ins that actually point to us */
- irg = get_irn_irg(irn);
- ref_cnt = 0;
+ ir_graph *irg = get_irn_irg(irn);
+ int ref_cnt = 0;
bitset_foreach(bs, idx) {
int i, arity;
ir_node *src = get_idx_irn(irg, idx);
*/
/**
 * Apply a binary constant-folding operation to every predecessor of a Phi
 * whose predecessors are all Const nodes, combining each with the fixed
 * operand @p other.
 *
 * @param phi    the Phi node; all of its predecessors must be Const
 * @param other  the constant operand paired with each predecessor
 * @param eval   the tarval evaluator implementing the binary operation
 * @param mode   the mode of the folded result
 * @param left   nonzero if @p other is the LEFT operand, zero if it is
 *               the RIGHT operand
 *
 * @return a new Phi over the folded Const nodes, or NULL if any single
 *         fold yields tarval_bad (nothing is built in that case)
 */
static ir_node *apply_binop_on_phi(ir_node *phi, ir_tarval *other, eval_func eval, ir_mode *mode, int left)
{
	int         n   = get_irn_arity(phi);
	ir_tarval **tvs = ALLOCAN(ir_tarval*, n);
	/* First fold all operands; abort without side effects on failure. */
	if (left) {
		for (int i = 0; i < n; ++i) {
			ir_node   *pred = get_irn_n(phi, i);
			ir_tarval *tv   = get_Const_tarval(pred);
			tv = do_eval(eval, other, tv, mode);

			if (tv == tarval_bad) {
				/* folding failed, bad */
				return NULL;
			}
			tvs[i] = tv;
		}
	} else {
		for (int i = 0; i < n; ++i) {
			ir_node   *pred = get_irn_n(phi, i);
			ir_tarval *tv   = get_Const_tarval(pred);
			tv = do_eval(eval, tv, other, mode);

			if (tv == tarval_bad) {
				/* folding failed, bad
				 * (was "return 0" — use NULL for pointer returns,
				 * consistent with the left-operand branch) */
				return NULL;
			}
			tvs[i] = tv;
		}
	}
	/* All folds succeeded: materialize Const nodes and a fresh Phi. */
	ir_graph *irg = get_irn_irg(phi);
	ir_node **res = ALLOCAN(ir_node*, n);
	for (int i = 0; i < n; ++i) {
		res[i] = new_r_Const(irg, tvs[i]);
	}
	ir_node *block = get_nodes_block(phi);
	return new_r_Phi(block, n, res, mode);
}
/**
*/
/**
 * Apply a binary constant-folding operation pairwise to the predecessors
 * of two Phi nodes whose predecessors are all Const nodes.
 *
 * Both Phis must live in the same block (same predecessor order);
 * otherwise folding is refused.
 *
 * @param a     left Phi; all predecessors must be Const
 * @param b     right Phi; all predecessors must be Const
 * @param eval  the tarval evaluator implementing the binary operation
 * @param mode  the mode of the folded result
 *
 * @return a new Phi over the folded Const nodes, or NULL if the blocks
 *         differ or any single fold yields tarval_bad
 */
static ir_node *apply_binop_on_2_phis(ir_node *a, ir_node *b, eval_func eval, ir_mode *mode)
{
	ir_node *block = get_nodes_block(a);
	if (block != get_nodes_block(b))
		return NULL;

	int         n_preds = get_irn_arity(a);
	ir_tarval **folded  = ALLOCAN(ir_tarval*, n_preds);

	/* Fold each predecessor pair; bail out before building anything. */
	for (int i = 0; i < n_preds; ++i) {
		ir_tarval *left_tv  = get_Const_tarval(get_irn_n(a, i));
		ir_tarval *right_tv = get_Const_tarval(get_irn_n(b, i));
		ir_tarval *tv       = do_eval(eval, left_tv, right_tv, mode);
		if (tv == tarval_bad) {
			/* folding failed, bad */
			return NULL;
		}
		folded[i] = tv;
	}

	/* Every pair folded: wrap the tarvals in Const nodes and a new Phi. */
	ir_graph *irg   = get_irn_irg(a);
	ir_node **consts = ALLOCAN(ir_node*, n_preds);
	for (int i = 0; i < n_preds; ++i)
		consts[i] = new_r_Const(irg, folded[i]);
	return new_r_Phi(block, n_preds, consts, mode);
}
/**
*/
/**
 * Apply a unary constant-folding operation to every predecessor of a Phi
 * whose predecessors are all Const nodes.
 *
 * @param phi   the Phi node; all of its predecessors must be Const
 * @param eval  the tarval evaluator implementing the unary operation
 *
 * @return a new Phi over the folded Const nodes (in the Phi's own mode),
 *         or NULL if any single fold yields tarval_bad
 */
static ir_node *apply_unop_on_phi(ir_node *phi, ir_tarval *(*eval)(ir_tarval *))
{
	int         n   = get_irn_arity(phi);
	ir_tarval **tvs = ALLOCAN(ir_tarval*, n);
	/* Fold all operands first; abort without side effects on failure. */
	for (int i = 0; i < n; ++i) {
		ir_node   *pred = get_irn_n(phi, i);
		ir_tarval *tv   = get_Const_tarval(pred);
		tv = eval(tv);

		if (tv == tarval_bad) {
			/* folding failed, bad
			 * (was "return 0" — use NULL for pointer returns) */
			return NULL;
		}
		tvs[i] = tv;
	}
	/* All folds succeeded: build Const nodes and a fresh Phi. */
	ir_graph *irg = get_irn_irg(phi);
	ir_node **res = ALLOCAN(ir_node*, n);
	for (int i = 0; i < n; ++i) {
		res[i] = new_r_Const(irg, tvs[i]);
	}
	ir_node *block = get_nodes_block(phi);
	ir_mode *mode  = get_irn_mode(phi);
	return new_r_Phi(block, n, res, mode);
}
/**
*/
/**
 * Apply a mode conversion (Conv) to every predecessor of a Phi whose
 * predecessors are all Const nodes.
 *
 * @param phi   the Phi node; all of its predecessors must be Const
 * @param mode  the target mode of the conversion
 *
 * @return a new Phi over the converted Const nodes, or NULL if any
 *         single conversion yields tarval_bad
 */
static ir_node *apply_conv_on_phi(ir_node *phi, ir_mode *mode)
{
	int         n   = get_irn_arity(phi);
	ir_tarval **tvs = ALLOCAN(ir_tarval*, n);
	/* Convert all operands first; abort without side effects on failure. */
	for (int i = 0; i < n; ++i) {
		ir_node   *pred = get_irn_n(phi, i);
		ir_tarval *tv   = get_Const_tarval(pred);
		tv = tarval_convert_to(tv, mode);

		if (tv == tarval_bad) {
			/* folding failed, bad
			 * (was "return 0" — use NULL for pointer returns) */
			return NULL;
		}
		tvs[i] = tv;
	}
	/* All conversions succeeded: build Const nodes and a fresh Phi. */
	ir_graph *irg = get_irn_irg(phi);
	ir_node **res = ALLOCAN(ir_node*, n);
	for (int i = 0; i < n; ++i) {
		res[i] = new_r_Const(irg, tvs[i]);
	}
	ir_node *block = get_nodes_block(phi);
	return new_r_Phi(block, n, res, mode);
}
/**
}
if (edge == NULL) {
- edge = alloc_edge(pbqp, src_index, tgt_index, costs);
+ alloc_edge(pbqp, src_index, tgt_index, costs);
} else {
pbqp_matrix_add(edge->costs, costs);
}
{
const lc_opt_entry_t *grp = root;
size_t n = strlen(arg);
- size_t n_prefix = opt_prefix ? strlen(opt_prefix) : 0;
+ size_t n_prefix = opt_prefix != NULL ? strlen(opt_prefix) : 0;
int error = 0;
int ret = 0;
lc_opt_err_info_t err;
const char *end, *eqsign;
- if (n >= n_prefix && strncmp(opt_prefix, arg, n_prefix) == 0) {
+ if (n >= n_prefix && (n_prefix == 0 || strncmp(opt_prefix, arg, n_prefix) == 0)) {
arg = arg + n_prefix;
/* find the next delimiter (the -) and extract the string up to
ir_type *mtp = get_entity_type(ent);
size_t n_ress = get_method_n_ress(mtp);
size_t n_params = get_method_n_params(mtp);
- size_t n_ret_com = 0;
size_t n_param_com = 0;
ir_type *lowered_mtp, *tp, *ft;
wlk_env env;
/* calculate the number of compound returns */
- for (n_ret_com = i = 0; i < n_ress; ++i) {
+ size_t n_ret_com = 0;
+ for (i = 0; i < n_ress; ++i) {
ir_type *type = get_method_res_type(mtp, i);
if (is_compound_type(type))
++n_ret_com;
*/
static void lower_Return(ir_node *node, ir_mode *mode)
{
- ir_graph *irg = get_irn_irg(node);
- ir_entity *ent = get_irg_entity(irg);
- ir_type *mtp = get_entity_type(ent);
ir_node **in;
size_t i, j, n;
int need_conv = 0;
if (! need_conv)
return;
- ent = get_irg_entity(irg);
- mtp = get_entity_type(ent);
+ ir_graph *irg = get_irn_irg(node);
+ ir_entity *ent = get_irg_entity(irg);
+ ir_type *mtp = get_entity_type(ent);
/* create a new in array */
NEW_ARR_A(ir_node *, in, get_method_n_ress(mtp) + 1);
return 0;
char_tp = get_pointer_points_to_type(char_tp);
+ ir_node *mem = get_Call_mem(call);
if (left == right) {
/* a strcmp(s, s) ==> 0 */
- ir_graph *irg = get_irn_irg(call);
- ir_node *mem = get_Call_mem(call);
- ir_mode *mode = get_type_mode(res_tp);
+ ir_graph *irg = get_irn_irg(call);
+ ir_mode *mode = get_type_mode(res_tp);
irn = new_r_Const(irg, get_mode_null(mode));
DBG_OPT_ALGSIM0(call, irn, FS_OPT_RTS_STRCMP);
} else if (ent_r != NULL) {
if (is_empty_string(ent_r)) {
/* s strcmp(s, "") ==> (*s) */
- ir_node *mem, *block;
+ ir_node *block;
dbg_info *dbg;
ir_mode *mode;
}
if (irn != NULL) {
- ir_node *mem = get_Call_mem(call);
DBG_OPT_ALGSIM0(call, irn, FS_OPT_RTS_STRCMP);
replace_call(irn, call, mem, reg, exc);
return 1;
switch (opcode) {
case iro_Div:
- mode = get_Div_resmode(n);
operand_mode = get_irn_mode(get_Div_left(n));
/* fall through */
case iro_Add:
*/
/**
 * Lower a floating-point Const node to the lowered (e.g. integer-packed)
 * mode: rewrite the node's mode and re-create its tarval in the lowered
 * mode via a textual hex round-trip.
 *
 * Non-float Consts are left untouched.
 *
 * @param n  the Const node to lower in place
 */
static void lower_Const(ir_node *n)
{
	ir_mode *mode = get_irn_mode(n);
	if (!mode_is_float(mode))
		return;

	ir_mode *lowered_mode = get_lowered_mode(mode);
	set_irn_mode(n, lowered_mode);

	/* Print the old tarval in hex and reparse it in the lowered mode,
	 * so the bit pattern survives the mode change. */
	set_tarval_mode_output_option(mode, &hex_output);
	char buf[100];
	tarval_snprintf(buf, sizeof(buf), get_Const_tarval(n));

	ir_tarval *lowered_tv = new_tarval_from_str(buf, strlen(buf), lowered_mode);
	set_Const_tarval(n, lowered_tv);
}
for (i = get_irn_arity(irn) - 1; i >= 0; --i) {
ir_node *pred = get_irn_n(irn, i);
ir_node *block = get_nodes_block(skip_Proj(pred));
- node_t *p_node;
if (block != bl->block) {
- p_node = create_node(pred, bl, env);
+ node_t *p_node = create_node(pred, bl, env);
if (is_input_node(pred, irn, i)) {
/* is a block live input */
p_node->is_input = 1;
}
} else if (! irn_visited_else_mark(pred)) {
/* not yet visited, ok */
- p_node = create_node(pred, bl, env);
+ create_node(pred, bl, env);
if (is_Phi(pred)) {
/* update the Phi list */
/* Inlines a method at the given call site. */
int inline_method(ir_node *call, ir_graph *called_graph)
{
- ir_node *pre_call;
- ir_node *post_call, *post_bl;
- ir_node *in[pn_Start_max+1];
- ir_node *end, *end_bl, *block;
- ir_node **res_pred;
- ir_node **cf_pred;
- ir_node **args_in;
- ir_node *ret, *phi;
- int arity, n_ret, n_exc, n_res, i, j, rem_opt;
- int irn_arity, n_params;
- int n_mem_phi;
- enum exc_mode exc_handling;
- ir_type *mtp;
- ir_type *ctp;
- ir_entity *ent;
- ir_graph *rem;
- ir_graph *irg = get_irn_irg(call);
-
/* we cannot inline some types of calls */
if (! can_inline(call, called_graph))
return 0;
/* We cannot inline a recursive call. The graph must be copied before
* the call the inline_method() using create_irg_copy(). */
+ ir_graph *irg = get_irn_irg(call);
if (called_graph == irg)
return 0;
- ent = get_irg_entity(called_graph);
- mtp = get_entity_type(ent);
- ctp = get_Call_type(call);
- n_params = get_method_n_params(mtp);
- n_res = get_method_n_ress(mtp);
+ ir_entity *ent = get_irg_entity(called_graph);
+ ir_type *mtp = get_entity_type(ent);
+ ir_type *ctp = get_Call_type(call);
+ int n_params = get_method_n_params(mtp);
- rem = current_ir_graph;
+ ir_graph *rem = current_ir_graph;
current_ir_graph = irg;
DB((dbg, LEVEL_1, "Inlining %+F(%+F) into %+F\n", call, called_graph, irg));
/* optimizations can cause problems when allocating new nodes */
- rem_opt = get_opt_optimize();
+ int rem_opt = get_opt_optimize();
set_optimize(0);
/* Handle graph state */
exc_handling:
0 There is a handler.
2 Exception handling not represented in Firm. -- */
- {
- ir_node *Xproj = NULL;
- ir_node *proj;
- for (proj = (ir_node*)get_irn_link(call); proj != NULL;
- proj = (ir_node*)get_irn_link(proj)) {
- long proj_nr = get_Proj_proj(proj);
- if (proj_nr == pn_Call_X_except) Xproj = proj;
- }
- exc_handling = Xproj != NULL ? exc_handler : exc_no_handler;
+ ir_node *Xproj = NULL;
+ for (ir_node *proj = (ir_node*)get_irn_link(call); proj != NULL;
+ proj = (ir_node*)get_irn_link(proj)) {
+ long proj_nr = get_Proj_proj(proj);
+ if (proj_nr == pn_Call_X_except) Xproj = proj;
}
+ enum exc_mode exc_handling = Xproj != NULL ? exc_handler : exc_no_handler;
/* create the argument tuple */
- args_in = ALLOCAN(ir_node*, n_params);
+ ir_node **args_in = ALLOCAN(ir_node*, n_params);
- block = get_nodes_block(call);
- for (i = n_params - 1; i >= 0; --i) {
+ ir_node *block = get_nodes_block(call);
+ for (int i = n_params - 1; i >= 0; --i) {
ir_node *arg = get_Call_param(call, i);
ir_type *param_tp = get_method_param_type(mtp, i);
ir_mode *mode = get_type_mode(param_tp);
/* the procedure and later replaces the Start node of the called graph.
* Post_call is the old Call node and collects the results of the called
* graph. Both will end up being a tuple. */
- post_bl = get_nodes_block(call);
+ ir_node *post_bl = get_nodes_block(call);
/* XxMxPxPxPxT of Start + parameter of Call */
+ ir_node *in[pn_Start_max+1];
in[pn_Start_M] = get_Call_mem(call);
in[pn_Start_X_initial_exec] = new_r_Jmp(post_bl);
in[pn_Start_P_frame_base] = get_irg_frame(irg);
in[pn_Start_T_args] = new_r_Tuple(post_bl, n_params, args_in);
- pre_call = new_r_Tuple(post_bl, pn_Start_max+1, in);
- post_call = call;
+ ir_node *pre_call = new_r_Tuple(post_bl, pn_Start_max+1, in);
+ ir_node *post_call = call;
/* --
The new block gets the ins of the old block, pre_call and all its
* node, similar for singleton nodes like NoMem and Bad.
* Note: this will prohibit predecessors to be copied - only do it for
* nodes without predecessors */
- {
- ir_node *start_block;
- ir_node *start;
- ir_node *nomem;
-
- start_block = get_irg_start_block(called_graph);
- set_new_node(start_block, get_nodes_block(pre_call));
- mark_irn_visited(start_block);
-
- start = get_irg_start(called_graph);
- set_new_node(start, pre_call);
- mark_irn_visited(start);
-
- nomem = get_irg_no_mem(called_graph);
- set_new_node(nomem, get_irg_no_mem(irg));
- mark_irn_visited(nomem);
- }
+ ir_node *start_block = get_irg_start_block(called_graph);
+ set_new_node(start_block, get_nodes_block(pre_call));
+ mark_irn_visited(start_block);
+
+ ir_node *start = get_irg_start(called_graph);
+ set_new_node(start, pre_call);
+ mark_irn_visited(start);
+
+ ir_node *nomem = get_irg_no_mem(called_graph);
+ set_new_node(nomem, get_irg_no_mem(irg));
+ mark_irn_visited(nomem);
/* entitiy link is used to link entities on old stackframe to the
* new stackframe */
*/
/* Precompute some values */
- end_bl = get_new_node(get_irg_end_block(called_graph));
- end = get_new_node(get_irg_end(called_graph));
- arity = get_Block_n_cfgpreds(end_bl); /* arity = n_exc + n_ret */
- n_res = get_method_n_ress(get_Call_type(call));
+ ir_node *end_bl = get_new_node(get_irg_end_block(called_graph));
+ ir_node *end = get_new_node(get_irg_end(called_graph));
+ int arity = get_Block_n_cfgpreds(end_bl); /* arity = n_exc + n_ret */
+ int n_res = get_method_n_ress(get_Call_type(call));
- res_pred = XMALLOCN(ir_node*, n_res);
- cf_pred = XMALLOCN(ir_node*, arity);
+ ir_node **res_pred = XMALLOCN(ir_node*, n_res);
+ ir_node **cf_pred = XMALLOCN(ir_node*, arity);
/* archive keepalives */
- irn_arity = get_irn_arity(end);
- for (i = 0; i < irn_arity; i++) {
+ int irn_arity = get_irn_arity(end);
+ for (int i = 0; i < irn_arity; i++) {
ir_node *ka = get_End_keepalive(end, i);
if (! is_Bad(ka))
add_End_keepalive(get_irg_end(irg), ka);
}
/* replace Return nodes by Jump nodes */
- n_ret = 0;
- for (i = 0; i < arity; i++) {
- ir_node *ret;
- ret = get_Block_cfgpred(end_bl, i);
+ int n_ret = 0;
+ for (int i = 0; i < arity; i++) {
+ ir_node *ret = get_Block_cfgpred(end_bl, i);
if (is_Return(ret)) {
ir_node *block = get_nodes_block(ret);
cf_pred[n_ret] = new_r_Jmp(block);
* add Phi node if there was more than one Return. */
turn_into_tuple(post_call, pn_Call_max+1);
/* First the Memory-Phi */
- n_mem_phi = 0;
- for (i = 0; i < arity; i++) {
- ret = get_Block_cfgpred(end_bl, i);
+ int n_mem_phi = 0;
+ for (int i = 0; i < arity; i++) {
+ ir_node *ret = get_Block_cfgpred(end_bl, i);
if (is_Return(ret)) {
cf_pred[n_mem_phi++] = get_Return_mem(ret);
}
cf_pred[n_mem_phi++] = new_r_Proj(ret, mode_M, 1);
}
}
- phi = new_r_Phi(post_bl, n_mem_phi, cf_pred, mode_M);
+ ir_node *phi = new_r_Phi(post_bl, n_mem_phi, cf_pred, mode_M);
set_Tuple_pred(call, pn_Call_M, phi);
/* Conserve Phi-list for further inlinings -- but might be optimized */
if (get_nodes_block(phi) == post_bl) {
}
/* Now the real results */
if (n_res > 0) {
- ir_node *result_tuple;
- for (j = 0; j < n_res; j++) {
+ for (int j = 0; j < n_res; j++) {
ir_type *res_type = get_method_res_type(ctp, j);
ir_mode *res_mode = get_type_mode(res_type);
- n_ret = 0;
- for (i = 0; i < arity; i++) {
- ret = get_Block_cfgpred(end_bl, i);
+ int n_ret = 0;
+ for (int i = 0; i < arity; i++) {
+ ir_node *ret = get_Block_cfgpred(end_bl, i);
if (is_Return(ret)) {
ir_node *res = get_Return_res(ret, j);
if (get_irn_mode(res) != res_mode) {
set_Block_phis(post_bl, phi);
}
}
- result_tuple = new_r_Tuple(post_bl, n_res, res_pred);
+ ir_node *result_tuple = new_r_Tuple(post_bl, n_res, res_pred);
set_Tuple_pred(call, pn_Call_T_result, result_tuple);
} else {
set_Tuple_pred(call, pn_Call_T_result, new_r_Bad(irg, mode_T));
branches to the End node.
*/
if (exc_handling == exc_handler) {
- n_exc = 0;
- for (i = 0; i < arity; i++) {
- ir_node *ret, *irn;
- ret = get_Block_cfgpred(end_bl, i);
- irn = skip_Proj(ret);
+ int n_exc = 0;
+ for (int i = 0; i < arity; i++) {
+ ir_node *ret = get_Block_cfgpred(end_bl, i);
+ ir_node *irn = skip_Proj(ret);
if (is_fragile_op(irn) || is_Raise(irn)) {
cf_pred[n_exc] = ret;
++n_exc;
set_Tuple_pred(call, pn_Call_X_except, new_r_Bad(irg, mode_X));
}
} else {
- ir_node *main_end_bl;
- int main_end_bl_arity;
- ir_node **end_preds;
-
/* assert(exc_handling == 1 || no exceptions. ) */
- n_exc = 0;
- for (i = 0; i < arity; i++) {
+ int n_exc = 0;
+ for (int i = 0; i < arity; i++) {
ir_node *ret = get_Block_cfgpred(end_bl, i);
ir_node *irn = skip_Proj(ret);
n_exc++;
}
}
- main_end_bl = get_irg_end_block(irg);
- main_end_bl_arity = get_irn_arity(main_end_bl);
- end_preds = XMALLOCN(ir_node*, n_exc + main_end_bl_arity);
+ ir_node *main_end_bl = get_irg_end_block(irg);
+ int main_end_bl_arity = get_irn_arity(main_end_bl);
+ ir_node **end_preds = XMALLOCN(ir_node*, n_exc+main_end_bl_arity);
- for (i = 0; i < main_end_bl_arity; ++i)
+ for (int i = 0; i < main_end_bl_arity; ++i)
end_preds[i] = get_irn_n(main_end_bl, i);
- for (i = 0; i < n_exc; ++i)
+ for (int i = 0; i < n_exc; ++i)
end_preds[main_end_bl_arity + i] = cf_pred[i];
set_irn_in(main_end_bl, n_exc + main_end_bl_arity, end_preds);
set_Tuple_pred(call, pn_Call_X_except, new_r_Bad(irg, mode_X));
if (! rbitset_is_set(env.curr_set, pos))
env.curr_id_2_memop[pos] = NULL;
else {
- ir_node *pred = get_Block_cfgpred_block(bl->block, 0);
- block_t *pred_bl = get_block_entry(pred);
- int need_phi = 0;
- memop_t *first = NULL;
- ir_mode *mode = NULL;
+ int need_phi = 0;
+ memop_t *first = NULL;
+ ir_mode *mode = NULL;
for (i = 0; i < n; ++i) {
- memop_t *mop;
-
- pred = get_Block_cfgpred_block(bl->block, i);
- pred_bl = get_block_entry(pred);
+ ir_node *pred = get_Block_cfgpred_block(bl->block, i);
+ block_t *pred_bl = get_block_entry(pred);
- mop = pred_bl->id_2_memop_avail[pos];
+ memop_t *mop = pred_bl->id_2_memop_avail[pos];
if (first == NULL) {
first = mop;
ins[0] = first->value.value;
hmap->heavy_uses = entry->next;
entry->next = *adr;
*adr = entry;
- entry = hmap->heavy_uses;
/* we have changed the list, check the next one */
goto restart;
*/
static int concretize_selected_entity(ir_node *sel)
{
- ir_node *cast, *ptr = get_Sel_ptr(sel);
- ir_type *orig_tp, *cast_tp;
- ir_entity *new_ent, *sel_ent;
- int res = 0;
-
- sel_ent = get_Sel_entity(sel);
- cast = get_Sel_ptr(sel);
+ int res = 0;
+ ir_entity *sel_ent = get_Sel_entity(sel);
+ ir_node *cast = get_Sel_ptr(sel);
while (is_Cast(cast)) {
- cast_tp = get_Cast_type(cast);
- ptr = get_Cast_op(cast);
- orig_tp = get_irn_typeinfo_type(ptr);
+ ir_type *cast_tp = get_Cast_type(cast);
+ ir_node *ptr = get_Cast_op(cast);
+ ir_type *orig_tp = get_irn_typeinfo_type(ptr);
/* we handle only classes */
if (!is_Pointer_type(orig_tp)|| !is_Pointer_type(cast_tp))
if (get_class_member_index(cast_tp, sel_ent) == (size_t)-1)
return res;
- new_ent = resolve_ent_polymorphy(orig_tp, sel_ent);
+ ir_entity *new_ent = resolve_ent_polymorphy(orig_tp, sel_ent);
/* New ent must be member of orig_tp. */
if (get_class_member_index(orig_tp, new_ent) == (size_t)-1)
fname[0] = '\0';
p = name;
} /* if */
- strncat(fname, "firmstat-", sizeof(fname)-1);
- strncat(fname, phase, sizeof(fname)-1);
- strncat(fname, "-", sizeof(fname)-1);
- strncat(fname, p, sizeof(fname)-1);
+ strncat(fname, "firmstat-", sizeof(fname)-strlen(fname)-1);
+ strncat(fname, phase, sizeof(fname)-strlen(fname)-1);
+ strncat(fname, "-", sizeof(fname)-strlen(fname)-1);
+ strncat(fname, p, sizeof(fname)-strlen(fname)-1);
stat_dump_init(fname);
ir_graph *const_irg = get_const_code_irg();
if (entry->irg == const_irg) {
- name = "<Const code Irg>";
return;
} else {
if (entry->ent)
fp_value *fc_rnd(const fp_value *a, fp_value *result)
{
- if (result == NULL) result = calc_buffer;
-
- (void) a;
+ (void)a;
+ (void)result;
TRACEPRINTF(("%s ", fc_print(a, buffer, sizeof(buffer), FC_PACKED)));
TRACEPRINTF(("rounded to integer "));