* @remark This macro may change arr, so update all references!
*/
#define ARR_EXTO(type, arr, n) \
- ((n) >= ARR_LEN((arr)) ? ARR_RESIZE(type, (arr), (n)+1) : (arr))
+ do { \
+ if ((n) >= ARR_LEN(arr)) { ARR_RESIZE(type, arr, (n)+1); } \
+ } while(0)
/**
* Append one element to a flexible array.
#define hook_exec(what, args) do { \
hook_entry_t *_p; \
for (_p = hooks[what]; _p; _p = _p->next){ \
- void *ctx = _p->context; \
+ void *hook_ctx_ = _p->context; \
_p->hook._##what args; \
} \
} while (0)
-#define hook_new_ir_op(op) hook_exec(hook_new_ir_op, (ctx, op))
-#define hook_free_ir_op(op) hook_exec(hook_free_ir_op, (ctx, op))
-#define hook_new_node(graph, node) hook_exec(hook_new_node, (ctx, graph, node))
+#define hook_new_ir_op(op) hook_exec(hook_new_ir_op, (hook_ctx_, op))
+#define hook_free_ir_op(op) hook_exec(hook_free_ir_op, (hook_ctx_, op))
+#define hook_new_node(graph, node) hook_exec(hook_new_node, (hook_ctx_, graph, node))
#define hook_set_irn_n(src, pos, tgt, old_tgt) \
- hook_exec(hook_set_irn_n, (ctx, src, pos, tgt, old_tgt))
-#define hook_replace(old, nw) hook_exec(hook_replace, (ctx, old, nw))
-#define hook_turn_into_id(node) hook_exec(hook_turn_into_id, (ctx, node))
-#define hook_normalize(node) hook_exec(hook_normalize, (ctx, node))
-#define hook_new_graph(irg, ent) hook_exec(hook_new_graph, (ctx, irg, ent))
-#define hook_free_graph(irg) hook_exec(hook_free_graph, (ctx, irg))
-#define hook_irg_walk(irg, pre, post) hook_exec(hook_irg_walk, (ctx, irg, pre, post))
+ hook_exec(hook_set_irn_n, (hook_ctx_, src, pos, tgt, old_tgt))
+#define hook_replace(old, nw) hook_exec(hook_replace, (hook_ctx_, old, nw))
+#define hook_turn_into_id(node) hook_exec(hook_turn_into_id, (hook_ctx_, node))
+#define hook_normalize(node) hook_exec(hook_normalize, (hook_ctx_, node))
+#define hook_new_graph(irg, ent) hook_exec(hook_new_graph, (hook_ctx_, irg, ent))
+#define hook_free_graph(irg) hook_exec(hook_free_graph, (hook_ctx_, irg))
+#define hook_irg_walk(irg, pre, post) hook_exec(hook_irg_walk, (hook_ctx_, irg, pre, post))
#define hook_irg_walk_blkwise(irg, pre, post) \
- hook_exec(hook_irg_walk_blkwise, (ctx, irg, pre, post))
+ hook_exec(hook_irg_walk_blkwise, (hook_ctx_, irg, pre, post))
#define hook_irg_block_walk(irg, node, pre, post) \
- hook_exec(hook_irg_block_walk, (ctx, irg, node, pre, post))
+ hook_exec(hook_irg_block_walk, (hook_ctx_, irg, node, pre, post))
#define hook_merge_nodes(new_node_array, new_num_entries, old_node_array, old_num_entries, opt) \
- hook_exec(hook_merge_nodes, (ctx, new_node_array, new_num_entries, old_node_array, old_num_entries, opt))
-#define hook_reassociate(start) hook_exec(hook_reassociate, (ctx, start))
-#define hook_lower(node) hook_exec(hook_lower, (ctx, node))
-#define hook_inline(call, irg) hook_exec(hook_inline, (ctx, call, irg))
-#define hook_tail_rec(irg, n_calls) hook_exec(hook_tail_rec, (ctx, irg, n_calls))
+ hook_exec(hook_merge_nodes, (hook_ctx_, new_node_array, new_num_entries, old_node_array, old_num_entries, opt))
+#define hook_reassociate(start) hook_exec(hook_reassociate, (hook_ctx_, start))
+#define hook_lower(node) hook_exec(hook_lower, (hook_ctx_, node))
+#define hook_inline(call, irg) hook_exec(hook_inline, (hook_ctx_, call, irg))
+#define hook_tail_rec(irg, n_calls) hook_exec(hook_tail_rec, (hook_ctx_, irg, n_calls))
#define hook_strength_red(irg, node) \
- hook_exec(hook_strength_red, (ctx, irg, node))
-#define hook_dead_node_elim(irg, start) hook_exec(hook_dead_node_elim, (ctx, irg, start))
+ hook_exec(hook_strength_red, (hook_ctx_, irg, node))
+#define hook_dead_node_elim(irg, start) hook_exec(hook_dead_node_elim, (hook_ctx_, irg, start))
#define hook_dead_node_elim_subst(irg, old, nw) \
- hook_exec(hook_dead_node_elim_subst, (ctx, irg, old, nw))
+ hook_exec(hook_dead_node_elim_subst, (hook_ctx_, irg, old, nw))
#define hook_if_conversion(irg, phi, pos, mux, reason) \
- hook_exec(hook_if_conversion, (ctx, irg, phi, pos, mux, reason))
+ hook_exec(hook_if_conversion, (hook_ctx_, irg, phi, pos, mux, reason))
#define hook_func_call(irg, call) \
- hook_exec(hook_func_call, (ctx, irg, call))
+ hook_exec(hook_func_call, (hook_ctx_, irg, call))
#define hook_arch_dep_replace_mul_with_shifts(irn) \
- hook_exec(hook_arch_dep_replace_mul_with_shifts, (ctx, irn))
+ hook_exec(hook_arch_dep_replace_mul_with_shifts, (hook_ctx_, irn))
#define hook_arch_dep_replace_division_by_const(irn) \
- hook_exec(hook_arch_dep_replace_division_by_const, (ctx, irn))
-#define hook_new_mode(tmpl, mode) hook_exec(hook_new_mode, (ctx, tmpl, mode))
-#define hook_new_entity(ent) hook_exec(hook_new_entity, (ctx, ent))
-#define hook_new_type(tp) hook_exec(hook_new_type, (ctx, tp))
-#define hook_node_info(F, node) hook_exec(hook_node_info, (ctx, F, node))
+ hook_exec(hook_arch_dep_replace_division_by_const, (hook_ctx_, irn))
+#define hook_new_mode(tmpl, mode) hook_exec(hook_new_mode, (hook_ctx_, tmpl, mode))
+#define hook_new_entity(ent) hook_exec(hook_new_entity, (hook_ctx_, ent))
+#define hook_new_type(tp) hook_exec(hook_new_type, (hook_ctx_, tp))
+#define hook_node_info(F, node) hook_exec(hook_node_info, (hook_ctx_, F, node))
#include "end.h"
}
} else if (is_Sel(ptr) && get_irp_callee_info_state() == irg_callee_info_consistent) {
		/* it may be a polymorphic call, but callee information is available */
- int i, n_params = get_Call_n_params(succ);
+ int n_params = get_Call_n_params(succ);
+ int c;
/* simply look into ALL possible callees */
- for (i = get_Call_n_callees(succ) - 1; i >= 0; --i) {
- meth_ent = get_Call_callee(succ, i);
+ for (c = get_Call_n_callees(succ) - 1; c >= 0; --c) {
+ meth_ent = get_Call_callee(succ, c);
/* unknown_entity is used to signal that we don't know what is called */
if (meth_ent == unknown_entity) {
*/
static void free_mark(ir_node *node, eset * set)
{
- size_t i, n;
-
if (get_irn_link(node) == MARK)
return; /* already visited */
case iro_Sel: {
ir_entity *ent = get_Sel_entity(node);
if (is_method_entity(ent)) {
+ size_t i, n;
for (i = 0, n = get_Sel_n_methods(node); i < n; ++i) {
eset_insert(set, get_Sel_method(node, i));
}
static void free_ana_walker(ir_node *node, void *env)
{
eset *set = (eset*) env;
- int i;
if (get_irn_link(node) == MARK) {
/* already visited */
}
break;
}
- default:
+ default: {
+ int i;
		/* other nodes: we assume all other nodes are traitors
		 * until someone implements the contrary. */
set_irn_link(node, MARK);
}
break;
}
+ }
}
/**
/* let's check if it's the address of a function */
if (is_Global(irn)) {
- ir_entity *ent = get_Global_entity(irn);
+ ir_entity *ent2 = get_Global_entity(irn);
- if (is_Method_type(get_entity_type(ent)))
- eset_insert(set, ent);
+ if (is_Method_type(get_entity_type(ent2)))
+ eset_insert(set, ent2);
}
}
}
}
for (i = 0; i < n; ++i) {
- dfs_node_t *node = nodes[i];
+ node = nodes[i];
ir_fprintf(file, "\tn%d [label=\"%d\"]\n", node->pre_num, get_Block_dom_tree_pre_num((ir_node*) node->node));
#if 0
ir_fprintf(file, "\tn%d [shape=box,label=\"%+F\\l%d %d/%d %d\"];\n",
for (idx = dfs_get_n_nodes(dfs) - 1; idx >= 0; --idx) {
ir_node *bb = (ir_node *) dfs_get_post_num_node(dfs, size - idx - 1);
- freq_t *freq;
int i;
freq = set_insert_freq(freqs, bb);
ef->max = 0.0;
set_foreach(freqs, freq_t*, freq) {
- int idx = freq->idx;
+ idx = freq->idx;
/* take abs because it sometimes can be -0 in case of endless loops */
freq->freq = fabs(x[idx]) * norm;
* left == Const and we found a movable user of left in a
* dominator of the Cond block
*/
- const ir_edge_t *edge, *next;
- for (edge = get_irn_out_edge_first(user); edge; edge = next) {
- ir_node *usr_of_usr = get_edge_src_irn(edge);
- int npos = get_edge_src_pos(edge);
- ir_node *blk = get_effective_use_block(usr_of_usr, npos);
-
- next = get_irn_out_edge_next(user, edge);
- if (block_dominates(block, blk)) {
+ const ir_edge_t *user_edge;
+ const ir_edge_t *user_next;
+ foreach_out_edge_safe(user, user_edge, user_next) {
+ ir_node *usr_of_usr = get_edge_src_irn(user_edge);
+ int npos = get_edge_src_pos(user_edge);
+ ir_node *user_blk = get_effective_use_block(usr_of_usr, npos);
+
+ if (block_dominates(block, user_blk)) {
/*
* The user of the user is dominated by our true/false
* block. So, create a copy of user WITH the constant
res |= determine_entity_usage(succ, entity);
break;
case iro_Sel: {
- ir_entity *entity = get_Sel_entity(succ);
+ ir_entity *sel_entity = get_Sel_entity(succ);
/* this analysis can't handle unions correctly */
- if (is_Union_type(get_entity_owner(entity))) {
+ if (is_Union_type(get_entity_owner(sel_entity))) {
res |= ir_usage_unknown;
break;
}
/* Check the successor of irn. */
- res |= determine_entity_usage(succ, entity);
+ res |= determine_entity_usage(succ, sel_entity);
break;
}
/* let's check if it's an address */
if (is_Global(irn)) {
- ir_entity *ent = get_Global_entity(irn);
- set_entity_usage(ent, ir_usage_unknown);
+ ir_entity *symconst_ent = get_Global_entity(irn);
+ set_entity_usage(symconst_ent, ir_usage_unknown);
}
}
}
reg->succ[0] = exit;
DEBUG_ONLY({
- size_t i;
DB((dbg, LEVEL_2, " Created %s(%u)\n", reg->type == ir_rk_Switch ? "Switch" : "Case", reg->nr));
for (i = 1; i < ARR_LEN(reg->parts); ++i) {
DB((dbg, LEVEL_2, " Case(%lu)\n", reg->parts[i].region->nr));
/* check for Switch, case */
if (k > 0) {
ir_region *rexit = NULL;
- size_t i, p = 0;
+ size_t i, pos = 0;
nset = NULL; nset_len = 0;
for (i = k; i > 0;) {
n = get_region_succ(node, i--);
if (get_region_n_succs(n) != 1) {
/* must be the exit */
rexit = n;
- ++p;
- if (p > 1)
+ ++pos;
+ if (pos > 1)
break;
}
}
- if (p <= 1) {
+ if (pos <= 1) {
ir_region_kind kind = ir_rk_Case;
ir_region *pos_exit_1 = NULL;
ir_region *pos_exit_2 = NULL;
/* FIXME: No floating point yet */
/* be_abi_call_res_reg(abi, 0,
- mode_is_float(mode) ? &amd64_fp_regs[REG_F0] : &amd64_registers[REG_R0], ABI_CONTEXT_BOTH) */;
+ mode_is_float(mode) ? &amd64_fp_regs[REG_F0] : &amd64_registers[REG_R0], ABI_CONTEXT_BOTH); */
be_abi_call_res_reg(abi, 0,
&amd64_registers[REG_RAX], ABI_CONTEXT_BOTH);
REG_FL,
};
-/* determine how function parameters and return values are passed. */
+static const arch_register_t* const param_regs[] = {
+ &arm_registers[REG_R0],
+ &arm_registers[REG_R1],
+ &arm_registers[REG_R2],
+ &arm_registers[REG_R3]
+};
+
+static const arch_register_t* const result_regs[] = {
+ &arm_registers[REG_R0],
+ &arm_registers[REG_R1],
+ &arm_registers[REG_R2],
+ &arm_registers[REG_R3]
+};
+
+static const arch_register_t* const float_result_regs[] = {
+ &arm_registers[REG_F0],
+ &arm_registers[REG_F1]
+};
+
calling_convention_t *arm_decide_calling_convention(const ir_graph *irg,
ir_type *function_type)
{
- int stack_offset = 0;
+ unsigned stack_offset = 0;
+ unsigned n_param_regs_used = 0;
reg_or_stackslot_t *params;
reg_or_stackslot_t *results;
int n_param_regs
const arch_register_t *reg = param_regs[regnum++];
param->reg1 = reg;
} else {
- ir_mode *mode = param_regs[0]->reg_class->mode;
- ir_type *type = get_type_for_mode(mode);
- param->type = type;
- param->offset = stack_offset;
- assert(get_mode_size_bits(mode) == 32);
+ ir_mode *pmode = param_regs[0]->reg_class->mode;
+ ir_type *type = get_type_for_mode(pmode);
+ param->type = type;
+ param->offset = stack_offset;
+ assert(get_mode_size_bits(pmode) == 32);
stack_offset += 4;
}
}
}
+ n_param_regs_used = regnum;
n_results = get_method_n_ress(function_type);
regnum = 0;
cconv = XMALLOCZ(calling_convention_t);
cconv->parameters = params;
cconv->param_stack_size = stack_offset;
+ cconv->n_reg_params = n_param_regs_used;
cconv->results = results;
/* setup allocatable registers */
#include "../be_types.h"
#include "gen_arm_regalloc_if.h"
-static const arch_register_t *const callee_saves[] = {
- &arm_registers[REG_R4],
- &arm_registers[REG_R5],
- &arm_registers[REG_R6],
- &arm_registers[REG_R7],
- &arm_registers[REG_R8],
- &arm_registers[REG_R9],
- &arm_registers[REG_R10],
- &arm_registers[REG_R11],
- &arm_registers[REG_LR],
-};
-
-static const arch_register_t *const caller_saves[] = {
- &arm_registers[REG_R0],
- &arm_registers[REG_R1],
- &arm_registers[REG_R2],
- &arm_registers[REG_R3],
- &arm_registers[REG_LR],
-
- &arm_registers[REG_F0],
- &arm_registers[REG_F1],
- &arm_registers[REG_F2],
- &arm_registers[REG_F3],
- &arm_registers[REG_F4],
- &arm_registers[REG_F5],
- &arm_registers[REG_F6],
- &arm_registers[REG_F7],
-};
-
-static const arch_register_t* const param_regs[] = {
- &arm_registers[REG_R0],
- &arm_registers[REG_R1],
- &arm_registers[REG_R2],
- &arm_registers[REG_R3]
-};
-
-static const arch_register_t* const result_regs[] = {
- &arm_registers[REG_R0],
- &arm_registers[REG_R1],
- &arm_registers[REG_R2],
- &arm_registers[REG_R3]
-};
-
-static const arch_register_t* const float_result_regs[] = {
- &arm_registers[REG_F0],
- &arm_registers[REG_F1]
-};
-
/** information about a single parameter or result */
typedef struct reg_or_stackslot_t
{
const arch_register_t *reg1; /**< if != NULL, the second register used. */
ir_type *type; /**< indicates that an entity of the specific
type is needed */
- int offset; /**< if transmitted via stack, the offset for this parameter. */
+ unsigned offset; /**< if transmitted via stack, the offset for this parameter. */
ir_entity *entity; /**< entity in frame type */
} reg_or_stackslot_t;
typedef struct calling_convention_t
{
reg_or_stackslot_t *parameters; /**< parameter info. */
- int param_stack_size; /**< needed stack size for parameters */
+ unsigned param_stack_size; /**< needed stack size for parameters */
+ unsigned n_reg_params;
reg_or_stackslot_t *results; /**< result info. */
} calling_convention_t;
be_emit_write_line();
} else {
ir_tarval *tv = entry->u.tv;
- int i, size = get_mode_size_bytes(get_tarval_mode(tv));
- unsigned v;
+ int vi;
+ int size = get_mode_size_bytes(get_tarval_mode(tv));
/* beware: ARM fpa uses big endian format */
- for (i = ((size + 3) & ~3) - 4; i >= 0; i -= 4) {
+ for (vi = ((size + 3) & ~3) - 4; vi >= 0; vi -= 4) {
/* get 32 bits */
- v = get_tarval_sub_bits(tv, i+3);
- v = (v << 8) | get_tarval_sub_bits(tv, i+2);
- v = (v << 8) | get_tarval_sub_bits(tv, i+1);
- v = (v << 8) | get_tarval_sub_bits(tv, i+0);
+ unsigned v;
+ v = get_tarval_sub_bits(tv, vi+3);
+ v = (v << 8) | get_tarval_sub_bits(tv, vi+2);
+ v = (v << 8) | get_tarval_sub_bits(tv, vi+1);
+ v = (v << 8) | get_tarval_sub_bits(tv, vi+0);
be_emit_irprintf("\t.word\t%u\n", v);
be_emit_write_line();
}
first = node;
block = get_nodes_block(node);
for (cnt = 1; cnt < v.ops; ++cnt) {
- int value = sign * arm_ror(v.values[cnt], v.rors[cnt]);
- ir_node *next = be_new_IncSP(&arm_registers[REG_SP], block, node,
+ int value = sign * arm_ror(v.values[cnt], v.rors[cnt]);
+ ir_node *incsp = be_new_IncSP(&arm_registers[REG_SP], block, node,
value, 1);
- sched_add_after(node, next);
- node = next;
+ sched_add_after(node, incsp);
+ node = incsp;
}
/* reattach IncSP users */
static pmap *node_to_stack;
+static const arch_register_t *const callee_saves[] = {
+ &arm_registers[REG_R4],
+ &arm_registers[REG_R5],
+ &arm_registers[REG_R6],
+ &arm_registers[REG_R7],
+ &arm_registers[REG_R8],
+ &arm_registers[REG_R9],
+ &arm_registers[REG_R10],
+ &arm_registers[REG_R11],
+ &arm_registers[REG_LR],
+};
+
+static const arch_register_t *const caller_saves[] = {
+ &arm_registers[REG_R0],
+ &arm_registers[REG_R1],
+ &arm_registers[REG_R2],
+ &arm_registers[REG_R3],
+ &arm_registers[REG_LR],
+
+ &arm_registers[REG_F0],
+ &arm_registers[REG_F1],
+ &arm_registers[REG_F2],
+ &arm_registers[REG_F3],
+ &arm_registers[REG_F4],
+ &arm_registers[REG_F5],
+ &arm_registers[REG_F6],
+ &arm_registers[REG_F7],
+};
+
static bool mode_needs_gp_reg(ir_mode *mode)
{
return mode_is_int(mode) || mode_is_reference(mode);
}
if (try_encode_as_immediate(op2, &imm)) {
- ir_node *new_op1 = be_transform_node(op1);
+ new_op1 = be_transform_node(op1);
return factory->new_binop_imm(dbgi, block, new_op1, imm.imm_8, imm.rot);
}
new_op2 = be_transform_node(op2);
/* just produce a 0 */
ir_mode *mode = get_irn_mode(node);
if (mode_is_float(mode)) {
- ir_tarval *tv = get_mode_null(mode);
- ir_node *node = new_bd_arm_fConst(dbgi, new_block, tv);
- return node;
+ ir_tarval *tv = get_mode_null(mode);
+ ir_node *fconst = new_bd_arm_fConst(dbgi, new_block, tv);
+ return fconst;
} else if (mode_needs_gp_reg(mode)) {
return create_const_graph_value(dbgi, new_block, 0);
}
ir_type *type = get_Call_type(node);
calling_convention_t *cconv = arm_decide_calling_convention(NULL, type);
size_t n_params = get_Call_n_params(node);
- size_t n_param_regs = sizeof(param_regs)/sizeof(param_regs[0]);
+ size_t n_param_regs = cconv->n_reg_params;
/* max inputs: memory, callee, register arguments */
int max_inputs = 2 + n_param_regs;
ir_node **in = ALLOCAN(ir_node*, max_inputs);
const arm_attr_t *attr = get_arm_attr_const(irn);
if (is_arm_FrameAddr(irn)) {
- const arm_SymConst_attr_t *attr = get_arm_SymConst_attr_const(irn);
- return attr->entity;
+ const arm_SymConst_attr_t *frame_attr = get_arm_SymConst_attr_const(irn);
+ return frame_attr->entity;
}
if (attr->is_load_store) {
const arm_load_store_attr_t *load_store_attr
break;
}
succ_entry = succ_entry->prev;
- };
+ }
if (irn_visited(succ_entry->block))
continue;
fclose(self->f);
}
-const plotter_if_t ps_plotter_vtab = {
+static const plotter_if_t ps_plotter_vtab = {
ps_begin,
ps_setcolor,
get_color,
{ NULL, 0 }
};
-static const lc_opt_enum_int_items_t lower_perm_stat_items[] = {
- { NULL, 0 }
-};
-
static const lc_opt_enum_int_items_t dump_items[] = {
{ "none", BE_CH_DUMP_NONE },
{ "spill", BE_CH_DUMP_SPILL },
be_assure_liveness(irg);
be_liveness_assure_chk(be_get_irg_liveness(irg));
- stat_ev_do(pse->pre_spill_cost = be_estimate_irg_costs(irg, exec_freq));
+ if (stat_ev_enabled) {
+ pse->pre_spill_cost = be_estimate_irg_costs(irg, exec_freq);
+ }
/* put all ignore registers into the ignore register set. */
be_put_allocatable_regs(irg, pse->cls, chordal_env->allocatable_regs);
chordal_env->ifg = be_create_ifg(chordal_env);
be_timer_pop(T_RA_IFG);
- stat_ev_if {
+ if (stat_ev_enabled) {
be_ifg_stat_t stat;
be_node_stats_t node_stats;
be_timer_pop(T_RA_PROLOG);
- stat_ev_if {
+ if (stat_ev_enabled) {
be_collect_node_stats(&last_node_stats, irg);
}
stat_ev_ctx_push_str("bechordal_cls", cls->name);
- stat_ev_if {
+ if (stat_ev_enabled) {
be_do_stat_reg_pressure(irg, cls);
}
post_spill(&pse, 0);
- stat_ev_if {
+ if (stat_ev_enabled) {
be_node_stats_t node_stats;
be_collect_node_stats(&node_stats, irg);
LC_OPT_LAST
};
-BE_REGISTER_MODULE_CONSTRUCTOR(be_init_copyilp);
+BE_REGISTER_MODULE_CONSTRUCTOR(be_init_copyilp)
void be_init_copyilp(void)
{
lc_opt_entry_t *be_grp = lc_opt_get_grp(firm_opt_get_root(), "be");
cst_idx = lpp_add_cst(ienv->lp, NULL, lpp_equal, 1.0);
bitset_foreach(colors, col) {
- int var_idx = lpp_add_var(ienv->lp, name_cdd(buf, 'x', node_nr, col), lpp_binary, 0.0);
+ int var_idx = lpp_add_var(ienv->lp, name_cdd(buf, 'x', node_nr, (int)col), lpp_binary, 0.0);
lpp_set_start_value(ienv->lp, var_idx, (col == (unsigned) curr_node_color) ? 1.0 : 0.0);
lpp_set_factor_fast(ienv->lp, cst_idx, var_idx, 1);
/* add register constraint constraints */
bitset_foreach_clear(colors, col) {
int cst_idx = lpp_add_cst(ienv->lp, NULL, lpp_equal, 0.0);
- int var_idx = lpp_add_var(ienv->lp, name_cdd(buf, 'x', node_nr, col), lpp_binary, 0.0);
+ int var_idx = lpp_add_var(ienv->lp, name_cdd(buf, 'x', node_nr, (int)col), lpp_binary, 0.0);
lpp_set_start_value(ienv->lp, var_idx, 0.0);
lpp_set_factor_fast(ienv->lp, cst_idx, var_idx, 1);
int growed;
/* get 2 starting nodes to form a clique */
- for (e=set_first(edges); !e->n1; e=set_next(edges))
- /*nothing*/ ;
+ for (e=set_first(edges); !e->n1; e=set_next(edges)) {
+ }
	/* we may have stepped out of the loop before the set iterator reached the end */
set_break(edges);
lpp_sol_state_t state = lpp_get_solution(ienv->lp, sol, lenv->first_x_var, lenv->last_x_var);
if (state != lpp_optimal) {
- printf("WARNING %s: Solution state is not 'optimal': %d\n", ienv->co->name, state);
+ printf("WARNING %s: Solution state is not 'optimal': %d\n", ienv->co->name, (int)state);
assert(state >= lpp_feasible && "The solution should at least be feasible!");
}
#endif
}
-BE_REGISTER_MODULE_CONSTRUCTOR(be_init_copyilp2);
+BE_REGISTER_MODULE_CONSTRUCTOR(be_init_copyilp2)
void be_init_copyilp2(void)
{
static co_algo_info copyheur = {
/**
* Holds current values. Values are added till next copystat_reset
*/
-int curr_vals[ASIZE];
+static int curr_vals[ASIZE];
static ir_nodeset_t *all_phi_nodes;
static ir_nodeset_t *all_copy_nodes;
size_t i2;
fputs("\t.byte ", file);
for (i2 = i; i2 < i + 30 && i2 < len; ++i2) {
- fprintf(file, "0x%02X", buffer[i2]);
+ fprintf(file, "0x%02X", (unsigned)buffer[i2]);
}
i = i2;
fputs("\n", file);
/* We define a generic dummy unit */
be_execution_unit_t be_machine_execution_units_DUMMY[1];
-be_execution_unit_type_t be_machine_execution_unit_types[] = {
+static be_execution_unit_type_t be_machine_execution_unit_types[] = {
{ 1, 1, "DUMMY", be_machine_execution_units_DUMMY },
};
LC_OPT_ENT_BOOL ("statev", "dump statistic events", &be_options.statev),
LC_OPT_ENT_STR ("filtev", "filter for stat events (regex if support is active", &be_options.filtev, sizeof(be_options.filtev)),
- LC_OPT_ENT_STR ("ilp.server", "the ilp server name", be_options.ilp_server, sizeof(be_options.ilp_server)),
- LC_OPT_ENT_STR ("ilp.solver", "the ilp solver name", be_options.ilp_solver, sizeof(be_options.ilp_solver)),
+ LC_OPT_ENT_STR ("ilp.server", "the ilp server name", &be_options.ilp_server, sizeof(be_options.ilp_server)),
+ LC_OPT_ENT_STR ("ilp.solver", "the ilp solver name", &be_options.ilp_solver, sizeof(be_options.ilp_solver)),
LC_OPT_LAST
};
/* set the current graph (this is important for several firm functions) */
current_ir_graph = irg;
- stat_ev_if {
+ if (stat_ev_enabled) {
stat_ev_ctx_push_fobj("bemain_irg", irg);
be_stat_ev("bemain_insns_start", be_count_insns(irg));
be_stat_ev("bemain_blocks_start", be_count_blocks(irg));
be_sched_verify(irg, be_options.verify_option);
be_timer_pop(T_VERIFY);
- stat_ev_if {
+ if (stat_ev_enabled) {
stat_ev_dbl("bemain_costs_before_ra",
be_estimate_irg_costs(irg, birg->exec_freq));
be_stat_ev("bemain_insns_before_ra", be_count_insns(irg));
dump(DUMP_FINAL, irg, "finish");
- stat_ev_if {
+ if (stat_ev_enabled) {
be_stat_ev("bemain_insns_finish", be_count_insns(irg));
be_stat_ev("bemain_blocks_finish", be_count_blocks(irg));
}
if (be_options.timing == BE_TIME_ON) {
ir_timer_stop(t);
ir_timer_leave_high_priority();
- stat_ev_if {
+ if (stat_ev_enabled) {
stat_ev_dbl("bemain_backend_time", ir_timer_elapsed_msec(t));
} else {
double val = ir_timer_elapsed_usec(t) / 1000.0;
(void) len;
for (module = *(moddata->list_head); module != NULL; module = module->next) {
- size_t len = strlen(module->name);
+ size_t name_len = strlen(module->name);
if (module != *(moddata->list_head)) {
p = strncat(p, ", ", buflen - 1);
p = strncat(p, module->name, buflen - 1);
- if (len >= buflen)
+ if (name_len >= buflen)
break;
- buflen -= len;
+ buflen -= name_len;
}
return strlen(buf);
if (additional_types == 0) {
req = reg->single_req;
} else {
- ir_graph *irg = get_irn_irg(node);
struct obstack *obst = be_get_be_obst(irg);
req = be_create_reg_req(obst, reg, additional_types);
}
if (fc == NULL) {
irn_cost_pair* costs;
- int i;
ir_node* block = get_nodes_block(irn);
fc = OALLOCF(&inst->obst, flag_and_cost, costs, arity);
for (i = 0; i < arity; ++i) {
ir_node* pred = get_irn_n(irn, i);
- int cost;
if (is_Phi(irn) || get_irn_mode(pred) == mode_M || is_Block(pred)) {
cost = 0;
DEBUG_ONLY(
memset(spilllist, 0, spillcount * sizeof(spilllist[0]));
- );
+ )
i = 0;
foreach_set(env->spills, spill_t*, spill) {
return;
} /* if */
- if (0 && get_mode_size_bits(mode) & 7) {
+#if 0
+ if (get_mode_size_bits(mode) & 7) {
/* this is a bitfield type, ignore it */
return;
} /* if */
+#endif
type_num = get_type_number(h, tp);
ofs = get_entity_offset(ent);
if (is_Struct_type(mtp) && get_type_mode(mtp) != NULL) {
/* this structure is a bitfield, skip */
- int i, n;
+ int m;
+ int n_members = get_struct_n_members(mtp);
- for (i = 0, n = get_struct_n_members(mtp); i < n; ++i) {
- ir_entity *ent = get_struct_member(mtp, i);
- ir_type *tp = get_entity_type(ent);
+ for (m = 0; m < n_members; ++m) {
+ ir_entity *member = get_struct_member(mtp, m);
+ ir_type *member_tp = get_entity_type(member);
int bofs;
- type_num = get_type_number(h, tp);
- size = get_type_size_bytes(tp) * 8;
- bofs = (ofs + get_entity_offset(ent)) * 8 + get_entity_offset_bits_remainder(ent);
+ type_num = get_type_number(h, member_tp);
+ size = get_type_size_bytes(member_tp) * 8;
+ bofs = (ofs + get_entity_offset(member)) * 8 + get_entity_offset_bits_remainder(member);
			/* name:type, bit offset from the start of the struct, number of bits in the element. */
- be_emit_irprintf("%s:%u,%d,%u;", get_entity_name(ent), type_num, bofs, size);
+ be_emit_irprintf("%s:%u,%d,%u;", get_entity_name(member), type_num, bofs, size);
}
} else {
/* no bitfield */
for (i = 0, n = get_method_n_params(mtp); i < n; ++i) {
ir_type *ptp = get_method_param_type(mtp, i);
const char *name = NULL;
- unsigned type_num = get_type_number(h, ptp);
char buf[16];
int ofs = 0;
ir_entity *stack_ent;
/* create entries for automatic variables on the stack */
frame_size = get_type_size_bytes(layout->frame_type);
for (i = 0, n = get_compound_n_members(layout->frame_type); i < n; ++i) {
- ir_entity *ent = get_compound_member(layout->frame_type, i);
+ ir_entity *member = get_compound_member(layout->frame_type, i);
ir_type *tp;
int ofs;
unsigned type_num;
/* ignore spill slots and other helper objects */
- if (is_entity_compiler_generated(ent))
+ if (is_entity_compiler_generated(member))
continue;
- tp = get_entity_type(ent);
+ tp = get_entity_type(member);
/* should not happen in backend but ... */
if (is_Method_type(tp))
continue;
type_num = get_type_number(h, tp);
- ofs = -frame_size + get_entity_offset(ent);
+ ofs = -frame_size + get_entity_offset(member);
be_emit_irprintf("\t.stabs\t\"%s:%u\",%d,0,0,%d\n",
- get_entity_name(ent), type_num, N_LSYM, ofs);
+ get_entity_name(member), type_num, N_LSYM, ofs);
be_emit_write_line();
}
/* we need a lexical block here */
ir_graph *irg = get_irn_irg(phi);
ir_node *block = get_nodes_block(phi);
int arity = get_irn_arity(phi);
- ir_node **in = ALLOCAN(ir_node*, arity);
+ ir_node **phi_in = ALLOCAN(ir_node*, arity);
ir_node *dummy = new_r_Dummy(irg, mode_M);
ir_node *spill_to_kill = NULL;
spill_info_t *spill_info;
/* create a new phi-M with bad preds */
for (i = 0; i < arity; ++i) {
- in[i] = dummy;
+ phi_in[i] = dummy;
}
DBG((dbg, LEVEL_2, "\tcreate Phi-M for %+F\n", phi));
/* create a Phi-M */
- spill_info->spill = be_new_Phi(block, arity, in, mode_M, NULL);
+ spill_info->spill = be_new_Phi(block, arity, phi_in, mode_M, NULL);
sched_add_after(block, spill_info->spill);
if (spill_to_kill != NULL) {
{
unsigned step;
ir_node *block = get_nodes_block(from);
- ir_node *next_use;
+ ir_node *next_use_node;
ir_node *node;
unsigned timestep;
unsigned next_use_step;
from = sched_next(from);
}
- next_use = NULL;
+ next_use_node = NULL;
next_use_step = INT_MAX;
timestep = get_step(from);
foreach_out_edge(def, edge) {
- ir_node *node = get_edge_src_irn(edge);
- unsigned node_step;
+ node = get_edge_src_irn(edge);
+ unsigned node_step;
if (is_Anchor(node))
continue;
if (node_step < timestep)
continue;
if (node_step < next_use_step) {
- next_use = node;
+ next_use_node = node;
next_use_step = node_step;
}
}
- if (next_use != NULL) {
+ if (next_use_node != NULL) {
be_next_use_t result;
result.time = next_use_step - timestep + skip_from_uses;
result.outermost_loop = get_loop_depth(get_irn_loop(block));
- result.before = next_use;
+ result.before = next_use_node;
return result;
}
* (except mode_X projs)
*/
sched_foreach(block, node) {
- int i, arity;
int timestep;
/* this node is scheduled */
/* Check that all uses come before their definitions */
if (!is_Phi(node)) {
+ int i;
+ int arity;
sched_timestep_t nodetime = sched_get_time_step(node);
for (i = 0, arity = get_irn_arity(node); i < arity; ++i) {
ir_node *arg = get_irn_n(node, i);
prev = sched_prev(prev);
while (true) {
+ int i;
for (i = 0; i < arity; ++i) {
ir_node *in = get_irn_n(node, i);
in = skip_Proj(in);
/* phis should be NOPs at this point, which means all input regs
* must be the same as the output reg */
if (is_Phi(node)) {
- int i, arity;
-
reg = arch_get_irn_register(node);
arity = get_irn_arity(node);
registers = ALLOCANZ(ir_node*, n_regs);
be_lv_foreach(lv, block, be_lv_state_end, idx) {
- ir_node *node = be_lv_get_irn(lv, block, idx);
- value_used(block, node);
+ ir_node *lv_node = be_lv_get_irn(lv, block, idx);
+ value_used(block, lv_node);
}
sched_foreach_reverse(block, node) {
}
be_lv_foreach(lv, block, be_lv_state_in, idx) {
- ir_node *node = be_lv_get_irn(lv, block, idx);
- value_def(node);
+ ir_node *lv_node = be_lv_get_irn(lv, block, idx);
+ value_def(lv_node);
}
/* set must be empty now */
REG_Y,
};
+static const arch_register_t* const param_regs[] = {
+ &sparc_registers[REG_I0],
+ &sparc_registers[REG_I1],
+ &sparc_registers[REG_I2],
+ &sparc_registers[REG_I3],
+ &sparc_registers[REG_I4],
+ &sparc_registers[REG_I5],
+};
+
+static const arch_register_t* const float_result_regs[] = {
+ &sparc_registers[REG_F0],
+ &sparc_registers[REG_F1],
+ &sparc_registers[REG_F2],
+ &sparc_registers[REG_F3],
+};
+
/**
* Maps an input register representing the i'th register input
* to the i'th register output.
calling_convention_t *sparc_decide_calling_convention(ir_type *function_type,
ir_graph *irg)
{
- int stack_offset = 0;
+ unsigned stack_offset = 0;
+ unsigned n_param_regs_used = 0;
int n_param_regs = ARRAY_SIZE(param_regs);
int n_float_result_regs = ARRAY_SIZE(float_result_regs);
bool omit_fp = false;
}
}
}
+ n_param_regs_used = regnum;
/* determine how results are passed */
n_results = get_method_n_ress(function_type);
cconv = XMALLOCZ(calling_convention_t);
cconv->parameters = params;
cconv->param_stack_size = stack_offset;
+ cconv->n_param_regs = n_param_regs_used;
cconv->results = results;
cconv->omit_fp = omit_fp;
#include "../be_types.h"
#include "gen_sparc_regalloc_if.h"
-static const arch_register_t *const caller_saves[] = {
- &sparc_registers[REG_G1],
- &sparc_registers[REG_G2],
- &sparc_registers[REG_G3],
- &sparc_registers[REG_G4],
- &sparc_registers[REG_O0],
- &sparc_registers[REG_O1],
- &sparc_registers[REG_O2],
- &sparc_registers[REG_O3],
- &sparc_registers[REG_O4],
- &sparc_registers[REG_O5],
-
- &sparc_registers[REG_F0],
- &sparc_registers[REG_F1],
- &sparc_registers[REG_F2],
- &sparc_registers[REG_F3],
- &sparc_registers[REG_F4],
- &sparc_registers[REG_F5],
- &sparc_registers[REG_F6],
- &sparc_registers[REG_F7],
- &sparc_registers[REG_F8],
- &sparc_registers[REG_F9],
- &sparc_registers[REG_F10],
- &sparc_registers[REG_F11],
- &sparc_registers[REG_F12],
- &sparc_registers[REG_F13],
- &sparc_registers[REG_F14],
- &sparc_registers[REG_F15],
- &sparc_registers[REG_F16],
- &sparc_registers[REG_F17],
- &sparc_registers[REG_F18],
- &sparc_registers[REG_F19],
- &sparc_registers[REG_F20],
- &sparc_registers[REG_F21],
- &sparc_registers[REG_F22],
- &sparc_registers[REG_F23],
- &sparc_registers[REG_F24],
- &sparc_registers[REG_F25],
- &sparc_registers[REG_F26],
- &sparc_registers[REG_F27],
- &sparc_registers[REG_F28],
- &sparc_registers[REG_F29],
- &sparc_registers[REG_F30],
- &sparc_registers[REG_F31],
-};
-
-static const arch_register_t *const omit_fp_callee_saves[] = {
- &sparc_registers[REG_L0],
- &sparc_registers[REG_L1],
- &sparc_registers[REG_L2],
- &sparc_registers[REG_L3],
- &sparc_registers[REG_L4],
- &sparc_registers[REG_L5],
- &sparc_registers[REG_L6],
- &sparc_registers[REG_L7],
- &sparc_registers[REG_I0],
- &sparc_registers[REG_I1],
- &sparc_registers[REG_I2],
- &sparc_registers[REG_I3],
- &sparc_registers[REG_I4],
- &sparc_registers[REG_I5],
-};
-
-static const arch_register_t* const param_regs[] = {
- &sparc_registers[REG_I0],
- &sparc_registers[REG_I1],
- &sparc_registers[REG_I2],
- &sparc_registers[REG_I3],
- &sparc_registers[REG_I4],
- &sparc_registers[REG_I5],
-};
-
-static const arch_register_t* const float_result_regs[] = {
- &sparc_registers[REG_F0],
- &sparc_registers[REG_F1],
- &sparc_registers[REG_F2],
- &sparc_registers[REG_F3],
-};
-
/** information about a single parameter or result */
typedef struct reg_or_stackslot_t
{
const arch_register_t *reg1; /**< if != NULL, the second register used. */
ir_type *type; /**< indicates that an entity of the specific
type is needed */
- int offset; /**< if transmitted via stack, the offset for
+ unsigned offset; /**< if transmitted via stack, the offset for
this parameter. */
ir_entity *entity; /**< entity in frame type */
} reg_or_stackslot_t;
bool omit_fp; /**< do not use frame pointer (and no
save/restore) */
reg_or_stackslot_t *parameters; /**< parameter info. */
- int param_stack_size; /**< stack size for parameters */
+ unsigned param_stack_size; /**< stack size for parameters */
+ unsigned n_param_regs; /**< number of values passed in a
+ register */
reg_or_stackslot_t *results; /**< result info. */
} calling_convention_t;
//static ir_mode *mode_fp4;
static pmap *node_to_stack;
+static const arch_register_t *const caller_saves[] = {
+ &sparc_registers[REG_G1],
+ &sparc_registers[REG_G2],
+ &sparc_registers[REG_G3],
+ &sparc_registers[REG_G4],
+ &sparc_registers[REG_O0],
+ &sparc_registers[REG_O1],
+ &sparc_registers[REG_O2],
+ &sparc_registers[REG_O3],
+ &sparc_registers[REG_O4],
+ &sparc_registers[REG_O5],
+
+ &sparc_registers[REG_F0],
+ &sparc_registers[REG_F1],
+ &sparc_registers[REG_F2],
+ &sparc_registers[REG_F3],
+ &sparc_registers[REG_F4],
+ &sparc_registers[REG_F5],
+ &sparc_registers[REG_F6],
+ &sparc_registers[REG_F7],
+ &sparc_registers[REG_F8],
+ &sparc_registers[REG_F9],
+ &sparc_registers[REG_F10],
+ &sparc_registers[REG_F11],
+ &sparc_registers[REG_F12],
+ &sparc_registers[REG_F13],
+ &sparc_registers[REG_F14],
+ &sparc_registers[REG_F15],
+ &sparc_registers[REG_F16],
+ &sparc_registers[REG_F17],
+ &sparc_registers[REG_F18],
+ &sparc_registers[REG_F19],
+ &sparc_registers[REG_F20],
+ &sparc_registers[REG_F21],
+ &sparc_registers[REG_F22],
+ &sparc_registers[REG_F23],
+ &sparc_registers[REG_F24],
+ &sparc_registers[REG_F25],
+ &sparc_registers[REG_F26],
+ &sparc_registers[REG_F27],
+ &sparc_registers[REG_F28],
+ &sparc_registers[REG_F29],
+ &sparc_registers[REG_F30],
+ &sparc_registers[REG_F31],
+};
+
+static const arch_register_t *const omit_fp_callee_saves[] = {
+ &sparc_registers[REG_L0],
+ &sparc_registers[REG_L1],
+ &sparc_registers[REG_L2],
+ &sparc_registers[REG_L3],
+ &sparc_registers[REG_L4],
+ &sparc_registers[REG_L5],
+ &sparc_registers[REG_L6],
+ &sparc_registers[REG_L7],
+ &sparc_registers[REG_I0],
+ &sparc_registers[REG_I1],
+ &sparc_registers[REG_I2],
+ &sparc_registers[REG_I3],
+ &sparc_registers[REG_I4],
+ &sparc_registers[REG_I5],
+};
+
static inline bool mode_needs_gp_reg(ir_mode *mode)
{
if (mode_is_int(mode) || mode_is_reference(mode)) {
dbg_info *dbgi = get_irn_dbg_info(node);
ir_type *type = get_Call_type(node);
size_t n_params = get_Call_n_params(node);
- size_t n_param_regs = sizeof(param_regs)/sizeof(param_regs[0]);
/* max inputs: memory, callee, register arguments */
- int max_inputs = 2 + n_param_regs;
- ir_node **in = ALLOCAN(ir_node*, max_inputs);
ir_node **sync_ins = ALLOCAN(ir_node*, n_params);
struct obstack *obst = be_get_be_obst(irg);
- const arch_register_req_t **in_req
- = OALLOCNZ(obst, const arch_register_req_t*, max_inputs);
calling_convention_t *cconv
= sparc_decide_calling_convention(type, NULL);
+ size_t n_param_regs = cconv->n_param_regs;
+ unsigned max_inputs = 2 + n_param_regs;
+ ir_node **in = ALLOCAN(ir_node*, max_inputs);
+ const arch_register_req_t **in_req
+ = OALLOCNZ(obst, const arch_register_req_t*, max_inputs);
int in_arity = 0;
int sync_arity = 0;
int n_caller_saves
set_irn_pinned(str, op_pin_state_floats);
sync_ins[sync_arity++] = str;
}
- assert(in_arity <= max_inputs);
+ assert(in_arity <= (int)max_inputs);
/* construct memory input */
if (sync_arity == 0) {
#include <stddef.h>
-static inline void _time_get(ir_timer_val_t *val);
static inline void _time_reset(ir_timer_val_t *val);
-static inline unsigned long _time_to_msec(const ir_timer_val_t *val);
-static inline ir_timer_val_t *_time_add(ir_timer_val_t *res,
- const ir_timer_val_t *lhs, const ir_timer_val_t *rhs);
-static inline ir_timer_val_t *_time_sub(ir_timer_val_t *res,
- const ir_timer_val_t *lhs, const ir_timer_val_t *rhs);
/**
* A timer.
ir_node *block = get_irn_n(div, -1);
ir_mode *mode = get_irn_mode(n);
int bits = get_mode_size_bits(mode);
- ir_node *q, *t, *c;
+ ir_node *q;
/* Beware: do not transform bad code */
if (is_Bad(n) || is_Bad(block))
struct ms mag = magic(tv);
/* generate the Mulh instruction */
- c = new_r_Const(irg, mag.M);
+ ir_node *c = new_r_Const(irg, mag.M);
+ ir_node *t;
q = new_rd_Mulh(dbg, block, n, c, mode);
/* do we need an Add or Sub */
q = new_rd_Add(dbg, block, q, t, mode);
} else {
struct mu mag = magicu(tv);
- ir_node *c;
ir_graph *irg = get_irn_irg(div);
/* generate the Mulh instruction */
- c = new_r_Const(irg, mag.M);
+ ir_node *c = new_r_Const(irg, mag.M);
q = new_rd_Mulh(dbg, block, n, c, mode);
if (mag.need_add) {
if (mag.s > 0) {
/* use the GM scheme */
- t = new_rd_Sub(dbg, block, n, q, mode);
+ ir_node *t = new_rd_Sub(dbg, block, n, q, mode);
c = new_r_Const(irg, get_mode_one(mode_Iu));
t = new_rd_Shr(dbg, block, t, c, mode);
res = new_rd_Shrs(dbg, block, curr, k_node, mode);
if (n_flag) { /* negate the result */
- ir_node *k_node;
-
k_node = new_r_Const(irg, get_mode_null(mode));
res = new_rd_Sub(dbg, block, k_node, res, mode);
}
int arity = get_irn_arity(block);
/* no predecessors: use unknown value */
if (arity == 0 && block == get_irg_start_block(get_irn_irg(block))) {
- ir_graph *irg = get_irn_irg(block);
if (default_initialize_local_variable != NULL) {
ir_node *rem = get_r_cur_block(irg);
set_r_cur_block(irg, block);
break;
default:
- ;
+ break;
} /* end switch */
}
* from irg.
*/
for (i = get_irp_n_irgs(); i > 0;) {
- ir_graph *irg = get_irp_irg(--i);
- ir_node **arr = (ir_node**)ird_get_irg_link(irg);
+ ir_graph *other_irg = get_irp_irg(--i);
+ ir_node **arr = (ir_node**)ird_get_irg_link(other_irg);
if (arr == NULL)
continue;
- dump_graph_from_list(out, irg);
+ dump_graph_from_list(out, other_irg);
DEL_ARR_F(arr);
}
}
print_dbg_info(F, get_entity_dbg_info(ent));
for (i = get_irp_n_irgs(); i > 0;) {
- ir_graph *irg = get_irp_irg(--i);
- list_tuple *lists = (list_tuple*)ird_get_irg_link(irg);
+ ir_graph *other_irg = get_irp_irg(--i);
+ list_tuple *lists = (list_tuple*)ird_get_irg_link(other_irg);
if (lists) {
/* dump the extended blocks first */
if (ARR_LEN(lists->extbb_list)) {
- ird_set_irg_link(irg, lists->extbb_list);
- dump_extblock_graph(F, irg);
+ ird_set_irg_link(other_irg, lists->extbb_list);
+ dump_extblock_graph(F, other_irg);
}
/* we may have blocks without extended blocks, bad for instance */
if (ARR_LEN(lists->blk_list)) {
- ird_set_irg_link(irg, lists->blk_list);
- dump_block_graph(F, irg);
+ ird_set_irg_link(other_irg, lists->blk_list);
+ dump_block_graph(F, other_irg);
}
DEL_ARR_F(lists->extbb_list);
{
FILE *F = (FILE*)env;
int i;
- ir_node *pred;
if (is_Bad(block) && get_irn_mode(block) == mode_X) {
dump_node(F, block);
/* Dump dominator/postdominator edge */
if (ir_get_dump_flags() & ir_dump_flag_dominance) {
if (get_irg_dom_state(current_ir_graph) == dom_consistent && get_Block_idom(block)) {
- pred = get_Block_idom(block);
+ ir_node *pred = get_Block_idom(block);
fprintf(F, "edge: { sourcename: \"");
PRINT_NODEID(block);
fprintf(F, "\" targetname: \"");
fprintf(F, "\" " DOMINATOR_EDGE_ATTR "}\n");
}
if (get_irg_postdom_state(current_ir_graph) == dom_consistent && get_Block_ipostdom(block)) {
- pred = get_Block_ipostdom(block);
+ ir_node *pred = get_Block_ipostdom(block);
fprintf(F, "edge: { sourcename: \"");
PRINT_NODEID(block);
fprintf(F, "\" targetname: \"");
/* Write the irnode and all its attributes to the file passed. */
void dump_irnode_to_file(FILE *F, ir_node *n)
{
- int i;
char comma;
ir_graph *irg;
vrp_attr *vrp_info;
if (ir_get_dump_flags() & ir_dump_flag_analysed_types)
fprintf (F, " addr: %p\n", (void *)n);
fprintf (F, " mode: %s\n", get_mode_name(get_irn_mode(n)));
- fprintf (F, " visited: %ld\n", get_irn_visited(n));
+ fprintf (F, " visited: %lu\n", get_irn_visited(n));
irg = get_irn_irg(n);
if (irg != get_const_code_irg())
fprintf (F, " irg: %s\n", get_ent_dump_name(get_irg_entity(irg)));
dump_node_opcode(F, get_irn_n(n, -1));
fprintf(F, " %ld\n", get_irn_node_nr(get_irn_n(n, -1)));
}
- for ( i = 0; i < get_irn_arity(n); ++i) {
- fprintf(F, " %d: %s ", i, is_backedge(n, i) ? "be" : " ");
- dump_node_opcode(F, get_irn_n(n, i));
- fprintf(F, " %ld\n", get_irn_node_nr(get_irn_n(n, i)));
+
+ {
+ int i;
+ for (i = 0; i < get_irn_arity(n); ++i) {
+ fprintf(F, " %d: %s ", i, is_backedge(n, i) ? "be" : " ");
+ dump_node_opcode(F, get_irn_n(n, i));
+ fprintf(F, " %ld\n", get_irn_node_nr(get_irn_n(n, i)));
+ }
}
fprintf(F, " Private Attributes:\n");
}
/* This is not nice, output it as a marker in the predecessor list. */
- if (is_Block(n) ||
- get_irn_op(n) == op_Phi) {
+ if (is_Block(n) || get_irn_op(n) == op_Phi) {
+ int i;
fprintf(F, " backedges:");
comma = ' ';
for (i = 0; i < get_irn_arity(n); i++)
case iro_Block: {
if (has_Block_entity(n))
fprintf(F, " Label: %lu\n", get_entity_label(get_Block_entity(n)));
- fprintf(F, " block visited: %ld\n", get_Block_block_visited(n));
+ fprintf(F, " block visited: %lu\n", get_Block_block_visited(n));
fprintf(F, " block marked: %u\n", get_Block_mark(n));
if (get_irg_dom_state(get_irn_irg(n)) == dom_consistent) {
fprintf(F, " dom depth %d\n", get_Block_dom_depth(n));
- fprintf(F, " domtree pre num %d\n", get_Block_dom_tree_pre_num(n));
- fprintf(F, " max subtree pre num %d\n", get_Block_dom_max_subtree_pre_num(n));
+ fprintf(F, " domtree pre num %u\n", get_Block_dom_tree_pre_num(n));
+ fprintf(F, " max subtree pre num %u\n", get_Block_dom_max_subtree_pre_num(n));
}
if (get_irg_postdom_state(get_irn_irg(n)) == dom_consistent) {
fprintf(F, " pdom depth %d\n", get_Block_postdom_depth(n));
- fprintf(F, " pdomtree pre num %d\n", get_Block_pdom_tree_pre_num(n));
- fprintf(F, " max pdomsubtree pre num %d\n", get_Block_pdom_max_subtree_pre_num(n));
+ fprintf(F, " pdomtree pre num %u\n", get_Block_pdom_tree_pre_num(n));
+ fprintf(F, " max pdomsubtree pre num %u\n", get_Block_pdom_max_subtree_pre_num(n));
}
fprintf(F, " Execution frequency statistics:\n");
fprintf(F, " assembler text: %s", get_id_str(get_ASM_text(n)));
l = get_ASM_n_input_constraints(n);
if (l > 0) {
+ int i;
fprintf(F, "\n inputs: ");
cons = get_ASM_input_constraints(n);
for (i = 0; i < l; ++i)
}
l = get_ASM_n_output_constraints(n);
if (l > 0) {
+ int i;
fprintf(F, "\n outputs: ");
cons = get_ASM_output_constraints(n);
for (i = 0; i < l; ++i)
}
l = get_ASM_n_clobbers(n);
if (l > 0) {
+ int i;
fprintf(F, "\n clobber: ");
clobber = get_ASM_clobbers(n);
for (i = 0; i < l; ++i)
size_t j;
compound_graph_path *path = get_compound_ent_value_path(ent, i);
ir_entity *ent0 = get_compound_graph_path_node(path, 0);
- fprintf(F, "\n%s %3d:%u ", prefix, get_entity_offset(ent0), get_entity_offset_bits_remainder(ent0));
+ fprintf(F, "\n%s %3d:%d ", prefix, get_entity_offset(ent0), get_entity_offset_bits_remainder(ent0));
if (get_type_state(type) == layout_fixed)
fprintf(F, "(%3u:%u) ", get_compound_ent_value_offset_bytes(ent, i), get_compound_ent_value_offset_bit_remainder(ent, i));
fprintf(F, "%s", get_entity_name(ent));
case tpo_array:
if (verbosity & dump_verbosity_typeattrs) {
- size_t i, n_dim;
+ size_t n_dim;
ir_type *elem_tp = get_array_element_type(tp);
fprintf(F, "\n array ");
irg_edge_info_t *info;
ir_edgeset_t *edges;
ir_edge_t templ;
- ir_edge_t *edge;
assert(edges_activated_kind(irg, kind));
*/
if (tgt == NULL) {
/* search the edge in the set. */
- edge = ir_edgeset_find(edges, &templ);
+ ir_edge_t *edge = ir_edgeset_find(edges, &templ);
/* mark the edge invalid if it was found */
if (edge) {
/* If the old target is not null, the edge is moved. */
if (old_tgt) {
- edge = ir_edgeset_find(edges, &templ);
+ ir_edge_t *edge = ir_edgeset_find(edges, &templ);
assert(edge && "edge to redirect not found!");
assert(! edge->invalid && "Invalid edge encountered");
const ir_edge_t *next;
foreach_out_edge_kind_safe(old_tgt, edge, next, EDGE_KIND_BLOCK) {
ir_node *succ = get_edge_src_irn(edge);
- int pos = get_edge_src_pos(edge);
- ir_node *block_pred = get_Block_cfgpred(succ, pos);
+ int succ_pos = get_edge_src_pos(edge);
+ ir_node *block_pred = get_Block_cfgpred(succ, succ_pos);
if (block_pred != src)
continue;
- edges_notify_edge_kind(succ, pos, tgt, old_tgt,
+ edges_notify_edge_kind(succ, succ_pos, tgt, old_tgt,
EDGE_KIND_BLOCK, irg);
}
}
return;
}
- for (p = hooks[hook]; p && p->next != entry; p = p->next);
+ for (p = hooks[hook]; p && p->next != entry; p = p->next) {
+ }
if (p) {
p->next = entry->next;
default:
panic("export_type_or_ent_post: Unknown type or entity.");
- break;
}
}
default:
parse_error(env, "Unknown escape sequence '\\%c'\n", env->c);
exit(1);
- break;
}
} else {
obstack_1grow(&env->obst, env->c);
type = new_type_method(nparams, nresults);
for (i = 0; i < nparams; i++) {
- long typenr = read_long(env);
- ir_type *paramtype = get_type(env, typenr);
+ long ptypenr = read_long(env);
+ ir_type *paramtype = get_type(env, ptypenr);
set_method_param_type(type, i, paramtype);
}
for (i = 0; i < nresults; i++) {
- long typenr = read_long(env);
- ir_type *restype = get_type(env, typenr);
+ long ptypenr = read_long(env);
+ ir_type *restype = get_type(env, ptypenr);
set_method_res_type(type, i, restype);
}
skip_ws(env);
while (!isdigit(env->c)) {
- char *str = read_word(env);
+ char *vstr = read_word(env);
unsigned v;
skip_ws(env);
- v = symbol(str, tt_visibility);
+ v = symbol(vstr, tt_visibility);
if (v != SYMERROR) {
visibility = (ir_visibility)v;
continue;
}
- v = symbol(str, tt_linkage);
+ v = symbol(vstr, tt_linkage);
if (v != SYMERROR) {
linkage |= (ir_linkage)v;
continue;
}
printf("Parser error, expected visibility or linkage, got '%s'\n",
- str);
+ vstr);
break;
}
}
return sm_bits <= lm_bits;
}
- break;
case irms_float_number:
/* int to float works if the float is large enough */
b_vrp = vrp_get_info(b);
if (a_vrp && b_vrp) {
- ir_tarval *c = tarval_and(a_vrp->bits_not_set, b_vrp->bits_not_set);
+ ir_tarval *vrp_val = tarval_and(a_vrp->bits_not_set, b_vrp->bits_not_set);
- if (tarval_is_null(c)) {
+ if (tarval_is_null(vrp_val)) {
dbg_info *dbgi = get_irn_dbg_info(n);
return new_rd_Or(dbgi, get_nodes_block(n), a, b, mode);
}
new_shift = new_rd_Shl(dbg_shift, block, new_bitop, shift_right, mode);
} else if (is_Shr(left)) {
new_shift = new_rd_Shr(dbg_shift, block, new_bitop, shift_right, mode);
- } else if (is_Rotl(left)) {
+ } else {
assert(is_Rotl(left));
new_shift = new_rd_Rotl(dbg_shift, block, new_bitop, shift_right, mode);
}
CASE_PROJ_EX(Load);
CASE_PROJ_EX(Mod);
default:
- /* leave NULL */;
+ break;
}
return ops;
ASSERT_AND_RET_DBG(
/* Sel: BB x M x ref x int^n --> ref */
(op1mode == mode_M && op2mode == mymode && mode_is_reference(mymode)),
- "Sel node", 0, show_node_failure(n)
+ "Sel node", 0, show_node_failure(n);
);
for (i = get_Sel_n_indexs(n) - 1; i >= 0; --i) {
- ASSERT_AND_RET_DBG(mode_is_int(get_irn_mode(get_Sel_index(n, i))), "Sel node", 0, show_node_failure(n));
+ ASSERT_AND_RET_DBG(mode_is_int(get_irn_mode(get_Sel_index(n, i))), "Sel node", 0, show_node_failure(n););
}
ent = get_Sel_entity(n);
- ASSERT_AND_RET_DBG(ent, "Sel node with empty entity", 0, show_node_failure(n));
+ ASSERT_AND_RET_DBG(ent, "Sel node with empty entity", 0, show_node_failure(n););
return 1;
}
/* Sync: BB x M^n --> M */
for (i = get_Sync_n_preds(n) - 1; i >= 0; --i) {
ASSERT_AND_RET( get_irn_mode(get_Sync_pred(n, i)) == mode_M, "Sync node", 0 );
- };
+ }
ASSERT_AND_RET( mymode == mode_M, "Sync node", 0 );
return 1;
}
unsigned idx = get_irn_idx(n);
ir_node *node_from_map = get_idx_irn(irg, idx);
ASSERT_AND_RET_DBG(node_from_map == n, "Node index and index map entry differ", 0,
- ir_printf("node %+F node in map %+F(%p)\n", n, node_from_map, node_from_map));
+ ir_printf("node %+F node in map %+F(%p)\n", n, node_from_map, node_from_map);
+ );
}
op = get_irn_op(n);
state == op_pin_state_floats ||
state == op_pin_state_pinned,
"invalid pin state", 0,
- ir_printf("node %+F", n));
+ ir_printf("node %+F", n);
+ );
} else if (!is_Block(n) && is_irn_pinned_in_irg(n)
&& !is_irg_state(irg, IR_GRAPH_STATE_BAD_BLOCK)) {
ASSERT_AND_RET_DBG(is_Block(get_nodes_block(n)) || is_Anchor(n),
"block input is not a block", 0,
- ir_printf("node %+F", n));
+ ir_printf("node %+F", n);
+ );
}
if (op->ops.verify_node)
ASSERT_AND_RET_DBG(ir_nodeset_contains(&env->reachable_blocks, block),
"Block is not reachable by blockwalker (endless loop with no kept block?)", 0,
- ir_printf("block %+F\n", block));
+ ir_printf("block %+F\n", block);
+ );
n_cfgpreds = get_Block_n_cfgpreds(block);
branch_nodes = env->branch_nodes;
former_dest = pmap_get(branch_nodes, branch);
ASSERT_AND_RET_DBG(former_dest==NULL || is_unknown_jump(skip_Proj(branch)),
"Multiple users on mode_X node", 0,
- ir_printf("node %+F\n", branch));
+ ir_printf("node %+F\n", branch);
+ );
pmap_insert(branch_nodes, branch, (void*)block);
/* check that there's only 1 branching instruction in each block */
ASSERT_AND_RET_DBG(former_branch == NULL || former_branch == branch,
"Multiple branching nodes in a block", 0,
ir_printf("nodes %+F,%+F in block %+F\n",
- branch, former_branch, branch_block));
+ branch, former_branch, branch_block);
+ );
pmap_insert(branch_nodes, branch_block, branch);
if (is_Cond(branch)) {
|| ir_nodeset_contains(&env->kept_nodes, block)
|| block == get_irg_end_block(get_irn_irg(block)),
"block contains no cfop", 0,
- ir_printf("block %+F\n", block));
+ ir_printf("block %+F\n", block);
+ );
return 1;
}
if (get_irn_mode(get_Cond_selector(cond)) == mode_b) {
ASSERT_AND_RET_DBG(ir_nodeset_contains(&env->true_projs, cond),
"Cond node lacks true proj", 0,
- ir_printf("Cond %+F\n", cond));
+ ir_printf("Cond %+F\n", cond);
+ );
ASSERT_AND_RET_DBG(ir_nodeset_contains(&env->false_projs, cond),
"Cond node lacks false proj", 0,
- ir_printf("Cond %+F\n", cond));
+ ir_printf("Cond %+F\n", cond);
+ );
} else {
ASSERT_AND_RET_DBG(ir_nodeset_contains(&env->true_projs, cond),
"Cond node lacks default Proj", 0,
- ir_printf("Cond %+F\n", cond));
+ ir_printf("Cond %+F\n", cond);
+ );
}
return 1;
}
CASE(CopyB);
CASE(Bound);
default:
- /* leave NULL */;
+ break;
}
#undef CASE
CASE(CopyB);
CASE(Bound);
default:
- /* leave NULL */;
+ break;
}
#undef CASE
}
do { \
if (!(expr)) { \
firm_verify_failure_msg = #expr " && " string; \
- if (opt_do_node_verification != FIRM_VERIFICATION_ERROR_ONLY) { blk; } \
+ if (opt_do_node_verification != FIRM_VERIFICATION_ERROR_ONLY) { blk } \
if (opt_do_node_verification == FIRM_VERIFICATION_REPORT) \
fprintf(stderr, #expr " : " string "\n"); \
else if (opt_do_node_verification == FIRM_VERIFICATION_ON) { \
break;
default:
panic("Only nodes with degree one or two should be in this bucket");
- break;
}
}
}
#include "timing.h"
pbqp_edge_t **edge_bucket;
-pbqp_edge_t **rm_bucket;
+static pbqp_edge_t **rm_bucket;
pbqp_node_t **node_buckets[4];
pbqp_node_t **reduced_bucket = NULL;
pbqp_node_t *merged_node = NULL;
unsigned *mapping;
unsigned src_len;
unsigned tgt_len;
- unsigned src_index;
unsigned tgt_index;
unsigned edge_index;
unsigned edge_len;
/* Check that each column has at most one zero entry. */
for (tgt_index = 0; tgt_index < tgt_len; ++tgt_index) {
unsigned onlyOneZero = 0;
+ unsigned src_index;
if (tgt_vec->entries[tgt_index].data == INF_COSTS)
continue;
vector_t *other_vec;
unsigned other_len;
unsigned other_index;
- unsigned tgt_index;
assert(old_edge);
if (old_edge == edge)
unsigned src_len;
unsigned tgt_len;
unsigned src_index;
- unsigned tgt_index;
unsigned edge_index;
unsigned edge_len;
/* Check that each row has at most one zero entry. */
for (src_index = 0; src_index < src_len; ++src_index) {
unsigned onlyOneZero = 0;
+ unsigned tgt_index;
if (src_vec->entries[src_index].data == INF_COSTS)
continue;
vector_t *other_vec;
unsigned other_len;
unsigned other_index;
- unsigned src_index;
assert(old_edge);
break;
default:
panic("Only nodes with degree one or two should be in this bucket");
- break;
}
}
}
#if KAPS_USE_UNSIGNED
typedef unsigned num;
- static const num INF_COSTS = UINT_MAX;
+ #define INF_COSTS UINT_MAX
#else
typedef intmax_t num;
- static const num INF_COSTS = INTMAX_MAX;
+ #define INF_COSTS INTMAX_MAX
#endif
#include "matrix_t.h"
#include <stdio.h>
#include <string.h>
-#include "lc_common_t.h"
#include "lc_defines.h"
#include "lc_printf.h"
app->init(env);
}
-static void default_init(UNUSED(lc_appendable_t *env))
+static void default_init(lc_appendable_t *env)
{
+ (void) env;
}
-static void default_finish(UNUSED(lc_appendable_t *env))
+static void default_finish(lc_appendable_t *env)
{
+ (void) env;
}
/*
+++ /dev/null
-/*
- libcore: library for basic data structures and algorithms.
- Copyright (C) 2005 IPD Goos, Universit"at Karlsruhe, Germany
-
- This library is free software; you can redistribute it and/or
- modify it under the terms of the GNU Lesser General Public
- License as published by the Free Software Foundation; either
- version 2.1 of the License, or (at your option) any later version.
-
- This library is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Lesser General Public License for more details.
-
- You should have received a copy of the GNU Lesser General Public
- License along with this library; if not, write to the Free Software
- Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
-*/
-
-#ifndef _COMMON_T_H
-#define _COMMON_T_H
-
-#include <obstack.h>
-#include <stdarg.h>
-#include <stdio.h>
-
-#define obstack_chunk_alloc malloc
-#define obstack_chunk_free free
-
-#define bcopy(src,dest,n) memcpy(dest,src,n)
-
-#include "lc_config.h"
-
-#define FUNCNAME LC_FUNCNAME
-#define UNUSED(x) LC_UNUSED(x)
-#define LONGLONG long /* LC_LONGLONG */
-#define LONGDOUBLE double /* LC_LONGDOUBLE */
-
-#ifdef _WIN32
-/* Windows names for non-POSIX calls */
-#define snprintf _snprintf
-#define vsnprintf _vsnprintf
-#endif /* WIN32 */
-
-#endif /* _COMMON_T_H */
#define inline __inline__
#define LC_FUNCNAME __FUNCTION__
-#define LC_UNUSED(x) x __attribute__((__unused__))
#define LC_PRINTF(m) __attribute__((format(printf,m,(m)+1)))
#ifdef __STRICT_ANSI__
#elif defined(_MSC_VER)
#define LC_FUNCNAME "<unknown>"
-#define LC_UNUSED(x) x
#define LC_PRINTF(m)
#define LC_LONGLONG __int64
#define inline
#define LC_FUNCNAME "<unknown>"
-#define LC_UNUSED(x)
#define LC_LONGLONG long
#define LC_LONGDOUBLE double
#define LC_PRINTF(m)
#include <string.h>
#include <ctype.h>
-#ifdef _WIN32
-#include <malloc.h>
-#endif
-
-/* Includes to determine user's home directory */
-#ifdef _WIN32
-#include <shlobj.h>
-#else
-#include <sys/types.h>
-#include <unistd.h>
-#include <pwd.h>
-#endif
-
-/* maximum length of a path. */
-#ifndef MAX_PATH
-#define MAX_PATH 2048
-#endif
-
-
-#include "lc_common_t.h"
#include "lc_opts_t.h"
#include "lc_opts_enum.h"
#include "hashptr.h"
#include "lc_printf.h"
#include "xmalloc.h"
+#include "obst.h"
#define ERR_STRING "In argument \"%s\": "
return buf;
}
-int lc_opt_std_cb(UNUSED(const char *name), lc_opt_type_t type, void *data, size_t length, ...)
+int lc_opt_std_cb(const char *name, lc_opt_type_t type, void *data, size_t length, ...)
{
va_list args;
int res = 0;
int integer;
+ (void) name;
va_start(args, length);
return res;
}
-int lc_opt_std_dump(char *buf, size_t n, UNUSED(const char *name), lc_opt_type_t type, void *data, UNUSED(size_t length))
+int lc_opt_std_dump(char *buf, size_t n, const char *name, lc_opt_type_t type, void *data, size_t length)
{
int res;
+ (void) name;
+ (void) length;
if (data) {
switch (type) {
return res;
}
-int lc_opt_bool_dump_vals(char *buf, size_t n, UNUSED(const char *name), UNUSED(lc_opt_type_t type), UNUSED(void *data), UNUSED(size_t length))
+int lc_opt_bool_dump_vals(char *buf, size_t n, const char *name, lc_opt_type_t type, void *data, size_t length)
{
+ (void) name;
+ (void) type;
+ (void) data;
+ (void) length;
strncpy(buf, "true, false", n);
return n;
}
return options_set;
}
-static int opt_arg_type(UNUSED(const lc_arg_occ_t *occ))
+static int opt_arg_type(const lc_arg_occ_t *occ)
{
+ (void) occ;
return lc_arg_type_ptr;
}
#include <stdio.h>
#include <string.h>
-#ifdef _WIN32
-#include <malloc.h>
-#endif
-
#include "lc_opts_t.h"
#include "lc_opts_enum.h"
+#include "xmalloc.h"
static const char *delim = " \t|,";
#define DECL_CB(N, op) \
-int lc_opt_enum_ ## N ## _cb(LC_UNUSED(const char *name), LC_UNUSED(lc_opt_type_t type), void *data, size_t len, ...) \
+int lc_opt_enum_ ## N ## _cb(const char *name, lc_opt_type_t type, void *data, size_t len, ...) \
{ \
lc_opt_enum_ ## N ## _var_t *var = (lc_opt_enum_ ## N ## _var_t*)data; \
const lc_opt_enum_ ## N ## _items_t *items = var->items; \
const char *arg; \
int res = 0; \
\
+ (void) name; \
+ (void) type; \
va_start(args, len); \
arg = va_arg(args, const char *); \
va_end(args); \
DECL_CB(func_ptr, =)
#define DECL_DUMP(T, N, cond) \
-int lc_opt_enum_ ## N ## _dump(char *buf, size_t n, LC_UNUSED(const char *name), LC_UNUSED(lc_opt_type_t type), void *data, LC_UNUSED(size_t len)) \
+int lc_opt_enum_ ## N ## _dump(char *buf, size_t n, const char *name, lc_opt_type_t type, void *data, size_t len) \
{ \
lc_opt_enum_ ## N ## _var_t *var = (lc_opt_enum_ ## N ## _var_t*)data; \
const lc_opt_enum_ ## N ## _items_t *items = var->items; \
TYPE(value) = *var->value; \
int i; \
size_t l = strlen(buf); \
+ (void) name; \
+ (void) type; \
+ (void) len; \
\
if (l >= n) \
return (int)l; \
#define DECL_DUMP_VALS(T, N) \
-int lc_opt_enum_ ## N ## _dump_vals(char *buf, size_t n, LC_UNUSED(const char *name), LC_UNUSED(lc_opt_type_t type), void *data, LC_UNUSED(size_t len)) \
+int lc_opt_enum_ ## N ## _dump_vals(char *buf, size_t n, const char *name, lc_opt_type_t type, void *data, size_t len) \
{ \
lc_opt_enum_ ## N ## _var_t *var = (lc_opt_enum_ ## N ## _var_t*) data; \
const lc_opt_enum_ ## N ## _items_t *items = var->items; \
const char *prefix = ""; \
int i; \
size_t l = strlen(buf); \
+ (void) name; \
+ (void) type; \
+ (void) len; \
\
if (l >= n) \
return (int)l; \
#include "lc_opts.h"
#include "list.h"
-#include "lc_common_t.h"
#include "lc_defines.h"
typedef struct {
#include <assert.h>
#include <ctype.h>
-#include "lc_common_t.h"
#include "xmalloc.h"
#include "lc_printf.h"
#include "lc_defines.h"
return _lc_arg_get_default_env();
}
-static int lc_arg_cmp(const void *p1, const void *p2, UNUSED(size_t size))
+static int lc_arg_cmp(const void *p1, const void *p2, size_t size)
{
const lc_arg_t *a1 = (const lc_arg_t*)p1;
const lc_arg_t *a2 = (const lc_arg_t*)p2;
+ (void) size;
return strcmp(a1->name, a2->name);
}
return ent != NULL;
}
-void lc_arg_unregister(UNUSED(lc_arg_env_t *env), UNUSED(const char *name))
+void lc_arg_unregister(lc_arg_env_t *env, const char *name)
{
+ (void) env;
+ (void) name;
}
int lc_arg_append(lc_appendable_t *app, const lc_arg_occ_t *occ, const char *str, size_t len)
return modlen > 1 && mod[1] == 'h' ? lc_arg_type_char : lc_arg_type_short;
case 'l':
return modlen > 1 && mod[1] == 'l' ? lc_arg_type_long_long : lc_arg_type_long;
-#define TYPE_CASE(letter,type) case letter: return lc_arg_type_ ## type;
+#define TYPE_CASE(letter,type) case letter: return lc_arg_type_ ## type
TYPE_CASE('j', intmax_t);
TYPE_CASE('z', size_t);
TYPE_CASE('t', ptrdiff_t);
/* read the precision if given */
if (*s == '.') {
- int val;
- s = read_int(s + 1, &val);
+ int precision;
+ s = read_int(s + 1, &precision);
/* Negative or lacking precision after a '.' is treated as
* precision 0. */
- occ.precision = LC_MAX(0, val);
+ occ.precision = LC_MAX(0, precision);
}
/*
const char *named = ++s;
/* Read until the closing brace or end of the string. */
- for (ch = *s; ch != '}' && ch != '\0'; ch = *++s);
+ for (ch = *s; ch != '}' && ch != '\0'; ch = *++s) {
+ }
if (s - named) {
size_t n = s - named;
set_irn_in(ret, j, new_in);
if (n_cr_opt > 0) {
- size_t i, n;
+ size_t c;
+ size_t n;
irg_walk_graph(irg, NULL, do_copy_return_opt, cr_opt);
- for (i = 0, n = ARR_LEN(cr_opt); i < n; ++i) {
- free_entity(cr_opt[i].ent);
+ for (c = 0, n = ARR_LEN(cr_opt); c < n; ++c) {
+ free_entity(cr_opt[c].ent);
}
}
}
ir_graph *irg = get_irn_irg(node);
ir_node *adr = get_Load_ptr(node);
ir_node *mem = get_Load_mem(node);
- ir_node *low, *high, *proj;
+ ir_node *low;
+ ir_node *high;
+ ir_node *proj_m;
dbg_info *dbg;
ir_node *block = get_nodes_block(node);
ir_cons_flags volatility = get_Load_volatility(node) == volatility_is_volatile
}
/* create two loads */
- dbg = get_irn_dbg_info(node);
- low = new_rd_Load(dbg, block, mem, low, low_mode, volatility);
- proj = new_r_Proj(low, mode_M, pn_Load_M);
- high = new_rd_Load(dbg, block, proj, high, mode, volatility);
+ dbg = get_irn_dbg_info(node);
+ low = new_rd_Load(dbg, block, mem, low, low_mode, volatility);
+ proj_m = new_r_Proj(low, mode_M, pn_Load_M);
+ high = new_rd_Load(dbg, block, proj_m, high, mode, volatility);
foreach_out_edge_safe(node, edge, next) {
ir_node *proj = get_edge_src_irn(edge);
{
ir_graph *irg;
ir_node *block, *adr, *mem;
- ir_node *low, *high, *proj;
+ ir_node *low, *high, *proj_m;
dbg_info *dbg;
ir_node *value = get_Store_value(node);
const lower64_entry_t *entry = get_node_entry(value);
}
/* create two Stores */
- dbg = get_irn_dbg_info(node);
- low = new_rd_Store(dbg, block, mem, low, entry->low_word, volatility);
- proj = new_r_Proj(low, mode_M, pn_Store_M);
- high = new_rd_Store(dbg, block, proj, high, entry->high_word, volatility);
+ dbg = get_irn_dbg_info(node);
+ low = new_rd_Store(dbg, block, mem, low, entry->low_word, volatility);
+ proj_m = new_r_Proj(low, mode_M, pn_Store_M);
+ high = new_rd_Store(dbg, block, proj_m, high, entry->high_word, volatility);
foreach_out_edge_safe(node, edge, next) {
ir_node *proj = get_edge_src_irn(edge);
panic("Shr lowering only implemented for two-complement modes");
}
+ block = get_nodes_block(node);
+
/* if the right operand is a 64bit value, we're only interested in the
* lower word */
if (get_irn_mode(right) == env->high_unsigned) {
right = get_lowered_low(right);
} else {
/* shift should never have signed mode on the right */
- ir_node *block = get_nodes_block(node);
assert(get_irn_mode(right) != env->high_signed);
right = create_conv(block, right, low_unsigned);
}
ir_node *res_low = new_rd_shrs(dbgi, block_false, conv, right,
low_unsigned);
int cnsti = modulo_shift2-1;
- ir_node *cnst = new_r_Const_long(irg, low_unsigned, cnsti);
+ ir_node *cnst2 = new_r_Const_long(irg, low_unsigned, cnsti);
ir_node *res_high;
if (new_rd_shrs == new_rd_Shrs) {
- res_high = new_rd_shrs(dbgi, block_false, left_high, cnst, mode);
+ res_high = new_rd_shrs(dbgi, block_false, left_high, cnst2, mode);
} else {
res_high = new_r_Const(irg, get_mode_null(mode));
}
/**
* Translate a Cond.
*/
-static void lower_Cond(ir_node *node, ir_mode *mode)
+static void lower_Cond(ir_node *node, ir_mode *high_mode)
{
ir_node *left, *right, *block;
ir_node *sel = get_Cond_selector(node);
ir_mode *m = get_irn_mode(sel);
ir_mode *cmp_mode;
const lower64_entry_t *lentry, *rentry;
- ir_node *proj, *projT = NULL, *projF = NULL;
+ ir_node *projT = NULL, *projF = NULL;
ir_node *new_bl, *irn;
ir_node *projHF, *projHT;
ir_node *dst_blk;
const ir_edge_t *edge;
const ir_edge_t *next;
- (void) mode;
+ (void) high_mode;
if (m != mode_b) {
if (m == env->high_signed || m == env->high_unsigned) {
}
if (relation == ir_relation_equal) {
+ ir_node *proj;
/* simple case:a == b <==> a_h == b_h && a_l == b_l */
dst_blk = get_cfop_destination(projF);
mark_irn_visited(proj);
exchange(projT, proj);
} else if (relation == ir_relation_less_greater) {
+ ir_node *proj;
/* simple case:a != b <==> a_h != b_h || a_l != b_l */
dst_blk = get_cfop_destination(projT);
mark_irn_visited(proj);
exchange(projF, proj);
} else {
+ ir_node *proj;
/* a rel b <==> a_h REL b_h || (a_h == b_h && a_l rel b_l) */
ir_node *dstT, *dstF, *newbl_eq, *newbl_l;
ir_node *projEqF;
*/
static void lower_Cmp(ir_node *cmp, ir_mode *m)
{
- ir_node *l = get_Cmp_left(cmp);
- ir_mode *mode = get_irn_mode(l);
+ ir_node *l = get_Cmp_left(cmp);
+ ir_mode *cmp_mode = get_irn_mode(l);
ir_node *r, *low, *high, *t, *res;
ir_relation relation;
ir_node *block;
const lower64_entry_t *rentry;
(void) m;
- if (mode != env->high_signed && mode != env->high_unsigned)
+ if (cmp_mode != env->high_signed && cmp_mode != env->high_unsigned)
return;
r = get_Cmp_right(cmp);
/* check if this return must be lowered */
for (i = 0, n = get_Return_n_ress(node); i < n; ++i) {
- ir_node *pred = get_Return_res(node, i);
- ir_mode *mode = get_irn_op_mode(pred);
+ ir_node *pred = get_Return_res(node, i);
+ ir_mode *rmode = get_irn_op_mode(pred);
- if (mode == env->high_signed || mode == env->high_unsigned)
+ if (rmode == env->high_signed || rmode == env->high_unsigned)
need_conv = 1;
}
if (! need_conv)
/**
* Translate the parameters.
*/
-static void lower_Start(ir_node *node, ir_mode *mode)
+static void lower_Start(ir_node *node, ir_mode *high_mode)
{
ir_graph *irg = get_irn_irg(node);
ir_entity *ent = get_irg_entity(irg);
size_t i, j, n_params;
const ir_edge_t *edge;
const ir_edge_t *next;
- (void) mode;
+ (void) high_mode;
if (!mtp_must_be_lowered(tp))
return;
new_projs[i] = j;
if (is_Primitive_type(ptp)) {
- ir_mode *mode = get_type_mode(ptp);
-
- if (mode == env->high_signed || mode == env->high_unsigned)
+ ir_mode *amode = get_type_mode(ptp);
+ if (amode == env->high_signed || amode == env->high_unsigned)
++j;
}
}
ir_type *ptp = get_method_param_type(tp, p);
if (is_Primitive_type(ptp)) {
- ir_mode *mode = get_type_mode(ptp);
-
- if (mode == env->high_signed || mode == env->high_unsigned) {
+ ir_mode *pmode = get_type_mode(ptp);
+ if (pmode == env->high_signed || pmode == env->high_unsigned) {
need_lower = true;
break;
}
res_numbers[i] = j;
if (is_Primitive_type(ptp)) {
- ir_mode *mode = get_type_mode(ptp);
-
- if (mode == env->high_signed || mode == env->high_unsigned) {
+ ir_mode *rmode = get_type_mode(ptp);
+ if (rmode == env->high_signed || rmode == env->high_unsigned) {
need_lower = true;
++j;
}
ir_node *block = get_nodes_block(asmn);
int arity = get_irn_arity(asmn);
ir_node **in = get_irn_in(asmn) + 1;
- int n_outs = get_ASM_n_output_constraints(asmn);
int new_n_outs = 0;
int n_clobber = get_ASM_n_clobbers(asmn);
long *proj_map = ALLOCAN(long, n_outs);
static void lower_bitfields_loads(ir_node *proj, ir_node *load)
{
ir_node *sel = get_Load_ptr(load);
- ir_node *block, *n_proj, *res, *ptr;
+ ir_node *block, *res, *ptr;
ir_graph *irg;
ir_entity *ent;
ir_type *bf_type;
/* create new proj, switch off CSE or we may get the old one back */
old_cse = get_opt_cse();
set_opt_cse(0);
- res = n_proj = new_r_Proj(load, mode, pn_Load_res);
+ res = new_r_Proj(load, mode, pn_Load_res);
set_opt_cse(old_cse);
if (mode_is_signed(mode)) { /* signed */
set_irn_mode(node, mode);
res = node;
goto own_replacement;
- } else {
- panic("unexpected projb: %+F (pred: %+F)", node, pred);
}
- break;
+ panic("unexpected projb: %+F (pred: %+F)", node, pred);
}
case iro_Const: {
/* adapt default block */
n_default_preds = ARR_LEN(default_preds);
if (n_default_preds > 1) {
- size_t i;
+ size_t p;
/* create new intermediate blocks so we don't have critical edges */
- for (i = 0; i < n_default_preds; ++i) {
- ir_node *proj = default_preds[i];
- ir_node *block;
- ir_node *in[1];
+ for (p = 0; p < n_default_preds; ++p) {
+ ir_node *pred = default_preds[p];
+ ir_node *split_block;
+ ir_node *block_in[1];
- in[0] = proj;
- block = new_r_Block(irg, 1, in);
+ block_in[0] = pred;
+ split_block = new_r_Block(irg, 1, block_in);
- default_preds[i] = new_r_Jmp(block);
+ default_preds[p] = new_r_Jmp(split_block);
}
}
set_irn_in(env->default_block, n_default_preds, default_preds);
} walk_env_t;
/** debug handle */
-DEBUG_ONLY(firm_dbg_module_t *dbgHandle;)
+DEBUG_ONLY(static firm_dbg_module_t *dbgHandle;)
/**
* checks whether a Raise leaves a method
break;
default:
- ;
+ break;
}
} /* do_load_store_optimize */
ir_node *pred = get_Block_cfgpred(endblk, i);
pred = skip_Proj(pred);
- if (is_Return(pred))
+ if (is_Return(pred)) {
dfs(get_Return_mem(pred), env);
- else if (is_Raise(pred))
+ } else if (is_Raise(pred)) {
dfs(get_Raise_mem(pred), env);
- else if (is_fragile_op(pred))
+ } else if (is_fragile_op(pred)) {
dfs(get_fragile_op_mem(pred), env);
- else if (is_Bad(pred))
- /* ignore non-optimized block predecessor */;
- else {
+ } else if (is_Bad(pred)) {
+ /* ignore non-optimized block predecessor */
+ } else {
assert(0 && "Unknown EndBlock predecessor");
}
}
ir_node **ins, **phi_ins;
phi_t *repr_phi, *phi;
pair_t *repr_pair, *pair;
- int i, j, k, n, block_nr, n_phis;
+ int i, j, k, n, n_phis;
list_del(&repr->block_list);
/* collect new in arrays */
end = get_irg_end(irg);
- block_nr = 0;
list_for_each_entry(block_t, bl, &part->blocks, block_list) {
block = bl->block;
- ++block_nr;
DB((dbg, LEVEL_1, "%+F, ", block));
#define DBG_OUT_TR(l_relation, l_bound, r_relation, r_bound, relation, v) \
ir_printf("In %e:\na %= %n && b %= %n ==> a %= b == %s\n", \
get_irg_entity(current_ir_graph), \
- l_relation, l_bound, r_relation, r_bound, relation, v);
+ l_relation, l_bound, r_relation, r_bound, relation, v)
/* right side */
#define DBG_OUT_R(r_relation, r_bound, left, relation, right, v) \
ir_printf("In %e:\na %= %n ==> %n %= %n == %s\n", \
get_irg_entity(current_ir_graph), \
- r_relation, r_bound, left, relation, right, v);
+ r_relation, r_bound, left, relation, right, v)
/* left side */
#define DBG_OUT_L(l_relation, l_bound, left, relation, right, v) \
ir_printf("In %e:\na %= %n ==> %n %= %n == %s\n", \
get_irg_entity(current_ir_graph), \
- l_relation, l_bound, left, relation, right, v);
+ l_relation, l_bound, left, relation, right, v)
#else
-#define DBG_OUT_TR(l_relation, l_bound, r_relation, r_bound, relation, v)
-#define DBG_OUT_R(r_relation, r_bound, left, relation, right, v)
-#define DBG_OUT_L(l_relation, l_bound, left, relation, right, v)
+#define DBG_OUT_TR(l_relation, l_bound, r_relation, r_bound, relation, v) (void)0
+#define DBG_OUT_R(r_relation, r_bound, left, relation, right, v) (void)0
+#define DBG_OUT_L(l_relation, l_bound, left, relation, right, v) (void)0
#endif /* DEBUG_CONFIRM */
*/
FIRM_API int value_not_zero(const ir_node *n, ir_node_cnst_ptr *confirm)
{
-#define RET_ON(x) if (x) { *confirm = n; return 1; }; break
+#define RET_ON(x) if (x) { *confirm = n; return 1; } break
ir_tarval *tv;
ir_mode *mode = get_irn_mode(n);
ptr = get_Confirm_value(ptr);
continue;
default:
- ;
+ break;
}
break;
}
CASE(Eor);
CASE(Shl);
default:
- /* leave NULL */;
+ break;
}
return ops;
} /* if */
break;
default:
- ;
+ break;
} /* switch */
return op;
} /* if */
} /* if */
default:
- ;
+ break;
} /* switch */
} /* stat_update_address */
}
}
default:
- ;
+ break;
} /* switch */
/* we want to count the constant IN nodes, not the CSE'ed constant's itself */
#ifdef DISABLE_STATEV
-#define stat_ev_do(expr) 0
#define stat_ev_enabled 0
-#define stat_ev_if if (0)
#define stat_ev_dbl(name, val) ((void)0)
#define stat_ev_int(name, val) ((void)0)
#define stat_ev(name) ((void)0)
#define stat_ev_cnt_inc(var) do { ++stat_ev_cnt_var_ ## var; } while(0)
#define stat_ev_cnt_done(var, name) stat_ev_emit((name), stat_ev_cnt_var_ ## var)
-#define stat_ev_do(expr) (stat_ev_enabled ? ((expr), 1) : 0)
-#define stat_ev_if if (stat_ev_enabled)
-
/**
* Initialize the stat ev machinery.
* @param filename_prefix The prefix of the filename (.ev or .ev.gz will be appended).
ccs_env *ccs = (ccs_env *)env;
ir_class_cast_state this_state = ir_class_casts_any;
ir_type *fromtype, *totype;
- int ref_depth = 0;
if (!is_Cast(n)) return;
while (is_Pointer_type(totype) && is_Pointer_type(fromtype)) {
totype = get_pointer_points_to_type(totype);
fromtype = get_pointer_points_to_type(fromtype);
- ref_depth++;
}
if (!is_Class_type(totype)) return;
static const char *firm_verify_failure_msg;
+#if 0
/**
* Show diagnostic if an entity overwrites another one not
* in direct superclasses.
ir_fprintf(stderr, " %+F:\n", super);
}
}
+#endif
/**
* Show diagnostic if an entity overwrites a wrong number of things.
show_ent_overwrite_cnt(mem)
);
- if (false) {
+#if 0
+ {
size_t j, m;
/* check if the overwrite relation is flat, i.e. every overwrite
* is visible in every direct superclass. */
}
}
}
+#endif
}
return 0;
}
const char *get_type_state_name(ir_type_state s)
{
-#define X(a) case a: return #a;
+#define X(a) case a: return #a
switch (s) {
X(layout_undefined);
X(layout_fixed);
return;
}
- if (exp_val > a->desc.mantissa_size) {
+ if (exp_val > (long)a->desc.mantissa_size) {
if (a != result)
memcpy(result, a, calc_buffer_size);
fp_value *value;
fp_value *temp = NULL;
- int byte_offset;
+ unsigned byte_offset;
uint32_t sign;
uint32_t exponent;
if (0 < v && v < (1 << desc->exponent_size) - 1) {
/* exponent can be encoded, now check the mantissa */
v = value->desc.mantissa_size + ROUNDING_BITS - sc_get_lowest_set_bit(_mant(value));
- return v <= desc->mantissa_size;
+ return v <= (int)desc->mantissa_size;
}
return 0;
}
else
panic("%s:%d: Invalid tarval (null)", file, line);
}
+
+inline static
#ifdef __GNUC__
-inline static void tarval_verify(ir_tarval *tv) __attribute__ ((unused));
+ __attribute__((unused))
#endif
-
-inline static void tarval_verify(ir_tarval *tv)
+void tarval_verify(ir_tarval *tv)
{
assert(tv);
assert(tv->mode);
case TV_OVERFLOW_SATURATE:
return get_mode_min(mode);
case TV_OVERFLOW_WRAP: {
- char *temp = (char*) alloca(sc_get_buffer_length());
+ temp = (char*) alloca(sc_get_buffer_length());
memcpy(temp, value, sc_get_buffer_length());
sc_truncate(get_mode_size_bits(mode), temp);
return get_tarval(temp, length, mode);
real payload */
/* case 128: return &quad_desc; */
default:
+ (void) quad_desc;
panic("Unsupported mode in get_descriptor()");
}
}
return get_tarval(buffer, sc_get_buffer_length(), dst_mode);
default:
- /* the rest can't be converted */
- return tarval_bad;
+ break;
}
- break;
+ /* the rest can't be converted */
+ return tarval_bad;
/* cast int/characters to something */
case irms_int_number:
default:
return snprintf(buf, len, "%s%s%s", prefix, fc_print((const fp_value*) tv->value, tv_buf, sizeof(tv_buf), FC_DEC), suffix);
}
- break;
case irms_internal_boolean:
switch (mode_info->mode_output) {