}
case iro_Rotl: {
- const vrp_attr *vrp_left, *vrp_right;
+ const vrp_attr *vrp_left;
const ir_node *right = get_Rotl_right(node);
vrp_left = get_vrp_attr(get_Rotl_left(node));
- vrp_right = get_vrp_attr(get_Rotl_right(node));
/* We can only compute this if the right value is a constant*/
if (is_Const(right)) {
}
case iro_Shl: {
- const vrp_attr *vrp_left, *vrp_right;
+ const vrp_attr *vrp_left;
const ir_node *right = get_Shl_right(node);
vrp_left = get_vrp_attr(get_Shl_left(node));
- vrp_right = get_vrp_attr(get_Shl_right(node));
/* We can only compute this if the right value is a constant*/
if (is_Const(right)) {
}
case iro_Shr: {
- const vrp_attr *vrp_left, *vrp_right;
+ const vrp_attr *vrp_left;
const ir_node *right = get_Shr_right(node);
vrp_left = get_vrp_attr(get_Shr_left(node));
- vrp_right = get_vrp_attr(get_Shr_right(node));
/* We can only compute this if the right value is a constant*/
if (is_Const(right)) {
}
case iro_Shrs: {
- const vrp_attr *vrp_left, *vrp_right;
+ const vrp_attr *vrp_left;
const ir_node *right = get_Shrs_right(node);
vrp_left = get_vrp_attr(get_Shrs_left(node));
- vrp_right = get_vrp_attr(get_Shrs_right(node));
/* We can only compute this if the right value is a constant*/
if (is_Const(right)) {
} else { /* complete in gp registers */
int src_bits = get_mode_size_bits(src_mode);
int dst_bits = get_mode_size_bits(dst_mode);
- int min_bits;
ir_mode *min_mode;
if (src_bits == dst_bits) {
}
if (src_bits < dst_bits) {
- min_bits = src_bits;
min_mode = src_mode;
} else {
- min_bits = dst_bits;
min_mode = dst_mode;
}
{
const arm_SymConst_attr_t *attr = get_arm_SymConst_attr_const(irn);
sym_or_tv_t key, *entry;
- unsigned label;
key.u.entity = attr->entity;
key.is_entity = true;
/* allocate a label */
entry->label = get_unique_label();
}
- label = entry->label;
/* load the symbol indirect */
be_emit_cstring("\tldr ");
static void emit_arm_fConst(const ir_node *irn)
{
sym_or_tv_t key, *entry;
- unsigned label;
ir_mode *mode;
key.u.tv = get_fConst_value(irn);
/* allocate a label */
entry->label = get_unique_label();
}
- label = entry->label;
/* load the tarval indirect */
mode = get_irn_mode(irn);
{
/* get predecessor in stack_order list */
ir_node *stack_pred = be_get_stack_pred(abihelper, node);
- ir_node *stack_pred_transformed;
ir_node *stack;
if (stack_pred == NULL) {
return sp_proj;
}
- stack_pred_transformed = be_transform_node(stack_pred);
- stack = (ir_node*)pmap_get(node_to_stack, stack_pred);
+ be_transform_node(stack_pred);
+ stack = (ir_node*)pmap_get(node_to_stack, stack_pred);
if (stack == NULL) {
return get_stack_pointer_for(stack_pred);
}
static ir_node *add_to_keep(ir_node *last_keep,
const arch_register_class_t *cls, ir_node *node)
{
- const ir_node *op;
if (last_keep != NULL) {
be_Keep_add_node(last_keep, cls, node);
} else {
sched_add_after(schedpoint, last_keep);
}
}
- op = skip_Proj_const(node);
return last_keep;
}
{
const ir_node *irn = ci->inh.irn;
int *front = FRONT_BASE(ci, col);
- int i, ok;
+ int i;
struct list_head changed;
INIT_LIST_HEAD(&changed);
DBG((ci->cloud->env->dbg, LEVEL_2, "%2{firm:indent}setting %+F to %d\n", depth, irn, col));
- ok = change_color_single(ci->cloud->env, irn, col, &changed, depth);
- // assert(ok && "Color changing may not fail while committing the coloring");
+ change_color_single(ci->cloud->env, irn, col, &changed, depth);
materialize_coloring(&changed);
for (i = 0; i < ci->mst_n_childs; ++i) {
ir_nodeset_t live;
ir_nodeset_iterator_t iter;
- ir_node *curr, *irn, *perm, **nodes;
+ ir_node *irn, *perm, **nodes;
size_t i, n;
DBG((dbg, LEVEL_1, "Insert Perm after: %+F\n", pos));
sched_add_after(pos, perm);
free(nodes);
- curr = perm;
for (i = 0; i < n; ++i) {
ir_node *perm_op = get_irn_n(perm, i);
const arch_register_t *reg = arch_get_irn_register(perm_op);
ir_node *proj = new_r_Proj(perm, mode, i);
arch_set_irn_register(proj, reg);
- curr = proj;
-
be_ssa_construction_init(&senv, irg);
be_ssa_construction_add_copy(&senv, perm_op);
be_ssa_construction_add_copy(&senv, proj);
static const char suffix[] = ".prof";
size_t i, num_birgs;
- int stat_active = 0;
be_main_env_t env;
char prof_filename[256];
be_irg_t *birgs;
num_birgs++;
}
- stat_active = stat_is_active();
-
/* For all graphs */
for (i = 0; i < num_birgs; ++i) {
be_irg_t *birg = &birgs[i];
{
ir_node *irn;
ir_node *in[n_be_AddSP_last];
- const arch_register_class_t *cls;
ir_graph *irg;
be_node_attr_t *attr;
be_set_constr_single_reg_out(irn, pn_be_AddSP_sp, sp,
arch_register_req_type_produces_sp);
- cls = arch_register_get_class(sp);
-
return irn;
}
ir_node *irn, *cand = NULL;
int max_prio = INT_MIN;
int cur_prio = INT_MIN;
- int reg_fact, cand_reg_fact;
+ int reg_fact;
ir_nodeset_iterator_t iter;
/* Note: register pressure calculation needs an overhaul, you need correct
 * tracking for each register class individually and weight by each class
if (cur_prio > max_prio) {
cand = irn;
max_prio = cur_prio;
- cand_reg_fact = reg_fact;
}
DBG((trace_env->dbg, LEVEL_4, "checked NODE %+F\n", irn));
const ir_mode *mode, int align)
{
ir_node *spillnode = get_memory_edge(node);
- spill_t *spill;
assert(spillnode != NULL);
/* walk upwards and collect all phis and spills on this way */
if (is_Phi(spillnode)) {
- spill = collect_memphi(env, spillnode, mode, align);
+ collect_memphi(env, spillnode, mode, align);
} else {
- spill = collect_spill(env, spillnode, mode, align);
+ collect_spill(env, spillnode, mode, align);
}
ARR_APP1(ir_node *, env->reloads, node);
/* We use the cpuid instruction to detect the CPU features */
if (x86_toogle_cpuid()) {
cpuid_registers regs;
- unsigned highest_level;
char vendorid[13];
x86_cpu_info_t cpu_info;
/* get vendor ID */
x86_cpuid(®s, 0);
- highest_level = regs.r.eax;
memcpy(&vendorid[0], ®s.r.ebx, 4);
memcpy(&vendorid[4], ®s.r.edx, 4);
memcpy(&vendorid[8], ®s.r.ecx, 4);
const ir_asm_constraint *in_constraints;
const ir_asm_constraint *out_constraints;
ident **clobbers;
- int clobbers_flags = 0;
unsigned clobber_bits[N_IA32_CLASSES];
int out_size;
backend_info_t *info;
memset(&clobber_bits, 0, sizeof(clobber_bits));
- /* workaround for lots of buggy code out there as most people think volatile
- * asm is enough for everything and forget the flags (linux kernel, etc.)
- */
- if (get_irn_pinned(node) == op_pin_state_pinned) {
- clobbers_flags = 1;
- }
-
arity = get_irn_arity(node);
in = ALLOCANZ(ir_node*, arity);
if (strcmp(c, "memory") == 0)
continue;
if (strcmp(c, "cc") == 0) {
- clobbers_flags = 1;
continue;
}
*/
static void emit_ia32_Jmp(const ir_node *node)
{
- ir_node *block;
-
- /* for now, the code works for scheduled and non-schedules blocks */
- block = get_nodes_block(node);
-
/* we have a block schedule */
if (can_be_fallthrough(node)) {
ia32_emitf(node, "\t/* fallthrough to %L */\n");
const ir_node *proj_false;
const ir_node *dest_true;
const ir_node *dest_false;
- const ir_node *block;
cc = determine_final_cc(node, 0, cc);
proj_false = get_proj(node, pn_ia32_Jcc_false);
assert(proj_false && "Jcc without false Proj");
- block = get_nodes_block(node);
-
if (can_be_fallthrough(proj_true)) {
/* exchange both proj's so the second one can be omitted */
const ir_node *t = proj_true;
*/
static void peephole_ia32_Return(ir_node *node)
{
- ir_node *block, *irn;
+ ir_node *irn;
if (!ia32_cg_config.use_pad_return)
return;
- block = get_nodes_block(node);
-
/* check if this return is the first on the block */
sched_foreach_reverse_from(node, irn) {
switch (get_irn_opcode(irn)) {
int i, maxslot, inc_ofs, ofs;
ir_node *node, *pred_sp, *block;
ir_node *loads[MAXPUSH_OPTIMIZE];
- ir_graph *irg;
unsigned regmask = 0;
unsigned copymask = ~0;
/* create a new IncSP if needed */
block = get_nodes_block(irn);
- irg = get_irn_irg(irn);
if (inc_ofs > 0) {
pred_sp = be_new_IncSP(esp, block, pred_sp, -inc_ofs, be_get_IncSP_align(irn));
sched_add_before(irn, pred_sp);
if (exists($n->{"init_attr"})) {
$temp .= "\tattr = (${attr_type}*)get_irn_generic_attr(res);\n";
+ $temp .= "\t(void) attr; /* avoid potential warning */\n";
$temp .= "\t".$n->{"init_attr"}."\n";
}
{
/* get predecessor in stack_order list */
ir_node *stack_pred = be_get_stack_pred(abihelper, node);
- ir_node *stack_pred_transformed;
ir_node *stack;
if (stack_pred == NULL) {
return sp_proj;
}
- stack_pred_transformed = be_transform_node(stack_pred);
- stack = (ir_node*)pmap_get(node_to_stack, stack_pred);
+ be_transform_node(stack_pred);
+ stack = (ir_node*)pmap_get(node_to_stack, stack_pred);
if (stack == NULL) {
return get_stack_pointer_for(stack_pred);
}
and backwards without loss. */
int values_in_mode(const ir_mode *sm, const ir_mode *lm)
{
- int sm_bits, lm_bits;
ir_mode_arithmetic arith;
assert(sm);
if (sm == mode_b)
return mode_is_int(lm);
- sm_bits = get_mode_size_bits(sm);
- lm_bits = get_mode_size_bits(lm);
-
arith = get_mode_arithmetic(sm);
if (arith != get_mode_arithmetic(lm))
return 0;
int i, n_preds;
ir_node *oldn = n;
- ir_node *block;
ir_node *first_val = NULL; /* to shutup gcc */
if (!get_opt_optimize() &&
n_preds = get_Phi_n_preds(n);
- block = get_nodes_block(n);
-
/* Phi of dead Region without predecessors. */
if (n_preds == 0)
return n;
unsigned col_len;
unsigned row_index;
unsigned row_len;
- unsigned node_len;
assert(pbqp_node_get_degree(node) == 2);
row_len = src_vec->len;
col_len = tgt_vec->len;
- node_len = node_vec->len;
mat = pbqp_matrix_alloc(pbqp, row_len, col_len);
static void select_row(pbqp_edge_t *edge, unsigned row_index)
{
pbqp_matrix_t *mat;
- pbqp_node_t *src_node;
pbqp_node_t *tgt_node;
vector_t *tgt_vec;
unsigned tgt_len;
unsigned tgt_index;
unsigned new_infinity = 0;
- src_node = edge->src;
tgt_node = edge->tgt;
tgt_vec = tgt_node->costs;
static void transform_alloc_calls(ir_graph *irg, walk_env_t *env)
{
ir_node *call, *next, *mem, *blk;
- ir_type *ftp;
/* kill all dead allocs */
for (call = env->dead_allocs; call; call = next) {
}
/* convert all non-escaped heap allocs into frame variables */
- ftp = get_irg_frame_type(irg);
for (call = env->found_allocs; call; call = next) {
next = (ir_node*)get_irn_link(call);
}
static void insert_nodes(ir_node *block, void *ctx)
{
pre_env *env = (pre_env*)ctx;
- ir_node *value, *expr, *idom, *first_s, *worklist;
+ ir_node *value, *expr, *idom, *first_s;
block_info *curr_info, *idom_info;
int pos, arity = get_irn_arity(block);
int all_same, by_some, updated;
/* convert the set into a list. This allows the removal of
* elements from the set */
- worklist = NULL;
foreach_valueset(curr_info->antic_in, value, expr, iter) {
ir_mode *mode;
static unsigned optimize_phi(ir_node *phi, walk_env_t *wenv)
{
int i, n;
- ir_node *store, *old_store, *ptr, *block, *phi_block, *phiM, *phiD, *exc, *projM;
+ ir_node *store, *ptr, *block, *phi_block, *phiM, *phiD, *exc, *projM;
+#ifdef DO_CACHEOPT
+ ir_node *old_store;
+#endif
ir_mode *mode;
ir_node **inM, **inD, **projMs;
int *idx;
return 0;
store = skip_Proj(projM);
+#ifdef DO_CACHEOPT
old_store = store;
+#endif
if (!is_Store(store))
return 0;
static void move_loads_out_of_loops(scc *pscc, loop_env *env)
{
ir_node *phi, *load, *next, *other, *next_other;
- ir_entity *ent;
int j;
phi_entry *phi_list = NULL;
set *avail;
/* for now, we can only move Load(Global) */
if (! is_Global(ptr))
continue;
- ent = get_Global_entity(ptr);
load_mode = get_Load_mode(load);
for (other = pscc->head; other != NULL; other = next_other) {
node_entry *ne = get_irn_ne(other, env);
int depth_adaption = opt_params.depth_adaption;
unsigned do_inversion = 1;
- unsigned has_cc = 0;
/* Depth of 0 is the procedure and 1 a topmost loop. */
loop_depth = get_loop_depth(cur_loop) - 1;
/* Search for condition chains and temporarily save the blocks in an array. */
cc_blocks = NEW_ARR_F(ir_node *, 0);
inc_irg_visited(current_ir_graph);
- has_cc = find_condition_chain(loop_head);
+ find_condition_chain(loop_head);
unmark_not_allowed_cc_blocks();
DEL_ARR_F(cc_blocks);
{
ir_node *projres, *loop_condition, *iteration_path;
- unsigned success, is_latest_val;
+ unsigned success;
ir_tarval *step_tar;
ir_mode *mode;
* Until now we only have end_val. */
if (is_Add(iteration_path) || is_Sub(iteration_path)) {
- /* We test against the latest value of the iv. */
- is_latest_val = 1;
-
loop_info.add = iteration_path;
DB((dbg, LEVEL_4, "Case 1: Got add %N (maybe not sane)\n", loop_info.add));
} else if (is_Phi(iteration_path)) {
ir_node *new_iteration_phi;
- /* We compare with the value the iv had entering this run. */
- is_latest_val = 0;
-
loop_info.iteration_phi = iteration_path;
DB((dbg, LEVEL_4, "Case 2: Got phi %N\n", loop_info.iteration_phi));
k = 0;
for (i = n - 1; i >= 0; --i) {
ir_node *pred = get_Block_cfgpred(block, i);
- ir_node *pred_block;
/* pred must be a direct jump to us */
if (! is_Jmp(pred) && ! is_Raise(pred) && !is_Bad(pred))
continue;
- pred_block = get_nodes_block(skip_Proj(pred));
-
preds[k].pred = pred;
preds[k].index = i;
}
ir_relation l_relation, res_relation, neg_relation;
interval_t l_iv, r_iv;
ir_tarval *tv;
- ir_mode *mode;
if (is_Confirm(right)) {
/* we want the Confirm on the left side */
* We know that a CMP b and check for a ~CMP b
*/
else {
- mode = get_irn_mode(left);
neg_relation = get_negated_relation(relation);
if ((r_relation == neg_relation) || (r_relation == (neg_relation & ~ir_relation_equal))) {
* We know that a CMP b and check for a ~CMP b
*/
else {
- mode = get_irn_mode(left);
neg_relation = get_negated_relation(relation);
if ((l_relation == neg_relation) || (l_relation == (neg_relation & ~ir_relation_equal))) {
*/
static int check_users_for_reg_pressure(ir_node *iv, iv_env *env)
{
- ir_node *irn, *header;
+ ir_node *irn;
ir_node *have_user = NULL;
ir_node *have_cmp = NULL;
node_entry *e = get_irn_ne(iv, env);
scc *pscc = e->pscc;
- header = e->header;
for (irn = pscc->head; irn != NULL; irn = e->next) {
const ir_edge_t *edge;
*/
static void process_call(ir_node *call, ir_entity *callee, q_set *hmap)
{
- ir_type *mtp;
entry_t *key, *entry;
ir_node *call_param;
size_t i, n_params;
* last non-variadic one, which might be needed for the va_start()
* magic
*/
- mtp = get_Call_type(call);
/* In this for loop we collect the calls that have
a constant parameter. */
{
ir_entity *new_entity;
ident *clone_ident;
- symconst_symbol sym;
/* A counter for the clones.*/
static size_t nr = 0;
/* We need now a new ir_graph for our clone method. */
create_clone_proc_irg(new_entity, q);
- /* We must set the atomic value of our "new_entity". */
- sym.entity_p = new_entity;
-
/* The "new_entity" doesn't have this information. */
new_entity->attr.mtd_attr.param_access = NULL;
new_entity->attr.mtd_attr.param_weight = NULL;
static void do_reassociation(walker_t *wenv)
{
int i, res, changed;
- ir_node *n, *blk;
+ ir_node *n;
while (! waitq_empty(wenv->wq)) {
n = (ir_node*)waitq_get(wenv->wq);
set_irn_link(n, NULL);
- blk = get_nodes_block(n);
-
hook_reassociate(1);
/* reassociation must run until a fixpoint is reached. */
if (n_params > 0) {
ir_node *calls;
ir_node *args;
- ir_node *args_bl;
NEW_ARR_A(ir_node **, call_params, env->n_tail_calls);
/* build new Proj's and Phi's */
args = get_irg_args(irg);
- args_bl = get_nodes_block(args);
for (i = 0; i < n_params; ++i) {
ir_mode *mode = get_type_mode(get_method_param_type(method_tp, i));
* const_code_irg. */
int is_irn_const_expression(ir_node *n)
{
- ir_mode *m;
-
/* we are in danger iff an exception will arise. TODO: be more precisely,
* for instance Div. will NOT rise if divisor != 0
*/
if (is_binop(n) && !is_fragile_op(n))
return is_irn_const_expression(get_binop_left(n)) && is_irn_const_expression(get_binop_right(n));
- m = get_irn_mode(n);
switch (get_irn_opcode(n)) {
case iro_Const:
case iro_SymConst:
size_t k;
size_t l;
int overwritten;
- ir_type *super, *inhenttype;
+ ir_type *super;
ir_entity *inhent, *thisent;
mangle_inherited_name_func *mfunc = *(mangle_inherited_name_func **)env;
assert(is_Class_type(super) && "not a class");
for (j = 0; j < get_class_n_members(super); j++) {
inhent = get_class_member(super, j);
- inhenttype = get_entity_type(inhent);
/* check whether inhent is already overwritten */
overwritten = 0;
for (k = 0; (k < get_class_n_members(clss)) && (overwritten == 0); k++) {