+
+ 22.2. Goetz
+ irgopt: inline_small_irgs implemented
+
+ 30.1. - 20.2. Goetz
+ Bugfixes, some access functions ...
+
29.1.2002 Goetz
New directory: ana for analyses. Adapted configure/makefiles
implemented irout: backedges. Added one field to ir_graph, one to ir_node.
/* SymConst */
} else if (n->op->code == iro_SymConst) {
if (get_SymConst_kind(n) == linkage_ptr_info) {
- xfprintf (F, "SymC %I", get_SymConst_ptrinfo(n));
+	/* don't use get_SymConst_ptrinfo as it mangles the name. */
+ xfprintf (F, "SymC %I", n->attr.i.tori.ptrinfo);
} else {
assert(get_kind(get_SymConst_type(n)) == k_type);
assert(get_type_ident(get_SymConst_type(n)));
break;
case iro_Sel: {
assert(get_kind(get_Sel_entity(n)) == k_entity);
- xfprintf (F, "%s", id_to_str(get_entity_ident(get_Sel_entity(n))));
+ xfprintf (F, "%I", get_entity_ident(get_Sel_entity(n)));
} break;
default:
} /* end switch */
case iro_Sel:
assert(get_kind(get_Sel_entity(n)) == k_entity);
xfprintf (F, "\"%I ", get_irn_opident(n));
- xfprintf (F, "%s", id_to_str(get_entity_ident(get_Sel_entity(n))));
+ xfprintf (F, "%I", get_entity_ident(get_Sel_entity(n)));
xfprintf (F, DEFAULT_NODE_ATTR);
break;
case iro_SymConst:
switch (get_entity_visibility(ent)) {
case local: fprintf (F, "local\n"); break;
case external_visible: fprintf (F, "external_visible\n"); break;
- case external_allocated: fprintf (F, "external_allocate\nd");break;
+  case external_allocated: fprintf (F, "external_allocated\n");break;
}
switch (get_entity_variability(ent)) {
case uninitialized: fprintf (F, "uninitialized");break;
case part_constant: fprintf (F, "part_constant");break;
case constant: fprintf (F, "constant"); break;
}
+ if (is_method_type(get_entity_type(ent)))
+ xfprintf (F, "\n irg = %p ", get_entity_irg(ent));
xfprintf(F, "\"}\n");
/* The Edges */
/* skip this to reduce graph. Member edge of type is parallel to this edge. *
if (get_irn_opcode(block) == iro_Block) {
/* This is a block. Dump a node for the block. */
- xfprintf (F, "node: {title: \"%p\" label: \"%I\"}", block,
- block->op->name);
+ xfprintf (F, "node: {title:\""); PRINT_NODEID(block);
+ xfprintf (F, "\" label: \"%I ", block->op->name); PRINT_NODEID(block);
+ xfprintf (F, "\"}\n");
/* Dump the edges */
for ( i = 0; i < get_Block_n_cfgpreds(block); i++) {
pred = get_nodes_Block(skip_Proj(get_Block_cfgpred(block, i)));
# include <assert.h>
+# include "irprog.h"
# include "irgopt.h"
# include "irnode_t.h"
# include "irgraph_t.h"
# include "ircons.h"
# include "misc.h"
# include "irgmod.h"
-
+# include "array.h"
# include "pset.h"
/* Defined in iropt.c */
void
optimize_in_place_wrapper (ir_node *n, void *env) {
- int i;
+ int start, i;
ir_node *optimized;
+ if (get_irn_op(n) == op_Block)
+ start = 0;
+ else
+ start = -1;
+
/* optimize all sons after recursion, i.e., the sons' sons are
optimized already. */
- for (i = -1; i < get_irn_arity(n); i++) {
+ for (i = start; i < get_irn_arity(n); i++) {
optimized = optimize_in_place_2(get_irn_n(n, i));
set_irn_n(n, i, optimized);
+ assert(get_irn_op(optimized) != op_Id);
}
}
current_ir_graph = irg;
/* Handle graph state */
- // assert(get_irg_phase_state(irg) != phase_building);
+ assert(get_irg_phase_state(irg) != phase_building);
if (get_opt_global_cse())
set_irg_pinned(current_ir_graph, floats);
if (get_irg_outs_state(current_ir_graph) == outs_consistent)
return n->link;
}
-
/* We use the block_visited flag to mark that we have computed the
number of useful predecessors for this block.
Further we encode the new arity in this flag in the old blocks.
for (i = -1; i < get_irn_arity(n); i++)
set_irn_n (nn, i, get_new_node(get_irn_n(n, i)));
}
- /* Now the new node is complete. We can add it to the hash table for cse. */
- add_identities (current_ir_graph->value_table, nn);
+ /* Now the new node is complete. We can add it to the hash table for cse.
+     @@@ inlining aborts if we identify End. Why? */
+ if(get_irn_op(nn) != op_End)
+ add_identities (current_ir_graph->value_table, nn);
}
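
For illustration, the block_visited encoding described in the comment above
("mark that we have computed the number of useful predecessors ... encode the
new arity in this flag") might be realized as in the following sketch. The
accessor names are taken from the library's public interface; the function
name is illustrative, not the verbatim implementation:

static int compute_new_arity_sketch(ir_node *b) {
  unsigned long irg_v   = get_irg_block_visited(current_ir_graph);
  unsigned long block_v = get_Block_block_visited(b);
  int i, res = 0;

  if (block_v >= irg_v)              /* already counted in this pass */
    return (int)(block_v - irg_v);   /* decode the new arity */

  for (i = 0; i < get_irn_arity(b); i++)
    if (get_irn_opcode(get_irn_n(b, i)) != iro_Bad)
      res++;                         /* count the useful predecessors */
  set_Block_block_visited(b, irg_v + res);  /* encode arity in the flag */
  return res;
}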
-/* Copies the graph resucsively, compacts the keepalive of the end node. */
+/* Copies the graph recursively, compacts the keepalive of the end node. */
void
copy_graph () {
ir_node *oe, *ne; /* old end, new end */
current_ir_graph = irg;
/* Handle graph state */
- // assert(get_irg_phase_state(current_ir_graph) != phase_building);
+ assert(get_irg_phase_state(current_ir_graph) != phase_building);
free_outs(current_ir_graph);
if (get_optimize() && get_opt_dead_node_elimination()) {
if (!get_opt_inline()) return;
/* Handle graph state */
- // assert(get_irg_phase_state(current_ir_graph) != phase_building);
+ assert(get_irg_phase_state(current_ir_graph) != phase_building);
if (get_irg_outs_state(current_ir_graph) == outs_consistent)
set_irg_outs_inconsistent(current_ir_graph);
free(cf_pred);
}
}
+
+/********************************************************************/
+/* Apply inlining to small methods. */
+/********************************************************************/
+
+static int pos;
+
+/* It makes no sense to inline too many calls in one procedure. Anyway,
+ I didn't get a version with NEW_ARR_F to run. */
+#define MAX_INLINE 1024
+
+static void collect_calls(ir_node *call, void *env) {
+ ir_node **calls = (ir_node **)env;
+ ir_node *addr;
+ tarval *tv;
+ ir_graph *called_irg;
+
+ if (get_irn_op(call) != op_Call) return;
+
+ addr = get_Call_ptr(call);
+ if (get_irn_op(addr) == op_Const) {
+ /* Check whether the constant is the pointer to a compiled entity. */
+ tv = get_Const_tarval(addr);
+ if (tv->u.p.ent) {
+ called_irg = get_entity_irg(tv->u.p.ent);
+ if (called_irg && pos < MAX_INLINE) {
+ /* The Call node calls a locally defined method. Remember to inline. */
+ calls[pos] = call;
+ pos++;
+ }
+ }
+ }
+}
+
+
+/* Inlines all small methods at call sites where the called address comes
+ from a Const node that references the entity representing the called
+ method.
+   The size argument is a rough measure of the code size of the method:
+ Methods where the obstack containing the firm graph is smaller than
+ size are inlined. */
+void inline_small_irgs(ir_graph *irg, int size) {
+ int i;
+ ir_node *calls[MAX_INLINE];
+ ir_graph *rem = current_ir_graph;
+
+ if (!(get_optimize() && get_opt_inline())) return;
+
+ /*DDME(get_irg_ent(current_ir_graph));*/
+
+ current_ir_graph = irg;
+ /* Handle graph state */
+ assert(get_irg_phase_state(current_ir_graph) != phase_building);
+
+ /* Find Call nodes to inline.
+     (We cannot inline during a walk of the graph, as inlining the same
+     method several times changes the visited flag of the walked graph:
+     after the first inlining the visited flag of the callee equals that of
+     the caller. With the next inlining both are increased.) */
+ pos = 0;
+ irg_walk(get_irg_end(irg), NULL, collect_calls, (void *) calls);
+
+ if ((pos > 0) && (pos < MAX_INLINE)) {
+ /* There are calls to inline */
+ collect_phiprojs(irg);
+ for (i = 0; i < pos; i++) {
+ tarval *tv;
+ ir_graph *callee;
+ tv = get_Const_tarval(get_Call_ptr(calls[i]));
+ callee = get_entity_irg(tv->u.p.ent);
+ if ((_obstack_memory_used(callee->obst) - obstack_room(callee->obst)) < size) {
+ /*printf(" inlineing "); DDME(tv->u.p.ent);*/
+ inline_method(calls[i], callee);
+ }
+ }
+ }
+
+ current_ir_graph = rem;
+}
/* Performs dead node elimination by copying the ir graph to a new obstack.
   Further removes Bad predecessors from Blocks and the corresponding
inputs to Phi nodes.
+ Optimization is only performed if options `optimize' and
+ `opt_dead_node_elimination' are set.
   The graph may not be in state phase_building. The outs datastructure
is freed, the outs state set to no_outs. (@@@ Change this? -> inconsistent.)
   Removes old attributes of nodes. Sets link field to NULL.
   Attention: the numbers assigned to nodes if the library is compiled for
   development/debugging are not conserved by copying. */
void dead_node_elimination(ir_graph *irg);
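
A minimal usage sketch (assuming the program iteration interface of
irprog.h, get_irp_n_irgs()/get_irp_irg(); the driver function itself is
hypothetical): run the pass over every graph once construction is done.

#include "irprog.h"
#include "irgopt.h"

static void eliminate_dead_nodes_everywhere(void) {
  int i;
  /* copy each graph to a fresh obstack, dropping the dead nodes */
  for (i = 0; i < get_irp_n_irgs(); i++)
    dead_node_elimination(get_irp_irg(i));
}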
+/* Removes Bad predecessors from Blocks and the corresponding
+ inputs to Phi nodes as in dead_node_elimination but without
+ copying the graph.
+ @@@ not implemented! */
+void remove_bad_predecessors(ir_graph *irg);
+
/* Inlines a method at the given call site.
   Assumes that call is a Call node in current_ir_graph and that
   the type in the Call node's type attribute is the same as the
   type of the called graph.
Further it assumes that all Phi nodes in a block of current_ir_graph
are assembled in a "link" list in the link field of the corresponding
block nodes. Further assumes that all Proj nodes are in a "link" list
- in the nodes producing the tuple. (This is only a optical feature
+   in the nodes producing the tuple. (This is only a cosmetic feature
for the graph.) Conserves this feature for the old
nodes of the graph. This precondition can be established by a call to
   collect_phiprojs(), see irgmod.h.
Called_graph must be unequal to current_ir_graph. Will not inline
if they are equal.
- Sets visited masterflag in curren_ir_graph to max of flag in current
- and called graphs.
+ Sets visited masterflag in current_ir_graph to the max of the flag in
+ current and called graph.
Removes the call node and splits the basic block the call node
belongs to. Inserts a copy of the called graph between these nodes.
   It is recommended to call local_optimize_graph after inlining as this
   function leaves a set of obscure Tuple nodes, e.g. a Proj-Tuple-Jmp
   combination as control flow operation. */
void inline_method(ir_node *call, ir_graph *called_graph);
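
To make the preconditions concrete, here is a hedged sketch for inlining a
single call site; caller, call, and callee are hypothetical, and
collect_phiprojs()/local_optimize_graph() are the helpers named above:

#include "irgraph.h"   /* current_ir_graph */
#include "irgmod.h"    /* collect_phiprojs */
#include "irgopt.h"

static void inline_one_call(ir_graph *caller, ir_node *call, ir_graph *callee) {
  ir_graph *rem = current_ir_graph;
  current_ir_graph = caller;     /* inline_method works on current_ir_graph */
  collect_phiprojs(caller);      /* establish the Phi/Proj link lists */
  inline_method(call, callee);
  local_optimize_graph(caller);  /* clean up the obscure Tuple nodes */
  current_ir_graph = rem;
}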
+
+/* Inlines all small methods at call sites where the called address comes
+ from a Const node that references the entity representing the called
+ method.
+   The size argument is a rough measure of the code size of the method:
+   Methods where the obstack containing the firm graph is smaller than
+   size are inlined. Further only a limited number of calls are inlined.
+   If the method contains 1024 or more inlinable calls none will be
+   inlined.
+   Inlining is only performed if flags `optimize' and `inlining' are set.
+ The graph may not be in state phase_building.
+ It is recommended to call local_optimize_graph after inlining as this
+ function leaves a set of obscure Tuple nodes, e.g. a Proj-Tuple-Jmp
+ combination as control flow operation. */
+void inline_small_irgs(ir_graph *irg, int size);
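
And a sketch of driving this pass over a whole program (again assuming the
irprog.h iteration interface; the 500 byte threshold is an arbitrary
illustration, not a recommended value):

#include "irprog.h"
#include "irgopt.h"

static void inline_all_small_methods(void) {
  int i;
  for (i = 0; i < get_irp_n_irgs(); i++)
    inline_small_irgs(get_irp_irg(i), 500);  /* assumed size threshold */
}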
+
# endif /* _IRGOPT_H_ */
inline ir_node *
skip_Tuple (ir_node *node) {
- if ((node->op == op_Proj) && (get_irn_op(get_Proj_pred(node)) == op_Tuple))
- return get_Tuple_pred(get_Proj_pred(node), get_Proj_proj(node));
+ ir_node *pred;
+ if (get_irn_op(node) == op_Proj) {
+ pred = skip_nop(get_Proj_pred(node));
+ if (get_irn_op(pred) == op_Proj) /* nested Tuple ? */
+ pred = skip_nop(skip_Tuple(pred));
+ if (get_irn_op(pred) == op_Tuple)
+ return get_Tuple_pred(pred, get_Proj_proj(node));
+ }
return node;
}
-
inline ir_node *
skip_nop (ir_node *node) {
/* don't assert node !!! */
*/
int i, n_preds;
-
  ir_node *block = NULL;     /* to shut up gcc */
  ir_node *first_val = NULL; /* to shut up gcc */
  ir_node *scnd_val = NULL;  /* to shut up gcc */
res->ld_name = NULL;
res->overwrites = NEW_ARR_F(entity *, 1);
+ res->irg = NULL;
+
res->visit = 0;
  /* Remember entity in its owner. */
#endif
/* Creates a new entity.
- Automatically inserts the entity as a member of owner. */
+ Automatically inserts the entity as a member of owner.
+   Entity is automatic_allocated and uninitialized except if the type
+ is type_method, then it is static_allocated and constant. The constant
+ value is a pointer to the method.
+ Visibility is local, offset -1, and it is not volatile. */
entity *new_entity (type *owner, ident *name, type *type);
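
A small illustration of these defaults (the owner class type and the int
type are assumed to exist; id_from_str is the ident constructor from
ident.h; the helper itself is hypothetical):

#include <string.h>
#include "ident.h"
#include "entity.h"

/* The new entity comes out automatic_allocated, uninitialized, local,
   and with offset -1, as documented above. */
static entity *add_counter_field(type *owner, type *int_tp) {
  ident *name = id_from_str("counter", strlen("counter"));
  return new_entity(owner, name, int_tp);
}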
/* Copies the entity if the new_owner is different from the
owner of the old entity. Else returns the old entity.
if (tp != get_glob_type())
for (i = 0; i < get_class_n_member(tp); i++) {
assert(get_entity_offset(get_class_member(tp, i)) > -1);
-      /* assert(get_entity_allocation(get_class_member(tp, i)) == automatic_allocated); @@@ lowerfirm does not pass this check */
+ assert(is_method_type(get_entity_type(get_class_member(tp, i))) ||
+ (get_entity_allocation(get_class_member(tp, i)) == automatic_allocated));
+      /* @@@ lowerfirm does not pass this check */
}
} break;
case tpo_struct:
    /* assert(get_type_size(tp) > -1); @@@ lowerfirm does not pass this check */
for (i = 0; i < get_struct_n_member(tp); i++) {
assert(get_entity_offset(get_struct_member(tp, i)) > -1);
-      /* assert(get_entity_allocation(get_struct_member(tp, i)) == automatic_allocated); @@@ lowerfirm does not pass this check */
+ assert((get_entity_allocation(get_struct_member(tp, i)) == automatic_allocated));
}
} break;
case tpo_union:
/* manipulate private fields of struct */
void add_struct_member (type *strct, entity *member) {
assert(strct && (strct->type_op == type_struct));
-  /*assert(get_type_tpop(get_entity_type(member)) != type_method); @@@ lowerfirm does not pass this check */
+ assert(get_type_tpop(get_entity_type(member)) != type_method);
+  /* @@@ lowerfirm does not pass this check */
ARR_APP1 (entity *, strct->attr.sa.members, member);
}
int get_struct_n_member (type *strct) {
void set_struct_member (type *strct, int pos, entity *member) {
assert(strct && (strct->type_op == type_struct));
assert(pos >= 0 && pos < get_struct_n_member(strct));
- /* assert(get_entity_type(member)->type_op != type_method); @@@ lowerfirm !!*/
+ assert(get_entity_type(member)->type_op != type_method);/* @@@ lowerfirm !!*/
strct->attr.sa.members[pos+1] = member;
}
void remove_struct_member(type *strct, entity *member) {
array->attr.aa.lower_bound[dimension] = lower_bound;
array->attr.aa.upper_bound[dimension] = upper_bound;
}
+void set_array_lower_bound_int (type *array, int dimension, int lower_bound) {
+ ir_graph *rem;
+ assert(array && (array->type_op == type_array));
+ rem = current_ir_graph;
+ current_ir_graph = get_const_code_irg();
+ array->attr.aa.lower_bound[dimension] =
+ new_Const(mode_I, tarval_from_long (mode_I, lower_bound));
+ current_ir_graph = rem;
+}
void set_array_lower_bound (type *array, int dimension, ir_node * lower_bound) {
assert(array && (array->type_op == type_array));
array->attr.aa.lower_bound[dimension] = lower_bound;
/* create a new type primitive */
type *new_type_primitive (ident *name, ir_mode *mode) {
type *res;
+  /* @@@ assert(mode_is_data(mode) && (mode != mode_p)); */
res = new_type(type_primitive, mode, name);
res->size = get_mode_size(mode);
res->state = layout_fixed;
void set_array_bounds (type *array, int dimension, ir_node *lower_bound,
ir_node *upper_bound);
void set_array_lower_bound (type *array, int dimension, ir_node *lower_bound);
+void set_array_lower_bound_int (type *array, int dimension, int lower_bound);
void set_array_upper_bound (type *array, int dimension, ir_node *upper_bound);
ir_node * get_array_lower_bound (type *array, int dimension);
ir_node * get_array_upper_bound (type *array, int dimension);
/****************************************************************************/
-
collect_phiprojs(main_irg);
current_ir_graph = main_irg;
printf("Inlining set_a ...\n");