typedef struct ir_region ir_region, *ir_region_ptr;
typedef struct ir_reg_tree ir_reg_tree, *ir_reg_tree_ptr;
typedef struct ir_entity ir_entity, *ir_entity_ptr;
-typedef struct _ir_phase ir_phase, *ir_phase_ptr;
typedef struct _ir_extblk ir_extblk, *ir_extblk_ptr;
typedef struct ir_exec_freq ir_exec_freq, *ir_exec_freq_ptr;
typedef struct ir_cdep ir_cdep, *ir_cdep_ptr;
*/
ir_graph_pass_t *irg_verify_edges_pass(const char *name, unsigned assert_on_problem);
-/************************************************************************/
-/* Begin Old Interface */
-/************************************************************************/
-
const ir_edge_t *get_irn_edge(ir_graph *irg, const ir_node *src, int pos);
#define edges_reroute(old, nw, irg) edges_reroute_kind(old, nw, EDGE_KIND_NORMAL, irg)
*/
void edges_reset_private_data(ir_graph *irg, int offset, unsigned size);
-/************************************************************************/
-/* End Old Interface */
-/************************************************************************/
-
#endif
* (get_irn_*, set_irn_*) is influenced by this flag. */
int get_interprocedural_view(void);
void set_interprocedural_view(int state);
-#else
-#define get_interprocedural_view() 0
#endif
/**
#include "array.h"
#include "pqueue.h"
+#include "error.h"
/*
* Implements a heap.
{
switch (ARR_LEN(q->elems)) {
case 0:
- assert(0 && "Attempt to retrieve element from empty priority queue.");
- return NULL;
- break;
+ panic("Attempt to retrieve element from empty priority queue.");
case 1:
ARR_SHRINKLEN(q->elems, 0);
return q->elems[0].data;
- break;
default: {
void *data = q->elems[0].data;
int len = ARR_LEN(q->elems) - 1;
#include "set.h"
#include "pdeq.h"
#include "hashptr.h"
+#include "error.h"
#include "irprog_t.h"
#include "irgraph_t.h"
/* both are exceptions */
if ((get_ProjX_probability(p0) == Cond_prob_exception_taken) &&
(get_ProjX_probability(p1) == Cond_prob_exception_taken) ) {
- assert(0 && "I tried to avoid these!");
+ panic("I tried to avoid these!");
+#if 0
/* It's a */
set_ProjX_probability(p0, Cond_prob_normal);
set_ProjX_probability(p1, Cond_prob_normal);
+#endif
}
/* p0 is exception */
heights_t *heights_new(ir_graph *irg)
{
heights_t *res = XMALLOC(heights_t);
- phase_init(&res->ph, "heights", irg, PHASE_DEFAULT_GROWTH, irn_height_init, NULL);
+ phase_init(&res->ph, irg, irn_height_init);
res->dump_handle = dump_add_node_info_callback(height_dump_cb, res);
heights_recompute(res);
void heights_free(heights_t *h)
{
- phase_free(&h->ph);
+ phase_deinit(&h->ph);
dump_remv_node_info_callback(h->dump_handle);
xfree(h);
}
switch (get_irn_opcode(n)) {
case iro_Block:
if (!get_Block_matured(n)) return NULL;
+#ifdef INTERPROCEDURAL_VIEW
if (get_interprocedural_view() && n->attr.block.in_cg) {
assert(n->attr.block.cg_backedge && "backedge array not allocated!");
return n->attr.block.cg_backedge;
- } else {
- assert(n->attr.block.backedge && "backedge array not allocated!");
- return n->attr.block.backedge;
}
- break;
+#endif
+
+ assert(n->attr.block.backedge && "backedge array not allocated!");
+ return n->attr.block.backedge;
case iro_Phi:
assert(n->attr.phi.u.backedge && "backedge array not allocated!");
return n->attr.phi.u.backedge;
break;
case iro_Filter:
+#ifdef INTERPROCEDURAL_VIEW
if (get_interprocedural_view()) {
assert(n->attr.filter.backedge && "backedge array not allocated!");
return n->attr.filter.backedge;
}
+#endif
+ break;
+ default:
break;
- default: ;
}
return NULL;
}
if (opc == iro_Phi)
n->attr.phi.u.backedge = arr;
else if (opc == iro_Block) {
+#ifdef INTERPROCEDURAL_VIEW
if (!get_interprocedural_view())
n->attr.block.backedge = arr;
else
+#endif
n->attr.block.cg_backedge = arr;
}
else if (opc == iro_Filter)
struct obstack temp;
int i;
+#ifdef INTERPROCEDURAL_VIEW
assert(!get_interprocedural_view() &&
"use construct_ip_cf_backedges()");
+#endif
max_loop_depth = 0;
current_ir_graph = irg;
#define get_block_info(lv, bl) ((bl_info_t *) phase_get_irn_data(&(lv)->ph, bl))
struct _lv_chk_t {
- ir_phase ph;
+ ir_phase ph;
const dfs_t *dfs;
- int n_blocks;
- bitset_t *back_edge_src;
- bitset_t *back_edge_tgt;
- bl_info_t **map;
+ int n_blocks;
+ bitset_t *back_edge_src;
+ bitset_t *back_edge_tgt;
+ bl_info_t **map;
DEBUG_ONLY(firm_dbg_module_t *dbg;)
};
compute_doms(irg);
stat_ev_tim_push();
- phase_init(&res->ph, "liveness check", irg, PHASE_DEFAULT_GROWTH, init_block_data, NULL);
+ phase_init(&res->ph, irg, init_block_data);
obst = phase_obst(&res->ph);
FIRM_DBG_REGISTER(res->dbg, "ir.ana.lvchk");
void lv_chk_free(lv_chk_t *lv)
{
- phase_free(&lv->ph);
+ phase_deinit(&lv->ph);
xfree(lv);
}
#include "irloop_t.h"
#include "irprog_t.h"
+#include "error.h"
-void add_loop_son(ir_loop *loop, ir_loop *son) {
+void add_loop_son(ir_loop *loop, ir_loop *son)
+{
loop_element lson;
assert(loop && loop->kind == k_ir_loop);
assert(get_kind(son) == k_ir_loop);
loop->flags |= loop_outer_loop;
}
-void add_loop_node(ir_loop *loop, ir_node *n) {
+void add_loop_node(ir_loop *loop, ir_node *n)
+{
loop_element ln;
ln.node = n;
assert(loop && loop->kind == k_ir_loop);
loop->n_nodes++;
}
-void add_loop_irg(ir_loop *loop, ir_graph *irg) {
+void add_loop_irg(ir_loop *loop, ir_graph *irg)
+{
loop_element ln;
ln.irg = irg;
assert(loop && loop->kind == k_ir_loop);
* @param loop the loop to mature
* @param obst an obstack, where the new arrays are allocated on
*/
-void mature_loops(ir_loop *loop, struct obstack *obst) {
+void mature_loops(ir_loop *loop, struct obstack *obst)
+{
loop_element *new_children = DUP_ARR_D(loop_element, obst, loop->children);
DEL_ARR_F(loop->children);
loop->children = new_children;
}
/* Returns outer loop, itself if outermost. */
-ir_loop *(get_loop_outer_loop)(const ir_loop *loop) {
+ir_loop *(get_loop_outer_loop)(const ir_loop *loop)
+{
return _get_loop_outer_loop(loop);
}
/* Returns nesting depth of this loop */
-int (get_loop_depth)(const ir_loop *loop) {
+int (get_loop_depth)(const ir_loop *loop)
+{
return _get_loop_depth(loop);
}
/* Returns the number of inner loops */
-int (get_loop_n_sons)(const ir_loop *loop) {
+int (get_loop_n_sons)(const ir_loop *loop)
+{
return _get_loop_n_sons(loop);
}
/* Returns the pos`th loop_node-child *
* TODO: This method isn`t very efficient ! *
* Returns NULL if there isn`t a pos`th loop_node */
-ir_loop *get_loop_son(ir_loop *loop, int pos) {
+ir_loop *get_loop_son(ir_loop *loop, int pos)
+{
int child_nr = 0, loop_nr = -1;
assert(loop && loop->kind == k_ir_loop);
}
/* Returns the number of nodes in the loop */
-int get_loop_n_nodes(const ir_loop *loop) {
+int get_loop_n_nodes(const ir_loop *loop)
+{
assert(loop); assert(loop->kind == k_ir_loop);
return loop->n_nodes;
}
/* Returns the pos'th ir_node-child *
* TODO: This method isn't very efficient ! *
* Returns NULL if there isn't a pos'th ir_node */
-ir_node *get_loop_node(const ir_loop *loop, int pos) {
+ir_node *get_loop_node(const ir_loop *loop, int pos)
+{
int child_nr, node_nr = -1;
assert(loop && loop->kind == k_ir_loop);
return loop -> children[child_nr].node;
}
- assert(0 && "no child at pos found");
- return NULL;
+ panic("no child at pos found");
}
/* Returns the number of elements contained in loop. */
-int get_loop_n_elements(const ir_loop *loop) {
+int get_loop_n_elements(const ir_loop *loop)
+{
assert(loop && loop->kind == k_ir_loop);
return(ARR_LEN(loop->children));
}
to check the *(loop_element.kind) field for "k_ir_node" or "k_ir_loop"
and then select the appropriate "loop_element.node" or "loop_element.son".
*/
-loop_element get_loop_element(const ir_loop *loop, int pos) {
+loop_element get_loop_element(const ir_loop *loop, int pos)
+{
assert(loop && loop->kind == k_ir_loop && pos < ARR_LEN(loop->children));
return(loop -> children[pos]);
}
-int get_loop_element_pos(const ir_loop *loop, void *le) {
+int get_loop_element_pos(const ir_loop *loop, void *le)
+{
int i, n;
assert(loop && loop->kind == k_ir_loop);
/**
* Sets the loop for a node.
*/
-void set_irn_loop(ir_node *n, ir_loop *loop) {
+void set_irn_loop(ir_node *n, ir_loop *loop)
+{
n->loop = loop;
}
-ir_loop *(get_irn_loop)(const ir_node *n) {
+ir_loop *(get_irn_loop)(const ir_node *n)
+{
return _get_irn_loop(n);
}
-int get_loop_loop_nr(const ir_loop *loop) {
+int get_loop_loop_nr(const ir_loop *loop)
+{
assert(loop && loop->kind == k_ir_loop);
#ifdef DEBUG_libfirm
return loop->loop_nr;
#endif
}
-void set_loop_link(ir_loop *loop, void *link) {
+void set_loop_link(ir_loop *loop, void *link)
+{
assert(loop && loop->kind == k_ir_loop);
loop->link = link;
}
-void *get_loop_link(const ir_loop *loop) {
+
+void *get_loop_link(const ir_loop *loop)
+{
assert(loop && loop->kind == k_ir_loop);
return loop->link;
}
-int (is_ir_loop)(const void *thing) {
+int (is_ir_loop)(const void *thing)
+{
return _is_ir_loop(thing);
}
/* The outermost loop is remarked in the surrounding graph. */
-void (set_irg_loop)(ir_graph *irg, ir_loop *loop) {
+void (set_irg_loop)(ir_graph *irg, ir_loop *loop)
+{
_set_irg_loop(irg, loop);
}
/* Returns the root loop info (if exists) for an irg. */
-ir_loop *(get_irg_loop)(const ir_graph *irg) {
+ir_loop *(get_irg_loop)(const ir_graph *irg)
+{
return _get_irg_loop(irg);
}
* Allocates a new loop as son of father on the given obstack.
* If father is equal NULL, a new root loop is created.
*/
-ir_loop *alloc_loop(ir_loop *father, struct obstack *obst) {
+ir_loop *alloc_loop(ir_loop *father, struct obstack *obst)
+{
ir_loop *son;
son = OALLOCZ(obst, ir_loop);
X(ir_no_alias);
X(ir_may_alias);
X(ir_sure_alias);
- default: assert(0); return "UNKNOWN";
+ default:
+ panic("UNKNOWN alias relation");
}
#undef X
}
return 0;
}
+static inline int is_ip_Filter(ir_node *n)
+{
+#ifdef INTERPROCEDURAL_VIEW
+ return is_Filter(n) && get_interprocedural_view();
+#else
+ (void) n;
+ return 0;
+#endif
+}
+
/* When to walk from nodes to blocks. Only for Control flow operations? */
static inline int get_start_index(ir_node *n)
{
#if BLOCK_BEFORE_NODE
/* This version assures, that all nodes are ordered absolutely. This allows
- to undef all nodes in the heap analysis if the block is false, which means
- not reachable.
- I.e., with this code, the order on the loop tree is correct. But a (single)
- test showed the loop tree is deeper. */
- if (get_irn_op(n) == op_Phi ||
- is_Block(n) ||
- (is_Filter(n) && get_interprocedural_view()) || (
+ to undef all nodes in the heap analysis if the block is false, which
+ means not reachable.
+ I.e., with this code, the order on the loop tree is correct. But a
+ (single) test showed the loop tree is deeper. */
+ if (get_irn_op(n) == op_Phi ||
+ is_Block(n) ||
+ (is_ip_Filter(n)) || (
get_irg_pinned(get_irn_irg(n)) == op_pin_state_floats &&
get_irn_pinned(n) == op_pin_state_floats
))
ir_op *op = get_irn_op(n);
return ((op == op_Block) ||
(op == op_Phi) ||
- ((op == op_Filter) && get_interprocedural_view()));
+ (is_ip_Filter(n)));
}
/**
ir_loop *head_rem;
struct obstack temp;
+#ifdef INTERPROCEDURAL_VIEW
assert(!get_interprocedural_view() &&
"not implemented, use construct_ip_backedges()");
+#endif
max_loop_depth = 0;
current_ir_graph = irg;
waitq *workqueue;
};
+static vrp_attr *get_vrp_attr(const ir_node *node)
+{
+ return (vrp_attr*) get_or_set_irn_phase_info(node, PHASE_VRP);
+}
+
static int vrp_update_node(ir_node *node)
{
tarval *new_bits_set = get_tarval_bad();
enum range_types new_range_type = VRP_UNDEFINED;
int something_changed = 0;
vrp_attr *vrp;
- ir_phase *phase;
if (!mode_is_int(get_irn_mode(node))) {
return 0; /* we don't optimize for non-int-nodes*/
}
- phase = get_irg_phase(get_irn_irg(node), PHASE_VRP);
-
ir_printf("update_vrp for %d called\n", get_irn_node_nr(node));
- vrp = phase_get_or_set_irn_data(phase, node);
+ vrp = get_vrp_attr(node);
/* TODO: Check if all predecessors have valid VRP information*/
-
-
switch (get_irn_opcode(node)) {
case iro_Const: {
tarval *tv = get_Const_tarval(node);
left = get_And_left(node);
right = get_And_right(node);
- vrp_left = phase_get_or_set_irn_data(phase, left);
- vrp_right = phase_get_or_set_irn_data(phase, right);
+ vrp_left = get_vrp_attr(left);
+ vrp_right = get_vrp_attr(right);
new_bits_set = tarval_and(vrp_left->bits_set, vrp_right->bits_set);
new_bits_not_set = tarval_or(vrp_left->bits_not_set, vrp_right->bits_not_set);
int overflow_top, overflow_bottom;
tarval *new_top, *new_bottom;
vrp_attr *vrp_left, *vrp_right;
- vrp_left = phase_get_or_set_irn_data(phase, get_Add_left(node));
- vrp_right = phase_get_or_set_irn_data(phase, get_Add_right(node));
+ vrp_left = get_vrp_attr(get_Add_left(node));
+ vrp_right = get_vrp_attr(get_Add_right(node));
if (vrp_left->range_type == VRP_UNDEFINED || vrp_right->range_type ==
VRP_UNDEFINED || vrp_left->range_type == VRP_VARYING ||
int overflow_top, overflow_bottom;
tarval *new_top, *new_bottom;
vrp_attr *vrp_left, *vrp_right;
- vrp_left = phase_get_or_set_irn_data(phase, get_Sub_left(node));
- vrp_right = phase_get_or_set_irn_data(phase, get_Sub_right(node));
+ vrp_left = get_vrp_attr(get_Sub_left(node));
+ vrp_right = get_vrp_attr(get_Sub_right(node));
if (vrp_left->range_type == VRP_UNDEFINED || vrp_right->range_type ==
VRP_UNDEFINED) {
left = get_Or_left(node);
right = get_Or_right(node);
- vrp_left = phase_get_or_set_irn_data(phase, get_Or_left(node));
- vrp_right = phase_get_or_set_irn_data(phase, get_Or_right(node));
+ vrp_left = get_vrp_attr(get_Or_left(node));
+ vrp_right = get_vrp_attr(get_Or_right(node));
new_bits_set = tarval_or(vrp_left->bits_set, vrp_right->bits_set);
new_bits_not_set = tarval_and(vrp_left->bits_not_set, vrp_right->bits_not_set);
vrp_attr *vrp_left, *vrp_right;
ir_node *right = get_Rotl_right(node);
- vrp_left = phase_get_or_set_irn_data(phase, get_Rotl_left(node));
- vrp_right = phase_get_or_set_irn_data(phase, get_Rotl_right(node));
+ vrp_left = get_vrp_attr(get_Rotl_left(node));
+ vrp_right = get_vrp_attr(get_Rotl_right(node));
/* We can only compute this if the right value is a constant*/
if (is_Const(right)) {
case iro_Shl: {
vrp_attr *vrp_left, *vrp_right;
ir_node *right = get_Shl_right(node);
- vrp_left = phase_get_or_set_irn_data(phase, get_Shl_left(node));
- vrp_right = phase_get_or_set_irn_data(phase, get_Shl_right(node));
+ vrp_left = get_vrp_attr(get_Shl_left(node));
+ vrp_right = get_vrp_attr(get_Shl_right(node));
/* We can only compute this if the right value is a constant*/
if (is_Const(right)) {
vrp_attr *vrp_left, *vrp_right;
ir_node *right = get_Shr_right(node);
- vrp_left = phase_get_or_set_irn_data(phase, get_Shr_left(node));
- vrp_right = phase_get_or_set_irn_data(phase, get_Shr_right(node));
+ vrp_left = get_vrp_attr(get_Shr_left(node));
+ vrp_right = get_vrp_attr(get_Shr_right(node));
/* We can only compute this if the right value is a constant*/
if (is_Const(right)) {
vrp_attr *vrp_left, *vrp_right;
ir_node *right = get_Shrs_right(node);
- vrp_left = phase_get_or_set_irn_data(phase, get_Shrs_left(node));
- vrp_right = phase_get_or_set_irn_data(phase, get_Shrs_right(node));
+ vrp_left = get_vrp_attr(get_Shrs_left(node));
+ vrp_right = get_vrp_attr(get_Shrs_right(node));
/* We can only compute this if the right value is a constant*/
if (is_Const(right)) {
tarval *bits_set, *bits_not_set;
vrp_attr *vrp_left, *vrp_right;
- vrp_left = phase_get_or_set_irn_data(phase, get_Eor_left(node));
- vrp_right = phase_get_or_set_irn_data(phase, get_Eor_right(node));
+ vrp_left = get_vrp_attr(get_Eor_left(node));
+ vrp_right = get_vrp_attr(get_Eor_right(node));
bits_not_set = tarval_or(
tarval_and(vrp_left->bits_set, vrp_right->bits_set),
}
case iro_Id: {
- vrp_attr *vrp_pred = phase_get_or_set_irn_data(phase, get_Id_pred(node));
+ vrp_attr *vrp_pred = get_vrp_attr(get_Id_pred(node));
new_bits_set = vrp_pred->bits_set;
new_bits_not_set = vrp_pred->bits_not_set;
new_range_top = vrp_pred->range_top;
}
case iro_Not: {
- vrp_attr *vrp_pred = phase_get_or_set_irn_data(phase, get_Not_op(node));
+ vrp_attr *vrp_pred = get_vrp_attr(get_Not_op(node));
new_bits_set = tarval_or(vrp_pred->bits_not_set, vrp->bits_set);
new_bits_not_set = tarval_or(vrp_pred->bits_set, vrp->bits_not_set);
break;
case iro_Conv: {
ir_node *pred = get_Conv_op(node);
ir_mode *old_mode = get_irn_mode(pred);
- vrp_attr *vrp_pred = phase_get_or_set_irn_data(phase, pred);
+ vrp_attr *vrp_pred = get_vrp_attr(pred);
ir_mode *new_mode;
tarval *bits_not_set;
int i;
ir_node *pred = get_Phi_pred(node,0);
- vrp_attr *vrp_pred = phase_get_or_set_irn_data(phase, pred);
+ vrp_attr *vrp_pred = get_vrp_attr(pred);
new_range_top = vrp_pred->range_top;
new_range_bottom = vrp_pred->range_bottom;
new_range_type = vrp_pred->range_type;
for (i = 1; i < num; i++) {
pred = get_Phi_pred(node, i);
- vrp_pred = phase_get_or_set_irn_data(phase, pred);
+ vrp_pred = get_vrp_attr(pred);
if (new_range_type == VRP_RANGE && vrp_pred->range_type ==
VRP_RANGE) {
cmp = tarval_cmp(new_range_top, vrp_pred->range_top);
void set_vrp_data(ir_graph *irg)
{
-
ir_node *succ, *node;
int i;
struct vrp_env_t *env;
ir_phase *phase;
- if (!irg) {
- /* no graph, skip */
- return;
- }
-
assure_irg_outs(irg); /* ensure that out edges are consistent*/
- if (!(phase = get_irg_phase(irg, PHASE_VRP))) {
- /* this is our first run */
- phase = init_irg_phase(irg, PHASE_VRP, 0, vrp_init_node);
- env = phase_alloc(phase, sizeof(struct vrp_env_t));
+ phase = irg_get_phase(irg, PHASE_VRP);
+ if (phase == NULL) {
+ /* this is our first run */
+ phase = new_phase(irg, vrp_init_node);
+ irg_register_phase(irg, PHASE_VRP, phase);
+ env = phase_alloc(phase, sizeof(*env));
phase->priv = env;
} else {
env = phase->priv;
env->workqueue = new_waitq();
-
irg_walk_graph(irg, NULL, vrp_first_pass, env);
/* while there are entries in the worklist, continue*/
return pn_Cmp_False;
}
-vrp_attr *vrp_get_info(const ir_node *n) {
- ir_graph *irg = get_irn_irg(n);
- ir_phase *phase = get_irg_phase(irg, PHASE_VRP);
-
+vrp_attr *vrp_get_info(const ir_node *node)
+{
+ const ir_graph *irg = get_irn_irg(node);
+ const ir_phase *phase = irg_get_phase(irg, PHASE_VRP);
- if (!phase) {
+ if (phase == NULL) {
/* phase has not yet been initialized */
return NULL;
}
- return phase_get_irn_data(phase, n);
+ return phase_get_irn_data(phase, node);
}
{
(void) irn;
/* TODO */
- assert(0);
return NULL;
}
{
(void) self;
/* TODO */
- assert(0);
return NULL;
}
assert(size > 0 && "CopyB needs size > 0" );
if (size & 3) {
- assert(!"strange hack enabled: copy more bytes than needed!");
+		fprintf(stderr, "strange hack enabled: copy more bytes than needed!\n");
size += 4;
}
arm_emit_source_register(irn, 0);
be_emit_finish_line_gas(irn);
} else {
- assert(0 && "move not supported for this mode");
panic("emit_be_Copy: move not supported for this mode");
}
} else if (mode_is_data(mode)) {
arm_emit_dest_register(irn, 0);
be_emit_cstring(", ");
arm_emit_source_register(irn, 0);
- be_emit_finish_line_gas(irn);
+ be_emit_finish_line_gas(irn);
} else {
- assert(0 && "move not supported for this mode");
panic("emit_be_Copy: move not supported for this mode");
}
}
}
} else if (USE_VFP(env_cg->isa)) {
panic("VFP not supported yet");
- return NULL;
} else {
panic("Softfloat not supported yet");
- return NULL;
}
} else { /* complete in gp registers */
int src_bits = get_mode_size_bits(src_mode);
} else if (USE_VFP(env_cg->isa)) {
assert(mode != mode_E && "IEEE Extended FP not supported");
panic("VFP not supported yet");
- return NULL;
}
else {
panic("Softfloat not supported yet");
- return NULL;
}
} else {
#if 0
else if (USE_VFP(env_cg->isa)) {
assert(mode != mode_E && "IEEE Extended FP not supported");
panic("VFP not supported yet");
- return NULL;
}
else {
panic("Softfloat not supported yet");
- return NULL;
}
}
assert(mode_is_data(mode));
}
else {
panic("Softfloat not supported yet");
- return NULL;
}
}
} else if (USE_VFP(env_cg->isa)) {
assert(mode != mode_E && "IEEE Extended FP not supported");
panic("VFP not supported yet");
- return NULL;
} else {
panic("Softfloat not supported yet");
- return NULL;
}
} else {
return gen_int_binop(node, MATCH_SIZE_NEUTRAL,
if (new_list != NULL) {
/* ok, change the graph */
ir_node *start_bl = get_irg_start_block(irg);
- ir_node *first_bl = NULL;
+ ir_node *first_bl = get_first_block_succ(start_bl);
ir_node *frame, *imem, *nmem, *store, *mem, *args, *args_bl;
- const ir_edge_t *edge;
optimization_state_t state;
unsigned offset;
- foreach_block_succ(start_bl, edge) {
- first_bl = get_edge_src_irn(edge);
- break;
- }
assert(first_bl && first_bl != start_bl);
/* we had already removed critical edges, so the following
assertion should be always true. */
}
panic("unknown blocksched algo");
- return NULL;
}
co2_t env;
FILE *f;
- phase_init(&env.ph, "co2", co->cenv->birg->irg, PHASE_DEFAULT_GROWTH, co2_irn_init, NULL);
+ phase_init(&env.ph, co->cenv->birg->irg, co2_irn_init);
env.touched = NULL;
env.visited = 0;
env.co = co;
}
writeback_colors(&env);
- phase_free(&env.ph);
+ phase_deinit(&env.ph);
return 0;
}
stat_ev_tim_push();
/* init phase */
- phase_init(&mst_env.ph, "co_mst", co->irg, PHASE_DEFAULT_GROWTH, co_mst_irn_init, &mst_env);
+ phase_init(&mst_env.ph, co->irg, co_mst_irn_init);
+ phase_set_private(&mst_env.ph, &mst_env);
k = be_put_ignore_regs(co->cenv->birg, co->cls, ignore_regs);
k = n_regs - k;
/* free allocated memory */
del_pqueue(mst_env.chunks);
- phase_free(&mst_env.ph);
+ phase_deinit(&mst_env.ph);
del_pset(mst_env.chunkset);
stat_ev_tim_pop("heur4_total");
static coloring_t *coloring_init(coloring_t *c, ir_graph *irg)
{
- phase_init(&c->ph, "regs_map", irg, PHASE_DEFAULT_GROWTH, regs_irn_data_init, NULL);
+ phase_init(&c->ph, irg, regs_irn_data_init);
c->irg = irg;
return c;
}
switch (ifg_flavor) {
default:
- assert(0);
- fprintf(stderr, "no valid ifg flavour selected. falling back to std\n");
+ panic("invalid ifg flavour selected");
case BE_IFG_STD:
case BE_IFG_FAST:
ifg = be_ifg_std_new(chordal_env);
{
ifg_pointer_t *ifg = self;
obstack_free(&ifg->obst, NULL);
- phase_free(&ifg->ph);
+ phase_deinit(&ifg->ph);
free(self);
}
ifg->impl = &ifg_pointer_impl;
ifg->env = env;
- phase_init(&ifg->ph, "ptr_map", env->irg, PHASE_DEFAULT_GROWTH, ptr_irn_data_init, NULL);
+ phase_init(&ifg->ph, env->irg, ptr_irn_data_init);
obstack_init(&ifg->obst);
dom_tree_walk_irg(env->irg, find_neighbour_walker, NULL, ifg);
void be_ilp_sched(const be_irg_t *birg, be_options_t *be_opts)
{
be_ilpsched_env_t env;
- const char *name = "be ilp scheduling";
ir_graph *irg = be_get_birg_irg(birg);
const arch_env_t *arch_env = be_get_birg_arch_env(birg);
const ilp_sched_selector_t *sel = arch_env->impl->get_ilp_sched_selector(arch_env);
env.opts = &ilp_opts;
env.birg = birg;
env.be_opts = be_opts;
- phase_init(&env.ph, name, env.irg, PHASE_DEFAULT_GROWTH, init_ilpsched_irn, NULL);
+ phase_init(&env.ph, env.irg, init_ilpsched_irn);
/* assign a unique per block number to all interesting nodes */
irg_walk_in_or_dep_graph(env.irg, NULL, build_block_idx, &env);
irg_block_walk_graph(env.irg, NULL, clear_unwanted_data, &env);
/* free all allocated object */
- phase_free(&env.ph);
+ phase_deinit(&env.ph);
heights_free(env.height);
/* notify backend */
be_timer_push(T_LIVE);
lv->nodes = bitset_malloc(2 * get_irg_last_idx(lv->irg));
- phase_init(&lv->ph, "liveness", lv->irg, PHASE_DEFAULT_GROWTH, lv_phase_data_init, NULL);
+ phase_init(&lv->ph, lv->irg, lv_phase_data_init);
compute_liveness(lv);
/* be_live_chk_compare(lv, lv->lvc); */
{
if (lv && lv->nodes) {
unregister_hook(hook_node_info, &lv->hook_info);
- phase_free(&lv->ph);
+ phase_deinit(&lv->ph);
bitset_free(lv->nodes);
lv->nodes = NULL;
}
} else
bitset_clear_all(lv->nodes);
- phase_free(&lv->ph);
- phase_init(&lv->ph, "liveness", lv->irg, PHASE_DEFAULT_GROWTH, lv_phase_data_init, NULL);
+ phase_deinit(&lv->ph);
+ phase_init(&lv->ph, lv->irg, lv_phase_data_init);
compute_liveness(lv);
be_timer_pop(T_LIVE);
if (be_options.vrfy_option == BE_VRFY_WARN) {
be_check_dominance(irg);
- be_verify_out_edges(irg);
} else if (be_options.vrfy_option == BE_VRFY_ASSERT) {
- assert(be_verify_out_edges(irg));
assert(be_check_dominance(irg) && "Dominance verification failed");
}
if (be_options.vrfy_option == BE_VRFY_WARN) {
be_check_dominance(irg);
- be_verify_out_edges(irg);
} else if (be_options.vrfy_option == BE_VRFY_ASSERT) {
- assert(be_verify_out_edges(irg));
assert(be_check_dominance(irg) && "Dominance verification failed");
}
if (be_options.vrfy_option == BE_VRFY_WARN) {
irg_verify(irg, VRFY_ENFORCE_SSA);
be_check_dominance(irg);
- be_verify_out_edges(irg);
be_verify_schedule(birg);
be_verify_register_allocation(birg);
} else if (be_options.vrfy_option == BE_VRFY_ASSERT) {
assert(irg_verify(irg, VRFY_ENFORCE_SSA) && "irg verification failed");
- assert(be_verify_out_edges(irg) && "out edge verification failed");
assert(be_check_dominance(irg) && "Dominance verification failed");
assert(be_verify_schedule(birg) && "Schedule verification failed");
assert(be_verify_register_allocation(birg)
return 0;
if (get_irn_mode(last) == mode_T) {
- const ir_edge_t *edge;
- foreach_out_edge(last, edge) {
- last = get_edge_src_irn(edge);
- break;
- }
+ const ir_edge_t *edge = get_irn_out_edge_first(last);
+ last = get_edge_src_irn(edge);
}
/* irn now points to the last node in lineage u; mi has the info for the node _before_ the terminator of the lineage. */
mris_env_t *env = XMALLOC(mris_env_t);
ir_graph *irg = be_get_birg_irg(birg);
- phase_init(&env->ph, "mris", irg, 2 * PHASE_DEFAULT_GROWTH, mris_irn_data_init, NULL);
+ phase_init(&env->ph, irg, mris_irn_data_init);
env->irg = irg;
env->visited = 0;
env->heights = heights_new(irg);
void be_sched_mris_free(mris_env_t *env)
{
- phase_free(&env->ph);
+ phase_deinit(&env->ph);
heights_free(env->heights);
free(env);
}
plist_element_t *el;
(void) rss;
- assert(is_Sink(v->irn) || ((plist_count(v->descendant_list) > 0 && v->descendants) || 1));
- assert(is_Sink(u->irn) || ((plist_count(u->consumer_list) > 0 && u->consumer) || 1));
-
/* as we loop over the list: loop over the shorter one */
if (plist_count(v->descendant_list) > plist_count(u->consumer_list)) {
list = u->consumer_list;
int i, n;
const ir_edge_t *edge;
- phase_init(&rss->ph, "rss block preprocessor", rss->irg, PHASE_DEFAULT_GROWTH, init_rss_irn, NULL);
+ phase_init(&rss->ph, rss->irg, init_rss_irn);
DBG((rss->dbg, LEVEL_1, "preprocessing block %+F\n", block));
rss->block = block;
ir_nodeset_destroy(&rss->live_block);
}
- phase_free(&rss->ph);
+ phase_deinit(&rss->ph);
}
BE_REGISTER_MODULE_CONSTRUCTOR(be_init_schedrss);
sched_renumber(bi->bl);
- phase_init(&bi->next_uses, "next uses", bi->bel->irg, PHASE_DEFAULT_GROWTH, next_use_init, NULL);
+ phase_init(&bi->next_uses, bi->bel->irg, next_use_init);
sched_foreach_reverse(bi->bl, irn) {
int i;
env->instr_nr++;
}
- phase_free(&block_info->next_uses);
+ phase_deinit(&block_info->next_uses);
/* Remember end-workset for this block */
block_info->ws_end = workset_clone(env, &env->ob, env->ws);
return new_node;
}
-/**
- * Calls transformation function for given node and marks it visited.
- */
ir_node *be_transform_node(ir_node *node)
{
ir_op *op;
return new_node;
}
-/**
- * enqueue all inputs into the transform queue.
- */
void be_enqueue_preds(ir_node *node)
{
int i, arity;
free_trouts();
free_loop_information(irg);
set_irg_doms_inconsistent(irg);
+ irg_invalidate_phases(irg);
be_liveness_invalidate(be_get_birg_liveness(birg));
/* Hack for now, something is buggy with invalidate liveness... */
{
ir_node *node;
ir_node *succ_block = NULL;
- const ir_edge_t *edge;
int arity, i;
#if 1
#endif
return 0;
- foreach_block_succ(block, edge) {
- succ_block = get_edge_src_irn(edge);
- break;
- }
+ succ_block = get_first_block_succ(block);
arity = get_Block_n_cfgpreds(succ_block);
if (arity <= 1)
#include "irgopt.h"
#include "irtools.h"
#include "irprintf.h"
-#include "iredges.h"
+#include "iredges_t.h"
#include "beutil.h"
#include "besched.h"
return list;
}
+
+ir_node *get_first_block_succ(const ir_node *block)
+{
+ const ir_edge_t *edge = get_irn_out_edge_first_kind(block, EDGE_KIND_BLOCK);
+ assert(edge != NULL);
+ return get_edge_src_irn(edge);
+}
*/
FILE *be_ffopen(const char *base, const char *ext, const char *mode);
-#endif /* FIRM_BE_BEUTIL_H */
+/**
+ * convenience function to return the first successor block
+ * (it is often known that there is exactly 1 successor anyway)
+ */
+ir_node *get_first_block_succ(const ir_node *block);
+
+#endif
return !problem_found;
}
-
-
-
-/*--------------------------------------------------------------------------- */
-
-
-
-typedef struct _verify_out_dead_nodes_env {
- ir_graph *irg;
- bitset_t *reachable;
- int problem_found;
-} verify_out_dead_nodes_env;
-
-static void check_out_edges(ir_node *node, verify_out_dead_nodes_env *env)
-{
- ir_graph *irg = env->irg;
- const ir_edge_t* edge;
-
- if (irn_visited_else_mark(node))
- return;
-
- /* we find too many (uncritical) dead nodes in block out edges */
- if (is_Block(node))
- return;
-
- foreach_out_edge(node, edge) {
- ir_node* src = get_edge_src_irn(edge);
-
- if (!bitset_is_set(env->reachable, get_irn_idx(src)) && !is_Block(src)) {
- ir_fprintf(stderr, "Verify warning: Node %+F in block %+F(%s) only reachable through out edges from %+F\n",
- src, get_nodes_block(src), get_irg_dump_name(irg), node);
- env->problem_found = 1;
- continue;
- }
-
- check_out_edges(src, env);
- }
-}
-
-static void set_reachable(ir_node *node, void* data)
-{
- bitset_t* reachable = data;
- bitset_set(reachable, get_irn_idx(node));
-}
-
-int be_verify_out_edges(ir_graph *irg)
-{
- verify_out_dead_nodes_env env;
-
-return 1;
- env.irg = irg;
- env.reachable = bitset_alloca(get_irg_last_idx(irg));
- env.problem_found = edges_verify(irg);
-
- irg_walk_in_or_dep_graph(irg, set_reachable, NULL, env.reachable);
- irg_walk_anchors(irg, set_reachable, NULL, env.reachable);
- inc_irg_visited(irg);
- check_out_edges(get_irg_start(irg), &env);
-
- return ! env.problem_found;
-}
*/
int be_verify_register_allocation(const be_irg_t *birg);
-/**
- * Verify that out edges are valid.
- *
- * @param irg The irg to check
- * @param 1 if verify succeeded, 0 otherwise
- *
- * @note: This function requires O(|nodes|^2) memory. Too much for
- * the Java Grande benchmark for instance!
- */
-int be_verify_out_edges(ir_graph *irg);
-
-#endif /* FIRM_BE_BEVERIFY_H */
+#endif
*/
static arch_inverse_t *ia32_get_inverse(const ir_node *irn, int i, arch_inverse_t *inverse, struct obstack *obst)
{
+ (void) irn;
+ (void) i;
+ (void) inverse;
+ (void) obst;
+ return NULL;
+
+#if 0
ir_mode *mode;
ir_mode *irn_mode;
ir_node *block, *noreg, *nomem;
}
return inverse;
+#endif
}
static ir_mode *get_spill_mode_mode(const ir_mode *mode)
break;
#endif
- default: panic("invalid transformer");
+ default:
+ panic("invalid transformer");
}
res = new_bd_ia32_LdTls(dbgi, block, mode_Iu);
} else {
panic("unsupported Unknown-Mode");
}
- return NULL;
}
const arch_register_req_t *make_register_req(const constraint_t *constraint,
for (i = 0; i < 32; ++i) {
if (other & (1U << i)) return i;
}
- assert(! "same position not found");
- return 32;
+ panic("same position not found");
}
static inline bool is_unknown_reg(const arch_register_t *reg)
static ir_node *bad_transform(ir_node *node)
{
panic("No transform function for %+F available.", node);
- return NULL;
}
static ir_node *gen_Proj_l_FloattoLL(ir_node *node)
const arch_register_t *op1;
const arch_register_class_t *cls;
ir_node *node, *next;
- ia32_x87_attr_t *attr;
int op1_idx, out_idx;
unsigned live;
if (out_idx >= 0 && out_idx != op1_idx) {
/* Matze: out already on stack? how can this happen? */
- assert(0);
+ panic("invalid stack state in x87 simulator");
+#if 0
/* op1 must be killed and placed where out is */
if (out_idx == 0) {
+ ia32_x87_attr_t *attr;
/* best case, simple remove and rename */
x87_patch_insn(n, op_ia32_Pop);
attr = get_ia32_x87_attr(n);
x87_pop(state);
x87_set_st(state, arch_register_get_index(out), n, op1_idx - 1);
} else {
+ ia32_x87_attr_t *attr;
/* move op1 to tos, store and pop it */
if (op1_idx != 0) {
x87_create_fxch(state, n, op1_idx);
x87_set_st(state, arch_register_get_index(out), n, out_idx - 1);
}
DB((dbg, LEVEL_1, "<<< %+F %s\n", n, op1->name));
+#endif
} else {
/* just a virtual copy */
x87_set_st(state, arch_register_get_index(out), get_unop_op(n), op1_idx);
return NO_NODE_ADDED;
} /* sim_Call */
-/**
- * Simulate a be_Spill.
- *
- * @param state the x87 state
- * @param n the node that should be simulated (and patched)
- *
- * Should not happen, spills are lowered before x87 simulator see them.
- */
-static int sim_Spill(x87_state *state, ir_node *n)
-{
- panic("Spill not lowered before x87 simulator run");
- return sim_fst(state, n);
-} /* sim_Spill */
-
-/**
- * Simulate a be_Reload.
- *
- * @param state the x87 state
- * @param n the node that should be simulated (and patched)
- *
- * Should not happen, reloads are lowered before x87 simulator see them.
- */
-static int sim_Reload(x87_state *state, ir_node *n)
-{
- panic("Reload not lowered before x87 simulator run");
- return sim_fld(state, n);
-} /* sim_Reload */
-
/**
* Simulate a be_Return.
*
register_sim(op_ia32_vFucomFnstsw, sim_Fucom);
register_sim(op_ia32_vFucomi, sim_Fucom);
register_sim(op_be_Copy, sim_Copy);
- register_sim(op_be_Spill, sim_Spill);
- register_sim(op_be_Reload, sim_Reload);
register_sim(op_be_Return, sim_Return);
register_sim(op_be_Perm, sim_Perm);
register_sim(op_be_Keep, sim_Keep);
sp = new_bd_mips_addu(NULL, block, sp,
mips_create_Immediate(initialstackframesize));
arch_set_irn_register(sp, &mips_gp_regs[REG_SP]);
- panic("FIXME Use IncSP or set register requirement with ignore");
/* TODO: where to get an edge with a0-a3
int i;
sp = new_bd_mips_addu(NULL, block, sp,
mips_create_Immediate(-initialstackframesize));
arch_set_irn_register(sp, &mips_gp_regs[REG_SP]);
- panic("FIXME Use IncSP or set register requirement with ignore");
reg = be_abi_reg_map_get(reg_map, &mips_gp_regs[REG_FP]);
store = new_bd_mips_sw(NULL, block, sp, reg, *mem, NULL, 0);
fp = new_bd_mips_addu(NULL, block, sp,
mips_create_Immediate(-initialstackframesize));
arch_set_irn_register(fp, &mips_gp_regs[REG_FP]);
- panic("FIXME Use IncSP or set register requirement with ignore");
be_abi_reg_map_set(reg_map, &mips_gp_regs[REG_FP], fp);
be_abi_reg_map_set(reg_map, &mips_gp_regs[REG_SP], sp);
// copy fp to sp
sp = new_bd_mips_or(NULL, block, fp, mips_create_zero());
arch_set_irn_register(sp, &mips_gp_regs[REG_SP]);
- panic("FIXME Use be_Copy or set register requirement with ignore");
// 1. restore fp
load = new_bd_mips_lw(NULL, block, sp, *mem, NULL,
fp_save_offset - initial_frame_size);
- panic("FIXME register requirement with ignore");
fp = new_r_Proj(load, mode_Iu, pn_mips_lw_res);
*mem = new_r_Proj(load, mode_Iu, pn_mips_lw_M);
(void) irn;
/* TODO */
panic("Unimplemented ppc32_get_allowed_execution_units()");
- return NULL;
}
static const be_machine_t *ppc32_get_machine(const void *self)
(void) self;
/* TODO */
panic("Unimplemented ppc32_get_machine()");
- return NULL;
}
/**
} else if (regclass == &ppc32_reg_classes[CLASS_ppc32_condition]) {
be_emit_cstring("\tmcrf ");
} else {
- assert(0 && "Illegal register class for Copy");
panic("ppc32 Emitter: Illegal register class for Copy");
}
ppc32_emit_dest_register(irn, 0);
ppc32_emit_source_register(irn, 1);
be_emit_cstring(", cr7");
} else {
- assert(0 && "Illegal register class for Perm");
panic("ppc32 Emitter: Illegal register class for Perm");
}
be_emit_finish_line_gas(irn);
break;
default:
- fprintf(stderr, "Mode for Mod not supported: %s\n", get_mode_name(res_mode));
- assert(0);
- return NULL;
-
+ panic("Mode for Mod not supported: %s\n", get_mode_name(res_mode));
}
proj_div = new_rd_Proj(env->dbg, div_result, res_mode, pn_DivMod_res_div);
break;
}
panic("Mode for Abs not supported: %F", env->mode);
- return NULL;
}
/**
{
(void) irn;
/* TODO */
- assert(0);
- return NULL;
+ panic("sparc_get_allowed_execution_units not implemented yet");
}
static const be_machine_t *sparc_get_machine(const void *self)
{
(void) self;
/* TODO */
- assert(0);
- return NULL;
+ panic("sparc_get_machine not implemented yet");
}
static ir_graph **sparc_get_backend_irg_list(const void *self,
sparc_emit_dest_register(irn, 0);
be_emit_finish_line_gas(irn);
} else {
- assert(0 && "move not supported for this mode");
panic("emit_be_Copy: move not supported for this mode");
}
}
#include "irnode_t.h"
#include "type_t.h"
#include "entity_t.h"
+#include "error.h"
merge_pair_func *__dbg_info_merge_pair = default_dbg_info_merge_pair;
merge_sets_func *__dbg_info_merge_sets = default_dbg_info_merge_sets;
if (a <= dbg_max)
return "string conversion not implemented";
else
- assert(!"Missing debug action in dbg_action_2_str()");
- return NULL;
+ panic("Missing debug action in dbg_action_2_str()");
}
#undef CASE
}
return inst->irn = new_Const(get_mode_null(env->mode));
default:
panic("Unsupported instruction kind");
- return NULL;
}
}
#include "irflag_t.h"
#include "iredges_t.h"
#include "irflag_t.h"
+#include "error.h"
/* when we need verifying */
#ifdef NDEBUG
return 1;
}
/* Hmm, exception but not a Proj? */
- assert(!"unexpected condition: fragile op without a proj");
- return 1;
+ panic("unexpected condition: fragile op without a proj");
}
return 0;
} /* is_exception_flow */
}
break;
+#ifdef INTERPROCEDURAL_VIEW
case iro_Filter:
if (!get_interprocedural_view())
fprintf(F, "Proj'");
else
goto default_case;
break;
+#endif
case iro_Proj: {
ir_node *pred = get_Proj_pred(n);
else
goto default_case;
} break;
+
+#ifdef INTERPROCEDURAL_VIEW
case iro_Start:
case iro_End:
case iro_EndExcept:
break;
} else
goto default_case;
+#endif
case iro_CallBegin: {
ir_node *addr = get_CallBegin_ptr(n);
return ops->dump_node(n, F, dump_node_nodeattr_txt);
switch (get_irn_opcode(n)) {
- case iro_Start:
- if (0 && get_interprocedural_view()) {
- fprintf(F, "%s ", get_ent_dump_name(get_irg_entity(current_ir_graph)));
- }
- break;
-
case iro_Const:
ir_fprintf(F, "%T ", get_Const_tarval(n));
break;
case iro_Proj:
pred = get_Proj_pred(n);
proj_nr = get_Proj_proj(n);
+#ifdef INTERPROCEDURAL_VIEW
handle_lut:
+#endif
code = get_irn_opcode(pred);
if (code == iro_Cmp)
break;
case iro_Filter:
proj_nr = get_Filter_proj(n);
+#ifdef INTERPROCEDURAL_VIEW
if (! get_interprocedural_view()) {
/* it's a Proj' */
pred = get_Filter_pred(n);
goto handle_lut;
} else
+#endif
fprintf(F, "%ld ", proj_nr);
break;
case iro_Sel:
static void dump_ir_data_edges(FILE *F, ir_node *n)
{
int i, num;
- ir_visited_t visited = get_irn_visited(n);
if (!dump_keepalive && is_End(n)) {
/* the End node has only keep-alive edges */
ir_node *pred = get_irn_n(n, i);
assert(pred);
- if ((get_interprocedural_view() && get_irn_visited(pred) < visited))
+#ifdef INTERPROCEDURAL_VIEW
+ if ((get_interprocedural_view() && get_irn_visited(pred) < get_irn_visited(n)))
continue; /* pred not dumped */
+#endif
if (dump_backedge_information_flag && is_backedge(n, i))
fprintf(F, "backedge: {sourcename: \"");
rem = current_ir_graph;
current_ir_graph = irg;
+ (void) suffix_ip;
+#ifdef INTERPROCEDURAL_VIEW
if (get_interprocedural_view())
suffix1 = suffix_ip;
else
+#endif
suffix1 = suffix_nonip;
current_ir_graph = rem;
return F;
}
+static inline int is_ip_Filter(ir_node *n)
+{
+#ifdef INTERPROCEDURAL_VIEW
+ return is_Filter(n) && get_interprocedural_view();
+#else
+ (void) n;
+ return 0;
+#endif
+}
+
/* Write the irnode and all its attributes to the file passed. */
int dump_irnode_to_file(FILE *F, ir_node *n)
{
/* This is not nice, output it as a marker in the predecessor list. */
if (is_Block(n) ||
get_irn_op(n) == op_Phi ||
- (is_Filter(n) && get_interprocedural_view())) {
+ (is_ip_Filter(n))) {
fprintf(F, " backedges:");
comma = ' ';
for (i = 0; i < get_irn_arity(n); i++)
ir_fprintf(F, " cast to type: %+F\n", get_Cast_type(n));
} break;
case iro_Return: {
+#ifdef INTERPROCEDURAL_VIEW
if (!get_interprocedural_view()) {
+#endif
ir_type *tp = get_entity_type(get_irg_entity(get_irn_irg(n)));
ir_fprintf(F, " return in method of type %+F\n", tp);
for (i = 0; i < get_method_n_ress(tp); ++i) {
ir_fprintf(F, " result %d type: %+F\n", i,
get_method_res_type(tp, i));
}
+#ifdef INTERPROCEDURAL_VIEW
}
+#endif
} break;
case iro_Const: {
assert(get_Const_type(n) != firm_none_type);
#include "iredges_t.h"
#include "type_t.h"
#include "irmemory.h"
+#include "irphase.h"
#define INITIAL_IDX_IRN_MAP_SIZE 1024
{
ir_graph *res;
size_t size = sizeof(ir_graph) + additional_graph_data_size;
- char *ptr = xmalloc(size);
- memset(ptr, 0, size);
+ char *ptr = XMALLOCNZ(char, size);
res = (ir_graph *)(ptr + additional_graph_data_size);
res->kind = k_ir_graph;
return irg->loc_descriptions ? irg->loc_descriptions[n] : NULL;
}
+void irg_register_phase(ir_graph *irg, ir_phase_id id, ir_phase *phase)
+{
+ assert(id <= PHASE_LAST);
+ assert(irg->phases[id] == NULL);
+ irg->phases[id] = phase;
+}
+
+void irg_invalidate_phases(ir_graph *irg)
+{
+ int p;
+ for (p = 0; p <= PHASE_LAST; ++p) {
+ ir_phase *phase = irg->phases[p];
+ if (phase == NULL)
+ continue;
+
+ phase_free(phase);
+ irg->phases[p] = NULL;
+ }
+}
+
#ifndef NDEBUG
void ir_reserve_resources(ir_graph *irg, ir_resources_t resources)
{
set_irn_n(irg->anchor, idx, irn);
}
+
+
+/**
+ * Register a phase on an irg.
+ * The phase will then be managed by the irg. This means you can easily
+ * access the phase when you only have a graph handle, the memory will be
+ * freed when the graph is freed and some care is taken that the phase data
+ * will be invalidated/preserved on events like dead code elimination and
+ * code selection.
+ */
+void irg_register_phase(ir_graph *irg, ir_phase_id id, ir_phase *phase);
+
+/**
+ * Frees all phase infos attached to an irg
+ */
+void irg_invalidate_phases(ir_graph *irg);
+
+/**
+ * return phase with given id
+ */
+static inline ir_phase *irg_get_phase(const ir_graph *irg, ir_phase_id id)
+{
+ assert(id <= PHASE_LAST);
+ return irg->phases[id];
+}
+
+
#ifdef INTERPROCEDURAL_VIEW
extern int firm_interprocedural_view;
{
assert(is_ir_node(node));
+#ifdef INTERPROCEDURAL_VIEW
if (get_interprocedural_view()) {
- assert(0 && "This is not yet implemented.");
- } else {
- ir_reserve_resources(current_ir_graph, IR_RESOURCE_IRN_VISITED);
- inc_irg_visited(current_ir_graph);
- nodes_touched = irg_walk_in_or_dep_2(node, pre, post, env);
- ir_free_resources(current_ir_graph, IR_RESOURCE_IRN_VISITED);
+ panic("This is not yet implemented.");
}
+#endif
+ ir_reserve_resources(current_ir_graph, IR_RESOURCE_IRN_VISITED);
+ inc_irg_visited(current_ir_graph);
+ nodes_touched = irg_walk_in_or_dep_2(node, pre, post, env);
+ ir_free_resources(current_ir_graph, IR_RESOURCE_IRN_VISITED);
}
/*
/***************************************************************************/
+#ifdef INTERPROCEDURAL_VIEW
/**
* Returns current_ir_graph and sets it to the irg of predecessor index
* of node n.
return old_current;
}
-#ifdef INTERPROCEDURAL_VIEW
static void cg_walk_2(ir_node *node, irg_walk_func *pre, irg_walk_func *post, void * env)
{
int i;
hook_irg_block_walk(irg, node, (generic_func *)pre, (generic_func *)post);
assert(node);
+#ifdef INTERPROCEDURAL_VIEW
assert(!get_interprocedural_view()); /* interprocedural_view not implemented, because it
* interleaves with irg_walk */
+#endif
ir_reserve_resources(irg, IR_RESOURCE_BLOCK_VISITED);
inc_irg_block_visited(irg);
block = is_Block(node) ? node : get_nodes_block(node);
}
if (num_of_elem <= 1) {
- assert(0 && "vector modes should have at least 2 elements");
- return NULL;
+ panic("vector modes should have at least 2 elements");
}
/* sanity checks */
#include "iredgekinds.h"
#include "iredges_t.h"
#include "ircons.h"
+#include "error.h"
#include "irhooks.h"
#include "irtools.h"
case iro_Bad :
case iro_Unknown:
return node;
- default: ;
- assert(0 && "should not be reached");
- return NULL;
+ default:
+ panic("should not be reached");
}
}
case iro_DivMod: return get_DivMod_resmode(node);
case iro_Div : return get_Div_resmode(node);
case iro_Mod : return get_Mod_resmode(node);
- default: ;
- assert(0 && "should not be reached");
- return NULL;
+ default:
+ panic("should not be reached");
}
}
return NULL;
}
-ir_phase *init_irg_phase(ir_graph *irg, ir_phase_id id, size_t size, phase_irn_init *data_init)
+void phase_init(ir_phase *phase, ir_graph *irg, phase_irn_init *data_init)
{
- ir_phase *ph;
+ memset(phase, 0, sizeof(*phase));
- size = MAX(sizeof(*ph), size);
- assert(id != PHASE_NOT_IRG_MANAGED && id < PHASE_LAST);
- assert(irg->phases[id] == NULL && "you cannot overwrite another irg managed phase");
-
- ph = xmalloc(size);
- memset(ph, 0, size);
- obstack_init(&ph->obst);
- ph->id = id;
- ph->growth_factor = PHASE_DEFAULT_GROWTH;
- ph->data_init = data_init;
- ph->irg = irg;
- ph->n_data_ptr = 0;
- ph->data_ptr = NULL;
-
- irg->phases[id] = ph;
-
- return ph;
+ obstack_init(&phase->obst);
+ phase->data_init = data_init;
+ phase->irg = irg;
+ phase->n_data_ptr = 0;
+ phase->data_ptr = NULL;
}
-void free_irg_phase(ir_graph *irg, ir_phase_id id)
+ir_phase *new_phase(ir_graph *irg, phase_irn_init *data_init)
{
- ir_phase *ph = get_irg_phase(irg, id);
- phase_free(ph);
- xfree(ph);
- irg->phases[id] = NULL;
+ ir_phase *phase = xmalloc(sizeof(*phase));
+ phase_init(phase, irg, data_init);
+ return phase;
}
-ir_phase *phase_init(ir_phase *ph, const char *name, ir_graph *irg, unsigned growth_factor, phase_irn_init *data_init, void *priv)
+void phase_deinit(ir_phase *phase)
{
- obstack_init(&ph->obst);
-
- (void) name;
- ph->id = PHASE_NOT_IRG_MANAGED;
- ph->growth_factor = growth_factor;
- ph->data_init = data_init;
- ph->irg = irg;
- ph->n_data_ptr = 0;
- ph->data_ptr = NULL;
- ph->priv = priv;
- return ph;
+ obstack_free(&phase->obst, NULL);
+ if (phase->data_ptr)
+ xfree(phase->data_ptr);
}
void phase_free(ir_phase *phase)
{
- obstack_free(&phase->obst, NULL);
- if (phase->data_ptr)
- xfree(phase->data_ptr);
+ phase_deinit(phase);
+ xfree(phase);
}
phase_stat_t *phase_stat(const ir_phase *phase, phase_stat_t *stat)
--- /dev/null
+/*
+ * Copyright (C) 1995-2008 University of Karlsruhe. All right reserved.
+ *
+ * This file is part of libFirm.
+ *
+ * This file may be distributed and/or modified under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation and appearing in the file LICENSE.GPL included in the
+ * packaging of this file.
+ *
+ * Licensees holding valid libFirm Professional Edition licenses may use
+ * this file in accordance with the libFirm Commercial License.
+ * Agreement provided with the Software.
+ *
+ * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
+ * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE.
+ */
+
+/**
+ * @file
+ * @brief Phase information handling using node indexes.
+ * @author Sebastian Hack
+ * @version $Id: irphase_t.h 27270 2010-03-07 22:20:43Z matze $
+ */
+#ifndef FIRM_IR_PHASE_H
+#define FIRM_IR_PHASE_H
+
+#include "firm_types.h"
+
+typedef struct ir_phase ir_phase;
+typedef void *(phase_irn_init)(ir_phase *phase, const ir_node *irn, void *old);
+
+/**
+ * Allocate and initialize a new phase object
+ *
+ * @param irg The graph the phase will run on.
+ * @param irg        The graph the phase will run on.
+ * @param data_init  A callback that is called to initialize newly created
+ *                   node data. Must be non-null. You could use
+ *                   phase_irn_init_default here.
+ * @return A new phase object.
+ */
+ir_phase *new_phase(ir_graph *irg, phase_irn_init *data_init);
+
+/**
+ * Variant for custom memory-management/classes. Just initialize given phase
+ * structure (performs no allocation, you do not need to call this for phases
+ * allocated with new_phase)
+ */
+void phase_init(ir_phase *phase, ir_graph *irg, phase_irn_init *data_init);
+
+/**
+ * frees all internal memory used by the phase but does not free the
+ * phase struct itself.
+ */
+void phase_deinit(ir_phase *phase);
+
+/**
+ * free memory allocated by a phase
+ */
+void phase_free(ir_phase *phase);
+
+/**
+ * Re-initialize the irn data for all nodes in the node => data map using the given callback.
+ *
+ * @param phase The phase.
+ */
+void phase_reinit_irn_data(ir_phase *phase);
+
+/**
+ * Re-initialize the irn data for all nodes having phase data in the given block.
+ *
+ * @param phase The phase.
+ * @param block The block.
+ *
+ * @note Beware: iterates over all nodes in the graph to find the nodes of the given block.
+ */
+void phase_reinit_block_irn_data(ir_phase *phase, ir_node *block);
+
+/**
+ * A default node initializer.
+ * It does nothing and returns NULL.
+ */
+extern phase_irn_init phase_irn_init_default;
+
+#endif
#include "obst.h"
#include "irgraph_t.h"
#include "irtools.h"
-#include "irphases_t.h"
-
-struct _ir_phase_info {
- ir_phase_id id;
- const char buf[128];
-};
-
-typedef struct _ir_phase_info ir_phase_info;
-
-typedef void *(phase_irn_init)(ir_phase *phase, const ir_node *irn, void *old);
-
-/**
- * A default node initializer.
- * It does nothing and returns NULL.
- */
-extern phase_irn_init phase_irn_init_default;
+#include "irphase.h"
/**
* A phase object.
*/
-struct _ir_phase {
- struct obstack obst; /**< The obstack where the irn phase data will be stored on. */
- ir_phase_id id; /**< The phase ID. */
- const char *name; /**< The name of the phase. */
- ir_graph *irg; /**< The irg this phase will we applied to. */
- unsigned growth_factor; /**< The factor to leave room for additional nodes. 256 means 1.0. */
- void *priv; /**< Some pointer private to the user of the phase. */
- size_t n_data_ptr; /**< The length of the data_ptr array. */
- void **data_ptr; /**< Map node indexes to irn data on the obstack. */
- phase_irn_init *data_init; /**< A callback that is called to initialize newly created node data. */
+struct ir_phase {
+ void **data_ptr; /**< Map node indexes to irn data on the obstack. */
+	ir_graph        *irg;       /**< The irg this phase will be applied to. */
+ phase_irn_init *data_init; /**< A callback that is called to initialize newly created node data. */
+ size_t n_data_ptr; /**< The length of the data_ptr array. */
+ struct obstack obst; /**< The obstack where the irn phase data will be stored on. */
+ void *priv; /**< Some pointer private to the user of the phase. */
};
-#define PHASE_DEFAULT_GROWTH (256)
-
-
/**
* For statistics: A type containing statistic data of a phase object.
*/
*/
phase_stat_t *phase_stat(const ir_phase *phase, phase_stat_t *stat);
-/**
- * Initialize a phase object.
- *
- * @param name The name of the phase. Just for debugging.
- * @param irg The graph the phase will run on.
- * @param growth_factor A factor denoting how many node slots will be additionally allocated,
- * if the node => data is full. The factor is given in units of 1/256, so
- * 256 means 1.0.
- * @param irn_data_init A callback that is called to initialize newly created node data.
- * Must be non-null.
- * @param priv Some private pointer which is kept in the phase and can be retrieved with phase_get_private().
- * @return A new phase object.
- */
-ir_phase *phase_init(ir_phase *ph, const char *name, ir_graph *irg, unsigned growth_factor, phase_irn_init *data_init, void *priv);
-
-/**
- * Init an irg managed phase.
- *
- * The first sizeof(ir_phase) bytes will be considered to be a phase object;
- * they will be properly initialized. The remaining bytes are at the user's disposal.
- * The returned phase object will be inserted in the phase slot of the @p irg designated by the phase ID (@p id).
- * Note that you cannot allocate phases with an ID <code>PHASE_NOT_IRG_MANAGED</code>.
- *
- * @param irg The irg.
- * @param id The ID of the irg-managed phase (see irphaselist.h).
- * @param size The size of the phase
- * @param data_init The node data initialization function.
- * @return The allocated phase object.
- */
-ir_phase *init_irg_phase(ir_graph *irg, ir_phase_id id, size_t size, phase_irn_init *data_init);
-
-void free_irg_phase(ir_graph *irg, ir_phase_id id);
-
-/**
- * Free the phase and all node data associated with it.
- *
- * @param phase The phase.
- */
-void phase_free(ir_phase *phase);
-
-/**
- * Re-initialize the irn data for all nodes in the node => data map using the given callback.
- *
- * @param phase The phase.
- */
-void phase_reinit_irn_data(ir_phase *phase);
-
-/**
- * Re-initialize the irn data for all nodes having phase data in the given block.
- *
- * @param phase The phase.
- * @param block The block.
- *
- * @note Beware: iterates over all nodes in the graph to find the nodes of the given block.
- */
-void phase_reinit_block_irn_data(ir_phase *phase, ir_node *block);
/**
* Re-initialize the irn data for the given node.
#define foreach_phase_irn(phase, irn) \
for (irn = phase_get_first_node(phase); irn; irn = phase_get_next_node(phase, irn))
-/**
- * Get the name of the phase.
- *
- * @param phase The phase.
- */
-static inline const char *phase_get_name(const ir_phase *phase)
-{
- return phase->name;
-}
-
/**
* Get the irg the phase runs on.
*
}
/**
- * Allocate memory in the phase's memory pool.
- *
- * @param phase The phase.
- * @param size Number of bytes to allocate.
+ * Attach pointer with private data to phase
*/
+static inline void phase_set_private(ir_phase *phase, void *priv)
+{
+ phase->priv = priv;
+}
+
static inline void *phase_alloc(ir_phase *phase, size_t size)
{
return obstack_alloc(&phase->obst, size);
/* make the maximum index at least as big as the largest index in the graph. */
max_idx = MAX(max_idx, last_irg_idx);
- new_cap = (size_t) (max_idx * phase->growth_factor / 256);
+ new_cap = (size_t) (max_idx + 256);
phase->data_ptr = XREALLOC(phase->data_ptr, void*, new_cap);
return res;
}
+
/**
- * Get the irg-managed phase for a given phase ID.
- * @param irg The irg.
- * @param id The ID.
- * @return The corresponding phase, or NULL if there is none.
+ * convenience function that returns phase information attached to a node
*/
-static inline ir_phase *get_irg_phase(const ir_graph *irg, ir_phase_id id)
-{
- return irg->phases[id];
-}
-
static inline void *get_irn_phase_info(const ir_node *irn, ir_phase_id id)
{
const ir_graph *irg = get_irn_irg(irn);
- const ir_phase *ph = get_irg_phase(irg, id);
- assert(ph && "phase info has to be computed");
+ const ir_phase *ph = irg_get_phase(irg, id);
return phase_get_irn_data(ph, irn);
}
static inline void *get_or_set_irn_phase_info(const ir_node *irn, ir_phase_id id)
{
const ir_graph *irg = get_irn_irg(irn);
- ir_phase *ph = get_irg_phase(irg, id);
- assert(ph && "phase info has to be computed");
+ ir_phase *ph = irg_get_phase(irg, id);
return phase_get_or_set_irn_data(ph, irn);
}
static inline void *set_irn_phase_info(const ir_node *irn, ir_phase_id id, void *data)
{
const ir_graph *irg = get_irn_irg(irn);
- ir_phase *ph = get_irg_phase(irg, id);
- assert(ph && "phase info has to be computed");
+ ir_phase *ph = irg_get_phase(irg, id);
return phase_set_irn_data(ph, irn, data);
}
+
+
+
#endif
+++ /dev/null
-/* Do not delete! */
-PH(NOT_IRG_MANAGED, "A phase not managed by an IRG")
-
-/* BEGIN Enter your phases here */
-/* The name of the phase, A description */
-
-PH(BE_ARCH, "Backend architecture abstraction")
-PH(BE_SCHED, "Scheduler")
-PH(BE_REG_ALLOC, "Register allocation")
-PH(VRP, "Value Range Propagation")
-
-/* END Enter your phases here */
-
-/* Do not delete! */
-PH(LAST, "The last phase")
+++ /dev/null
-/**
- * @file irphases_t.h
- * @date 18.11.2007
- * @author Sebastian Hack
- *
- * Copyright (C) 2007 Inria Rhone-Alpes
- * Released under the GPL
- */
-
-#ifndef _IRPHASES_T_H
-#define _IRPHASES_T_H
-
-enum _ir_phase_id {
-#define PH(name, description) PHASE_ ## name,
-#include "irphaselist.h"
-#undef PH
-};
-
-typedef enum _ir_phase_id ir_phase_id;
-
-#endif /* _IRPHASES_T_H */
#include "callgraph.h"
#include "irprog.h"
#include "field_temperature.h"
-#include "irphases_t.h"
+#include "irphase.h"
#include "pset.h"
#include "set.h"
#include "obst.h"
#include "vrp.h"
+/**
+ * List of phases. (We will add a register/unregister interface if managing
+ * this gets too tedious)
+ */
+typedef enum ir_phase_id {
+ PHASE_VRP,
+ PHASE_LAST = PHASE_VRP
+} ir_phase_id;
+
/** The type of an ir_op. */
struct ir_op {
unsigned code; /**< The unique opcode of the op. */
ir_node **idx_irn_map; /**< Array mapping node indexes to nodes. */
int index; /**< a unique number for each graph */
- ir_phase *phases[PHASE_LAST]; /**< Phase information. */
+ ir_phase *phases[PHASE_LAST+1]; /**< Phase information. */
void *be_data; /**< backend can put in private data here */
#ifdef DEBUG_libfirm
int n_outs; /**< Size wasted for outs */
ASSERT_AND_RET(
!is_NoMem(get_Call_mem(n)),
"Exception Proj from FunctionCall", 0);
- else if (proj == pn_Call_M)
- ASSERT_AND_RET(
- (!is_NoMem(get_Call_mem(n)) || 1),
- "Memory Proj from FunctionCall", 0);
return 1;
}
}
for (i = get_Block_n_cfgpreds(n) - 1; i >= 0; --i) {
- ir_node *pred = get_Block_cfgpred(n, i);
+ ir_node *pred = get_Block_cfgpred(n, i);
ASSERT_AND_RET(
is_Bad(pred) || (get_irn_mode(pred) == mode_X),
"Block node must have a mode_X predecessor", 0);
"End Block node", 0);
}
/* irg attr must == graph we are in. */
- if (! get_interprocedural_view()) {
- ASSERT_AND_RET(((get_irn_irg(n) && get_irn_irg(n) == irg)), "Block node has wrong irg attribute", 0);
- }
+ ASSERT_AND_RET(((get_irn_irg(n) && get_irn_irg(n) == irg)), "Block node has wrong irg attribute", 0);
return 1;
}
if (!get_node_verification_mode())
return 1;
- if (!get_interprocedural_view()) {
- /*
- * do NOT check placement in interprocedural view, as we don't always know
- * the "right" graph ...
- */
+ /*
+ * do NOT check placement in interprocedural view, as we don't always
+ * know the "right" graph ...
+ */
#ifndef NDEBUG
- /* this seems to be an expensive check in VS compile (9% over all runtime),
- so do NOT run it in release mode */
- ASSERT_AND_RET_DBG(
- node_is_in_irgs_storage(irg, n),
- "Node is not stored on proper IR graph!", 0,
- show_node_on_graph(irg, n);
- );
+ /* this is an expensive check for large graphs (it has a quadratic
+ * runtime but with a small constant); so do NOT run it in release mode
+ */
+ ASSERT_AND_RET_DBG(
+ node_is_in_irgs_storage(irg, n),
+ "Node is not stored on proper IR graph!", 0,
+ show_node_on_graph(irg, n);
+ );
#endif
- assert(get_irn_irg(n) == irg);
- {
- unsigned idx = get_irn_idx(n);
- ir_node *node_from_map = get_idx_irn(irg, idx);
- ASSERT_AND_RET_DBG(node_from_map == n, "Node index and index map entry differ", 0,
- ir_printf("node %+F node in map %+F(%p)\n", n, node_from_map, node_from_map));
- }
+ assert(get_irn_irg(n) == irg);
+ {
+ unsigned idx = get_irn_idx(n);
+ ir_node *node_from_map = get_idx_irn(irg, idx);
+ ASSERT_AND_RET_DBG(node_from_map == n, "Node index and index map entry differ", 0,
+ ir_printf("node %+F node in map %+F(%p)\n", n, node_from_map, node_from_map));
}
op = get_irn_op(n);
new_node->node_nr = node->node_nr;
/* copy phase information for this node */
- for (i = PHASE_NOT_IRG_MANAGED+1; i < PHASE_LAST; i++) {
- ir_phase *phase = get_irg_phase(irg, i);
+ for (i = 0; i <= PHASE_LAST; i++) {
+ ir_phase *phase = irg_get_phase(irg, i);
if (phase == NULL)
continue;
if (!phase_get_irn_data(phase, node))
*/
static void copy_graph_env(ir_graph *irg)
{
- ir_node *new_anchor;
+ ir_node *new_anchor;
int i;
/* init the new_phases array */
- for (i = PHASE_NOT_IRG_MANAGED+1; i < PHASE_LAST; i++) {
- ir_phase *old_ph = get_irg_phase(irg, i);
+ /* TODO: this is wrong, it should only allocate a new data_ptr inside
+ * the phase! */
+ for (i = 0; i <= PHASE_LAST; i++) {
+ ir_phase *old_ph = irg_get_phase(irg, i);
if (old_ph == NULL) {
new_phases[i] = NULL;
} else {
- new_phases[i] = xmalloc(sizeof(ir_phase));
- phase_init(new_phases[i], "", irg, old_ph->growth_factor,
- old_ph->data_init, old_ph->priv);
+ new_phases[i] = new_phase(irg, old_ph->data_init);
+ new_phases[i]->priv = old_ph->priv;
}
}
irg->anchor = new_anchor;
/* copy the new phases into the irg */
- for (i = PHASE_NOT_IRG_MANAGED+1; i < PHASE_LAST; i++) {
- ir_phase *old_ph = get_irg_phase(irg, i);
+ for (i = 0; i <= PHASE_LAST; i++) {
+ ir_phase *old_ph = irg_get_phase(irg, i);
if (old_ph == NULL)
continue;
- free_irg_phase(irg, i);
+ phase_free(old_ph);
irg->phases[i] = new_phases[i];
}
}
#include "ircons.h"
#include "irprintf.h"
#include "debug.h"
+#include "error.h"
/**
* walker environment
/* Hmm: no ProjX from a Raise? This should be a verification
* error. For now we just assert and return.
*/
- assert(! "No ProjX after Raise found");
- return 1;
+ panic("No ProjX after Raise found");
}
if (get_irn_n_outs(proj) != 1) {
/* Hmm: more than one user of ProjX: This is a verification
* error.
*/
- assert(! "More than one user of ProjX");
- return 1;
+ panic("More than one user of ProjX");
}
n = get_irn_out(proj, 0);
env.nextDFSnum = 0;
env.POnum = 0;
env.changes = 0;
- phase_init(&env.ph, "ldstopt", irg, PHASE_DEFAULT_GROWTH, init_loop_data, NULL);
+ phase_init(&env.ph, irg, init_loop_data);
/* calculate the SCC's and drive loop optimization. */
do_dfs(irg, &env);
DEL_ARR_F(env.stack);
- phase_free(&env.ph);
+ phase_deinit(&env.ph);
return env.changes;
} /* optimize_loops */
break;
default:
panic("Unsupported opcode");
- result = NULL;
}
return result;
} /* do_apply */
break;
default:
panic("Unsupported opcode");
- tv = tarval_bad;
}
if (pscc->code == iro_Add) {
#include "counter.h"
#include "pattern_dmp.h"
#include "hashptr.h"
+#include "error.h"
#ifdef FIRM_STATISTICS
return code;
} /* if */
/* should not happen */
- assert(0 && "Wrong code in buffer");
-
- return 0;
+ panic("Wrong code in buffer");
} /* get_code */
/**
case iro_Unknown:
nn = new_Unknown(m); break;
default:
- assert(0 && "opcode invalid or not implemented");
- nn = NULL;
- break;
+ panic("opcode invalid or not implemented");
}
return nn;
} /* copy_const_value */
#include "fltcalc.h"
#include "strcalc.h"
+#include "error.h"
#include <math.h>
/* undef some reused constants defined by math.h */
TRACEPRINTF(("%s ", fc_print(a, buffer, sizeof(buffer), FC_PACKED)));
TRACEPRINTF(("rounded to integer "));
- assert(!"fc_rnd() not yet implemented");
-
- TRACEPRINTF(("= %s\n", fc_print(result, buffer, sizeof(buffer), FC_PACKED)));
- return result;
+ panic("fc_rnd() not yet implemented");
}
/*
#define N_CONSTANTS 2048
/* unused, float to int doesn't work yet */
-enum float_to_int_mode {
+typedef enum float_to_int_mode {
TRUNCATE,
ROUND
-};
+} float_to_int_mode;
-#define GET_FLOAT_TO_INT_MODE() TRUNCATE
+static float_to_int_mode current_float_to_int_mode = TRUNCATE;
#define SWITCH_NOINFINITY 0
#define SWITCH_NODENORMALS 0
break;
case irms_float_number:
- if (SWITCH_NOINFINITY && fc_is_inf(value)) {
+/* Use #if, not #ifdef: SWITCH_NOINFINITY is defined to 0, so #ifdef would
+ * unconditionally enable this previously dead branch. #if preserves the
+ * original "if (0 && ...)" behavior while allowing it to be turned on. */
+#if SWITCH_NOINFINITY
+ if (fc_is_inf(value)) {
/* clip infinity to maximum value */
return fc_is_negative(value) ? get_mode_min(mode) : get_mode_max(mode);
}
+#endif
- if (SWITCH_NODENORMALS && fc_is_subnormal(value)) {
+/* Same reasoning as above: SWITCH_NODENORMALS is defined to 0. */
+#if SWITCH_NODENORMALS
+ if (fc_is_subnormal(value)) {
/* clip denormals to zero */
return get_mode_null(mode);
}
+#endif
break;
default:
/* case 128: return &quad_desc; */
default:
panic("Unsupported mode in get_descriptor()");
- return NULL;
}
}
return new_tarval_from_double((long double)l, mode);
default:
- assert(0 && "unsupported mode sort");
+ panic("unsupported mode sort");
}
- return NULL;
}
/* returns non-zero if can be converted to long */
if (a == tarval_bad || b == tarval_bad) {
panic("Comparison with tarval_bad");
- return pn_Cmp_False;
}
if (a == tarval_undefined || b == tarval_undefined)
if (get_mode_n_vector_elems(a->mode) > 1) {
/* vector arithmetic not implemented yet */
- assert(0 && "cmp not implemented for vector modes");
+ panic("cmp not implemented for vector modes");
}
/* Here the two tarvals are unequal and of the same mode */
return get_tarval(fc_get_buffer(), fc_get_buffer_length(), dst_mode);
case irms_int_number:
- switch (GET_FLOAT_TO_INT_MODE()) {
+ switch (current_float_to_int_mode) {
case TRUNCATE:
res = fc_int(src->value, NULL);
break;
case ROUND:
res = fc_rnd(src->value, NULL);
break;
- default:
- panic("Unsupported float to int conversion mode in tarval_convert_to()");
- break;
}
buffer = alloca(sc_get_buffer_length());
if (! fc_flt2int(res, buffer, dst_mode))
return tarval_bad;
default:
- assert(0 && "bitwise negation is only allowed for integer and boolean");
- return tarval_bad;
+ panic("bitwise negation is only allowed for integer and boolean");
}
}
return a;
default:
- return tarval_bad;
+ break;
}
return tarval_bad;
}
return get_tarval(sc_get_buffer(), sc_get_buffer_length(), a->mode);
default:
- assert(0 && "operation not defined on mode");
- return tarval_bad;
+ panic("operation not defined on mode");
}
}
return get_tarval(sc_get_buffer(), sc_get_buffer_length(), a->mode);
default:
- assert(0 && "operation not defined on mode");
- return tarval_bad;
+ panic("operation not defined on mode");
}
}
return get_tarval(sc_get_buffer(), sc_get_buffer_length(), a->mode);
default:
- assert(0 && "operation not defined on mode");
- return tarval_bad;
+ panic("operation not defined on mode");
}
}
return get_tarval(sc_get_buffer(), sc_get_buffer_length(), a->mode);
default:
- assert(0 && "operation not defined on mode");
- return tarval_bad;;
+ panic("operation not defined on mode");
}
}