*
* @remark Helper function, use NEW_ARR_D() instead.
*/
-void *ir_new_arr_d(struct obstack *obstack, int nelts, size_t elts_size) {
+void *ir_new_arr_d(struct obstack *obstack, int nelts, size_t elts_size)
+{
ir_arr_descr *dp;
assert(obstack && (nelts >= 0));
*
* @remark Helper function, use NEW_ARR_F() instead.
*/
-void *ir_new_arr_f(int nelts, size_t elts_size) {
+void *ir_new_arr_f(int nelts, size_t elts_size)
+{
ir_arr_descr *new;
assert (nelts >= 0);
*
* @remark Helper function, use DEL_ARR_F() instead.
*/
-void ir_del_arr_f(void *elts) {
+void ir_del_arr_f(void *elts)
+{
ir_arr_descr *dp = ARR_DESCR (elts);
ARR_VRFY (elts);
*
* @remark Helper function, use ARR_SETLEN() instead.
*/
-void *ir_arr_setlen (void *elts, int nelts, size_t elts_size) {
+void *ir_arr_setlen (void *elts, int nelts, size_t elts_size)
+{
ir_arr_descr *dp = ARR_DESCR (elts);
assert ((dp->magic == ARR_F_MAGIC) && (nelts >= 0));
*
* @remark Helper function, use ARR_RESIZE() instead.
*/
-void *ir_arr_resize(void *elts, int nelts, size_t eltsize) {
+void *ir_arr_resize(void *elts, int nelts, size_t eltsize)
+{
ir_arr_descr *dp = ARR_DESCR(elts);
int n;
* Do NOT use is in code, use ARR_LEN() macro!
* This function is intended to be called from a debugger.
*/
int array_len(const void *arr)
{
	return ARR_LEN(arr);
}
* Do NOT use is in code!.
* This function is intended to be called from a debugger.
*/
-ir_arr_descr *array_descr(const void *arr) {
+ir_arr_descr *array_descr(const void *arr)
+{
if (! arr)
return NULL;
return ARR_DESCR(arr);
}
}
-void bipartite_dump(const char *name, const bipartite_t *gr) {
+void bipartite_dump(const char *name, const bipartite_t *gr)
+{
FILE *f = fopen(name, "w");
if (f) {
#define INITIAL_SLOTS 64
-static int pcmp(const void *p1, const void *p2, size_t size) {
+static int pcmp(const void *p1, const void *p2, size_t size)
+{
const void **q1 = (const void **)p1;
const void **q2 = (const void **)p2;
(void) size;
}
-eset * eset_create(void) {
+eset * eset_create(void)
+{
return (eset *) new_set(pcmp, INITIAL_SLOTS);
}
-eset * eset_copy(eset *source) {
+eset * eset_copy(eset *source)
+{
eset * ret = eset_create();
void * p;
for (p = eset_first(source); p; p = eset_next(source)) {
}
-void eset_destroy(eset *s) {
+void eset_destroy(eset *s)
+{
del_set((set *)s);
}
/* Returns the number of elements in the set. */
-int eset_count(eset *s) {
+int eset_count(eset *s)
+{
return set_count((set *)s);
}
-void eset_insert(eset *s, void *p) {
+void eset_insert(eset *s, void *p)
+{
if (!eset_contains(s, p)) {
set_insert((set *)s, &p, sizeof(p), HASH_PTR(p));
}
}
-int eset_contains(eset *s, void *p) {
+int eset_contains(eset *s, void *p)
+{
return set_find((set *)s, &p, sizeof(p), HASH_PTR(p)) != NULL;
}
-void * eset_first(eset *s) {
+void * eset_first(eset *s)
+{
void * p = set_first((set *) s);
return p == NULL ? NULL : *((void **)p);
}
-void * eset_next(eset *s) {
+void * eset_next(eset *s)
+{
void *p = set_next((set *) s);
return p == NULL ? NULL : *((void **)p);
}
-void eset_insert_all(eset *target, eset *source) {
+void eset_insert_all(eset *target, eset *source)
+{
void *p;
for (p = eset_first(source); p; p = eset_next(source)) {
eset_insert(target, p);
row_col_t *rows;
};
-static inline void alloc_cols(row_col_t *row, int c_cols) {
+static inline void alloc_cols(row_col_t *row, int c_cols)
+{
assert(c_cols > row->c_cols);
row->c_cols = c_cols;
row->cols = XREALLOC(row->cols, col_val_t, c_cols);
}
-static inline void alloc_rows(gs_matrix_t *m, int c_rows, int c_cols, int begin_init) {
+static inline void alloc_rows(gs_matrix_t *m, int c_rows, int c_cols, int begin_init)
+{
int i;
assert(c_rows > m->c_rows);
}
}
-gs_matrix_t *gs_new_matrix(int n_init_rows, int n_init_cols) {
+gs_matrix_t *gs_new_matrix(int n_init_rows, int n_init_cols)
+{
gs_matrix_t *res = XMALLOCZ(gs_matrix_t);
if (n_init_rows < 16)
n_init_rows = 16;
return res;
}
-void gs_delete_matrix(gs_matrix_t *m) {
+void gs_delete_matrix(gs_matrix_t *m)
+{
int i;
for (i = 0; i < m->c_rows; ++i) {
if (m->rows[i].c_cols)
xfree(m);
}
-unsigned gs_matrix_get_n_entries(const gs_matrix_t *m) {
+unsigned gs_matrix_get_n_entries(const gs_matrix_t *m)
+{
int i;
unsigned n_entries = 0;
return n_entries - m->n_zero_entries;
}
-int gs_matrix_get_sizeof_allocated_memory(const gs_matrix_t *m) {
+int gs_matrix_get_sizeof_allocated_memory(const gs_matrix_t *m)
+{
int i, n_col_val_ts = 0;
for (i = 0; i < m->c_rows; ++i)
n_col_val_ts += m->rows[i].c_cols;
return n_col_val_ts * sizeof(col_val_t) + m->c_rows * sizeof(row_col_t) + sizeof(gs_matrix_t);
}
-void gs_matrix_assure_row_capacity(gs_matrix_t *m, int row, int min_capacity) {
+void gs_matrix_assure_row_capacity(gs_matrix_t *m, int row, int min_capacity)
+{
row_col_t *the_row = &m->rows[row];
if (the_row->c_cols < min_capacity)
alloc_cols(the_row, min_capacity);
}
-void gs_matrix_trim_row_capacities(gs_matrix_t *m) {
+void gs_matrix_trim_row_capacities(gs_matrix_t *m)
+{
int i;
for (i = 0; i < m->c_rows; ++i) {
row_col_t *the_row = &m->rows[i];
}
}
-void gs_matrix_delete_zero_entries(gs_matrix_t *m) {
+void gs_matrix_delete_zero_entries(gs_matrix_t *m)
+{
int i, read_pos;
for (i = 0; i < m->c_rows; ++i) {
row_col_t *the_row = &m->rows[i];
m->n_zero_entries = 0;
}
-void gs_matrix_set(gs_matrix_t *m, int row, int col, double val) {
+void gs_matrix_set(gs_matrix_t *m, int row, int col, double val)
+{
row_col_t *the_row;
col_val_t *cols;
int min, max, c, i;
assert(c>=the_row->n_cols-1 || the_row->cols[c].col_idx < the_row->cols[c+1].col_idx);
}
-double gs_matrix_get(const gs_matrix_t *m, int row, int col) {
+double gs_matrix_get(const gs_matrix_t *m, int row, int col)
+{
row_col_t *the_row;
int c;
*
* Note that the diagonal element is stored separately in this matrix implementation.
* */
-double gs_matrix_gauss_seidel(const gs_matrix_t *m, double *x, int n) {
+double gs_matrix_gauss_seidel(const gs_matrix_t *m, double *x, int n)
+{
double res = 0.0;
int r;
}
}
-void gs_matrix_dump(const gs_matrix_t *m, int a, int b, FILE *out) {
+void gs_matrix_dump(const gs_matrix_t *m, int a, int b, FILE *out)
+{
int effective_rows = MIN(a, m->c_rows);
int r, c, i;
double *elems = XMALLOCN(double, b);
xfree(elems);
}
-void gs_matrix_self_test(int d) {
+void gs_matrix_self_test(int d)
+{
int i, o;
gs_matrix_t *m = gs_new_matrix(10, 10);
DEBUG_ONLY(firm_dbg_module_t *dbg);
};
-static void hungarian_dump_f(FILE *f, int **C, int rows, int cols, int width) {
+static void hungarian_dump_f(FILE *f, int **C, int rows, int cols, int width)
+{
int i, j;
fprintf(f , "\n");
fprintf(f, "\n");
}
-void hungarian_print_cost_matrix(hungarian_problem_t *p, int width) {
+void hungarian_print_cost_matrix(hungarian_problem_t *p, int width)
+{
hungarian_dump_f(stderr, p->cost, p->num_rows, p->num_cols, width);
}
/**
* Create the object and allocate memory for the data structures.
*/
-hungarian_problem_t *hungarian_new(int rows, int cols, int match_type) {
+hungarian_problem_t *hungarian_new(int rows, int cols, int match_type)
+{
int i;
hungarian_problem_t *p = XMALLOCZ(hungarian_problem_t);
/**
* Prepare the cost matrix.
*/
-void hungarian_prepare_cost_matrix(hungarian_problem_t *p, int mode) {
+void hungarian_prepare_cost_matrix(hungarian_problem_t *p, int mode)
+{
int i, j;
if (mode == HUNGARIAN_MODE_MAXIMIZE_UTIL) {
/**
* Set cost[left][right] to cost.
*/
-void hungarian_add(hungarian_problem_t *p, int left, int right, int cost) {
+void hungarian_add(hungarian_problem_t *p, int left, int right, int cost)
+{
assert(p->num_rows > left && "Invalid row selected.");
assert(p->num_cols > right && "Invalid column selected.");
assert(cost >= 0);
/**
* Set cost[left][right] to 0.
*/
-void hungarian_remv(hungarian_problem_t *p, int left, int right) {
+void hungarian_remv(hungarian_problem_t *p, int left, int right)
+{
assert(p->num_rows > left && "Invalid row selected.");
assert(p->num_cols > right && "Invalid column selected.");
/**
* Frees all allocated memory.
*/
-void hungarian_free(hungarian_problem_t* p) {
+void hungarian_free(hungarian_problem_t* p)
+{
obstack_free(&p->obst, NULL);
xfree(p);
}
/**
* Do the assignment.
*/
-int hungarian_solve(hungarian_problem_t* p, int *assignment, int *final_cost, int cost_threshold) {
+int hungarian_solve(hungarian_problem_t* p, int *assignment, int *final_cost, int cost_threshold)
+{
int i, j, m, n, k, l, s, t, q, unmatched, cost;
int *col_mate;
int *row_mate;
* @param list the list for which to allocate the element.
* @return the newly allocated, uninitialized element.
*/
-static plist_element_t *allocate_element(plist_t* list) {
+static plist_element_t *allocate_element(plist_t* list)
+{
plist_element_t *new_element;
if (list->first_free_element != NULL) {
return new_element;
}
-plist_t *plist_new(void) {
+plist_t *plist_new(void)
+{
plist_t *list = xmalloc(sizeof(*list) + sizeof(*list->obst));
list->obst = (struct obstack *)&list[1];
return list;
}
-plist_t *plist_obstack_new(struct obstack *obst) {
+plist_t *plist_obstack_new(struct obstack *obst)
+{
plist_t *list = OALLOC(obst, plist_t);
list->obst = obst;
return list;
}
-void plist_free(plist_t *list) {
+void plist_free(plist_t *list)
+{
list->first_element = NULL;
list->last_element = NULL;
list->first_free_element = NULL;
}
}
-void plist_insert_back(plist_t *list, void *value) {
+void plist_insert_back(plist_t *list, void *value)
+{
if (list->last_element != NULL) {
plist_insert_after(list, list->last_element, value);
}
}
}
-void plist_insert_front(plist_t *list, void *value) {
+void plist_insert_front(plist_t *list, void *value)
+{
if (list->first_element != NULL) {
plist_insert_before(list, list->first_element, value);
}
}
}
-void plist_insert_before(plist_t *list, plist_element_t *element, void *value) {
+void plist_insert_before(plist_t *list, plist_element_t *element, void *value)
+{
plist_element_t *prevElement;
plist_element_t *newElement = allocate_element(list);
++list->element_count;
}
-void plist_insert_after(plist_t* list, plist_element_t* element, void* value) {
+void plist_insert_after(plist_t* list, plist_element_t* element, void* value)
+{
plist_element_t *nextElement;
plist_element_t *newElement = allocate_element(list);
++list->element_count;
}
-int plist_has_value(plist_t *list, void *value) {
+int plist_has_value(plist_t *list, void *value)
+{
plist_element_t *iter;
for (iter = plist_first(list); iter; iter = plist_element_get_next(iter)) {
return 0;
}
-plist_element_t *plist_find_value(plist_t *list, void *value) {
+plist_element_t *plist_find_value(plist_t *list, void *value)
+{
plist_element_t *iter;
for (iter = plist_first(list); iter; iter = plist_element_get_next(iter)) {
return NULL;
}
-void plist_erase(plist_t *list, plist_element_t *element) {
+void plist_erase(plist_t *list, plist_element_t *element)
+{
plist_element_t *next_element = element->next;
plist_element_t *prev_element = element->prev;
list->first_free_element = element;
}
-void plist_clear(plist_t *list) {
+void plist_clear(plist_t *list)
+{
plist_element_t *curr_element = list->first_element;
while (curr_element != NULL) {
/**
* compare the keys of two entry pairs
*/
-static int pmap_entry_cmp(const void *p1, const void *p2, size_t size) {
+static int pmap_entry_cmp(const void *p1, const void *p2, size_t size)
+{
const pmap_entry *entry1 = p1;
const pmap_entry *entry2 = p2;
(void) size;
}
/* Creates a new empty map with an initial number of slots. */
-pmap *pmap_create_ex(int slots) {
+pmap *pmap_create_ex(int slots)
+{
return (pmap *)new_set(pmap_entry_cmp, slots);
}
-pmap *pmap_create(void) {
+pmap *pmap_create(void)
+{
return pmap_create_ex(INITIAL_SLOTS);
}
-void pmap_destroy(pmap *map) {
+void pmap_destroy(pmap *map)
+{
del_set(M2S(map));
}
-void pmap_insert(pmap *map, const void *key, void *value) {
+void pmap_insert(pmap *map, const void *key, void *value)
+{
pmap_entry entry, *p;
entry.key = key;
p->value = value;
}
-int pmap_contains(pmap *map, const void *key) {
+int pmap_contains(pmap *map, const void *key)
+{
return set_find(M2S(map), &key, sizeof(pmap_entry), HASH_PTR(key)) != NULL;
}
-pmap_entry * pmap_find(pmap *map, const void *key) {
+pmap_entry * pmap_find(pmap *map, const void *key)
+{
return (pmap_entry *)set_find(M2S(map), &key, sizeof(pmap_entry), HASH_PTR(key));
}
-void * pmap_get(pmap *map, const void *key) {
+void * pmap_get(pmap *map, const void *key)
+{
pmap_entry * entry = pmap_find(map, key);
return entry == NULL ? NULL : entry->value;
}
-int pmap_count(pmap *map) {
+int pmap_count(pmap *map)
+{
return set_count(M2S(map));
}
-pmap_entry *pmap_first(pmap *map) {
+pmap_entry *pmap_first(pmap *map)
+{
return (pmap_entry *) set_first(M2S(map));
}
-pmap_entry *pmap_next(pmap *map) {
+pmap_entry *pmap_next(pmap *map)
+{
return (pmap_entry *) set_next(M2S(map));
}
-void pmap_break(pmap *map) {
+void pmap_break(pmap *map)
+{
set_break(M2S(map));
}
* Enforces the heap characteristics if the queue
* starting from element at position @p pos.
*/
-static void pqueue_heapify(pqueue_t *q, unsigned pos) {
+static void pqueue_heapify(pqueue_t *q, unsigned pos)
+{
unsigned len = ARR_LEN(q->elems);
while (pos * 2 < len) {
/**
* Sifts up a newly inserted element at position @p pos.
*/
-static void pqueue_sift_up(pqueue_t *q, unsigned pos) {
+static void pqueue_sift_up(pqueue_t *q, unsigned pos)
+{
while(q->elems[pos].priority > q->elems[pos / 2].priority) {
pqueue_el_t tmp;
}
}
-pqueue_t *new_pqueue(void) {
+pqueue_t *new_pqueue(void)
+{
pqueue_t *res = XMALLOC(pqueue_t);
res->elems = NEW_ARR_F(pqueue_el_t, 0);
return res;
}
-void del_pqueue(pqueue_t *q) {
+void del_pqueue(pqueue_t *q)
+{
DEL_ARR_F(q->elems);
free(q);
}
-void pqueue_put(pqueue_t *q, void *data, int priority) {
+void pqueue_put(pqueue_t *q, void *data, int priority)
+{
pqueue_el_t el;
el.data = data;
pqueue_sift_up(q, ARR_LEN(q->elems) - 1);
}
-void *pqueue_pop_front(pqueue_t *q) {
+void *pqueue_pop_front(pqueue_t *q)
+{
switch(ARR_LEN(q->elems)) {
case 0:
assert(0 && "Attempt to retrieve element from empty priority queue.");
}
}
-int pqueue_length(const pqueue_t *q) {
+int pqueue_length(const pqueue_t *q)
+{
return ARR_LEN(q->elems);
}
-int pqueue_empty(const pqueue_t *q) {
+int pqueue_empty(const pqueue_t *q)
+{
return ARR_LEN(q->elems) == 0;
}
return pset_hinsert (se, key, hash);
}
-void pset_insert_pset_ptr(pset *target, pset *src) {
+void pset_insert_pset_ptr(pset *target, pset *src)
+{
void *elt;
for (elt = pset_first(src); elt; elt = pset_next(src)) {
pset_insert_ptr(target, elt);
#include "xmalloc.h"
#include "error.h"
-static NORETURN xnomem(void) {
+static NORETURN xnomem(void)
+{
panic("out of memory");
}
/** malloc() wrapper that panics on allocation failure instead of returning NULL. */
void *xmalloc(size_t size)
{
	void *res = malloc(size);
	if (!res)
		xnomem();
	return res;
}
-void *xrealloc(void *ptr, size_t size) {
+void *xrealloc(void *ptr, size_t size)
+{
/* ANSI blesses realloc (0, x) but SunOS chokes on it */
void *res = ptr ? realloc (ptr, size) : malloc (size);
return res;
}
-char *xstrdup(const char *str) {
+char *xstrdup(const char *str)
+{
size_t len = strlen (str) + 1;
return memcpy((xmalloc) (len), str, len);
}
* @param arg The graph argument with mode reference,
* that must be checked.
*/
-static unsigned analyze_arg(ir_node *arg, unsigned bits) {
+static unsigned analyze_arg(ir_node *arg, unsigned bits)
+{
int i, p;
ir_node *succ;
*
* @param irg The ir graph to analyze.
*/
-static void analyze_ent_args(ir_entity *ent) {
+static void analyze_ent_args(ir_entity *ent)
+{
ir_graph *irg;
ir_node *irg_args, *arg;
ir_mode *arg_mode;
*
* @param irg The ir graph to analyze.
*/
-void analyze_irg_args(ir_graph *irg) {
+void analyze_irg_args(ir_graph *irg)
+{
ir_entity *ent;
if (irg == get_const_code_irg())
*
* @param arg The parameter them weight muss be computed.
*/
-static unsigned calc_method_param_weight(ir_node *arg) {
+static unsigned calc_method_param_weight(ir_node *arg)
+{
int i, j, k;
ir_node *succ, *op;
unsigned weight = null_weight;
*
* @param ent The entity of the ir_graph.
*/
-static void analyze_method_params_weight(ir_entity *ent) {
+static void analyze_method_params_weight(ir_entity *ent)
+{
ir_type *mtp;
ir_graph *irg;
int nparams, i, proj_nr;
*
* @param irg The ir graph to analyze.
*/
-void analyze_irg_args_weight(ir_graph *irg) {
+void analyze_irg_args_weight(ir_graph *irg)
+{
ir_entity *ent;
ent = get_irg_entity(irg);
static inline void set_cg_irg_visited (ir_graph *n, ir_visited_t i);
/** Returns the callgraph state of the program representation. */
-irp_callgraph_state get_irp_callgraph_state(void) {
+irp_callgraph_state get_irp_callgraph_state(void)
+{
return irp->callgraph_state;
}
/* Sets the callgraph state of the program representation. */
-void set_irp_callgraph_state(irp_callgraph_state s) {
+void set_irp_callgraph_state(irp_callgraph_state s)
+{
irp->callgraph_state = s;
}
/* Returns the number of procedures that call the given irg. */
-int get_irg_n_callers(const ir_graph *irg) {
+int get_irg_n_callers(const ir_graph *irg)
+{
if (irg->callers) return ARR_LEN(irg->callers);
return -1;
}
/* Returns the caller at position pos. */
-ir_graph *get_irg_caller(const ir_graph *irg, int pos) {
+ir_graph *get_irg_caller(const ir_graph *irg, int pos)
+{
assert(pos >= 0 && pos < get_irg_n_callers(irg));
if (irg->callers) return irg->callers[pos];
return NULL;
}
/* Returns non-zero if the caller at position pos is "a backedge", i.e. a recursion. */
-int is_irg_caller_backedge(const ir_graph *irg, int pos) {
+int is_irg_caller_backedge(const ir_graph *irg, int pos)
+{
assert(pos >= 0 && pos < get_irg_n_callers(irg));
return irg->caller_isbe != NULL ? rbitset_is_set(irg->caller_isbe, pos) : 0;
}
/** Search the caller in the list of all callers and set it's backedge property. */
-static void set_irg_caller_backedge(ir_graph *irg, ir_graph *caller) {
+static void set_irg_caller_backedge(ir_graph *irg, ir_graph *caller)
+{
int i, n_callers = get_irg_n_callers(irg);
/* allocate a new array on demand */
}
/* Returns non-zero if the irg has a backedge caller. */
-int has_irg_caller_backedge(const ir_graph *irg) {
+int has_irg_caller_backedge(const ir_graph *irg)
+{
int i, n_callers = get_irg_n_callers(irg);
if (irg->caller_isbe != NULL) {
* Given the position pos_caller of an caller of irg, return
* irg's callee position on that caller.
*/
-static int reverse_pos(const ir_graph *callee, int pos_caller) {
+static int reverse_pos(const ir_graph *callee, int pos_caller)
+{
ir_graph *caller = get_irg_caller(callee, pos_caller);
/* search the other relation for the corresponding edge. */
int pos_callee = -1;
}
/* Returns the maximal loop depth of call nodes that call along this edge. */
-int get_irg_caller_loop_depth(const ir_graph *irg, int pos) {
+int get_irg_caller_loop_depth(const ir_graph *irg, int pos)
+{
ir_graph *caller = get_irg_caller(irg, pos);
int pos_callee = reverse_pos(irg, pos);
/* Returns the number of procedures that are called by the given irg. */
-int get_irg_n_callees(const ir_graph *irg) {
+int get_irg_n_callees(const ir_graph *irg)
+{
if (irg->callees) return ARR_LEN(irg->callees);
return -1;
}
/* Returns the callee at position pos. */
-ir_graph *get_irg_callee(const ir_graph *irg, int pos) {
+ir_graph *get_irg_callee(const ir_graph *irg, int pos)
+{
assert(pos >= 0 && pos < get_irg_n_callees(irg));
if (irg->callees) return irg->callees[pos]->irg;
return NULL;
}
/* Returns non-zero if the callee at position pos is "a backedge", i.e. a recursion. */
-int is_irg_callee_backedge(const ir_graph *irg, int pos) {
+int is_irg_callee_backedge(const ir_graph *irg, int pos)
+{
assert(pos >= 0 && pos < get_irg_n_callees(irg));
return irg->callee_isbe != NULL ? rbitset_is_set(irg->callee_isbe, pos) : 0;
}
/* Returns non-zero if the irg has a backedge callee. */
-int has_irg_callee_backedge(const ir_graph *irg) {
+int has_irg_callee_backedge(const ir_graph *irg)
+{
int i, n_callees = get_irg_n_callees(irg);
if (irg->callee_isbe != NULL) {
/**
* Mark the callee at position pos as a backedge.
*/
-static void set_irg_callee_backedge(ir_graph *irg, int pos) {
+static void set_irg_callee_backedge(ir_graph *irg, int pos)
+{
int n = get_irg_n_callees(irg);
/* allocate a new array on demand */
}
/* Returns the maximal loop depth of call nodes that call along this edge. */
-int get_irg_callee_loop_depth(const ir_graph *irg, int pos) {
+int get_irg_callee_loop_depth(const ir_graph *irg, int pos)
+{
assert(pos >= 0 && pos < get_irg_n_callees(irg));
if (irg->callees) return irg->callees[pos]->max_depth;
return -1;
}
-double get_irg_callee_execution_frequency(const ir_graph *irg, int pos) {
+double get_irg_callee_execution_frequency(const ir_graph *irg, int pos)
+{
ir_node **arr = irg->callees[pos]->call_list;
int i, n_Calls = ARR_LEN(arr);
double freq = 0.0;
return freq;
}
-double get_irg_callee_method_execution_frequency(const ir_graph *irg, int pos) {
+double get_irg_callee_method_execution_frequency(const ir_graph *irg, int pos)
+{
double call_freq = get_irg_callee_execution_frequency(irg, pos);
double meth_freq = get_irg_method_execution_frequency(irg);
return call_freq * meth_freq;
}
-double get_irg_caller_method_execution_frequency(const ir_graph *irg, int pos) {
+double get_irg_caller_method_execution_frequency(const ir_graph *irg, int pos)
+{
ir_graph *caller = get_irg_caller(irg, pos);
int pos_callee = reverse_pos(irg, pos);
/**
* Walker called by compute_callgraph(), analyses all Call nodes.
*/
-static void ana_Call(ir_node *n, void *env) {
+static void ana_Call(ir_node *n, void *env)
+{
int i, n_callees;
ir_graph *irg;
(void) env;
}
/** compare two ir graphs in a cg_callee_entry */
-static int cg_callee_entry_cmp(const void *elt, const void *key) {
+static int cg_callee_entry_cmp(const void *elt, const void *key)
+{
const cg_callee_entry *e1 = elt;
const cg_callee_entry *e2 = key;
return e1->irg != e2->irg;
}
/** compare two ir graphs for pointer identity */
-static int graph_cmp(const void *elt, const void *key) {
+static int graph_cmp(const void *elt, const void *key)
+{
const ir_graph *e1 = elt;
const ir_graph *e2 = key;
return e1 != e2;
/* Construct and destruct the callgraph. */
-void compute_callgraph(void) {
+void compute_callgraph(void)
+{
int i, n_irgs;
#ifdef INTERPROCEDURAL_VIEW
}
/* Destruct the callgraph. */
-void free_callgraph(void) {
+void free_callgraph(void)
+{
int i, n_irgs = get_irp_n_irgs();
for (i = 0; i < n_irgs; ++i) {
ir_graph *irg = get_irp_irg(i);
/* ----------------------------------------------------------------------------------- */
-static void do_walk(ir_graph *irg, callgraph_walk_func *pre, callgraph_walk_func *post, void *env) {
+static void do_walk(ir_graph *irg, callgraph_walk_func *pre, callgraph_walk_func *post, void *env)
+{
int i, n_callees;
if (cg_irg_visited(irg))
post(irg, env);
}
-void callgraph_walk(callgraph_walk_func *pre, callgraph_walk_func *post, void *env) {
+void callgraph_walk(callgraph_walk_func *pre, callgraph_walk_func *post, void *env)
+{
int i, n_irgs = get_irp_n_irgs();
++master_cg_visited;
/**
* allocates a new scc_info on the obstack
*/
-static inline scc_info *new_scc_info(struct obstack *obst) {
+static inline scc_info *new_scc_info(struct obstack *obst)
+{
return OALLOCZ(obst, scc_info);
}
/**
* Returns non-zero if a graph was already visited.
*/
-static inline int cg_irg_visited(ir_graph *irg) {
+static inline int cg_irg_visited(ir_graph *irg)
+{
return irg->self_visited >= master_cg_visited;
}
/**
* Marks a graph as visited.
*/
-static inline void mark_cg_irg_visited(ir_graph *irg) {
+static inline void mark_cg_irg_visited(ir_graph *irg)
+{
irg->self_visited = master_cg_visited;
}
/**
* Set a graphs visited flag to i.
*/
-static inline void set_cg_irg_visited(ir_graph *irg, ir_visited_t i) {
+static inline void set_cg_irg_visited(ir_graph *irg, ir_visited_t i)
+{
irg->self_visited = i;
}
/**
* Returns the visited flag of a graph.
*/
-static inline ir_visited_t get_cg_irg_visited(ir_graph *irg) {
+static inline ir_visited_t get_cg_irg_visited(ir_graph *irg)
+{
return irg->self_visited;
}
-static inline void mark_irg_in_stack(ir_graph *irg) {
+static inline void mark_irg_in_stack(ir_graph *irg)
+{
scc_info *info = get_irg_link(irg);
assert(info && "missing call to init_scc()");
info->in_stack = 1;
}
-static inline void mark_irg_not_in_stack(ir_graph *irg) {
+static inline void mark_irg_not_in_stack(ir_graph *irg)
+{
scc_info *info = get_irg_link(irg);
assert(info && "missing call to init_scc()");
info->in_stack = 0;
}
-static inline int irg_is_in_stack(ir_graph *irg) {
+static inline int irg_is_in_stack(ir_graph *irg)
+{
scc_info *info = get_irg_link(irg);
assert(info && "missing call to init_scc()");
return info->in_stack;
}
-static inline void set_irg_uplink(ir_graph *irg, int uplink) {
+static inline void set_irg_uplink(ir_graph *irg, int uplink)
+{
scc_info *info = get_irg_link(irg);
assert(info && "missing call to init_scc()");
info->uplink = uplink;
}
-static inline int get_irg_uplink(ir_graph *irg) {
+static inline int get_irg_uplink(ir_graph *irg)
+{
scc_info *info = get_irg_link(irg);
assert(info && "missing call to init_scc()");
return info->uplink;
}
-static inline void set_irg_dfn(ir_graph *irg, int dfn) {
+static inline void set_irg_dfn(ir_graph *irg, int dfn)
+{
scc_info *info = get_irg_link(irg);
assert(info && "missing call to init_scc()");
info->dfn = dfn;
}
-static inline int get_irg_dfn(ir_graph *irg) {
+static inline int get_irg_dfn(ir_graph *irg)
+{
scc_info *info = get_irg_link(irg);
assert(info && "missing call to init_scc()");
return info->dfn;
/**
* Initialize the irg stack.
*/
-static inline void init_stack(void) {
+static inline void init_stack(void)
+{
if (stack) {
ARR_RESIZE(ir_graph *, stack, 1000);
} else {
* push a graph on the irg stack
* @param n the graph to be pushed
*/
-static inline void push(ir_graph *irg) {
+static inline void push(ir_graph *irg)
+{
if (tos == ARR_LEN(stack)) {
int nlen = ARR_LEN(stack) * 2;
ARR_RESIZE(ir_node *, stack, nlen);
/**
* return the topmost graph on the stack and pop it
*/
-static inline ir_graph *pop(void) {
+static inline ir_graph *pop(void)
+{
ir_graph *irg = stack[--tos];
mark_irg_not_in_stack(irg);
return irg;
* The nodes up to irg belong to the current loop.
* Removes them from the stack and adds them to the current loop.
*/
-static inline void pop_scc_to_loop(ir_graph *irg) {
+static inline void pop_scc_to_loop(ir_graph *irg)
+{
ir_graph *m;
do {
/* GL ??? my last son is my grandson??? Removes cfloops with no
ir_nodes in them. Such loops have only another loop as son. (Why
can't they have two loops as sons? Does it never get that far? ) */
-static void close_loop(ir_loop *l) {
+static void close_loop(ir_loop *l)
+{
int last = get_loop_n_elements(l) - 1;
loop_element lelement = get_loop_element(l, last);
ir_loop *last_son = lelement.son;
* Removes and unmarks all nodes up to n from the stack.
* The nodes must be visited once more to assign them to a scc.
*/
-static inline void pop_scc_unmark_visit(ir_graph *n) {
+static inline void pop_scc_unmark_visit(ir_graph *n)
+{
ir_graph *m = NULL;
while (m != n) {
* Allocates a new loop as son of current_loop. Sets current_loop
* to the new loop and returns the father.
*/
-static ir_loop *new_loop(void) {
+static ir_loop *new_loop(void)
+{
ir_loop *father = current_loop;
ir_loop *son = alloc_loop(father, outermost_ir_graph->obst);
/* Initialization steps. **********************************************/
-static void init_scc(struct obstack *obst) {
+static void init_scc(struct obstack *obst)
+{
int i;
int n_irgs;
*
* @param root: only needed for assertion.
*/
-static int is_head(ir_graph *n, ir_graph *root) {
+static int is_head(ir_graph *n, ir_graph *root)
+{
int i, arity;
int some_outof_loop = 0, some_in_loop = 0;
}
/** Returns index of the predecessor with the largest dfn number. */
-static int largest_dfn_pred(ir_graph *n) {
+static int largest_dfn_pred(ir_graph *n)
+{
int i, index = -2, max = -1;
int arity = get_irg_n_callees(n);
}
#ifndef INTERPROCEDURAL_VIEW
-static ir_graph *find_tail(ir_graph *n) {
+static ir_graph *find_tail(ir_graph *n)
+{
ir_graph *m;
int i, res_index = -2;
return get_irg_callee(m, res_index);
}
#else
-static ir_graph *find_tail(ir_graph *n) {
+static ir_graph *find_tail(ir_graph *n)
+{
ir_graph *m;
int i, res_index = -2;
*-----------------------------------------------------------*/
-static void cgscc(ir_graph *n) {
+static void cgscc(ir_graph *n)
+{
int i, arity;
if (cg_irg_visited(n)) return;
/**
* reset the backedge information for all callers in all irgs
*/
-static void reset_isbe(void) {
+static void reset_isbe(void)
+{
int i, n_irgs = get_irp_n_irgs();
for (i = 0; i < n_irgs; ++i) {
/* weight. Assign graphs the maximal depth. */
/* ----------------------------------------------------------------------------------- */
-static void compute_loop_depth(ir_graph *irg, void *env) {
+static void compute_loop_depth(ir_graph *irg, void *env)
+{
int current_nesting = *(int *) env;
int old_nesting = irg->callgraph_loop_depth;
ir_visited_t old_visited = get_cg_irg_visited(irg);
/**
* push a loop entry on the stack
*/
-static void push2(ana_entry2 *e, ir_loop *g) {
+static void push2(ana_entry2 *e, ir_loop *g)
+{
if (ARR_LEN(e->loop_stack) == e->tos) {
ARR_APP1(ir_loop *, e->loop_stack, g);
} else {
/**
* returns the top of stack and pop it
*/
-static ir_loop *pop2(ana_entry2 *e) {
+static ir_loop *pop2(ana_entry2 *e)
+{
return e->loop_stack[--e->tos];
}
/**
* check if a loop g in on the stack. Did not check the TOS.
*/
-static int in_stack(ana_entry2 *e, ir_loop *g) {
+static int in_stack(ana_entry2 *e, ir_loop *g)
+{
int i;
for (i = e->tos-1; i >= 0; --i) {
if (e->loop_stack[i] == g) return 1;
return 0;
}
-static void compute_rec_depth(ir_graph *irg, void *env) {
+static void compute_rec_depth(ir_graph *irg, void *env)
+{
ana_entry2 *e = (ana_entry2 *)env;
ir_loop *l = irg->l;
int depth, old_depth = irg->callgraph_recursion_depth;
/* ----------------------------------------------------------------------------------- */
/* Returns the method execution frequency of a graph. */
-double get_irg_method_execution_frequency(const ir_graph *irg) {
+double get_irg_method_execution_frequency(const ir_graph *irg)
+{
return irg->method_execution_frequency;
}
* Increase the method execution frequency to freq if its current value is
* smaller then this.
*/
-static void set_irg_method_execution_frequency(ir_graph *irg, double freq) {
+static void set_irg_method_execution_frequency(ir_graph *irg, double freq)
+{
irg->method_execution_frequency = freq;
if (irp->max_method_execution_frequency < freq)
irp->max_method_execution_frequency = freq;
}
-static void compute_method_execution_frequency(ir_graph *irg, void *env) {
+static void compute_method_execution_frequency(ir_graph *irg, void *env)
+{
int i, n_callers;
double freq;
int found_edge;
/* ----------------------------------------------------------------------------------- */
/* Compute the backedges that represent recursions. */
-void find_callgraph_recursions(void) {
+void find_callgraph_recursions(void)
+{
int i, n_irgs;
struct obstack temp;
}
/* Compute interprocedural performance estimates. */
-void compute_performance_estimates(void) {
+void compute_performance_estimates(void)
+{
int i, n_irgs = get_irp_n_irgs();
int current_nesting;
ana_entry2 e;
/* Returns the maximal loop depth of all paths from an external visible method to
this irg. */
-int get_irg_loop_depth(const ir_graph *irg) {
+int get_irg_loop_depth(const ir_graph *irg)
+{
assert(irp->callgraph_state == irp_callgraph_consistent ||
irp->callgraph_state == irp_callgraph_and_calltree_consistent);
return irg->callgraph_loop_depth;
/* Returns the maximal recursion depth of all paths from an external visible method to
this irg. */
-int get_irg_recursion_depth(const ir_graph *irg) {
+int get_irg_recursion_depth(const ir_graph *irg)
+{
assert(irp->callgraph_state == irp_callgraph_and_calltree_consistent);
return irg->callgraph_recursion_depth;
}
/* Computes the interprocedural loop nesting information. */
-void analyse_loop_nesting_depth(void) {
+void analyse_loop_nesting_depth(void)
+{
ir_entity **free_methods = NULL;
int arr_len;
set_irp_loop_nesting_depth_state(loop_nesting_depth_consistent);
}
-loop_nesting_depth_state get_irp_loop_nesting_depth_state(void) {
+loop_nesting_depth_state get_irp_loop_nesting_depth_state(void)
+{
return irp->lnd_state;
}
-void set_irp_loop_nesting_depth_state(loop_nesting_depth_state s) {
+void set_irp_loop_nesting_depth_state(loop_nesting_depth_state s)
+{
irp->lnd_state = s;
}
-void set_irp_loop_nesting_depth_state_inconsistent(void) {
+void set_irp_loop_nesting_depth_state_inconsistent(void)
+{
if (irp->lnd_state == loop_nesting_depth_consistent)
irp->lnd_state = loop_nesting_depth_inconsistent;
}
static cdep_info *cdep_data;
/* Return a list of all control dependences of a block. */
-ir_cdep *find_cdep(const ir_node *block) {
+ir_cdep *find_cdep(const ir_node *block)
+{
return pmap_get(cdep_data->cdep_map, block);
}
/* Replace the control dependence info of old by the info of nw. */
-void exchange_cdep(ir_node *old, const ir_node *nw) {
+void exchange_cdep(ir_node *old, const ir_node *nw)
+{
ir_cdep *cdep = find_cdep(nw);
pmap_insert(cdep_data->cdep_map, old, cdep);
}
/**
* Adds a control dependence from node to dep_on.
*/
-static void add_cdep(ir_node *node, ir_node *dep_on) {
+static void add_cdep(ir_node *node, ir_node *dep_on)
+{
ir_cdep *dep = find_cdep(node);
#if 0
ir_fprintf(stderr, "Adding cdep of %+F on %+F\n", node, dep_on);
/**
* Pre-block-walker: calculate the control dependence
*/
-static void cdep_pre(ir_node *node, void *ctx) {
+static void cdep_pre(ir_node *node, void *ctx)
+{
cdep_env *env = ctx;
int i;
}
/* Compute the control dependence graph for a graph. */
-void compute_cdep(ir_graph *irg) {
+void compute_cdep(ir_graph *irg)
+{
ir_node *rem;
cdep_env env;
}
/* Free the control dependence info. */
-void free_cdep(ir_graph *irg) {
+void free_cdep(ir_graph *irg)
+{
(void) irg;
if (cdep_data != NULL) {
pmap_destroy(cdep_data->cdep_map);
}
/* Check whether dependee is (directly) control dependent on candidate. */
-int is_cdep_on(const ir_node *dependee, const ir_node *candidate) {
+int is_cdep_on(const ir_node *dependee, const ir_node *candidate)
+{
const ir_cdep *dep;
for (dep = find_cdep(dependee); dep != NULL; dep = dep->next) {
}
/* Check whether dependee is (possible iterated) control dependent on candidate. */
-int is_iterated_cdep_on(ir_node *dependee, ir_node *candidate) {
+int is_iterated_cdep_on(ir_node *dependee, ir_node *candidate)
+{
const ir_cdep *dep;
while ((dep = find_cdep(dependee)) != NULL) {
}
/* If block is control dependent on exactly one node, return this node, else NULL. */
-ir_node *get_unique_cdep(const ir_node *block) {
+ir_node *get_unique_cdep(const ir_node *block)
+{
ir_cdep *cdep = find_cdep(block);
return cdep != NULL && cdep->next == NULL ? cdep->node : NULL;
}
/* Check if the given block is control dependent of more than one node. */
-int has_multiple_cdep(const ir_node *block) {
+int has_multiple_cdep(const ir_node *block)
+{
ir_cdep *cdep = find_cdep(block);
return cdep != NULL && cdep->next != NULL;
/* We use this set for all nodes in all irgraphs. */
static set *exec_freq_set = NULL;
-static int exec_freq_cmp(const void *e1, const void *e2, size_t size) {
+static int exec_freq_cmp(const void *e1, const void *e2, size_t size)
+{
reg_exec_freq *ef1 = (reg_exec_freq *)e1;
reg_exec_freq *ef2 = (reg_exec_freq *)e2;
(void) size;
return (ef1->reg != ef2->reg);
}
-static inline unsigned int exec_freq_hash(reg_exec_freq *e) {
+static inline unsigned int exec_freq_hash(reg_exec_freq *e)
+{
return HASH_PTR(e->reg);
}
-static inline void set_region_exec_freq(void *reg, double freq) {
+static inline void set_region_exec_freq(void *reg, double freq)
+{
reg_exec_freq ef;
ef.reg = reg;
ef.freq = freq;
set_insert(exec_freq_set, &ef, sizeof(ef), exec_freq_hash(&ef));
}
-double get_region_exec_freq(void *reg) {
+double get_region_exec_freq(void *reg)
+{
reg_exec_freq ef, *found;
ef.reg = reg;
assert(exec_freq_set);
}
/* Returns the number of times the block is executed. */
-double get_Block_exec_freq(ir_node *b) {
+double get_Block_exec_freq(ir_node *b)
+{
return get_region_exec_freq((void *)b);
}
-double get_irn_exec_freq(ir_node *n) {
+double get_irn_exec_freq(ir_node *n)
+{
if (!is_Block(n)) n = get_nodes_block(n);
return get_Block_exec_freq(n);
}
static ir_node *Cond_list = NULL;
/* We do not use an extra set, as Projs are not yet in the existing one. */
-void set_ProjX_probability(ir_node *n, Cond_prob prob) {
+void set_ProjX_probability(ir_node *n, Cond_prob prob)
+{
reg_exec_freq ef;
ef.reg = n;
ef.prob = prob;
set_insert(exec_freq_set, &ef, sizeof(ef), exec_freq_hash(&ef));
}
-Cond_prob get_ProjX_probability(ir_node *n) {
+Cond_prob get_ProjX_probability(ir_node *n)
+{
reg_exec_freq ef, *found;
ef.reg = n;
/* A walker that only visits the nodes we want to see. */
static void
-my_irg_walk_2_both(ir_node *node, irg_walk_func *pre, irg_walk_func *post, void * env) {
+my_irg_walk_2_both(ir_node *node, irg_walk_func *pre, irg_walk_func *post, void * env)
+{
int i;
set_irn_visited(node, current_ir_graph->visited);
post(node, env);
}
-static void my_irg_walk_current_graph(irg_walk_func *pre, irg_walk_func *post, void *env) {
+static void my_irg_walk_current_graph(irg_walk_func *pre, irg_walk_func *post, void *env)
+{
inc_irg_visited(current_ir_graph);
my_irg_walk_2_both(get_irg_end(current_ir_graph), pre, post, env);
}
/** Precompute which Conds test for an exception.
*
* Operates on current_ir_graph. */
-void precompute_cond_evaluation(void) {
+void precompute_cond_evaluation(void)
+{
ir_node *c;
compute_irg_outs(current_ir_graph);
}
}
-int is_fragile_Proj(ir_node *n) {
+int is_fragile_Proj(ir_node *n)
+{
return is_Proj(n) && (get_ProjX_probability(n) == Cond_prob_exception_taken);
}
*
* Given all outs of the predecessor region, we can compute the weight of
* this single edge. */
-static inline double get_weighted_region_exec_freq(void *reg, int pos) {
+static inline double get_weighted_region_exec_freq(void *reg, int pos)
+{
void *pred_reg = get_region_in(reg, pos);
double res, full_freq = get_region_exec_freq (pred_reg);
int n_outs = get_region_n_outs (pred_reg);
return res;
}
-static inline void compute_region_freqency(void *reg, double head_weight) {
+static inline void compute_region_freqency(void *reg, double head_weight)
+{
int i, n_ins = get_region_n_ins(reg);
double my_freq = 0;
}
/* Compute the ex freq for current_ir_graph */
-static void compute_frequency(int default_loop_weight) {
+static void compute_frequency(int default_loop_weight)
+{
ir_loop *outermost_l = get_irg_loop(current_ir_graph);
pdeq *block_worklist = new_pdeq1(outermost_l);
* irg: The graph to be analyzed.
* default_loop_weight: The number of executions of a loop.
*/
-void compute_execution_frequency(ir_graph *irg, int default_loop_weight, double exception_probability) {
+void compute_execution_frequency(ir_graph *irg, int default_loop_weight, double exception_probability)
+{
ir_graph *rem = current_ir_graph;
current_ir_graph = irg;
exception_prob = exception_probability;
}
-void compute_execution_frequencies(int default_loop_weight, double exception_probability) {
+void compute_execution_frequencies(int default_loop_weight, double exception_probability)
+{
int i, n_irgs = get_irp_n_irgs();
free_intervals();
for (i = 0; i < n_irgs; ++i) {
}
/** free occupied memory, reset */
-void free_execution_frequency(void) {
+void free_execution_frequency(void)
+{
int i, n_irgs = get_irp_n_irgs();
free_intervals();
del_set(exec_freq_set);
set_irp_exec_freq_state(exec_freq_none);
}
-exec_freq_state get_irg_exec_freq_state(ir_graph *irg) {
+exec_freq_state get_irg_exec_freq_state(ir_graph *irg)
+{
return irg->execfreq_state;
}
-void set_irg_exec_freq_state(ir_graph *irg, exec_freq_state s) {
+void set_irg_exec_freq_state(ir_graph *irg, exec_freq_state s)
+{
if ((get_irp_exec_freq_state() == exec_freq_consistent && s != exec_freq_consistent) ||
(get_irp_exec_freq_state() == exec_freq_none && s != exec_freq_none))
irp->execfreq_state = exec_freq_inconsistent;
}
/* Sets irg and irp exec freq state to inconsistent if it is set to consistent. */
-void set_irg_exec_freq_state_inconsistent(ir_graph *irg) {
+void set_irg_exec_freq_state_inconsistent(ir_graph *irg)
+{
if (get_irg_exec_freq_state(irg) == exec_freq_consistent)
set_irg_exec_freq_state(irg, exec_freq_inconsistent);
}
-void set_irp_exec_freq_state(exec_freq_state s) {
+void set_irp_exec_freq_state(exec_freq_state s)
+{
irp->execfreq_state = s;
}
-exec_freq_state get_irp_exec_freq_state(void) {
+exec_freq_state get_irp_exec_freq_state(void)
+{
return irp->execfreq_state;
}
/* Sets irp and all irg exec freq states to inconsistent if it is set to consistent. */
-void set_irp_exec_freq_state_inconsistent(void) {
+void set_irp_exec_freq_state_inconsistent(void)
+{
if (get_irp_exec_freq_state() != exec_freq_none) {
int i, n_irgs = get_irp_n_irgs();
set_irp_exec_freq_state(exec_freq_inconsistent);
/* *************************************************************************** */
/* The entities that can be accessed by this Sel node. */
-int get_Sel_n_accessed_entities(ir_node *sel) {
+int get_Sel_n_accessed_entities(ir_node *sel)
+{
(void) sel;
return 1;
}
-ir_entity *get_Sel_accessed_entity(ir_node *sel, int pos) {
+ir_entity *get_Sel_accessed_entity(ir_node *sel, int pos)
+{
(void) pos;
return get_Sel_entity(sel);
}
/* The heuristic */
/* *************************************************************************** */
-int get_irn_loop_call_depth(ir_node *n) {
+int get_irn_loop_call_depth(ir_node *n)
+{
ir_graph *irg = get_irn_irg(n);
return get_irg_loop_depth(irg);
}
-int get_irn_cfloop_depth(ir_node *n) {
+int get_irn_cfloop_depth(ir_node *n)
+{
ir_loop *l = get_irn_loop(get_nodes_block(n));
if (l)
return get_loop_depth(l);
return 0;
}
-int get_irn_recursion_depth(ir_node *n) {
+int get_irn_recursion_depth(ir_node *n)
+{
ir_graph *irg = get_irn_irg(n);
return get_irg_recursion_depth(irg);
}
/** @@@ the second version of the heuristic. */
-int get_weighted_loop_depth(ir_node *n) {
+int get_weighted_loop_depth(ir_node *n)
+{
int loop_call_depth = get_irn_loop_call_depth(n);
int loop_depth = get_irn_cfloop_depth(n);
int recursion_depth = get_irn_recursion_depth(n);
@@@ the second version of the heuristic.
Return 0 if the node is neither in a loop nor in a recursion. */
-double get_irn_final_cost(ir_node *n) {
+double get_irn_final_cost(ir_node *n)
+{
double cost_loop = get_irn_exec_freq(n);
double cost_method = get_irg_method_execution_frequency(get_irn_irg(n));
int rec_depth = get_irn_recursion_depth(n);
return cost_loop*(cost_method + cost_rec);
}
-double get_type_estimated_n_instances(ir_type *tp) {
+double get_type_estimated_n_instances(ir_type *tp)
+{
int i, n_allocs = get_type_n_allocs(tp);
double n_instances = 0;
for (i = 0; i < n_allocs; ++i) {
return n_instances;
}
-double get_type_estimated_mem_consumption_bytes(ir_type *tp) {
+double get_type_estimated_mem_consumption_bytes(ir_type *tp)
+{
(void) tp;
assert(0);
return 0.0;
}
-int get_type_estimated_n_fields(ir_type *tp) {
+int get_type_estimated_n_fields(ir_type *tp)
+{
int s = 0;
switch(get_type_tpop_code(tp)) {
return s;
}
-int get_type_estimated_size_bytes(ir_type *tp) {
+int get_type_estimated_size_bytes(ir_type *tp)
+{
int s = 0;
switch(get_type_tpop_code(tp)) {
return s;
}
-double get_type_estimated_n_casts(ir_type *tp) {
+double get_type_estimated_n_casts(ir_type *tp)
+{
int i, n_casts = get_type_n_casts(tp);
double n_instances = 0;
for (i = 0; i < n_casts; ++i) {
return n_instances;
}
-double get_class_estimated_n_upcasts(ir_type *clss) {
+double get_class_estimated_n_upcasts(ir_type *clss)
+{
double n_instances = 0;
int i, j, n_casts, n_pointertypes;
return n_instances;
}
-double get_class_estimated_n_downcasts(ir_type *clss) {
+double get_class_estimated_n_downcasts(ir_type *clss)
+{
double n_instances = 0;
int i, j, n_casts, n_pointertypes;
}
-double get_class_estimated_dispatch_writes(ir_type *clss) {
+double get_class_estimated_dispatch_writes(ir_type *clss)
+{
return get_type_estimated_n_instances(clss);
}
/** Returns the number of reads of the dispatch pointer. */
-double get_class_estimated_dispatch_reads (ir_type *clss) {
+double get_class_estimated_dispatch_reads (ir_type *clss)
+{
int i, n_mems = get_class_n_members(clss);
double n_calls = 0;
for (i = 0; i < n_mems; ++i) {
return n_calls;
}
-double get_class_estimated_n_dyncalls(ir_type *clss) {
+double get_class_estimated_n_dyncalls(ir_type *clss)
+{
return get_class_estimated_dispatch_reads(clss) +
get_class_estimated_dispatch_writes(clss);
}
-double get_entity_estimated_n_loads(ir_entity *ent) {
+double get_entity_estimated_n_loads(ir_entity *ent)
+{
int i, n_acc = get_entity_n_accesses(ent);
double n_loads = 0;
for (i = 0; i < n_acc; ++i) {
return n_loads;
}
-double get_entity_estimated_n_stores(ir_entity *ent) {
+double get_entity_estimated_n_stores(ir_entity *ent)
+{
int i, n_acc = get_entity_n_accesses(ent);
double n_stores = 0;
for (i = 0; i < n_acc; ++i) {
}
/* @@@ Should we evaluate the callee array? */
-double get_entity_estimated_n_calls(ir_entity *ent) {
+double get_entity_estimated_n_calls(ir_entity *ent)
+{
int i, n_acc = get_entity_n_accesses(ent);
double n_calls = 0;
for (i = 0; i < n_acc; ++i) {
return n_calls;
}
-double get_entity_estimated_n_dyncalls(ir_entity *ent) {
+double get_entity_estimated_n_dyncalls(ir_entity *ent)
+{
int i, n_acc = get_entity_n_accesses(ent);
double n_calls = 0;
for (i = 0; i < n_acc; ++i) {
/* Auxiliary */
/* ------------------------------------------------------------------------- */
-int is_jack_rts_name(ident *name) {
+int is_jack_rts_name(ident *name)
+{
if (id_is_suffix(new_id_from_str("Exception"), name)) return 1;
if (id_is_suffix(new_id_from_str("Throwable"), name)) return 1;
if (id_is_suffix(new_id_from_str("Error"), name)) return 1;
}
-int is_jack_rts_class(ir_type *t) {
+int is_jack_rts_class(ir_type *t)
+{
ident *name = get_type_ident(t);
return is_jack_rts_name(name);
}
#include "entity_t.h" // for the assertion.
-int is_jack_rts_entity(ir_entity *e) {
+int is_jack_rts_entity(ir_entity *e)
+{
ident *name;
assert(e->ld_name);
/**
* Compare two region attributes for identical regions.
*/
-static int region_attr_cmp(const void *e1, const void *e2, size_t size) {
+static int region_attr_cmp(const void *e1, const void *e2, size_t size)
+{
region_attr *ra1 = (region_attr *)e1;
region_attr *ra2 = (region_attr *)e2;
(void) size;
}
/** Hash a region attribute (the region only). */
-static inline int attr_set_hash(region_attr *a) {
+static inline int attr_set_hash(region_attr *a)
+{
return HASH_PTR(a->reg);
}
*
* @param region the region
*/
-static inline region_attr *get_region_attr(void *region) {
+static inline region_attr *get_region_attr(void *region)
+{
region_attr r_attr, *res;
r_attr.reg = region;
return res;
}
-int get_region_n_ins(void *region) {
+int get_region_n_ins(void *region)
+{
return ARR_LEN(get_region_attr(region)->in_array);
}
-void *get_region_in(void *region, int pos) {
+void *get_region_in(void *region, int pos)
+{
assert(0 <= pos && pos < get_region_n_ins(region));
return ((get_region_attr(region)->in_array)[pos]);
}
-void add_region_in(void *region, void *in) {
+void add_region_in(void *region, void *in)
+{
ARR_APP1(void *, get_region_attr(region)->in_array, in);
get_region_attr(in)->n_outs++;
}
-int get_region_n_outs(void *region) {
+int get_region_n_outs(void *region)
+{
return get_region_attr(region)->n_outs;
}
-int get_region_n_exc_outs(void *region) {
+int get_region_n_exc_outs(void *region)
+{
return get_region_attr(region)->n_exc_outs;
}
-void inc_region_n_exc_outs(void *region) {
+void inc_region_n_exc_outs(void *region)
+{
(get_region_attr(region)->n_exc_outs)++;
}
-void *get_loop_cfop(void *region, int pos) {
+void *get_loop_cfop(void *region, int pos)
+{
assert(0 <= pos && pos < get_region_n_ins(region));
return ((get_region_attr(region)->op_array)[pos]);
}
/** Add a control flow op to a loop region. */
-static inline void add_loop_cfop(void *region, void *cfop) {
+static inline void add_loop_cfop(void *region, void *cfop)
+{
assert(cfop);
ARR_APP1(void *, get_region_attr(region)->op_array, cfop);
}
* @param reg a region
* @param cfop a control flow operation leaving this region
*/
-static inline void exc_outs(void *reg, ir_node *cfop) {
+static inline void exc_outs(void *reg, ir_node *cfop)
+{
if (is_fragile_op(cfop) || is_fragile_Proj(cfop))
inc_region_n_exc_outs(reg);
}
*
* @return non-zero if outer can be reached from inner via the outer loop relation
*/
-static int find_outer_loop(ir_loop *inner, ir_loop *outer, ir_node *blk, ir_node *cfop) {
+static int find_outer_loop(ir_loop *inner, ir_loop *outer, ir_node *blk, ir_node *cfop)
+{
if (get_loop_outer_loop(inner) == outer) {
add_region_in(inner, blk);
add_loop_cfop(inner, cfop);
* @param blk a block
* @param loop a loop
*/
-static int test_loop_nest(ir_node *blk, ir_loop *loop) {
+static int test_loop_nest(ir_node *blk, ir_loop *loop)
+{
int i, n_elems = get_loop_n_elements(loop);
for (i = 0; i < n_elems; ++i) {
*
* @return non-zero if pred is from an inner loop
*/
-static int find_inner_loop(ir_node *blk, ir_loop *l, ir_node *pred, ir_node *cfop) {
+static int find_inner_loop(ir_node *blk, ir_loop *l, ir_node *pred, ir_node *cfop)
+{
int i, n_elems = get_loop_n_elements(l);
int found = 0;
* branches directly from loop k to loop l. Add an edge l->k. Watch it: k must
* not be a direct predecessor of l in the loop tree!
*/
-static void construct_interval_block(ir_node *blk, ir_loop *l) {
+static void construct_interval_block(ir_node *blk, ir_loop *l)
+{
int i, n_cfgpreds;
if (blk == get_irg_start_block(current_ir_graph))
*
* @param l the cf loop
*/
-static void construct_interval_edges(ir_loop *l) {
+static void construct_interval_edges(ir_loop *l)
+{
int i, n_elems = get_loop_n_elements(l);
for (i = 0; i < n_elems; ++i) {
loop_element e = get_loop_element(l, i);
}
}
-void construct_intervals(ir_graph *irg) {
+void construct_intervals(ir_graph *irg)
+{
ir_loop *l;
ir_graph *rem = current_ir_graph;
current_ir_graph = rem;
}
-void free_intervals(void) {
+void free_intervals(void)
+{
region_attr *res;
if (region_attr_set == NULL)
/* */
/*------------------------------------------------------------------*/
-void dump_region_edges(FILE *F, void *reg) {
+void dump_region_edges(FILE *F, void *reg)
+{
int i, n_ins = get_region_n_ins(reg);
if (is_ir_node(reg)) {
#include "execution_frequency.h"
-static void dump_interval_block(FILE *F, ir_node *block) {
+static void dump_interval_block(FILE *F, ir_node *block)
+{
int i, fl;
/* This is a block. Dump a node for the block. */
fprintf(F, "node: {title: \""); PRINT_NODEID(block);
fprintf(F, "}\n");
}
-static void dump_interval_loop(FILE *F, ir_loop *l) {
+static void dump_interval_loop(FILE *F, ir_loop *l)
+{
int i, n_elems = get_loop_n_elements(l);
fprintf(F, "graph: { title: \"");
}
-void dump_interval_graph(ir_graph *irg, const char *suffix) {
+void dump_interval_graph(ir_graph *irg, const char *suffix)
+{
FILE *f;
ir_graph *rem;
* Does not assert whether the backarray is correct -- use
* very carefully!
*/
-static unsigned *mere_get_backarray(ir_node *n) {
+static unsigned *mere_get_backarray(ir_node *n)
+{
switch (get_irn_opcode(n)) {
case iro_Block:
if (!get_Block_matured(n)) return NULL;
* Returns backarray if the node can have backedges, else returns
* NULL.
*/
-static unsigned *get_backarray(ir_node *n) {
+static unsigned *get_backarray(ir_node *n)
+{
unsigned *ba = mere_get_backarray(n);
#ifndef NDEBUG
* Returns non-zero if node has no backarray, or
* if size of backarray == size of in array.
*/
-static int legal_backarray(ir_node *n) {
+static int legal_backarray(ir_node *n)
+{
unsigned *ba = mere_get_backarray(n);
if (ba && (rbitset_size(ba) != (unsigned) get_irn_arity(n)))
return 0;
}
#endif
-void fix_backedges(struct obstack *obst, ir_node *n) {
+void fix_backedges(struct obstack *obst, ir_node *n)
+{
unsigned *arr = mere_get_backarray(n);
ir_opcode opc;
int arity;
}
#ifdef INTERPROCEDURAL_VIEW
-int is_inter_backedge(ir_node *n, int pos) {
+int is_inter_backedge(ir_node *n, int pos)
+{
int res;
int rem = get_interprocedural_view();
set_interprocedural_view(0);
return res;
}
-int is_intra_backedge(ir_node *n, int pos) {
+int is_intra_backedge(ir_node *n, int pos)
+{
int res;
int rem = get_interprocedural_view();
set_interprocedural_view(1);
/* Returns non-zero if the predecessor pos is a backedge. */
-int is_backedge(ir_node *n, int pos) {
+int is_backedge(ir_node *n, int pos)
+{
unsigned *ba = get_backarray(n);
if (ba)
return rbitset_is_set(ba, pos);
}
/* Remarks that edge pos is a backedge. */
-void set_backedge(ir_node *n, int pos) {
+void set_backedge(ir_node *n, int pos)
+{
unsigned *ba = get_backarray(n);
assert(ba && "can only set backedges at Phi, Filter, Block nodes.");
rbitset_set(ba, pos);
}
/* Remarks that edge pos is a backedge. */
-void set_not_backedge(ir_node *n, int pos) {
+void set_not_backedge(ir_node *n, int pos)
+{
unsigned *ba = get_backarray(n);
assert(ba && "can only set backedges at Phi, Filter, Block nodes.");
rbitset_clear(ba, pos);
}
/* Returns non-zero if n has backedges. */
-int has_backedges(ir_node *n) {
+int has_backedges(ir_node *n)
+{
unsigned *ba = get_backarray(n);
if (ba) {
int arity = get_irn_arity(n);
}
/** Sets all backedge information to zero. */
-void clear_backedges(ir_node *n) {
+void clear_backedges(ir_node *n)
+{
int i, arity;
unsigned *ba;
#ifdef INTERPROCEDURAL_VIEW
}
/* Allocate a new backedge array on the obstack for given size. */
-unsigned *new_backedge_arr(struct obstack *obst, unsigned size) {
+unsigned *new_backedge_arr(struct obstack *obst, unsigned size)
+{
return rbitset_w_size_obstack_alloc(obst, size);
}
/* TODO: add an ir_op operation */
-void new_backedge_info(ir_node *n) {
+void new_backedge_info(ir_node *n)
+{
switch (get_irn_opcode(n)) {
case iro_Block:
n->attr.block.cg_backedge = NULL;
} scc_info;
/** Allocate a new scc_info on the given obstack */
-static inline scc_info *new_scc_info(struct obstack *obst) {
+static inline scc_info *new_scc_info(struct obstack *obst)
+{
return OALLOCZ(obst, scc_info);
}
/**
* Marks the node n to be on the stack.
*/
-static inline void mark_irn_in_stack(ir_node *n) {
+static inline void mark_irn_in_stack(ir_node *n)
+{
scc_info *info = get_irn_link(n);
info->in_stack = 1;
}
/**
* Marks the node n to be not on the stack.
*/
-static inline void mark_irn_not_in_stack(ir_node *n) {
+static inline void mark_irn_not_in_stack(ir_node *n)
+{
scc_info *info = get_irn_link(n);
info->in_stack = 0;
}
/**
* Returns whether node n is on the stack.
*/
-static inline int irn_is_in_stack(ir_node *n) {
+static inline int irn_is_in_stack(ir_node *n)
+{
scc_info *info = get_irn_link(n);
return info->in_stack;
}
/**
* Sets node n uplink value.
*/
-static inline void set_irn_uplink(ir_node *n, int uplink) {
+static inline void set_irn_uplink(ir_node *n, int uplink)
+{
scc_info *info = get_irn_link(n);
info->uplink = uplink;
}
/**
* Return node n uplink value.
*/
-static inline int get_irn_uplink(ir_node *n) {
+static inline int get_irn_uplink(ir_node *n)
+{
scc_info *info = get_irn_link(n);
return info->uplink;
}
/**
* Sets node n dfn value.
*/
-static inline void set_irn_dfn(ir_node *n, int dfn) {
+static inline void set_irn_dfn(ir_node *n, int dfn)
+{
scc_info *info = get_irn_link(n);
info->dfn = dfn;
}
/**
* Returns node n dfn value.
*/
-static inline int get_irn_dfn(ir_node *n) {
+static inline int get_irn_dfn(ir_node *n)
+{
scc_info *info = get_irn_link(n);
return info->dfn;
}
/**
* Initializes the IR-node stack
*/
-static inline void init_stack(void) {
+static inline void init_stack(void)
+{
if (stack) {
ARR_RESIZE(ir_node *, stack, 1000);
} else {
/**
* Push a node n onto the IR-node stack.
*/
-static inline void push(ir_node *n) {
+static inline void push(ir_node *n)
+{
if (tos == ARR_LEN(stack)) {
int nlen = ARR_LEN(stack) * 2;
ARR_RESIZE(ir_node *, stack, nlen);
/**
* Pop a node from the IR-node stack and return it.
*/
-static inline ir_node *pop(void) {
+static inline ir_node *pop(void)
+{
ir_node *n = stack[--tos];
mark_irn_not_in_stack(n);
return n;
* The nodes from tos up to n belong to the current loop.
* Removes them from the stack and adds them to the current loop.
*/
-static inline void pop_scc_to_loop(ir_node *n) {
+static inline void pop_scc_to_loop(ir_node *n)
+{
ir_node *m;
do {
/* GL ??? my last son is my grandson??? Removes cfloops with no
ir_nodes in them. Such loops have only another loop as son. (Why
can't they have two loops as sons? Does it never get that far? ) */
-static void close_loop(ir_loop *l) {
+static void close_loop(ir_loop *l)
+{
int last = get_loop_n_elements(l) - 1;
loop_element lelement = get_loop_element(l, last);
ir_loop *last_son = lelement.son;
* Removes and unmarks all nodes up to n from the stack.
* The nodes must be visited once more to assign them to a scc.
*/
-static inline void pop_scc_unmark_visit(ir_node *n) {
+static inline void pop_scc_unmark_visit(ir_node *n)
+{
ir_node *m;
do {
* to the new loop and returns its father.
* The loop is allocated on the outermost_ir_graphs's obstack.
*/
-static ir_loop *new_loop(void) {
+static ir_loop *new_loop(void)
+{
ir_loop *father = current_loop;
ir_loop *son = alloc_loop(father, outermost_ir_graph->obst);
* Clear the backedges for all nodes.
* Called from a walker.
*/
-static inline void init_node(ir_node *n, void *env) {
+static inline void init_node(ir_node *n, void *env)
+{
struct obstack *obst = env;
if (is_Block(n))
set_irn_link(n, new_scc_info(obst));
/**
* Initializes the common global settings for the scc algorithm
*/
-static inline void init_scc_common(void) {
+static inline void init_scc_common(void)
+{
current_dfn = 1;
loop_node_cnt = 0;
init_stack();
* Initializes the scc algorithm for the intraprocedural case.
* Add scc info to every block node.
*/
-static inline void init_scc(ir_graph *irg, struct obstack *obst) {
+static inline void init_scc(ir_graph *irg, struct obstack *obst)
+{
init_scc_common();
irg_walk_graph(irg, init_node, NULL, obst);
}
/**
* Initializes the scc algorithm for the interprocedural case.
*/
-static inline void init_ip_scc(struct obstack *obst) {
+static inline void init_ip_scc(struct obstack *obst)
+{
init_scc_common();
cg_walk(init_node, NULL, obst);
* Condition for breaking the recursion: n is the block
* that gets the initial control flow from the Start node.
*/
-static int is_outermost_StartBlock(ir_node *n) {
+static int is_outermost_StartBlock(ir_node *n)
+{
/* Test whether this is the outermost Start node. If so
recursion must end. */
assert(is_Block(n));
* @param n the block node to check
* @param root only needed for assertion.
*/
-static int is_head(ir_node *n, ir_node *root) {
+static int is_head(ir_node *n, ir_node *root)
+{
int i, arity;
int some_outof_loop = 0, some_in_loop = 0;
(void) root;
* @param n the block node to check
* @param root only needed for assertion.
*/
-static int is_endless_head(ir_node *n, ir_node *root) {
+static int is_endless_head(ir_node *n, ir_node *root)
+{
int i, arity;
int none_outof_loop = 1, some_in_loop = 0;
(void) root;
* Returns index of the predecessor with the smallest dfn number
* greater-equal than limit.
*/
-static int smallest_dfn_pred(ir_node *n, int limit) {
+static int smallest_dfn_pred(ir_node *n, int limit)
+{
int i, index = -2, min = -1;
if (!is_outermost_StartBlock(n)) {
/**
* Returns index of the predecessor with the largest dfn number.
*/
-static int largest_dfn_pred(ir_node *n) {
+static int largest_dfn_pred(ir_node *n)
+{
int i, index = -2, max = -1;
if (!is_outermost_StartBlock(n)) {
* returns the tail of the loop.
* If it finds no backedge returns NULL.
*/
-static ir_node *find_tail(ir_node *n) {
+static ir_node *find_tail(ir_node *n)
+{
ir_node *m;
int i, res_index = -2;
/**
* returns non-zero if l is the outermost loop.
*/
-inline static int is_outermost_loop(ir_loop *l) {
+inline static int is_outermost_loop(ir_loop *l)
+{
return l == get_loop_outer_loop(l);
}
/**
* Walks over all blocks of a graph
*/
-static void cfscc(ir_node *n) {
+static void cfscc(ir_node *n)
+{
int i;
assert(is_Block(n));
}
/* Constructs control flow backedge information for irg. */
-int construct_cf_backedges(ir_graph *irg) {
+int construct_cf_backedges(ir_graph *irg)
+{
ir_graph *rem = current_ir_graph;
ir_loop *head_rem;
ir_node *end = get_irg_end(irg);
return max_loop_depth;
}
-void assure_cf_loop(ir_graph *irg) {
+void assure_cf_loop(ir_graph *irg)
+{
irg_loopinfo_state state = get_irg_loopinfo_state(irg);
if (state != loopinfo_cf_consistent)
}
#ifdef INTERPROCEDURAL_VIEW
-int construct_ip_cf_backedges (void) {
+int construct_ip_cf_backedges (void)
+{
ir_graph *rem = current_ir_graph;
int rem_ipv = get_interprocedural_view();
struct obstack temp;
* Clear the intra- and the interprocedural
* backedge information of a block.
*/
-static void reset_backedges(ir_node *block) {
+static void reset_backedges(ir_node *block)
+{
int rem;
assert(is_Block(block));
* a loop as well as all loop info for all nodes of this loop.
* Recurse into all nested loops.
*/
-static void loop_reset_backedges(ir_loop *l) {
+static void loop_reset_backedges(ir_loop *l)
+{
int i;
reset_backedges(get_loop_node(l, 0));
for (i = 0; i < get_loop_n_nodes(l); ++i)
/* Removes all cfloop information.
Resets all backedges */
-void free_cfloop_information(ir_graph *irg) {
+void free_cfloop_information(ir_graph *irg)
+{
ir_loop *loop = get_irg_loop(irg);
if (loop != NULL) {
loop_reset_backedges(loop);
}
-void free_all_cfloop_information(void) {
+void free_all_cfloop_information(void)
+{
int i;
#ifdef INTERPROCEDURAL_VIEW
int rem = get_interprocedural_view();
*
* This handles correctly Phi nodes.
*/
-static ir_node *get_effective_use_block(ir_node *node, int pos) {
+static ir_node *get_effective_use_block(ir_node *node, int pos)
+{
if (is_Phi(node)) {
/* the effective use of a Phi argument is in its predecessor block */
node = get_nodes_block(node);
* Branch labels are a simple case. We can replace the value
* by a Const with the branch label.
*/
-static void handle_case(ir_node *block, ir_node *irn, long nr, env_t *env) {
+static void handle_case(ir_node *block, ir_node *irn, long nr, env_t *env)
+{
const ir_edge_t *edge, *next;
ir_node *c = NULL;
* @param pnc the true/false condition branch
* @param env statistical environment
*/
-static void handle_modeb(ir_node *block, ir_node *selector, pn_Cond pnc, env_t *env) {
+static void handle_modeb(ir_node *block, ir_node *selector, pn_Cond pnc, env_t *env)
+{
ir_node *cond, *old, *cond_block = NULL, *other_blk = NULL, *con = NULL;
ir_node *c_b = NULL, *c_o = NULL;
const ir_edge_t *edge, *next;
* @param pnc the Compare relation for taking this branch
* @param env statistical environment
*/
-static void handle_if(ir_node *block, ir_node *cmp, pn_Cmp pnc, env_t *env) {
+static void handle_if(ir_node *block, ir_node *cmp, pn_Cmp pnc, env_t *env)
+{
ir_node *left = get_Cmp_left(cmp);
ir_node *right = get_Cmp_right(cmp);
ir_node *cond_block;
/**
* Pre-block-walker: Called for every block to insert Confirm nodes
*/
-static void insert_Confirm_in_block(ir_node *block, void *env) {
+static void insert_Confirm_in_block(ir_node *block, void *env)
+{
ir_node *cond, *proj, *selector;
ir_mode *mode;
/**
* Checks if a node is a non-null Confirm.
*/
-static int is_non_null_Confirm(const ir_node *ptr) {
+static int is_non_null_Confirm(const ir_node *ptr)
+{
for (;;) {
if (! is_Confirm(ptr))
break;
* @param block the block of the dereferencing instruction
* @param env environment
*/
-static void insert_non_null(ir_node *ptr, ir_node *block, env_t *env) {
+static void insert_non_null(ir_node *ptr, ir_node *block, env_t *env)
+{
const ir_edge_t *edge, *next;
ir_node *c = NULL;
/**
* Pre-walker: Called for every node to insert Confirm nodes
*/
-static void insert_Confirm(ir_node *node, void *env) {
+static void insert_Confirm(ir_node *node, void *env)
+{
ir_node *ptr;
switch (get_irn_opcode(node)) {
/*
* Construct Confirm nodes
*/
-void construct_confirms(ir_graph *irg) {
+void construct_confirms(ir_graph *irg)
+{
env_t env;
int edges_active = edges_activated(irg);
} /* construct_confirms */
/* Construct a pass. */
-ir_graph_pass_t *construct_confirms_pass(const char *name) {
+ir_graph_pass_t *construct_confirms_pass(const char *name)
+{
return def_graph_pass(name ? name : "confirm", construct_confirms);
} /* construct_confirms_pass */
/**
* Post-walker: Remove Confirm nodes
*/
-static void rem_Confirm(ir_node *n, void *env) {
+static void rem_Confirm(ir_node *n, void *env)
+{
(void) env;
if (is_Confirm(n)) {
ir_node *value = get_Confirm_value(n);
/*
* Remove all Confirm nodes from a graph.
*/
-void remove_confirms(ir_graph *irg) {
+void remove_confirms(ir_graph *irg)
+{
int rem = get_opt_remove_confirm();
set_opt_remove_confirm(1);
} /* remove_confirms */
/* Construct a pass. */
-ir_graph_pass_t *remove_confirms_pass(const char *name) {
+ir_graph_pass_t *remove_confirms_pass(const char *name)
+{
return def_graph_pass(name ? name : "rem_confirm", remove_confirms);
} /* remove_confirms_pass */
/** Accessing the dominator and post dominator data structures **/
/*--------------------------------------------------------------------*/
-ir_node *get_Block_idom(const ir_node *bl) {
+ir_node *get_Block_idom(const ir_node *bl)
+{
assert(is_Block(bl));
if (get_Block_dom_depth(bl) == -1) {
/* This block is not reachable from Start */
return get_dom_info(bl)->idom;
}
-void set_Block_idom(ir_node *bl, ir_node *n) {
+void set_Block_idom(ir_node *bl, ir_node *n)
+{
ir_dom_info *bli = get_dom_info(bl);
assert(is_Block(bl));
}
}
-ir_node *get_Block_ipostdom(const ir_node *bl) {
+ir_node *get_Block_ipostdom(const ir_node *bl)
+{
assert(is_Block(bl));
if (get_Block_postdom_depth(bl) == -1) {
/* This block is not reachable from Start */
return get_pdom_info(bl)->idom;
}
-void set_Block_ipostdom(ir_node *bl, ir_node *n) {
+void set_Block_ipostdom(ir_node *bl, ir_node *n)
+{
ir_dom_info *bli = get_pdom_info(bl);
assert(is_Block(bl));
}
}
-int get_Block_dom_pre_num(const ir_node *bl) {
+int get_Block_dom_pre_num(const ir_node *bl)
+{
assert(is_Block(bl));
return get_dom_info(bl)->pre_num;
}
-void set_Block_dom_pre_num(ir_node *bl, int num) {
+void set_Block_dom_pre_num(ir_node *bl, int num)
+{
assert(is_Block(bl));
get_dom_info(bl)->pre_num = num;
}
-int get_Block_dom_depth(const ir_node *bl) {
+int get_Block_dom_depth(const ir_node *bl)
+{
assert(is_Block(bl));
return get_dom_info(bl)->dom_depth;
}
-void set_Block_dom_depth(ir_node *bl, int depth) {
+void set_Block_dom_depth(ir_node *bl, int depth)
+{
assert(is_Block(bl));
get_dom_info(bl)->dom_depth = depth;
}
-int get_Block_postdom_pre_num(const ir_node *bl) {
+int get_Block_postdom_pre_num(const ir_node *bl)
+{
assert(is_Block(bl));
return get_pdom_info(bl)->pre_num;
}
-void set_Block_postdom_pre_num(ir_node *bl, int num) {
+void set_Block_postdom_pre_num(ir_node *bl, int num)
+{
assert(is_Block(bl));
get_pdom_info(bl)->pre_num = num;
}
-int get_Block_postdom_depth(const ir_node *bl) {
+int get_Block_postdom_depth(const ir_node *bl)
+{
assert(is_Block(bl));
return get_pdom_info(bl)->dom_depth;
}
-void set_Block_postdom_depth(ir_node *bl, int depth) {
+void set_Block_postdom_depth(ir_node *bl, int depth)
+{
assert(is_Block(bl));
get_pdom_info(bl)->dom_depth = depth;
}
-unsigned get_Block_dom_tree_pre_num(const ir_node *bl) {
+unsigned get_Block_dom_tree_pre_num(const ir_node *bl)
+{
assert(is_Block(bl));
return get_dom_info(bl)->tree_pre_num;
}
-unsigned get_Block_dom_max_subtree_pre_num(const ir_node *bl) {
+unsigned get_Block_dom_max_subtree_pre_num(const ir_node *bl)
+{
assert(is_Block(bl));
return get_dom_info(bl)->max_subtree_pre_num;
}
-unsigned get_Block_pdom_tree_pre_num(const ir_node *bl) {
+unsigned get_Block_pdom_tree_pre_num(const ir_node *bl)
+{
assert(is_Block(bl));
return get_pdom_info(bl)->tree_pre_num;
}
-unsigned get_Block_pdom_max_subtree_pre_num(const ir_node *bl) {
+unsigned get_Block_pdom_max_subtree_pre_num(const ir_node *bl)
+{
assert(is_Block(bl));
return get_pdom_info(bl)->max_subtree_pre_num;
}
/* Check, if a block dominates another block. */
-int block_dominates(const ir_node *a, const ir_node *b) {
+int block_dominates(const ir_node *a, const ir_node *b)
+{
const ir_dom_info *ai, *bi;
if (is_Block(a) && is_Block(b)) {
}
/* Check, if a block strictly dominates another block. */
-int block_strictly_dominates(const ir_node *a, const ir_node *b) {
+int block_strictly_dominates(const ir_node *a, const ir_node *b)
+{
return (a != b) && block_dominates(a, b);
}
/* Returns the smallest common dominator block of two nodes. */
-ir_node *node_smallest_common_dominator(ir_node *a, ir_node *b) {
+ir_node *node_smallest_common_dominator(ir_node *a, ir_node *b)
+{
ir_node *bl_a = is_Block(a) ? a : get_nodes_block(a);
ir_node *bl_b = is_Block(b) ? b : get_nodes_block(b);
ir_node *dom_bl = NULL;
}
/* Returns the smallest common dominator block of all users of a node. */
-ir_node *node_users_smallest_common_dominator(ir_node *irn, int handle_phi) {
+ir_node *node_users_smallest_common_dominator(ir_node *irn, int handle_phi)
+{
int n, j, i = 0, success;
ir_node **user_blocks, *dom_bl;
const ir_edge_t *edge;
/* Get the first node in the list of nodes dominated by a given block. */
-ir_node *get_Block_dominated_first(const ir_node *bl) {
+ir_node *get_Block_dominated_first(const ir_node *bl)
+{
assert(is_Block(bl));
return get_dom_info(bl)->first;
}
/* Get the next node in a list of nodes which are dominated by some
* other node. */
-ir_node *get_Block_dominated_next(const ir_node *bl) {
+ir_node *get_Block_dominated_next(const ir_node *bl)
+{
assert(is_Block(bl));
return get_dom_info(bl)->next;
}
/* Check, if a block post dominates another block. */
-int block_postdominates(const ir_node *a, const ir_node *b) {
+int block_postdominates(const ir_node *a, const ir_node *b)
+{
const ir_dom_info *ai, *bi;
if (is_Block(a) && is_Block(b)) {
}
/* Check, if a block strictly post dominates another block. */
-int block_strictly_postdominates(const ir_node *a, const ir_node *b) {
+int block_strictly_postdominates(const ir_node *a, const ir_node *b)
+{
return (a != b) && block_postdominates(a, b);
}
/* Get the first node in the list of nodes post dominated by a given block. */
-ir_node *get_Block_postdominated_first(const ir_node *bl) {
+ir_node *get_Block_postdominated_first(const ir_node *bl)
+{
assert(is_Block(bl));
return get_pdom_info(bl)->first;
}
/* Get the next node in a list of nodes which are post dominated by some
* other node. */
-ir_node *get_Block_postdominated_next(const ir_node *bl) {
+ir_node *get_Block_postdominated_next(const ir_node *bl)
+{
assert(is_Block(bl));
return get_pdom_info(bl)->next;
}
/**
* count the number of blocks and clears the post dominance info
*/
-static void count_and_init_blocks_pdom(ir_node *bl, void *env) {
+static void count_and_init_blocks_pdom(ir_node *bl, void *env)
+{
int *n_blocks = (int *) env;
(*n_blocks) ++;
}
}
-static void dom_compress(tmp_dom_info *v) {
+static void dom_compress(tmp_dom_info *v)
+{
assert (v->ancestor);
if (v->ancestor->ancestor) {
dom_compress (v->ancestor);
* if V is a root, return v, else return the vertex u, not being the
* root, with minimum u->semi on the path from v to its root.
*/
-inline static tmp_dom_info *dom_eval(tmp_dom_info *v) {
+inline static tmp_dom_info *dom_eval(tmp_dom_info *v)
+{
if (!v->ancestor) return v;
dom_compress (v);
return v->label;
}
/** make V W's ancestor */
-inline static void dom_link(tmp_dom_info *v, tmp_dom_info *w) {
+inline static void dom_link(tmp_dom_info *v, tmp_dom_info *w)
+{
w->ancestor = v;
}
/**
* Walker: count the number of blocks and clears the dominance info
*/
-static void count_and_init_blocks_dom(ir_node *bl, void *env) {
+static void count_and_init_blocks_dom(ir_node *bl, void *env)
+{
int *n_blocks = (int *) env;
(*n_blocks) ++;
* @param irg the graph
* @param pre a walker function that will be called for every block in the graph
*/
-static int init_construction(ir_graph *irg, irg_walk_func *pre) {
+static int init_construction(ir_graph *irg, irg_walk_func *pre)
+{
ir_graph *rem = current_ir_graph;
ir_node *end;
int arity;
/* Computes the dominator trees. Sets a flag in irg to "dom_consistent".
If the control flow of the graph is changed this flag must be set to
"dom_inconsistent". */
-void compute_doms(ir_graph *irg) {
+void compute_doms(ir_graph *irg)
+{
ir_graph *rem = current_ir_graph;
int n_blocks, used, i, j;
tmp_dom_info *tdi_list; /* Ein Golf? */
current_ir_graph = rem;
}
-void assure_doms(ir_graph *irg) {
+void assure_doms(ir_graph *irg)
+{
if (get_irg_dom_state(irg) != dom_consistent)
compute_doms(irg);
}
-void free_dom(ir_graph *irg) {
+void free_dom(ir_graph *irg)
+{
/* Update graph state */
assert(get_irg_phase_state(irg) != phase_building);
irg->dom_state = dom_none;
/* Computes the post dominator trees. Sets a flag in irg to "dom_consistent".
If the control flow of the graph is changed this flag must be set to
"dom_inconsistent". */
-void compute_postdoms(ir_graph *irg) {
+void compute_postdoms(ir_graph *irg)
+{
ir_graph *rem = current_ir_graph;
int n_blocks, used, i, j;
tmp_dom_info *tdi_list;
current_ir_graph = rem;
}
-void assure_postdoms(ir_graph *irg) {
+void assure_postdoms(ir_graph *irg)
+{
if (get_irg_postdom_state(irg) != dom_consistent)
compute_postdoms(irg);
}
-void free_postdom(ir_graph *irg) {
+void free_postdom(ir_graph *irg)
+{
/* Update graph state */
assert(get_irg_phase_state(irg) != phase_building);
irg->pdom_state = dom_none;
ir_node *start_block; /**< the start block of the current graph */
} env_t;
-int (is_ir_extbb)(const void *thing) {
+int (is_ir_extbb)(const void *thing)
+{
return _is_ir_extbb(thing);
}
/**
* allocate a new extended block header.
*/
-static void allocate_extblk(ir_node *block, env_t *env) {
+static void allocate_extblk(ir_node *block, env_t *env)
+{
ir_extblk *extblk = OALLOC(env->obst, ir_extblk);
extblk->kind = k_ir_extblk;
* Returns the number of block successors.
* we are interested only in 1, 2 and >2.
*/
-static int get_block_n_succs(ir_node *block) {
+static int get_block_n_succs(ir_node *block)
+{
if (edges_activated(current_ir_graph)) {
const ir_edge_t *edge;
/**
* Pre block-walker. Calculates the extended block info.
*/
-static void pre_walk_calc_extbb(ir_node *block, void *ctx) {
+static void pre_walk_calc_extbb(ir_node *block, void *ctx)
+{
int n = get_Block_n_cfgpreds(block);
env_t *env = ctx;
/*
* Compute the extended basic blocks for a graph
*/
-void compute_extbb(ir_graph *irg) {
+void compute_extbb(ir_graph *irg)
+{
env_t env;
ir_extblk *extbb, *next;
}
/* free all extended block info. */
-void free_extbb(ir_graph *irg) {
+void free_extbb(ir_graph *irg)
+{
if (irg->extbb_obst) {
obstack_free(irg->extbb_obst, NULL);
xfree(irg->extbb_obst);
}
/* Return the extended block of a node. */
-ir_extblk *get_nodes_extbb(const ir_node *node) {
+ir_extblk *get_nodes_extbb(const ir_node *node)
+{
const ir_node *block = is_Block(node) ? node : get_nodes_block(node);
return get_Block_extbb(block);
}
/* Gets the visited counter of an extended block. */
-ir_visited_t (get_extbb_visited)(const ir_extblk *blk) {
+ir_visited_t (get_extbb_visited)(const ir_extblk *blk)
+{
return _get_extbb_visited(blk);
}
/* Sets the visited counter of an extended block. */
-void (set_extbb_visited)(ir_extblk *blk, ir_visited_t visited) {
+void (set_extbb_visited)(ir_extblk *blk, ir_visited_t visited)
+{
_set_extbb_visited(blk, visited);
}
/* Mark an extended block as visited in a graph. */
-void (mark_extbb_visited)(ir_extblk *blk) {
+void (mark_extbb_visited)(ir_extblk *blk)
+{
_mark_extbb_visited(blk);
}
/* Returns non-zero if an extended block was visited. */
-int (extbb_visited)(const ir_extblk *blk) {
+int (extbb_visited)(const ir_extblk *blk)
+{
return _extbb_visited(blk);
}
/* Returns non-zero if an extended block was NOT visited. */
-int (extbb_not_visited)(const ir_extblk *blk) {
+int (extbb_not_visited)(const ir_extblk *blk)
+{
return _extbb_not_visited(blk);
}
/* Returns the link field of an extended block. */
-void *(get_extbb_link)(const ir_extblk *blk) {
+void *(get_extbb_link)(const ir_extblk *blk)
+{
return _get_extbb_link(blk);
}
/* Sets the link field of an extended block. */
-void (set_extbb_link)(ir_extblk *blk, void *link) {
+void (set_extbb_link)(ir_extblk *blk, void *link)
+{
_set_extbb_link(blk, link);
}
/* Return the number of basic blocks of an extended block */
-int (get_extbb_n_blocks)(const ir_extblk *blk) {
+int (get_extbb_n_blocks)(const ir_extblk *blk)
+{
return _get_extbb_n_blocks(blk);
}
/* Return the i'th basic block of an extended block */
-ir_node *(get_extbb_block)(const ir_extblk *blk, int pos) {
+ir_node *(get_extbb_block)(const ir_extblk *blk, int pos)
+{
return _get_extbb_block(blk, pos);
}
/* Return the leader basic block of an extended block. */
-ir_node *(get_extbb_leader)(const ir_extblk *blk) {
+ir_node *(get_extbb_leader)(const ir_extblk *blk)
+{
return _get_extbb_leader(blk);
}
/* Return the node number of an extended block. */
-long get_extbb_node_nr(const ir_extblk *blk) {
+long get_extbb_node_nr(const ir_extblk *blk)
+{
return get_irn_node_nr(get_extbb_leader(blk));
}
-static void irg_extblock_walk_2(ir_extblk *blk, extbb_walk_func *pre, extbb_walk_func *post, void *env) {
+static void irg_extblock_walk_2(ir_extblk *blk, extbb_walk_func *pre, extbb_walk_func *post, void *env)
+{
int i;
ir_node *node;
/* walks only over extended Block nodes in the graph. Has its own visited
flag, so that it can be interleaved with the other walker. */
-void irg_extblock_walk(ir_extblk *blk, extbb_walk_func *pre, extbb_walk_func *post, void *env) {
+void irg_extblock_walk(ir_extblk *blk, extbb_walk_func *pre, extbb_walk_func *post, void *env)
+{
ir_node *pred, *start_bl = get_irg_start_block(current_ir_graph);
ir_extblk *start_blk = get_Block_extbb(start_bl);
int i;
}
/* Walks only over reachable Extended Basic Block nodes in the graph. */
-void irg_extblock_walk_graph(ir_graph *irg, extbb_walk_func *pre, extbb_walk_func *post, void *env) {
+void irg_extblock_walk_graph(ir_graph *irg, extbb_walk_func *pre, extbb_walk_func *post, void *env)
+{
ir_node *endbl = get_irg_end_block(irg);
ir_extblk *blk = get_Block_extbb(endbl);
ir_graph *rem = current_ir_graph;
* Returns the number of block successors.
* we are interested only in 1, 2 and >2.
*/
-static int get_block_n_succs(ir_node *block) {
+static int get_block_n_succs(ir_node *block)
+{
if (edges_activated(current_ir_graph)) {
const ir_edge_t *edge;
/*
* Compute the extended basic blocks for a graph
*/
-void compute_extbb_execfreqs(ir_graph *irg, ir_exec_freq *execfreqs) {
+void compute_extbb_execfreqs(ir_graph *irg, ir_exec_freq *execfreqs)
+{
env_t env;
ir_extblk *extbb, *next;
ir_node *endblock;
static unsigned global_mem_disamgig_opt = aa_opt_no_opt;
/* Returns a human readable name for an alias relation. */
-const char *get_ir_alias_relation_name(ir_alias_relation rel) {
+const char *get_ir_alias_relation_name(ir_alias_relation rel)
+{
#define X(a) case a: return #a
switch (rel) {
X(ir_no_alias);
}
/* Get the memory disambiguator options for a graph. */
-unsigned get_irg_memory_disambiguator_options(const ir_graph *irg) {
+unsigned get_irg_memory_disambiguator_options(const ir_graph *irg)
+{
unsigned opt = irg->mem_disambig_opt;
if (opt & aa_opt_inherited)
return global_mem_disamgig_opt;
} /* get_irg_memory_disambiguator_options */
/* Set the memory disambiguator options for a graph. */
-void set_irg_memory_disambiguator_options(ir_graph *irg, unsigned options) {
+void set_irg_memory_disambiguator_options(ir_graph *irg, unsigned options)
+{
irg->mem_disambig_opt = options & ~aa_opt_inherited;
} /* set_irg_memory_disambiguator_options */
/* Set the global disambiguator options for all graphs not having local options. */
-void set_irp_memory_disambiguator_options(unsigned options) {
+void set_irp_memory_disambiguator_options(unsigned options)
+{
global_mem_disamgig_opt = options;
} /* set_irp_memory_disambiguator_options */
*
* @return the base address.
*/
-static ir_node *find_base_adr(const ir_node *sel, ir_entity **pEnt) {
+static ir_node *find_base_adr(const ir_node *sel, ir_entity **pEnt)
+{
ir_node *ptr = get_Sel_ptr(sel);
while (is_Sel(ptr)) {
*
* @return ir_no_alias if the Const is greater, ir_may_alias else
*/
-static ir_alias_relation check_const(const ir_node *cns, int size) {
+static ir_alias_relation check_const(const ir_node *cns, int size)
+{
tarval *tv = get_Const_tarval(cns);
tarval *tv_size;
* ir_no_alias iff they ALWAYS differ more than size
* ir_may_alias else
*/
-static ir_alias_relation different_index(const ir_node *idx1, const ir_node *idx2, int size) {
+static ir_alias_relation different_index(const ir_node *idx1, const ir_node *idx2, int size)
+{
if (idx1 == idx2)
return ir_sure_alias;
if (is_Const(idx1) && is_Const(idx2)) {
* @param adr1 The first address.
* @param adr2 The second address.
*/
-static ir_alias_relation different_sel_offsets(const ir_node *sel1, const ir_node *sel2) {
+static ir_alias_relation different_sel_offsets(const ir_node *sel1, const ir_node *sel2)
+{
/* seems to be broken */
(void) sel1;
(void) sel2;
*
* @param node the Proj node to test
*/
-static int is_malloc_Result(const ir_node *node) {
+static int is_malloc_Result(const ir_node *node)
+{
node = get_Proj_pred(node);
if (! is_Proj(node))
return 0;
/**
* If adr represents a Bitfield Sel, skip it
*/
-static const ir_node *skip_Bitfield_Sels(const ir_node *adr) {
+static const ir_node *skip_Bitfield_Sels(const ir_node *adr)
+{
if (is_Sel(adr)) {
ir_entity *ent = get_Sel_entity(adr);
ir_type *bf_type = get_entity_type(ent);
} /* get_alias_relation */
/* Set a source language specific memory disambiguator function. */
-void set_language_memory_disambiguator(DISAMBIGUATOR_FUNC func) {
+void set_language_memory_disambiguator(DISAMBIGUATOR_FUNC func)
+{
language_disambuigator = func;
} /* set_language_memory_disambiguator */
/**
* Compare two relation cache entries.
*/
-static int cmp_mem_disambig_entry(const void *elt, const void *key, size_t size) {
+static int cmp_mem_disambig_entry(const void *elt, const void *key, size_t size)
+{
const mem_disambig_entry *p1 = elt;
const mem_disambig_entry *p2 = key;
(void) size;
/**
* Initialize the relation cache.
*/
-void mem_disambig_init(void) {
+void mem_disambig_init(void)
+{
result_cache = new_set(cmp_mem_disambig_entry, 8);
} /* mem_disambig_init */
} /* get_alias_relation_ex */
/* Free the relation cache. */
-void mem_disambig_term(void) {
+void mem_disambig_term(void)
+{
if (result_cache != NULL) {
del_set(result_cache);
result_cache = NULL;
*
* @return non-zero if the Load/Store is a hidden cast, zero else
*/
-static int is_hidden_cast(const ir_mode *mode, const ir_mode *ent_mode) {
+static int is_hidden_cast(const ir_mode *mode, const ir_mode *ent_mode)
+{
if (ent_mode == NULL)
return false;
*
* @param irn the node
*/
-static ir_entity_usage determine_entity_usage(const ir_node *irn, ir_entity *entity) {
+static ir_entity_usage determine_entity_usage(const ir_node *irn, ir_entity *entity)
+{
int i;
ir_mode *emode, *mode;
ir_node *value;
/**
* Update the usage flags of all frame entities.
*/
-static void analyse_irg_entity_usage(ir_graph *irg) {
+static void analyse_irg_entity_usage(ir_graph *irg)
+{
ir_type *ft = get_irg_frame_type(irg);
ir_node *irg_frame;
int i, j, k, static_link_arg;
irg->entity_usage_state = ir_entity_usage_computed;
}
-ir_entity_usage_computed_state get_irg_entity_usage_state(const ir_graph *irg) {
+ir_entity_usage_computed_state get_irg_entity_usage_state(const ir_graph *irg)
+{
return irg->entity_usage_state;
}
-void set_irg_entity_usage_state(ir_graph *irg, ir_entity_usage_computed_state state) {
+void set_irg_entity_usage_state(ir_graph *irg, ir_entity_usage_computed_state state)
+{
irg->entity_usage_state = state;
}
*
* @param tp a compound type
*/
-static void check_initializers(ir_type *tp) {
+static void check_initializers(ir_type *tp)
+{
int i;
for (i = get_compound_n_members(tp) - 1; i >= 0; --i) {
*
* @param tp a compound type
*/
-static void print_entity_usage_flags(ir_type *tp) {
+static void print_entity_usage_flags(ir_type *tp)
+{
int i;
for (i = get_compound_n_members(tp) - 1; i >= 0; --i) {
ir_entity *ent = get_compound_member(tp, i);
/**
* Post-walker: check for global entity address
*/
-static void check_global_address(ir_node *irn, void *env) {
+static void check_global_address(ir_node *irn, void *env)
+{
ir_node *tls = env;
ir_entity *ent;
ir_entity_usage flags;
/**
* Update the entity usage flags of all global entities.
*/
-static void analyse_irp_globals_entity_usage(void) {
+static void analyse_irp_globals_entity_usage(void)
+{
int i;
ir_segment_t s;
}
/* Returns the current address taken state of the globals. */
-ir_entity_usage_computed_state get_irp_globals_entity_usage_state(void) {
+ir_entity_usage_computed_state get_irp_globals_entity_usage_state(void)
+{
return irp->globals_entity_usage_state;
}
/* Sets the current address taken state of the graph. */
-void set_irp_globals_entity_usage_state(ir_entity_usage_computed_state state) {
+void set_irp_globals_entity_usage_state(ir_entity_usage_computed_state state)
+{
irp->globals_entity_usage_state = state;
}
/* Assure that the address taken flag is computed for the globals. */
-void assure_irp_globals_entity_usage_computed(void) {
+void assure_irp_globals_entity_usage_computed(void)
+{
if (irp->globals_entity_usage_state != ir_entity_usage_not_computed)
return;
analyse_irp_globals_entity_usage();
}
-void firm_init_memory_disambiguator(void) {
+void firm_init_memory_disambiguator(void)
+{
FIRM_DBG_REGISTER(dbg, "firm.ana.irmemory");
FIRM_DBG_REGISTER(dbgcall, "firm.opt.cc");
}
* Walker: clone all call types of Calls to methods having the
* mtp_property_private property set.
*/
-static void update_calls_to_private(ir_node *call, void *env) {
+static void update_calls_to_private(ir_node *call, void *env)
+{
(void) env;
if (is_Call(call)) {
ir_node *ptr = get_Call_ptr(call);
} /* mark_private_methods */
/* create a pass for mark_private_methods() */
-ir_prog_pass_t *mark_private_methods_pass(const char *name) {
+ir_prog_pass_t *mark_private_methods_pass(const char *name)
+{
return def_prog_pass(name ? name : "mark_private_methods", mark_private_methods);
} /* mark_private_methods_pass */
#ifdef DEBUG_libfirm
/** Clear the outs of a node */
-static void reset_outs(ir_node *node, void *unused) {
+static void reset_outs(ir_node *node, void *unused)
+{
(void) unused;
node->out = NULL;
node->out_valid = 0;
}
/* returns the number of successors of the node: */
-int get_irn_n_outs(const ir_node *node) {
+int get_irn_n_outs(const ir_node *node)
+{
assert(node && node->kind == k_ir_node);
#ifdef DEBUG_libfirm
/* assert(node->out_valid); */
}
/* Access successor n */
-ir_node *get_irn_out(const ir_node *def, int pos) {
+ir_node *get_irn_out(const ir_node *def, int pos)
+{
assert(pos >= 0 && pos < get_irn_n_outs(def));
#ifdef DEBUG_libfirm
/* assert(def->out_valid); */
}
/* Access successor n */
-ir_node *get_irn_out_ex(const ir_node *def, int pos, int *in_pos) {
+ir_node *get_irn_out_ex(const ir_node *def, int pos, int *in_pos)
+{
assert(pos >= 0 && pos < get_irn_n_outs(def));
#ifdef DEBUG_libfirm
/* assert(def->out_valid); */
return def->out[pos+1].use;
}
-void set_irn_out(ir_node *def, int pos, ir_node *use, int in_pos) {
+void set_irn_out(ir_node *def, int pos, ir_node *use, int in_pos)
+{
assert(def && use);
assert(pos >= 0 && pos < get_irn_n_outs(def));
#ifdef DEBUG_libfirm
}
/* Return the number of control flow successors, ignore keep-alives. */
-int get_Block_n_cfg_outs(const ir_node *bl) {
+int get_Block_n_cfg_outs(const ir_node *bl)
+{
int i, n_cfg_outs = 0;
assert(bl && is_Block(bl));
#ifdef DEBUG_libfirm
}
/* Return the number of control flow successors, honor keep-alives. */
-int get_Block_n_cfg_outs_ka(const ir_node *bl) {
+int get_Block_n_cfg_outs_ka(const ir_node *bl)
+{
int i, n_cfg_outs = 0;
assert(bl && is_Block(bl));
#ifdef DEBUG_libfirm
}
/* Access predecessor n, ignore keep-alives. */
-ir_node *get_Block_cfg_out(const ir_node *bl, int pos) {
+ir_node *get_Block_cfg_out(const ir_node *bl, int pos)
+{
int i;
assert(bl && is_Block(bl));
#ifdef DEBUG_libfirm
}
/* Access predecessor n, honor keep-alives. */
-ir_node *get_Block_cfg_out_ka(const ir_node *bl, int pos) {
+ir_node *get_Block_cfg_out_ka(const ir_node *bl, int pos)
+{
int i, n_outs;
assert(bl && is_Block(bl));
#ifdef DEBUG_libfirm
/** Returns the amount of out edges for not yet visited successors. */
-static int _count_outs(ir_node *n) {
+static int _count_outs(ir_node *n)
+{
int start, i, res, irn_arity;
mark_irn_visited(n);
/** Returns the amount of out edges for not yet visited successors.
* This version handles some special nodes like irg_frame, irg_args etc.
*/
-static int count_outs(ir_graph *irg) {
+static int count_outs(ir_graph *irg)
+{
ir_node *n;
int i, res;
*
* @return The next free address
*/
-static ir_def_use_edge *_set_out_edges(ir_node *use, ir_def_use_edge *free) {
+static ir_def_use_edge *_set_out_edges(ir_node *use, ir_def_use_edge *free)
+{
int n_outs, start, i, irn_arity, pos;
mark_irn_visited(use);
*
* @return The next free address
*/
-static ir_def_use_edge *set_out_edges(ir_graph *irg, ir_def_use_edge *free) {
+static ir_def_use_edge *set_out_edges(ir_graph *irg, ir_def_use_edge *free)
+{
ir_node *n;
int i, n_outs;
}
/* compute the outs for a given graph */
-void compute_irg_outs(ir_graph *irg) {
+void compute_irg_outs(ir_graph *irg)
+{
ir_graph *rem = current_ir_graph;
int n_out_edges = 0;
ir_def_use_edge *end = NULL; /* Only for debugging */
current_ir_graph = rem;
}
-void assure_irg_outs(ir_graph *irg) {
+void assure_irg_outs(ir_graph *irg)
+{
if (get_irg_outs_state(irg) != outs_consistent)
compute_irg_outs(irg);
}
-void compute_irp_outs(void) {
+void compute_irp_outs(void)
+{
int i;
for (i = get_irp_n_irgs() -1; i >= 0; --i)
compute_irg_outs(get_irp_irg(i));
}
-void free_irp_outs(void) {
+void free_irp_outs(void)
+{
int i;
for (i = get_irp_n_irgs() -1; i >= 0; --i)
free_irg_outs(get_irp_irg(i));
* Inits the number of outedges for each node
* before counting.
*/
-static void init_count(ir_node * node, void *env) {
+static void init_count(ir_node * node, void *env)
+{
(void) env;
node->out = (ir_node **) 1; /* 1 for the array size */
}
* and adds the current arity to the overall count,
* which is saved in "env"
*/
-static void node_arity_count(ir_node * node, void * env) {
+static void node_arity_count(ir_node * node, void * env)
+{
int *anz = (int *) env, arity, n_outs, i, start;
ir_node *succ;
* Inits all nodes for setting the outedges
* Returns the overall count of edges
*/
-int count_ip_outs(void) {
+int count_ip_outs(void)
+{
int res = 0;
cg_walk(init_count, node_arity_count, &res);
* in which the outedges are written later.
* The current array start is transported in env
*/
-static void set_array_pointer(ir_node *node, void *env) {
+static void set_array_pointer(ir_node *node, void *env)
+{
int n_outs;
ir_node ***free = (ir_node ***) env;
* Adds an outedge from the predecessor to the
* current node.
*/
-static void set_out_pointer(ir_node * node, void *env) {
+static void set_out_pointer(ir_node * node, void *env)
+{
int i, arity = get_irn_arity(node);
ir_node *succ;
int start = (!is_Block(node)) ? -1 : 0;
/*
* Sets the outedges for all nodes.
*/
-void set_ip_outs(void) {
+void set_ip_outs(void)
+{
ir_node **outedge_array = get_irp_ip_outedges();
cg_walk(set_array_pointer, set_out_pointer, (void *) &outedge_array);
}
* outedges and fills this outedge array in interprocedural
* view!
*/
-void compute_ip_outs(void) {
+void compute_ip_outs(void)
+{
int n_out_edges;
ir_node **out_edges;
set_ip_outs();
}
-void free_ip_outs(void) {
+void free_ip_outs(void)
+{
ir_node **out_edges = get_irp_ip_outedges();
if (out_edges != NULL) {
free(out_edges);
#endif
-void free_irg_outs(ir_graph *irg) {
+void free_irg_outs(ir_graph *irg)
+{
/* current_ir_graph->outs_state = outs_none; */
irg->outs_state = outs_none;
#endif /* defined DEBUG_libfirm */
}
-static void check_out_edges(ir_node *irn, void *env) {
+static void check_out_edges(ir_node *irn, void *env)
+{
int i, j, pos;
int *pError = env;
int error = *pError;
}
/* verify outs edges. */
-void verify_outs(ir_graph *irg) {
+void verify_outs(ir_graph *irg)
+{
int errors = 0;
irg_walk_graph(irg, NULL, check_out_edges, &errors);
if (errors > 0)
/**
* Allocates a new SCC info on the given obstack.
*/
-static inline scc_info *new_scc_info(struct obstack *obst) {
+static inline scc_info *new_scc_info(struct obstack *obst)
+{
return OALLOCZ(obst, scc_info);
}
/**
* Mark node n being on the SCC stack.
*/
-static inline void mark_irn_in_stack(ir_node *n) {
+static inline void mark_irn_in_stack(ir_node *n)
+{
scc_info *scc = get_irn_link(n);
assert(scc);
scc->in_stack = 1;
/**
* Mark node n NOT being on the SCC stack.
*/
-static inline void mark_irn_not_in_stack(ir_node *n) {
+static inline void mark_irn_not_in_stack(ir_node *n)
+{
scc_info *scc = get_irn_link(n);
assert(scc);
scc->in_stack = 0;
/**
* Checks if a node is on the SCC stack.
*/
-static inline int irn_is_in_stack(ir_node *n) {
+static inline int irn_is_in_stack(ir_node *n)
+{
scc_info *scc = get_irn_link(n);
assert(scc);
return scc->in_stack;
/**
* Sets the uplink number for a node.
*/
-static inline void set_irn_uplink(ir_node *n, int uplink) {
+static inline void set_irn_uplink(ir_node *n, int uplink)
+{
scc_info *scc = get_irn_link(n);
assert(scc);
scc->uplink = uplink;
/**
* Returns the uplink number for a node.
*/
-static int get_irn_uplink(ir_node *n) {
+static int get_irn_uplink(ir_node *n)
+{
scc_info *scc = get_irn_link(n);
assert(scc);
return scc->uplink;
/**
* Sets the depth-first-search number for a node.
*/
-static inline void set_irn_dfn(ir_node *n, int dfn) {
+static inline void set_irn_dfn(ir_node *n, int dfn)
+{
scc_info *scc = get_irn_link(n);
assert(scc);
scc->dfn = dfn;
/**
* Returns the depth-first-search number of a node.
*/
-static int get_irn_dfn(ir_node *n) {
+static int get_irn_dfn(ir_node *n)
+{
scc_info *scc = get_irn_link(n);
assert(scc);
return scc->dfn;
}
#if 0
-static ir_loop *find_nodes_loop(ir_node *n, ir_loop *l) {
+static ir_loop *find_nodes_loop(ir_node *n, ir_loop *l)
+{
int i;
ir_loop *res = NULL;
}
/* @@@ temporary implementation, costly!!! */
-ir_loop * get_irn_loop(ir_node *n) {
+ir_loop * get_irn_loop(ir_node *n)
+{
ir_loop *l = get_irg_loop(current_ir_graph);
l = find_nodes_loop(n, l);
return l;
/**
* initializes the stack
*/
-static inline void init_stack(void) {
+static inline void init_stack(void)
+{
if (stack) {
ARR_RESIZE(ir_node *, stack, 1000);
} else {
/**
* Frees the stack.
*/
-static void finish_stack(void) {
+static void finish_stack(void)
+{
DEL_ARR_F(stack);
stack = NULL;
}
*
* @param n The node to push
*/
-static inline void push(ir_node *n) {
+static inline void push(ir_node *n)
+{
if (tos == ARR_LEN(stack)) {
int nlen = ARR_LEN(stack) * 2;
ARR_RESIZE(ir_node *, stack, nlen);
*
* @return The topmost node
*/
-static inline ir_node *pop(void) {
+static inline ir_node *pop(void)
+{
ir_node *n = stack[--tos];
mark_irn_not_in_stack(n);
return n;
* The nodes up to n belong to the current loop.
* Removes them from the stack and adds them to the current loop.
*/
-static inline void pop_scc_to_loop(ir_node *n) {
+static inline void pop_scc_to_loop(ir_node *n)
+{
ir_node *m;
int i = 0;
/* GL ??? my last son is my grandson??? Removes loops with no
ir_nodes in them. Such loops have only another loop as son. (Why
can't they have two loops as sons? Does it never get that far? ) */
-static void close_loop(ir_loop *l) {
+static void close_loop(ir_loop *l)
+{
int last = get_loop_n_elements(l) - 1;
loop_element lelement = get_loop_element(l, last);
ir_loop *last_son = lelement.son;
/* Removes and unmarks all nodes up to n from the stack.
The nodes must be visited once more to assign them to a scc. */
-static inline void pop_scc_unmark_visit(ir_node *n) {
+static inline void pop_scc_unmark_visit(ir_node *n)
+{
ir_node *m = NULL;
while (m != n) {
/* Allocates a new loop as son of current_loop. Sets current_loop
to the new loop and returns the father. */
-static ir_loop *new_loop(void) {
+static ir_loop *new_loop(void)
+{
ir_loop *father = current_loop;
ir_loop *son = alloc_loop(father, outermost_ir_graph->obst);
/* Initialization steps. **********************************************/
-static inline void init_node(ir_node *n, void *env) {
+static inline void init_node(ir_node *n, void *env)
+{
struct obstack *obst = env;
set_irn_link(n, new_scc_info(obst));
clear_backedges(n);
}
-static inline void init_scc_common(void) {
+static inline void init_scc_common(void)
+{
current_dfn = 1;
loop_node_cnt = 0;
init_stack();
}
-static inline void init_scc(ir_graph *irg, struct obstack *obst) {
+static inline void init_scc(ir_graph *irg, struct obstack *obst)
+{
init_scc_common();
irg_walk_graph(irg, init_node, NULL, obst);
/*
}
#ifdef INTERPROCEDURAL_VIEW
-static inline void init_ip_scc(struct obstack *obst) {
+static inline void init_ip_scc(struct obstack *obst)
+{
init_scc_common();
cg_walk(init_node, NULL, obst);
*
* This is the condition for breaking the scc recursion.
*/
-static int is_outermost_Start(ir_node *n) {
+static int is_outermost_Start(ir_node *n)
+{
/* Test whether this is the outermost Start node. */
if (is_Block(n) && get_Block_n_cfgpreds(n) == 1) {
ir_node *pred = skip_Proj(get_Block_cfgpred(n, 0));
}
/* When to walk from nodes to blocks. Only for Control flow operations? */
-static inline int get_start_index(ir_node *n) {
+static inline int get_start_index(ir_node *n)
+{
#undef BLOCK_BEFORE_NODE
#define BLOCK_BEFORE_NODE 1
*
* @param n the node to check
*/
-static inline int is_possible_loop_head(ir_node *n) {
+static inline int is_possible_loop_head(ir_node *n)
+{
ir_op *op = get_irn_op(n);
return ((op == op_Block) ||
(op == op_Phi) ||
* @param n the node to check
* @param root only needed for assertion.
*/
-static int is_head(ir_node *n, ir_node *root) {
+static int is_head(ir_node *n, ir_node *root)
+{
int i, arity;
int some_outof_loop = 0, some_in_loop = 0;
* @param n the node to check
* @param root only needed for assertion.
*/
-static int is_endless_head(ir_node *n, ir_node *root) {
+static int is_endless_head(ir_node *n, ir_node *root)
+{
int i, arity;
int none_outof_loop = 1, some_in_loop = 0;
/** Returns index of the predecessor with the smallest dfn number
greater-equal than limit. */
-static int smallest_dfn_pred(ir_node *n, int limit) {
+static int smallest_dfn_pred(ir_node *n, int limit)
+{
int i, index = -2, min = -1;
if (!is_outermost_Start(n)) {
/**
* Returns index of the predecessor with the largest dfn number.
*/
-static int largest_dfn_pred(ir_node *n) {
+static int largest_dfn_pred(ir_node *n)
+{
int i, index = -2, max = -1;
if (!is_outermost_Start(n)) {
*
* @param n A node where uplink == dfn.
*/
-static ir_node *find_tail(ir_node *n) {
+static ir_node *find_tail(ir_node *n)
+{
ir_node *m;
int i, res_index = -2;
on the stack.
- -------------------------------------------------------------- */
-int search_endproj_in_stack(ir_node *start_block) {
+int search_endproj_in_stack(ir_node *start_block)
+{
int i, j;
assert(is_Block(start_block));
for(i = tos - 1; i >= 0; --i)
static pmap *projx_link = NULL;
-void link_to_reg_end (ir_node *n, void *env) {
+void link_to_reg_end (ir_node *n, void *env)
+{
if(get_irn_op(n) == op_Proj &&
get_irn_mode(n) == mode_X &&
get_irn_op(get_irn_n(n, 0)) == op_EndReg) {
}
}
-void set_projx_link(ir_node *cb_projx, ir_node *end_projx) {
+void set_projx_link(ir_node *cb_projx, ir_node *end_projx)
+{
if(projx_link == NULL)
projx_link = pmap_create();
pmap_insert(projx_link, (void *)cb_projx, (void *)end_projx);
}
-ir_node *get_projx_link(ir_node *cb_projx) {
+ir_node *get_projx_link(ir_node *cb_projx)
+{
return((ir_node *) pmap_get(projx_link, (void *)cb_projx));
}
#endif
-static inline int is_outermost_loop(ir_loop *l) {
+static inline int is_outermost_loop(ir_loop *l)
+{
return l == get_loop_outer_loop(l);
}
*
* @param n node to start
*/
-static void scc(ir_node *n) {
+static void scc(ir_node *n)
+{
if (irn_visited_else_mark(n))
return;
}
#ifdef INTERPROCEDURAL_VIEW
-static void my_scc(ir_node *n) {
+static void my_scc(ir_node *n)
+{
int i;
if (irn_visited_else_mark(n))
return;
/* Constructs backedge information for irg. In interprocedural view constructs
backedges for all methods called by irg, too. */
-int construct_backedges(ir_graph *irg) {
+int construct_backedges(ir_graph *irg)
+{
ir_graph *rem = current_ir_graph;
ir_loop *head_rem;
struct obstack temp;
#ifdef INTERPROCEDURAL_VIEW
-int construct_ip_backedges(void) {
+int construct_ip_backedges(void)
+{
ir_graph *rem = current_ir_graph;
int rem_ipv = get_interprocedural_view();
int i;
return max_loop_depth;
}
-void my_construct_ip_backedges(void) {
+void my_construct_ip_backedges(void)
+{
ir_graph *rem = current_ir_graph;
int rem_ipv = get_interprocedural_view();
int i;
}
#endif
-static void reset_backedges(ir_node *n) {
+static void reset_backedges(ir_node *n)
+{
if (is_possible_loop_head(n)) {
#ifdef INTERPROCEDURAL_VIEW
int rem = get_interprocedural_view();
/*
-static void loop_reset_backedges(ir_loop *l) {
+static void loop_reset_backedges(ir_loop *l)
+{
int i;
reset_backedges(get_loop_node(l, 0));
for (i = 0; i < get_loop_n_nodes(l); ++i)
}
*/
-static void loop_reset_node(ir_node *n, void *env) {
+static void loop_reset_node(ir_node *n, void *env)
+{
(void) env;
set_irn_loop(n, NULL);
reset_backedges(n);
/** Removes all loop information.
Resets all backedges */
-void free_loop_information(ir_graph *irg) {
+void free_loop_information(ir_graph *irg)
+{
/* We can not use this recursion, as the loop might contain
illegal nodes by now. Why else would we throw away the
representation?
}
-void free_all_loop_information(void) {
+void free_all_loop_information(void)
+{
int i;
#ifdef INTERPROCEDURAL_VIEW
int rem = get_interprocedural_view();
/* Debug stuff *************************************************/
-static int test_loop_node(ir_loop *l) {
+static int test_loop_node(ir_loop *l)
+{
int i, has_node = 0, found_problem = 0;
loop_element le;
* - do not have any firm nodes, only loop sons
* - the header is not a Phi, Block or Filter.
*/
-void find_strange_loop_nodes(ir_loop *l) {
+void find_strange_loop_nodes(ir_loop *l)
+{
int found_problem = 0;
found_problem = test_loop_node(l);
printf("Finished Test\n\n");
/* Simple analyses based on the loop information */
/* ------------------------------------------------------------------- */
-int is_loop_variant(ir_loop *l, ir_loop *b) {
+int is_loop_variant(ir_loop *l, ir_loop *b)
+{
int i, n_elems;
if (l == b) return 1;
*
* Returns non-zero, if the node n is not changed in the loop block
* belongs to or in inner loops of this blocks loop. */
-int is_loop_invariant(const ir_node *n, const ir_node *block) {
+int is_loop_invariant(const ir_node *n, const ir_node *block)
+{
ir_loop *l = get_irn_loop(block);
const ir_node *b = is_Block(n) ? n : get_nodes_block(n);
return !is_loop_variant(l, get_irn_loop(b));
/**
* init type link field so that types point to their pointers.
*/
-static void precompute_pointer_types(void) {
+static void precompute_pointer_types(void)
+{
#if 0
int i;
set_type_link(firm_unknown_type, firm_unknown_type);
* Returns a pointer to type which was stored in the link field
* to speed up search.
*/
-static ir_type *find_pointer_type_to (ir_type *tp) {
+static ir_type *find_pointer_type_to (ir_type *tp)
+{
#if 0
return (ir_type *)get_type_link(tp);
#else
* Try to determine a type for a Proj node.
* If a type cannot be determined, return @p firm_none_type.
*/
-static ir_type *find_type_for_Proj(ir_node *n) {
+static ir_type *find_type_for_Proj(ir_node *n)
+{
ir_type *tp;
/* Avoid nested Tuples. */
* Try to determine the type of a node.
* If a type cannot be determined, return @p firm_none_type.
*/
-static ir_type *find_type_for_node(ir_node *n) {
+static ir_type *find_type_for_node(ir_node *n)
+{
ir_type *tp = firm_unknown_type;
ir_type *tp1 = NULL, *tp2 = NULL;
ir_node *a = NULL, *b = NULL;
}
/** Compute the type of an IR node. */
-static ir_type *compute_irn_type(ir_node *n) {
+static ir_type *compute_irn_type(ir_node *n)
+{
ir_type *tp = get_irn_typeinfo_type(n);
if (tp == initial_type) {
* Post-walking ensures that the types for all predecessor
* nodes are already computed.
*/
-static void compute_type(ir_node *n, void *env) {
+static void compute_type(ir_node *n, void *env)
+{
ir_type *tp = get_irn_typeinfo_type(n);
(void) env;
if (tp == phi_cycle_type) {
/**
* Compute the types for all nodes of a graph.
*/
-static void analyse_irg (ir_graph *irg) {
+static void analyse_irg (ir_graph *irg)
+{
set_irg_typeinfo_state(irg, ir_typeinfo_consistent);
irg_walk_graph(irg, NULL, compute_type, NULL);
}
* Initialize the analysis by creating a phi_cycle_type and
* computing pointer types for all class and struct types.
*/
-static void init_irsimpletype(void) {
+static void init_irsimpletype(void)
+{
init_irtypeinfo();
if (!phi_cycle_type)
phi_cycle_type = new_type_class(new_id_from_str("phi_cycle_type"));
}
/* Computes type information for each node in all ir graphs. */
-void simple_analyse_types(void) {
+void simple_analyse_types(void)
+{
int i;
FIRM_DBG_REGISTER(dbg, "firm.ana.simpletype");
set_irp_typeinfo_state(ir_typeinfo_consistent);
}
-void free_simple_type_information(void) {
+void free_simple_type_information(void)
+{
free_irtypeinfo();
if (phi_cycle_type) {
* Calling set/get_irn_type is invalid before calling init. Requires memory
* in the order of MIN(<calls to set_irn_type>, #irnodes).
*/
-void init_irtypeinfo(void) {
+void init_irtypeinfo(void)
+{
int i;
if (initial_type == NULL)
set_irg_typeinfo_state(get_irp_irg(i), ir_typeinfo_none);
}
-void free_irtypeinfo(void) {
+void free_irtypeinfo(void)
+{
int i;
if (initial_type != NULL) {
/* ------------ Irgraph state handling. ------------------------------- */
-void set_irg_typeinfo_state(ir_graph *irg, ir_typeinfo_state s) {
+void set_irg_typeinfo_state(ir_graph *irg, ir_typeinfo_state s)
+{
assert(is_ir_graph(irg));
irg->typeinfo_state = s;
if ((irg->typeinfo_state == ir_typeinfo_consistent) &&
irp->typeinfo_state = ir_typeinfo_inconsistent;
}
-ir_typeinfo_state get_irg_typeinfo_state(const ir_graph *irg) {
+ir_typeinfo_state get_irg_typeinfo_state(const ir_graph *irg)
+{
assert(is_ir_graph(irg));
return irg->typeinfo_state;
}
* consistent. Returns ir_typeinfo_inconsistent if at least one irg has inconsistent
* or no type information. Returns ir_typeinfo_none if no irg contains type information.
*/
-ir_typeinfo_state get_irp_typeinfo_state(void) {
+ir_typeinfo_state get_irp_typeinfo_state(void)
+{
return irp->typeinfo_state;
}
-void set_irp_typeinfo_state(ir_typeinfo_state s) {
+void set_irp_typeinfo_state(ir_typeinfo_state s)
+{
irp->typeinfo_state = s;
}
/* If typeinfo is consistent, sets it to inconsistent. */
-void set_irp_typeinfo_inconsistent(void) {
+void set_irp_typeinfo_inconsistent(void)
+{
if (irp->typeinfo_state == ir_typeinfo_consistent)
irp->typeinfo_state = ir_typeinfo_inconsistent;
}
* ir_typeinfo_consistent or ir_typeinfo_inconsistent. They
* assume current_ir_graph set properly.
*/
-ir_type *get_irn_typeinfo_type(const ir_node *n) {
+ir_type *get_irn_typeinfo_type(const ir_node *n)
+{
ir_type *res = initial_type;
pmap_entry *entry;
return res;
}
-void set_irn_typeinfo_type(ir_node *n, ir_type *tp) {
+void set_irn_typeinfo_type(ir_node *n, ir_type *tp)
+{
assert(get_irg_typeinfo_state(current_ir_graph) != ir_typeinfo_none);
pmap_insert(type_node_map, (void *)n, (void *)tp);
/**
* Returns the link of a region.
*/
-void *get_region_link(const ir_region *reg) {
+void *get_region_link(const ir_region *reg)
+{
return reg->link;
}
/**
* Sets the link of a region.
*/
-void set_region_link(ir_region *reg, void *data) {
+void set_region_link(ir_region *reg, void *data)
+{
reg->link = data;
}
/**
* Get the immediate region of a block.
*/
-ir_region *get_block_region(const ir_node *block) {
+ir_region *get_block_region(const ir_node *block)
+{
assert(is_Block(block));
return block->attr.block.region;
}
/**
* Sets the immediate region of a block.
*/
-void set_block_region(ir_node *block, ir_region *reg) {
+void set_block_region(ir_node *block, ir_region *reg)
+{
assert(is_Block(block));
block->attr.block.region = reg;
}
/**
* Get the immediate region of a node.
*/
-ir_region *get_irn_region(ir_node *n) {
+ir_region *get_irn_region(ir_node *n)
+{
if (is_no_Block(n))
n = get_nodes_block(n);
return get_block_region(n);
/**
* Return non-zero if a given firm thing is a region.
*/
-int is_region(const void *thing) {
+int is_region(const void *thing)
+{
const firm_kind *kind = thing;
return *kind == k_ir_region;
}
/**
* Return the number of predecessors of a region.
*/
-int get_region_n_preds(const ir_region *reg) {
+int get_region_n_preds(const ir_region *reg)
+{
return ARR_LEN(reg->pred);
}
/**
* Return the predecessor region at position pos.
*/
-ir_region *get_region_pred(const ir_region *reg, int pos) {
+ir_region *get_region_pred(const ir_region *reg, int pos)
+{
assert(0 <= pos && pos <= get_region_n_preds(reg));
return reg->pred[pos];
}
/**
* Set the predecessor region at position pos.
*/
-void set_region_pred(ir_region *reg, int pos, ir_region *n) {
+void set_region_pred(ir_region *reg, int pos, ir_region *n)
+{
assert(0 <= pos && pos <= get_region_n_preds(reg));
reg->pred[pos] = n;
}
/**
* Return the number of successors in a region.
*/
-int get_region_n_succs(const ir_region *reg) {
+int get_region_n_succs(const ir_region *reg)
+{
return ARR_LEN(reg->succ);
}
/**
* Return the successor region at position pos.
*/
-ir_region *get_region_succ(const ir_region *reg, int pos) {
+ir_region *get_region_succ(const ir_region *reg, int pos)
+{
assert(0 <= pos && pos <= get_region_n_succs(reg));
return reg->succ[pos];
}
/**
* Set the successor region at position pos.
*/
-void set_region_succ(ir_region *reg, int pos, ir_region *n) {
+void set_region_succ(ir_region *reg, int pos, ir_region *n)
+{
assert(0 <= pos && pos <= get_region_n_succs(reg));
reg->succ[pos] = n;
}
* Do a DFS search on the initial regions, assign a prenum and a postnum to every
* node and store the region nodes into the post array.
*/
-static void dfs_walk2(ir_region *reg, walk_env *env) {
+static void dfs_walk2(ir_region *reg, walk_env *env)
+{
int i, n;
if (reg->visited == 0) {
* Do a DFS search on the initial regions, assign a prenum and a postnum to every
* node and store the region nodes into the post array.
*/
-static void dfs_walk(ir_graph *irg, walk_env *env) {
+static void dfs_walk(ir_graph *irg, walk_env *env)
+{
ir_graph *rem = current_ir_graph;
ir_region *reg;
* Post-walker: wrap all blocks with a BasicBlock region
* and count them
*/
-static void wrap_BasicBlocks(ir_node *block, void *ctx) {
+static void wrap_BasicBlocks(ir_node *block, void *ctx)
+{
walk_env *env = ctx;
ir_region *reg;
* Post-walker: Create the pred and succ edges for Block wrapper.
* Kill edges to the Start and End blocks.
*/
-static void update_BasicBlock_regions(ir_node *blk, void *ctx) {
+static void update_BasicBlock_regions(ir_node *blk, void *ctx)
+{
walk_env *env = ctx;
ir_region *reg = get_irn_link(blk);
int i, j, len;
/**
* Creates a new Sequence region.
*/
-static ir_region *new_Sequence(struct obstack *obst, ir_region *nset, int nset_len) {
+static ir_region *new_Sequence(struct obstack *obst, ir_region *nset, int nset_len)
+{
ir_region *reg, *next;
int i;
/**
* Create a new IfThenElse region.
*/
-static ir_region *new_IfThenElse(struct obstack *obst, ir_region *if_b, ir_region *then_b, ir_region *else_b) {
+static ir_region *new_IfThenElse(struct obstack *obst, ir_region *if_b, ir_region *then_b, ir_region *else_b)
+{
ir_region *reg;
ALLOC_REG(obst, reg, ir_rk_IfThenElse);
/**
* Create a new IfThen region.
*/
-static ir_region *new_IfThen(struct obstack *obst, ir_region *if_b, ir_region *then_b) {
+static ir_region *new_IfThen(struct obstack *obst, ir_region *if_b, ir_region *then_b)
+{
ir_region *reg;
ALLOC_REG(obst, reg, ir_rk_IfThen);
/**
* Create a new SelfLoop region.
*/
-static ir_region *new_SelfLoop(struct obstack *obst, ir_region *head) {
+static ir_region *new_SelfLoop(struct obstack *obst, ir_region *head)
+{
ir_region *reg, *succ;
int i, j, len;
/**
* Create a new RepeatLoop region.
*/
-static ir_region *new_RepeatLoop(struct obstack *obst, ir_region *head, ir_region *body) {
+static ir_region *new_RepeatLoop(struct obstack *obst, ir_region *head, ir_region *body)
+{
ir_region *reg, *succ;
ALLOC_REG(obst, reg, ir_rk_RepeatLoop);
/**
* Create a new WhileLoop region.
*/
-static ir_region *new_WhileLoop(struct obstack *obst, ir_region *head) {
+static ir_region *new_WhileLoop(struct obstack *obst, ir_region *head)
+{
ir_region *reg, *succ;
ir_region *body = head->link;
int i, j, len;
/**
* Create a new new_NaturalLoop region.
*/
-static ir_region *new_NaturalLoop(struct obstack *obst, ir_region *head) {
+static ir_region *new_NaturalLoop(struct obstack *obst, ir_region *head)
+{
ir_region *reg, *c, *n;
int i, j, k, len, n_pred, n_succ;
/**
* Return true if region a is an ancestor of region b in DFS search.
*/
-static int is_ancestor(const ir_region *a, const ir_region *b) {
+static int is_ancestor(const ir_region *a, const ir_region *b)
+{
return (a->prenum <= b->prenum && a->postnum > b->postnum);
}
/**
* Return true if region pred is a predecessor of region n.
*/
-static int pred_of(const ir_region *pred, const ir_region *n) {
+static int pred_of(const ir_region *pred, const ir_region *n)
+{
int i;
for (i = get_region_n_preds(n) - 1; i >= 0; --i) {
if (get_region_pred(n, i) == pred)
/**
* Return true if region succ is a successor of region n.
*/
-static int succ_of(const ir_region *succ, const ir_region *n) {
+static int succ_of(const ir_region *succ, const ir_region *n)
+{
int i;
for (i = get_region_n_succs(n) - 1; i >= 0; --i) {
if (get_region_succ(n, i) == succ)
/**
* Reverse a linked list of regions.
*/
-static struct ir_region *reverse_list(ir_region *n) {
+static struct ir_region *reverse_list(ir_region *n)
+{
ir_region *prev = NULL, *next;
for (; n; n = next) {
/**
* Find the cyclic region in the subgraph entered by node.
*/
-static ir_region *find_cyclic_region(ir_region *node) {
+static ir_region *find_cyclic_region(ir_region *node)
+{
int i;
ir_region *last = node;
int improper = 0;
/**
* Detect a cyclic region.
*/
-static ir_region *cyclic_region_type(struct obstack *obst, ir_region *node) {
+static ir_region *cyclic_region_type(struct obstack *obst, ir_region *node)
+{
ir_region *list;
/* simple cases first */
/**
* Clear all links on a list. Needed, because we expect cleared links.
*/
-static void clear_list(ir_region *list) {
+static void clear_list(ir_region *list)
+{
ir_region *next;
for (next = list; next; list = next) {
/**
* Detect an acyclic region.
*/
-static ir_region *acyclic_region_type(struct obstack *obst, ir_region *node) {
+static ir_region *acyclic_region_type(struct obstack *obst, ir_region *node)
+{
ir_region *n, *m;
int p, s, i, k;
ir_region *nset = NULL;
* replace all pred edges from region pred that points to any of the set set
* to ONE edge to reg.
*/
-static void replace_pred(ir_region *succ, ir_region *reg) {
+static void replace_pred(ir_region *succ, ir_region *reg)
+{
int i, len = get_region_n_preds(succ);
int have_one = 0;
* replace all succ edges from region pred that points to any of the set set
* to ONE edge to reg.
*/
-static void replace_succ(ir_region *pred, ir_region *reg) {
+static void replace_succ(ir_region *pred, ir_region *reg)
+{
int i, len = get_region_n_succs(pred);
int have_one = 0;
/**
* Reduce the graph by the node reg.
*/
-static void reduce(walk_env *env, ir_region *reg) {
+static void reduce(walk_env *env, ir_region *reg)
+{
int i;
ir_region *head = reg->parts[0].region;
unsigned maxorder = head->postnum;
*
* @param irg the graph
*/
-ir_reg_tree *construct_region_tree(ir_graph *irg) {
+ir_reg_tree *construct_region_tree(ir_graph *irg)
+{
walk_env env;
ir_graph *rem = current_ir_graph;
ir_reg_tree *res = XMALLOC(ir_reg_tree);
* @param post walker function, executed after the children of a tree node are visited
* @param env environment, passed to pre and post
*/
-static void region_tree_walk2(ir_region *reg, irg_reg_walk_func *pre, irg_reg_walk_func *post, void *env) {
+static void region_tree_walk2(ir_region *reg, irg_reg_walk_func *pre, irg_reg_walk_func *post, void *env)
+{
int i, n;
if (pre)
* @param post walker function, executed after the children of a tree node are visited
* @param env environment, passed to pre and post
*/
-void region_tree_walk(ir_reg_tree *tree, irg_reg_walk_func *pre, irg_reg_walk_func *post, void *env) {
+void region_tree_walk(ir_reg_tree *tree, irg_reg_walk_func *pre, irg_reg_walk_func *post, void *env)
+{
region_tree_walk2(tree->top, pre, post, env);
}
* Return a flexible array containing all IR-nodes
* that access a given entity.
*/
-static ir_node **get_entity_access_array(const ir_entity *ent) {
+static ir_node **get_entity_access_array(const ir_entity *ent)
+{
ir_node **res;
if (!entity_access_map) entity_access_map = pmap_create();
return res;
}
-static void set_entity_access_array(const ir_entity *ent, ir_node **accs) {
+static void set_entity_access_array(const ir_entity *ent, ir_node **accs)
+{
ir_node **old = pmap_get(entity_access_map, ent);
if (old != accs)
pmap_insert(entity_access_map, ent, (void *)accs);
* Return a flexible array containing all IR-nodes
* that reference a given entity.
*/
-static ir_node **get_entity_reference_array(const ir_entity *ent) {
+static ir_node **get_entity_reference_array(const ir_entity *ent)
+{
ir_node **res;
if (!entity_reference_map) entity_reference_map = pmap_create();
return res;
}
-static void set_entity_reference_array(const ir_entity *ent, ir_node **refs) {
+static void set_entity_reference_array(const ir_entity *ent, ir_node **refs)
+{
ir_node **old = pmap_get(entity_reference_map, ent);
if (old != refs)
pmap_insert(entity_reference_map, ent, (void *)refs);
* Return a flexible array containing all IR-nodes
* that allocate a given type.
*/
-static ir_node **get_type_alloc_array(const ir_type *tp) {
+static ir_node **get_type_alloc_array(const ir_type *tp)
+{
ir_node **res;
if (!type_alloc_map) type_alloc_map = pmap_create();
return res;
}
-static void set_type_alloc_array(const ir_type *tp, ir_node **alls) {
+static void set_type_alloc_array(const ir_type *tp, ir_node **alls)
+{
ir_node **old = pmap_get(type_alloc_map, tp);
if (old != alls)
pmap_insert(type_alloc_map, tp, (void *)alls);
* Return a flexible array containing all Cast-nodes
* that "create" a given type.
*/
-static ir_node **get_type_cast_array(const ir_type *tp) {
+static ir_node **get_type_cast_array(const ir_type *tp)
+{
ir_node **res;
if (!type_cast_map) type_cast_map = pmap_create();
return res;
}
-static void set_type_cast_array(const ir_type *tp, ir_node **alls) {
+static void set_type_cast_array(const ir_type *tp, ir_node **alls)
+{
ir_node **old = pmap_get(type_cast_map, tp);
if (old != alls)
pmap_insert(type_cast_map, tp, (void *)alls);
* Return a flexible array containing all pointer
* types that points-to a given type.
*/
-static ir_type **get_type_pointertype_array(const ir_type *tp) {
+static ir_type **get_type_pointertype_array(const ir_type *tp)
+{
ir_type **res;
if (!type_pointertype_map) type_pointertype_map = pmap_create();
return res;
}
-static void set_type_pointertype_array(const ir_type *tp, ir_type **pts) {
+static void set_type_pointertype_array(const ir_type *tp, ir_type **pts)
+{
ir_type **old = pmap_get(type_pointertype_map, tp);
if (old != pts)
pmap_insert(type_pointertype_map, tp, (void *)pts);
* Return a flexible array containing all array
* types that have a given type as element type.
*/
-static ir_type **get_type_arraytype_array(const ir_type *tp) {
+static ir_type **get_type_arraytype_array(const ir_type *tp)
+{
ir_type **res;
if (!type_arraytype_map) type_arraytype_map = pmap_create();
return res;
}
-void set_type_arraytype_array(const ir_type *tp, ir_type **pts) {
+void set_type_arraytype_array(const ir_type *tp, ir_type **pts)
+{
ir_type **old = pmap_get(type_arraytype_map, tp);
if (old != pts)
pmap_insert(type_arraytype_map, tp, (void *)pts);
/* Access routines for entities */
/**------------------------------------------------------------------*/
-int get_entity_n_accesses(const ir_entity *ent) {
+int get_entity_n_accesses(const ir_entity *ent)
+{
ir_node ** accs;
assert(ent && is_entity(ent));
return ARR_LEN(accs);
}
-ir_node *get_entity_access(const ir_entity *ent, int pos) {
+ir_node *get_entity_access(const ir_entity *ent, int pos)
+{
ir_node ** accs;
assert(0 <= pos && pos < get_entity_n_accesses(ent));
return accs[pos];
}
-static void add_entity_access(const ir_entity *ent, ir_node *n) {
+static void add_entity_access(const ir_entity *ent, ir_node *n)
+{
ir_node ** accs;
assert(ent && is_entity(ent));
set_entity_access_array(ent, accs);
}
-void set_entity_access(const ir_entity *ent, int pos, ir_node *n) {
+void set_entity_access(const ir_entity *ent, int pos, ir_node *n)
+{
ir_node ** accs;
assert(0 <= pos && pos < get_entity_n_accesses(ent));
/*------------------------------------------------------------------*/
-int get_entity_n_references(const ir_entity *ent) {
+int get_entity_n_references(const ir_entity *ent)
+{
ir_node ** refs;
assert(ent && is_entity(ent));
return ARR_LEN(refs);
}
-ir_node *get_entity_reference(const ir_entity *ent, int pos) {
+ir_node *get_entity_reference(const ir_entity *ent, int pos)
+{
ir_node ** refs;
assert(0 <= pos && pos < get_entity_n_references(ent));
return refs[pos];
}
-static void add_entity_reference(const ir_entity *ent, ir_node *n) {
+static void add_entity_reference(const ir_entity *ent, ir_node *n)
+{
ir_node ** refs;
assert(ent && is_entity(ent));
set_entity_reference_array(ent, refs);
}
-void set_entity_reference(const ir_entity *ent, int pos, ir_node *n) {
+void set_entity_reference(const ir_entity *ent, int pos, ir_node *n)
+{
ir_node ** refs;
assert(0 <= pos && pos < get_entity_n_references(ent));
/**------------------------------------------------------------------*/
/* Number of Alloc nodes that create an instance of this type */
-int get_type_n_allocs(const ir_type *tp) {
+int get_type_n_allocs(const ir_type *tp)
+{
ir_node **allocs;
assert(tp && is_type(tp));
}
/* Alloc node that creates an instance of this type */
-ir_node *get_type_alloc(const ir_type *tp, int pos) {
+ir_node *get_type_alloc(const ir_type *tp, int pos)
+{
ir_node **allocs;
assert(0 <= pos && pos < get_type_n_allocs(tp));
return allocs[pos];
}
-static void add_type_alloc(const ir_type *tp, ir_node *n) {
+static void add_type_alloc(const ir_type *tp, ir_node *n)
+{
ir_node **allocs;
assert(tp && is_type(tp));
set_type_alloc_array(tp, allocs);
}
-void set_type_alloc(const ir_type *tp, int pos, ir_node *n) {
+void set_type_alloc(const ir_type *tp, int pos, ir_node *n)
+{
ir_node **allocs;
assert(0 <= pos && pos < get_type_n_allocs(tp));
}
/* Number of Cast nodes that create an instance of this type */
-int get_type_n_casts(const ir_type *tp) {
+int get_type_n_casts(const ir_type *tp)
+{
ir_node **casts;
assert(tp && is_type(tp));
}
-int get_class_n_upcasts(const ir_type *clss) {
+int get_class_n_upcasts(const ir_type *clss)
+{
int i, n_casts = get_type_n_casts(clss);
int n_instances = 0;
for (i = 0; i < n_casts; ++i) {
return n_instances;
}
-int get_class_n_downcasts(const ir_type *clss) {
+int get_class_n_downcasts(const ir_type *clss)
+{
int i, n_casts = get_type_n_casts(clss);
int n_instances = 0;
for (i = 0; i < n_casts; ++i) {
}
/* Cast node that creates an instance of this type */
-ir_node *get_type_cast(const ir_type *tp, int pos) {
+ir_node *get_type_cast(const ir_type *tp, int pos)
+{
ir_node **casts;
assert(0 <= pos && pos < get_type_n_casts(tp));
return casts[pos];
}
-void add_type_cast(const ir_type *tp, ir_node *n) {
+void add_type_cast(const ir_type *tp, ir_node *n)
+{
ir_node **casts;
assert(tp && is_type(tp));
set_type_cast_array(tp, casts);
}
-void set_type_cast(const ir_type *tp, int pos, ir_node *n) {
+void set_type_cast(const ir_type *tp, int pos, ir_node *n)
+{
ir_node **casts;
assert(0 <= pos && pos < get_type_n_casts(tp));
/*------------------------------------------------------------------*/
-int get_type_n_pointertypes_to(const ir_type *tp) {
+int get_type_n_pointertypes_to(const ir_type *tp)
+{
ir_type ** pts;
assert(tp && is_type(tp));
return ARR_LEN(pts);
}
-ir_type *get_type_pointertype_to(const ir_type *tp, int pos) {
+ir_type *get_type_pointertype_to(const ir_type *tp, int pos)
+{
ir_type ** pts;
assert(0 <= pos && pos < get_type_n_pointertypes_to(tp));
return pts[pos];
}
-void add_type_pointertype_to(const ir_type *tp, ir_type *ptp) {
+void add_type_pointertype_to(const ir_type *tp, ir_type *ptp)
+{
ir_type ** pts;
assert(tp && is_type(tp));
set_type_pointertype_array(tp, pts);
}
-void set_type_pointertype_to(const ir_type *tp, int pos, ir_type *ptp) {
+void set_type_pointertype_to(const ir_type *tp, int pos, ir_type *ptp)
+{
ir_type ** pts;
assert(0 <= pos && pos < get_type_n_pointertypes_to(tp));
/*------------------------------------------------------------------*/
-int get_type_n_arraytypes_of(const ir_type *tp) {
+int get_type_n_arraytypes_of(const ir_type *tp)
+{
ir_type ** pts;
assert(tp && is_type(tp));
return ARR_LEN(pts);
}
-ir_type *get_type_arraytype_of(const ir_type *tp, int pos) {
+ir_type *get_type_arraytype_of(const ir_type *tp, int pos)
+{
ir_type ** pts;
assert(0 <= pos && pos < get_type_n_arraytypes_of(tp));
return pts[pos];
}
-void add_type_arraytype_of(const ir_type *tp, ir_type *atp) {
+void add_type_arraytype_of(const ir_type *tp, ir_type *atp)
+{
ir_type ** pts;
assert(tp && is_type(tp));
set_type_arraytype_array(tp, pts);
}
-void set_type_arraytype_of(const ir_type *tp, int pos, ir_type *atp) {
+void set_type_arraytype_of(const ir_type *tp, int pos, ir_type *atp)
+{
ir_type ** pts;
assert(0 <= pos && pos < get_type_n_arraytypes_of(tp));
/*------------------------------------------------------------------*/
/** Initialize the trouts handling. */
-static void init_trouts(void) {
+static void init_trouts(void)
+{
}
/** The number of entities that can be accessed by this Sel node. */
-static int get_Sel_n_accessed_entities(const ir_node *sel) {
+static int get_Sel_n_accessed_entities(const ir_node *sel)
+{
(void) sel;
return 1;
}
/** The entity that cat be accessed by this Sel node. */
-static ir_entity *get_Sel_accessed_entity(const ir_node *sel) {
+static ir_entity *get_Sel_accessed_entity(const ir_node *sel)
+{
return get_Sel_entity(sel);
}
/** An addr node is a SymConst or a Sel. */
-static int get_addr_n_entities(const ir_node *addr) {
+static int get_addr_n_entities(const ir_node *addr)
+{
switch (get_irn_opcode(addr)) {
case iro_Sel:
/* Treat jack array sels? */
/** An addr node is a SymConst or a Sel.
If Sel follow to outermost of compound. */
-static ir_entity *get_addr_entity(const ir_node *addr, int pos) {
+static ir_entity *get_addr_entity(const ir_node *addr, int pos)
+{
ir_node *ptr;
(void) pos;
}
}
-static void chain_accesses(ir_node *n, void *env) {
+static void chain_accesses(ir_node *n, void *env)
+{
int i, n_ents;
ir_node *addr;
* Handle chain types (pointer, array) by adding them to
* its "inner" type.
*/
-static void chain_types(ir_type *tp) {
+static void chain_types(ir_type *tp)
+{
if (is_Pointer_type(tp)) {
add_type_pointertype_to(get_pointer_points_to_type(tp), tp);
} else if (is_Array_type(tp)) {
}
}
-irg_outs_state get_trouts_state(void) {
+irg_outs_state get_trouts_state(void)
+{
return irp->trouts_state;
}
-void set_trouts_inconsistent(void) {
+void set_trouts_inconsistent(void)
+{
if (irp->trouts_state == outs_consistent)
irp->trouts_state = outs_inconsistent;
}
/* compute the trouts data structures. */
-void compute_trouts(void) {
+void compute_trouts(void)
+{
int i;
free_trouts();
irp->trouts_state = outs_consistent;
}
-void free_trouts(void) {
+void free_trouts(void)
+{
if (entity_access_map) {
ir_node **accs;
for (accs = (ir_node **)pmap_first(entity_access_map);
worklist_t *worklist;
};
-int update_vrp_data( ir_node *node) {
+int update_vrp_data( ir_node *node)
+{
tarval *new_bits_set = get_tarval_bad();
tarval *new_bits_not_set = get_tarval_bad();
return something_changed;
}
-void vrp_first_pass(ir_node *n, void *e) {
+void vrp_first_pass(ir_node *n, void *e)
+{
ir_node *succ;
worklist_t *tmp_entry;
int i;
}
-void set_vrp_data(ir_graph *irg) {
+void set_vrp_data(ir_graph *irg)
+{
ir_node *succ;
int i;
}
-ir_graph_pass_t *set_vrp_pass(const char *name) {
+ir_graph_pass_t *set_vrp_pass(const char *name)
+{
return def_graph_pass(name ? name : "set_vrp", set_vrp_data);
}
-pn_Cmp vrp_cmp(ir_node *left, ir_node *right) {
+pn_Cmp vrp_cmp(ir_node *left, ir_node *right)
+{
if (!left->vrp.valid || !right->vrp.valid) {
return pn_Cmp_False;
}
/**
* Returns the libFirm configuration parameter for this backend.
*/
-static const backend_params *TEMPLATE_get_backend_params(void) {
+static const backend_params *TEMPLATE_get_backend_params(void)
+{
static backend_params p = {
0, /* no dword lowering */
0, /* no inline assembly */
/**
* Emit the name of the source register at given input position.
*/
-void arm_emit_source_register(const ir_node *node, int pos) {
+void arm_emit_source_register(const ir_node *node, int pos)
+{
const arch_register_t *reg = get_in_reg(node, pos);
be_emit_string(arch_register_get_name(reg));
}
/**
* Emit the name of the destination register at given output position.
*/
-void arm_emit_dest_register(const ir_node *node, int pos) {
+void arm_emit_dest_register(const ir_node *node, int pos)
+{
const arch_register_t *reg = get_out_reg(node, pos);
be_emit_string(arch_register_get_name(reg));
}
/**
* Emit the arm fpa instruction suffix depending on the mode.
*/
-static void arm_emit_fpa_postfix(const ir_mode *mode) {
+static void arm_emit_fpa_postfix(const ir_mode *mode)
+{
int bits = get_mode_size_bits(mode);
char c = 'e';
/**
* Emit the instruction suffix depending on the mode.
*/
-void arm_emit_mode(const ir_node *node) {
+void arm_emit_mode(const ir_node *node)
+{
ir_mode *mode;
if (is_arm_irn(node)) {
/**
* Returns a unique label. This number will not be used a second time.
*/
-static unsigned get_unique_label(void) {
+static unsigned get_unique_label(void)
+{
static unsigned id = 0;
return ++id;
}
/**
* Emit a floating point fpa constant.
*/
-static void emit_arm_fpaConst(const ir_node *irn) {
+static void emit_arm_fpaConst(const ir_node *irn)
+{
sym_or_tv_t key, *entry;
unsigned label;
ir_mode *mode;
/**
* Returns the next block in a block schedule.
*/
-static ir_node *sched_next_block(const ir_node *block) {
+static ir_node *sched_next_block(const ir_node *block)
+{
return get_irn_link(block);
}
/**
* Returns the target block for a control flow node.
*/
-static ir_node *get_cfop_target_block(const ir_node *irn) {
+static ir_node *get_cfop_target_block(const ir_node *irn)
+{
return get_irn_link(irn);
}
/**
* Emits a block label for the given block.
*/
-static void arm_emit_block_name(const ir_node *block) {
+static void arm_emit_block_name(const ir_node *block)
+{
if (has_Block_entity(block)) {
ir_entity *entity = get_Block_entity(block);
be_gas_emit_entity(entity);
/**
* Emit the target label for a control flow node.
*/
-static void arm_emit_cfop_target(const ir_node *irn) {
+static void arm_emit_cfop_target(const ir_node *irn)
+{
ir_node *block = get_cfop_target_block(irn);
arm_emit_block_name(block);
}
/** Sort register in ascending order. */
-static int reg_cmp(const void *a, const void *b) {
+static int reg_cmp(const void *a, const void *b)
+{
const arch_register_t * const *ra = a;
const arch_register_t * const *rb = b;
}
}
-static void emit_arm_SwitchJmp(const ir_node *irn) {
+static void emit_arm_SwitchJmp(const ir_node *irn)
+{
const ir_edge_t *edge;
ir_node *proj;
int i;
}
/** Emit an IncSP node */
-static void emit_be_IncSP(const ir_node *irn) {
+static void emit_be_IncSP(const ir_node *irn)
+{
int offs = -be_get_IncSP_offset(irn);
if (offs != 0) {
be_emit_finish_line_gas(irn);
}
-static void emit_be_Copy(const ir_node *irn) {
+static void emit_be_Copy(const ir_node *irn)
+{
ir_mode *mode = get_irn_mode(irn);
if (get_in_reg(irn, 0) == get_out_reg(irn, 0)) {
be_emit_finish_line_gas(node);
}
-static void emit_arm_fpaDbl2GP(const ir_node *irn) {
+static void emit_arm_fpaDbl2GP(const ir_node *irn)
+{
be_emit_cstring("\tstfd ");
arm_emit_source_register(irn, 0);
be_emit_cstring(", [sp, #-8]!");
be_emit_finish_line_gas(irn);
}
-static void emit_arm_LdTls(const ir_node *irn) {
+static void emit_arm_LdTls(const ir_node *irn)
+{
(void) irn;
panic("TLS not supported for this target");
/* Er... our gcc does not support it... Install a newer toolchain. */
/**
* Emits code for a node.
*/
-static void arm_emit_node(const ir_node *irn) {
+static void arm_emit_node(const ir_node *irn)
+{
ir_op *op = get_irn_op(irn);
if (op->ops.generic) {
* Walks over the nodes in a block connected by scheduling edges
* and emits code for each node.
*/
-static void arm_gen_block(ir_node *block, ir_node *prev_block) {
+static void arm_gen_block(ir_node *block, ir_node *prev_block)
+{
ir_node *irn;
arm_emit_block_header(block, prev_block);
* Block-walker:
* Sets labels for control flow nodes (jump target)
*/
-static void arm_gen_labels(ir_node *block, void *env) {
+static void arm_gen_labels(ir_node *block, void *env)
+{
ir_node *pred;
int n = get_Block_n_cfgpreds(block);
(void)env;
/**
* Compare two entries of the symbol or tarval set.
*/
-static int cmp_sym_or_tv(const void *elt, const void *key, size_t size) {
+static int cmp_sym_or_tv(const void *elt, const void *key, size_t size)
+{
const sym_or_tv_t *p1 = elt;
const sym_or_tv_t *p2 = key;
(void) size;
&arm_gp_regs[REG_R3],
};
-const arch_register_t *arm_get_RegParam_reg(int n) {
+const arch_register_t *arm_get_RegParam_reg(int n)
+{
assert(n < 4 && n >=0 && "register param > 3 angefordert");
return gpreg_param_reg_std[n];
}
const arch_register_t *reg;
};
-int arm_cmp_irn_reg_assoc(const void *a, const void *b, size_t size) {
+int arm_cmp_irn_reg_assoc(const void *a, const void *b, size_t size)
+{
const struct arm_irn_reg_assoc *x = a;
const struct arm_irn_reg_assoc *y = b;
(void) size;
return x->irn != y->irn;
}
-static struct arm_irn_reg_assoc *get_irn_reg_assoc(const ir_node *irn, set *reg_set) {
+static struct arm_irn_reg_assoc *get_irn_reg_assoc(const ir_node *irn, set *reg_set)
+{
struct arm_irn_reg_assoc templ;
unsigned int hash;
return set_insert(reg_set, &templ, sizeof(templ), hash);
}
-void arm_set_firm_reg(ir_node *irn, const arch_register_t *reg, set *reg_set) {
+void arm_set_firm_reg(ir_node *irn, const arch_register_t *reg, set *reg_set)
+{
struct arm_irn_reg_assoc *assoc = get_irn_reg_assoc(irn, reg_set);
assoc->reg = reg;
}
-const arch_register_t *arm_get_firm_reg(const ir_node *irn, set *reg_set) {
+const arch_register_t *arm_get_firm_reg(const ir_node *irn, set *reg_set)
+{
const struct arm_irn_reg_assoc *assoc = get_irn_reg_assoc(irn, reg_set);
return assoc->reg;
}
/**
* Return the fpa immediate from the encoding.
*/
-const char *arm_get_fpa_imm_name(long imm_value) {
+const char *arm_get_fpa_imm_name(long imm_value)
+{
static const char *fpa_imm[] = {
"0",
"1",
return get_irn_generic_attr_const(node);
}
-static const arm_fpaConst_attr_t *get_arm_fpaConst_attr_const(const ir_node *node) {
+static const arm_fpaConst_attr_t *get_arm_fpaConst_attr_const(const ir_node *node)
+{
const arm_attr_t *attr = get_arm_attr_const(node);
const arm_fpaConst_attr_t *fpa_attr = CONST_CAST_ARM_ATTR(arm_fpaConst_attr_t, attr);
return fpa_attr;
}
-static arm_fpaConst_attr_t *get_arm_fpaConst_attr(ir_node *node) {
+static arm_fpaConst_attr_t *get_arm_fpaConst_attr(ir_node *node)
+{
arm_attr_t *attr = get_arm_attr(node);
arm_fpaConst_attr_t *fpa_attr = CAST_ARM_ATTR(arm_fpaConst_attr_t, attr);
}
/* Returns the attributes of a SwitchJmp node. */
-arm_SwitchJmp_attr_t *get_arm_SwitchJmp_attr(ir_node *node) {
+arm_SwitchJmp_attr_t *get_arm_SwitchJmp_attr(ir_node *node)
+{
assert(is_arm_SwitchJmp(node));
return get_irn_generic_attr(node);
}
-const arm_SwitchJmp_attr_t *get_arm_SwitchJmp_attr_const(const ir_node *node) {
+const arm_SwitchJmp_attr_t *get_arm_SwitchJmp_attr_const(const ir_node *node)
+{
assert(is_arm_SwitchJmp(node));
return get_irn_generic_attr_const(node);
}
/**
* Returns the argument register requirements of a arm node.
*/
-const arch_register_req_t **get_arm_in_req_all(const ir_node *node) {
+const arch_register_req_t **get_arm_in_req_all(const ir_node *node)
+{
const arm_attr_t *attr = get_arm_attr_const(node);
return attr->in_req;
}
/**
* Returns the argument register requirement at position pos of an arm node.
*/
-const arch_register_req_t *get_arm_in_req(const ir_node *node, int pos) {
+const arch_register_req_t *get_arm_in_req(const ir_node *node, int pos)
+{
const arm_attr_t *attr = get_arm_attr_const(node);
return attr->in_req[pos];
}
/**
* Sets the IN register requirements at position pos.
*/
-void set_arm_req_in(ir_node *node, const arch_register_req_t *req, int pos) {
+void set_arm_req_in(ir_node *node, const arch_register_req_t *req, int pos)
+{
arm_attr_t *attr = get_arm_attr(node);
attr->in_req[pos] = req;
}
/**
* Returns the fpaConst value
*/
-tarval *get_fpaConst_value(const ir_node *node) {
+tarval *get_fpaConst_value(const ir_node *node)
+{
const arm_fpaConst_attr_t *attr = get_arm_fpaConst_attr_const(node);
return attr->tv;
}
/**
* Sets the tarval value
*/
-void set_fpaConst_value(ir_node *node, tarval *tv) {
+void set_fpaConst_value(ir_node *node, tarval *tv)
+{
arm_fpaConst_attr_t *attr = get_arm_fpaConst_attr(node);
attr->tv = tv;
}
/**
* Returns the proj num
*/
-int get_arm_CondJmp_proj_num(const ir_node *node) {
+int get_arm_CondJmp_proj_num(const ir_node *node)
+{
const arm_CondJmp_attr_t *attr = get_arm_CondJmp_attr_const(node);
return attr->proj_num;
}
/**
* Sets the proj num
*/
-void set_arm_CondJmp_proj_num(ir_node *node, int proj_num) {
+void set_arm_CondJmp_proj_num(ir_node *node, int proj_num)
+{
arm_CondJmp_attr_t *attr = get_arm_CondJmp_attr(node);
attr->proj_num = proj_num;
}
/**
* Returns the number of projs of a SwitchJmp.
*/
-int get_arm_SwitchJmp_n_projs(const ir_node *node) {
+int get_arm_SwitchJmp_n_projs(const ir_node *node)
+{
const arm_SwitchJmp_attr_t *attr = get_arm_SwitchJmp_attr_const(node);
return attr->n_projs;
}
/**
* Sets the number of projs.
*/
-void set_arm_SwitchJmp_n_projs(ir_node *node, int n_projs) {
+void set_arm_SwitchJmp_n_projs(ir_node *node, int n_projs)
+{
arm_SwitchJmp_attr_t *attr = get_arm_SwitchJmp_attr(node);
attr->n_projs = n_projs;
}
/**
* Returns the default_proj_num.
*/
-long get_arm_SwitchJmp_default_proj_num(const ir_node *node) {
+long get_arm_SwitchJmp_default_proj_num(const ir_node *node)
+{
const arm_SwitchJmp_attr_t *attr = get_arm_SwitchJmp_attr_const(node);
return attr->default_proj_num;
}
/**
* Sets the default_proj_num.
*/
-void set_arm_SwitchJmp_default_proj_num(ir_node *node, long default_proj_num) {
+void set_arm_SwitchJmp_default_proj_num(ir_node *node, long default_proj_num)
+{
arm_SwitchJmp_attr_t *attr = get_arm_SwitchJmp_attr(node);
attr->default_proj_num = default_proj_num;
}
}
/** copies the ARM attributes of a node. */
-static void arm_copy_attr(const ir_node *old_node, ir_node *new_node) {
+static void arm_copy_attr(const ir_node *old_node, ir_node *new_node)
+{
ir_graph *irg = get_irn_irg(new_node);
struct obstack *obst = get_irg_obstack(irg);
const arm_attr_t *attr_old = get_arm_attr_const(old_node);
*
* @return The created arm Conv node
*/
-static ir_node *gen_Conv(ir_node *node) {
+static ir_node *gen_Conv(ir_node *node)
+{
ir_node *block = be_transform_node(get_nodes_block(node));
ir_node *op = get_Conv_op(node);
ir_node *new_op = be_transform_node(op);
*
* @return the created arm Mul node
*/
-static ir_node *gen_Mul(ir_node *node) {
+static ir_node *gen_Mul(ir_node *node)
+{
ir_node *block = be_transform_node(get_nodes_block(node));
ir_node *op1 = get_Mul_left(node);
ir_node *new_op1 = be_transform_node(op1);
* @param env The transformation environment
* @return the created arm fDiv node
*/
-static ir_node *gen_Quot(ir_node *node) {
+static ir_node *gen_Quot(ir_node *node)
+{
ir_node *block = be_transform_node(get_nodes_block(node));
ir_node *op1 = get_Quot_left(node);
ir_node *new_op1 = be_transform_node(op1);
*
* @return the created ARM Load node
*/
-static ir_node *gen_Load(ir_node *node) {
+static ir_node *gen_Load(ir_node *node)
+{
ir_node *block = be_transform_node(get_nodes_block(node));
ir_node *ptr = get_Load_ptr(node);
ir_node *new_ptr = be_transform_node(ptr);
*
* @return The transformed ARM node.
*/
-static ir_node *gen_Const(ir_node *node) {
+static ir_node *gen_Const(ir_node *node)
+{
ir_node *block = be_transform_node(get_nodes_block(node));
ir_mode *mode = get_irn_mode(node);
dbg_info *dbg = get_irn_dbg_info(node);
*
* @return The transformed ARM node.
*/
-static ir_node *gen_CopyB(ir_node *node) {
+static ir_node *gen_CopyB(ir_node *node)
+{
ir_node *block = be_transform_node(get_nodes_block(node));
ir_node *src = get_CopyB_src(node);
ir_node *new_src = be_transform_node(src);
/**
* Transform a be_AddSP into an arm_AddSP. Eat up const sizes.
*/
-static ir_node *gen_be_AddSP(ir_node *node) {
+static ir_node *gen_be_AddSP(ir_node *node)
+{
ir_node *block = be_transform_node(get_nodes_block(node));
ir_node *sz = get_irn_n(node, be_pos_AddSP_size);
ir_node *new_sz = be_transform_node(sz);
/**
* Transform a be_SubSP into an arm_SubSP. Eat up const sizes.
*/
-static ir_node *gen_be_SubSP(ir_node *node) {
+static ir_node *gen_be_SubSP(ir_node *node)
+{
ir_node *block = be_transform_node(get_nodes_block(node));
ir_node *sz = get_irn_n(node, be_pos_SubSP_size);
ir_node *new_sz = be_transform_node(sz);
/**
* Transform a be_Copy.
*/
-static ir_node *gen_be_Copy(ir_node *node) {
+static ir_node *gen_be_Copy(ir_node *node)
+{
ir_node *result = be_duplicate_node(node);
ir_mode *mode = get_irn_mode(result);
/**
* Transform a Proj from a Load.
*/
-static ir_node *gen_Proj_Load(ir_node *node) {
+static ir_node *gen_Proj_Load(ir_node *node)
+{
ir_node *block = be_transform_node(get_nodes_block(node));
ir_node *load = get_Proj_pred(node);
ir_node *new_load = be_transform_node(load);
/**
* Transform and renumber the Projs from a CopyB.
*/
-static ir_node *gen_Proj_CopyB(ir_node *node) {
+static ir_node *gen_Proj_CopyB(ir_node *node)
+{
ir_node *block = be_transform_node(get_nodes_block(node));
ir_node *pred = get_Proj_pred(node);
ir_node *new_pred = be_transform_node(pred);
/**
* Transform and renumber the Projs from a Quot.
*/
-static ir_node *gen_Proj_Quot(ir_node *node) {
+static ir_node *gen_Proj_Quot(ir_node *node)
+{
ir_node *block = be_transform_node(get_nodes_block(node));
ir_node *pred = get_Proj_pred(node);
ir_node *new_pred = be_transform_node(pred);
/**
* Transform the Projs of a be_AddSP.
*/
-static ir_node *gen_Proj_be_AddSP(ir_node *node) {
+static ir_node *gen_Proj_be_AddSP(ir_node *node)
+{
ir_node *block = be_transform_node(get_nodes_block(node));
ir_node *pred = get_Proj_pred(node);
ir_node *new_pred = be_transform_node(pred);
/**
* Transform the Projs of a be_SubSP.
*/
-static ir_node *gen_Proj_be_SubSP(ir_node *node) {
+static ir_node *gen_Proj_be_SubSP(ir_node *node)
+{
ir_node *block = be_transform_node(get_nodes_block(node));
ir_node *pred = get_Proj_pred(node);
ir_node *new_pred = be_transform_node(pred);
/**
* Transform the Projs from a Cmp.
*/
-static ir_node *gen_Proj_Cmp(ir_node *node) {
+static ir_node *gen_Proj_Cmp(ir_node *node)
+{
(void) node;
panic("Mux NYI");
}
/**
* Transform the Thread Local Storage Proj.
*/
-static ir_node *gen_Proj_tls(ir_node *node) {
+static ir_node *gen_Proj_tls(ir_node *node)
+{
ir_node *block = be_transform_node(get_nodes_block(node));
dbg_info *dbgi = NULL;
/**
* Transform a Proj node.
*/
-static ir_node *gen_Proj(ir_node *node) {
+static ir_node *gen_Proj(ir_node *node)
+{
ir_graph *irg = current_ir_graph;
dbg_info *dbgi = get_irn_dbg_info(node);
ir_node *pred = get_Proj_pred(node);
* Transforms the standard Firm graph into
* a ARM firm graph.
*/
-static void arm_prepare_graph(void *self) {
+static void arm_prepare_graph(void *self)
+{
arm_code_gen_t *cg = self;
/* transform nodes into assembler instructions */
* Emits the code, closes the output file and frees
* the code generator interface.
*/
-static void arm_emit_and_done(void *self) {
+static void arm_emit_and_done(void *self)
+{
arm_code_gen_t *cg = self;
ir_graph *irg = cg->irg;
/**
* Handle graph transformations before the abi converter does its work.
*/
-static void arm_before_abi(void *self) {
+static void arm_before_abi(void *self)
+{
arm_code_gen_t *cg = self;
irg_walk_graph(cg->irg, NULL, handle_calls, cg);
/**
* Initializes the code generator.
*/
-static void *arm_cg_init(be_irg_t *birg) {
+static void *arm_cg_init(be_irg_t *birg)
+{
static ir_type *int_tp = NULL;
arm_isa_t *isa = (arm_isa_t *)birg->main_env->arch_env;
arm_code_gen_t *cg;
/**
* Initializes the backend ISA and opens the output file.
*/
-static arch_env_t *arm_init(FILE *file_handle) {
+static arch_env_t *arm_init(FILE *file_handle)
+{
static int inited = 0;
arm_isa_t *isa;
/**
* Closes the output file and frees the ISA structure.
*/
-static void arm_done(void *self) {
+static void arm_done(void *self)
+{
arm_isa_t *isa = self;
be_gas_emit_decls(isa->arch_env.main_env);
* here to speed up register allocation (and makes dumps
* smaller and more readable).
*/
-static unsigned arm_get_n_reg_class(void) {
+static unsigned arm_get_n_reg_class(void)
+{
return N_CLASSES;
}
/**
* Return the register class with requested index.
*/
-static const arch_register_class_t *arm_get_reg_class(unsigned i) {
+static const arch_register_class_t *arm_get_reg_class(unsigned i)
+{
assert(i < N_CLASSES);
return &arm_reg_classes[i];
}
* @param mode The mode in question.
* @return A register class which can hold values of the given mode.
*/
-const arch_register_class_t *arm_get_reg_class_for_mode(const ir_mode *mode) {
+const arch_register_class_t *arm_get_reg_class_for_mode(const ir_mode *mode)
+{
if (mode_is_float(mode))
return &arm_reg_classes[CLASS_arm_fpa];
else
*
* All nodes which define registers in @p reg_map must keep @p reg_map current.
*/
-static const arch_register_t *arm_abi_prologue(void *self, ir_node **mem, pmap *reg_map, int *stack_bias) {
+static const arch_register_t *arm_abi_prologue(void *self, ir_node **mem, pmap *reg_map, int *stack_bias)
+{
arm_abi_env_t *env = self;
ir_node *store;
ir_graph *irg;
}
-static const ilp_sched_selector_t *arm_get_ilp_sched_selector(const void *self) {
+static const ilp_sched_selector_t *arm_get_ilp_sched_selector(const void *self)
+{
(void) self;
return NULL;
}
return 4;
}
-static const be_execution_unit_t ***arm_get_allowed_execution_units(const ir_node *irn) {
+static const be_execution_unit_t ***arm_get_allowed_execution_units(const ir_node *irn)
+{
(void) irn;
/* TODO */
panic("Unimplemented arm_get_allowed_execution_units()");
}
-static const be_machine_t *arm_get_machine(const void *self) {
+static const be_machine_t *arm_get_machine(const void *self)
+{
(void) self;
/* TODO */
panic("Unimplemented arm_get_machine()");
/**
* Return irp irgs in the desired order.
*/
-static ir_graph **arm_get_irg_list(const void *self, ir_graph ***irg_list) {
+static ir_graph **arm_get_irg_list(const void *self, ir_graph ***irg_list)
+{
(void) self;
(void) irg_list;
return NULL;
/**
* Returns the libFirm configuration parameter for this backend.
*/
-static const backend_params *arm_get_libfirm_params(void) {
+static const backend_params *arm_get_libfirm_params(void)
+{
static const ir_settings_if_conv_t ifconv = {
4, /* maxdepth, doesn't matter for Psi-conversion */
arm_is_mux_allowed /* allows or disallows Mux creation for given selector */
/**
* Update the entity of Sels to the outer value parameters.
*/
-static void update_outer_frame_sels(ir_node *irn, void *env) {
+static void update_outer_frame_sels(ir_node *irn, void *env)
+{
lower_frame_sels_env_t *ctx = env;
ir_node *ptr;
ir_entity *ent;
* Walker: finally lower all Sels of outer frame or parameter
* entities.
*/
-static void lower_outer_frame_sels(ir_node *sel, void *ctx) {
+static void lower_outer_frame_sels(ir_node *sel, void *ctx)
+{
be_abi_irg_t *env = ctx;
ir_node *ptr;
ir_entity *ent;
#define decl_self(type, from) \
type *self = (type *) from
-static void set_color(plotter_t *_self, const color_t *color) {
+static void set_color(plotter_t *_self, const color_t *color)
+{
decl_self(base_plotter_t, _self);
self->color = color;
}
-static const color_t *get_color(const plotter_t *_self) {
+static const color_t *get_color(const plotter_t *_self)
+{
decl_self(const base_plotter_t, _self);
return self->color;
}
-static void set_width(plotter_t *_self, int width) {
+static void set_width(plotter_t *_self, int width)
+{
decl_self(base_plotter_t, _self);
self->width = width;
}
-static int get_width(const plotter_t *_self) {
+static int get_width(const plotter_t *_self)
+{
decl_self(const base_plotter_t, _self);
return self->width;
}
-static void plotter_default_free(plotter_t *self) {
+static void plotter_default_free(plotter_t *self)
+{
(void) self;
}
p->vtab->finish(p);
}
-void draw_interval_tree(const draw_chordal_opts_t *opts, const be_chordal_env_t *chordal_env, plotter_t *plotter) {
+void draw_interval_tree(const draw_chordal_opts_t *opts, const be_chordal_env_t *chordal_env, plotter_t *plotter)
+{
draw_chordal_env_t env;
struct block_dims *start_dims;
ir_node *start_block = get_irg_start_block(chordal_env->irg);
/**
* Perform things which need to be done per register class after spilling.
*/
-static void post_spill(post_spill_env_t *pse, int iteration) {
+static void post_spill(post_spill_env_t *pse, int iteration)
+{
be_chordal_env_t *chordal_env = &pse->cenv;
be_irg_t *birg = pse->birg;
ir_graph *irg = birg->irg;
return be_values_interfere(env->birg->lv, a, b);
}
-static int set_cmp_conflict_t(const void *x, const void *y, size_t size) {
+static int set_cmp_conflict_t(const void *x, const void *y, size_t size)
+{
const conflict_t *xx = x;
const conflict_t *yy = y;
(void) size;
* If a local pinned conflict occurs, a new edge in the conflict graph is added.
* The next maximum independent set build, will regard it.
*/
-static inline void qnode_add_conflict(const qnode_t *qn, const ir_node *n1, const ir_node *n2) {
+static inline void qnode_add_conflict(const qnode_t *qn, const ir_node *n1, const ir_node *n2)
+{
conflict_t c;
DBG((dbg, LEVEL_4, "\t %+F -- %+F\n", n1, n2));
/**
* Checks if two nodes are in a conflict.
*/
-static inline int qnode_are_conflicting(const qnode_t *qn, const ir_node *n1, const ir_node *n2) {
+static inline int qnode_are_conflicting(const qnode_t *qn, const ir_node *n1, const ir_node *n2)
+{
conflict_t c;
/* search for live range interference */
if (n1!=n2 && nodes_interfere(qn->ou->co->cenv, n1, n2))
return set_find(qn->conflicts, &c, sizeof(c), HASH_CONFLICT(c)) != 0;
}
-static int set_cmp_node_stat_t(const void *x, const void *y, size_t size) {
+static int set_cmp_node_stat_t(const void *x, const void *y, size_t size)
+{
(void) size;
return ((const node_stat_t*)x)->irn != ((const node_stat_t*)y)->irn;
}
/**
* Finds a node status entry of a node if existent. Otherwise return NULL
*/
-static inline const node_stat_t *qnode_find_node(const qnode_t *qn, ir_node *irn) {
+static inline const node_stat_t *qnode_find_node(const qnode_t *qn, ir_node *irn)
+{
node_stat_t find;
find.irn = irn;
return set_find(qn->changed_nodes, &find, sizeof(find), hash_irn(irn));
* Finds a node status entry of a node if existent. Otherwise it will return
* an initialized new entry for this node.
*/
-static inline node_stat_t *qnode_find_or_insert_node(const qnode_t *qn, ir_node *irn) {
+static inline node_stat_t *qnode_find_or_insert_node(const qnode_t *qn, ir_node *irn)
+{
node_stat_t find;
find.irn = irn;
find.new_color = NO_COLOR;
/**
* Returns the virtual color of a node if set before, else returns the real color.
*/
-static inline int qnode_get_new_color(const qnode_t *qn, ir_node *irn) {
+static inline int qnode_get_new_color(const qnode_t *qn, ir_node *irn)
+{
const node_stat_t *found = qnode_find_node(qn, irn);
if (found)
return found->new_color;
/**
* Sets the virtual color of a node.
*/
-static inline void qnode_set_new_color(const qnode_t *qn, ir_node *irn, int color) {
+static inline void qnode_set_new_color(const qnode_t *qn, ir_node *irn, int color)
+{
node_stat_t *found = qnode_find_or_insert_node(qn, irn);
found->new_color = color;
DBG((dbg, LEVEL_3, "\t col(%+F) := %d\n", irn, color));
* to the same optimization unit and has been optimized before the current
* processed node.
*/
-static inline int qnode_is_pinned_local(const qnode_t *qn, ir_node *irn) {
+static inline int qnode_is_pinned_local(const qnode_t *qn, ir_node *irn)
+{
const node_stat_t *found = qnode_find_node(qn, irn);
if (found)
return found->pinned_local;
* Local-pins a node, so optimizations of further nodes of the same opt unit
* can handle situations in which a color change would undo prior optimizations.
*/
-static inline void qnode_pin_local(const qnode_t *qn, ir_node *irn) {
+static inline void qnode_pin_local(const qnode_t *qn, ir_node *irn)
+{
node_stat_t *found = qnode_find_or_insert_node(qn, irn);
found->pinned_local = 1;
if (found->new_color == NO_COLOR)
* Else the first conflicting ir_node encountered is returned.
*
*/
-static ir_node *qnode_color_irn(const qnode_t *qn, ir_node *irn, int col, const ir_node *trigger) {
+static ir_node *qnode_color_irn(const qnode_t *qn, ir_node *irn, int col, const ir_node *trigger)
+{
copy_opt_t *co = qn->ou->co;
const be_chordal_env_t *chordal_env = co->cenv;
const arch_register_class_t *cls = co->cls;
* @returns 1 iff all members colors could be set
* 0 else
*/
-static int qnode_try_color(const qnode_t *qn) {
+static int qnode_try_color(const qnode_t *qn)
+{
int i;
for (i=0; i<qn->mis_size; ++i) {
ir_node *test_node, *confl_node;
* Determines a maximum weighted independent set with respect to
* the interference and conflict edges of all nodes in a qnode.
*/
-static inline void qnode_max_ind_set(qnode_t *qn, const unit_t *ou) {
+static inline void qnode_max_ind_set(qnode_t *qn, const unit_t *ou)
+{
ir_node **safe, **unsafe;
int i, o, safe_count, safe_costs, unsafe_count, *unsafe_costs;
bitset_t *curr, *best;
/**
* Creates a new qnode
*/
-static inline qnode_t *new_qnode(const unit_t *ou, int color) {
+static inline qnode_t *new_qnode(const unit_t *ou, int color)
+{
qnode_t *qn = XMALLOC(qnode_t);
qn->ou = ou;
qn->color = color;
/**
* Frees space used by a queue node
*/
-static inline void free_qnode(qnode_t *qn) {
+static inline void free_qnode(qnode_t *qn)
+{
del_set(qn->conflicts);
del_set(qn->changed_nodes);
xfree(qn->mis);
* Inserts a qnode in the sorted queue of the optimization unit. Queue is
* ordered by field 'size' (the size of the mis) in decreasing order.
*/
-static inline void ou_insert_qnode(unit_t *ou, qnode_t *qn) {
+static inline void ou_insert_qnode(unit_t *ou, qnode_t *qn)
+{
struct list_head *lh;
if (qnode_are_conflicting(qn, ou->nodes[0], ou->nodes[0])) {
* case for approximately 80% of all phi classes and 100% of register constrained
* nodes. (All other phi classes are reduced to this case.)
*/
-static void ou_optimize(unit_t *ou) {
+static void ou_optimize(unit_t *ou)
+{
qnode_t *curr = NULL;
qnode_t *tmp;
const arch_register_req_t *req;
* Solves the problem using a heuristic approach
* Uses the OU data structure
*/
-int co_solve_heuristic(copy_opt_t *co) {
+int co_solve_heuristic(copy_opt_t *co)
+{
unit_t *curr;
ASSERT_OU_AVAIL(co);
/**
* Write a chunk to stderr for debugging.
*/
-static void dbg_aff_chunk(const co_mst_env_t *env, const aff_chunk_t *c) {
+static void dbg_aff_chunk(const co_mst_env_t *env, const aff_chunk_t *c)
+{
int i, l;
(void) env;
if (c->weight_consistent)
/**
* Dump all admissible colors to stderr.
*/
-static void dbg_admissible_colors(const co_mst_env_t *env, const co_mst_irn_t *node) {
+static void dbg_admissible_colors(const co_mst_env_t *env, const co_mst_irn_t *node)
+{
bitset_pos_t idx;
(void) env;
/**
* Dump color-cost pairs to stderr.
*/
-static void dbg_col_cost(const co_mst_env_t *env, const col_cost_t *cost) {
+static void dbg_col_cost(const co_mst_env_t *env, const col_cost_t *cost)
+{
int i;
for (i = 0; i < env->n_regs; ++i)
fprintf(stderr, " (%d, %.4f)", cost[i].col, cost[i].cost);
#endif /* DEBUG_libfirm */
-static inline int get_mst_irn_col(const co_mst_irn_t *node) {
+static inline int get_mst_irn_col(const co_mst_irn_t *node)
+{
return node->tmp_col >= 0 ? node->tmp_col : node->col;
}
/**
* @return 1 if node @p node has color @p col, 0 otherwise.
*/
-static int decider_has_color(const co_mst_irn_t *node, int col) {
+static int decider_has_color(const co_mst_irn_t *node, int col)
+{
return get_mst_irn_col(node) == col;
}
/**
* @return 1 if node @p node has not color @p col, 0 otherwise.
*/
-static int decider_hasnot_color(const co_mst_irn_t *node, int col) {
+static int decider_hasnot_color(const co_mst_irn_t *node, int col)
+{
return get_mst_irn_col(node) != col;
}
/**
* Always returns true.
*/
-static int decider_always_yes(const co_mst_irn_t *node, int col) {
+static int decider_always_yes(const co_mst_irn_t *node, int col)
+{
(void) node;
(void) col;
return 1;
}
/** compares two affinity edges by its weight */
-static int cmp_aff_edge(const void *a, const void *b) {
+static int cmp_aff_edge(const void *a, const void *b)
+{
const aff_edge_t *e1 = a;
const aff_edge_t *e2 = b;
}
/** compares to color-cost pairs */
-static __attribute__((unused)) int cmp_col_cost_lt(const void *a, const void *b) {
+static __attribute__((unused)) int cmp_col_cost_lt(const void *a, const void *b)
+{
const col_cost_t *c1 = a;
const col_cost_t *c2 = b;
real_t diff = c1->cost - c2->cost;
return (diff > 0) - (diff < 0);
}
-static int cmp_col_cost_gt(const void *a, const void *b) {
+static int cmp_col_cost_gt(const void *a, const void *b)
+{
const col_cost_t *c1 = a;
const col_cost_t *c2 = b;
real_t diff = c2->cost - c1->cost;
/**
* Creates a new affinity chunk
*/
-static inline aff_chunk_t *new_aff_chunk(co_mst_env_t *env) {
+static inline aff_chunk_t *new_aff_chunk(co_mst_env_t *env)
+{
aff_chunk_t *c = XMALLOCF(aff_chunk_t, color_affinity, env->n_regs);
c->n = NEW_ARR_F(const ir_node *, 0);
c->interfere = NEW_ARR_F(const ir_node *, 0);
/**
* Frees all memory allocated by an affinity chunk.
*/
-static inline void delete_aff_chunk(co_mst_env_t *env, aff_chunk_t *c) {
+static inline void delete_aff_chunk(co_mst_env_t *env, aff_chunk_t *c)
+{
pset_remove(env->chunkset, c, c->id);
DEL_ARR_F(c->interfere);
DEL_ARR_F(c->n);
* @return the position where n is found in the array arr or ~pos
* if the nodes is not here.
*/
-static inline int nodes_bsearch(const ir_node **arr, const ir_node *n) {
+static inline int nodes_bsearch(const ir_node **arr, const ir_node *n)
+{
int hi = ARR_LEN(arr);
int lo = 0;
}
/** Check if a node n can be found inside arr. */
-static int node_contains(const ir_node **arr, const ir_node *n) {
+static int node_contains(const ir_node **arr, const ir_node *n)
+{
int i = nodes_bsearch(arr, n);
return i >= 0;
}
*
* @return 1 if the node was inserted, 0 else
*/
-static int nodes_insert(const ir_node ***arr, const ir_node *irn) {
+static int nodes_insert(const ir_node ***arr, const ir_node *irn)
+{
int idx = nodes_bsearch(*arr, irn);
if (idx < 0) {
/**
* Adds a node to an affinity chunk
*/
-static inline void aff_chunk_add_node(aff_chunk_t *c, co_mst_irn_t *node) {
+static inline void aff_chunk_add_node(aff_chunk_t *c, co_mst_irn_t *node)
+{
int i;
if (! nodes_insert(&c->n, node->irn))
/**
* In case there is no phase information for irn, initialize it.
*/
-static void *co_mst_irn_init(ir_phase *ph, const ir_node *irn, void *old) {
+static void *co_mst_irn_init(ir_phase *ph, const ir_node *irn, void *old)
+{
co_mst_irn_t *res = old ? old : phase_alloc(ph, sizeof(res[0]));
co_mst_env_t *env = ph->priv;
/**
* Check if affinity chunk @p chunk interferes with node @p irn.
*/
-static inline int aff_chunk_interferes(const aff_chunk_t *chunk, const ir_node *irn) {
+static inline int aff_chunk_interferes(const aff_chunk_t *chunk, const ir_node *irn)
+{
return node_contains(chunk->interfere, irn);
}
* @param c2 Another chunk
* @return 1 if there are interferences between nodes of c1 and c2, 0 otherwise.
*/
-static inline int aff_chunks_interfere(const aff_chunk_t *c1, const aff_chunk_t *c2) {
+static inline int aff_chunks_interfere(const aff_chunk_t *c1, const aff_chunk_t *c2)
+{
int i;
if (c1 == c2)
* Returns the affinity chunk of @p irn or creates a new
* one with @p irn as element if there is none assigned.
*/
-static inline aff_chunk_t *get_aff_chunk(co_mst_env_t *env, const ir_node *irn) {
+static inline aff_chunk_t *get_aff_chunk(co_mst_env_t *env, const ir_node *irn)
+{
co_mst_irn_t *node = get_co_mst_irn(env, irn);
return node->chunk;
}
* are no interference edges from chunk(src) to chunk(tgt)).
* @return 1 if successful, 0 if not possible
*/
-static int aff_chunk_absorb(co_mst_env_t *env, const ir_node *src, const ir_node *tgt) {
+static int aff_chunk_absorb(co_mst_env_t *env, const ir_node *src, const ir_node *tgt)
+{
aff_chunk_t *c1 = get_aff_chunk(env, src);
aff_chunk_t *c2 = get_aff_chunk(env, tgt);
/**
* Assures that the weight of the given chunk is consistent.
*/
-static void aff_chunk_assure_weight(co_mst_env_t *env, aff_chunk_t *c) {
+static void aff_chunk_assure_weight(co_mst_env_t *env, aff_chunk_t *c)
+{
if (! c->weight_consistent) {
int w = 0;
int idx, len, i;
/**
* Count the number of interfering affinity neighbours
*/
-static int count_interfering_aff_neighs(co_mst_env_t *env, const affinity_node_t *an) {
+static int count_interfering_aff_neighs(co_mst_env_t *env, const affinity_node_t *an)
+{
const neighb_t *neigh;
const ir_node *irn = an->irn;
const co_mst_irn_t *node = get_co_mst_irn(env, irn);
* merged if there are no interference edges from one
* chunk to the other.
*/
-static void build_affinity_chunks(co_mst_env_t *env) {
+static void build_affinity_chunks(co_mst_env_t *env)
+{
void *nodes_it = be_ifg_nodes_iter_alloca(env->ifg);
aff_edge_t *edges = NEW_ARR_F(aff_edge_t, 0);
ir_node *n;
/**
* Fragment the given chunk into chunks having given color and not having given color.
*/
-static aff_chunk_t *fragment_chunk(co_mst_env_t *env, int col, aff_chunk_t *c, waitq *tmp) {
+static aff_chunk_t *fragment_chunk(co_mst_env_t *env, int col, aff_chunk_t *c, waitq *tmp)
+{
bitset_t *visited = bitset_irg_malloc(env->co->irg);
int idx, len;
aff_chunk_t *best = NULL;
* Resets the temporary fixed color of all nodes within wait queue @p nodes.
* ATTENTION: the queue is empty after calling this function!
*/
-static inline void reject_coloring(struct list_head *nodes) {
+static inline void reject_coloring(struct list_head *nodes)
+{
co_mst_irn_t *n, *temp;
DB((dbg, LEVEL_4, "\treject coloring for"));
list_for_each_entry_safe(co_mst_irn_t, n, temp, nodes, list) {
DB((dbg, LEVEL_4, "\n"));
}
-static inline void materialize_coloring(struct list_head *nodes) {
+static inline void materialize_coloring(struct list_head *nodes)
+{
co_mst_irn_t *n, *temp;
list_for_each_entry_safe(co_mst_irn_t, n, temp, nodes, list) {
assert(n->tmp_col >= 0);
* Tries to change the node to a color other than @p exclude_col.
* @return 1 if succeeded, 0 otherwise.
*/
-static int change_node_color_excluded(co_mst_env_t *env, co_mst_irn_t *node, int exclude_col, struct list_head *changed, int depth, int *max_depth, int *trip) {
+static int change_node_color_excluded(co_mst_env_t *env, co_mst_irn_t *node, int exclude_col, struct list_head *changed, int depth, int *max_depth, int *trip)
+{
int col = get_mst_irn_col(node);
int res = 0;
* ATTENTION: Expect @p costs already sorted by increasing costs.
* @return 1 if coloring could be applied, 0 otherwise.
*/
-static int recolor_nodes(co_mst_env_t *env, co_mst_irn_t *node, col_cost_t *costs, struct list_head *changed, int depth, int *max_depth, int *trip) {
+static int recolor_nodes(co_mst_env_t *env, co_mst_irn_t *node, col_cost_t *costs, struct list_head *changed, int depth, int *max_depth, int *trip)
+{
int i;
struct list_head local_changed;
* Tries to bring node @p node and all its neighbours to color @p tgt_col.
* @return 1 if color @p col could be applied, 0 otherwise
*/
-static int change_node_color(co_mst_env_t *env, co_mst_irn_t *node, int tgt_col, struct list_head *changed) {
+static int change_node_color(co_mst_env_t *env, co_mst_irn_t *node, int tgt_col, struct list_head *changed)
+{
int col = get_mst_irn_col(node);
/* if node already has the target color -> good, temporary fix it */
* Tries to color an affinity chunk (or at least a part of it).
* Inserts uncolored parts of the chunk as a new chunk into the priority queue.
*/
-static void color_aff_chunk(co_mst_env_t *env, aff_chunk_t *c) {
+static void color_aff_chunk(co_mst_env_t *env, aff_chunk_t *c)
+{
aff_chunk_t *best_chunk = NULL;
int n_nodes = ARR_LEN(c->n);
int best_color = -1;
/**
* Main driver for mst safe coalescing algorithm.
*/
-static int co_solve_heuristic_mst(copy_opt_t *co) {
+static int co_solve_heuristic_mst(copy_opt_t *co)
+{
unsigned n_regs = co->cls->n_regs;
bitset_t *ignore_regs = bitset_alloca(n_regs);
unsigned i, j, k;
};
-void be_init_copyheur4(void) {
+void be_init_copyheur4(void)
+{
lc_opt_entry_t *be_grp = lc_opt_get_grp(firm_opt_get_root(), "be");
lc_opt_entry_t *ra_grp = lc_opt_get_grp(be_grp, "ra");
lc_opt_entry_t *chordal_grp = lc_opt_get_grp(ra_grp, "chordal");
*****************************************************************************/
-size_red_t *new_size_red(copy_opt_t *co) {
+size_red_t *new_size_red(copy_opt_t *co)
+{
size_red_t *res = XMALLOC(size_red_t);
res->co = co;
return 1;
}
-void sr_remove(size_red_t *sr) {
+void sr_remove(size_red_t *sr)
+{
ir_node *irn;
int redo = 1;
const be_ifg_t *ifg = sr->co->cenv->ifg;
}
}
-void sr_reinsert(size_red_t *sr) {
+void sr_reinsert(size_red_t *sr)
+{
coloring_suffix_t *cs;
be_ifg_t *ifg = sr->co->cenv->ifg;
bitset_t *used_cols = bitset_alloca(arch_register_class_n_regs(sr->co->cls));
}
}
-void free_size_red(size_red_t *sr) {
+void free_size_red(size_red_t *sr)
+{
del_pset(sr->all_removed);
obstack_free(&sr->ob, NULL);
free(sr);
#include <stdio.h>
-ilp_env_t *new_ilp_env(copy_opt_t *co, ilp_callback build, ilp_callback apply, void *env) {
+ilp_env_t *new_ilp_env(copy_opt_t *co, ilp_callback build, ilp_callback apply, void *env)
+{
ilp_env_t *res = XMALLOC(ilp_env_t);
res->co = co;
return res;
}
-lpp_sol_state_t ilp_go(ilp_env_t *ienv) {
+lpp_sol_state_t ilp_go(ilp_env_t *ienv)
+{
be_main_env_t *main_env = ienv->co->cenv->birg->main_env;
sr_remove(ienv->sr);
return lpp_get_sol_state(ienv->lp);
}
-void free_ilp_env(ilp_env_t *ienv) {
+void free_ilp_env(ilp_env_t *ienv)
+{
free_size_red(ienv->sr);
free_lpp(ienv->lp);
free(ienv);
#else /* WITH_ILP */
-static inline void only_that_you_can_compile_without_WITH_ILP_defined(void) {
+static inline void only_that_you_can_compile_without_WITH_ILP_defined(void)
+{
}
#endif /* WITH_ILP */
#else /* WITH_ILP */
-static inline void only_that_you_can_compile_without_WITH_ILP_defined(void) {
+static inline void only_that_you_can_compile_without_WITH_ILP_defined(void)
+{
}
#endif /* WITH_ILP */
DEBUG_ONLY(firm_dbg_module_t *dbg;)
} local_env_t;
-static void build_coloring_cstr(ilp_env_t *ienv) {
+static void build_coloring_cstr(ilp_env_t *ienv)
+{
be_ifg_t *ifg = ienv->co->cenv->ifg;
void *iter = be_ifg_nodes_iter_alloca(ifg);
bitset_t *colors;
* by walking over all affinity edges. Graph structure
* does not provide this walker, yet.
*/
-static void build_affinity_cstr(ilp_env_t *ienv) {
+static void build_affinity_cstr(ilp_env_t *ienv)
+{
local_env_t *lenv = ienv->env;
int n_colors = lenv->n_colors;
unit_t *curr;
ir_node *n1, *n2;
} edge_t;
-static int compare_edge_t(const void *k1, const void *k2, size_t size) {
+static int compare_edge_t(const void *k1, const void *k2, size_t size)
+{
const edge_t *e1 = k1;
const edge_t *e2 = k2;
(void) size;
#define HASH_EDGE(e) (hash_irn((e)->n1) ^ hash_irn((e)->n2))
-static inline edge_t *add_edge(set *edges, ir_node *n1, ir_node *n2, int *counter) {
+static inline edge_t *add_edge(set *edges, ir_node *n1, ir_node *n2, int *counter)
+{
edge_t new_edge;
if (PTR_TO_INT(n1) < PTR_TO_INT(n2)) {
return set_insert(edges, &new_edge, sizeof(new_edge), HASH_EDGE(&new_edge));
}
-static inline edge_t *find_edge(set *edges, ir_node *n1, ir_node *n2) {
+static inline edge_t *find_edge(set *edges, ir_node *n1, ir_node *n2)
+{
edge_t new_edge;
if (PTR_TO_INT(n1) < PTR_TO_INT(n2)) {
return set_find(edges, &new_edge, sizeof(new_edge), HASH_EDGE(&new_edge));
}
-static inline void remove_edge(set *edges, ir_node *n1, ir_node *n2, int *counter) {
+static inline void remove_edge(set *edges, ir_node *n1, ir_node *n2, int *counter)
+{
edge_t new_edge, *e;
if (PTR_TO_INT(n1) < PTR_TO_INT(n2)) {
* with affinity edges to all nodes of the clique.
* At most 1 node of the clique can be colored equally with the external node.
*/
-static void build_clique_star_cstr(ilp_env_t *ienv) {
+static void build_clique_star_cstr(ilp_env_t *ienv)
+{
affinity_node_t *aff;
/* for each node with affinity edges */
}
-static void extend_path(ilp_env_t *ienv, pdeq *path, const ir_node *irn) {
+static void extend_path(ilp_env_t *ienv, pdeq *path, const ir_node *irn)
+{
be_ifg_t *ifg = ienv->co->cenv->ifg;
int i, len;
ir_node **curr_path;
* edges in between.
* Then at least one of these affinity edges must break.
*/
-static void build_path_cstr(ilp_env_t *ienv) {
+static void build_path_cstr(ilp_env_t *ienv)
+{
affinity_node_t *aff_info;
/* for each node with affinity edges */
}
}
-static void ilp2_build(ilp_env_t *ienv) {
+static void ilp2_build(ilp_env_t *ienv)
+{
local_env_t *lenv = ienv->env;
int lower_bound;
lpp_set_time_limit(ienv->lp, lenv->time_limit);
}
-static void ilp2_apply(ilp_env_t *ienv) {
+static void ilp2_apply(ilp_env_t *ienv)
+{
local_env_t *lenv = ienv->env;
int i;
BE_REGISTER_MODULE_CONSTRUCTOR(be_init_copyilp2);
-int co_solve_ilp2(copy_opt_t *co) {
+int co_solve_ilp2(copy_opt_t *co)
+{
lpp_sol_state_t sol_state;
ilp_env_t *ienv;
local_env_t my;
#else /* WITH_ILP */
-static inline void only_that_you_can_compile_without_WITH_ILP_defined(void) {
+static inline void only_that_you_can_compile_without_WITH_ILP_defined(void)
+{
}
#endif /* WITH_ILP */
return co;
}
-void free_copy_opt(copy_opt_t *co) {
+void free_copy_opt(copy_opt_t *co)
+{
xfree(co->name);
free(co);
}
return 0;
}
-int co_get_costs_loop_depth(const copy_opt_t *co, ir_node *root, ir_node* arg, int pos) {
+int co_get_costs_loop_depth(const copy_opt_t *co, ir_node *root, ir_node* arg, int pos)
+{
int cost = 0;
ir_loop *loop;
ir_node *root_block = get_nodes_block(root);
return 1+cost;
}
-int co_get_costs_exec_freq(const copy_opt_t *co, ir_node *root, ir_node* arg, int pos) {
+int co_get_costs_exec_freq(const copy_opt_t *co, ir_node *root, ir_node* arg, int pos)
+{
int res;
ir_node *root_bl = get_nodes_block(root);
ir_node *copy_bl = is_Phi(root) ? get_Block_cfgpred_block(root_bl, pos) : root_bl;
}
-int co_get_costs_all_one(const copy_opt_t *co, ir_node *root, ir_node *arg, int pos) {
+int co_get_costs_all_one(const copy_opt_t *co, ir_node *root, ir_node *arg, int pos)
+{
(void) co;
(void) root;
(void) arg;
* Determines a maximum weighted independent set with respect to
* the interference and conflict edges of all nodes in a qnode.
*/
-static int ou_max_ind_set_costs(unit_t *ou) {
+static int ou_max_ind_set_costs(unit_t *ou)
+{
be_chordal_env_t *chordal_env = ou->co->cenv;
ir_node **safe, **unsafe;
int i, o, safe_count, safe_costs, unsafe_count, *unsafe_costs;
#ifdef QUICK_AND_DIRTY_HACK
-static int compare_ous(const void *k1, const void *k2) {
+static int compare_ous(const void *k1, const void *k2)
+{
const unit_t *u1 = *((const unit_t **) k1);
const unit_t *u2 = *((const unit_t **) k2);
int i, o, u1_has_constr, u2_has_constr;
/**
* Sort the ou's according to constraints and their sort_key
*/
-static void co_sort_units(copy_opt_t *co) {
+static void co_sort_units(copy_opt_t *co)
+{
int i, count = 0, costs;
unit_t *ou, **ous;
}
#endif
-void co_build_ou_structure(copy_opt_t *co) {
+void co_build_ou_structure(copy_opt_t *co)
+{
DBG((dbg, LEVEL_1, "\tCollecting optimization units\n"));
INIT_LIST_HEAD(&co->units);
irg_walk_graph(co->irg, co_collect_units, NULL, co);
#endif
}
-void co_free_ou_structure(copy_opt_t *co) {
+void co_free_ou_structure(copy_opt_t *co)
+{
unit_t *curr, *tmp;
ASSERT_OU_AVAIL(co);
list_for_each_entry_safe(unit_t, curr, tmp, &co->units, units) {
/* co_solve_heuristic() is implemented in becopyheur.c */
-int co_get_max_copy_costs(const copy_opt_t *co) {
+int co_get_max_copy_costs(const copy_opt_t *co)
+{
int i, res = 0;
unit_t *curr;
return res;
}
-int co_get_inevit_copy_costs(const copy_opt_t *co) {
+int co_get_inevit_copy_costs(const copy_opt_t *co)
+{
int res = 0;
unit_t *curr;
return res;
}
-int co_get_copy_costs(const copy_opt_t *co) {
+int co_get_copy_costs(const copy_opt_t *co)
+{
int i, res = 0;
unit_t *curr;
return res;
}
-int co_get_lower_bound(const copy_opt_t *co) {
+int co_get_lower_bound(const copy_opt_t *co)
+{
int res = 0;
unit_t *curr;
|_| |___/
******************************************************************************/
-static int compare_affinity_node_t(const void *k1, const void *k2, size_t size) {
+static int compare_affinity_node_t(const void *k1, const void *k2, size_t size)
+{
const affinity_node_t *n1 = k1;
const affinity_node_t *n2 = k2;
(void) size;
return (n1->irn != n2->irn);
}
-static void add_edge(copy_opt_t *co, ir_node *n1, ir_node *n2, int costs) {
+static void add_edge(copy_opt_t *co, ir_node *n1, ir_node *n2, int costs)
+{
affinity_node_t new_node, *node;
neighb_t *nbr;
int allocnew = 1;
nbr->costs += costs;
}
-static inline void add_edges(copy_opt_t *co, ir_node *n1, ir_node *n2, int costs) {
+static inline void add_edges(copy_opt_t *co, ir_node *n1, ir_node *n2, int costs)
+{
if (! be_ifg_connected(co->cenv->ifg, n1, n2)) {
add_edge(co, n1, n2, costs);
add_edge(co, n2, n1, costs);
}
}
-static void build_graph_walker(ir_node *irn, void *env) {
+static void build_graph_walker(ir_node *irn, void *env)
+{
const arch_register_req_t *req = arch_get_register_req_out(irn);
copy_opt_t *co = env;
int pos, max;
}
}
-void co_build_graph_structure(copy_opt_t *co) {
+void co_build_graph_structure(copy_opt_t *co)
+{
obstack_init(&co->obst);
co->nodes = new_set(compare_affinity_node_t, 32);
irg_walk_graph(co->irg, build_graph_walker, NULL, co);
}
-void co_free_graph_structure(copy_opt_t *co) {
+void co_free_graph_structure(copy_opt_t *co)
+{
ASSERT_GS_AVAIL(co);
del_set(co->nodes);
/* co_solve_ilp1() co_solve_ilp2() are implemented in becopyilpX.c */
-int co_gs_is_optimizable(copy_opt_t *co, ir_node *irn) {
+int co_gs_is_optimizable(copy_opt_t *co, ir_node *irn)
+{
affinity_node_t new_node, *n;
ASSERT_GS_AVAIL(co);
static ir_nodeset_t *all_phi_nodes;
static ir_nodeset_t *all_copy_nodes;
-void be_init_copystat(void) {
+void be_init_copystat(void)
+{
FIRM_DBG_REGISTER(dbg, "firm.be.copystat");
all_phi_nodes = ir_nodeset_new(64);
}
BE_REGISTER_MODULE_CONSTRUCTOR(be_init_copystat);
-void be_quit_copystat(void) {
+void be_quit_copystat(void)
+{
ir_nodeset_del(all_phi_nodes);
ir_nodeset_del(all_copy_nodes);
}
* @return 1 if the block at pos @p pos removed a critical edge
* 0 else
*/
-static inline int was_edge_critical(const ir_node *bl, int pos) {
+static inline int was_edge_critical(const ir_node *bl, int pos)
+{
const ir_edge_t *edge;
const ir_node *bl_at_pos, *bl_before;
assert(is_Block(bl));
return get_block_succ_next(bl_before, edge) ? 1 : 0;
}
-void copystat_add_max_costs(int costs) {
+void copystat_add_max_costs(int costs)
+{
curr_vals[I_COPIES_MAX] += costs;
}
-void copystat_add_inevit_costs(int costs) {
+void copystat_add_inevit_costs(int costs)
+{
curr_vals[I_COPIES_IF] += costs;
}
-void copystat_add_init_costs(int costs) {
+void copystat_add_init_costs(int costs)
+{
curr_vals[I_COPIES_INIT] += costs;
}
-void copystat_add_heur_costs(int costs) {
+void copystat_add_heur_costs(int costs)
+{
curr_vals[I_COPIES_HEUR] += costs;
}
-void copystat_add_ilp_5_sec_costs(int costs) {
+void copystat_add_ilp_5_sec_costs(int costs)
+{
curr_vals[I_COPIES_5SEC] += costs;
}
-void copystat_add_ilp_30_sec_costs(int costs) {
+void copystat_add_ilp_30_sec_costs(int costs)
+{
curr_vals[I_COPIES_30SEC] += costs;
}
-void copystat_add_opt_costs(int costs) {
+void copystat_add_opt_costs(int costs)
+{
curr_vals[I_COPIES_OPT] += costs;
}
-void copystat_add_heur_time(int time) {
+void copystat_add_heur_time(int time)
+{
curr_vals[I_HEUR_TIME] += time;
}
#ifdef WITH_ILP
-void copystat_add_ilp_time(int time) {
+void copystat_add_ilp_time(int time)
+{
curr_vals[I_ILP_TIME] += time;
}
-void copystat_add_ilp_vars(int vars) {
+void copystat_add_ilp_vars(int vars)
+{
curr_vals[I_ILP_VARS] += vars;
}
-void copystat_add_ilp_csts(int csts) {
+void copystat_add_ilp_csts(int csts)
+{
curr_vals[I_ILP_CSTR] += csts;
}
-void copystat_add_ilp_iter(int iters) {
+void copystat_add_ilp_iter(int iters)
+{
curr_vals[I_ILP_ITER] += iters;
}
#endif /* WITH_ILP */
-void copystat_dump(ir_graph *irg) {
+void copystat_dump(ir_graph *irg)
+{
int i;
char buf[1024];
FILE *out;
fclose(out);
}
-void copystat_dump_pretty(ir_graph *irg) {
+void copystat_dump_pretty(ir_graph *irg)
+{
int i;
char buf[1024];
FILE *out;
static dbg_handle *handle = NULL;
-void be_dbg_close(void) {
+void be_dbg_close(void)
+{
if (handle->ops->close)
handle->ops->close(handle);
}
-void be_dbg_so(const char *filename) {
+void be_dbg_so(const char *filename)
+{
if (handle->ops->so)
handle->ops->so(handle, filename);
}
-void be_dbg_main_program(void) {
+void be_dbg_main_program(void)
+{
if (handle->ops->main_program)
handle->ops->main_program(handle);
}
-void be_dbg_method_begin(ir_entity *ent, const be_stack_layout_t *layout) {
+void be_dbg_method_begin(ir_entity *ent, const be_stack_layout_t *layout)
+{
if (handle->ops->method_begin)
handle->ops->method_begin(handle, ent, layout);
}
-void be_dbg_method_end(void) {
+void be_dbg_method_end(void)
+{
if (handle->ops->method_end)
handle->ops->method_end(handle);
}
-void be_dbg_types(void) {
+void be_dbg_types(void)
+{
if (handle->ops->types)
handle->ops->types(handle);
}
-void be_dbg_variable(ir_entity *ent) {
+void be_dbg_variable(ir_entity *ent)
+{
if (handle->ops->variable)
handle->ops->variable(handle, ent);
}
-void be_dbg_set_dbg_info(dbg_info *dbgi) {
+void be_dbg_set_dbg_info(dbg_info *dbgi)
+{
if (handle->ops->set_dbg_info)
handle->ops->set_dbg_info(handle, dbgi);
}
static be_module_list_entry_t *dbgout_modules = NULL;
static be_create_dbgout_module_func selected_dbgout_module = NULL;
-void be_dbg_open(void) {
+void be_dbg_open(void)
+{
handle = selected_dbgout_module();
}
return &null_handle;
}
-void be_init_dbgout(void) {
+void be_init_dbgout(void)
+{
lc_opt_entry_t *be_grp = lc_opt_get_grp(firm_opt_get_root(), "be");
be_add_module_list_opt(be_grp, "debuginfo", "debug info format",
}
}
-static int is_modify_flags(ir_node *node) {
+static int is_modify_flags(ir_node *node)
+{
int i, arity;
if (arch_irn_is(node, modify_flags))
}
}
-static void find_nodes(const void *self, void *iter) {
+static void find_nodes(const void *self, void *iter)
+{
const ifg_std_t *ifg = self;
nodes_iter_t *it = iter;
it->valid = 0;
}
-static ir_node *get_next_neighbour(adj_iter_t *it) {
+static ir_node *get_next_neighbour(adj_iter_t *it)
+{
ir_node *res = ir_nodeset_iterator_next(&it->iter);
if (res == NULL) {
pset *living;
} cliques_iter_t;
-static inline void free_clique_iter(cliques_iter_t *it) {
+static inline void free_clique_iter(cliques_iter_t *it)
+{
it->n_blocks = -1;
obstack_free(&it->ob, NULL);
del_pset(it->living);
}
-static void get_blocks_dom_order(ir_node *blk, void *env) {
+static void get_blocks_dom_order(ir_node *blk, void *env)
+{
cliques_iter_t *it = env;
obstack_ptr_grow(&it->ob, blk);
}
* NOTE: Be careful when changing this function!
* First understand the control flow of consecutive calls.
*/
-static inline int get_next_clique(cliques_iter_t *it) {
+static inline int get_next_clique(cliques_iter_t *it)
+{
/* continue in the block we left the last time */
for (; it->blk < it->n_blocks; it->blk++) {
* Check if irn is a Proj, which has no execution units assigned.
* @return 1 if irn is a Proj having no execution units assigned, 0 otherwise
*/
-static inline int is_normal_Proj(const arch_env_t *env, const ir_node *irn) {
+static inline int is_normal_Proj(const arch_env_t *env, const ir_node *irn)
+{
return is_Proj(irn) && (arch_env_get_allowed_execution_units(env, irn) == NULL);
}
* Skips normal Projs.
* @return predecessor if irn is a normal Proj, otherwise irn.
*/
-static inline ir_node *skip_normal_Proj(const arch_env_t *env, ir_node *irn) {
+static inline ir_node *skip_normal_Proj(const arch_env_t *env, ir_node *irn)
+{
if (is_normal_Proj(env, irn))
return get_Proj_pred(irn);
return irn;
}
-static inline int fixed_latency(const ilp_sched_selector_t *sel, ir_node *irn, void *env) {
+static inline int fixed_latency(const ilp_sched_selector_t *sel, ir_node *irn, void *env)
+{
unsigned lat = be_ilp_sched_latency(sel, irn, env);
if (lat == 0 && ! is_Proj(irn) && ! be_is_Keep(irn))
lat = 1;
return lat;
}
-static int cmp_live_in_nodes(const void *a, const void *b) {
+static int cmp_live_in_nodes(const void *a, const void *b)
+{
const ilp_livein_node_t *n1 = a;
const ilp_livein_node_t *n2 = b;
/**
* Compare scheduling time steps of two be_ilpsched_irn's.
*/
-static int cmp_ilpsched_irn(const void *a, const void *b) {
+static int cmp_ilpsched_irn(const void *a, const void *b)
+{
be_ilpsched_irn_t *n1 = *(be_ilpsched_irn_t **)a;
be_ilpsched_irn_t *n2 = *(be_ilpsched_irn_t **)b;
ilpsched_node_attr_t *n1_a = get_ilpsched_node_attr(n1);
/**
* In case there is no phase information for irn, initialize it.
*/
-static void *init_ilpsched_irn(ir_phase *ph, const ir_node *irn, void *old) {
+static void *init_ilpsched_irn(ir_phase *ph, const ir_node *irn, void *old)
+{
be_ilpsched_irn_t *res = old ? old : phase_alloc(ph, sizeof(res[0]));
if (res == old) {
/**
* Assign a per block unique number to each node.
*/
-static void build_block_idx(ir_node *irn, void *walk_env) {
+static void build_block_idx(ir_node *irn, void *walk_env)
+{
be_ilpsched_env_t *env = walk_env;
be_ilpsched_irn_t *node, *block_node;
ilpsched_node_attr_t *na;
/**
* Add all nodes having no user in current block to last_nodes list.
*/
-static void collect_alap_root_nodes(ir_node *irn, void *walk_env) {
+static void collect_alap_root_nodes(ir_node *irn, void *walk_env)
+{
ir_node *block;
const ir_edge_t *edge;
be_ilpsched_irn_t *block_node, *node;
/**
* Calculate the ASAP scheduling step for current irn.
*/
-static void calculate_irn_asap(ir_node *irn, void *walk_env) {
+static void calculate_irn_asap(ir_node *irn, void *walk_env)
+{
be_ilpsched_env_t *env = walk_env;
int i;
ir_node *block;
* Calculate the ALAP scheduling step of all irns in current block.
* Depends on max_steps being calculated.
*/
-static void calculate_block_alap(ir_node *block, void *walk_env) {
+static void calculate_block_alap(ir_node *block, void *walk_env)
+{
be_ilpsched_env_t *env = walk_env;
be_ilpsched_irn_t *block_node = get_ilpsched_irn(env, block);
ilpsched_block_attr_t *ba = get_ilpsched_block_attr(block_node);
/**
* Free list of root nodes and the set of live-in nodes.
*/
-static void clear_unwanted_data(ir_node *block, void *walk_env) {
+static void clear_unwanted_data(ir_node *block, void *walk_env)
+{
be_ilpsched_env_t *env = walk_env;
be_ilpsched_irn_t *block_node = get_ilpsched_irn(env, block);
ilpsched_block_attr_t *ba = get_ilpsched_block_attr(block_node);
* Refine the {ASAP(n), ALAP(n)} interval for the nodes.
* Set the ASAP/ALAP times of Projs and Keeps to their ancestor ones.
*/
-static void refine_asap_alap_times(ir_node *irn, void *walk_env) {
+static void refine_asap_alap_times(ir_node *irn, void *walk_env)
+{
be_ilpsched_env_t *env = walk_env;
ir_node *pred = irn;
be_ilpsched_irn_t *node, *pred_node;
*
*******************************************/
-static inline void check_for_keeps(waitq *keeps, const ir_node *block, const ir_node *irn) {
+static inline void check_for_keeps(waitq *keeps, const ir_node *block, const ir_node *irn)
+{
const ir_edge_t *edge;
(void) block;
* Adds a node, its Projs (in case of mode_T nodes) and
* its Keeps to the schedule.
*/
-static void add_to_sched(be_ilpsched_env_t *env, const ir_node *block, const ir_node *irn, unsigned cycle) {
+static void add_to_sched(be_ilpsched_env_t *env, const ir_node *block, const ir_node *irn, unsigned cycle)
+{
const ir_edge_t *edge;
waitq *keeps = new_waitq();
/**
* Schedule all nodes in the given block, according to the ILP solution.
*/
-static void apply_solution(be_ilpsched_env_t *env, lpp_t *lpp, ir_node *block) {
+static void apply_solution(be_ilpsched_env_t *env, lpp_t *lpp, ir_node *block)
+{
be_ilpsched_irn_t *block_node = get_ilpsched_irn(env, block);
ilpsched_block_attr_t *ba = get_ilpsched_block_attr(block_node);
be_ilpsched_irn_t **sched_nodes;
/**
* Check if node can be executed on given unit type.
*/
-static inline int is_valid_unit_type_for_node(const be_execution_unit_type_t *tp, be_ilpsched_irn_t *node) {
+static inline int is_valid_unit_type_for_node(const be_execution_unit_type_t *tp, be_ilpsched_irn_t *node)
+{
int i;
ilpsched_node_attr_t *na = get_ilpsched_node_attr(node);
*
************************************************/
-static int be_ilpsched_set_type_info(be_ilpsched_env_t *env, ir_node *irn, struct obstack *obst) {
+static int be_ilpsched_set_type_info(be_ilpsched_env_t *env, ir_node *irn, struct obstack *obst)
+{
const be_execution_unit_t ***execunits = arch_env_get_allowed_execution_units(env->arch_env, irn);
unsigned n_unit_types = 0;
be_ilpsched_irn_t *node;
* Returns the largest alap time of a user of @p irn.
* The user must be in block @p block.
*/
-static unsigned be_ilpsched_get_max_alap_user(be_ilpsched_env_t *env, const ir_node *irn, const ir_node *block) {
+static unsigned be_ilpsched_get_max_alap_user(be_ilpsched_env_t *env, const ir_node *irn, const ir_node *block)
+{
const ir_edge_t *edge;
unsigned max_alap = 0;
* ==>> These variables represent the register pressure
*
*/
-static void create_variables(be_ilpsched_env_t *env, lpp_t *lpp, be_ilpsched_irn_t *block_node, struct obstack *var_obst) {
+static void create_variables(be_ilpsched_env_t *env, lpp_t *lpp, be_ilpsched_irn_t *block_node, struct obstack *var_obst)
+{
char buf[1024];
ir_node *irn;
unsigned num_block_var, num_nodes;
* Collect all operands and nodes @p irn depends on.
* If there is a Proj within the dependencies, all other Projs of the parent node are added as well.
*/
-static void sta_collect_in_deps(ir_node *irn, ir_nodeset_t *deps) {
+static void sta_collect_in_deps(ir_node *irn, ir_nodeset_t *deps)
+{
int i;
for (i = get_irn_ins_or_deps(irn) - 1; i >= 0; --i) {
* - the precedence constraints:
* assure that no data dependencies are violated
*/
-static void create_assignment_and_precedence_constraints(be_ilpsched_env_t *env, lpp_t *lpp, be_ilpsched_irn_t *block_node) {
+static void create_assignment_and_precedence_constraints(be_ilpsched_env_t *env, lpp_t *lpp, be_ilpsched_irn_t *block_node)
+{
unsigned num_cst_assign, num_cst_prec, num_cst_dead;
char buf[1024];
ir_node *irn;
* - assure that for each time step no more instructions are scheduled
* to the same unit types as units of this type are available
*/
-static void create_ressource_constraints(be_ilpsched_env_t *env, lpp_t *lpp, be_ilpsched_irn_t *block_node) {
+static void create_ressource_constraints(be_ilpsched_env_t *env, lpp_t *lpp, be_ilpsched_irn_t *block_node)
+{
int glob_type_idx;
char buf[1024];
unsigned num_cst_resrc = 0;
* - assure, at most bundle_size * bundles_per_cycle instructions
* can be started at a certain point.
*/
-static void create_bundle_constraints(be_ilpsched_env_t *env, lpp_t *lpp, be_ilpsched_irn_t *block_node) {
+static void create_bundle_constraints(be_ilpsched_env_t *env, lpp_t *lpp, be_ilpsched_irn_t *block_node)
+{
char buf[1024];
unsigned t;
unsigned num_cst_bundle = 0;
* Create ILP alive nodes constraints:
* - set variable a_{nt}^k to 1 if nodes n is alive at step t on unit k
*/
-static void create_alive_nodes_constraint(be_ilpsched_env_t *env, lpp_t *lpp, be_ilpsched_irn_t *block_node) {
+static void create_alive_nodes_constraint(be_ilpsched_env_t *env, lpp_t *lpp, be_ilpsched_irn_t *block_node)
+{
char buf[1024];
ir_node *irn;
unsigned num_cst = 0;
* Create ILP alive nodes constraints for live-in nodes:
* - set variable a_{nt}^k to 1 if nodes n is alive at step t on unit k
*/
-static void create_alive_livein_nodes_constraint(be_ilpsched_env_t *env, lpp_t *lpp, be_ilpsched_irn_t *block_node) {
+static void create_alive_livein_nodes_constraint(be_ilpsched_env_t *env, lpp_t *lpp, be_ilpsched_irn_t *block_node)
+{
char buf[1024];
ilp_livein_node_t *livein;
unsigned num_cst = 0;
* - add additional costs to objective function if a node is scheduled
* on a unit although all units of this type are currently occupied
*/
-static void create_pressure_alive_constraint(be_ilpsched_env_t *env, lpp_t *lpp, be_ilpsched_irn_t *block_node) {
+static void create_pressure_alive_constraint(be_ilpsched_env_t *env, lpp_t *lpp, be_ilpsched_irn_t *block_node)
+{
char buf[1024];
ir_node *cur_irn;
unsigned num_cst = 0;
* Create ILP branch constraints:
* Assure that all nodes are scheduled prior to the cfg op.
*/
-static void create_branch_constraint(be_ilpsched_env_t *env, lpp_t *lpp, be_ilpsched_irn_t *block_node) {
+static void create_branch_constraint(be_ilpsched_env_t *env, lpp_t *lpp, be_ilpsched_irn_t *block_node)
+{
char buf[1024];
ir_node *cur_irn, *cfop;
unsigned num_cst = 0;
}
#if 0
-static void create_proj_keep_constraints(be_ilpsched_env_t *env, lpp_t *lpp, be_ilpsched_irn_t *block_node) {
+static void create_proj_keep_constraints(be_ilpsched_env_t *env, lpp_t *lpp, be_ilpsched_irn_t *block_node)
+{
char buf[1024];
ir_node *irn;
unsigned num_cst = 0;
/**
* Create the ilp (add variables, build constraints, solve, build schedule from solution).
*/
-static void create_ilp(ir_node *block, void *walk_env) {
+static void create_ilp(ir_node *block, void *walk_env)
+{
be_ilpsched_env_t *env = walk_env;
be_ilpsched_irn_t *block_node = get_ilpsched_irn(env, block);
ilpsched_block_attr_t *ba = get_ilpsched_block_attr(block_node);
/**
* Perform ILP scheduling on the given irg.
*/
-void be_ilp_sched(const be_irg_t *birg, be_options_t *be_opts) {
+void be_ilp_sched(const be_irg_t *birg, be_options_t *be_opts)
+{
be_ilpsched_env_t env;
const char *name = "be ilp scheduling";
ir_graph *irg = be_get_birg_irg(birg);
* @param insn the be_insn that is build
* @param mach_op the machine operand for which uses are added
*/
-static void add_machine_operands(const be_insn_env_t *env, be_insn_t *insn, ir_node *mach_op) {
+static void add_machine_operands(const be_insn_env_t *env, be_insn_t *insn, ir_node *mach_op)
+{
struct obstack *obst = env->obst;
int i, n;
/**
* This function searches linearly for the node in the array.
*/
-static inline unsigned _be_liveness_bsearch(struct _be_lv_info_t *arr, unsigned idx) {
+static inline unsigned _be_liveness_bsearch(struct _be_lv_info_t *arr, unsigned idx)
+{
unsigned n = arr[0].u.head.n_members;
unsigned i;
be_irg_t *birg;
};
-static int cmp_loop_info(const void *a, const void *b, size_t size) {
+static int cmp_loop_info(const void *a, const void *b, size_t size)
+{
const be_loop_info_t *i1 = a;
const be_loop_info_t *i2 = b;
(void) size;
* @param cls The register class to compute pressure for.
* @return The highest register pressure in the given loop.
*/
-static unsigned be_compute_loop_pressure(be_loopana_t *loop_ana, ir_loop *loop, const arch_register_class_t *cls) {
+static unsigned be_compute_loop_pressure(be_loopana_t *loop_ana, ir_loop *loop, const arch_register_class_t *cls)
+{
int i, max;
unsigned pressure;
be_loop_info_t *entry, key;
* Returns the computed register pressure for the given class and loop.
* @return The pressure or INT_MAX if not found
*/
-unsigned be_get_loop_pressure(be_loopana_t *loop_ana, const arch_register_class_t *cls, ir_loop *loop) {
+unsigned be_get_loop_pressure(be_loopana_t *loop_ana, const arch_register_class_t *cls, ir_loop *loop)
+{
unsigned pressure = INT_MAX;
be_loop_info_t *entry, key;
/**
* Frees the loop analysis object.
*/
-void be_free_loop_pressure(be_loopana_t *loop_ana) {
+void be_free_loop_pressure(be_loopana_t *loop_ana)
+{
del_set(loop_ana->data);
xfree(loop_ana);
}
* @param reg The register to look for
* @return The corresponding node or NULL if not found
*/
-static ir_node *get_node_for_in_register(reg_pair_t *pairs, int n, const arch_register_t *reg) {
+static ir_node *get_node_for_in_register(reg_pair_t *pairs, int n, const arch_register_t *reg)
+{
int i;
for (i = 0; i < n; i++) {
* @param reg The register to look for
* @return The corresponding node or NULL if not found
*/
-static ir_node *get_node_for_out_register(reg_pair_t *pairs, int n, const arch_register_t *reg) {
+static ir_node *get_node_for_out_register(reg_pair_t *pairs, int n, const arch_register_t *reg)
+{
int i;
for (i = 0; i < n; i++) {
*
* @return The corresponding index in pairs or -1 if not found
*/
-static int get_pairidx_for_in_regidx(reg_pair_t *pairs, int n, unsigned reg_idx) {
+static int get_pairidx_for_in_regidx(reg_pair_t *pairs, int n, unsigned reg_idx)
+{
int i;
for (i = 0; i < n; i++) {
*
* @return The corresponding index in pairs or -1 if not found
*/
-static int get_pairidx_for_out_regidx(reg_pair_t *pairs, int n, unsigned reg_idx) {
+static int get_pairidx_for_out_regidx(reg_pair_t *pairs, int n, unsigned reg_idx)
+{
int i;
for (i = 0; i < n; i++) {
-static int has_irn_users(const ir_node *irn) {
+static int has_irn_users(const ir_node *irn)
+{
return get_irn_out_edge_first_kind(irn, EDGE_KIND_NORMAL) != 0;
}
}
}
-static void gen_assure_different_pattern(ir_node *irn, ir_node *other_different, constraint_env_t *env) {
+static void gen_assure_different_pattern(ir_node *irn, ir_node *other_different, constraint_env_t *env)
+{
ir_graph *irg;
ir_nodemap_t *op_set;
ir_node *block;
* @param skipped_irn if irn is a Proj node, its predecessor, else irn
* @param env the constraint environment
*/
-static void assure_different_constraints(ir_node *irn, ir_node *skipped_irn, constraint_env_t *env) {
+static void assure_different_constraints(ir_node *irn, ir_node *skipped_irn, constraint_env_t *env)
+{
const arch_register_req_t *req = arch_get_register_req_out(irn);
if (arch_register_req_is(req, must_be_different)) {
* @param block The block to be checked
* @param walk_env The walker environment
*/
-static void assure_constraints_walker(ir_node *block, void *walk_env) {
+static void assure_constraints_walker(ir_node *block, void *walk_env)
+{
ir_node *irn;
sched_foreach_reverse(block, irn) {
* Melt all copykeeps pointing to the same node
* (or Projs of the same node), copying the same operand.
*/
-static void melt_copykeeps(constraint_env_t *cenv) {
+static void melt_copykeeps(constraint_env_t *cenv)
+{
ir_nodemap_iterator_t map_iter;
ir_nodemap_entry_t map_entry;
*
* @param birg The birg structure containing the irg
*/
-void assure_constraints(be_irg_t *birg) {
+void assure_constraints(be_irg_t *birg)
+{
ir_graph *irg = be_get_birg_irg(birg);
constraint_env_t cenv;
ir_nodemap_iterator_t map_iter;
* @param birg The birg object
* @param do_copy 1 == resolve cycles with a free reg if available
*/
-void lower_nodes_after_ra(be_irg_t *birg, int do_copy) {
+void lower_nodes_after_ra(be_irg_t *birg, int do_copy)
+{
lower_env_t env;
ir_graph *irg;
/**
* Initialize generic dummy unit.
*/
-void be_machine_init_dummy_unit(void) {
+void be_machine_init_dummy_unit(void)
+{
be_machine_execution_units_DUMMY[0].name = "GENERIC_DUMMY_UNIT";
be_machine_execution_units_DUMMY[0].tp = &be_machine_execution_unit_types[0];
}
/**
* Returns the generic dummy unit.
*/
-be_execution_unit_t *be_machine_get_dummy_unit(void) {
+be_execution_unit_t *be_machine_get_dummy_unit(void)
+{
return &be_machine_execution_units_DUMMY[0];
}
/**
* Check if given unit is the generic dummy unit.
*/
-int be_machine_is_dummy_unit(const be_execution_unit_t *unit) {
+int be_machine_is_dummy_unit(const be_execution_unit_t *unit)
+{
return unit == &be_machine_execution_units_DUMMY[0];
}
/**
* Check if given unit is the generic dummy unit type.
*/
-int be_machine_is_dummy_unit_type(const be_execution_unit_type_t *tp) {
+int be_machine_is_dummy_unit_type(const be_execution_unit_type_t *tp)
+{
return tp == &be_machine_execution_unit_types[0];
}
}
/* Parse one argument. */
-int be_parse_arg(const char *arg) {
+int be_parse_arg(const char *arg)
+{
lc_opt_entry_t *be_grp = lc_opt_get_grp(firm_opt_get_root(), "be");
if (strcmp(arg, "help") == 0 || (arg[0] == '?' && arg[1] == '\0')) {
lc_opt_print_help_for_entry(be_grp, '-', stdout);
};
/* Perform schedule verification if requested. */
-static void be_sched_vrfy(be_irg_t *birg, int vrfy_opt) {
+static void be_sched_vrfy(be_irg_t *birg, int vrfy_opt)
+{
if (vrfy_opt == BE_VRFY_WARN) {
be_verify_schedule(birg);
} else if (vrfy_opt == BE_VRFY_ASSERT) {
*
* @return zero if both nodes have identically attributes
*/
-static int IncSP_cmp_attr(ir_node *a, ir_node *b) {
+static int IncSP_cmp_attr(ir_node *a, ir_node *b)
+{
const be_incsp_attr_t *a_attr = get_irn_attr_const(a);
const be_incsp_attr_t *b_attr = get_irn_attr_const(b);
return res;
}
-ir_node *be_get_Copy_op(const ir_node *cpy) {
+ir_node *be_get_Copy_op(const ir_node *cpy)
+{
return get_irn_n(cpy, be_pos_Copy_op);
}
-void be_set_Copy_op(ir_node *cpy, ir_node *op) {
+void be_set_Copy_op(ir_node *cpy, ir_node *op)
+{
set_irn_n(cpy, be_pos_Copy_op, op);
}
}
/* Gets the call entity or NULL if this is no static call. */
-ir_entity *be_Call_get_entity(const ir_node *call) {
+ir_entity *be_Call_get_entity(const ir_node *call)
+{
const be_call_attr_t *a = get_irn_attr_const(call);
assert(be_is_Call(call));
return a->ent;
}
/* Sets the call entity. */
-void be_Call_set_entity(ir_node *call, ir_entity *ent) {
+void be_Call_set_entity(ir_node *call, ir_entity *ent)
+{
be_call_attr_t *a = get_irn_attr(call);
assert(be_is_Call(call));
a->ent = ent;
}
/* Gets the call type. */
-ir_type *be_Call_get_type(ir_node *call) {
+ir_type *be_Call_get_type(ir_node *call)
+{
const be_call_attr_t *a = get_irn_attr_const(call);
assert(be_is_Call(call));
return a->call_tp;
}
/* Sets the call type. */
-void be_Call_set_type(ir_node *call, ir_type *call_tp) {
+void be_Call_set_type(ir_node *call, ir_type *call_tp)
+{
be_call_attr_t *a = get_irn_attr(call);
assert(be_is_Call(call));
a->call_tp = call_tp;
}
-void be_Call_set_pop(ir_node *call, unsigned pop) {
+void be_Call_set_pop(ir_node *call, unsigned pop)
+{
be_call_attr_t *a = get_irn_attr(call);
a->pop = pop;
}
-unsigned be_Call_get_pop(const ir_node *call) {
+unsigned be_Call_get_pop(const ir_node *call)
+{
const be_call_attr_t *a = get_irn_attr_const(call);
return a->pop;
}
}
/* Returns the number of real return values */
-int be_Return_get_n_rets(const ir_node *ret) {
+int be_Return_get_n_rets(const ir_node *ret)
+{
const be_return_attr_t *a = get_irn_generic_attr_const(ret);
return a->num_ret_vals;
}
/* return the number of bytes that should be popped from stack when executing the Return. */
-unsigned be_Return_get_pop(const ir_node *ret) {
+unsigned be_Return_get_pop(const ir_node *ret)
+{
const be_return_attr_t *a = get_irn_generic_attr_const(ret);
return a->pop;
}
/* return non-zero, if number of popped bytes must be always emitted */
-int be_Return_get_emit_pop(const ir_node *ret) {
+int be_Return_get_emit_pop(const ir_node *ret)
+{
const be_return_attr_t *a = get_irn_generic_attr_const(ret);
return a->emit_pop;
}
/* set whether the number of popped bytes must always be emitted */
-void be_Return_set_emit_pop(ir_node *ret, int emit_pop) {
+void be_Return_set_emit_pop(ir_node *ret, int emit_pop)
+{
be_return_attr_t *a = get_irn_generic_attr(ret);
a->emit_pop = emit_pop;
}
-int be_Return_append_node(ir_node *ret, ir_node *node) {
+int be_Return_append_node(ir_node *ret, ir_node *node)
+{
int pos;
pos = add_irn_n(ret, node);
return optimize_node(irn);
}
-ir_node *be_get_FrameAddr_frame(const ir_node *node) {
+ir_node *be_get_FrameAddr_frame(const ir_node *node)
+{
assert(be_is_FrameAddr(node));
return get_irn_n(node, be_pos_FrameAddr_ptr);
}
return be_new_CopyKeep(cls, bl, src, 1, &keep, mode);
}
-ir_node *be_get_CopyKeep_op(const ir_node *cpy) {
+ir_node *be_get_CopyKeep_op(const ir_node *cpy)
+{
return get_irn_n(cpy, be_pos_CopyKeep_op);
}
-void be_set_CopyKeep_op(ir_node *cpy, ir_node *op) {
+void be_set_CopyKeep_op(ir_node *cpy, ir_node *op)
+{
set_irn_n(cpy, be_pos_CopyKeep_op, op);
}
be_set_constr_out(irn, pos, cls->class_req);
}
-ir_node *be_get_IncSP_pred(ir_node *irn) {
+ir_node *be_get_IncSP_pred(ir_node *irn)
+{
assert(be_is_IncSP(irn));
return get_irn_n(irn, 0);
}
-void be_set_IncSP_pred(ir_node *incsp, ir_node *pred) {
+void be_set_IncSP_pred(ir_node *incsp, ir_node *pred)
+{
assert(be_is_IncSP(incsp));
set_irn_n(incsp, 0, pred);
}
#endif
-static void create_pbqp_node(be_pbqp_alloc_env_t *pbqp_alloc_env, ir_node *irn) {
+static void create_pbqp_node(be_pbqp_alloc_env_t *pbqp_alloc_env, ir_node *irn)
+{
const arch_register_class_t *cls = pbqp_alloc_env->cls;
pbqp *pbqp_inst = pbqp_alloc_env->pbqp_inst;
bitset_t *ignored_regs = pbqp_alloc_env->ignored_regs;
/**
* Walk through the block schedule and skip all barrier nodes.
*/
-static void skip_barrier(ir_node *ret_blk, ir_graph *irg) {
+static void skip_barrier(ir_node *ret_blk, ir_graph *irg)
+{
ir_node *irn;
sched_foreach_reverse(ret_blk, irn) {
/**
* Kill the Barrier nodes for better peephole optimization.
*/
-static void kill_barriers(ir_graph *irg) {
+static void kill_barriers(ir_graph *irg)
+{
ir_node *end_blk = get_irg_end_block(irg);
ir_node *start_blk;
int i;
return arch_irn_consider_in_reg_alloc(ra->cls, irn);
}
-static inline int regpressure(pset *live) {
+static inline int regpressure(pset *live)
+{
int pressure = pset_count(live);
return MIN(pressure, MAXPRESSURE);
}
return is_cfop(irn);
}
-int sched_skip_phi_predicator(const ir_node *irn, void *data) {
+int sched_skip_phi_predicator(const ir_node *irn, void *data)
+{
(void) data;
return is_Phi(irn);
}
return 1;
}
-void dump_ir_block_graph_mris(mris_env_t *env, const char *suffix) {
+void dump_ir_block_graph_mris(mris_env_t *env, const char *suffix)
+{
DUMP_NODE_EDGE_FUNC old = get_dump_node_edge_hook();
dump_consts_local(0);
/**
* Acquire opcodes if needed and create source and sink nodes.
*/
-static void init_rss_special_nodes(ir_graph *irg) {
+static void init_rss_special_nodes(ir_graph *irg)
+{
ir_node *block;
if (op_rss_Source == NULL) {
_sink = new_ir_node(NULL, irg, block, op_rss_Sink, mode_ANY, 0, NULL);
}
-static int cmp_int(const void *a, const void *b) {
+static int cmp_int(const void *a, const void *b)
+{
const int *i1 = a;
const int *i2 = b;
return QSORT_CMP(*i1, *i2);
}
-static int cmp_child_costs(const void *a, const void *b) {
+static int cmp_child_costs(const void *a, const void *b)
+{
const child_t *c1 = a;
const child_t *c2 = b;
return QSORT_CMP(c1->cost, c2->cost);
}
-static int cmp_irn_idx(const void *a, const void *b) {
+static int cmp_irn_idx(const void *a, const void *b)
+{
const ir_node *n1 = *(ir_node **)a;
const ir_node *n2 = *(ir_node **)b;
return QSORT_CMP(get_irn_idx(n1), get_irn_idx(n2));
}
-static int cmp_rss_edges(const void *a, const void *b) {
+static int cmp_rss_edges(const void *a, const void *b)
+{
const rss_edge_t *e1 = a;
const rss_edge_t *e2 = b;
return (e1->src != e2->src) || (e1->tgt != e2->tgt);
}
-static int bsearch_for_index(int key, int *arr, size_t len, int force) {
+static int bsearch_for_index(int key, int *arr, size_t len, int force)
+{
int left = 0;
int right = len;
return -1;
}
-static const ir_node **build_sorted_array_from_list(plist_t *irn_list, struct obstack *obst) {
+static const ir_node **build_sorted_array_from_list(plist_t *irn_list, struct obstack *obst)
+{
plist_element_t *el;
int i = 0;
int len = plist_count(irn_list);
*****************************************************/
#ifdef DEBUG_libfirm
-static void dump_nodeset(ir_nodeset_t *ns, const char *prefix) {
+static void dump_nodeset(ir_nodeset_t *ns, const char *prefix)
+{
ir_nodeset_iterator_t iter;
ir_node *irn;
}
#endif
-static void build_file_name(rss_t *rss, const char *suffix, size_t suf_len, char *buf, size_t len) {
+static void build_file_name(rss_t *rss, const char *suffix, size_t suf_len, char *buf, size_t len)
+{
const char *irg_name;
memset(buf, 0, len);
}
/* Dumps all collected bipartite components of current irg as vcg. */
-static void debug_vcg_dump_bipartite(rss_t *rss) {
+static void debug_vcg_dump_bipartite(rss_t *rss)
+{
cbc_t *cbc;
FILE *f;
char file_name[256];
}
/* Dump the computed killing function as vcg. */
-static void debug_vcg_dump_kill(rss_t *rss) {
+static void debug_vcg_dump_kill(rss_t *rss)
+{
FILE *f;
char file_name[256];
plist_element_t *el;
}
/* Dumps the disjoint value DAG (DVG) as vcg. */
-static void debug_vcg_dump_dvg(rss_t *rss, dvg_t *dvg) {
+static void debug_vcg_dump_dvg(rss_t *rss, dvg_t *dvg)
+{
static const char suffix[] = "-RSS-DVG.vcg";
FILE *f;
char file_name[256];
#if 0
/* Dumps the PKG(DVG). */
-static void debug_vcg_dump_dvg_pkiller(rss_t *rss, dvg_t *dvg) {
+static void debug_vcg_dump_dvg_pkiller(rss_t *rss, dvg_t *dvg)
+{
static const char suffix[] = "-RSS-DVG-PKG.vcg";
FILE *f;
char file_name[256];
/**
* In case there is no rss information for irn, initialize it.
*/
-static void *init_rss_irn(ir_phase *ph, const ir_node *irn, void *old) {
+static void *init_rss_irn(ir_phase *ph, const ir_node *irn, void *old)
+{
rss_irn_t *res = old ? old : phase_alloc(ph, sizeof(res[0]));
res->descendant_list = plist_obstack_new(phase_obst(ph));
/**
* Collect all nodes data dependent on current node.
*/
-static void collect_descendants(rss_t *rss, rss_irn_t *rirn, ir_node *irn, int *got_sink, unsigned cur_desc_walk) {
+static void collect_descendants(rss_t *rss, rss_irn_t *rirn, ir_node *irn, int *got_sink, unsigned cur_desc_walk)
+{
const ir_edge_t *edge;
rss_irn_t *cur_node = get_rss_irn(rss, irn);
ir_node *block = rss->block;
/**
* Handles a single consumer.
*/
-static void collect_single_consumer(rss_t *rss, rss_irn_t *rss_irn, ir_node *consumer, int *got_sink) {
+static void collect_single_consumer(rss_t *rss, rss_irn_t *rss_irn, ir_node *consumer, int *got_sink)
+{
ir_node *block = rss->block;
assert(! is_Proj(consumer) && "Cannot handle Projs");
/**
* Collect all nodes consuming the value(s) produced by current node.
*/
-static void collect_consumer(rss_t *rss, rss_irn_t *rss_irn, ir_node *irn, int *got_sink) {
+static void collect_consumer(rss_t *rss, rss_irn_t *rss_irn, ir_node *irn, int *got_sink)
+{
const ir_edge_t *edge;
int i;
ir_edge_kind_t ekind[2] = { EDGE_KIND_NORMAL, EDGE_KIND_DEP };
/**
* Collects all consumers and descendants of an irn.
*/
-static void collect_node_info(rss_t *rss, ir_node *irn) {
+static void collect_node_info(rss_t *rss, ir_node *irn)
+{
static unsigned cur_desc_walk = 0;
rss_irn_t *rss_irn = get_rss_irn(rss, irn);
int got_sink;
* @param u The potentially killed value
* @return 1 if v is in pkill(u), 0 otherwise
*/
-static int is_potential_killer(rss_t *rss, rss_irn_t *v, rss_irn_t *u) {
+static int is_potential_killer(rss_t *rss, rss_irn_t *v, rss_irn_t *u)
+{
plist_t *list;
const ir_node **arr;
plist_element_t *el;
/**
* Update descendants, consumer and pkiller list for the given irn.
*/
-static void update_node_info(rss_t *rss, ir_node *irn, ir_node *pk_irn) {
+static void update_node_info(rss_t *rss, ir_node *irn, ir_node *pk_irn)
+{
rss_irn_t *node = get_rss_irn(rss, irn);
rss_irn_t *pkiller = get_rss_irn(rss, pk_irn);
/**
* Compute the potential killing set PK.
*/
-static void compute_pkill_set(rss_t *rss) {
+static void compute_pkill_set(rss_t *rss)
+{
plist_element_t *u_el, *v_el;
foreach_plist(rss->nodes, u_el) {
/**
* Build set of killing edges (from values to their potential killers)
*/
-static void build_kill_edges(rss_t *rss, pset *epk) {
+static void build_kill_edges(rss_t *rss, pset *epk)
+{
plist_element_t *el, *k_el;
foreach_plist(rss->nodes, el) {
#ifdef DEBUG_libfirm
/* print the given cbc for debugging purpose */
-static void debug_print_cbc(firm_dbg_module_t *mod, cbc_t *cbc) {
+static void debug_print_cbc(firm_dbg_module_t *mod, cbc_t *cbc)
+{
ir_nodeset_iterator_t iter;
ir_node *n;
rss_edge_t *ke;
* Sid-Ahmed-Ali Touati, Phd Thesis
* Register Pressure in Instruction Level Parallelism, p. 71
*/
-static void compute_bipartite_decomposition(rss_t *rss) {
+static void compute_bipartite_decomposition(rss_t *rss)
+{
pset *epk = new_pset(cmp_rss_edges, 10);
int cur_num = 0;
/**
* Select the child with the maximum cost.
*/
-static child_t *select_child_max_cost(rss_t *rss, ir_nodeset_t *x, ir_nodeset_t *y, child_t *t, cbc_t *cbc) {
+static child_t *select_child_max_cost(rss_t *rss, ir_nodeset_t *x, ir_nodeset_t *y, child_t *t, cbc_t *cbc)
+{
ir_node *child;
ir_nodeset_iterator_t iter;
float max_cost = -1.0f;
/**
* Remove all parents from x which are killed by t_irn.
*/
-static void remove_covered_parents(rss_t *rss, ir_nodeset_t *x, ir_node *t_irn, cbc_t *cbc) {
+static void remove_covered_parents(rss_t *rss, ir_nodeset_t *x, ir_node *t_irn, cbc_t *cbc)
+{
rss_irn_t *t = get_rss_irn(rss, t_irn);
rss_edge_t *k_edge;
}
}
-static void update_cumulated_descendent_values(rss_t *rss, ir_nodeset_t *y, ir_node *t_irn) {
+static void update_cumulated_descendent_values(rss_t *rss, ir_nodeset_t *y, ir_node *t_irn)
+{
rss_irn_t *t = get_rss_irn(rss, t_irn);
plist_element_t *el;
/**
* Greedy-k: a heuristics for the MMA problem
*/
-static void compute_killing_function(rss_t *rss) {
+static void compute_killing_function(rss_t *rss)
+{
cbc_t *cbc;
struct obstack obst;
/**
* Adds the edge src -> tgt to the dvg. Checks if reverse edge is already there (asserts).
*/
-static inline void add_dvg_edge(rss_t *rss, dvg_t *dvg, const ir_node *src, const ir_node *tgt, int have_source) {
+static inline void add_dvg_edge(rss_t *rss, dvg_t *dvg, const ir_node *src, const ir_node *tgt, int have_source)
+{
rss_edge_t *dvg_edge;
rss_edge_t key;
* BEWARE: It is not made explicitly clear in the Touati paper,
* but the DVG is meant to be built from the KILLING DAG
*/
-static void compute_dvg(rss_t *rss, dvg_t *dvg) {
+static void compute_dvg(rss_t *rss, dvg_t *dvg)
+{
plist_element_t *el;
DBG((rss->dbg, LEVEL_1, "\tcomputing DVG:\n"));
/**
* Updates the dvg structure when a serialization edge from src -> tgt is added.
*/
-static void update_dvg(rss_t *rss, dvg_t *dvg, rss_irn_t *src, rss_irn_t *tgt) {
+static void update_dvg(rss_t *rss, dvg_t *dvg, rss_irn_t *src, rss_irn_t *tgt)
+{
int i, j, idx;
rss_edge_t *edge;
rss_edge_t **arr = ALLOCAN(rss_edge_t*, pset_count(dvg->edges));
/**
* Accumulate all descendants for root into list.
*/
-static void accumulate_dvg_descendant_values(rss_t *rss, rss_irn_t *root, plist_t *list) {
+static void accumulate_dvg_descendant_values(rss_t *rss, rss_irn_t *root, plist_t *list)
+{
if (plist_count(root->dvg_user_list) > 0) {
plist_element_t *el;
* in the given DVG.
* Needs the descendant lists of all users as sorted arrays.
*/
-static void build_dvg_pkiller_list(rss_t *rss, dvg_t *dvg) {
+static void build_dvg_pkiller_list(rss_t *rss, dvg_t *dvg)
+{
ir_nodeset_iterator_t iter;
ir_node *irn;
* This is a reimplementation of the MAXIMAL_ANTI_CHAIN function
* from the DDG library 1.1 (DAG.cpp).
*/
-static ir_nodeset_t *compute_maximal_antichain(rss_t *rss, dvg_t *dvg, int iteration) {
+static ir_nodeset_t *compute_maximal_antichain(rss_t *rss, dvg_t *dvg, int iteration)
+{
int n = ir_nodeset_size(&dvg->nodes);
int *assignment = ALLOCAN(int, n);
int *assignment_rev = ALLOCAN(int, n);
/**
* Computes the best serialization between two nodes of sat_vals.
*/
-static serialization_t *compute_best_admissible_serialization(rss_t *rss, ir_nodeset_t *sat_vals, serialization_t *ser, int num_regs) {
+static serialization_t *compute_best_admissible_serialization(rss_t *rss, ir_nodeset_t *sat_vals, serialization_t *ser, int num_regs)
+{
int n = ir_nodeset_size(sat_vals);
int n_idx = ARR_LEN_SAFE(rss->idx_map);
int i = 0;
* Perform the value serialization heuristic and add all
* computed serialization edges as dependencies to the irg.
*/
-static void perform_value_serialization_heuristic(rss_t *rss) {
+static void perform_value_serialization_heuristic(rss_t *rss)
+{
bitset_t *arch_nonign_bs = bitset_alloca(arch_register_class_n_regs(rss->cls));
bitset_t *abi_ign_bs = bitset_alloca(arch_register_class_n_regs(rss->cls));
unsigned available_regs, iteration;
/**
* Do initial calculations for a block.
*/
-static void process_block(ir_node *block, void *env) {
+static void process_block(ir_node *block, void *env)
+{
rss_t *rss = env;
int i, n;
const ir_edge_t *edge;
/**
* Register the options.
*/
-void be_init_schedrss(void) {
+void be_init_schedrss(void)
+{
lc_opt_entry_t *be_grp = lc_opt_get_grp(firm_opt_get_root(), "be");
lc_opt_entry_t *sched_grp = lc_opt_get_grp(be_grp, "sched");
lc_opt_entry_t *rss_grp = lc_opt_get_grp(sched_grp, "rss");
/**
* Preprocess the irg for scheduling.
*/
-void rss_schedule_preparation(be_irg_t *birg) {
+void rss_schedule_preparation(be_irg_t *birg)
+{
ir_graph *irg = be_get_birg_irg(birg);
rss_t rss;
/**
* Get the current delay.
*/
-static inline sched_timestep_t get_irn_delay(trace_env_t *env, ir_node *n) {
+static inline sched_timestep_t get_irn_delay(trace_env_t *env, ir_node *n)
+{
int idx = get_irn_idx(n);
assert(idx < ARR_LEN(env->sched_info));
/**
* Set the current delay.
*/
-static inline void set_irn_delay(trace_env_t *env, ir_node *n, sched_timestep_t delay) {
+static inline void set_irn_delay(trace_env_t *env, ir_node *n, sched_timestep_t delay)
+{
int idx = get_irn_idx(n);
assert(idx < ARR_LEN(env->sched_info));
/**
* Get the current etime.
*/
-static inline sched_timestep_t get_irn_etime(trace_env_t *env, ir_node *n) {
+static inline sched_timestep_t get_irn_etime(trace_env_t *env, ir_node *n)
+{
int idx = get_irn_idx(n);
assert(idx < ARR_LEN(env->sched_info));
/**
* Set the current etime.
*/
-static inline void set_irn_etime(trace_env_t *env, ir_node *n, sched_timestep_t etime) {
+static inline void set_irn_etime(trace_env_t *env, ir_node *n, sched_timestep_t etime)
+{
int idx = get_irn_idx(n);
assert(idx < ARR_LEN(env->sched_info));
/**
* Get the number of users.
*/
-static inline unsigned get_irn_num_user(trace_env_t *env, ir_node *n) {
+static inline unsigned get_irn_num_user(trace_env_t *env, ir_node *n)
+{
int idx = get_irn_idx(n);
assert(idx < ARR_LEN(env->sched_info));
/**
* Set the number of users.
*/
-static inline void set_irn_num_user(trace_env_t *env, ir_node *n, unsigned num_user) {
+static inline void set_irn_num_user(trace_env_t *env, ir_node *n, unsigned num_user)
+{
int idx = get_irn_idx(n);
assert(idx < ARR_LEN(env->sched_info));
/**
* Get the register difference.
*/
-static inline int get_irn_reg_diff(trace_env_t *env, ir_node *n) {
+static inline int get_irn_reg_diff(trace_env_t *env, ir_node *n)
+{
int idx = get_irn_idx(n);
assert(idx < ARR_LEN(env->sched_info));
/**
* Set the register difference.
*/
-static inline void set_irn_reg_diff(trace_env_t *env, ir_node *n, int reg_diff) {
+static inline void set_irn_reg_diff(trace_env_t *env, ir_node *n, int reg_diff)
+{
int idx = get_irn_idx(n);
assert(idx < ARR_LEN(env->sched_info));
/**
* Get the pre-order position.
*/
-static inline int get_irn_preorder(trace_env_t *env, ir_node *n) {
+static inline int get_irn_preorder(trace_env_t *env, ir_node *n)
+{
int idx = get_irn_idx(n);
assert(idx < ARR_LEN(env->sched_info));
/**
* Set the pre-order position.
*/
-static inline void set_irn_preorder(trace_env_t *env, ir_node *n, int pos) {
+static inline void set_irn_preorder(trace_env_t *env, ir_node *n, int pos)
+{
int idx = get_irn_idx(n);
assert(idx < ARR_LEN(env->sched_info));
/**
* Get the critical path length.
*/
-static inline unsigned get_irn_critical_path_len(trace_env_t *env, ir_node *n) {
+static inline unsigned get_irn_critical_path_len(trace_env_t *env, ir_node *n)
+{
int idx = get_irn_idx(n);
assert(idx < ARR_LEN(env->sched_info));
/**
* Set the critical path length.
*/
-static inline void set_irn_critical_path_len(trace_env_t *env, ir_node *n, unsigned len) {
+static inline void set_irn_critical_path_len(trace_env_t *env, ir_node *n, unsigned len)
+{
int idx = get_irn_idx(n);
assert(idx < ARR_LEN(env->sched_info));
/**
* returns the exec-time for node n.
*/
-static sched_timestep_t exectime(trace_env_t *env, ir_node *n) {
+static sched_timestep_t exectime(trace_env_t *env, ir_node *n)
+{
if (be_is_Keep(n) || is_Proj(n))
return 0;
if (env->selector->exectime)
/**
* Calculates the latency between two ops
*/
-static sched_timestep_t latency(trace_env_t *env, ir_node *pred, int pred_cycle, ir_node *curr, int curr_cycle) {
+static sched_timestep_t latency(trace_env_t *env, ir_node *pred, int pred_cycle, ir_node *curr, int curr_cycle)
+{
/* a Keep hides a root */
if (be_is_Keep(curr))
return exectime(env, pred);
/**
* Returns the number of users of a node having mode datab.
*/
-static int get_num_successors(ir_node *irn) {
+static int get_num_successors(ir_node *irn)
+{
int sum = 0;
const ir_edge_t *edge;
/**
* Returns the difference of regs_output - regs_input;
*/
-static int get_reg_difference(trace_env_t *env, ir_node *irn) {
+static int get_reg_difference(trace_env_t *env, ir_node *irn)
+{
int num_out = 0;
int num_in = 0;
int i;
/**
* descent into a dag and create a pre-order list.
*/
-static void descent(ir_node *root, ir_node *block, ir_node **list, trace_env_t *env, unsigned path_len) {
+static void descent(ir_node *root, ir_node *block, ir_node **list, trace_env_t *env, unsigned path_len)
+{
int i;
if (! is_Phi(root)) {
/**
* Returns non-zero if root is a root in the block block.
*/
-static int is_root(ir_node *root, ir_node *block) {
+static int is_root(ir_node *root, ir_node *block)
+{
const ir_edge_t *edge;
foreach_out_edge(root, edge) {
/**
* Performs initial block calculations for trace scheduling.
*/
-static void trace_preprocess_block(trace_env_t *env, ir_node *block) {
+static void trace_preprocess_block(trace_env_t *env, ir_node *block)
+{
ir_node *root = NULL, *preord = NULL;
ir_node *curr, *irn;
int cur_pos;
/**
* This functions gets called after a node finally has been made ready.
*/
-static void trace_node_ready(void *data, ir_node *irn, ir_node *pred) {
+static void trace_node_ready(void *data, ir_node *irn, ir_node *pred)
+{
trace_env_t *env = data;
sched_timestep_t etime_p, etime;
/**
* Update the current time after irn has been selected.
*/
-static void trace_update_time(void *data, ir_node *irn) {
+static void trace_update_time(void *data, ir_node *irn)
+{
trace_env_t *env = data;
if (is_Phi(irn) || get_irn_opcode(irn) == beo_Start) {
env->curr_time += get_irn_etime(env, irn);
* @param birg The backend irg object
* @return The environment
*/
-static trace_env_t *trace_init(const be_irg_t *birg) {
+static trace_env_t *trace_init(const be_irg_t *birg)
+{
trace_env_t *env = XMALLOCZ(trace_env_t);
ir_graph *irg = be_get_birg_irg(birg);
int nn = get_irg_last_idx(irg);
* Frees all memory allocated for trace scheduling environment.
* @param env The environment
*/
-static void trace_free(void *data) {
+static void trace_free(void *data)
+{
trace_env_t *env = data;
be_liveness_free(env->liveness);
DEL_ARR_F(env->sched_info);
/**
* Alloc a new workset on obstack @p ob with maximum size @p max
*/
-static inline workset_t *new_workset(belady_env_t *env, struct obstack *ob) {
+static inline workset_t *new_workset(belady_env_t *env, struct obstack *ob)
+{
return OALLOCFZ(ob, workset_t, vals, env->n_regs);
}
/**
* Alloc a new instance on obstack and make it equal to @param ws
*/
-static inline workset_t *workset_clone(belady_env_t *env, struct obstack *ob, workset_t *ws) {
+static inline workset_t *workset_clone(belady_env_t *env, struct obstack *ob, workset_t *ws)
+{
workset_t *res = OALLOCF(ob, workset_t, vals, env->n_regs);
memcpy(res, ws, sizeof(*res) + (env->n_regs)*sizeof(res->vals[0]));
return res;
* Do NOT alloc anything. Make @param tgt equal to @param src.
* returns @param tgt for convenience
*/
-static inline workset_t *workset_copy(belady_env_t *env, workset_t *tgt, workset_t *src) {
+static inline workset_t *workset_copy(belady_env_t *env, workset_t *tgt, workset_t *src)
+{
size_t size = sizeof(*src) + (env->n_regs)*sizeof(src->vals[0]);
memcpy(tgt, src, size);
return tgt;
* @param count locations given at memory @param locs.
* Set the length of @param ws to count.
*/
-static inline void workset_bulk_fill(workset_t *workset, int count, const loc_t *locs) {
+static inline void workset_bulk_fill(workset_t *workset, int count, const loc_t *locs)
+{
workset->len = count;
memcpy(&(workset->vals[0]), locs, count * sizeof(locs[0]));
}
* Inserts the value @p val into the workset, iff it is not
* already contained. The workset must not be full.
*/
-static inline void workset_insert(belady_env_t *env, workset_t *ws, ir_node *val) {
+static inline void workset_insert(belady_env_t *env, workset_t *ws, ir_node *val)
+{
int i;
/* check for current regclass */
if (!arch_irn_consider_in_reg_alloc(env->cls, val)) {
/**
* Removes all entries from this workset
*/
-static inline void workset_clear(workset_t *ws) {
+static inline void workset_clear(workset_t *ws)
+{
ws->len = 0;
}
/**
* Removes the value @p val from the workset if present.
*/
-static inline void workset_remove(workset_t *ws, ir_node *val) {
+static inline void workset_remove(workset_t *ws, ir_node *val)
+{
int i;
for(i=0; i<ws->len; ++i) {
if (ws->vals[i].irn == val) {
}
}
-static inline int workset_get_index(const workset_t *ws, const ir_node *val) {
+static inline int workset_get_index(const workset_t *ws, const ir_node *val)
+{
int i;
for(i=0; i<ws->len; ++i) {
if (ws->vals[i].irn == val)
* @p is_usage indicates that the values in new_vals are used (not defined)
* In this case reloads must be performed
*/
-static void displace(block_info_t *bi, workset_t *new_vals, int is_usage) {
+static void displace(block_info_t *bi, workset_t *new_vals, int is_usage)
+{
belady_env_t *env = bi->bel;
workset_t *ws = env->ws;
ir_node **to_insert = ALLOCAN(ir_node*, env->n_regs);
* whether it is used from a register or is reloaded
* before the use.
*/
-static void belady(belady_env_t *env, int id) {
+static void belady(belady_env_t *env, int id)
+{
block_info_t *block_info = new_block_info(env, id);
const ir_node *block = block_info->bl;
#define get_reg(irn) arch_get_irn_register(irn)
#define set_reg(irn, reg) arch_set_irn_register(irn, reg)
-static void clear_link(ir_node *irn, void *data) {
+static void clear_link(ir_node *irn, void *data)
+{
(void) data;
set_irn_link(irn, NULL);
}
* - have the current register class
* The list is rooted at get_irn_link(BB).
*/
-static void collect_phis_walker(ir_node *irn, void *data) {
+static void collect_phis_walker(ir_node *irn, void *data)
+{
be_chordal_env_t *env = data;
if (is_Phi(irn) && chordal_has_class(env, irn)) {
ir_node *bl = get_nodes_block(irn);
ir_node *proj; /**< The proj created for @p arg. */
} perm_proj_t;
-static int cmp_perm_proj(const void *a, const void *b, size_t n) {
+static int cmp_perm_proj(const void *a, const void *b, size_t n)
+{
const perm_proj_t *p = a;
const perm_proj_t *q = b;
(void) n;
/**
* Insert Perms in all predecessors of a block containing a phi
*/
-static void insert_all_perms_walker(ir_node *bl, void *data) {
+static void insert_all_perms_walker(ir_node *bl, void *data)
+{
insert_all_perms_env_t *env = data;
be_chordal_env_t *chordal_env = env->chordal_env;
pmap *perm_map = env->perm_map;
* Adjusts the register allocation for the (new) phi-operands
* and insert duplicates iff necessary.
*/
-static void set_regs_or_place_dupls_walker(ir_node *bl, void *data) {
+static void set_regs_or_place_dupls_walker(ir_node *bl, void *data)
+{
be_chordal_env_t *chordal_env = data;
be_lv_t *lv = chordal_env->birg->lv;
ir_node *phi;
}
}
-void be_ssa_destruction(be_chordal_env_t *chordal_env) {
+void be_ssa_destruction(be_chordal_env_t *chordal_env)
+{
insert_all_perms_env_t insert_perms_env;
pmap *perm_map = pmap_create();
ir_graph *irg = chordal_env->irg;
pmap_destroy(perm_map);
}
-static void ssa_destruction_check_walker(ir_node *bl, void *data) {
+static void ssa_destruction_check_walker(ir_node *bl, void *data)
+{
ir_node *phi;
int i, max;
(void)data;
}
}
-void be_ssa_destruction_check(be_chordal_env_t *chordal_env) {
+void be_ssa_destruction_check(be_chordal_env_t *chordal_env)
+{
irg_block_walk_graph(chordal_env->irg, ssa_destruction_check_walker, NULL, NULL);
}
/**
* Returns the stabs type number of a Firm type.
*/
-static unsigned get_type_number(stabs_handle *h, ir_type *tp) {
+static unsigned get_type_number(stabs_handle *h, ir_type *tp)
+{
pmap_entry *entry;
unsigned num;
/**
* Map a given Type to void by assigned the type number 0.
*/
-static void map_to_void(stabs_handle *h, ir_type *tp) {
+static void map_to_void(stabs_handle *h, ir_type *tp)
+{
pmap_insert(h->type_map, tp, INT_TO_PTR(0));
}
/**
* generate the void type.
*/
-static void gen_void_type(stabs_handle *h) {
+static void gen_void_type(stabs_handle *h)
+{
(void) h;
be_emit_irprintf("\t.stabs\t\"void:t%u=%u\",%d,0,0,0\n", 0, 0, N_LSYM);
be_emit_write_line();
/**
* emit a tarval as decimal
*/
-static void be_emit_tv_as_decimal(tarval *tv) {
+static void be_emit_tv_as_decimal(tarval *tv)
+{
ir_mode *mode = get_tarval_mode(tv);
const tarval_mode_info *old = get_tarval_mode_output_option(mode);
* @param h the stabs handle
* @param tp the type
*/
-static void gen_primitive_type(stabs_handle *h, ir_type *tp) {
+static void gen_primitive_type(stabs_handle *h, ir_type *tp)
+{
ir_mode *mode = get_type_mode(tp);
unsigned type_num;
* @param h the stabs handle
* @param tp the type
*/
-static void gen_enum_type(stabs_handle *h, ir_type *tp) {
+static void gen_enum_type(stabs_handle *h, ir_type *tp)
+{
unsigned type_num = get_type_number(h, tp);
int i, n;
/**
* print a pointer type
*/
-void print_pointer_type(stabs_handle *h, ir_type *tp, int local) {
+void print_pointer_type(stabs_handle *h, ir_type *tp, int local)
+{
unsigned type_num = local ? h->next_type_nr++ : get_type_number(h, tp);
ir_type *el_tp = get_pointer_points_to_type(tp);
unsigned el_num = get_type_number(h, el_tp);
* @param env the walker environment
* @param tp the type
*/
-static void gen_pointer_type(wenv_t *env, ir_type *tp) {
+static void gen_pointer_type(wenv_t *env, ir_type *tp)
+{
stabs_handle *h = env->h;
ir_type *el_tp = get_pointer_points_to_type(tp);
/**
* print an array type
*/
-static void print_array_type(stabs_handle *h, ir_type *tp, int local) {
+static void print_array_type(stabs_handle *h, ir_type *tp, int local)
+{
ir_type *etp = get_array_element_type(tp);
int i, n = get_array_n_dimensions(tp);
unsigned type_num = local ? h->next_type_nr++ : get_type_number(h, tp);
* @param env the walker environment
* @param tp the type
*/
-static void gen_array_type(wenv_t *env, ir_type *tp) {
+static void gen_array_type(wenv_t *env, ir_type *tp)
+{
stabs_handle *h = env->h;
ir_type *etp = get_array_element_type(tp);
* @param env the walker environment
* @param tp the type
*/
-static void gen_struct_union_type(wenv_t *env, ir_type *tp) {
+static void gen_struct_union_type(wenv_t *env, ir_type *tp)
+{
stabs_handle *h = env->h;
unsigned type_num = get_type_number(h, tp);
int i, n;
* @param env the walker environment
* @param tp the type
*/
-static void gen_method_type(wenv_t *env, ir_type *tp) {
+static void gen_method_type(wenv_t *env, ir_type *tp)
+{
stabs_handle *h = env->h;
unsigned type_num = get_type_number(h, tp);
ir_type *rtp = NULL;
/**
* generate all types.
*/
-static void gen_types(stabs_handle *h) {
+static void gen_types(stabs_handle *h)
+{
wenv_t env;
env.h = h;
/**
* start a new source object (compilation unit)
*/
-static void stabs_so(dbg_handle *handle, const char *filename) {
+static void stabs_so(dbg_handle *handle, const char *filename)
+{
stabs_handle *h = (stabs_handle *)handle;
h->main_file = h->curr_file = filename;
be_emit_irprintf("\t.stabs\t\"%s\",%d,0,0,.Ltext0\n", filename, N_SO);
/**
* Main Program
*/
-static void stabs_main_program(dbg_handle *handle) {
+static void stabs_main_program(dbg_handle *handle)
+{
ir_graph *irg = get_irp_main_irg();
(void) handle;
/**
* dump the stabs for a method end
*/
-static void stabs_method_end(dbg_handle *handle) {
+static void stabs_method_end(dbg_handle *handle)
+{
stabs_handle *h = (stabs_handle *)handle;
ir_entity *ent = h->cur_ent;
const be_stack_layout_t *layout = h->layout;
/**
* dump types
*/
-static void stabs_types(dbg_handle *handle) {
+static void stabs_types(dbg_handle *handle)
+{
stabs_handle *h = (stabs_handle *)handle;
/* allocate the zero for the void type */
/**
* dump a variable in the global type
*/
-static void stabs_variable(dbg_handle *handle, ir_entity *ent) {
+static void stabs_variable(dbg_handle *handle, ir_entity *ent)
+{
stabs_handle *h = (stabs_handle *)handle;
unsigned tp_num = get_type_number(h, get_entity_type(ent));
char buf[1024];
/**
* Close the stabs handler.
*/
-static void stabs_close(dbg_handle *handle) {
+static void stabs_close(dbg_handle *handle)
+{
stabs_handle *h = (stabs_handle *)handle;
pmap_destroy(h->type_map);
free(h);
};
/* Opens a stabs handler */
-dbg_handle *be_stabs_open(void) {
+dbg_handle *be_stabs_open(void)
+{
stabs_handle *h = XMALLOCZ(stabs_handle);
h->base.ops = &stabs_ops;
ir_nodeset_destroy(&live_nodes);
}
-static void stat_reg_pressure_block(ir_node *block, void *data) {
+static void stat_reg_pressure_block(ir_node *block, void *data)
+{
pressure_walker_env_t *env = data;
check_reg_pressure_class(env, block, env->cls);
}
-void be_do_stat_reg_pressure(be_irg_t *birg, const arch_register_class_t *cls) {
+void be_do_stat_reg_pressure(be_irg_t *birg, const arch_register_class_t *cls)
+{
pressure_walker_env_t env;
ir_graph *irg = be_get_birg_irg(birg);
double average_pressure;
* We must adapt the live-outs to the live-ins at each block-border.
*/
static
-void fix_block_borders(ir_node *block, void *data) {
+void fix_block_borders(ir_node *block, void *data)
+{
minibelady_env_t *env = data;
ir_graph *irg = get_irn_irg(block);
ir_node *startblock = get_irg_start_block(irg);
return 1;
}
-void dump_ir_block_graph_sched(ir_graph *irg, const char *suffix) {
+void dump_ir_block_graph_sched(ir_graph *irg, const char *suffix)
+{
DUMP_NODE_EDGE_FUNC old = get_dump_node_edge_hook();
dump_consts_local(0);
set_dump_node_edge_hook(old);
}
-void dump_ir_extblock_graph_sched(ir_graph *irg, const char *suffix) {
+void dump_ir_extblock_graph_sched(ir_graph *irg, const char *suffix)
+{
DUMP_NODE_EDGE_FUNC old = get_dump_node_edge_hook();
dump_consts_local(0);
* @param suffix A suffix to its file name.
* @param dumper The dump function
*/
-void be_dump(ir_graph *irg, const char *suffix, void (*dumper)(ir_graph *, const char *)) {
+void be_dump(ir_graph *irg, const char *suffix, void (*dumper)(ir_graph *, const char *))
+{
static ir_graph *last_irg = NULL;
static int nr = 0;
char buf[128];
(void) irn;
}
-unsigned get_num_reachable_nodes(ir_graph *irg) {
+unsigned get_num_reachable_nodes(ir_graph *irg)
+{
int num = 0;
irg_walk_graph(irg, count_num_reachable_nodes, NULL, &num);
return num;
/**
* Gets the Proj with number pn from irn.
*/
-ir_node *be_get_Proj_for_pn(const ir_node *irn, long pn) {
+ir_node *be_get_Proj_for_pn(const ir_node *irn, long pn)
+{
const ir_edge_t *edge;
ir_node *proj;
assert(get_irn_mode(irn) == mode_T && "need mode_T");
return NULL;
}
-FILE *be_ffopen(const char *base, const char *ext, const char *mode) {
+FILE *be_ffopen(const char *base, const char *ext, const char *mode)
+{
FILE *out;
char buf[1024];
/**
* Print all nodes of a pset into a file.
*/
-static void print_living_values(FILE *F, const ir_nodeset_t *live_nodes) {
+static void print_living_values(FILE *F, const ir_nodeset_t *live_nodes)
+{
ir_nodeset_iterator_t iter;
ir_node *node;
/**
* Check if number of live nodes never exceeds the number of available registers.
*/
-static void verify_liveness_walker(ir_node *block, void *data) {
+static void verify_liveness_walker(ir_node *block, void *data)
+{
be_verify_register_pressure_env_t *env = (be_verify_register_pressure_env_t *)data;
ir_nodeset_t live_nodes;
ir_node *irn;
/**
* Simple schedule checker.
*/
-static void verify_schedule_walker(ir_node *block, void *data) {
+static void verify_schedule_walker(ir_node *block, void *data)
+{
be_verify_schedule_env_t *env = (be_verify_schedule_env_t*) data;
ir_node *node;
ir_node *non_phi_found = NULL;
return 1;
}
-static void check_schedule(ir_node *node, void *data) {
+static void check_schedule(ir_node *node, void *data)
+{
be_verify_schedule_env_t *env = data;
int should_be;
int scheduled;
int problem_found;
} be_verify_spillslots_env_t;
-static int cmp_spill(const void* d1, const void* d2, size_t size) {
+static int cmp_spill(const void* d1, const void* d2, size_t size)
+{
const spill_t* s1 = d1;
const spill_t* s2 = d2;
(void) size;
return s1->spill != s2->spill;
}
-static spill_t *find_spill(be_verify_spillslots_env_t *env, ir_node *node) {
+static spill_t *find_spill(be_verify_spillslots_env_t *env, ir_node *node)
+{
spill_t spill;
spill.spill = node;
return set_find(env->spills, &spill, sizeof(spill), HASH_PTR(node));
}
-static spill_t *get_spill(be_verify_spillslots_env_t *env, ir_node *node, ir_entity *ent) {
+static spill_t *get_spill(be_verify_spillslots_env_t *env, ir_node *node, ir_entity *ent)
+{
spill_t spill, *res;
int hash = HASH_PTR(node);
return res;
}
-static ir_node *get_memory_edge(const ir_node *node) {
+static ir_node *get_memory_edge(const ir_node *node)
+{
int i, arity;
ir_node *result = NULL;
void collect(be_verify_spillslots_env_t *env, ir_node *node, ir_node *reload, ir_entity* ent);
static
-void be_check_entity(be_verify_spillslots_env_t *env, ir_node *node, ir_entity *ent) {
+void be_check_entity(be_verify_spillslots_env_t *env, ir_node *node, ir_entity *ent)
+{
if(ent == NULL) {
ir_fprintf(stderr, "Verify warning: Node %+F in block %+F(%s) should have an entity assigned\n",
node, get_nodes_block(node), get_irg_dump_name(env->irg));
}
static
-void collect_spill(be_verify_spillslots_env_t *env, ir_node *node, ir_node *reload, ir_entity* ent) {
+void collect_spill(be_verify_spillslots_env_t *env, ir_node *node, ir_node *reload, ir_entity* ent)
+{
ir_entity *spillent = arch_get_frame_entity(node);
be_check_entity(env, node, spillent);
get_spill(env, node, ent);
}
}
-static void collect_memperm(be_verify_spillslots_env_t *env, ir_node *node, ir_node *reload, ir_entity* ent) {
+static void collect_memperm(be_verify_spillslots_env_t *env, ir_node *node, ir_node *reload, ir_entity* ent)
+{
int i, arity;
spill_t spill, *res;
int hash = HASH_PTR(node);
}
}
-static void collect_memphi(be_verify_spillslots_env_t *env, ir_node *node, ir_node *reload, ir_entity *ent) {
+static void collect_memphi(be_verify_spillslots_env_t *env, ir_node *node, ir_node *reload, ir_entity *ent)
+{
int i, arity;
spill_t spill, *res;
int hash = HASH_PTR(node);
}
}
-static void collect(be_verify_spillslots_env_t *env, ir_node *node, ir_node *reload, ir_entity* ent) {
+static void collect(be_verify_spillslots_env_t *env, ir_node *node, ir_node *reload, ir_entity* ent)
+{
if(be_is_Spill(node)) {
collect_spill(env, node, reload, ent);
} else if(is_Proj(node)) {
* This walker function searches for reloads and collects all the spills
* and memphis attached to them.
*/
-static void collect_spills_walker(ir_node *node, void *data) {
+static void collect_spills_walker(ir_node *node, void *data)
+{
be_verify_spillslots_env_t *env = data;
/* @@@ ia32_classify returns classification of Proj_pred :-/ */
}
}
-static void check_lonely_spills(ir_node *node, void *data) {
+static void check_lonely_spills(ir_node *node, void *data)
+{
be_verify_spillslots_env_t *env = data;
if(be_is_Spill(node) || (is_Proj(node) && be_is_MemPerm(get_Proj_pred(node)))) {
* @param b The second value.
* @return 1, if a and b interfere, 0 if not.
*/
-static int my_values_interfere(const ir_node *a, const ir_node *b) {
+static int my_values_interfere(const ir_node *a, const ir_node *b)
+{
const ir_edge_t *edge;
ir_node *bb;
int a2b = value_dominates(a, b);
}
}
-static void value_used(ir_node *block, ir_node *node) {
+static void value_used(ir_node *block, ir_node *node)
+{
const arch_register_t *reg;
ir_node *reg_node;
registers[reg->index] = NULL;
}
-static void verify_block_register_allocation(ir_node *block, void *data) {
+static void verify_block_register_allocation(ir_node *block, void *data)
+{
int i, nregclasses;
(void) data;
}
}
-int be_verify_register_allocation(const be_irg_t *birg) {
+int be_verify_register_allocation(const be_irg_t *birg)
+{
arch_env = be_get_birg_arch_env(birg);
irg = be_get_birg_irg(birg);
lv = be_liveness(irg);
int problem_found;
} verify_out_dead_nodes_env;
-static void check_out_edges(ir_node *node, verify_out_dead_nodes_env *env) {
+static void check_out_edges(ir_node *node, verify_out_dead_nodes_env *env)
+{
ir_graph *irg = env->irg;
const ir_edge_t* edge;
bitset_set(reachable, get_irn_idx(node));
}
-int be_verify_out_edges(ir_graph *irg) {
+int be_verify_out_edges(ir_graph *irg)
+{
verify_out_dead_nodes_env env;
return 1;
* @return non-zero if the DAG represents an immediate, 0 else
*/
#if 0
-static int is_immediate_simple(const ir_node *node) {
+static int is_immediate_simple(const ir_node *node)
+{
int symconsts = 0;
return do_is_immediate(node, &symconsts, 0);
}
}
/* Evaluate the costs of an instruction. */
-int ia32_evaluate_insn(insn_kind kind, tarval *tv) {
+int ia32_evaluate_insn(insn_kind kind, tarval *tv)
+{
int cost;
switch (kind) {
return reg;
}
-int ia32_mode_needs_gp_reg(ir_mode *mode) {
+int ia32_mode_needs_gp_reg(ir_mode *mode)
+{
if (mode == mode_fpcw)
return 0;
if (get_mode_size_bits(mode) > 32)
return new_node;
}
-ir_node *gen_CopyB(ir_node *node) {
+ir_node *gen_CopyB(ir_node *node)
+{
ir_node *block = NULL;
ir_node *src = NULL;
ir_node *new_src = NULL;
return res;
}
-ir_node *gen_Proj_tls(ir_node *node) {
+ir_node *gen_Proj_tls(ir_node *node)
+{
ir_node *block = NULL;
dbg_info *dbgi = NULL;
ir_node *res = NULL;
* and map all instructions the backend did not support
* to runtime calls.
*/
-void ia32_handle_intrinsics(void) {
+void ia32_handle_intrinsics(void)
+{
if (intrinsics && ARR_LEN(intrinsics) > 0) {
lower_intrinsics(intrinsics, ARR_LEN(intrinsics), /*part_block_used=*/1);
}
* @param h_res the upper 32 bit result or NULL
* @param irg the graph to replace on
*/
-static void reroute_result(ir_node *proj, ir_node *l_res, ir_node *h_res, ir_graph *irg) {
+static void reroute_result(ir_node *proj, ir_node *l_res, ir_node *h_res, ir_graph *irg)
+{
const ir_edge_t *edge, *next;
foreach_out_edge_safe(proj, edge, next) {
* @param irg the graph to replace on
* @param block the block to replace on (always the call block)
*/
-static void resolve_call(ir_node *call, ir_node *l_res, ir_node *h_res, ir_graph *irg, ir_node *block) {
+static void resolve_call(ir_node *call, ir_node *l_res, ir_node *h_res, ir_graph *irg, ir_node *block)
+{
ir_node *jmp, *res, *in[2];
ir_node *bad = get_irg_bad(irg);
ir_node *nomem = get_irg_no_mem(irg);
/**
* Map an Add (a_l, a_h, b_l, b_h)
*/
-static int map_Add(ir_node *call, void *ctx) {
+static int map_Add(ir_node *call, void *ctx)
+{
dbg_info *dbg = get_irn_dbg_info(call);
ir_node *block = get_nodes_block(call);
ir_node **params = get_Call_param_arr(call);
/**
* Map a Shl (a_l, a_h, count)
*/
-static int map_Shl(ir_node *call, void *ctx) {
+static int map_Shl(ir_node *call, void *ctx)
+{
ir_graph *irg = current_ir_graph;
dbg_info *dbg = get_irn_dbg_info(call);
ir_node *block = get_nodes_block(call);
/**
* Map a Shr (a_l, a_h, count)
*/
-static int map_Shr(ir_node *call, void *ctx) {
+static int map_Shr(ir_node *call, void *ctx)
+{
ir_graph *irg = current_ir_graph;
dbg_info *dbg = get_irn_dbg_info(call);
ir_node *block = get_nodes_block(call);
/**
* Map a Shrs (a_l, a_h, count)
*/
-static int map_Shrs(ir_node *call, void *ctx) {
+static int map_Shrs(ir_node *call, void *ctx)
+{
ir_graph *irg = current_ir_graph;
dbg_info *dbg = get_irn_dbg_info(call);
ir_node *block = get_nodes_block(call);
/**
* Map a Mul (a_l, a_h, b_l, b_h)
*/
-static int map_Mul(ir_node *call, void *ctx) {
+static int map_Mul(ir_node *call, void *ctx)
+{
dbg_info *dbg = get_irn_dbg_info(call);
ir_node *block = get_nodes_block(call);
ir_node **params = get_Call_param_arr(call);
/**
* Map a Minus (a_l, a_h)
*/
-static int map_Minus(ir_node *call, void *ctx) {
+static int map_Minus(ir_node *call, void *ctx)
+{
dbg_info *dbg = get_irn_dbg_info(call);
ir_node *block = get_nodes_block(call);
ir_node **params = get_Call_param_arr(call);
/**
* Map a Abs (a_l, a_h)
*/
-static int map_Abs(ir_node *call, void *ctx) {
+static int map_Abs(ir_node *call, void *ctx)
+{
dbg_info *dbg = get_irn_dbg_info(call);
ir_node *block = get_nodes_block(call);
ir_node **params = get_Call_param_arr(call);
/**
* Maps a Mod. Change into a library call
*/
-static int map_Mod(ir_node *call, void *ctx) {
+static int map_Mod(ir_node *call, void *ctx)
+{
ia32_intrinsic_env_t *env = ctx;
ir_type *method = get_Call_type(call);
ir_mode *h_mode = get_type_mode(get_method_res_type(method, 1));
/**
* Maps a Conv.
*/
-static int map_Conv(ir_node *call, void *ctx) {
+static int map_Conv(ir_node *call, void *ctx)
+{
ir_graph *irg = current_ir_graph;
dbg_info *dbg = get_irn_dbg_info(call);
ir_node *block = get_nodes_block(call);
/* this is the order of the assigned registers used for parameter passing */
-void ia32_build_16bit_reg_map(pmap *reg_map) {
+void ia32_build_16bit_reg_map(pmap *reg_map)
+{
pmap_insert(reg_map, &ia32_gp_regs[REG_EAX], "ax");
pmap_insert(reg_map, &ia32_gp_regs[REG_EBX], "bx");
pmap_insert(reg_map, &ia32_gp_regs[REG_ECX], "cx");
pmap_insert(reg_map, &ia32_gp_regs[REG_ESP], "sp");
}
-void ia32_build_8bit_reg_map(pmap *reg_map) {
+void ia32_build_8bit_reg_map(pmap *reg_map)
+{
pmap_insert(reg_map, &ia32_gp_regs[REG_EAX], "al");
pmap_insert(reg_map, &ia32_gp_regs[REG_EBX], "bl");
pmap_insert(reg_map, &ia32_gp_regs[REG_ECX], "cl");
pmap_insert(reg_map, &ia32_gp_regs[REG_EDX], "dl");
}
-void ia32_build_8bit_reg_map_high(pmap *reg_map) {
+void ia32_build_8bit_reg_map_high(pmap *reg_map)
+{
pmap_insert(reg_map, &ia32_gp_regs[REG_EAX], "ah");
pmap_insert(reg_map, &ia32_gp_regs[REG_EBX], "bh");
pmap_insert(reg_map, &ia32_gp_regs[REG_ECX], "ch");
pmap_insert(reg_map, &ia32_gp_regs[REG_EDX], "dh");
}
-const char *ia32_get_mapped_reg_name(pmap *reg_map, const arch_register_t *reg) {
+const char *ia32_get_mapped_reg_name(pmap *reg_map, const arch_register_t *reg)
+{
pmap_entry *e = pmap_find(reg_map, (void *)reg);
//assert(e && "missing map init?");
* @param reason indicates which kind of information should be dumped
* @return 0 on success or != 0 on failure
*/
-static int ia32_dump_node(ir_node *n, FILE *F, dump_reason_t reason) {
+static int ia32_dump_node(ir_node *n, FILE *F, dump_reason_t reason)
+{
ir_mode *mode = NULL;
int bad = 0;
* |___/
***************************************************************************************************/
-ia32_attr_t *get_ia32_attr(ir_node *node) {
+ia32_attr_t *get_ia32_attr(ir_node *node)
+{
assert(is_ia32_irn(node) && "need ia32 node to get ia32 attributes");
return (ia32_attr_t *)get_irn_generic_attr(node);
}
-const ia32_attr_t *get_ia32_attr_const(const ir_node *node) {
+const ia32_attr_t *get_ia32_attr_const(const ir_node *node)
+{
assert(is_ia32_irn(node) && "need ia32 node to get ia32 attributes");
return (const ia32_attr_t*) get_irn_generic_attr_const(node);
}
-ia32_x87_attr_t *get_ia32_x87_attr(ir_node *node) {
+ia32_x87_attr_t *get_ia32_x87_attr(ir_node *node)
+{
ia32_attr_t *attr = get_ia32_attr(node);
ia32_x87_attr_t *x87_attr = CAST_IA32_ATTR(ia32_x87_attr_t, attr);
return x87_attr;
}
-const ia32_x87_attr_t *get_ia32_x87_attr_const(const ir_node *node) {
+const ia32_x87_attr_t *get_ia32_x87_attr_const(const ir_node *node)
+{
const ia32_attr_t *attr = get_ia32_attr_const(node);
const ia32_x87_attr_t *x87_attr = CONST_CAST_IA32_ATTR(ia32_x87_attr_t, attr);
return x87_attr;
}
-const ia32_asm_attr_t *get_ia32_asm_attr_const(const ir_node *node) {
+const ia32_asm_attr_t *get_ia32_asm_attr_const(const ir_node *node)
+{
const ia32_attr_t *attr = get_ia32_attr_const(node);
const ia32_asm_attr_t *asm_attr = CONST_CAST_IA32_ATTR(ia32_asm_attr_t, attr);
return asm_attr;
}
-ia32_immediate_attr_t *get_ia32_immediate_attr(ir_node *node) {
+ia32_immediate_attr_t *get_ia32_immediate_attr(ir_node *node)
+{
ia32_attr_t *attr = get_ia32_attr(node);
ia32_immediate_attr_t *imm_attr = CAST_IA32_ATTR(ia32_immediate_attr_t, attr);
return imm_attr;
}
-ia32_condcode_attr_t *get_ia32_condcode_attr(ir_node *node) {
+ia32_condcode_attr_t *get_ia32_condcode_attr(ir_node *node)
+{
ia32_attr_t *attr = get_ia32_attr(node);
ia32_condcode_attr_t *cc_attr = CAST_IA32_ATTR(ia32_condcode_attr_t, attr);
return cc_attr;
}
-const ia32_condcode_attr_t *get_ia32_condcode_attr_const(const ir_node *node) {
+const ia32_condcode_attr_t *get_ia32_condcode_attr_const(const ir_node *node)
+{
const ia32_attr_t *attr = get_ia32_attr_const(node);
const ia32_condcode_attr_t *cc_attr = CONST_CAST_IA32_ATTR(ia32_condcode_attr_t, attr);
return call_attr;
}
-ia32_copyb_attr_t *get_ia32_copyb_attr(ir_node *node) {
+ia32_copyb_attr_t *get_ia32_copyb_attr(ir_node *node)
+{
ia32_attr_t *attr = get_ia32_attr(node);
ia32_copyb_attr_t *copyb_attr = CAST_IA32_ATTR(ia32_copyb_attr_t, attr);
return copyb_attr;
}
-const ia32_copyb_attr_t *get_ia32_copyb_attr_const(const ir_node *node) {
+const ia32_copyb_attr_t *get_ia32_copyb_attr_const(const ir_node *node)
+{
const ia32_attr_t *attr = get_ia32_attr_const(node);
const ia32_copyb_attr_t *copyb_attr = CONST_CAST_IA32_ATTR(ia32_copyb_attr_t, attr);
return copyb_attr;
}
-ia32_climbframe_attr_t *get_ia32_climbframe_attr(ir_node *node) {
+ia32_climbframe_attr_t *get_ia32_climbframe_attr(ir_node *node)
+{
ia32_attr_t *attr = get_ia32_attr(node);
ia32_climbframe_attr_t *climbframe_attr = CAST_IA32_ATTR(ia32_climbframe_attr_t, attr);
return climbframe_attr;
}
-const ia32_climbframe_attr_t *get_ia32_climbframe_attr_const(const ir_node *node) {
+const ia32_climbframe_attr_t *get_ia32_climbframe_attr_const(const ir_node *node)
+{
const ia32_attr_t *attr = get_ia32_attr_const(node);
const ia32_climbframe_attr_t *climbframe_attr = CONST_CAST_IA32_ATTR(ia32_climbframe_attr_t, attr);
/**
* Gets the type of an ia32 node.
*/
-ia32_op_type_t get_ia32_op_type(const ir_node *node) {
+ia32_op_type_t get_ia32_op_type(const ir_node *node)
+{
const ia32_attr_t *attr = get_ia32_attr_const(node);
return attr->data.tp;
}
/**
* Sets the type of an ia32 node.
*/
-void set_ia32_op_type(ir_node *node, ia32_op_type_t tp) {
+void set_ia32_op_type(ir_node *node, ia32_op_type_t tp)
+{
ia32_attr_t *attr = get_ia32_attr(node);
attr->data.tp = tp;
}
/**
* Gets the address mode offset as int.
*/
-int get_ia32_am_offs_int(const ir_node *node) {
+int get_ia32_am_offs_int(const ir_node *node)
+{
const ia32_attr_t *attr = get_ia32_attr_const(node);
return attr->am_offs;
}
/**
* Sets the address mode offset from an int.
*/
-void set_ia32_am_offs_int(ir_node *node, int offset) {
+void set_ia32_am_offs_int(ir_node *node, int offset)
+{
ia32_attr_t *attr = get_ia32_attr(node);
attr->am_offs = offset;
}
-void add_ia32_am_offs_int(ir_node *node, int offset) {
+void add_ia32_am_offs_int(ir_node *node, int offset)
+{
ia32_attr_t *attr = get_ia32_attr(node);
attr->am_offs += offset;
}
/**
* Returns the symconst entity associated to address mode.
*/
-ir_entity *get_ia32_am_sc(const ir_node *node) {
+ir_entity *get_ia32_am_sc(const ir_node *node)
+{
const ia32_attr_t *attr = get_ia32_attr_const(node);
return attr->am_sc;
}
/**
* Sets the symconst entity associated to address mode.
*/
-void set_ia32_am_sc(ir_node *node, ir_entity *entity) {
+void set_ia32_am_sc(ir_node *node, ir_entity *entity)
+{
ia32_attr_t *attr = get_ia32_attr(node);
attr->am_sc = entity;
}
/**
* Sets the sign bit for address mode symconst.
*/
-void set_ia32_am_sc_sign(ir_node *node) {
+void set_ia32_am_sc_sign(ir_node *node)
+{
ia32_attr_t *attr = get_ia32_attr(node);
attr->data.am_sc_sign = 1;
}
/**
* Clears the sign bit for address mode symconst.
*/
-void clear_ia32_am_sc_sign(ir_node *node) {
+void clear_ia32_am_sc_sign(ir_node *node)
+{
ia32_attr_t *attr = get_ia32_attr(node);
attr->data.am_sc_sign = 0;
}
/**
* Returns the sign bit for address mode symconst.
*/
-int is_ia32_am_sc_sign(const ir_node *node) {
+int is_ia32_am_sc_sign(const ir_node *node)
+{
const ia32_attr_t *attr = get_ia32_attr_const(node);
return attr->data.am_sc_sign;
}
/**
* Gets the addr mode const.
*/
-unsigned get_ia32_am_scale(const ir_node *node) {
+unsigned get_ia32_am_scale(const ir_node *node)
+{
const ia32_attr_t *attr = get_ia32_attr_const(node);
return attr->data.am_scale;
}
/**
* Sets the index register scale for address mode.
*/
-void set_ia32_am_scale(ir_node *node, unsigned scale) {
+void set_ia32_am_scale(ir_node *node, unsigned scale)
+{
ia32_attr_t *attr = get_ia32_attr(node);
assert(scale <= 3 && "AM scale out of range [0 ... 3]");
attr->data.am_scale = scale;
/**
* Sets the uses_frame flag.
*/
-void set_ia32_use_frame(ir_node *node) {
+void set_ia32_use_frame(ir_node *node)
+{
ia32_attr_t *attr = get_ia32_attr(node);
attr->data.use_frame = 1;
}
/**
* Clears the uses_frame flag.
*/
-void clear_ia32_use_frame(ir_node *node) {
+void clear_ia32_use_frame(ir_node *node)
+{
ia32_attr_t *attr = get_ia32_attr(node);
attr->data.use_frame = 0;
}
/**
* Gets the uses_frame flag.
*/
-int is_ia32_use_frame(const ir_node *node) {
+int is_ia32_use_frame(const ir_node *node)
+{
const ia32_attr_t *attr = get_ia32_attr_const(node);
return attr->data.use_frame;
}
/**
* Sets node to commutative.
*/
-void set_ia32_commutative(ir_node *node) {
+void set_ia32_commutative(ir_node *node)
+{
ia32_attr_t *attr = get_ia32_attr(node);
attr->data.is_commutative = 1;
}
/**
* Sets node to non-commutative.
*/
-void clear_ia32_commutative(ir_node *node) {
+void clear_ia32_commutative(ir_node *node)
+{
ia32_attr_t *attr = get_ia32_attr(node);
attr->data.is_commutative = 0;
}
/**
* Checks if node is commutative.
*/
-int is_ia32_commutative(const ir_node *node) {
+int is_ia32_commutative(const ir_node *node)
+{
const ia32_attr_t *attr = get_ia32_attr_const(node);
return attr->data.is_commutative;
}
-void set_ia32_need_stackent(ir_node *node) {
+void set_ia32_need_stackent(ir_node *node)
+{
ia32_attr_t *attr = get_ia32_attr(node);
attr->data.need_stackent = 1;
}
-void clear_ia32_need_stackent(ir_node *node) {
+void clear_ia32_need_stackent(ir_node *node)
+{
ia32_attr_t *attr = get_ia32_attr(node);
attr->data.need_stackent = 0;
}
-int is_ia32_need_stackent(const ir_node *node) {
+int is_ia32_need_stackent(const ir_node *node)
+{
const ia32_attr_t *attr = get_ia32_attr_const(node);
return attr->data.need_stackent;
}
-void set_ia32_is_reload(ir_node *node) {
+void set_ia32_is_reload(ir_node *node)
+{
ia32_attr_t *attr = get_ia32_attr(node);
attr->data.is_reload = 1;
}
-int is_ia32_is_reload(const ir_node *node) {
+int is_ia32_is_reload(const ir_node *node)
+{
const ia32_attr_t *attr = get_ia32_attr_const(node);
return attr->data.is_reload;
}
-void set_ia32_is_spill(ir_node *node) {
+void set_ia32_is_spill(ir_node *node)
+{
ia32_attr_t *attr = get_ia32_attr(node);
attr->data.is_spill = 1;
}
-int is_ia32_is_spill(const ir_node *node) {
+int is_ia32_is_spill(const ir_node *node)
+{
const ia32_attr_t *attr = get_ia32_attr_const(node);
return attr->data.is_spill;
}
-void set_ia32_is_remat(ir_node *node) {
+void set_ia32_is_remat(ir_node *node)
+{
ia32_attr_t *attr = get_ia32_attr(node);
attr->data.is_remat = 1;
}
-int is_ia32_is_remat(const ir_node *node) {
+int is_ia32_is_remat(const ir_node *node)
+{
const ia32_attr_t *attr = get_ia32_attr_const(node);
return attr->data.is_remat;
}
/**
* Gets the mode of the stored/loaded value (only set for Store/Load)
*/
-ir_mode *get_ia32_ls_mode(const ir_node *node) {
+ir_mode *get_ia32_ls_mode(const ir_node *node)
+{
const ia32_attr_t *attr = get_ia32_attr_const(node);
return attr->ls_mode;
}
/**
* Sets the mode of the stored/loaded value (only set for Store/Load)
*/
-void set_ia32_ls_mode(ir_node *node, ir_mode *mode) {
+void set_ia32_ls_mode(ir_node *node, ir_mode *mode)
+{
ia32_attr_t *attr = get_ia32_attr(node);
attr->ls_mode = mode;
}
/**
* Gets the frame entity assigned to this node.
*/
-ir_entity *get_ia32_frame_ent(const ir_node *node) {
+ir_entity *get_ia32_frame_ent(const ir_node *node)
+{
const ia32_attr_t *attr = get_ia32_attr_const(node);
return attr->frame_ent;
}
/**
* Sets the frame entity for this node.
*/
-void set_ia32_frame_ent(ir_node *node, ir_entity *ent) {
+void set_ia32_frame_ent(ir_node *node, ir_entity *ent)
+{
ia32_attr_t *attr = get_ia32_attr(node);
attr->frame_ent = ent;
if(ent != NULL)
/**
* Gets the instruction latency.
*/
-unsigned get_ia32_latency(const ir_node *node) {
+unsigned get_ia32_latency(const ir_node *node)
+{
const ir_op *op = get_irn_op(node);
const ia32_op_attr_t *op_attr = (ia32_op_attr_t*) get_op_attr(op);
return op_attr->latency;
/**
* Returns the argument register requirements of an ia32 node.
*/
-const arch_register_req_t **get_ia32_in_req_all(const ir_node *node) {
+const arch_register_req_t **get_ia32_in_req_all(const ir_node *node)
+{
const ia32_attr_t *attr = get_ia32_attr_const(node);
return attr->in_req;
}
/**
* Sets the argument register requirements of an ia32 node.
*/
-void set_ia32_in_req_all(ir_node *node, const arch_register_req_t **reqs) {
+void set_ia32_in_req_all(ir_node *node, const arch_register_req_t **reqs)
+{
ia32_attr_t *attr = get_ia32_attr(node);
attr->in_req = reqs;
}
/**
* Returns the argument register requirement at position pos of an ia32 node.
*/
-const arch_register_req_t *get_ia32_in_req(const ir_node *node, int pos) {
+const arch_register_req_t *get_ia32_in_req(const ir_node *node, int pos)
+{
const ia32_attr_t *attr = get_ia32_attr_const(node);
if(attr->in_req == NULL)
return arch_no_register_req;
/**
* Returns whether or not the node is an AddrModeS node.
*/
-int is_ia32_AddrModeS(const ir_node *node) {
+int is_ia32_AddrModeS(const ir_node *node)
+{
const ia32_attr_t *attr = get_ia32_attr_const(node);
return (attr->data.tp == ia32_AddrModeS);
}
/**
* Returns whether or not the node is an AddrModeD node.
*/
-int is_ia32_AddrModeD(const ir_node *node) {
+int is_ia32_AddrModeD(const ir_node *node)
+{
const ia32_attr_t *attr = get_ia32_attr_const(node);
return (attr->data.tp == ia32_AddrModeD);
}
}
void
-init_ia32_copyb_attributes(ir_node *res, unsigned size) {
+init_ia32_copyb_attributes(ir_node *res, unsigned size)
+{
ia32_copyb_attr_t *attr = get_irn_generic_attr(res);
#ifndef NDEBUG
}
void
-init_ia32_condcode_attributes(ir_node *res, long pnc) {
+init_ia32_condcode_attributes(ir_node *res, long pnc)
+{
ia32_condcode_attr_t *attr = get_irn_generic_attr(res);
#ifndef NDEBUG
}
void
-init_ia32_climbframe_attributes(ir_node *res, unsigned count) {
+init_ia32_climbframe_attributes(ir_node *res, unsigned count)
+{
ia32_climbframe_attr_t *attr = get_irn_generic_attr(res);
#ifndef NDEBUG
/**
* Hash function for Immediates
*/
-static unsigned ia32_hash_Immediate(const ir_node *irn) {
+static unsigned ia32_hash_Immediate(const ir_node *irn)
+{
const ia32_immediate_attr_t *a = get_ia32_immediate_attr_const(irn);
return HASH_PTR(a->symconst) + (a->sc_sign << 16) + a->offset;
* conditional jump or directly preceded by other jump instruction.
* Can be avoided by placing a Rep prefix before the return.
*/
-static void peephole_ia32_Return(ir_node *node) {
+static void peephole_ia32_Return(ir_node *node)
+{
ir_node *block, *irn;
if (!ia32_cg_config.use_pad_return)
/**
* Return true if a mode can be stored in the GP register set
*/
-static inline int mode_needs_gp_reg(ir_mode *mode) {
+static inline int mode_needs_gp_reg(ir_mode *mode)
+{
if (mode == mode_fpcw)
return 0;
if (get_mode_size_bits(mode) > 32)
* @param new_mode IN/OUT for the mode of the constants, if NULL
* smallest possible mode will be used
*/
-static ir_entity *ia32_create_const_array(ir_node *c0, ir_node *c1, ir_mode **new_mode) {
+static ir_entity *ia32_create_const_array(ir_node *c0, ir_node *c1, ir_mode **new_mode)
+{
ir_entity *ent;
ir_mode *mode = *new_mode;
ir_type *tp;
/**
* Transform Builtin trap
*/
-static ir_node *gen_trap(ir_node *node) {
+static ir_node *gen_trap(ir_node *node)
+{
dbg_info *dbgi = get_irn_dbg_info(node);
ir_node *block = be_transform_node(get_nodes_block(node));
ir_node *mem = be_transform_node(get_Builtin_mem(node));
/**
* Transform Builtin debugbreak
*/
-static ir_node *gen_debugbreak(ir_node *node) {
+static ir_node *gen_debugbreak(ir_node *node)
+{
dbg_info *dbgi = get_irn_dbg_info(node);
ir_node *block = be_transform_node(get_nodes_block(node));
ir_node *mem = be_transform_node(get_Builtin_mem(node));
/**
* Transform Builtin return_address
*/
-static ir_node *gen_return_address(ir_node *node) {
+static ir_node *gen_return_address(ir_node *node)
+{
ir_node *param = get_Builtin_param(node, 0);
ir_node *frame = get_Builtin_param(node, 1);
dbg_info *dbgi = get_irn_dbg_info(node);
/**
* Transform Builtin frame_address
*/
-static ir_node *gen_frame_address(ir_node *node) {
+static ir_node *gen_frame_address(ir_node *node)
+{
ir_node *param = get_Builtin_param(node, 0);
ir_node *frame = get_Builtin_param(node, 1);
dbg_info *dbgi = get_irn_dbg_info(node);
/**
* Transform Builtin frame_address
*/
-static ir_node *gen_prefetch(ir_node *node) {
+static ir_node *gen_prefetch(ir_node *node)
+{
dbg_info *dbgi;
ir_node *ptr, *block, *mem, *base, *index;
ir_node *param, *new_node;
/**
* Transform builtin popcount
*/
-static ir_node *gen_popcount(ir_node *node) {
+static ir_node *gen_popcount(ir_node *node)
+{
ir_node *param = get_Builtin_param(node, 0);
dbg_info *dbgi = get_irn_dbg_info(node);
/**
* Transform builtin byte swap.
*/
-static ir_node *gen_bswap(ir_node *node) {
+static ir_node *gen_bswap(ir_node *node)
+{
ir_node *param = be_transform_node(get_Builtin_param(node, 0));
dbg_info *dbgi = get_irn_dbg_info(node);
/**
* Transform builtin outport.
*/
-static ir_node *gen_outport(ir_node *node) {
+static ir_node *gen_outport(ir_node *node)
+{
ir_node *port = create_immediate_or_transform(get_Builtin_param(node, 0), 0);
ir_node *oldv = get_Builtin_param(node, 1);
ir_mode *mode = get_irn_mode(oldv);
/**
* Transform builtin inport.
*/
-static ir_node *gen_inport(ir_node *node) {
+static ir_node *gen_inport(ir_node *node)
+{
ir_type *tp = get_Builtin_type(node);
ir_type *rstp = get_method_res_type(tp, 0);
ir_mode *mode = get_type_mode(rstp);
/**
* Transform a builtin inner trampoline
*/
-static ir_node *gen_inner_trampoline(ir_node *node) {
+static ir_node *gen_inner_trampoline(ir_node *node)
+{
ir_node *ptr = get_Builtin_param(node, 0);
ir_node *callee = get_Builtin_param(node, 1);
ir_node *env = be_transform_node(get_Builtin_param(node, 2));
/**
* Transform Builtin node.
*/
-static ir_node *gen_Builtin(ir_node *node) {
+static ir_node *gen_Builtin(ir_node *node)
+{
ir_builtin_kind kind = get_Builtin_kind(node);
switch (kind) {
/**
* Transform Proj(Builtin) node.
*/
-static ir_node *gen_Proj_Builtin(ir_node *proj) {
+static ir_node *gen_Proj_Builtin(ir_node *proj)
+{
ir_node *node = get_Proj_pred(proj);
ir_node *new_node = be_transform_node(node);
ir_builtin_kind kind = get_Builtin_kind(node);
* The ABI requires that the results are in st0, copy them
* to a xmm register.
*/
-static void postprocess_fp_call_results(void) {
+static void postprocess_fp_call_results(void)
+{
int i;
for (i = ARR_LEN(call_list) - 1; i >= 0; --i) {
* @param irn The irn
* @param First proj with mode != mode_M or NULL if none found
*/
-ir_node *ia32_get_res_proj(const ir_node *irn) {
+ir_node *ia32_get_res_proj(const ir_node *irn)
+{
const ir_edge_t *edge;
ir_node *src;
* Transforms the standard firm graph into
* a mips firm graph
*/
-static void mips_prepare_graph(void *self) {
+static void mips_prepare_graph(void *self)
+{
mips_code_gen_t *cg = self;
/* do local optimizations */
/**
* Called immediately before emit phase.
*/
-static void mips_finish_irg(void *self) {
+static void mips_finish_irg(void *self)
+{
mips_code_gen_t *cg = self;
ir_graph *irg = cg->irg;
/**
* Initializes the backend ISA and opens the output file.
*/
-static arch_env_t *mips_init(FILE *file_handle) {
+static arch_env_t *mips_init(FILE *file_handle)
+{
static int inited = 0;
mips_isa_t *isa;
* it will contain the return address and space to store the old frame pointer.
* @return The Firm type modelling the ABI between type.
*/
-static ir_type *mips_abi_get_between_type(void *self) {
+static ir_type *mips_abi_get_between_type(void *self)
+{
mips_abi_env_t *env = self;
static ir_type *debug_between_type = NULL;
/**
* Returns the libFirm configuration parameter for this backend.
*/
-static const backend_params *mips_get_libfirm_params(void) {
+static const backend_params *mips_get_libfirm_params(void)
+{
static backend_params p = {
1, /* need dword lowering */
0, /* don't support inline assembler yet */
const arch_register_t *reg;
};
-int mips_cmp_irn_reg_assoc(const void *a, const void *b, size_t size) {
+int mips_cmp_irn_reg_assoc(const void *a, const void *b, size_t size)
+{
const struct mips_irn_reg_assoc *x = a;
const struct mips_irn_reg_assoc *y = b;
(void) size;
return x->irn != y->irn;
}
-static struct mips_irn_reg_assoc *get_irn_reg_assoc(const ir_node *irn, set *reg_set) {
+static struct mips_irn_reg_assoc *get_irn_reg_assoc(const ir_node *irn, set *reg_set)
+{
struct mips_irn_reg_assoc templ;
unsigned int hash;
return set_insert(reg_set, &templ, sizeof(templ), hash);
}
-void mips_set_firm_reg(ir_node *irn, const arch_register_t *reg, set *reg_set) {
+void mips_set_firm_reg(ir_node *irn, const arch_register_t *reg, set *reg_set)
+{
struct mips_irn_reg_assoc *assoc = get_irn_reg_assoc(irn, reg_set);
assoc->reg = reg;
}
-const arch_register_t *mips_get_firm_reg(const ir_node *irn, set *reg_set) {
+const arch_register_t *mips_get_firm_reg(const ir_node *irn, set *reg_set)
+{
struct mips_irn_reg_assoc *assoc = get_irn_reg_assoc(irn, reg_set);
return assoc->reg;
}
* Translates the projnum into a "real" argument position for register
* requirements dependend on the predecessor.
*/
-long mips_translate_proj_pos(const ir_node *proj) {
+long mips_translate_proj_pos(const ir_node *proj)
+{
return get_Proj_proj(proj);
}
return is_mips_irn(irn) && !is_mips_zero(irn) && !is_mips_reinterpret_conv(irn) && !is_mips_fallthrough(irn);
}
-static void mips_collect_mflohis(pset* set, ir_node* node) {
+static void mips_collect_mflohis(pset* set, ir_node* node)
+{
// construct a list of nodes that need to be scheduled before
// we are allowed to schedule another div or mul instruction
const ir_edge_t *edge, *edge2;
typedef ir_node *construct_binop_func(dbg_info *db, ir_node *block,
ir_node *left, ir_node *right);
-static inline int mode_needs_gp_reg(ir_mode *mode) {
+static inline int mode_needs_gp_reg(ir_mode *mode)
+{
return mode_is_int(mode) || mode_is_reference(mode);
}
}
#if 0
-static ir_node *gen_node_for_Mul(mips_transform_env_t *env) {
+static ir_node *gen_node_for_Mul(mips_transform_env_t *env)
+{
ir_node *node = env->irn;
ir_node *mul;
ir_node *mflo;
}
static
-ir_node *gen_node_for_IJmp(mips_transform_env_t *env) {
+ir_node *gen_node_for_IJmp(mips_transform_env_t *env)
+{
ir_node *node = env->irn;
dbg_info *dbg = get_irn_dbg_info(node);
ir_node *block = get_nodes_block(node);
}
static
-ir_node *gen_node_for_Rot(mips_transform_env_t *env) {
+ir_node *gen_node_for_Rot(mips_transform_env_t *env)
+{
ir_node *node = env->irn;
ir_node *subu, *srlv, *sllv, *or;
/*
* lower a copyB into standard Firm assembler :-)
*/
-ir_node *gen_code_for_CopyB(ir_node *block, ir_node *node) {
+ir_node *gen_code_for_CopyB(ir_node *block, ir_node *node)
+{
ir_node *cnt, *sub;
ir_node *dst = get_CopyB_dst(node);
ir_node *src = get_CopyB_src(node);
return result;
}
-static void mips_fix_CopyB_Proj(mips_transform_env_t* env) {
+static void mips_fix_CopyB_Proj(mips_transform_env_t* env)
+{
ir_node *node = env->irn;
long n = get_Proj_proj(node);
}
#endif
-static void mips_transform_Spill(mips_transform_env_t* env) {
+static void mips_transform_Spill(mips_transform_env_t* env)
+{
ir_node *node = env->irn;
ir_node *sched_point = NULL;
ir_node *store;
exchange(node, store);
}
-static void mips_transform_Reload(mips_transform_env_t* env) {
+static void mips_transform_Reload(mips_transform_env_t* env)
+{
ir_node *node = env->irn;
ir_node *sched_point = NULL;
ir_node *load, *proj;
/**
* Calls the transform functions for Spill and Reload.
*/
-void mips_after_ra_walker(ir_node *node, void *env) {
+void mips_after_ra_walker(ir_node *node, void *env)
+{
mips_code_gen_t *cg = env;
mips_transform_env_t tenv;
* |___/
**************************************************/
-static void ppc32_before_abi(void *self) {
+static void ppc32_before_abi(void *self)
+{
ppc32_code_gen_t *cg = self;
ir_type *frame_type = get_irg_frame_type(cg->irg);
}
}
-static void ppc32_search_start_successor(ir_node *block, void *env) {
+static void ppc32_search_start_successor(ir_node *block, void *env)
+{
ppc32_code_gen_t *cg = env;
int n = get_Block_n_cfgpreds(block);
ir_node *startblock = get_irg_start_block(cg->irg);
* Transforms the standard firm graph into
* a ppc firm graph
*/
-static void ppc32_prepare_graph(void *self) {
+static void ppc32_prepare_graph(void *self)
+{
ppc32_code_gen_t *cg = self;
irg_block_walk_graph(cg->irg, NULL, ppc32_search_start_successor, cg);
/**
* Called immediatly before emit phase.
*/
-static void ppc32_finish_irg(void *self) {
+static void ppc32_finish_irg(void *self)
+{
(void) self;
/* TODO: - fix offsets for nodes accessing stack
- ...
* Calculate a block schedule here. We need it for the x87
* simulator and the emitter.
*/
-static void ppc32_before_ra(void *self) {
+static void ppc32_before_ra(void *self)
+{
ppc32_code_gen_t *cg = self;
cg->blk_sched = be_create_block_schedule(cg->irg, cg->birg->exec_freq);
}
/**
* Some stuff to do immediately after register allocation
*/
-static void ppc32_after_ra(void *self) {
+static void ppc32_after_ra(void *self)
+{
ppc32_code_gen_t *cg = self;
be_coalesce_spillslots(cg->birg);
irg_walk_blkwise_graph(cg->irg, NULL, ppc32_transform_spill, NULL);
* Emits the code, closes the output file and frees
* the code generator interface.
*/
-static void ppc32_emit_and_done(void *self) {
+static void ppc32_emit_and_done(void *self)
+{
ppc32_code_gen_t *cg = self;
ir_graph *irg = cg->irg;
/**
* Initializes the code generator.
*/
-static void *ppc32_cg_init(be_irg_t *birg) {
+static void *ppc32_cg_init(be_irg_t *birg)
+{
ppc32_isa_t *isa = (ppc32_isa_t *)birg->main_env->arch_env;
ppc32_code_gen_t *cg = XMALLOC(ppc32_code_gen_t);
* @param node the firm node
* @param env the symbol set
*/
-static void ppc32_collect_symconsts_walk(ir_node *node, void *env) {
+static void ppc32_collect_symconsts_walk(ir_node *node, void *env)
+{
pset *symbol_set = env;
if (is_SymConst(node)) {
/**
* Initializes the backend ISA and opens the output file.
*/
-static arch_env_t *ppc32_init(FILE *file_handle) {
+static arch_env_t *ppc32_init(FILE *file_handle)
+{
static int inited = 0;
ppc32_isa_t *isa;
int i;
return &isa->arch_env;
}
-static void ppc32_dump_indirect_symbols(ppc32_isa_t *isa) {
+static void ppc32_dump_indirect_symbols(ppc32_isa_t *isa)
+{
ir_entity *ent;
foreach_pset(isa->symbol_set, ent) {
/**
* Closes the output file and frees the ISA structure.
*/
-static void ppc32_done(void *self) {
+static void ppc32_done(void *self)
+{
ppc32_isa_t *isa = self;
be_gas_emit_decls(isa->arch_env.main_env);
* @param method_type The type of the method (procedure) in question.
* @param abi The abi object to be modified
*/
-static void ppc32_get_call_abi(const void *self, ir_type *method_type, be_abi_call_t *abi) {
+static void ppc32_get_call_abi(const void *self, ir_type *method_type, be_abi_call_t *abi)
+{
ir_type *tp;
ir_mode *mode;
int i, n = get_method_n_params(method_type);
}
}
-int ppc32_to_appear_in_schedule(void *block_env, const ir_node *irn) {
+int ppc32_to_appear_in_schedule(void *block_env, const ir_node *irn)
+{
(void) block_env;
if(!is_ppc32_irn(irn))
return -1;
/**
* Initializes the code generator interface.
*/
-static const arch_code_generator_if_t *ppc32_get_code_generator_if(void *self) {
+static const arch_code_generator_if_t *ppc32_get_code_generator_if(void *self)
+{
(void) self;
return &ppc32_code_gen_if;
}
/**
* Returns the reg_pressure scheduler with to_appear_in_schedule() overloaded
*/
-static const list_sched_selector_t *ppc32_get_list_sched_selector(const void *self, list_sched_selector_t *selector) {
+static const list_sched_selector_t *ppc32_get_list_sched_selector(const void *self, list_sched_selector_t *selector)
+{
(void) self;
(void) selector;
ppc32_sched_selector = trivial_selector;
return &ppc32_sched_selector;
}
-static const ilp_sched_selector_t *ppc32_get_ilp_sched_selector(const void *self) {
+static const ilp_sched_selector_t *ppc32_get_ilp_sched_selector(const void *self)
+{
(void) self;
return NULL;
}
return get_mode_size_bytes(mode);
}
-static const be_execution_unit_t ***ppc32_get_allowed_execution_units(const ir_node *irn) {
+static const be_execution_unit_t ***ppc32_get_allowed_execution_units(const ir_node *irn)
+{
(void) irn;
/* TODO */
panic("Unimplemented ppc32_get_allowed_execution_units()");
return NULL;
}
-static const be_machine_t *ppc32_get_machine(const void *self) {
+static const be_machine_t *ppc32_get_machine(const void *self)
+{
(void) self;
/* TODO */
panic("Unimplemented ppc32_get_machine()");
/**
* Return irp irgs in the desired order.
*/
-static ir_graph **ppc32_get_irg_list(const void *self, ir_graph ***irg_list) {
+static ir_graph **ppc32_get_irg_list(const void *self, ir_graph ***irg_list)
+{
(void) self;
(void) irg_list;
return NULL;
/**
* Returns the libFirm configuration parameter for this backend.
*/
-static const backend_params *ppc32_get_libfirm_params(void) {
+static const backend_params *ppc32_get_libfirm_params(void)
+{
static backend_params p = {
1, /* need dword lowering */
0, /* don't support inline assembler yet */
/**
* Returns the register at in position pos.
*/
-static const arch_register_t *get_in_reg(const ir_node *irn, int pos) {
+static const arch_register_t *get_in_reg(const ir_node *irn, int pos)
+{
ir_node *op;
const arch_register_t *reg = NULL;
/**
* Returns the register at out position pos.
*/
-static const arch_register_t *get_out_reg(const ir_node *irn, int pos) {
+static const arch_register_t *get_out_reg(const ir_node *irn, int pos)
+{
ir_node *proj;
const arch_register_t *reg = NULL;
/**
* Emit the name of the source register at given input position.
*/
-void ppc32_emit_source_register(const ir_node *node, int pos) {
+void ppc32_emit_source_register(const ir_node *node, int pos)
+{
const arch_register_t *reg = get_in_reg(node, pos);
be_emit_string(arch_register_get_name(reg));
}
/**
* Emit the name of the destination register at given output position.
*/
-void ppc32_emit_dest_register(const ir_node *node, int pos) {
+void ppc32_emit_dest_register(const ir_node *node, int pos)
+{
const arch_register_t *reg = get_out_reg(node, pos);
be_emit_string(arch_register_get_name(reg));
}
-void ppc32_emit_rlwimi_helper(const ir_node *n) {
+void ppc32_emit_rlwimi_helper(const ir_node *n)
+{
const rlwimi_const_t *rlwimi_const = get_ppc32_rlwimi_const(n);
be_emit_irprintf("%i, %i, %i", rlwimi_const->shift,
/**
* Emit a const or symconst.
*/
-void ppc32_emit_immediate(const ir_node *n) {
+void ppc32_emit_immediate(const ir_node *n)
+{
const char *buf;
switch (get_ppc32_type(n)) {
/**
* Emits a node's offset.
*/
-void ppc32_emit_offset(const ir_node *n) {
+void ppc32_emit_offset(const ir_node *n)
+{
const char *buf;
if (get_ppc32_type(n) == ppc32_ac_None) {
be_emit_char('0');
/**
* Returns the target label for a control flow node.
*/
-static char *get_cfop_target(const ir_node *irn, char *buf) {
+static char *get_cfop_target(const ir_node *irn, char *buf)
+{
ir_node *bl = get_irn_link(irn);
snprintf(buf, SNPRINTF_BUF_LEN, "BLOCK_%ld", get_irn_node_nr(bl));
/**
* Emits code for a unconditional jump.
*/
-static void emit_Jmp(const ir_node *irn) {
+static void emit_Jmp(const ir_node *irn)
+{
ir_node *block = get_nodes_block(irn);
if (get_irn_link(irn) != get_irn_link(block)) {
/**
* Emits code for a call
*/
-static void emit_be_Call(const ir_node *irn) {
+static void emit_be_Call(const ir_node *irn)
+{
ir_entity *call_ent = be_Call_get_entity(irn);
if (call_ent) {
be_emit_finish_line_gas(irn);
}
-static void emit_ppc32_Branch(const ir_node *irn) {
+static void emit_ppc32_Branch(const ir_node *irn)
+{
static const char *branchops[8] = { 0, "beq", "blt", "ble", "bgt", "bge", "bne", "b" };
int projnum = get_ppc32_proj_nr(irn);
}
}
-static void emit_ppc32_LoopCopy(const ir_node *irn) {
+static void emit_ppc32_LoopCopy(const ir_node *irn)
+{
be_emit_irprintf("LOOP_%ld:\n", get_irn_node_nr(irn));
be_emit_write_line();
be_emit_finish_line_gas(irn);
}
-static void emit_ppc32_Switch(const ir_node *irn) {
+static void emit_ppc32_Switch(const ir_node *irn)
+{
ir_node *proj, *defproj = NULL;
int pn;
/**
* Emits code for a backend Copy node
*/
-static void emit_be_Copy(const ir_node *irn) {
+static void emit_be_Copy(const ir_node *irn)
+{
const arch_register_class_t *regclass = arch_get_irn_reg_class(irn, 0);
if (regclass == &ppc32_reg_classes[CLASS_ppc32_gp]) {
/**
* Emits code for a backend Perm node
*/
-static void emit_be_Perm(const ir_node *irn) {
+static void emit_be_Perm(const ir_node *irn)
+{
const arch_register_class_t *regclass = arch_get_irn_reg_class(irn, 0);
if (regclass == &ppc32_reg_classes[CLASS_ppc32_gp]) {
/**
* Emits code for a proj -> node
*/
-static void emit_Proj(const ir_node *irn) {
+static void emit_Proj(const ir_node *irn)
+{
ir_node *pred = get_Proj_pred(irn);
if (is_Start(pred)) {
}
}
-static void emit_be_IncSP(const ir_node *irn) {
+static void emit_be_IncSP(const ir_node *irn)
+{
int offs = be_get_IncSP_offset(irn);
be_emit_irprintf("\t/* ignored IncSP with %d */", -offs);
/**
* Set a node emitter. Make it a bit more type safe.
*/
-static inline void set_emitter(ir_op *op, emit_func ppc32_emit_node) {
+static inline void set_emitter(ir_op *op, emit_func ppc32_emit_node)
+{
op->ops.generic = (op_func)ppc32_emit_node;
}
-static void ppc32_register_emitters(void) {
+static void ppc32_register_emitters(void)
+{
/* first clear generic function pointers */
clear_irp_opcodes_generic_func();
/**
* Emits code for a node.
*/
-static void ppc32_emit_node(const ir_node *irn) {
+static void ppc32_emit_node(const ir_node *irn)
+{
ir_op *op = get_irn_op(irn);
if (op->ops.generic) {
* Walks over the nodes in a block connected by scheduling edges
* and emits code for each node.
*/
-static void ppc32_gen_block(const ir_node *block) {
+static void ppc32_gen_block(const ir_node *block)
+{
ir_node *irn;
if (! is_Block(block))
/**
* Emits code for function start.
*/
-static void ppc32_emit_start(ir_graph *irg) {
+static void ppc32_emit_start(ir_graph *irg)
+{
const char *irg_name = get_entity_ld_name(get_irg_entity(irg));
int framesize = get_type_size_bytes(get_irg_frame_type(irg));
/**
* Emits code for function end
*/
-static void ppc32_emit_end(ir_graph *irg) {
+static void ppc32_emit_end(ir_graph *irg)
+{
int framesize = get_type_size_bytes(get_irg_frame_type(irg));
(void) irg;
* Sets labels for control flow nodes (jump target)
* TODO: Jump optimization
*/
-void ppc32_gen_labels(ir_node *block, void *env) {
+void ppc32_gen_labels(ir_node *block, void *env)
+{
ir_node *pred;
int n;
(void) env;
const arch_register_t *reg;
};
-int ppc32_cmp_irn_reg_assoc(const void *a, const void *b, size_t len) {
+int ppc32_cmp_irn_reg_assoc(const void *a, const void *b, size_t len)
+{
const struct ppc32_irn_reg_assoc *x = a;
const struct ppc32_irn_reg_assoc *y = b;
(void) len;
return x->irn != y->irn;
}
-static struct ppc32_irn_reg_assoc *get_irn_reg_assoc(const ir_node *irn, set *reg_set) {
+static struct ppc32_irn_reg_assoc *get_irn_reg_assoc(const ir_node *irn, set *reg_set)
+{
struct ppc32_irn_reg_assoc templ;
unsigned int hash;
return set_insert(reg_set, &templ, sizeof(templ), hash);
}
-void ppc32_set_firm_reg(ir_node *irn, const arch_register_t *reg, set *reg_set) {
+void ppc32_set_firm_reg(ir_node *irn, const arch_register_t *reg, set *reg_set)
+{
struct ppc32_irn_reg_assoc *assoc = get_irn_reg_assoc(irn, reg_set);
assoc->reg = reg;
}
-const arch_register_t *ppc32_get_firm_reg(const ir_node *irn, set *reg_set) {
+const arch_register_t *ppc32_get_firm_reg(const ir_node *irn, set *reg_set)
+{
struct ppc32_irn_reg_assoc *assoc = get_irn_reg_assoc(irn, reg_set);
return assoc->reg;
}
* Translates the projnum into a "real" argument position for register
* requirements dependend on the predecessor.
*/
-long ppc32_translate_proj_pos(const ir_node *proj) {
+long ppc32_translate_proj_pos(const ir_node *proj)
+{
ir_node *pred = get_Proj_pred(proj);
long nr = get_Proj_proj(proj);
* |___/
***************************************************************************************************/
-ppc32_attr_t *get_ppc32_attr(ir_node *node) {
+ppc32_attr_t *get_ppc32_attr(ir_node *node)
+{
assert(is_ppc32_irn(node) && "need ppc node to get attributes");
return (ppc32_attr_t *)get_irn_generic_attr(node);
}
-const ppc32_attr_t *get_ppc32_attr_const(const ir_node *node) {
+const ppc32_attr_t *get_ppc32_attr_const(const ir_node *node)
+{
assert(is_ppc32_irn(node) && "need ppc node to get attributes");
return (const ppc32_attr_t *)get_irn_generic_attr_const(node);
}
/**
* Returns the argument register requirements of a ppc node.
*/
-const arch_register_req_t **get_ppc32_in_req_all(const ir_node *node) {
+const arch_register_req_t **get_ppc32_in_req_all(const ir_node *node)
+{
const ppc32_attr_t *attr = get_ppc32_attr_const(node);
return attr->in_req;
}
/**
* Returns the argument register requirement at position pos of an ppc node.
*/
-const arch_register_req_t *get_ppc32_in_req(const ir_node *node, int pos) {
+const arch_register_req_t *get_ppc32_in_req(const ir_node *node, int pos)
+{
const ppc32_attr_t *attr = get_ppc32_attr_const(node);
return attr->in_req[pos];
}
/**
* Sets the IN register requirements at position pos.
*/
-void set_ppc32_req_in(ir_node *node, const arch_register_req_t *req, int pos) {
+void set_ppc32_req_in(ir_node *node, const arch_register_req_t *req, int pos)
+{
ppc32_attr_t *attr = get_ppc32_attr(node);
attr->in_req[pos] = req;
}
/**
* Returns the type of the content (if any)
*/
-ppc32_attr_content_type get_ppc32_type(const ir_node *node) {
+ppc32_attr_content_type get_ppc32_type(const ir_node *node)
+{
const ppc32_attr_t *attr = get_ppc32_attr_const(node);
return attr->content_type;
}
/**
* Sets a tarval type content (also updating the content_type)
*/
-void set_ppc32_constant_tarval(ir_node *node, tarval *const_tarval) {
+void set_ppc32_constant_tarval(ir_node *node, tarval *const_tarval)
+{
ppc32_attr_t *attr = get_ppc32_attr(node);
attr->content_type = ppc32_ac_Const;
attr->data.constant_tarval = const_tarval;
/**
* Returns a tarval type constant
*/
-tarval *get_ppc32_constant_tarval(const ir_node *node) {
+tarval *get_ppc32_constant_tarval(const ir_node *node)
+{
const ppc32_attr_t *attr = get_ppc32_attr_const(node);
return attr->data.constant_tarval;
}
/**
* Sets an ident type constant (also updating the content_type)
*/
-void set_ppc32_symconst_ident(ir_node *node, ident *symconst_ident) {
+void set_ppc32_symconst_ident(ir_node *node, ident *symconst_ident)
+{
ppc32_attr_t *attr = get_ppc32_attr(node);
attr->content_type = ppc32_ac_SymConst;
attr->data.symconst_ident = symconst_ident;
/**
* Returns an ident type constant
*/
-ident *get_ppc32_symconst_ident(const ir_node *node) {
+ident *get_ppc32_symconst_ident(const ir_node *node)
+{
const ppc32_attr_t *attr = get_ppc32_attr_const(node);
return attr->data.symconst_ident;
}
/**
* Sets an entity (also updating the content_type)
*/
-void set_ppc32_frame_entity(ir_node *node, ir_entity *ent) {
+void set_ppc32_frame_entity(ir_node *node, ir_entity *ent)
+{
ppc32_attr_t *attr = get_ppc32_attr(node);
attr->content_type = ppc32_ac_FrameEntity;
attr->data.frame_entity = ent;
/**
* Returns an entity
*/
-ir_entity *get_ppc32_frame_entity(const ir_node *node) {
+ir_entity *get_ppc32_frame_entity(const ir_node *node)
+{
const ppc32_attr_t *attr = get_ppc32_attr_const(node);
return attr->data.frame_entity;
}
/**
* Sets a Rlwimi const (also updating the content_type)
*/
-void set_ppc32_rlwimi_const(ir_node *node, unsigned shift, unsigned maskA, unsigned maskB) {
+void set_ppc32_rlwimi_const(ir_node *node, unsigned shift, unsigned maskA, unsigned maskB)
+{
ppc32_attr_t *attr = get_ppc32_attr(node);
attr->content_type = ppc32_ac_RlwimiConst;
attr->data.rlwimi_const.shift = shift;
/**
* Returns the rlwimi const structure
*/
-const rlwimi_const_t *get_ppc32_rlwimi_const(const ir_node *node) {
+const rlwimi_const_t *get_ppc32_rlwimi_const(const ir_node *node)
+{
const ppc32_attr_t *attr = get_ppc32_attr_const(node);
return &attr->data.rlwimi_const;
}
/**
* Sets a Proj number (also updating the content_type)
*/
-void set_ppc32_proj_nr(ir_node *node, int proj_nr) {
+void set_ppc32_proj_nr(ir_node *node, int proj_nr)
+{
ppc32_attr_t *attr = get_ppc32_attr(node);
attr->content_type = ppc32_ac_BranchProj;
attr->data.proj_nr = proj_nr;
/**
* Returns the proj number
*/
-int get_ppc32_proj_nr(const ir_node *node) {
+int get_ppc32_proj_nr(const ir_node *node)
+{
const ppc32_attr_t *attr = get_ppc32_attr_const(node);
return attr->data.proj_nr;
}
/**
* Sets an offset for a memory access (also updating the content_type)
*/
-void set_ppc32_offset(ir_node *node, int offset) {
+void set_ppc32_offset(ir_node *node, int offset)
+{
ppc32_attr_t *attr = get_ppc32_attr(node);
attr->content_type = ppc32_ac_Offset;
attr->data.offset = offset;
/**
* Returns the offset
*/
-int get_ppc32_offset(const ir_node *node) {
+int get_ppc32_offset(const ir_node *node)
+{
const ppc32_attr_t *attr = get_ppc32_attr_const(node);
return attr->data.offset;
}
/**
* Sets the offset mode (ppc32_ao_None, ppc32_ao_Lo16, ppc32_ao_Hi16 or ppc32_ao_Ha16)
*/
-void set_ppc32_offset_mode(ir_node *node, ppc32_attr_offset_mode mode) {
+void set_ppc32_offset_mode(ir_node *node, ppc32_attr_offset_mode mode)
+{
ppc32_attr_t *attr = get_ppc32_attr(node);
attr->offset_mode = mode;
}
/**
* Returns the offset mode
*/
-ppc32_attr_offset_mode get_ppc32_offset_mode(const ir_node *node) {
+ppc32_attr_offset_mode get_ppc32_offset_mode(const ir_node *node)
+{
const ppc32_attr_t *attr = get_ppc32_attr_const(node);
return attr->offset_mode;
}
/**
* Returns a singleton condition mode
*/
-ir_mode *get_ppc32_mode_Cond(void) {
+ir_mode *get_ppc32_mode_Cond(void)
+{
if (ppc32_mode_Cond)
return ppc32_mode_Cond;
else {
* @param env The transformation environment
* @return the created ppc Add node
*/
-static ir_node *gen_Add(ppc32_transform_env_t *env) {
+static ir_node *gen_Add(ppc32_transform_env_t *env)
+{
ir_node *op1 = get_Add_left(env->irn);
ir_node *op2 = get_Add_right(env->irn);
* @param env The transformation environment
* @return the created ppc Mul node
*/
-static ir_node *gen_Mul(ppc32_transform_env_t *env) {
+static ir_node *gen_Mul(ppc32_transform_env_t *env)
+{
ir_node *op1 = get_Mul_left(env->irn);
ir_node *op2 = get_Mul_right(env->irn);
* @param env The transformation environment
* @return the created ppc Mulh node
*/
-static ir_node *gen_Mulh(ppc32_transform_env_t *env) {
+static ir_node *gen_Mulh(ppc32_transform_env_t *env)
+{
ir_node *op1 = get_irn_n(env->irn, 0);
ir_node *op2 = get_irn_n(env->irn, 1);
* @param env The transformation environment
* @return the created ppc And node
*/
-static ir_node *gen_And(ppc32_transform_env_t *env) {
+static ir_node *gen_And(ppc32_transform_env_t *env)
+{
ir_node *op1 = get_And_left(env->irn);
ir_node *op2 = get_And_right(env->irn);
* @param env The transformation environment
* @return the created ppc Or node
*/
-static ir_node *gen_Or(ppc32_transform_env_t *env) {
+static ir_node *gen_Or(ppc32_transform_env_t *env)
+{
ir_node *op1 = get_Or_left(env->irn);
ir_node *op2 = get_Or_right(env->irn);
* @param env The transformation environment
* @return the created ppc Xor node
*/
-static ir_node *gen_Eor(ppc32_transform_env_t *env) {
+static ir_node *gen_Eor(ppc32_transform_env_t *env)
+{
ir_node *op1 = get_Eor_left(env->irn);
ir_node *op2 = get_Eor_right(env->irn);
* @param env The transformation environment
* @return the created ppc Sub node
*/
-static ir_node *gen_Sub(ppc32_transform_env_t *env) {
+static ir_node *gen_Sub(ppc32_transform_env_t *env)
+{
ir_node *op1 = get_Sub_left(env->irn);
ir_node *op2 = get_Sub_right(env->irn);
* @param env The transformation environment
* @return the created ppc fDiv node
*/
-static ir_node *gen_Quot(ppc32_transform_env_t *env) {
+static ir_node *gen_Quot(ppc32_transform_env_t *env)
+{
ir_node *op1 = get_Quot_left(env->irn);
ir_node *op2 = get_Quot_right(env->irn);
* @param env The transformation environment
* @return the created ppc Div node
*/
-static ir_node *gen_Div(ppc32_transform_env_t *env) {
+static ir_node *gen_Div(ppc32_transform_env_t *env)
+{
ir_node *op1 = get_Div_left(env->irn);
ir_node *op2 = get_Div_right(env->irn);
* @param env The transformation environment
* @return the created ppc Div node
*/
-static ir_node *gen_DivMod(ppc32_transform_env_t *env) {
+static ir_node *gen_DivMod(ppc32_transform_env_t *env)
+{
ir_node *op1 = get_DivMod_left(env->irn);
ir_node *op2 = get_DivMod_right(env->irn);
ir_node *proj_div = NULL, *proj_mod = NULL;
* @param env The transformation environment
* @return the created ppc Mod result node
*/
-static ir_node *gen_Mod(ppc32_transform_env_t *env) {
+static ir_node *gen_Mod(ppc32_transform_env_t *env)
+{
ir_node *op1 = get_Mod_left(env->irn);
ir_node *op2 = get_Mod_right(env->irn);
ir_node *proj_div = NULL, *proj_mod = NULL;
* @param env The transformation environment
* @return the created ppc Shl node
*/
-static ir_node *gen_Shl(ppc32_transform_env_t *env) {
+static ir_node *gen_Shl(ppc32_transform_env_t *env)
+{
ir_node *op1 = get_Shl_left(env->irn);
ir_node *op2 = get_Shl_right(env->irn);
* @param env The transformation environment
* @return the created ppc Shr node
*/
-static ir_node *gen_Shr(ppc32_transform_env_t *env) {
+static ir_node *gen_Shr(ppc32_transform_env_t *env)
+{
ir_node *op1 = get_Shr_left(env->irn);
ir_node *op2 = get_Shr_right(env->irn);
* @param env The transformation environment
* @return the created ppc Sraw node
*/
-static ir_node *gen_Shrs(ppc32_transform_env_t *env) {
+static ir_node *gen_Shrs(ppc32_transform_env_t *env)
+{
ir_node *op1 = get_Shrs_left(env->irn);
ir_node *op2 = get_Shrs_right(env->irn);
* @param env The transformation environment
* @return the created ppc Rotl node
*/
-static ir_node *gen_Rotl(ppc32_transform_env_t *env) {
+static ir_node *gen_Rotl(ppc32_transform_env_t *env)
+{
ir_node *op1 = get_Rotl_left(env->irn);
ir_node *op2 = get_Rotl_right(env->irn);
* @param env The transformation environment
* @return the created ppc Cmp node
*/
-static ir_node *gen_Cmp(ppc32_transform_env_t *env) {
+static ir_node *gen_Cmp(ppc32_transform_env_t *env)
+{
ir_node *op1 = get_Cmp_left(env->irn);
ir_node *op2 = get_Cmp_right(env->irn);
* @param env The transformation environment
* @return the created ppc Minus node
*/
-static ir_node *gen_Minus(ppc32_transform_env_t *env) {
+static ir_node *gen_Minus(ppc32_transform_env_t *env)
+{
ir_node *op = get_Minus_op(env->irn);
switch(get_nice_modecode(env->mode)){
* @param env The transformation environment
* @return the created ppc Not node
*/
-static ir_node *gen_Not(ppc32_transform_env_t *env) {
+static ir_node *gen_Not(ppc32_transform_env_t *env)
+{
return new_bd_ppc32_Not(env->dbg, env->block, get_Not_op(env->irn), env->mode);
}
* @param env The transformation environment
* @return the created ppc Conv node
*/
-static ir_node *gen_Conv(ppc32_transform_env_t *env) {
+static ir_node *gen_Conv(ppc32_transform_env_t *env)
+{
ir_node *op = get_Conv_op(env->irn);
ppc32_modecode from_mode=get_nice_modecode(get_irn_mode(op));
ppc32_modecode to_mode=get_nice_modecode(env->mode);
* @param env The transformation environment
* @return the ppc node generating the absolute value
*/
-static ir_node *gen_Abs(ppc32_transform_env_t *env) {
+static ir_node *gen_Abs(ppc32_transform_env_t *env)
+{
ir_node *op = get_Abs_op(env->irn);
int shift = 7;
ir_node *n1,*n2;
* @param env The transformation environment
* @return a ppc branch node
*/
-static ir_node *gen_Cond(ppc32_transform_env_t *env) {
+static ir_node *gen_Cond(ppc32_transform_env_t *env)
+{
ir_node *selector = get_Cond_selector(env->irn);
ir_mode *projmode = get_irn_mode(selector);
if(is_Proj(selector) && projmode==get_ppc32_mode_Cond())
* @param env The transformation environment
* @return a ppc Unknown node
*/
-static ir_node *gen_Unknown(ppc32_transform_env_t *env) {
+static ir_node *gen_Unknown(ppc32_transform_env_t *env)
+{
if(mode_is_float(env->mode))
return new_bd_ppc32_fUnknown(env->dbg, env->block, env->mode);
else if (mode_is_int(env->mode))
panic("Mode %F for unknown value not supported.", env->mode);
}
-static ir_node *ldst_insert_const(ir_node *ptr, tarval **ptv, ident **pid, ppc32_transform_env_t *env) {
+static ir_node *ldst_insert_const(ir_node *ptr, tarval **ptv, ident **pid, ppc32_transform_env_t *env)
+{
tarval *tv_const = NULL;
ident *id_symconst = NULL;
* @param env The transformation environment
* @return the created ppc Load node
*/
-static ir_node *gen_Load(ppc32_transform_env_t *env) {
+static ir_node *gen_Load(ppc32_transform_env_t *env)
+{
ir_node *node = env->irn;
ir_node *loadptr = get_Load_ptr(node);
ir_node *load;
* @param env The transformation environment
* @return the created ppc Store node
*/
-static ir_node *gen_Store(ppc32_transform_env_t *env) {
+static ir_node *gen_Store(ppc32_transform_env_t *env)
+{
ir_node *node = env->irn;
ir_node *storeptr = get_Store_ptr(node);
ir_node *valuenode = get_Store_value(node);
* @param env The transformation environment
* @return the created ppc CopyB node
*/
-static ir_node *gen_CopyB(ppc32_transform_env_t *env) {
+static ir_node *gen_CopyB(ppc32_transform_env_t *env)
+{
ir_node *mem = get_CopyB_mem(env->irn);
ir_node *src = get_CopyB_src(env->irn);
ir_node *dest = get_CopyB_dst(env->irn);
*
* @param env The transformation environment
*/
-static ir_node *gen_be_FrameAddr(ppc32_transform_env_t *env) {
+static ir_node *gen_be_FrameAddr(ppc32_transform_env_t *env)
+{
ir_node *op = get_irn_n(env->irn, 0);
ir_node *add = new_bd_ppc32_Addi(env->dbg, env->block, op, mode_P);
set_ppc32_frame_entity(add, be_get_frame_entity(env->irn));
/**
* the BAD transformer.
*/
-static ir_node *bad_transform(ppc32_transform_env_t *env) {
+static ir_node *bad_transform(ppc32_transform_env_t *env)
+{
panic("Transformation not implemented: %+F\n", env->irn);
}
/**
* Enters all transform functions into the generic pointer
*/
-void ppc32_register_transformers(void) {
+void ppc32_register_transformers(void)
+{
/* first clear the generic function pointer for all ops */
clear_irp_opcodes_generic_func();
* @param node the firm node
 * @param env   the code generator environment (ppc32_code_gen_t *)
*/
-void ppc32_transform_node(ir_node *node, void *env) {
+void ppc32_transform_node(ir_node *node, void *env)
+{
ppc32_code_gen_t *cg = (ppc32_code_gen_t *)env;
ir_op *op = get_irn_op(node);
ir_node *asm_node = NULL;
};
/** Compares two (entity, tarval) combinations */
-static int cmp_tv_ent(const void *a, const void *b, size_t len) {
+static int cmp_tv_ent(const void *a, const void *b, size_t len)
+{
const struct tv_ent *e1 = a;
const struct tv_ent *e2 = b;
(void) len;
* @param mode node mode
* @return the created ppc Load immediate node
*/
-static ir_node *gen_ppc32_Const(ppc32_transform_env_t *env) {
+static ir_node *gen_ppc32_Const(ppc32_transform_env_t *env)
+{
tarval *tv_const = get_ppc32_constant_tarval(env->irn);
ir_node *node;
* @param mode node mode
* @return the created ppc float Load node
*/
-static ir_node *gen_ppc32_fConst(ppc32_transform_env_t *env) {
+static ir_node *gen_ppc32_fConst(ppc32_transform_env_t *env)
+{
tarval *tv_const = get_ppc32_constant_tarval(env->irn);
switch(get_nice_modecode(env->mode)){
* @param mode node mode
* @return the created ppc Load immediate node
*/
-static ir_node *gen_ppc32_SymConst(ppc32_transform_env_t *env) {
+static ir_node *gen_ppc32_SymConst(ppc32_transform_env_t *env)
+{
ir_entity *ent = get_ppc32_frame_entity(env->irn);
ident *id_symconst = get_entity_ident(ent);
ir_node *node;
* @param node the firm node
* @param env the debug module
*/
-void ppc32_transform_const(ir_node *node, void *env) {
+void ppc32_transform_const(ir_node *node, void *env)
+{
ppc32_code_gen_t *cgenv = (ppc32_code_gen_t *)env;
ir_node *asm_node = NULL;
ppc32_transform_env_t tenv;
* @param mode node mode
* @return the created ppc Conv node
*/
-static ir_node *gen_Conv(ppc32_transform_env_t *env, ir_node *op) {
+static ir_node *gen_Conv(ppc32_transform_env_t *env, ir_node *op)
+{
ir_mode *from_mode = get_irn_mode(get_irn_n(env->irn,0));
ir_mode *to_mode = env->mode;
ppc32_modecode from_modecode=get_nice_modecode(from_mode);
};
/* Compares two (entity, tarval) combinations */
-static int cmp_tv_ent(const void *a, const void *b, size_t len) {
+static int cmp_tv_ent(const void *a, const void *b, size_t len)
+{
const struct tv_ent *e1 = a;
const struct tv_ent *e2 = b;
}
/* Generates a SymConst node for a known FP const */
-static ir_node *gen_fp_known_symconst(ppc32_transform_env_t *env, tarval *known_const) {
+static ir_node *gen_fp_known_symconst(ppc32_transform_env_t *env, tarval *known_const)
+{
static set *const_set = NULL;
static ir_type *tp = NULL;
struct tv_ent key;
* @param env transformation environment
* @return the created ppc Const node
*/
-static ir_node *gen_Const(ppc32_transform_env_t *env) {
+static ir_node *gen_Const(ppc32_transform_env_t *env)
+{
tarval *tv_const = get_Const_tarval(env->irn);
ir_node *constant;
* @param env transformation environment
* @return the created ppc SymConst node
*/
-static ir_node *gen_SymConst(ppc32_transform_env_t *env) {
+static ir_node *gen_SymConst(ppc32_transform_env_t *env)
+{
ir_node *symconst;
symconst = new_bd_ppc32_SymConst(env->dbg, env->block, env->mode);
set_ppc32_frame_entity(symconst, get_SymConst_entity(env->irn));
* @param node the firm node
* @param env the debug module
*/
-void ppc32_conv_walk(ir_node *node, void *env) {
+void ppc32_conv_walk(ir_node *node, void *env)
+{
ppc32_code_gen_t *cgenv = (ppc32_code_gen_t *)env;
ir_opcode code = get_irn_opcode(node);
ppc32_transform_env_t tenv;
* @param node the firm node
* @param env the debug module
*/
-void ppc32_pretransform_walk(ir_node *node, void *env) {
+void ppc32_pretransform_walk(ir_node *node, void *env)
+{
ppc32_code_gen_t *cgenv = (ppc32_code_gen_t *)env;
ir_opcode code = get_irn_opcode(node);
ppc32_transform_env_t tenv;
/**
* Returns the libFirm configuration parameter for this backend.
*/
-static const backend_params *sparc_get_backend_params(void) {
+static const backend_params *sparc_get_backend_params(void)
+{
static backend_params p = {
0, /* no dword lowering */
0, /* no inline assembly */
be_emit_finish_line_gas(node);
}
-static void emit_be_Copy(const ir_node *irn) {
+static void emit_be_Copy(const ir_node *irn)
+{
ir_mode *mode = get_irn_mode(irn);
if (get_in_reg(irn, 0) == get_out_reg(irn, 0)) {
* Walks over the nodes in a block connected by scheduling edges
* and emits code for each node.
*/
-void sparc_gen_block(ir_node *block, void *data) {
+void sparc_gen_block(ir_node *block, void *data)
+{
ir_node *node;
(void) data;
/**
* Emits code for function start.
*/
-void sparc_emit_func_prolog(ir_graph *irg) {
+void sparc_emit_func_prolog(ir_graph *irg)
+{
ir_entity *ent = get_irg_entity(irg);
be_gas_emit_function_prolog(ent, 4);
/**
* Emits code for function end
*/
-void sparc_emit_func_epilog(ir_graph *irg) {
+void sparc_emit_func_epilog(ir_graph *irg)
+{
ir_entity *ent = get_irg_entity(irg);
const char *irg_name = get_entity_ld_name(ent);
* TODO: Sets labels for control flow nodes (jump target).
 * Links control predecessors to their destination blocks.
*/
-void sparc_gen_labels(ir_node *block, void *env) {
+void sparc_gen_labels(ir_node *block, void *env)
+{
ir_node *pred;
int n = get_Block_n_cfgpreds(block);
(void) env;
};
-const arch_register_t *sparc_get_RegParam_reg(int n) {
+const arch_register_t *sparc_get_RegParam_reg(int n)
+{
assert(n < 6 && n >=0 && "trying to get register for param >= 6");
return gp_param_regs[n];
}
-int get_sparc_jmp_cond_proj_num(const ir_node *node) {
+int get_sparc_jmp_cond_proj_num(const ir_node *node)
+{
const sparc_jmp_cond_attr_t *attr = get_sparc_jmp_cond_attr_const(node);
return attr->proj_num;
}
-int get_sparc_jmp_switch_n_projs(const ir_node *node) {
+int get_sparc_jmp_switch_n_projs(const ir_node *node)
+{
const sparc_jmp_switch_attr_t *attr = get_sparc_jmp_switch_attr_const(node);
return attr->n_projs;
}
-long get_sparc_jmp_switch_default_proj_num(const ir_node *node) {
+long get_sparc_jmp_switch_default_proj_num(const ir_node *node)
+{
const sparc_jmp_switch_attr_t *attr = get_sparc_jmp_switch_attr_const(node);
return attr->default_proj_num;
}
/**
* copies sparc attributes of node
*/
-static void sparc_copy_attr(const ir_node *old_node, ir_node *new_node) {
+static void sparc_copy_attr(const ir_node *old_node, ir_node *new_node)
+{
ir_graph *irg = get_irn_irg(new_node);
struct obstack *obst = get_irg_obstack(irg);
const sparc_attr_t *attr_old = get_sparc_attr_const(old_node);
* @param node the ir Store node
* @return The transformed sparc node.
*/
-static ir_node *gen_Const(ir_node *node) {
+static ir_node *gen_Const(ir_node *node)
+{
ir_node *block = be_transform_node(get_nodes_block(node));
ir_mode *mode = get_irn_mode(node);
dbg_info *dbg = get_irn_dbg_info(node);
/**
* Transform a be_Copy.
*/
-static ir_node *gen_be_Copy(ir_node *node) {
+static ir_node *gen_be_Copy(ir_node *node)
+{
ir_node *result = be_duplicate_node(node);
ir_mode *mode = get_irn_mode(result);
* Transforms a Conv node.
*
*/
-static ir_node *gen_Conv(ir_node *node) {
+static ir_node *gen_Conv(ir_node *node)
+{
ir_node *block = be_transform_node(get_nodes_block(node));
ir_node *op = get_Conv_op(node);
ir_node *new_op = be_transform_node(op);
/**
* Transform a Proj from a Load.
*/
-static ir_node *gen_Proj_Load(ir_node *node) {
+static ir_node *gen_Proj_Load(ir_node *node)
+{
ir_node *block = be_transform_node(get_nodes_block(node));
ir_node *load = get_Proj_pred(node);
ir_node *new_load = be_transform_node(load);
/**
* Transform the Projs of a be_AddSP.
*/
-static ir_node *gen_Proj_be_AddSP(ir_node *node) {
+static ir_node *gen_Proj_be_AddSP(ir_node *node)
+{
ir_node *block = be_transform_node(get_nodes_block(node));
ir_node *pred = get_Proj_pred(node);
ir_node *new_pred = be_transform_node(pred);
/**
* Transform the Projs of a be_SubSP.
*/
-static ir_node *gen_Proj_be_SubSP(ir_node *node) {
+static ir_node *gen_Proj_be_SubSP(ir_node *node)
+{
ir_node *block = be_transform_node(get_nodes_block(node));
ir_node *pred = get_Proj_pred(node);
ir_node *new_pred = be_transform_node(pred);
/**
* Transform the Projs from a Cmp.
*/
-static ir_node *gen_Proj_Cmp(ir_node *node) {
+static ir_node *gen_Proj_Cmp(ir_node *node)
+{
(void) node;
panic("not implemented");
}
/**
* Transform a Proj node.
*/
-static ir_node *gen_Proj(ir_node *node) {
+static ir_node *gen_Proj(ir_node *node)
+{
ir_graph *irg = current_ir_graph;
dbg_info *dbgi = get_irn_dbg_info(node);
ir_node *pred = get_Proj_pred(node);
#include "error.h"
#include "irprintf.h"
-NORETURN panic(const char *fmt, ...) {
+NORETURN panic(const char *fmt, ...)
+{
va_list ap;
fputs("libFirm panic: ", stderr);
#define POINTER_READ(p, size) (p)
/* returns the kind of the thing */
-firm_kind get_kind(const void *firm_thing) {
+firm_kind get_kind(const void *firm_thing)
+{
return POINTER_READ(firm_thing, sizeof(firm_kind)) ? *(firm_kind *)firm_thing : k_BAD;
} /* get_kind */
-const char *print_firm_kind(void *firm_thing) {
+const char *print_firm_kind(void *firm_thing)
+{
if (! firm_thing)
return "(NULL)";
/*
* identify a firm thing
*/
-void firm_identify_thing(void *X) {
+void firm_identify_thing(void *X)
+{
if (! X) {
printf("(NULL)\n");
return;
#ifdef _WIN32
/* Break into the debugger. The Win32 way. */
-void firm_debug_break(void) {
+void firm_debug_break(void)
+{
DebugBreak();
}
#elif defined(__GNUC__) && (defined(__i386__) || defined(__x86_64))
/* Break into the debugger. The ia32/x86_64 way under GCC. */
-void firm_debug_break(void) {
+void firm_debug_break(void)
+{
__asm__ __volatile__("int3");
}
#else
/* Break into the debugger. Poor Unix way. */
-void firm_debug_break(void) {
+void firm_debug_break(void)
+{
raise(SIGINT);
}
#endif /* _WIN32 */
/**
* Returns non-zero, if the debug extension is active
*/
-int firm_debug_active(void) {
+int firm_debug_active(void)
+{
return is_active;
} /* firm_debug_active */
/**
* Reset the debug text buffer.
*/
-static void reset_dbg_buf(void) {
+static void reset_dbg_buf(void)
+{
firm_dbg_msg_buf[0] = '\0';
} /* reset_dbg_buf */
/**
* Add text to the debug text buffer.
*/
-static void add_to_dbg_buf(const char *buf) {
+static void add_to_dbg_buf(const char *buf)
+{
strncat(firm_dbg_msg_buf, buf, sizeof(firm_dbg_msg_buf));
} /* add_to_dbg_buf */
*
* To be called from the debugger.
*/
-const char *firm_debug_text(void) {
+const char *firm_debug_text(void)
+{
firm_dbg_msg_buf[sizeof(firm_dbg_msg_buf) - 1] = '\0';
return firm_dbg_msg_buf;
} /* firm_debug_text */
/**
* Break if ident name is reached.
*/
-static void break_on_ident(const char *name, bp_reasons_t reason) {
+static void break_on_ident(const char *name, bp_reasons_t reason)
+{
bp_ident_t key, *elem;
key.bp.kind = BP_IDENT;
/**
* Show a list of supported commands
*/
-static void show_commands(void) {
+static void show_commands(void)
+{
dbg_printf("Internal Firm debugger extension $Revision$ commands:\n"
"init break after initialization\n"
"create nr break if node nr was created\n"
/**
* Shows all Firm breakpoints.
*/
-static void show_bp(void) {
+static void show_bp(void)
+{
breakpoint *p;
bp_nr_t *node_p;
bp_ident_t *ident_p;
* firm_dbg_register() expects that the name is stored persistent.
* So we need this little helper function
*/
-static firm_dbg_module_t *dbg_register(const char *name) {
+static firm_dbg_module_t *dbg_register(const char *name)
+{
ident *id = new_id_from_str(name);
return firm_dbg_register(get_id_str(id));
/**
* Show info about a firm thing.
*/
-static void show_firm_object(void *firm_thing) {
+static void show_firm_object(void *firm_thing)
+{
FILE *f = stdout;
if (firm_thing == NULL) {
/**
* Find a firm type by its number.
*/
-static ir_type *find_type_nr(long nr) {
+static ir_type *find_type_nr(long nr)
+{
int i, n = get_irp_n_types();
ir_type *tp;
/**
* Find a firm type by its name.
*/
-static ir_type *find_type_name(const char *name) {
+static ir_type *find_type_name(const char *name)
+{
int i, n = get_irp_n_types();
ir_type *tp;
/**
* Type-walker: Find an entity with given number.
*/
-static void check_ent_nr(type_or_ent tore, void *ctx) {
+static void check_ent_nr(type_or_ent tore, void *ctx)
+{
find_env_t *env = ctx;
if (is_entity(tore.ent)) {
/**
* Type-walker: Find an entity with given name.
*/
-static void check_ent_name(type_or_ent tore, void *ctx) {
+static void check_ent_name(type_or_ent tore, void *ctx)
+{
find_env_t *env = ctx;
if (is_entity(tore.ent))
/**
* Find a firm entity by its number.
*/
-static ir_entity *find_entity_nr(long nr) {
+static ir_entity *find_entity_nr(long nr)
+{
find_env_t env;
env.u.nr = nr;
/**
* Find a firm entity by its name.
*/
-static ir_entity *find_entity_name(const char *name) {
+static ir_entity *find_entity_name(const char *name)
+{
find_env_t env;
env.u.name = name;
/**
* Search methods for a name.
*/
-static void show_by_name(type_or_ent tore, void *env) {
+static void show_by_name(type_or_ent tore, void *env)
+{
ident *id = (ident *)env;
if (is_entity(tore.ent)) {
/**
* Search methods for a ldname.
*/
-static void show_by_ldname(type_or_ent tore, void *env) {
+static void show_by_ldname(type_or_ent tore, void *env)
+{
ident *id = (ident *)env;
if (is_entity(tore.ent)) {
/**
* prints the address and graph number of all irgs with given name
*/
-static void irg_name(const char *name) {
+static void irg_name(const char *name)
+{
ident *id = new_id_from_str(name);
type_walk(show_by_name, NULL, (void *)id);
/**
* prints the address and graph number of all irgs with given ld_name
*/
-static void irg_ld_name(const char *name) {
+static void irg_ld_name(const char *name)
+{
ident *id = new_id_from_str(name);
type_walk(show_by_ldname, NULL, (void *)id);
/**
* Initialize the lexer.
*/
-static void init_lexer(const char *input) {
+static void init_lexer(const char *input)
+{
lexer.has_token = 0;
lexer.curr_pos = input;
lexer.end_pos = input + strlen(input);
/**
* Get the next char from the input.
*/
-static char next_char(void) {
+static char next_char(void)
+{
if (lexer.curr_pos >= lexer.end_pos)
return '\0';
return *lexer.curr_pos++;
/**
* The lexer.
*/
-static unsigned get_token(void) {
+static unsigned get_token(void)
+{
char c;
int i;
*
* See show_commands() for supported commands.
*/
-void firm_debug(const char *cmd) {
+void firm_debug(const char *cmd)
+{
char name[1024], fname[1024];
int len;
/**
* A gdb helper function to print firm objects.
*/
-const char *gdb_node_helper(void *firm_object) {
+const char *gdb_node_helper(void *firm_object)
+{
static char buf[1024];
ir_snprintf(buf, sizeof(buf), "%+F", firm_object);
return buf;
/**
* A gdb helper function to print tarvals.
*/
-const char *gdb_tarval_helper(void *tv_object) {
+const char *gdb_tarval_helper(void *tv_object)
+{
static char buf[1024];
ir_snprintf(buf, sizeof(buf), "%+T", tv_object);
return buf;
}
-const char *gdb_out_edge_helper(const ir_node *node) {
+const char *gdb_out_edge_helper(const ir_node *node)
+{
static char buf[4*1024];
char *b = buf;
size_t l;
/**
* Default implementation using libfirm sets.
*/
-void set_finish_ident(void *handle) {
+void set_finish_ident(void *handle)
+{
set *id_set = handle;
del_set(id_set);
return impl.get_id_strlen(impl.handle, id);
}
-void finish_ident(void) {
+void finish_ident(void)
+{
if (impl.finish_ident)
impl.finish_ident(impl.handle);
}
static struct obstack mangle_obst;
/** returned a mangled type name, currently no mangling */
-static inline ident *mangle_type(ir_type *tp) {
+static inline ident *mangle_type(ir_type *tp)
+{
assert(tp->kind == k_type);
return tp->name;
}
-ident *id_mangle_entity(ir_entity *ent) {
+ident *id_mangle_entity(ir_entity *ent)
+{
ident *type_id;
char *cp;
int len;
/* Returns a new ident that represents 'firstscnd'. */
-ident *id_mangle(ident *first, ident *scnd) {
+ident *id_mangle(ident *first, ident *scnd)
+{
char *cp;
int len;
ident *res;
}
/** Returns a new ident that represents 'prefixscndsuffix'. */
-ident *id_mangle3(const char *prefix, ident *scnd, const char *suffix) {
+ident *id_mangle3(const char *prefix, ident *scnd, const char *suffix)
+{
char *cp;
int len;
ident *res;
}
/** Returns a new ident that represents first<c>scnd. */
-static ident *id_mangle_3(ident *first, char c, ident* scnd) {
+static ident *id_mangle_3(ident *first, char c, ident* scnd)
+{
char *cp;
int len;
ident *res;
}
/* Returns a new ident that represents first_scnd. */
-ident *id_mangle_u(ident *first, ident* scnd) {
+ident *id_mangle_u(ident *first, ident* scnd)
+{
return id_mangle_3(first, '_', scnd);
}
/* Returns a new ident that represents first.scnd. */
-ident *id_mangle_dot(ident *first, ident* scnd) {
+ident *id_mangle_dot(ident *first, ident* scnd)
+{
return id_mangle_3(first, '.', scnd);
}
/* returns a mangled name for a Win32 function using its calling convention */
-ident *id_decorate_win32_c_fkt(ir_entity *ent, ident *id) {
+ident *id_decorate_win32_c_fkt(ir_entity *ent, ident *id)
+{
ir_type *tp = get_entity_type(ent);
unsigned cc_mask = get_method_calling_convention(tp);
char buf[16];
return id;
}
-void firm_init_mangle(void) {
+void firm_init_mangle(void)
+{
obstack_init(&mangle_obst);
}
/**
* Adds a Call at the beginning of the given irg.
*/
-void instrument_initcall(ir_graph *irg, ir_entity *ent) {
+void instrument_initcall(ir_graph *irg, ir_entity *ent)
+{
const ir_edge_t *edge;
ir_node *initial_exec;
ir_node *initial_mem;
/** The bit mask, which optimizations to apply. */
static arch_dep_opts_t opts;
-void arch_dep_init(arch_dep_params_factory_t factory) {
+void arch_dep_init(arch_dep_params_factory_t factory)
+{
opts = arch_dep_none;
if (factory != NULL)
params = factory();
}
-void arch_dep_set_opts(arch_dep_opts_t the_opts) {
+void arch_dep_set_opts(arch_dep_opts_t the_opts)
+{
opts = the_opts;
}
/** check, whether a mode allows a Mulh instruction. */
-static int allow_Mulh(ir_mode *mode) {
+static int allow_Mulh(ir_mode *mode)
+{
if (get_mode_size_bits(mode) > params->max_bits_for_mulh)
return 0;
return (mode_is_signed(mode) && params->allow_mulhs) || (!mode_is_signed(mode) && params->allow_mulhu);
* Some kind of default evaluator. Return the cost of
* instructions.
*/
-static int default_evaluate(insn_kind kind, tarval *tv) {
+static int default_evaluate(insn_kind kind, tarval *tv)
+{
(void) tv;
if (kind == MUL)
/**
* emit a LEA (or an Add) instruction
*/
-static instruction *emit_LEA(mul_env *env, instruction *a, instruction *b, unsigned shift) {
+static instruction *emit_LEA(mul_env *env, instruction *a, instruction *b, unsigned shift)
+{
instruction *res = OALLOC(&env->obst, instruction);
res->kind = shift > 0 ? LEA : ADD;
res->in[0] = a;
/**
* emit a SHIFT (or an Add or a Zero) instruction
*/
-static instruction *emit_SHIFT(mul_env *env, instruction *a, unsigned shift) {
+static instruction *emit_SHIFT(mul_env *env, instruction *a, unsigned shift)
+{
instruction *res = OALLOC(&env->obst, instruction);
if (shift == env->bits) {
/* a 2^bits with bits resolution is a zero */
/**
* emit a SUB instruction
*/
-static instruction *emit_SUB(mul_env *env, instruction *a, instruction *b) {
+static instruction *emit_SUB(mul_env *env, instruction *a, instruction *b)
+{
instruction *res = OALLOC(&env->obst, instruction);
res->kind = SUB;
res->in[0] = a;
/**
* emit the ROOT instruction
*/
-static instruction *emit_ROOT(mul_env *env, ir_node *root_op) {
+static instruction *emit_ROOT(mul_env *env, ir_node *root_op)
+{
instruction *res = OALLOC(&env->obst, instruction);
res->kind = ROOT;
res->in[0] = NULL;
/**
* Returns the condensed representation of the tarval tv
*/
-static unsigned char *value_to_condensed(mul_env *env, tarval *tv, int *pr) {
+static unsigned char *value_to_condensed(mul_env *env, tarval *tv, int *pr)
+{
ir_mode *mode = get_tarval_mode(tv);
int bits = get_mode_size_bits(mode);
char *bitstr = get_tarval_bitpattern(tv);
/**
* Calculate the gain when using the generalized complementary technique
*/
-static int calculate_gain(unsigned char *R, int r) {
+static int calculate_gain(unsigned char *R, int r)
+{
int max_gain = 0;
int idx = -1, i;
int gain;
/**
* Calculates the condensed complement of a given (R,r) tuple
*/
-static unsigned char *complement_condensed(mul_env *env, unsigned char *R, int r, int gain, int *prs) {
+static unsigned char *complement_condensed(mul_env *env, unsigned char *R, int r, int gain, int *prs)
+{
unsigned char *value = obstack_alloc(&env->obst, env->bits);
int i, l, j;
unsigned char c;
/**
* creates a tarval from a condensed representation.
*/
-static tarval *condensed_to_value(mul_env *env, unsigned char *R, int r) {
+static tarval *condensed_to_value(mul_env *env, unsigned char *R, int r)
+{
tarval *res, *tv;
int i, j;
/*
* handle simple cases with up-to 2 bits set
*/
-static instruction *decompose_simple_cases(mul_env *env, unsigned char *R, int r, tarval *N) {
+static instruction *decompose_simple_cases(mul_env *env, unsigned char *R, int r, tarval *N)
+{
instruction *ins, *ins2;
(void) N;
/**
* Main decompose driver.
*/
-static instruction *decompose_mul(mul_env *env, unsigned char *R, int r, tarval *N) {
+static instruction *decompose_mul(mul_env *env, unsigned char *R, int r, tarval *N)
+{
unsigned i;
int gain;
/**
* basic decomposition routine
*/
-static instruction *basic_decompose_mul(mul_env *env, unsigned char *R, int r, tarval *N) {
+static instruction *basic_decompose_mul(mul_env *env, unsigned char *R, int r, tarval *N)
+{
instruction *Ns;
unsigned t;
* @param env the environment
* @param inst the instruction
*/
-static ir_node *build_graph(mul_env *env, instruction *inst) {
+static ir_node *build_graph(mul_env *env, instruction *inst)
+{
ir_node *l, *r, *c;
if (inst->irn)
* Calculate the costs for the given instruction sequence.
* Note that additional costs due to higher register pressure are NOT evaluated yet
*/
-static int evaluate_insn(mul_env *env, instruction *inst) {
+static int evaluate_insn(mul_env *env, instruction *inst)
+{
int costs;
if (inst->costs >= 0) {
*
* @return the new graph
*/
-static ir_node *do_decomposition(ir_node *irn, ir_node *operand, tarval *tv) {
+static ir_node *do_decomposition(ir_node *irn, ir_node *operand, tarval *tv)
+{
mul_env env;
instruction *inst;
unsigned char *R;
}
/* Replace Muls with Shifts and Add/Subs. */
-ir_node *arch_dep_replace_mul_with_shifts(ir_node *irn) {
+ir_node *arch_dep_replace_mul_with_shifts(ir_node *irn)
+{
ir_graph *irg;
ir_node *res = irn;
ir_mode *mode = get_irn_mode(irn);
/**
 * Calculates the ld2 (log2) of a tarval if the tarval is 2^n, else returns -1.
*/
-static int tv_ld2(tarval *tv, int bits) {
+static int tv_ld2(tarval *tv, int bits)
+{
int i, k = 0, num;
for (num = i = 0; i < bits; ++i) {
*
* see Hacker's Delight: 10-6 Integer Division by Constants: Incorporation into a Compiler
*/
-static struct ms magic(tarval *d) {
+static struct ms magic(tarval *d)
+{
ir_mode *mode = get_tarval_mode(d);
ir_mode *u_mode = find_unsigned_mode(mode);
int bits = get_mode_size_bits(u_mode);
*
* see Hacker's Delight: 10-10 Integer Division by Constants: Incorporation into a Compiler (Unsigned)
*/
-static struct mu magicu(tarval *d) {
+static struct mu magicu(tarval *d)
+{
ir_mode *mode = get_tarval_mode(d);
int bits = get_mode_size_bits(mode);
int p;
*
* Note that 'div' might be a mod or DivMod operation as well
*/
-static ir_node *replace_div_by_mulh(ir_node *div, tarval *tv) {
+static ir_node *replace_div_by_mulh(ir_node *div, tarval *tv)
+{
dbg_info *dbg = get_irn_dbg_info(div);
ir_node *n = get_binop_left(div);
ir_node *block = get_irn_n(div, -1);
}
/* Replace Divs with Shifts and Add/Subs and Mulh. */
-ir_node *arch_dep_replace_div_by_const(ir_node *irn) {
+ir_node *arch_dep_replace_div_by_const(ir_node *irn)
+{
ir_node *res = irn;
/* If the architecture dependent optimizations were not initialized
}
/* Replace Mods with Shifts and Add/Subs and Mulh. */
-ir_node *arch_dep_replace_mod_by_const(ir_node *irn) {
+ir_node *arch_dep_replace_mod_by_const(ir_node *irn)
+{
ir_node *res = irn;
/* If the architecture dependent optimizations were not initialized
}
/* Replace DivMods with Shifts and Add/Subs and Mulh. */
-void arch_dep_replace_divmod_by_const(ir_node **div, ir_node **mod, ir_node *irn) {
+void arch_dep_replace_divmod_by_const(ir_node **div, ir_node **mod, ir_node *irn)
+{
*div = *mod = NULL;
/* If the architecture dependent optimizations were not initialized
};
/* A default parameter factory for testing purposes. */
-const ir_settings_arch_dep_t *arch_dep_default_factory(void) {
+const ir_settings_arch_dep_t *arch_dep_default_factory(void)
+{
return &default_params;
}
/**
* identify a firm object type
*/
-static int firm_get_arg_type(const lc_arg_occ_t *occ) {
+static int firm_get_arg_type(const lc_arg_occ_t *occ)
+{
(void) occ;
/* Firm objects are always pointer */
return lc_arg_type_ptr;
}
-static int firm_get_arg_type_int(const lc_arg_occ_t *occ) {
+static int firm_get_arg_type_int(const lc_arg_occ_t *occ)
+{
(void) occ;
return lc_arg_type_int;
}
-static int bitset_get_arg_type(const lc_arg_occ_t *occ) {
+static int bitset_get_arg_type(const lc_arg_occ_t *occ)
+{
(void) occ;
return lc_arg_type_ptr;
}
/**
* Beware: do not set the entity ld_name
*/
-static const char *get_entity_ld_name_ex(ir_entity *ent) {
+static const char *get_entity_ld_name_ex(ir_entity *ent)
+{
if (ent->ld_name)
return get_entity_ld_name(ent);
return get_entity_name(ent);
#include "irtools.h"
/* Return the current state of the interprocedural view. */
-ip_view_state get_irp_ip_view_state(void) {
+ip_view_state get_irp_ip_view_state(void)
+{
return irp->ip_view;
}
/* Set the current state of the interprocedural view. */
-static void set_irp_ip_view(ip_view_state state) {
+static void set_irp_ip_view(ip_view_state state)
+{
irp->ip_view = state;
}
/* Set the state of the interprocedural view to invalid. */
-void set_irp_ip_view_invalid(void) {
+void set_irp_ip_view_invalid(void)
+{
set_irp_ip_view(ip_view_invalid);
}
* - are external visible
* - are dereferenced somewhere within the program (i.e., the address of the
* method is stored somewhere). */
-static void caller_init(int arr_length, ir_entity ** free_methods) {
+static void caller_init(int arr_length, ir_entity ** free_methods)
+{
int i, j;
for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
set_entity_link(get_irg_entity(get_irp_irg(i)), irg_data_create());
}
/*
-static inline ir_node * tail(ir_node * node) {
+static inline ir_node * tail(ir_node * node)
+{
ir_node * link;
for (; (link = get_irn_link(node)); node = link) ;
return node;
* "call_tail" aktualisieren), Proj-Operationen in die Liste ihrer Definition
* (auch bei Proj->Call Operationen) und Phi-Operationen in die Liste ihres
* Grundblocks einfügen. */
-static void collect_phicallproj_walker(ir_node * node, ir_node ** call_tail) {
+static void collect_phicallproj_walker(ir_node * node, ir_node ** call_tail)
+{
if (is_Call(node)) {
/* Die Liste von Call an call_tail anhängen. */
ir_node * link;
}
-static void link(ir_node * head, ir_node * node) {
+static void link(ir_node * head, ir_node * node)
+{
if (node) {
set_irn_link(node, get_irn_link(head));
set_irn_link(head, node);
* Phi-Operationen an ihren Grundblöcken. Die Liste der Calls sieht
* dann so aus: End -> Call -> Proj -> ... -> Proj -> Call -> Proj ->
* ... -> Proj -> NULL. */
-static void collect_phicallproj(void) {
+static void collect_phicallproj(void)
+{
int i;
for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
/* Proj-Operation durch Filter-Operation im aktuellen Block ersetzen. */
-static ir_node * exchange_proj(ir_node * proj) {
+static ir_node * exchange_proj(ir_node * proj)
+{
ir_node * filter;
assert(get_irn_op(proj) == op_Proj);
filter = new_Filter(get_Proj_pred(proj), get_irn_mode(proj), get_Proj_proj(proj));
/* Echt neue Block-Operation erzeugen. CSE abschalten! */
-static ir_node * create_Block(int n, ir_node ** in) {
+static ir_node * create_Block(int n, ir_node ** in)
+{
/* Turn off optimizations so that blocks are not merged again. */
int rem_opt = get_opt_optimize();
ir_node * block;
 * cause cycles we don't want to see, as Unknown is in the Start Block
* of the procedure. Use unknown of outermost irg where the start
* block has no predecessors. */
-static inline ir_node *get_cg_Unknown(ir_mode *m) {
+static inline ir_node *get_cg_Unknown(ir_mode *m)
+{
assert((get_Block_n_cfgpreds(get_irg_start_block(get_irp_main_irg())) == 1) &&
(get_nodes_block(get_Block_cfgpred(get_irg_start_block(get_irp_main_irg()), 0)) ==
get_irg_start_block(get_irp_main_irg())));
* umwandeln. Die künstlichen Steuerzusammenflüsse EndReg und EndExcept
* einfügen. An der Start-Operation hängt nach dem Aufruf eine Liste der
* entsprechenden Filter-Knoten. */
-static void prepare_irg(ir_graph * irg, irg_data_t * data) {
+static void prepare_irg(ir_graph * irg, irg_data_t * data)
+{
ir_node * start_block = get_irg_start_block(irg);
ir_node * link, * proj;
int n_callers = data->count + (data->open ? 1 : 0);
/* Künstlicher Steuerzusammenfluss EndReg einfügen. */
-static void prepare_irg_end(ir_graph * irg, irg_data_t * data) {
+static void prepare_irg_end(ir_graph * irg, irg_data_t * data)
+{
ir_node * end_block = get_irg_end_block(irg);
ir_node * end = get_irg_end(irg);
ir_node **ret_arr = NULL;
/* Künstlicher Steuerzusammenfluss EndExcept einfügen. */
-static void prepare_irg_end_except(ir_graph * irg, irg_data_t * data) {
+static void prepare_irg_end_except(ir_graph * irg, irg_data_t * data)
+{
ir_node * end_block = get_irg_end_block(irg);
ir_node * end = get_irg_end(irg);
ir_node ** except_arr = NULL;
/* Zwischengespeicherte Daten wieder freigeben. */
-static void cleanup_irg(ir_graph * irg) {
+static void cleanup_irg(ir_graph * irg)
+{
ir_entity * ent = get_irg_entity(irg);
irg_data_t * data = get_entity_link(ent);
assert(data);
/* Alle Phi-Operationen aus "from_block" nach "to_block"
* verschieben. Die Phi-Operationen müssen am zugehörigen Grundblock
* verlinkt sein. Danach sind sie am neuen Grundblock verlinkt. */
-static void move_phis(ir_node * from_block, ir_node * to_block) {
+static void move_phis(ir_node * from_block, ir_node * to_block)
+{
ir_node * phi;
for (phi = get_irn_link(from_block); phi != NULL; phi = get_irn_link(phi)) {
set_nodes_block(phi, to_block);
/* Rekursiv die Operation "node" und alle ihre Vorgänger aus dem Block
* "from_block" nach "to_block" verschieben.
* Verschiebe ebenfalls die Projs aus diesen Operationen. */
-static void move_nodes(ir_node * from_block, ir_node * to_block, ir_node * node) {
+static void move_nodes(ir_node * from_block, ir_node * to_block, ir_node * node)
+{
int i, arity = get_irn_arity(node);
ir_node *proj;
/* Abhängigkeiten für den Speicherzustand über alle aufgerufenen
* Methoden bestimmen. */
-static void fill_mem(int length, irg_data_t * data[], ir_node * in[]) {
+static void fill_mem(int length, irg_data_t * data[], ir_node * in[])
+{
int i;
for (i = 0; i < length; ++i) {
if (data[i]) { /* explicit */
/* Abhängigkeiten für den Ausnahme-Speicherzustand über alle
* aufgerufenen Methoden bestimmen. */
-static void fill_except_mem(int length, irg_data_t * data[], ir_node * in[]) {
+static void fill_except_mem(int length, irg_data_t * data[], ir_node * in[])
+{
int i;
for (i = 0; i < length; ++i) {
if (data[i]) { /* explicit */
/* Abhängigkeiten für ein Ergebnis über alle aufgerufenen Methoden
* bestimmen. */
-static void fill_result(int pos, int length, irg_data_t * data[], ir_node * in[], ir_mode *m) {
+static void fill_result(int pos, int length, irg_data_t * data[], ir_node * in[], ir_mode *m)
+{
int i;
for (i = 0; i < length; ++i) {
if (data[i]) { /* explicit */
/* Proj auf Except-X einer Call-Operation (aus der Link-Liste) bestimmen. */
-static ir_node * get_except(ir_node * call) {
+static ir_node * get_except(ir_node * call)
+{
/* Mit CSE könnte man das effizienter machen! Die Methode wird aber für jede
* Aufrufstelle nur ein einziges Mal aufgerufen. */
ir_node * proj;
/* Returns true if control flow operation exc is predecessor of end
block in irg. Works also for Return nodes, not only exceptions. */
-static bool exc_branches_to_end(ir_graph *irg, ir_node *exc) {
+static bool exc_branches_to_end(ir_graph *irg, ir_node *exc)
+{
int i;
ir_node *end = get_irg_end_block(irg);
for (i = get_Block_n_cfgpreds(end) -1; i >= 0; --i)
}
/* Returns true if only caller of irg is "Unknown". */
-static bool is_outermost_graph(ir_graph *irg) {
+static bool is_outermost_graph(ir_graph *irg)
+{
irg_data_t * data = get_entity_link(get_irg_entity(irg));
if (data->count) {
return false;
* einfügen. Die Steuer- und Datenflussabhängigkeiten von den aufgerufenen
* Methoden auf die CallBegin-Operation, und von der Aufrufstelle auf die
* aufgerufenen Methoden eintragen. */
-static void construct_call(ir_node * call) {
+static void construct_call(ir_node * call)
+{
int i, n_callees;
ir_node *post_block, *pre_block, *except_block, * proj, *jmp, *call_begin;
ir_node ** in;
#endif
-void cg_construct(int arr_len, ir_entity ** free_methods_arr) {
+void cg_construct(int arr_len, ir_entity ** free_methods_arr)
+{
int i;
if (get_irp_ip_view_state() == ip_view_valid) return;
}
-void cg_destruct(void) {
+void cg_destruct(void)
+{
int i;
if (get_irp_ip_view_state() != ip_view_no) {
for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
#include "gen_ir_cons.c.inl"
static ir_node *
-new_bd_Start(dbg_info *db, ir_node *block) {
+new_bd_Start(dbg_info *db, ir_node *block)
+{
ir_node *res;
ir_graph *irg = current_ir_graph;
} /* new_bd_Start */
static ir_node *
-new_bd_End(dbg_info *db, ir_node *block) {
+new_bd_End(dbg_info *db, ir_node *block)
+{
ir_node *res;
ir_graph *irg = current_ir_graph;
* is only allowed if the corresponding block is mature.
*/
static ir_node *
-new_bd_Phi(dbg_info *db, ir_node *block, int arity, ir_node **in, ir_mode *mode) {
+new_bd_Phi(dbg_info *db, ir_node *block, int arity, ir_node **in, ir_mode *mode)
+{
ir_node *res;
ir_graph *irg = current_ir_graph;
int i;
} /* new_bd_Phi */
static ir_node *
-new_bd_Const_type(dbg_info *db, tarval *con, ir_type *tp) {
+new_bd_Const_type(dbg_info *db, tarval *con, ir_type *tp)
+{
ir_node *res;
ir_graph *irg = current_ir_graph;
} /* new_bd_Const_type */
static ir_node *
-new_bd_Const(dbg_info *db, tarval *con) {
+new_bd_Const(dbg_info *db, tarval *con)
+{
ir_graph *irg = current_ir_graph;
return new_rd_Const_type(db, irg, con, firm_unknown_type);
} /* new_bd_Const */
static ir_node *
-new_bd_Const_long(dbg_info *db, ir_mode *mode, long value) {
+new_bd_Const_long(dbg_info *db, ir_mode *mode, long value)
+{
ir_graph *irg = current_ir_graph;
return new_rd_Const(db, irg, new_tarval_from_long(value, mode));
} /* new_bd_SymConst_type */
static ir_node *
-new_bd_Sync(dbg_info *db, ir_node *block) {
+new_bd_Sync(dbg_info *db, ir_node *block)
+{
ir_node *res;
ir_graph *irg = current_ir_graph;
static ir_node *
-new_bd_EndReg(dbg_info *db, ir_node *block) {
+new_bd_EndReg(dbg_info *db, ir_node *block)
+{
ir_node *res;
ir_graph *irg = current_ir_graph;
} /* new_bd_EndReg */
static ir_node *
-new_bd_EndExcept(dbg_info *db, ir_node *block) {
+new_bd_EndExcept(dbg_info *db, ir_node *block)
+{
ir_node *res;
ir_graph *irg = current_ir_graph;
/* --------------------------------------------- */
ir_node *
-new_rd_Start(dbg_info *db, ir_graph *irg, ir_node *block) {
+new_rd_Start(dbg_info *db, ir_graph *irg, ir_node *block)
+{
ir_graph *rem = current_ir_graph;
ir_node *res;
} /* new_rd_Start */
ir_node *
-new_rd_End(dbg_info *db, ir_graph *irg, ir_node *block) {
+new_rd_End(dbg_info *db, ir_graph *irg, ir_node *block)
+{
ir_node *res;
ir_graph *rem = current_ir_graph;
/* Creates a Phi node with all predecessors. Calling this constructor
is only allowed if the corresponding block is mature. */
ir_node *
-new_rd_Phi(dbg_info *db, ir_node *block, int arity, ir_node **in, ir_mode *mode) {
+new_rd_Phi(dbg_info *db, ir_node *block, int arity, ir_node **in, ir_mode *mode)
+{
ir_node *res;
ir_graph *rem = current_ir_graph;
} /* new_rd_Phi */
ir_node *
-new_rd_Const_type(dbg_info *db, ir_graph *irg, tarval *con, ir_type *tp) {
+new_rd_Const_type(dbg_info *db, ir_graph *irg, tarval *con, ir_type *tp)
+{
ir_node *res;
ir_graph *rem = current_ir_graph;
} /* new_rd_Const_type */
ir_node *
-new_rd_Const(dbg_info *db, ir_graph *irg, tarval *con) {
+new_rd_Const(dbg_info *db, ir_graph *irg, tarval *con)
+{
ir_node *res;
//#ifdef USE_ORIGINAL
ir_graph *rem = current_ir_graph;
} /* new_rd_Const */
ir_node *
-new_rd_Const_long(dbg_info *db, ir_graph *irg, ir_mode *mode, long value) {
+new_rd_Const_long(dbg_info *db, ir_graph *irg, ir_mode *mode, long value)
+{
return new_rd_Const(db, irg, new_tarval_from_long(value, mode));
} /* new_rd_Const_long */
ir_node *
-new_rd_defaultProj(dbg_info *db, ir_node *block, ir_node *arg, long max_proj) {
+new_rd_defaultProj(dbg_info *db, ir_node *block, ir_node *arg, long max_proj)
+{
ir_node *res;
ir_graph *rem = current_ir_graph;
return new_rd_SymConst_type(db, irg, mode, sym, symconst_addr_ent, tp);
} /* new_rd_SymConst_addr_ent */
-ir_node *new_rd_SymConst_ofs_ent(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_entity *symbol, ir_type *tp) {
+ir_node *new_rd_SymConst_ofs_ent(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_entity *symbol, ir_type *tp)
+{
symconst_symbol sym;
sym.entity_p = symbol;
return new_rd_SymConst_type(db, irg, mode, sym, symconst_ofs_ent, tp);
} /* new_rd_SymConst_ofs_ent */
-ir_node *new_rd_SymConst_addr_name(dbg_info *db, ir_graph *irg, ir_mode *mode, ident *symbol, ir_type *tp) {
+ir_node *new_rd_SymConst_addr_name(dbg_info *db, ir_graph *irg, ir_mode *mode, ident *symbol, ir_type *tp)
+{
symconst_symbol sym;
sym.ident_p = symbol;
return new_rd_SymConst_type(db, irg, mode, sym, symconst_addr_name, tp);
} /* new_rd_SymConst_addr_name */
-ir_node *new_rd_SymConst_type_tag(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol, ir_type *tp) {
+ir_node *new_rd_SymConst_type_tag(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol, ir_type *tp)
+{
symconst_symbol sym;
sym.type_p = symbol;
return new_rd_SymConst_type(db, irg, mode, sym, symconst_type_tag, tp);
} /* new_rd_SymConst_type_tag */
-ir_node *new_rd_SymConst_size(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol, ir_type *tp) {
+ir_node *new_rd_SymConst_size(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol, ir_type *tp)
+{
symconst_symbol sym;
sym.type_p = symbol;
return new_rd_SymConst_type(db, irg, mode, sym, symconst_type_size, tp);
} /* new_rd_SymConst_size */
-ir_node *new_rd_SymConst_align(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol, ir_type *tp) {
+ir_node *new_rd_SymConst_align(dbg_info *db, ir_graph *irg, ir_mode *mode, ir_type *symbol, ir_type *tp)
+{
symconst_symbol sym;
sym.type_p = symbol;
return new_rd_SymConst_type(db, irg, mode, sym, symconst_type_align, tp);
} /* new_rd_SymConst_align */
ir_node *
-new_rd_Sync(dbg_info *db, ir_node *block, int arity, ir_node *in[]) {
+new_rd_Sync(dbg_info *db, ir_node *block, int arity, ir_node *in[])
+{
ir_node *res;
ir_graph *rem = current_ir_graph;
int i;
} /* new_rd_Sync */
ir_node *
-new_rd_EndReg(dbg_info *db, ir_graph *irg, ir_node *block) {
+new_rd_EndReg(dbg_info *db, ir_graph *irg, ir_node *block)
+{
ir_node *res;
res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
} /* new_rd_EndReg */
ir_node *
-new_rd_EndExcept(dbg_info *db, ir_graph *irg, ir_node *block) {
+new_rd_EndExcept(dbg_info *db, ir_graph *irg, ir_node *block)
+{
ir_node *res;
res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
return res;
} /* new_rd_ASM */
-ir_node *new_r_Start(ir_graph *irg, ir_node *block) {
+ir_node *new_r_Start(ir_graph *irg, ir_node *block)
+{
return new_rd_Start(NULL, irg, block);
}
-ir_node *new_r_End(ir_graph *irg, ir_node *block) {
+ir_node *new_r_End(ir_graph *irg, ir_node *block)
+{
return new_rd_End(NULL, irg, block);
}
-ir_node *new_r_Const(ir_graph *irg, tarval *con) {
+ir_node *new_r_Const(ir_graph *irg, tarval *con)
+{
return new_rd_Const(NULL, irg, con);
}
-ir_node *new_r_Const_long(ir_graph *irg, ir_mode *mode, long value) {
+ir_node *new_r_Const_long(ir_graph *irg, ir_mode *mode, long value)
+{
return new_rd_Const_long(NULL, irg, mode, value);
}
-ir_node *new_r_Const_type(ir_graph *irg, tarval *con, ir_type *tp) {
+ir_node *new_r_Const_type(ir_graph *irg, tarval *con, ir_type *tp)
+{
return new_rd_Const_type(NULL, irg, con, tp);
}
ir_node *new_r_SymConst(ir_graph *irg, ir_mode *mode,
ir_node *objptr, ir_entity *ent) {
return new_rd_Sel(NULL, block, store, objptr, 0, NULL, ent);
}
-ir_node *new_r_Phi(ir_node *block, int arity, ir_node **in, ir_mode *mode) {
+ir_node *new_r_Phi(ir_node *block, int arity, ir_node **in, ir_mode *mode)
+{
return new_rd_Phi(NULL, block, arity, in, mode);
}
-ir_node *new_r_Sync(ir_node *block, int arity, ir_node *in[]) {
+ir_node *new_r_Sync(ir_node *block, int arity, ir_node *in[])
+{
return new_rd_Sync(NULL, block, arity, in);
}
-ir_node *new_r_defaultProj(ir_node *block, ir_node *arg, long max_proj) {
+ir_node *new_r_defaultProj(ir_node *block, ir_node *arg, long max_proj)
+{
return new_rd_defaultProj(NULL, block, arg, max_proj);
}
-ir_node *new_r_Bad(ir_graph *irg) {
+ir_node *new_r_Bad(ir_graph *irg)
+{
return get_irg_bad(irg);
}
-ir_node *new_r_EndReg(ir_graph *irg, ir_node *block) {
+ir_node *new_r_EndReg(ir_graph *irg, ir_node *block)
+{
return new_rd_EndReg(NULL, irg, block);
}
-ir_node *new_r_EndExcept(ir_graph *irg, ir_node *block) {
+ir_node *new_r_EndExcept(ir_graph *irg, ir_node *block)
+{
return new_rd_EndExcept(NULL, irg, block);
}
-ir_node *new_r_NoMem(ir_graph *irg) {
+ir_node *new_r_NoMem(ir_graph *irg)
+{
return get_irg_no_mem(irg);
}
ir_node *new_r_ASM(ir_node *block,
/** construction tools */
ir_node *
-new_d_Start(dbg_info *db) {
+new_d_Start(dbg_info *db)
+{
ir_node *res;
res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
} /* new_d_Start */
ir_node *
-new_d_End(dbg_info *db) {
+new_d_End(dbg_info *db)
+{
ir_node *res;
res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
op_End, mode_X, -1, NULL);
/** Creates a Phi node with 0 predecessors. */
static inline ir_node *
-new_rd_Phi0(ir_graph *irg, ir_node *block, ir_mode *mode) {
+new_rd_Phi0(ir_graph *irg, ir_node *block, ir_mode *mode)
+{
ir_node *res;
res = new_ir_node(NULL, irg, block, op_Phi, mode, 0, NULL);
* Further the last entry in frag_arr of current block points to n. This
* constructs a chain block->last_frag_op-> ... first_frag_op of all frag ops in the block.
*/
-static inline ir_node **new_frag_arr(ir_node *n) {
+static inline ir_node **new_frag_arr(ir_node *n)
+{
ir_node **arr;
int opt;
/**
* Returns the frag_arr from a node.
*/
-static inline ir_node **get_frag_arr(ir_node *n) {
+static inline ir_node **get_frag_arr(ir_node *n)
+{
switch (get_irn_opcode(n)) {
case iro_Call:
return n->attr.call.exc.frag_arr;
} /* get_frag_arr */
static void
-set_frag_value(ir_node **frag_arr, int pos, ir_node *val) {
+set_frag_value(ir_node **frag_arr, int pos, ir_node *val)
+{
#ifdef DEBUG_libfirm
int i;
} /* set_frag_value */
static ir_node *
-get_r_frag_value_internal(ir_node *block, ir_node *cfOp, int pos, ir_mode *mode) {
+get_r_frag_value_internal(ir_node *block, ir_node *cfOp, int pos, ir_mode *mode)
+{
ir_node *res;
ir_node **frag_arr;
* @param cf_pred the control flow node
* @param prev_cf_op if cf_pred is a Proj, the predecessor node, else equal to cf_pred
*/
-static int is_exception_flow(ir_node *cf_pred, ir_node *prev_cf_op) {
+static int is_exception_flow(ir_node *cf_pred, ir_node *prev_cf_op)
+{
/*
* Note: all projections from a raise are "exceptional control flow" we we handle it
* like a normal Jmp, because there is no "regular" one.
* This function must be called with an in-array of proper size.
*/
static ir_node *
-phi_merge(ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins) {
+phi_merge(ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
+{
ir_node *prevBlock, *res, *phi0, *phi0_all;
int i;
* @param mode the mode of this value (needed for Phi construction)
*/
static ir_node *
-get_r_value_internal(ir_node *block, int pos, ir_mode *mode) {
+get_r_value_internal(ir_node *block, int pos, ir_mode *mode)
+{
ir_node *res;
/* There are 4 cases to treat.
* Acceptable parameters are only Block nodes.
*/
void
-mature_immBlock(ir_node *block) {
+mature_immBlock(ir_node *block)
+{
int ins;
ir_node *n, **nin;
ir_node *next;
} /* mature_immBlock */
ir_node *
-new_d_Phi(dbg_info *db, int arity, ir_node **in, ir_mode *mode) {
+new_d_Phi(dbg_info *db, int arity, ir_node **in, ir_mode *mode)
+{
return new_bd_Phi(db, current_ir_graph->current_block, arity, in, mode);
} /* new_d_Phi */
ir_node *
-new_d_Const(dbg_info *db, tarval *con) {
+new_d_Const(dbg_info *db, tarval *con)
+{
return new_bd_Const(db, con);
} /* new_d_Const */
ir_node *
-new_d_Const_long(dbg_info *db, ir_mode *mode, long value) {
+new_d_Const_long(dbg_info *db, ir_mode *mode, long value)
+{
return new_bd_Const_long(db, mode, value);
} /* new_d_Const_long */
ir_node *
-new_d_Const_type(dbg_info *db, tarval *con, ir_type *tp) {
+new_d_Const_type(dbg_info *db, tarval *con, ir_type *tp)
+{
return new_bd_Const_type(db, con, tp);
} /* new_d_Const_type */
ir_node *
-new_d_defaultProj(dbg_info *db, ir_node *arg, long max_proj) {
+new_d_defaultProj(dbg_info *db, ir_node *arg, long max_proj)
+{
ir_node *res;
assert(arg->op == op_Cond);
arg->attr.cond.default_proj = max_proj;
* @param frag_store the address of the frag store in irn attributes, if this
* address contains a value != NULL, does nothing
*/
-void firm_alloc_frag_arr(ir_node *irn, ir_op *op, ir_node ***frag_store) {
+void firm_alloc_frag_arr(ir_node *irn, ir_op *op, ir_node ***frag_store)
+{
if (get_opt_precise_exc_context()) {
if ((current_ir_graph->phase_state == phase_building) &&
(get_irn_op(irn) == op) && /* Could be optimized away. */
} /* new_d_simpleSel */
ir_node *
-new_d_SymConst_type(dbg_info *db, ir_mode *mode, symconst_symbol value, symconst_kind kind, ir_type *tp) {
+new_d_SymConst_type(dbg_info *db, ir_mode *mode, symconst_symbol value, symconst_kind kind, ir_type *tp)
+{
return new_bd_SymConst_type(db, get_irg_start_block(current_ir_graph), mode,
value, kind, tp);
} /* new_d_SymConst_type */
ir_node *
-new_d_SymConst(dbg_info *db, ir_mode *mode, symconst_symbol value, symconst_kind kind) {
+new_d_SymConst(dbg_info *db, ir_mode *mode, symconst_symbol value, symconst_kind kind)
+{
return new_bd_SymConst_type(db, get_irg_start_block(current_ir_graph), mode,
value, kind, firm_unknown_type);
} /* new_d_SymConst */
ir_node *
-new_d_Sync(dbg_info *db, int arity, ir_node *in[]) {
+new_d_Sync(dbg_info *db, int arity, ir_node *in[])
+{
return new_rd_Sync(db, current_ir_graph->current_block, arity, in);
} /* new_d_Sync */
ir_node *
-new_d_EndReg(dbg_info *db) {
+new_d_EndReg(dbg_info *db)
+{
return new_bd_EndReg(db, current_ir_graph->current_block);
} /* new_d_EndReg */
ir_node *
-new_d_EndExcept(dbg_info *db) {
+new_d_EndExcept(dbg_info *db)
+{
return new_bd_EndExcept(db, current_ir_graph->current_block);
} /* new_d_EndExcept */
/* Block construction */
/* immature Block without predecessors */
ir_node *
-new_d_immBlock(dbg_info *db) {
+new_d_immBlock(dbg_info *db)
+{
ir_node *res;
assert(get_irg_phase_state(current_ir_graph) == phase_building);
} /* new_d_immBlock */
ir_node *
-new_immBlock(void) {
+new_immBlock(void)
+{
return new_d_immBlock(NULL);
} /* new_immBlock */
/* immature PartBlock with its predecessors */
ir_node *
-new_d_immPartBlock(dbg_info *db, ir_node *pred_jmp) {
+new_d_immPartBlock(dbg_info *db, ir_node *pred_jmp)
+{
ir_node *res = new_d_immBlock(db);
ir_node *blk = get_nodes_block(pred_jmp);
} /* new_d_immPartBlock */
ir_node *
-new_immPartBlock(ir_node *pred_jmp) {
+new_immPartBlock(ir_node *pred_jmp)
+{
return new_d_immPartBlock(NULL, pred_jmp);
} /* new_immPartBlock */
/* add an edge to a jmp/control flow node */
void
-add_immBlock_pred(ir_node *block, ir_node *jmp) {
+add_immBlock_pred(ir_node *block, ir_node *jmp)
+{
int n = ARR_LEN(block->in) - 1;
assert(!block->attr.block.is_matured && "Error: Block already matured!\n");
/* changing the current block */
void
-set_cur_block(ir_node *target) {
+set_cur_block(ir_node *target)
+{
current_ir_graph->current_block = target;
} /* set_cur_block */
/* get a value from the parameter array from the current block by its index */
ir_node *
-get_d_value(dbg_info *db, int pos, ir_mode *mode) {
+get_d_value(dbg_info *db, int pos, ir_mode *mode)
+{
ir_graph *irg = current_ir_graph;
assert(get_irg_phase_state(irg) == phase_building);
inc_irg_visited(irg);
/* get a value from the parameter array from the current block by its index */
ir_node *
-get_value(int pos, ir_mode *mode) {
+get_value(int pos, ir_mode *mode)
+{
return get_d_value(NULL, pos, mode);
} /* get_value */
/* set a value at position pos in the parameter array from the current block */
void
-set_value(int pos, ir_node *value) {
+set_value(int pos, ir_node *value)
+{
ir_graph *irg = current_ir_graph;
assert(get_irg_phase_state(irg) == phase_building);
assert(pos >= 0);
/* Find the value number for a node in the current block.*/
int
-find_value(ir_node *value) {
+find_value(ir_node *value)
+{
int i;
ir_node *bl = current_ir_graph->current_block;
/* get the current store */
ir_node *
-get_store(void) {
+get_store(void)
+{
ir_graph *irg = current_ir_graph;
assert(get_irg_phase_state(irg) == phase_building);
/* set the current store: handles automatic Sync construction for Load nodes */
void
-set_store(ir_node *store) {
+set_store(ir_node *store)
+{
ir_node *load, *pload, *pred, *in[2];
assert(get_irg_phase_state(current_ir_graph) == phase_building);
} /* set_store */
void
-keep_alive(ir_node *ka) {
+keep_alive(ir_node *ka)
+{
add_End_keepalive(get_irg_end(current_ir_graph), ka);
} /* keep_alive */
/* --- Useful access routines --- */
/* Returns the current block of the current graph. To set the current
block use set_cur_block. */
-ir_node *get_cur_block(void) {
+ir_node *get_cur_block(void)
+{
return get_irg_current_block(current_ir_graph);
} /* get_cur_block */
/* Returns the frame type of the current graph */
-ir_type *get_cur_frame_type(void) {
+ir_type *get_cur_frame_type(void)
+{
return get_irg_frame_type(current_ir_graph);
} /* get_cur_frame_type */
/* call once for each run of the library */
void
-firm_init_cons(uninitialized_local_variable_func_t *func) {
+firm_init_cons(uninitialized_local_variable_func_t *func)
+{
default_initialize_local_variable = func;
} /* firm_init_cons */
void
-irp_finalize_cons(void) {
+irp_finalize_cons(void)
+{
int i;
for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
irg_finalize_cons(get_irp_irg(i));
irp->phase_state = phase_high;
} /* irp_finalize_cons */
-ir_node *new_Start(void) {
+ir_node *new_Start(void)
+{
return new_d_Start(NULL);
}
-ir_node *new_End(void) {
+ir_node *new_End(void)
+{
return new_d_End(NULL);
}
-ir_node *new_Const(tarval *con) {
+ir_node *new_Const(tarval *con)
+{
return new_d_Const(NULL, con);
}
-ir_node *new_Const_long(ir_mode *mode, long value) {
+ir_node *new_Const_long(ir_mode *mode, long value)
+{
return new_d_Const_long(NULL, mode, value);
}
-ir_node *new_Const_type(tarval *con, ir_type *tp) {
+ir_node *new_Const_type(tarval *con, ir_type *tp)
+{
return new_d_Const_type(NULL, con, tp);
}
-ir_node *new_SymConst_type(ir_mode *mode, symconst_symbol value, symconst_kind kind, ir_type *type) {
+ir_node *new_SymConst_type(ir_mode *mode, symconst_symbol value, symconst_kind kind, ir_type *type)
+{
return new_d_SymConst_type(NULL, mode, value, kind, type);
}
-ir_node *new_SymConst(ir_mode *mode, symconst_symbol value, symconst_kind kind) {
+ir_node *new_SymConst(ir_mode *mode, symconst_symbol value, symconst_kind kind)
+{
return new_d_SymConst(NULL, mode, value, kind);
}
-ir_node *new_simpleSel(ir_node *store, ir_node *objptr, ir_entity *ent) {
+ir_node *new_simpleSel(ir_node *store, ir_node *objptr, ir_entity *ent)
+{
return new_d_simpleSel(NULL, store, objptr, ent);
}
-ir_node *new_Phi(int arity, ir_node **in, ir_mode *mode) {
+ir_node *new_Phi(int arity, ir_node **in, ir_mode *mode)
+{
return new_d_Phi(NULL, arity, in, mode);
}
-ir_node *new_Sync(int arity, ir_node *in[]) {
+ir_node *new_Sync(int arity, ir_node *in[])
+{
return new_d_Sync(NULL, arity, in);
}
-ir_node *new_defaultProj(ir_node *arg, long max_proj) {
+ir_node *new_defaultProj(ir_node *arg, long max_proj)
+{
return new_d_defaultProj(NULL, arg, max_proj);
}
-ir_node *new_Bad(void) {
+ir_node *new_Bad(void)
+{
return get_irg_bad(current_ir_graph);
}
-ir_node *new_EndReg(void) {
+ir_node *new_EndReg(void)
+{
return new_d_EndReg(NULL);
}
-ir_node *new_EndExcept(void) {
+ir_node *new_EndExcept(void)
+{
return new_d_EndExcept(NULL);
}
-ir_node *new_NoMem(void) {
+ir_node *new_NoMem(void)
+{
return get_irg_no_mem(current_ir_graph);
}
ir_node *new_ASM(int arity, ir_node *in[], ir_asm_constraint *inputs,
}
/* create a new anchor node */
-ir_node *new_Anchor(ir_graph *irg) {
+ir_node *new_Anchor(ir_graph *irg)
+{
ir_node *in[anchor_last];
memset(in, 0, sizeof(in));
return new_ir_node(NULL, irg, NULL, op_Anchor, mode_ANY, anchor_last, in);
static DUMP_NODE_EDGE_FUNC dump_node_edge_hook = NULL;
/* Set the hook to be called to dump additional edges to a node. */
-void set_dump_node_edge_hook(DUMP_NODE_EDGE_FUNC func) {
+void set_dump_node_edge_hook(DUMP_NODE_EDGE_FUNC func)
+{
dump_node_edge_hook = func;
}
/* Get the additional edge dump hook. */
-DUMP_NODE_EDGE_FUNC get_dump_node_edge_hook(void) {
+DUMP_NODE_EDGE_FUNC get_dump_node_edge_hook(void)
+{
return dump_node_edge_hook;
}
/* Set the hook to be called to dump additional edges to a block. */
-void set_dump_block_edge_hook(DUMP_NODE_EDGE_FUNC func) {
+void set_dump_block_edge_hook(DUMP_NODE_EDGE_FUNC func)
+{
dump_block_edge_hook = func;
}
/* Get the additional block edge dump hook. */
-DUMP_NODE_EDGE_FUNC get_dump_block_edge_hook(void) {
+DUMP_NODE_EDGE_FUNC get_dump_block_edge_hook(void)
+{
return dump_node_edge_hook;
}
/* set the ir graph hook */
-void set_dump_ir_graph_hook(DUMP_IR_GRAPH_FUNC hook) {
+void set_dump_ir_graph_hook(DUMP_IR_GRAPH_FUNC hook)
+{
dump_ir_graph_hook = hook;
}
/* set the node attribute hook */
-void set_dump_node_vcgattr_hook(DUMP_NODE_VCGATTR_FUNC hook) {
+void set_dump_node_vcgattr_hook(DUMP_NODE_VCGATTR_FUNC hook)
+{
dump_node_vcgattr_hook = hook;
}
/* set the edge attribute hook */
-void set_dump_edge_vcgattr_hook(DUMP_EDGE_VCGATTR_FUNC hook) {
+void set_dump_edge_vcgattr_hook(DUMP_EDGE_VCGATTR_FUNC hook)
+{
dump_edge_vcgattr_hook = hook;
}
/** Returns 0 if dump_out_edge_flag or dump_loop_information_flag
* are set, else returns dump_const_local_flag.
*/
-static int get_opt_dump_const_local(void) {
+static int get_opt_dump_const_local(void)
+{
if (dump_out_edge_flag || dump_loop_information_flag || (dump_new_edges_flag && edges_activated(current_ir_graph)))
return 0;
return dump_const_local;
}
/* Set a prefix filter for output functions. */
-void only_dump_method_with_name(ident *name) {
+void only_dump_method_with_name(ident *name)
+{
dump_file_filter_id = name;
}
/* Returns the prefix filter set with only_dump_method_with_name(). */
-ident *get_dump_file_filter_ident(void) {
+ident *get_dump_file_filter_ident(void)
+{
return dump_file_filter_id;
}
/* Returns non-zero if dump file filter is not set, or if it is a prefix of name. */
-int is_filtered_dump_name(ident *name) {
+int is_filtered_dump_name(ident *name)
+{
if (!dump_file_filter_id) return 1;
return id_is_prefix(dump_file_filter_id, name);
}
/* To turn off display of edge labels. Edge labels often cause xvcg to
abort with a segmentation fault. */
-void turn_off_edge_labels(void) {
+void turn_off_edge_labels(void)
+{
edge_label = 0;
}
-void dump_consts_local(int flag) {
+void dump_consts_local(int flag)
+{
dump_const_local = flag;
}
-void dump_node_idx_label(int flag) {
+void dump_node_idx_label(int flag)
+{
dump_node_idx_labels = flag;
}
-void dump_constant_entity_values(int flag) {
+void dump_constant_entity_values(int flag)
+{
const_entities = flag;
}
-void dump_keepalive_edges(int flag) {
+void dump_keepalive_edges(int flag)
+{
dump_keepalive = flag;
}
-void dump_new_edges(int flag) {
+void dump_new_edges(int flag)
+{
dump_new_edges_flag = flag;
}
-int get_opt_dump_keepalive_edges(void) {
+int get_opt_dump_keepalive_edges(void)
+{
return dump_keepalive;
}
-void dump_out_edges(int flag) {
+void dump_out_edges(int flag)
+{
dump_out_edge_flag = flag;
}
-void dump_dominator_information(int flag) {
+void dump_dominator_information(int flag)
+{
dump_dominator_information_flag = flag;
}
-void dump_loop_information(int flag) {
+void dump_loop_information(int flag)
+{
dump_loop_information_flag = flag;
}
-void dump_backedge_information(int flag) {
+void dump_backedge_information(int flag)
+{
dump_backedge_information_flag = flag;
}
* If the flag is set, the type name is output in [] in the node label,
* else it is output as info.
*/
-void set_opt_dump_analysed_type_info(int flag) {
+void set_opt_dump_analysed_type_info(int flag)
+{
opt_dump_analysed_type_info = flag;
}
-void dump_pointer_values_to_info(int flag) {
+void dump_pointer_values_to_info(int flag)
+{
opt_dump_pointer_values_to_info = flag;
}
-void dump_ld_names(int flag) {
+void dump_ld_names(int flag)
+{
dump_ld_name = flag;
}
-void dump_all_anchors(int flag) {
+void dump_all_anchors(int flag)
+{
dump_anchors = flag;
}
-void dump_macroblock_edges(int flag) {
+void dump_macroblock_edges(int flag)
+{
dump_macro_block_edges = flag;
}
-void dump_block_marker_in_title(int flag) {
+void dump_block_marker_in_title(int flag)
+{
dump_block_marker = flag;
}
* returns the name of a mode or ERROR_TXT if mode is NOT a mode object.
* in the later case, sets bad.
*/
-const char *get_mode_name_ex(const ir_mode *mode, int *bad) {
+const char *get_mode_name_ex(const ir_mode *mode, int *bad)
+{
if (is_mode(mode))
return get_mode_name(mode);
*bad |= 1;
/**
* Prints the VCG color to a file.
*/
-static void print_vcg_color(FILE *F, ird_color_t color) {
+static void print_vcg_color(FILE *F, ird_color_t color)
+{
assert(color < ird_color_count);
fprintf(F, "color:%s", color_names[color]);
}
*
* Projs should be dumped near their predecessor, so they get "nearedge".
*/
-static void print_node_edge_kind(FILE *F, ir_node *node) {
+static void print_node_edge_kind(FILE *F, ir_node *node)
+{
if (is_Proj(node)) {
fprintf(F, "nearedge: ");
} else {
* initialization is performed lazily and not called from within init_firm.
*
* Creates the link attribute map. */
-static void init_irdump(void) {
+static void init_irdump(void)
+{
/* We need a new, empty map. */
if (irdump_link_map) pmap_destroy(irdump_link_map);
irdump_link_map = pmap_create();
/**
* Returns the private link field.
*/
-static void *ird_get_irn_link(const ir_node *n) {
+static void *ird_get_irn_link(const ir_node *n)
+{
void *res = NULL;
if (irdump_link_map == NULL)
return NULL;
/**
* Sets the private link field.
*/
-static void ird_set_irn_link(const ir_node *n, void *x) {
+static void ird_set_irn_link(const ir_node *n, void *x)
+{
if (irdump_link_map == NULL)
init_irdump();
pmap_insert(irdump_link_map, n, x);
/**
* Gets the private link field of an irg.
*/
-static void *ird_get_irg_link(const ir_graph *irg) {
+static void *ird_get_irg_link(const ir_graph *irg)
+{
void *res = NULL;
if (irdump_link_map == NULL)
return NULL;
/**
* Sets the private link field of an irg.
*/
-static void ird_set_irg_link(const ir_graph *irg, void *x) {
+static void ird_set_irg_link(const ir_graph *irg, void *x)
+{
if (irdump_link_map == NULL)
init_irdump();
pmap_insert(irdump_link_map, irg, x);
/**
* Walker, clears the private link field.
*/
-static void clear_link(ir_node *node, void *env) {
+static void clear_link(ir_node *node, void *env)
+{
(void) env;
ird_set_irn_link(node, NULL);
}
* If the entity has a ld_name, returns it if the dump_ld_name is set,
* else returns the name of the entity.
*/
-static const char *_get_ent_dump_name(const ir_entity *ent, int dump_ld_name) {
+static const char *_get_ent_dump_name(const ir_entity *ent, int dump_ld_name)
+{
if (ent == NULL)
return "<NULL entity>";
if (dump_ld_name) {
* If the entity has a ld_name, returns it if the option dump_ld_name is set,
* else returns the name of the entity.
*/
-const char *get_ent_dump_name(const ir_entity *ent) {
+const char *get_ent_dump_name(const ir_entity *ent)
+{
return _get_ent_dump_name(ent, dump_ld_name);
}
/* Returns the name of an IRG. */
-const char *get_irg_dump_name(const ir_graph *irg) {
+const char *get_irg_dump_name(const ir_graph *irg)
+{
/* Don't use get_entity_ld_ident (ent) as it computes the mangled name! */
return _get_ent_dump_name(get_irg_entity(irg), 1);
}
/**
* Returns non-zero if a node is in floating state.
*/
-static int node_floats(const ir_node *n) {
+static int node_floats(const ir_node *n)
+{
return ((get_irn_pinned(n) == op_pin_state_floats) &&
(get_irg_pinned(current_ir_graph) == op_pin_state_floats));
}
/**
* Walker that visits the anchors
*/
-static void ird_walk_graph(ir_graph *irg, irg_walk_func *pre, irg_walk_func *post, void *env) {
+static void ird_walk_graph(ir_graph *irg, irg_walk_func *pre, irg_walk_func *post, void *env)
+{
if (dump_anchors || (dump_new_edges_flag && edges_activated(irg))) {
irg_walk_anchors(irg, pre, post, env);
} else {
/**
* Walker, allocates an array for all blocks and puts it's nodes non-floating nodes into this array.
*/
-static void collect_node(ir_node *node, void *env) {
+static void collect_node(ir_node *node, void *env)
+{
(void) env;
if (is_Block(node)
|| node_floats(node)
* graphs not visited.
* Free the list with DEL_ARR_F().
*/
-static ir_node **construct_block_lists(ir_graph *irg) {
+static ir_node **construct_block_lists(ir_graph *irg)
+{
int i;
#ifdef INTERPROCEDURAL_VIEW
int rem_view = get_interprocedural_view();
* Sets the irg link field to NULL in all
* graphs not visited.
*/
-static list_tuple *construct_extblock_lists(ir_graph *irg) {
+static list_tuple *construct_extblock_lists(ir_graph *irg)
+{
ir_node **blk_list = construct_block_lists(irg);
int i;
ir_graph *rem = current_ir_graph;
/**
* Dump the type of a node n to a file F if it's known.
*/
-static int dump_node_typeinfo(FILE *F, ir_node *n) {
+static int dump_node_typeinfo(FILE *F, ir_node *n)
+{
int bad = 0;
if (opt_dump_analysed_type_info) {
#include <math.h>
#include "execution_frequency.h"
-static void dump_node_ana_vals(FILE *F, ir_node *n) {
+static void dump_node_ana_vals(FILE *F, ir_node *n)
+{
(void) F;
(void) n;
return;
/* Dumps a node label without the enclosing ". */
-int dump_node_label(FILE *F, ir_node *n) {
+int dump_node_label(FILE *F, ir_node *n)
+{
int bad = 0;
bad |= dump_node_opcode(F, n);
}
/* Dumps the vrp information of a node to a file */
-int dump_vrp_info(FILE *F, ir_node *n) {
+int dump_vrp_info(FILE *F, ir_node *n)
+{
if (!n->vrp.valid) {
return 1;
}
/** outputs the predecessors of n, that are constants, local. I.e.,
generates a copy of the constant predecessors for each node called with. */
-static void dump_const_node_local(FILE *F, ir_node *n) {
+static void dump_const_node_local(FILE *F, ir_node *n)
+{
int i;
if (!get_opt_dump_const_local()) return;
}
/** If the block of an edge is a const_like node, dump it local with an edge */
-static void dump_const_block_local(FILE *F, ir_node *n) {
+static void dump_const_block_local(FILE *F, ir_node *n)
+{
ir_node *blk;
if (!get_opt_dump_const_local()) return;
/** dump the edge to the block this node belongs to */
static void
-dump_ir_block_edge(FILE *F, ir_node *n) {
+dump_ir_block_edge(FILE *F, ir_node *n)
+{
if (get_opt_dump_const_local() && is_constlike_node(n)) return;
if (is_no_Block(n)) {
ir_node *block = get_nodes_block(n);
}
static void
-print_data_edge_vcgattr(FILE *F, ir_node *from, int to) {
+print_data_edge_vcgattr(FILE *F, ir_node *from, int to)
+{
/*
* do not use get_nodes_block() here, will fail
* if the irg is not pinned.
}
static void
-print_mem_edge_vcgattr(FILE *F, ir_node *from, int to) {
+print_mem_edge_vcgattr(FILE *F, ir_node *from, int to)
+{
/*
* do not use get_nodes_block() here, will fail
* if the irg is not pinned.
}
/** Print the vcg attributes for the edge from node from to it's to's input */
-static void print_edge_vcgattr(FILE *F, ir_node *from, int to) {
+static void print_edge_vcgattr(FILE *F, ir_node *from, int to)
+{
assert(from);
if (dump_edge_vcgattr_hook)
}
/** dump edges to our inputs */
-static void dump_ir_data_edges(FILE *F, ir_node *n) {
+static void dump_ir_data_edges(FILE *F, ir_node *n)
+{
int i, num;
ir_visited_t visited = get_irn_visited(n);
* Dump the ir_edges
*/
static void
-dump_ir_edges(FILE *F, ir_node *n) {
+dump_ir_edges(FILE *F, ir_node *n)
+{
const ir_edge_t *edge;
int i = 0;
/** Dumps a node and its edges but not the block edge */
-static void dump_node_wo_blockedge(ir_node *n, void *env) {
+static void dump_node_wo_blockedge(ir_node *n, void *env)
+{
FILE *F = env;
dump_node(F, n);
dump_ir_data_edges(F, n);
}
/** Dumps a node and its edges. */
-static void dump_whole_node(ir_node *n, void *env) {
+static void dump_whole_node(ir_node *n, void *env)
+{
FILE *F = env;
dump_node_wo_blockedge(n, env);
if (!node_floats(n))
}
/** Dumps a const-like node. */
-static void dump_const_node(ir_node *n, void *env) {
+static void dump_const_node(ir_node *n, void *env)
+{
if (is_Block(n)) return;
dump_node_wo_blockedge(n, env);
}
/** Dumps a constant expression as entity initializer, array bound ...
*/
-static void dump_const_expression(FILE *F, ir_node *value) {
+static void dump_const_expression(FILE *F, ir_node *value)
+{
ir_graph *rem = current_ir_graph;
int rem_dump_const_local = dump_const_local;
dump_const_local = 0;
* Expects to find nodes belonging to the block as list in its
* link field.
* Dumps the edges of all nodes including itself. */
-static void dump_whole_block(FILE *F, ir_node *block) {
+static void dump_whole_block(FILE *F, ir_node *block)
+{
ir_node *node;
ird_color_t color = ird_color_block_background;
/** dumps a graph block-wise. Expects all blockless nodes in arr in irgs link.
* The outermost nodes: blocks and nodes not op_pin_state_pinned, Bad, Unknown. */
static void
-dump_block_graph(FILE *F, ir_graph *irg) {
+dump_block_graph(FILE *F, ir_graph *irg)
+{
int i;
ir_graph *rem = current_ir_graph;
ir_node **arr = ird_get_irg_link(irg);
* Dump the info for an irg.
* Parsed by XVCG but not shown. use yComp.
*/
-static void dump_graph_info(FILE *F, ir_graph *irg) {
+static void dump_graph_info(FILE *F, ir_graph *irg)
+{
fprintf(F, "info1: \"");
dump_entity_to_file(F, get_irg_entity(irg), dump_verbosity_entattrs | dump_verbosity_entconsts);
fprintf(F, "\"\n");
/** Dumps an irg as a graph clustered by block nodes.
* If interprocedural view edges can point to nodes out of this graph.
*/
-static void dump_graph_from_list(FILE *F, ir_graph *irg) {
+static void dump_graph_from_list(FILE *F, ir_graph *irg)
+{
ir_entity *ent = get_irg_entity(irg);
fprintf(F, "graph: { title: \"");
/** dumps a graph extended block-wise. Expects all blockless nodes in arr in irgs link.
* The outermost nodes: blocks and nodes not op_pin_state_pinned, Bad, Unknown. */
static void
-dump_extblock_graph(FILE *F, ir_graph *irg) {
+dump_extblock_graph(FILE *F, ir_graph *irg)
+{
int i;
ir_graph *rem = current_ir_graph;
ir_extblk **arr = ird_get_irg_link(irg);
}
#if 0
-static int print_type_info(FILE *F, ir_type *tp) {
+static int print_type_info(FILE *F, ir_type *tp)
+{
int bad = 0;
if (get_type_state(tp) == layout_undefined) {
return bad;
}
-static void print_typespecific_info(FILE *F, ir_type *tp) {
+static void print_typespecific_info(FILE *F, ir_type *tp)
+{
switch (get_type_tpop_code(tp)) {
case tpo_class:
fprintf(F, "peculiarity: %s\n", get_peculiarity_string(get_class_peculiarity(tp)));
}
#endif
-static void print_typespecific_vcgattr(FILE *F, ir_type *tp) {
+static void print_typespecific_vcgattr(FILE *F, ir_type *tp)
+{
switch (get_type_tpop_code(tp)) {
case tpo_class:
fprintf(F, " " TYPE_CLASS_NODE_ATTR);
/**
* Dumps a new style initializer.
*/
-static void dump_entity_initializer(FILE *F, const ir_entity *ent) {
+static void dump_entity_initializer(FILE *F, const ir_entity *ent)
+{
/* TODO */
(void) F;
(void) ent;
}
/** Dumps a type or entity and it's edges. */
-static void dump_type_info(type_or_ent tore, void *env) {
+static void dump_type_info(type_or_ent tore, void *env)
+{
FILE *F = env;
int i = 0; /* to shutup gcc */
* If env->dump_ent dumps entities of classes and overwrites edges.
*/
static void
-dump_class_hierarchy_node(type_or_ent tore, void *ctx) {
+dump_class_hierarchy_node(type_or_ent tore, void *ctx)
+{
h_env_t *env = ctx;
FILE *F = env->f;
int i = 0; /* to shutup gcc */
/* dump out edges */
static void
-dump_out_edge(ir_node *n, void *env) {
+dump_out_edge(ir_node *n, void *env)
+{
FILE *F = env;
int i;
for (i = get_irn_n_outs(n) - 1; i >= 0; --i) {
}
static inline void
-dump_loop_label(FILE *F, ir_loop *loop) {
+dump_loop_label(FILE *F, ir_loop *loop)
+{
fprintf(F, "loop %d, %d sons, %d nodes",
get_loop_depth(loop), get_loop_n_sons(loop), get_loop_n_nodes(loop));
}
-static inline void dump_loop_info(FILE *F, ir_loop *loop) {
+static inline void dump_loop_info(FILE *F, ir_loop *loop)
+{
fprintf(F, " info1: \"");
fprintf(F, " loop nr: %d", get_loop_loop_nr(loop));
#ifdef DEBUG_libfirm /* GL @@@ debug analyses */
}
static inline void
-dump_loop_node(FILE *F, ir_loop *loop) {
+dump_loop_node(FILE *F, ir_loop *loop)
+{
fprintf(F, "node: {title: \"");
PRINT_LOOPID(loop);
fprintf(F, "\" label: \"");
}
static inline void
-dump_loop_node_edge(FILE *F, ir_loop *loop, int i) {
+dump_loop_node_edge(FILE *F, ir_loop *loop, int i)
+{
assert(loop);
fprintf(F, "edge: {sourcename: \"");
PRINT_LOOPID(loop);
}
static inline void
-dump_loop_son_edge(FILE *F, ir_loop *loop, int i) {
+dump_loop_son_edge(FILE *F, ir_loop *loop, int i)
+{
assert(loop);
fprintf(F, "edge: {sourcename: \"");
PRINT_LOOPID(loop);
}
static
-void dump_loops(FILE *F, ir_loop *loop) {
+void dump_loops(FILE *F, ir_loop *loop)
+{
int i;
/* dump this loop node */
dump_loop_node(F, loop);
}
static inline
-void dump_loop_nodes_into_graph(FILE *F, ir_graph *irg) {
+void dump_loop_nodes_into_graph(FILE *F, ir_graph *irg)
+{
ir_loop *loop = get_irg_loop(irg);
if (loop != NULL) {
/**
* dumps the VCG header
*/
-void dump_vcg_header(FILE *F, const char *name, const char *layout, const char *orientation) {
+void dump_vcg_header(FILE *F, const char *name, const char *layout, const char *orientation)
+{
int i;
char *label;
* @param suffix1 first filename suffix
* @param suffix2 second filename suffix
*/
-FILE *vcg_open(const ir_graph *irg, const char *suffix1, const char *suffix2) {
+FILE *vcg_open(const ir_graph *irg, const char *suffix1, const char *suffix2)
+{
FILE *F;
const char *nm = get_irg_dump_name(irg);
int len = strlen(nm), i, j;
* @param name prefix file name
* @param suffix filename suffix
*/
-FILE *vcg_open_name(const char *name, const char *suffix) {
+FILE *vcg_open_name(const char *name, const char *suffix)
+{
FILE *F;
char *fname; /* filename to put the vcg information in */
int i, j, len = strlen(name);
/**
* Dumps the vcg file footer
*/
-void dump_vcg_footer(FILE *F) {
+void dump_vcg_footer(FILE *F)
+{
fprintf(F, "}\n");
}
/*---------------------------------------------------------------------*/
static void
-dump_block_to_cfg(ir_node *block, void *env) {
+dump_block_to_cfg(ir_node *block, void *env)
+{
FILE *F = env;
int i, fl = 0;
ir_node *pred;
}
-static void descend_and_dump(FILE *F, ir_node *n, int depth, pset *mark_set) {
+static void descend_and_dump(FILE *F, ir_node *n, int depth, pset *mark_set)
+{
if (pset_find_ptr(mark_set, n))
return;
}
static int subgraph_counter = 0;
-void dump_subgraph(ir_node *root, int depth, const char *suffix) {
+void dump_subgraph(ir_node *root, int depth, const char *suffix)
+{
FILE *F;
char buf[32];
}
}
-void dump_callgraph(const char *suffix) {
+void dump_callgraph(const char *suffix)
+{
FILE *F = vcg_open_name("Callgraph", suffix);
if (F != NULL) {
#if 0
/* Dump all irgs in interprocedural view to a single file. */
-void dump_all_cg_block_graph(const char *suffix) {
+void dump_all_cg_block_graph(const char *suffix)
+{
FILE *f = vcg_open_name("All_graphs", suffix);
if (f != NULL) {
/* dump_ir_graph_w_types */
/*---------------------------------------------------------------------*/
-void dump_all_ir_graphs(dump_graph_func *dmp_grph, const char *suffix) {
+void dump_all_ir_graphs(dump_graph_func *dmp_grph, const char *suffix)
+{
int i;
for (i = get_irp_n_irgs() - 1; i >= 0; --i)
dmp_grph(get_irp_irg(i), suffix);
/**
* Wrapper around dump_all_ir_graphs().
*/
-static int dump_all_ir_graphs_wrapper(ir_prog *irp, void *context) {
+static int dump_all_ir_graphs_wrapper(ir_prog *irp, void *context)
+{
struct pass_t *pass = context;
(void)irp;
* packed together in one subgraph/box *
*--------------------------------------------------------------------------------*/
-void dump_loops_standalone(FILE *F, ir_loop *loop) {
+void dump_loops_standalone(FILE *F, ir_loop *loop)
+{
int i = 0, loop_node_started = 0, son_number = 0, first = 0;
loop_element le;
ir_loop *son = NULL;
}
}
-void dump_callgraph_loop_tree(const char *suffix) {
+void dump_callgraph_loop_tree(const char *suffix)
+{
FILE *F;
F = vcg_open_name("Callgraph_looptree", suffix);
dump_vcg_header(F, "callgraph looptree", "Tree", "top_to_bottom");
/* Dumps the firm nodes in the loop tree to a graph along with the loop nodes.*/
/*----------------------------------------------------------------------------*/
-void collect_nodeloop(FILE *F, ir_loop *loop, eset *loopnodes) {
+void collect_nodeloop(FILE *F, ir_loop *loop, eset *loopnodes)
+{
int i, son_number = 0, node_number = 0;
if (dump_loop_information_flag) dump_loop_node(F, loop);
}
}
-void collect_nodeloop_external_nodes(ir_loop *loop, eset *loopnodes, eset *extnodes) {
+void collect_nodeloop_external_nodes(ir_loop *loop, eset *loopnodes, eset *extnodes)
+{
int i, j, start;
for(i = 0; i < get_loop_n_elements(loop); i++) {
}
}
-void dump_loop(ir_loop *l, const char *suffix) {
+void dump_loop(ir_loop *l, const char *suffix)
+{
FILE *F;
char name[50];
* @param suffix2 The second suffix.
* @param suffix3 The third suffix.
*/
-static FILE *text_open(const char *basename, const char * suffix1, const char *suffix2, const char *suffix3) {
+static FILE *text_open(const char *basename, const char * suffix1, const char *suffix2, const char *suffix3)
+{
FILE *F;
int len = strlen(basename), i, j;
char *fname; /* filename to put the vcg information in */
}
/* Write the irnode and all its attributes to the file passed. */
-int dump_irnode_to_file(FILE *F, ir_node *n) {
+int dump_irnode_to_file(FILE *F, ir_node *n)
+{
int i, bad = 0;
char comma;
ir_graph *irg;
-void dump_irnode(ir_node *n) {
+void dump_irnode(ir_node *n)
+{
dump_irnode_to_file(stdout, n);
}
-void dump_graph_to_file(FILE *F, ir_graph *irg) {
+void dump_graph_to_file(FILE *F, ir_graph *irg)
+{
fprintf(F, "graph %s\n", get_irg_dump_name(irg));
}
-void dump_graph(ir_graph *g) {
+void dump_graph(ir_graph *g)
+{
dump_graph_to_file(stdout, g);
}
-static void dump_node_to_graph_file(ir_node *n, void *env) {
+static void dump_node_to_graph_file(ir_node *n, void *env)
+{
FILE *F = (FILE *)env;
dump_irnode_to_file(F, n);
fprintf(F, "\n");
}
-void dump_graph_as_text(ir_graph *irg, const char *suffix) {
+void dump_graph_as_text(ir_graph *irg, const char *suffix)
+{
const char *basename = get_irg_dump_name(irg);
FILE *F;
}
#ifdef EXTENDED_ACCESS_STATS
-static int addr_is_alloc(ir_node *acc) {
+static int addr_is_alloc(ir_node *acc)
+{
ir_node *addr = NULL;
ir_opcode addr_op;
if (is_memop(acc)) {
fprintf(F, " hidden_user");
}
-void dump_entity_to_file_prefix(FILE *F, ir_entity *ent, char *prefix, unsigned verbosity) {
+void dump_entity_to_file_prefix(FILE *F, ir_entity *ent, char *prefix, unsigned verbosity)
+{
int i, j;
ir_type *owner, *type;
}
}
-void dump_entity_to_file (FILE *F, ir_entity *ent, unsigned verbosity) {
+void dump_entity_to_file (FILE *F, ir_entity *ent, unsigned verbosity)
+{
dump_entity_to_file_prefix (F, ent, "", verbosity);
fprintf(F, "\n");
}
-void dump_entity(ir_entity *ent) {
+void dump_entity(ir_entity *ent)
+{
dump_entity_to_file(stdout, ent, dump_verbosity_max);
}
-void dump_type_to_file(FILE *F, ir_type *tp, dump_verbosity verbosity) {
+void dump_type_to_file(FILE *F, ir_type *tp, dump_verbosity verbosity)
+{
int i;
if ((is_Class_type(tp)) && (verbosity & dump_verbosity_noClassTypes)) return;
fprintf(F, "\n\n");
}
-void dump_type(ir_type *tp) {
+void dump_type(ir_type *tp)
+{
dump_type_to_file (stdout, tp, dump_verbosity_max);
}
-void dump_types_as_text(unsigned verbosity, const char *suffix) {
+void dump_types_as_text(unsigned verbosity, const char *suffix)
+{
const char *basename;
FILE *F;
int i, n_types = get_irp_n_types();
fclose(F);
}
-void dump_globals_as_text(unsigned verbosity, const char *suffix) {
+void dump_globals_as_text(unsigned verbosity, const char *suffix)
+{
const char *basename;
FILE *F;
ir_type *g = get_glob_type();
/**
* Get the predecessor block.
*/
-static ir_node *get_block_n(const ir_node *block, int pos) {
+static ir_node *get_block_n(const ir_node *block, int pos)
+{
if (is_Block(block))
return get_Block_cfgpred_block(block, pos);
/* might be a Bad */
/**
* Returns an ID for the given edge.
*/
-static inline long edge_get_id(const ir_edge_t *e) {
+static inline long edge_get_id(const ir_edge_t *e)
+{
#ifdef DEBUG_libfirm
return e->edge_nr;
#else /* DEBUG_libfirm */
* Each user has to remember his given offset and the size of his private data.
* To be called before FIRM is initialized.
*/
-int edges_register_private_data(size_t n) {
+int edges_register_private_data(size_t n)
+{
int res = edges_private_size;
assert(!edges_used && "you cannot register private edge data, if edges have been initialized");
* The user has to remember his offset and the size of his data!
* Caution: Using wrong values here can destroy other users private data!
*/
-void edges_reset_private_data(ir_graph *irg, int offset, unsigned size) {
+void edges_reset_private_data(ir_graph *irg, int offset, unsigned size)
+{
irg_edge_info_t *info = _get_irg_edge_info(irg, EDGE_KIND_NORMAL);
ir_edge_t *edge;
ir_edgeset_iterator_t iter;
* Initialize the out information for a graph.
* @note Dead node elimination can call this on an already initialized graph.
*/
-void edges_init_graph_kind(ir_graph *irg, ir_edge_kind_t kind) {
+void edges_init_graph_kind(ir_graph *irg, ir_edge_kind_t kind)
+{
if (edges_activated_kind(irg, kind)) {
irg_edge_info_t *info = _get_irg_edge_info(irg, kind);
size_t amount = irg->estimated_node_count * 2;
* Get the edge object of an outgoing edge at a node.
* Looks for an edge for all kinds.
*/
-const ir_edge_t *get_irn_edge(ir_graph *irg, const ir_node *src, int pos) {
+const ir_edge_t *get_irn_edge(ir_graph *irg, const ir_node *src, int pos)
+{
const ir_edge_t *edge;
if((edge = get_irn_edge_kind(irg, src, pos, EDGE_KIND_NORMAL)) == NULL)
edge = get_irn_edge_kind(irg, src, pos, EDGE_KIND_BLOCK);
* @param tgt the edge target
* @param kind the kind of the edge
*/
-static inline void edge_change_cnt(ir_node *tgt, ir_edge_kind_t kind, int ofs) {
+static inline void edge_change_cnt(ir_node *tgt, ir_edge_kind_t kind, int ofs)
+{
irn_edge_info_t *info = _get_irn_edge_info(tgt, kind);
info->out_count += ofs;
* Verify the edge list of a node, ie. ensure it's a loop:
* head -> e_1 -> ... -> e_n -> head
*/
-static inline void vrfy_list_head(ir_node *irn, ir_edge_kind_t kind) {
+static inline void vrfy_list_head(ir_node *irn, ir_edge_kind_t kind)
+{
int err = 0;
int num = 0;
pset *lh_set = pset_new_ptr(16);
/**
* Post-Walker: notify all edges
*/
-static void build_edges_walker(ir_node *irn, void *data) {
+static void build_edges_walker(ir_node *irn, void *data)
+{
struct build_walker *w = data;
int i, n;
ir_edge_kind_t kind = w->kind;
* Pre-Walker: initializes the list-heads and set the out-count
* of all nodes to 0.
*/
-static void init_lh_walker(ir_node *irn, void *data) {
+static void init_lh_walker(ir_node *irn, void *data)
+{
struct build_walker *w = data;
ir_edge_kind_t kind = w->kind;
list_head *head = _get_irn_outs_head(irn, kind);
* b) it might be sufficient to add those stupid NO_REG nodes
* to the anchor
*/
-static void init_lh_walker_dep(ir_node *irn, void *data) {
+static void init_lh_walker_dep(ir_node *irn, void *data)
+{
struct build_walker *w = data;
ir_edge_kind_t kind = w->kind;
list_head *head = _get_irn_outs_head(irn, kind);
* Visitor: initializes the list-heads and set the out-count
* of all nodes to 0 of nodes that are not seen so far.
*/
-static void visitor(ir_node *irn, void *data) {
+static void visitor(ir_node *irn, void *data)
+{
visitor_info_t *info = data;
if (!irn_visited_else_mark(irn)) {
/**
* Clear link field of all nodes.
*/
-static void clear_links(ir_node *irn, void *env) {
+static void clear_links(ir_node *irn, void *env)
+{
struct build_walker *w = env;
bitset_t *bs;
/**
* Increases count (stored in link field) for all operands of a node.
*/
-static void count_user(ir_node *irn, void *env) {
+static void count_user(ir_node *irn, void *env)
+{
int i;
int first;
(void) env;
/**
* Verifies if collected count, number of edges in list and stored edge count are in sync.
*/
-static void verify_edge_counter(ir_node *irn, void *env) {
+static void verify_edge_counter(ir_node *irn, void *env)
+{
struct build_walker *w = env;
bitset_t *bs;
int list_cnt;
/**
* Verifies the out edges of an irg.
*/
-int edges_verify(ir_graph *irg) {
+int edges_verify(ir_graph *irg)
+{
struct build_walker w;
int problem_found = 0;
/**
* Wrapper to edges_verify to be run as an ir_graph pass.
*/
-static int edges_verify_wrapper(ir_graph *irg, void *context) {
+static int edges_verify_wrapper(ir_graph *irg, void *context)
+{
struct pass_t *pass = context;
int problems_found = edges_verify(irg);
/* do NOT rerun the pass if verify is ok :-) */
}
/* Creates an ir_graph pass for edges_verify(). */
-ir_graph_pass_t *irg_verify_edges_pass(const char *name, unsigned assert_on_problem) {
+ir_graph_pass_t *irg_verify_edges_pass(const char *name, unsigned assert_on_problem)
+{
struct pass_t *pass = XMALLOCZ(struct pass_t);
def_graph_pass_constructor(
return &pass->pass;
}
-void init_edges(void) {
+void init_edges(void)
+{
FIRM_DBG_REGISTER(dbg, DBG_EDGES);
/* firm_dbg_set_mask(dbg, -1); */
}
-void edges_init_dbg(int do_dbg) {
+void edges_init_dbg(int do_dbg)
+{
edges_dbg = do_dbg;
}
-void edges_activate(ir_graph *irg) {
+void edges_activate(ir_graph *irg)
+{
edges_activate_kind(irg, EDGE_KIND_NORMAL);
edges_activate_kind(irg, EDGE_KIND_BLOCK);
if (get_irg_phase_state(irg) == phase_backend)
edges_activate_kind(irg, EDGE_KIND_DEP);
}
-void edges_deactivate(ir_graph *irg) {
+void edges_deactivate(ir_graph *irg)
+{
if (get_irg_phase_state(irg) == phase_backend)
edges_deactivate_kind(irg, EDGE_KIND_DEP);
edges_deactivate_kind(irg, EDGE_KIND_BLOCK);
return activated;
}
-int edges_assure_kind(ir_graph *irg, ir_edge_kind_t kind) {
+int edges_assure_kind(ir_graph *irg, ir_edge_kind_t kind)
+{
int activated = edges_activated_kind(irg, kind);
if (!activated)
return activated;
}
-void edges_node_deleted(ir_node *irn, ir_graph *irg) {
+void edges_node_deleted(ir_node *irn, ir_graph *irg)
+{
edges_node_deleted_kind(irn, EDGE_KIND_NORMAL, irg);
edges_node_deleted_kind(irn, EDGE_KIND_BLOCK, irg);
}
-void edges_node_revival(ir_node *irn, ir_graph *irg) {
+void edges_node_revival(ir_node *irn, ir_graph *irg)
+{
edges_node_revival_kind(irn, EDGE_KIND_NORMAL, irg);
edges_node_revival_kind(irn, EDGE_KIND_BLOCK, irg);
}
-const ir_edge_t *(get_irn_out_edge_first_kind)(const ir_node *irn, ir_edge_kind_t kind) {
+const ir_edge_t *(get_irn_out_edge_first_kind)(const ir_node *irn, ir_edge_kind_t kind)
+{
return _get_irn_out_edge_first_kind(irn, kind);
}
-const ir_edge_t *(get_irn_out_edge_next)(const ir_node *irn, const ir_edge_t *last) {
+const ir_edge_t *(get_irn_out_edge_next)(const ir_node *irn, const ir_edge_t *last)
+{
return _get_irn_out_edge_next(irn, last);
}
-ir_node *(get_edge_src_irn)(const ir_edge_t *edge) {
+ir_node *(get_edge_src_irn)(const ir_edge_t *edge)
+{
return _get_edge_src_irn(edge);
}
-int (get_edge_src_pos)(const ir_edge_t *edge) {
+int (get_edge_src_pos)(const ir_edge_t *edge)
+{
return _get_edge_src_pos(edge);
}
-int (get_irn_n_edges_kind)(const ir_node *irn, ir_edge_kind_t kind) {
+int (get_irn_n_edges_kind)(const ir_node *irn, ir_edge_kind_t kind)
+{
return _get_irn_n_edges_kind(irn, kind);
}
-void dump_all_out_edges(ir_node *irn) {
+void dump_all_out_edges(ir_node *irn)
+{
int i;
for (i = 0; i < EDGE_KIND_LAST; ++i) {
const ir_edge_t *edge;
#undef R_FLAG
/* for compatibility reasons */
-void set_optimize(int value) {
+void set_optimize(int value)
+{
if (value) libFIRM_opt |= irf_optimize;
else libFIRM_opt &= ~irf_optimize;
}
-int (get_optimize)(void) {
+int (get_optimize)(void)
+{
return get_opt_optimize();
}
-void set_opt_control_flow(int value) {
+void set_opt_control_flow(int value)
+{
set_opt_control_flow_straightening(value);
set_opt_control_flow_weak_simplification(value);
set_opt_control_flow_strong_simplification(value);
}
/* Save the current optimization state. */
-void save_optimization_state(optimization_state_t *state) {
+void save_optimization_state(optimization_state_t *state)
+{
*state = libFIRM_opt;
}
/* Restore the current optimization state. */
-void restore_optimization_state(const optimization_state_t *state) {
+void restore_optimization_state(const optimization_state_t *state)
+{
libFIRM_opt = *state;
}
/* Switches ALL optimizations off */
-void all_optimizations_off(void) {
+void all_optimizations_off(void)
+{
libFIRM_opt = 0;
}
#ifdef _DEBUG
/* only for debugging */
-void firm_show_flags(FILE *f) {
+void firm_show_flags(FILE *f)
+{
if (! f)
f = stdout;
printf("Firm optimization state:\n");
LC_OPT_LAST
};
-void firm_init_flags(void) {
+void firm_init_flags(void)
+{
lc_opt_entry_t *grp = lc_opt_get_grp(firm_opt_get_root(), "opt");
lc_opt_add_table(grp, firm_flags);
}
firm_verification_t opt_do_node_verification = FIRM_VERIFICATION_ON;
-void do_node_verification(firm_verification_t mode) {
+void do_node_verification(firm_verification_t mode)
+{
opt_do_node_verification = mode;
}
* This is useful if a node returning a tuple is removed, but the Projs
* extracting values from the tuple are not available.
*/
-void turn_into_tuple(ir_node *node, int arity) {
+void turn_into_tuple(ir_node *node, int arity)
+{
assert(node);
set_irn_op(node, op_Tuple);
if (get_irn_arity(node) == arity) {
* Since `new' may be bigger than `old' replace `old'
* by an op_Id which is smaller than everything.
*/
-void exchange(ir_node *old, ir_node *nw) {
+void exchange(ir_node *old, ir_node *nw)
+{
ir_graph *irg;
assert(old && nw);
* all Proj nodes to there predecessors and all
* partBlocks to there MacroBlock header.
*/
-static void collect_phiprojs_walker(ir_node *n, void *env) {
+static void collect_phiprojs_walker(ir_node *n, void *env)
+{
ir_node *pred;
(void) env;
}
}
-void collect_phiprojs(ir_graph *irg) {
+void collect_phiprojs(ir_graph *irg)
+{
assert((ir_resources_reserved(irg) & (IR_RESOURCE_IRN_LINK|IR_RESOURCE_PHI_LIST)) ==
(IR_RESOURCE_IRN_LINK|IR_RESOURCE_PHI_LIST));
irg_walk_graph(irg, firm_clear_node_and_phi_links, collect_phiprojs_walker, NULL);
* Moves node and all predecessors of node from from_bl to to_bl.
* Does not move predecessors of Phi nodes (or block nodes).
*/
-static void move(ir_node *node, ir_node *from_bl, ir_node *to_bl) {
+static void move(ir_node *node, ir_node *from_bl, ir_node *to_bl)
+{
int i, arity;
/* move this node */
}
}
-void part_block(ir_node *node) {
+void part_block(ir_node *node)
+{
ir_node *new_block, *old_block, *mbh;
ir_node *phi, *jmp, *next, *block;
ir_graph *rem = current_ir_graph;
}
/* kill a node by setting its predecessors to Bad and finally exchange the node by Bad itself. */
-void kill_node(ir_node *node) {
+void kill_node(ir_node *node)
+{
ir_graph *irg = get_irn_irg(node);
ir_node *bad = get_irg_bad(irg);
int i;
/**
* A wrapper around optimize_inplace_2() to be called from a walker.
*/
-static void optimize_in_place_wrapper(ir_node *n, void *env) {
+static void optimize_in_place_wrapper(ir_node *n, void *env)
+{
ir_node *optimized = optimize_in_place_2(n);
(void) env;
*
* @note current_ir_graph must be set
*/
-static inline void do_local_optimize(ir_node *n) {
+static inline void do_local_optimize(ir_node *n)
+{
/* Handle graph state */
assert(get_irg_phase_state(current_ir_graph) != phase_building);
}
/* Applies local optimizations (see iropt.h) to all nodes reachable from node n */
-void local_optimize_node(ir_node *n) {
+void local_optimize_node(ir_node *n)
+{
ir_graph *rem = current_ir_graph;
current_ir_graph = get_irn_irg(n);
/**
* Block-Walker: uses dominance depth to mark dead blocks.
*/
-static void kill_dead_blocks(ir_node *block, void *env) {
+static void kill_dead_blocks(ir_node *block, void *env)
+{
(void) env;
if (get_Block_dom_depth(block) < 0) {
}
/* Applies local optimizations (see iropt.h) to all nodes reachable from node n. */
-void local_optimize_graph(ir_graph *irg) {
+void local_optimize_graph(ir_graph *irg)
+{
ir_graph *rem = current_ir_graph;
current_ir_graph = irg;
* Enqueue all users of a node to a wait queue.
* Handles mode_T nodes.
*/
-static void enqueue_users(ir_node *n, pdeq *waitq) {
+static void enqueue_users(ir_node *n, pdeq *waitq)
+{
const ir_edge_t *edge;
foreach_out_edge(n, edge) {
* Optimizes all nodes and enqueue it's users
* if done.
*/
-static void opt_walker(ir_node *n, void *env) {
+static void opt_walker(ir_node *n, void *env)
+{
pdeq *waitq = env;
ir_node *optimized;
}
/* Applies local optimizations to all nodes in the graph until fixpoint. */
-int optimize_graph_df(ir_graph *irg) {
+int optimize_graph_df(ir_graph *irg)
+{
pdeq *waitq = new_pdeq();
ir_graph *rem = current_ir_graph;
ir_node *end;
static size_t additional_graph_data_size = 0;
ir_graph *current_ir_graph;
-ir_graph *get_current_ir_graph(void) {
+ir_graph *get_current_ir_graph(void)
+{
return current_ir_graph;
}
-void set_current_ir_graph(ir_graph *graph) {
+void set_current_ir_graph(ir_graph *graph)
+{
current_ir_graph = graph;
}
#ifdef INTERPROCEDURAL_VIEW
int firm_interprocedural_view = 0;
-int (get_interprocedural_view)(void) {
+int (get_interprocedural_view)(void)
+{
return _get_interprocedural_view();
}
-void (set_interprocedural_view)(int state) {
+void (set_interprocedural_view)(int state)
+{
firm_interprocedural_view = state;
/* set function vectors for faster access */
static ident *frame_type_suffix = NULL;
/* initialize the IR graph module */
-void firm_init_irgraph(void) {
+void firm_init_irgraph(void)
+{
frame_type_suffix = new_id_from_str(FRAME_TP_SUFFIX);
forbid_new_data = 1;
}
* allocated (new_r_ir_graph, new_const_code_irg).
* @return Memory for a new graph.
*/
-static ir_graph *alloc_graph(void) {
+static ir_graph *alloc_graph(void)
+{
ir_graph *res;
size_t size = sizeof(ir_graph) + additional_graph_data_size;
char *ptr = xmalloc(size);
/**
* Frees an allocated IR graph
*/
-static void free_graph(ir_graph *irg) {
+static void free_graph(ir_graph *irg)
+{
char *ptr = (char *)irg;
int i;
* @param irg the graph
* @param n_loc number of locals
*/
-void irg_set_nloc(ir_graph *res, int n_loc) {
+void irg_set_nloc(ir_graph *res, int n_loc)
+{
assert(res->phase_state == phase_building);
if (get_opt_precise_exc_context()) {
Further it allocates several datastructures needed for graph construction
and optimization.
*/
-ir_graph *new_r_ir_graph(ir_entity *ent, int n_loc) {
+ir_graph *new_r_ir_graph(ir_entity *ent, int n_loc)
+{
ir_graph *res;
ir_node *first_block;
ir_node *end, *start, *start_block, *initial_mem, *projX, *bad;
return res;
}
-ir_graph *new_ir_graph(ir_entity *ent, int n_loc) {
+ir_graph *new_ir_graph(ir_entity *ent, int n_loc)
+{
ir_graph *res = new_r_ir_graph(ent, n_loc);
add_irp_irg(res); /* remember this graph global. */
return res;
* @param n A node from the original method graph.
* @param env The copied graph.
*/
-static void copy_all_nodes(ir_node *n, void *env) {
+static void copy_all_nodes(ir_node *n, void *env)
+{
ir_graph *irg = env;
ir_op *op = get_irn_op(n);
ir_node *nn;
* The copied nodes are set as link of their original nodes. The links of
* "irn" predecessors are the predecessors of copied node.
*/
-static void set_all_preds(ir_node *irn, void *env) {
+static void set_all_preds(ir_node *irn, void *env)
+{
int i;
ir_node *nn, *pred;
(void) env;
/*
* Create a new graph that is a copy of a given one.
*/
-ir_graph *create_irg_copy(ir_graph *irg) {
+ir_graph *create_irg_copy(ir_graph *irg)
+{
ir_graph *res;
res = alloc_graph();
#ifdef DEBUG_libfirm
/* Outputs a unique number for this node */
-long get_irg_graph_nr(const ir_graph *irg) {
+long get_irg_graph_nr(const ir_graph *irg)
+{
return irg->graph_nr;
}
#else
-long get_irg_graph_nr(const ir_graph *irg) {
+long get_irg_graph_nr(const ir_graph *irg)
+{
return PTR_TO_INT(irg);
}
#endif
-int get_irg_idx(const ir_graph *irg) {
+int get_irg_idx(const ir_graph *irg)
+{
return irg->index;
}
-ir_node *(get_idx_irn)(ir_graph *irg, unsigned idx) {
+ir_node *(get_idx_irn)(ir_graph *irg, unsigned idx)
+{
return _get_idx_irn(irg, idx);
}
}
/* Returns the value parameter type of an IR graph. */
-ir_type *get_irg_value_param_type(ir_graph *irg) {
+ir_type *get_irg_value_param_type(ir_graph *irg)
+{
ir_entity *ent = get_irg_entity(irg);
ir_type *mtp = get_entity_type(ent);
return get_method_value_param_type(mtp);
}
int
-get_irg_n_locs(ir_graph *irg) {
+get_irg_n_locs(ir_graph *irg)
+{
if (get_opt_precise_exc_context())
return irg->n_loc - 1 - 1;
else
}
void
-set_irg_n_loc(ir_graph *irg, int n_loc) {
+set_irg_n_loc(ir_graph *irg, int n_loc)
+{
if (get_opt_precise_exc_context())
irg->n_loc = n_loc + 1 + 1;
else
*
* Implementation is GLIBC specific as is uses the internal _obstack_chunk implementation.
*/
-int node_is_in_irgs_storage(ir_graph *irg, ir_node *n) {
+int node_is_in_irgs_storage(ir_graph *irg, ir_node *n)
+{
struct _obstack_chunk *p;
/*
_set_irg_loopinfo_inconsistent(irg);
}
-void set_irp_loopinfo_inconsistent(void) {
+void set_irp_loopinfo_inconsistent(void)
+{
int i;
for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
set_irg_loopinfo_inconsistent(get_irp_irg(i));
}
/* Return the floating point model of this graph. */
-unsigned (get_irg_fp_model)(const ir_graph *irg) {
+unsigned (get_irg_fp_model)(const ir_graph *irg)
+{
return _get_irg_fp_model(irg);
}
/* Sets the floating point model for this graph. */
-void set_irg_fp_model(ir_graph *irg, unsigned model) {
+void set_irg_fp_model(ir_graph *irg, unsigned model)
+{
irg->fp_model = model;
}
* @param n the node
* @param env ignored
*/
-static void normalize_proj_walker(ir_node *n, void *env) {
+static void normalize_proj_walker(ir_node *n, void *env)
+{
(void) env;
if (is_Proj(n)) {
ir_node *pred = get_Proj_pred(n);
}
/* move Proj nodes into the same block as its predecessors */
-void normalize_proj_nodes(ir_graph *irg) {
+void normalize_proj_nodes(ir_graph *irg)
+{
irg_walk_graph(irg, NULL, normalize_proj_walker, NULL);
set_irg_outs_inconsistent(irg);
}
/* set a description for local value n */
-void set_irg_loc_description(ir_graph *irg, int n, void *description) {
+void set_irg_loc_description(ir_graph *irg, int n, void *description)
+{
assert(0 <= n && n < irg->n_loc);
if (! irg->loc_descriptions)
}
/* get the description for local value n */
-void *get_irg_loc_description(ir_graph *irg, int n) {
+void *get_irg_loc_description(ir_graph *irg, int n)
+{
assert(0 <= n && n < irg->n_loc);
return irg->loc_descriptions ? irg->loc_descriptions[n] : NULL;
}
#ifndef NDEBUG
-void ir_reserve_resources(ir_graph *irg, ir_resources_t resources) {
+void ir_reserve_resources(ir_graph *irg, ir_resources_t resources)
+{
assert((resources & ~IR_RESOURCE_LOCAL_MASK) == 0);
assert((irg->reserved_resources & resources) == 0);
irg->reserved_resources |= resources;
}
-void ir_free_resources(ir_graph *irg, ir_resources_t resources) {
+void ir_free_resources(ir_graph *irg, ir_resources_t resources)
+{
assert((irg->reserved_resources & resources) == resources);
irg->reserved_resources &= ~resources;
}
-ir_resources_t ir_resources_reserved(const ir_graph *irg) {
+ir_resources_t ir_resources_reserved(const ir_graph *irg)
+{
return irg->reserved_resources;
}
#endif /* NDEBUG */
/* Returns an estimated node count of the irg. */
-unsigned (get_irg_estimated_node_cnt)(const ir_graph *irg) {
+unsigned (get_irg_estimated_node_cnt)(const ir_graph *irg)
+{
return _get_irg_estimated_node_cnt(irg);
}
/* Returns the last irn index for this graph. */
-unsigned get_irg_last_idx(const ir_graph *irg) {
+unsigned get_irg_last_idx(const ir_graph *irg)
+{
return irg->last_node_idx;
}
/* register additional space in an IR graph */
-size_t register_additional_graph_data(size_t size) {
+size_t register_additional_graph_data(size_t size)
+{
assert(!forbid_new_data && "Too late to register additional node data");
if (forbid_new_data)
/**
* Insert all ir_graphs in irg_set, that are (transitive) reachable.
*/
-static void collect_irgs(ir_node * node, pset_new_t *irg_set) {
+static void collect_irgs(ir_node * node, pset_new_t *irg_set)
+{
if (is_Call(node)) {
int i;
for (i = get_Call_n_callees(node) - 1; i >= 0; --i) {
* @return number of visited nodes
*/
static unsigned
-irg_walk_2_pre(ir_node *node, irg_walk_func *pre, void * env) {
+irg_walk_2_pre(ir_node *node, irg_walk_func *pre, void * env)
+{
int i;
unsigned cnt = 1;
ir_graph *irg = current_ir_graph;
* @return number of visited nodes
*/
static unsigned
-irg_walk_2_post(ir_node *node, irg_walk_func *post, void * env) {
+irg_walk_2_post(ir_node *node, irg_walk_func *post, void * env)
+{
int i;
unsigned cnt = 1;
ir_graph *irg = current_ir_graph;
* @return number of visited nodes
*/
static unsigned
-irg_walk_2_both(ir_node *node, irg_walk_func *pre, irg_walk_func *post, void * env) {
+irg_walk_2_both(ir_node *node, irg_walk_func *pre, irg_walk_func *post, void * env)
+{
int i;
unsigned cnt = 1;
ir_graph *irg = current_ir_graph;
/*
* walk over a graph
*/
-void irg_walk_graph(ir_graph *irg, irg_walk_func *pre, irg_walk_func *post, void *env) {
+void irg_walk_graph(ir_graph *irg, irg_walk_func *pre, irg_walk_func *post, void *env)
+{
ir_graph * rem = current_ir_graph;
hook_irg_walk(irg, (generic_func *)pre, (generic_func *)post);
/* Executes irg_walk(end, pre, post, env) for all irgraphs in irprog.
Sets current_ir_graph properly for each walk. Conserves current
current_ir_graph. */
-void all_irg_walk(irg_walk_func *pre, irg_walk_func *post, void *env) {
+void all_irg_walk(irg_walk_func *pre, irg_walk_func *post, void *env)
+{
int i, n;
ir_graph *irg;
* @return number of visited nodes
*/
static unsigned
-irg_walk_in_or_dep_2_pre(ir_node *node, irg_walk_func *pre, void *env) {
+irg_walk_in_or_dep_2_pre(ir_node *node, irg_walk_func *pre, void *env)
+{
int i;
unsigned cnt = 1;
ir_graph *irg = current_ir_graph;
* @return number of visited nodes
*/
static unsigned
-irg_walk_in_or_dep_2_post(ir_node *node, irg_walk_func *post, void *env) {
+irg_walk_in_or_dep_2_post(ir_node *node, irg_walk_func *post, void *env)
+{
int i;
unsigned cnt = 1;
ir_graph *irg = current_ir_graph;
* @return number of visited nodes
*/
static unsigned
-irg_walk_in_or_dep_2_both(ir_node *node, irg_walk_func *pre, irg_walk_func *post, void *env) {
+irg_walk_in_or_dep_2_both(ir_node *node, irg_walk_func *pre, irg_walk_func *post, void *env)
+{
int i;
unsigned cnt = 1;
ir_graph *irg = current_ir_graph;
/*
* Walk over a graph. Follow all edges (including dependencies)
*/
-void irg_walk_in_or_dep_graph(ir_graph *irg, irg_walk_func *pre, irg_walk_func *post, void *env) {
+void irg_walk_in_or_dep_graph(ir_graph *irg, irg_walk_func *pre, irg_walk_func *post, void *env)
+{
ir_graph * rem = current_ir_graph;
hook_irg_walk(irg, (generic_func *)pre, (generic_func *)post);
* of node n.
*/
static inline ir_graph *
-switch_irg(ir_node *n, int index) {
+switch_irg(ir_node *n, int index)
+{
ir_graph *old_current = current_ir_graph;
if (get_interprocedural_view()) {
}
/* Walks all irgs in interprocedural view. Visits each node only once. */
-void cg_walk(irg_walk_func *pre, irg_walk_func *post, void *env) {
+void cg_walk(irg_walk_func *pre, irg_walk_func *post, void *env)
+{
int i;
ir_graph *rem = current_ir_graph;
int rem_view = get_interprocedural_view();
/***************************************************************************/
/* Walks back from n until it finds a real cf op. */
-static ir_node *get_cf_op(ir_node *n) {
+static ir_node *get_cf_op(ir_node *n)
+{
while (!is_cfop(n) && !is_fragile_op(n) && !is_Bad(n)) {
n = skip_Id(n);
n = skip_Tuple(n);
/*
* Additionally walk over all anchors. Do NOT increase the visit flag.
*/
-void irg_walk_anchors(ir_graph *irg, irg_walk_func *pre, irg_walk_func *post, void *env) {
+void irg_walk_anchors(ir_graph *irg, irg_walk_func *pre, irg_walk_func *post, void *env)
+{
ir_graph * rem = current_ir_graph;
current_ir_graph = irg;
}
/* Walks over all code in const_code_irg. */
-void walk_const_code(irg_walk_func *pre, irg_walk_func *post, void *env) {
+void walk_const_code(irg_walk_func *pre, irg_walk_func *post, void *env)
+{
int i, j, n_types;
walk_env my_env;
ir_segment_t s;
/**
* compare two block_entries
*/
-static int addr_cmp(const void *elt, const void *key) {
+static int addr_cmp(const void *elt, const void *key)
+{
const block_entry_t *e1 = elt;
const block_entry_t *e2 = key;
/**
* Traverse a block in pre order.
*/
-static void traverse_block_pre(ir_node *block, block_entry_t *entry, irg_walk_func *pre, void *env) {
+static void traverse_block_pre(ir_node *block, block_entry_t *entry, irg_walk_func *pre, void *env)
+{
int j;
for (j = ARR_LEN(entry->cf_list) - 1; j >= 0; --j) {
/**
* Traverse a block in post order.
*/
-void traverse_block_post(ir_node *block, block_entry_t *entry, irg_walk_func *post, void *env) {
+void traverse_block_post(ir_node *block, block_entry_t *entry, irg_walk_func *post, void *env)
+{
int j, n;
post(block, env);
/**
* traverse the pre order only, from End to Start
*/
-static void traverse_pre(blk_collect_data_t *blks, irg_walk_func *pre, void *env) {
+static void traverse_pre(blk_collect_data_t *blks, irg_walk_func *pre, void *env)
+{
int i;
for (i = ARR_LEN(blks->blk_list) - 1; i >= 0; --i) {
/**
* traverse the post order only, from Start to End
*/
-static void traverse_post(blk_collect_data_t *blks, irg_walk_func *post, void *env) {
+static void traverse_post(blk_collect_data_t *blks, irg_walk_func *post, void *env)
+{
int i, k;
for (i = 0, k = ARR_LEN(blks->blk_list); i < k; ++i) {
/**
* Do the traversal.
*/
-static void traverse_blocks(blk_collect_data_t *blks, irg_walk_func *pre, irg_walk_func *post, void *env) {
+static void traverse_blocks(blk_collect_data_t *blks, irg_walk_func *pre, irg_walk_func *post, void *env)
+{
if (!post) traverse_pre (blks, pre, env);
else if (!pre) traverse_post(blks, post, env);
else traverse_both(blks, pre, post, env);
/**
* Dom block walker. Visit all nodes in pre order.
*/
-static void dom_block_visit_pre(ir_node *block, void *env) {
+static void dom_block_visit_pre(ir_node *block, void *env)
+{
dom_traversal_t *ctx = env;
block_entry_t *entry = block_find_entry(block, ctx->blks);
/**
* Dom block walker. Visit all nodes in post order.
*/
-static void dom_block_visit_post(ir_node *block, void *env) {
+static void dom_block_visit_post(ir_node *block, void *env)
+{
dom_traversal_t *ctx = env;
block_entry_t *entry = block_find_entry(block, ctx->blks);
/**
* Dom block walker. Visit all nodes in pre order, then in post order.
*/
-static void dom_block_visit_both(ir_node *block, void *env) {
+static void dom_block_visit_both(ir_node *block, void *env)
+{
dom_traversal_t *ctx = env;
block_entry_t *entry = block_find_entry(block, ctx->blks);
/**
* Do the traversal in the dominator tree in top-down order.
*/
-static void traverse_dom_blocks_top_down(blk_collect_data_t* blks, irg_walk_func *pre, irg_walk_func *post, void *env) {
+static void traverse_dom_blocks_top_down(blk_collect_data_t* blks, irg_walk_func *pre, irg_walk_func *post, void *env)
+{
dom_traversal_t ctx;
ctx.blks = blks;
ir_free_resources(irg, IR_RESOURCE_IRN_VISITED);
}
-void irg_walk_blkwise_graph(ir_graph *irg, irg_walk_func *pre, irg_walk_func *post, void *env) {
+void irg_walk_blkwise_graph(ir_graph *irg, irg_walk_func *pre, irg_walk_func *post, void *env)
+{
ir_graph * rem = current_ir_graph;
hook_irg_walk_blkwise(irg, (generic_func *)pre, (generic_func *)post);
current_ir_graph = rem;
}
-void irg_walk_in_or_dep_blkwise_graph(ir_graph *irg, irg_walk_func *pre, irg_walk_func *post, void *env) {
+void irg_walk_in_or_dep_blkwise_graph(ir_graph *irg, irg_walk_func *pre, irg_walk_func *post, void *env)
+{
ir_graph * rem = current_ir_graph;
hook_irg_walk_blkwise(irg, (generic_func *)pre, (generic_func *)post);
current_ir_graph = rem;
}
-void irg_walk_blkwise_dom_top_down(ir_graph *irg, irg_walk_func *pre, irg_walk_func *post, void *env) {
+void irg_walk_blkwise_dom_top_down(ir_graph *irg, irg_walk_func *pre, irg_walk_func *post, void *env)
+{
ir_graph * rem = current_ir_graph;
hook_irg_walk_blkwise(irg, (generic_func *)pre, (generic_func *)post);
hook_entry_t *hooks[hook_last];
/* register a hook */
-void register_hook(hook_type_t hook, hook_entry_t *entry) {
+void register_hook(hook_type_t hook, hook_entry_t *entry)
+{
/* check if a hook function is specified. It's a union, so no matter which one */
if (! entry->hook._hook_turn_into_id)
return;
}
/* unregister a hook */
-void unregister_hook(hook_type_t hook, hook_entry_t *entry) {
+void unregister_hook(hook_type_t hook, hook_entry_t *entry)
+{
hook_entry_t *p;
if (hooks[hook] == entry) {
* @param iterator Pointer to the nodemap iterator.
* @returns Next element in the nodemap or NULL
*/
-ir_node *ir_lnk_nodemap_iterator_next(ir_lnk_nodemap_iterator_t *iterator) {
+ir_node *ir_lnk_nodemap_iterator_next(ir_lnk_nodemap_iterator_t *iterator)
+{
ir_node *res;
if (iterator->iter == &iterator->nodemap->elem_list)
return NULL;
/* Inserts a node into a linked nodeset. */
-int ir_lnk_nodeset_insert(ir_lnk_nodeset_t *nodeset, ir_node *node) {
+int ir_lnk_nodeset_insert(ir_lnk_nodeset_t *nodeset, ir_node *node)
+{
ir_lnk_nodeset_entry_t *entry = _ir_lnk_nodeset_insert(nodeset, node);
if (entry->list.next == NULL) {
* @param iterator Pointer to the nodeset iterator.
* @returns Next element in the nodeset or NULL
*/
-ir_node *ir_lnk_nodeset_iterator_next(ir_lnk_nodeset_iterator_t *iterator) {
+ir_node *ir_lnk_nodeset_iterator_next(ir_lnk_nodeset_iterator_t *iterator)
+{
ir_node *res;
if (iterator->iter == &iterator->nodeset->elem_list)
return NULL;
*
* TODO: Add other fields
**/
-static inline int modes_are_equal(const ir_mode *m, const ir_mode *n) {
+static inline int modes_are_equal(const ir_mode *m, const ir_mode *n)
+{
if (m == n) return 1;
if (m->sort == n->sort &&
m->arithmetic == n->arithmetic &&
* a pointer on an equal mode already in the array, NULL if
* none found
*/
-static ir_mode *find_mode(const ir_mode *m) {
+static ir_mode *find_mode(const ir_mode *m)
+{
int i;
for (i = ARR_LEN(mode_list) - 1; i >= 0; --i) {
ir_mode *n = mode_list[i];
#ifdef FIRM_STATISTICS
/* return the mode index, only needed for statistics */
-int stat_find_mode_index(const ir_mode *m) {
+int stat_find_mode_index(const ir_mode *m)
+{
int i;
for (i = ARR_LEN(mode_list) - 1; i >= 0; --i) {
ir_mode *n = mode_list[i];
}
/* return the mode for a given index, only needed for statistics */
-ir_mode *stat_mode_for_index(int idx) {
+ir_mode *stat_mode_for_index(int idx)
+{
if (0 <= idx && idx < ARR_LEN(mode_list))
return mode_list[idx];
return NULL;
/**
* sets special values of modes
*/
-static void set_mode_values(ir_mode* mode) {
+static void set_mode_values(ir_mode* mode)
+{
switch (get_mode_sort(mode)) {
case irms_reference:
case irms_int_number:
ir_mode *get_modeBAD(void) { return mode_BAD; }
-ir_mode *(get_modeP_code)(void) {
+ir_mode *(get_modeP_code)(void)
+{
return _get_modeP_code();
}
-ir_mode *(get_modeP_data)(void) {
+ir_mode *(get_modeP_data)(void)
+{
return _get_modeP_data();
}
-void set_modeP_code(ir_mode *p) {
+void set_modeP_code(ir_mode *p)
+{
assert(mode_is_reference(p));
mode_P_code = p;
}
-void set_modeP_data(ir_mode *p) {
+void set_modeP_data(ir_mode *p)
+{
assert(mode_is_reference(p));
mode_P_data = p;
}
*
* @param new_mode The new mode template.
*/
-static ir_mode *register_mode(const ir_mode *new_mode) {
+static ir_mode *register_mode(const ir_mode *new_mode)
+{
ir_mode *mode = NULL;
assert(new_mode);
}
/* Functions for the direct access to all attributes of an ir_mode */
-ident *(get_mode_ident)(const ir_mode *mode) {
+ident *(get_mode_ident)(const ir_mode *mode)
+{
return _get_mode_ident(mode);
}
-const char *get_mode_name(const ir_mode *mode) {
+const char *get_mode_name(const ir_mode *mode)
+{
return get_id_str(mode->name);
}
-ir_mode_sort (get_mode_sort)(const ir_mode* mode) {
+ir_mode_sort (get_mode_sort)(const ir_mode* mode)
+{
return _get_mode_sort(mode);
}
-unsigned (get_mode_size_bits)(const ir_mode *mode) {
+unsigned (get_mode_size_bits)(const ir_mode *mode)
+{
return _get_mode_size_bits(mode);
}
-unsigned (get_mode_size_bytes)(const ir_mode *mode) {
+unsigned (get_mode_size_bytes)(const ir_mode *mode)
+{
return _get_mode_size_bytes(mode);
}
-int (get_mode_sign)(const ir_mode *mode) {
+int (get_mode_sign)(const ir_mode *mode)
+{
return _get_mode_sign(mode);
}
-ir_mode_arithmetic (get_mode_arithmetic)(const ir_mode *mode) {
+ir_mode_arithmetic (get_mode_arithmetic)(const ir_mode *mode)
+{
return get_mode_arithmetic(mode);
}
* whether shift applies modulo to value of bits to shift. Asserts
* if mode is not irms_int_number.
*/
-unsigned int (get_mode_modulo_shift)(const ir_mode *mode) {
+unsigned int (get_mode_modulo_shift)(const ir_mode *mode)
+{
return _get_mode_modulo_shift(mode);
}
-unsigned int (get_mode_n_vector_elems)(const ir_mode *mode) {
+unsigned int (get_mode_n_vector_elems)(const ir_mode *mode)
+{
return _get_mode_vector_elems(mode);
}
-void *(get_mode_link)(const ir_mode *mode) {
+void *(get_mode_link)(const ir_mode *mode)
+{
return _get_mode_link(mode);
}
-void (set_mode_link)(ir_mode *mode, void *l) {
+void (set_mode_link)(ir_mode *mode, void *l)
+{
_set_mode_link(mode, l);
}
-tarval *get_mode_min(ir_mode *mode) {
+tarval *get_mode_min(ir_mode *mode)
+{
assert(mode);
assert(mode_is_data(mode));
return mode->min;
}
-tarval *get_mode_max(ir_mode *mode) {
+tarval *get_mode_max(ir_mode *mode)
+{
assert(mode);
assert(mode_is_data(mode));
return mode->max;
}
-tarval *get_mode_null(ir_mode *mode) {
+tarval *get_mode_null(ir_mode *mode)
+{
assert(mode);
assert(mode_is_datab(mode));
return mode->null;
}
-tarval *get_mode_one(ir_mode *mode) {
+tarval *get_mode_one(ir_mode *mode)
+{
assert(mode);
assert(mode_is_datab(mode));
return mode->one;
}
-tarval *get_mode_minus_one(ir_mode *mode) {
+tarval *get_mode_minus_one(ir_mode *mode)
+{
assert(mode);
assert(mode_is_data(mode));
return mode->minus_one;
}
-tarval *get_mode_all_one(ir_mode *mode) {
+tarval *get_mode_all_one(ir_mode *mode)
+{
assert(mode);
assert(mode_is_datab(mode));
return mode->all_one;
}
-tarval *get_mode_infinite(ir_mode *mode) {
+tarval *get_mode_infinite(ir_mode *mode)
+{
assert(mode);
assert(mode_is_float(mode));
return get_tarval_plus_inf(mode);
}
-tarval *get_mode_NAN(ir_mode *mode) {
+tarval *get_mode_NAN(ir_mode *mode)
+{
assert(mode);
assert(mode_is_float(mode));
return get_tarval_nan(mode);
}
-int is_mode(const void *thing) {
+int is_mode(const void *thing)
+{
return get_kind(thing) == k_ir_mode;
}
-int (mode_is_signed)(const ir_mode *mode) {
+int (mode_is_signed)(const ir_mode *mode)
+{
return _mode_is_signed(mode);
}
-int (mode_is_float)(const ir_mode *mode) {
+int (mode_is_float)(const ir_mode *mode)
+{
return _mode_is_float(mode);
}
-int (mode_is_int)(const ir_mode *mode) {
+int (mode_is_int)(const ir_mode *mode)
+{
return _mode_is_int(mode);
}
-int (mode_is_reference)(const ir_mode *mode) {
+int (mode_is_reference)(const ir_mode *mode)
+{
return _mode_is_reference(mode);
}
-int (mode_is_num)(const ir_mode *mode) {
+int (mode_is_num)(const ir_mode *mode)
+{
return _mode_is_num(mode);
}
-int (mode_is_data)(const ir_mode *mode) {
+int (mode_is_data)(const ir_mode *mode)
+{
return _mode_is_data(mode);
}
-int (mode_is_datab)(const ir_mode *mode) {
+int (mode_is_datab)(const ir_mode *mode)
+{
return _mode_is_datab(mode);
}
-int (mode_is_dataM)(const ir_mode *mode) {
+int (mode_is_dataM)(const ir_mode *mode)
+{
return _mode_is_dataM(mode);
}
-int (mode_is_float_vector)(const ir_mode *mode) {
+int (mode_is_float_vector)(const ir_mode *mode)
+{
return _mode_is_float_vector(mode);
}
-int (mode_is_int_vector)(const ir_mode *mode) {
+int (mode_is_int_vector)(const ir_mode *mode)
+{
return _mode_is_int_vector(mode);
}
/* Returns true if sm can be converted to lm without loss. */
-int smaller_mode(const ir_mode *sm, const ir_mode *lm) {
+int smaller_mode(const ir_mode *sm, const ir_mode *lm)
+{
int sm_bits, lm_bits;
assert(sm);
/* Returns true if a value of mode sm can be converted into mode lm
and backwards without loss. */
-int values_in_mode(const ir_mode *sm, const ir_mode *lm) {
+int values_in_mode(const ir_mode *sm, const ir_mode *lm)
+{
int sm_bits, lm_bits;
ir_mode_arithmetic arith;
}
/* Return the signed integer equivalent mode for a reference mode. */
-ir_mode *get_reference_mode_signed_eq(ir_mode *mode) {
+ir_mode *get_reference_mode_signed_eq(ir_mode *mode)
+{
assert(mode_is_reference(mode));
return mode->eq_signed;
}
/* Sets the signed integer equivalent mode for a reference mode. */
-void set_reference_mode_signed_eq(ir_mode *ref_mode, ir_mode *int_mode) {
+void set_reference_mode_signed_eq(ir_mode *ref_mode, ir_mode *int_mode)
+{
assert(mode_is_reference(ref_mode));
assert(mode_is_int(int_mode));
ref_mode->eq_signed = int_mode;
}
/* Return the unsigned integer equivalent mode for a reference mode. */
-ir_mode *get_reference_mode_unsigned_eq(ir_mode *mode) {
+ir_mode *get_reference_mode_unsigned_eq(ir_mode *mode)
+{
assert(mode_is_reference(mode));
return mode->eq_unsigned;
}
/* Sets the unsigned integer equivalent mode for a reference mode. */
-void set_reference_mode_unsigned_eq(ir_mode *ref_mode, ir_mode *int_mode) {
+void set_reference_mode_unsigned_eq(ir_mode *ref_mode, ir_mode *int_mode)
+{
assert(mode_is_reference(ref_mode));
assert(mode_is_int(int_mode));
ref_mode->eq_unsigned = int_mode;
}
/* initialization, build the default modes */
-void init_mode(void) {
+void init_mode(void)
+{
ir_mode newmode;
obstack_init(&modes);
}
/* find a signed mode for an unsigned integer mode */
-ir_mode *find_unsigned_mode(const ir_mode *mode) {
+ir_mode *find_unsigned_mode(const ir_mode *mode)
+{
ir_mode n = *mode;
/* allowed for reference mode */
}
/* find an unsigned mode for a signed integer mode */
-ir_mode *find_signed_mode(const ir_mode *mode) {
+ir_mode *find_signed_mode(const ir_mode *mode)
+{
ir_mode n = *mode;
assert(mode->sort == irms_int_number);
}
/* finds an integer mode with 2*n bits for an integer mode with n bits. */
-ir_mode *find_double_bits_int_mode(const ir_mode *mode) {
+ir_mode *find_double_bits_int_mode(const ir_mode *mode)
+{
ir_mode n = *mode;
assert(mode->sort == irms_int_number && mode->arithmetic == irma_twos_complement);
* Returns non-zero if the given mode honors signed zeros, i.e.,
* a +0 and a -0 exist and are handled differently.
*/
-int mode_honor_signed_zeros(const ir_mode *mode) {
+int mode_honor_signed_zeros(const ir_mode *mode)
+{
/* for floating point, we know that IEEE 754 has +0 and -0,
* but always handles them identically.
*/
*
* This does NOT happen on IEEE 754.
*/
-int mode_overflow_on_unary_Minus(const ir_mode *mode) {
+int mode_overflow_on_unary_Minus(const ir_mode *mode)
+{
if (mode->sort == irms_float_number)
return mode->arithmetic == irma_ieee754 ? 0 : 1;
return 1;
* This is normally true for integer modes, not for floating
* point modes.
*/
-int mode_wrap_around(const ir_mode *mode) {
+int mode_wrap_around(const ir_mode *mode)
+{
/* FIXME: better would be an extra mode property */
return mode_is_int(mode);
}
* reinterpret cast (ie. only the bit pattern is reinterpreted,
* no conversion is done)
*/
-int is_reinterpret_cast(const ir_mode *src, const ir_mode *dst) {
+int is_reinterpret_cast(const ir_mode *src, const ir_mode *dst)
+{
ir_mode_arithmetic ma;
if (src == dst)
return ma == irma_twos_complement || ma == irma_ones_complement;
}
-void finish_mode(void) {
+void finish_mode(void)
+{
obstack_free(&modes, 0);
DEL_ARR_F(mode_list);
/**
* returns the pnc name from an pnc constant
*/
-const char *get_pnc_string(int pnc) {
+const char *get_pnc_string(int pnc)
+{
assert(pnc >= 0 && pnc <
(int) (sizeof(pnc_name_arr)/sizeof(pnc_name_arr[0])));
return pnc_name_arr[pnc];
/*
* Calculates the negated (Complement(R)) pnc condition.
*/
-pn_Cmp get_negated_pnc(long pnc, ir_mode *mode) {
+pn_Cmp get_negated_pnc(long pnc, ir_mode *mode)
+{
pnc ^= pn_Cmp_True;
/* do NOT add the Uo bit for non-floating point values */
}
/* Calculates the inversed (R^-1) pnc condition, i.e., "<" --> ">" */
-pn_Cmp get_inversed_pnc(long pnc) {
+pn_Cmp get_inversed_pnc(long pnc)
+{
long code = pnc & ~(pn_Cmp_Lt|pn_Cmp_Gt);
long lesser = pnc & pn_Cmp_Lt;
long greater = pnc & pn_Cmp_Gt;
/* register new space for every node */
-unsigned firm_register_additional_node_data(unsigned size) {
+unsigned firm_register_additional_node_data(unsigned size)
+{
assert(!forbid_new_data && "Too late to register additional node data");
if (forbid_new_data)
}
-void init_irnode(void) {
+void init_irnode(void)
+{
/* Forbid the addition of new data to an ir node. */
forbid_new_data = 1;
}
/*-- getting some parameters from ir_nodes --*/
-int (is_ir_node)(const void *thing) {
+int (is_ir_node)(const void *thing)
+{
return _is_ir_node(thing);
}
-int (get_irn_intra_arity)(const ir_node *node) {
+int (get_irn_intra_arity)(const ir_node *node)
+{
return _get_irn_intra_arity(node);
}
-int (get_irn_inter_arity)(const ir_node *node) {
+int (get_irn_inter_arity)(const ir_node *node)
+{
return _get_irn_inter_arity(node);
}
int (*_get_irn_arity)(const ir_node *node) = _get_irn_intra_arity;
-int (get_irn_arity)(const ir_node *node) {
+int (get_irn_arity)(const ir_node *node)
+{
return _get_irn_arity(node);
}
The order of the predecessors in this array is not guaranteed, except that
lists of operands as predecessors of Block or arguments of a Call are
consecutive. */
-ir_node **get_irn_in(const ir_node *node) {
+ir_node **get_irn_in(const ir_node *node)
+{
assert(node);
#ifdef INTERPROCEDURAL_VIEW
if (get_interprocedural_view()) { /* handle Filter and Block specially */
return node->in;
}
-void set_irn_in(ir_node *node, int arity, ir_node **in) {
+void set_irn_in(ir_node *node, int arity, ir_node **in)
+{
int i;
ir_node *** pOld_in;
ir_graph *irg = get_irn_irg(node);
memcpy((*pOld_in) + 1, in, sizeof(ir_node *) * arity);
}
-ir_node *(get_irn_intra_n)(const ir_node *node, int n) {
+ir_node *(get_irn_intra_n)(const ir_node *node, int n)
+{
return _get_irn_intra_n(node, n);
}
-ir_node *(get_irn_inter_n)(const ir_node *node, int n) {
+ir_node *(get_irn_inter_n)(const ir_node *node, int n)
+{
return _get_irn_inter_n(node, n);
}
ir_node *(*_get_irn_n)(const ir_node *node, int n) = _get_irn_intra_n;
-ir_node *(get_irn_n)(const ir_node *node, int n) {
+ir_node *(get_irn_n)(const ir_node *node, int n)
+{
return _get_irn_n(node, n);
}
-void set_irn_n(ir_node *node, int n, ir_node *in) {
+void set_irn_n(ir_node *node, int n, ir_node *in)
+{
assert(node && node->kind == k_ir_node);
assert(-1 <= n);
assert(n < get_irn_arity(node));
node->in[n + 1] = in;
}
-int add_irn_n(ir_node *node, ir_node *in) {
+int add_irn_n(ir_node *node, ir_node *in)
+{
int pos;
ir_graph *irg = get_irn_irg(node);
ARR_SHRINKLEN(get_irn_in(n), arity);
}
-int (get_irn_deps)(const ir_node *node) {
+int (get_irn_deps)(const ir_node *node)
+{
return _get_irn_deps(node);
}
-ir_node *(get_irn_dep)(const ir_node *node, int pos) {
+ir_node *(get_irn_dep)(const ir_node *node, int pos)
+{
return _get_irn_dep(node, pos);
}
-void (set_irn_dep)(ir_node *node, int pos, ir_node *dep) {
+void (set_irn_dep)(ir_node *node, int pos, ir_node *dep)
+{
_set_irn_dep(node, pos, dep);
}
-int add_irn_dep(ir_node *node, ir_node *dep) {
+int add_irn_dep(ir_node *node, ir_node *dep)
+{
int res = 0;
/* DEP edges are only allowed in backend phase */
return res;
}
-void add_irn_deps(ir_node *tgt, ir_node *src) {
+void add_irn_deps(ir_node *tgt, ir_node *src)
+{
int i, n;
for (i = 0, n = get_irn_deps(src); i < n; ++i)
}
-ir_mode *(get_irn_mode)(const ir_node *node) {
+ir_mode *(get_irn_mode)(const ir_node *node)
+{
return _get_irn_mode(node);
}
-void (set_irn_mode)(ir_node *node, ir_mode *mode) {
+void (set_irn_mode)(ir_node *node, ir_mode *mode)
+{
_set_irn_mode(node, mode);
}
/** Gets the string representation of the mode. */
-const char *get_irn_modename(const ir_node *node) {
+const char *get_irn_modename(const ir_node *node)
+{
assert(node);
return get_mode_name(node->mode);
}
-ident *get_irn_modeident(const ir_node *node) {
+ident *get_irn_modeident(const ir_node *node)
+{
assert(node);
return get_mode_ident(node->mode);
}
-ir_op *(get_irn_op)(const ir_node *node) {
+ir_op *(get_irn_op)(const ir_node *node)
+{
return _get_irn_op(node);
}
/* should be private to the library: */
-void (set_irn_op)(ir_node *node, ir_op *op) {
+void (set_irn_op)(ir_node *node, ir_op *op)
+{
_set_irn_op(node, op);
}
-unsigned (get_irn_opcode)(const ir_node *node) {
+unsigned (get_irn_opcode)(const ir_node *node)
+{
return _get_irn_opcode(node);
}
-const char *get_irn_opname(const ir_node *node) {
+const char *get_irn_opname(const ir_node *node)
+{
assert(node);
if (is_Phi0(node)) return "Phi0";
return get_id_str(node->op->name);
}
-ident *get_irn_opident(const ir_node *node) {
+ident *get_irn_opident(const ir_node *node)
+{
assert(node);
return node->op->name;
}
-ir_visited_t (get_irn_visited)(const ir_node *node) {
+ir_visited_t (get_irn_visited)(const ir_node *node)
+{
return _get_irn_visited(node);
}
-void (set_irn_visited)(ir_node *node, ir_visited_t visited) {
+void (set_irn_visited)(ir_node *node, ir_visited_t visited)
+{
_set_irn_visited(node, visited);
}
-void (mark_irn_visited)(ir_node *node) {
+void (mark_irn_visited)(ir_node *node)
+{
_mark_irn_visited(node);
}
-int (irn_visited)(const ir_node *node) {
+int (irn_visited)(const ir_node *node)
+{
return _irn_visited(node);
}
-int (irn_visited_else_mark)(ir_node *node) {
+int (irn_visited_else_mark)(ir_node *node)
+{
return _irn_visited_else_mark(node);
}
-void (set_irn_link)(ir_node *node, void *link) {
+void (set_irn_link)(ir_node *node, void *link)
+{
_set_irn_link(node, link);
}
-void *(get_irn_link)(const ir_node *node) {
+void *(get_irn_link)(const ir_node *node)
+{
return _get_irn_link(node);
}
-op_pin_state (get_irn_pinned)(const ir_node *node) {
+op_pin_state (get_irn_pinned)(const ir_node *node)
+{
return _get_irn_pinned(node);
}
-op_pin_state (is_irn_pinned_in_irg) (const ir_node *node) {
+op_pin_state (is_irn_pinned_in_irg) (const ir_node *node)
+{
return _is_irn_pinned_in_irg(node);
}
-void set_irn_pinned(ir_node *node, op_pin_state state) {
+void set_irn_pinned(ir_node *node, op_pin_state state)
+{
/* due to optimization an opt may be turned into a Tuple */
if (is_Tuple(node))
return;
}
/* Outputs a unique number for this node */
-long get_irn_node_nr(const ir_node *node) {
+long get_irn_node_nr(const ir_node *node)
+{
assert(node);
return node->node_nr;
}
-const_attr *get_irn_const_attr(ir_node *node) {
+const_attr *get_irn_const_attr(ir_node *node)
+{
assert(is_Const(node));
return &node->attr.con;
}
-long get_irn_proj_attr(ir_node *node) {
+long get_irn_proj_attr(ir_node *node)
+{
/* BEWARE: check for true Proj node here, no Filter */
assert(node->op == op_Proj);
return node->attr.proj;
}
-alloc_attr *get_irn_alloc_attr(ir_node *node) {
+alloc_attr *get_irn_alloc_attr(ir_node *node)
+{
assert(is_Alloc(node));
return &node->attr.alloc;
}
-free_attr *get_irn_free_attr(ir_node *node) {
+free_attr *get_irn_free_attr(ir_node *node)
+{
assert(is_Free(node));
return &node->attr.free;
}
-symconst_attr *get_irn_symconst_attr(ir_node *node) {
+symconst_attr *get_irn_symconst_attr(ir_node *node)
+{
assert(is_SymConst(node));
return &node->attr.symc;
}
-call_attr *get_irn_call_attr(ir_node *node) {
+call_attr *get_irn_call_attr(ir_node *node)
+{
assert(is_Call(node));
return &node->attr.call;
}
-sel_attr *get_irn_sel_attr(ir_node *node) {
+sel_attr *get_irn_sel_attr(ir_node *node)
+{
assert(is_Sel(node));
return &node->attr.sel;
}
-phi_attr *get_irn_phi_attr(ir_node *node) {
+phi_attr *get_irn_phi_attr(ir_node *node)
+{
return &node->attr.phi;
}
-block_attr *get_irn_block_attr(ir_node *node) {
+block_attr *get_irn_block_attr(ir_node *node)
+{
assert(is_Block(node));
return &node->attr.block;
}
-load_attr *get_irn_load_attr(ir_node *node) {
+load_attr *get_irn_load_attr(ir_node *node)
+{
assert(is_Load(node));
return &node->attr.load;
}
-store_attr *get_irn_store_attr(ir_node *node) {
+store_attr *get_irn_store_attr(ir_node *node)
+{
assert(is_Store(node));
return &node->attr.store;
}
-except_attr *get_irn_except_attr(ir_node *node) {
+except_attr *get_irn_except_attr(ir_node *node)
+{
assert(node->op == op_Div || node->op == op_Quot ||
node->op == op_DivMod || node->op == op_Mod || node->op == op_Call || node->op == op_Alloc || node->op == op_Bound);
return &node->attr.except;
}
-divmod_attr *get_irn_divmod_attr(ir_node *node) {
+divmod_attr *get_irn_divmod_attr(ir_node *node)
+{
assert(node->op == op_Div || node->op == op_Quot ||
node->op == op_DivMod || node->op == op_Mod);
return &node->attr.divmod;
}
-builtin_attr *get_irn_builtin_attr(ir_node *node) {
+builtin_attr *get_irn_builtin_attr(ir_node *node)
+{
assert(is_Builtin(node));
return &node->attr.builtin;
}
-void *(get_irn_generic_attr)(ir_node *node) {
+void *(get_irn_generic_attr)(ir_node *node)
+{
assert(is_ir_node(node));
return _get_irn_generic_attr(node);
}
-const void *(get_irn_generic_attr_const)(const ir_node *node) {
+const void *(get_irn_generic_attr_const)(const ir_node *node)
+{
assert(is_ir_node(node));
return _get_irn_generic_attr_const(node);
}
-unsigned (get_irn_idx)(const ir_node *node) {
+unsigned (get_irn_idx)(const ir_node *node)
+{
assert(is_ir_node(node));
return _get_irn_idx(node);
}
-int get_irn_pred_pos(ir_node *node, ir_node *arg) {
+int get_irn_pred_pos(ir_node *node, ir_node *arg)
+{
int i;
for (i = get_irn_arity(node) - 1; i >= 0; i--) {
if (get_irn_n(node, i) == arg)
/** manipulate fields of individual nodes **/
/* this works for all except Block */
-ir_node *get_nodes_block(const ir_node *node) {
+ir_node *get_nodes_block(const ir_node *node)
+{
assert(node->op != op_Block);
return get_irn_n(node, -1);
}
-void set_nodes_block(ir_node *node, ir_node *block) {
+void set_nodes_block(ir_node *node, ir_node *block)
+{
assert(node->op != op_Block);
set_irn_n(node, -1, block);
}
/* this works for all except Block */
-ir_node *get_nodes_MacroBlock(const ir_node *node) {
+ir_node *get_nodes_MacroBlock(const ir_node *node)
+{
assert(node->op != op_Block);
return get_Block_MacroBlock(get_irn_n(node, -1));
}
/* Test whether arbitrary node is frame pointer, i.e. Proj(pn_Start_P_frame_base)
* from Start. If so returns frame type, else Null. */
-ir_type *is_frame_pointer(const ir_node *n) {
+ir_type *is_frame_pointer(const ir_node *n)
+{
if (is_Proj(n) && (get_Proj_proj(n) == pn_Start_P_frame_base)) {
ir_node *start = get_Proj_pred(n);
if (is_Start(start)) {
/* Test whether arbitrary node is tls pointer, i.e. Proj(pn_Start_P_tls)
* from Start. If so returns tls type, else Null. */
-ir_type *is_tls_pointer(const ir_node *n) {
+ir_type *is_tls_pointer(const ir_node *n)
+{
if (is_Proj(n) && (get_Proj_proj(n) == pn_Start_P_tls)) {
ir_node *start = get_Proj_pred(n);
if (is_Start(start)) {
return NULL;
}
-ir_node **get_Block_cfgpred_arr(ir_node *node) {
+ir_node **get_Block_cfgpred_arr(ir_node *node)
+{
assert(is_Block(node));
return (ir_node **)&(get_irn_in(node)[1]);
}
-int (get_Block_n_cfgpreds)(const ir_node *node) {
+int (get_Block_n_cfgpreds)(const ir_node *node)
+{
return _get_Block_n_cfgpreds(node);
}
-ir_node *(get_Block_cfgpred)(const ir_node *node, int pos) {
+ir_node *(get_Block_cfgpred)(const ir_node *node, int pos)
+{
return _get_Block_cfgpred(node, pos);
}
-void set_Block_cfgpred(ir_node *node, int pos, ir_node *pred) {
+void set_Block_cfgpred(ir_node *node, int pos, ir_node *pred)
+{
assert(is_Block(node));
set_irn_n(node, pos, pred);
}
-int get_Block_cfgpred_pos(const ir_node *block, const ir_node *pred) {
+int get_Block_cfgpred_pos(const ir_node *block, const ir_node *pred)
+{
int i;
for (i = get_Block_n_cfgpreds(block) - 1; i >= 0; --i) {
return -1;
}
-ir_node *(get_Block_cfgpred_block)(const ir_node *node, int pos) {
+ir_node *(get_Block_cfgpred_block)(const ir_node *node, int pos)
+{
return _get_Block_cfgpred_block(node, pos);
}
-int get_Block_matured(const ir_node *node) {
+int get_Block_matured(const ir_node *node)
+{
assert(is_Block(node));
return (int)node->attr.block.is_matured;
}
-void set_Block_matured(ir_node *node, int matured) {
+void set_Block_matured(ir_node *node, int matured)
+{
assert(is_Block(node));
node->attr.block.is_matured = matured;
}
-ir_visited_t (get_Block_block_visited)(const ir_node *node) {
+ir_visited_t (get_Block_block_visited)(const ir_node *node)
+{
return _get_Block_block_visited(node);
}
-void (set_Block_block_visited)(ir_node *node, ir_visited_t visit) {
+void (set_Block_block_visited)(ir_node *node, ir_visited_t visit)
+{
_set_Block_block_visited(node, visit);
}
/* For this current_ir_graph must be set. */
-void (mark_Block_block_visited)(ir_node *node) {
+void (mark_Block_block_visited)(ir_node *node)
+{
_mark_Block_block_visited(node);
}
-int (Block_block_visited)(const ir_node *node) {
+int (Block_block_visited)(const ir_node *node)
+{
return _Block_block_visited(node);
}
-ir_node *get_Block_graph_arr(ir_node *node, int pos) {
+ir_node *get_Block_graph_arr(ir_node *node, int pos)
+{
assert(is_Block(node));
return node->attr.block.graph_arr[pos+1];
}
-void set_Block_graph_arr(ir_node *node, int pos, ir_node *value) {
+void set_Block_graph_arr(ir_node *node, int pos, ir_node *value)
+{
assert(is_Block(node));
node->attr.block.graph_arr[pos+1] = value;
}
#ifdef INTERPROCEDURAL_VIEW
-void set_Block_cg_cfgpred_arr(ir_node *node, int arity, ir_node *in[]) {
+void set_Block_cg_cfgpred_arr(ir_node *node, int arity, ir_node *in[])
+{
assert(is_Block(node));
if (node->attr.block.in_cg == NULL || arity != ARR_LEN(node->attr.block.in_cg) - 1) {
node->attr.block.in_cg = NEW_ARR_D(ir_node *, current_ir_graph->obst, arity + 1);
memcpy(node->attr.block.in_cg + 1, in, sizeof(ir_node *) * arity);
}
-void set_Block_cg_cfgpred(ir_node *node, int pos, ir_node *pred) {
+void set_Block_cg_cfgpred(ir_node *node, int pos, ir_node *pred)
+{
assert(is_Block(node) && node->attr.block.in_cg &&
0 <= pos && pos < ARR_LEN(node->attr.block.in_cg) - 1);
node->attr.block.in_cg[pos + 1] = pred;
}
-ir_node **get_Block_cg_cfgpred_arr(ir_node *node) {
+ir_node **get_Block_cg_cfgpred_arr(ir_node *node)
+{
assert(is_Block(node));
return node->attr.block.in_cg == NULL ? NULL : node->attr.block.in_cg + 1;
}
-int get_Block_cg_n_cfgpreds(const ir_node *node) {
+int get_Block_cg_n_cfgpreds(const ir_node *node)
+{
assert(is_Block(node));
return node->attr.block.in_cg == NULL ? 0 : ARR_LEN(node->attr.block.in_cg) - 1;
}
-ir_node *get_Block_cg_cfgpred(const ir_node *node, int pos) {
+ir_node *get_Block_cg_cfgpred(const ir_node *node, int pos)
+{
assert(is_Block(node) && node->attr.block.in_cg);
return node->attr.block.in_cg[pos + 1];
}
-void remove_Block_cg_cfgpred_arr(ir_node *node) {
+void remove_Block_cg_cfgpred_arr(ir_node *node)
+{
assert(is_Block(node));
node->attr.block.in_cg = NULL;
}
#endif /* INTERPROCEDURAL_VIEW */
-ir_node *(set_Block_dead)(ir_node *block) {
+ir_node *(set_Block_dead)(ir_node *block)
+{
return _set_Block_dead(block);
}
-int (is_Block_dead)(const ir_node *block) {
+int (is_Block_dead)(const ir_node *block)
+{
return _is_Block_dead(block);
}
-ir_extblk *get_Block_extbb(const ir_node *block) {
+ir_extblk *get_Block_extbb(const ir_node *block)
+{
ir_extblk *res;
assert(is_Block(block));
res = block->attr.block.extblk;
return res;
}
-void set_Block_extbb(ir_node *block, ir_extblk *extblk) {
+void set_Block_extbb(ir_node *block, ir_extblk *extblk)
+{
assert(is_Block(block));
assert(extblk == NULL || is_ir_extbb(extblk));
block->attr.block.extblk = extblk;
}
/* Returns the macro block header of a block.*/
-ir_node *get_Block_MacroBlock(const ir_node *block) {
+ir_node *get_Block_MacroBlock(const ir_node *block)
+{
ir_node *mbh;
assert(is_Block(block));
mbh = get_irn_n(block, -1);
}
/* Sets the macro block header of a block. */
-void set_Block_MacroBlock(ir_node *block, ir_node *mbh) {
+void set_Block_MacroBlock(ir_node *block, ir_node *mbh)
+{
assert(is_Block(block));
mbh = skip_Id(mbh);
assert(is_Block(mbh));
}
/* returns the macro block header of a node. */
-ir_node *get_irn_MacroBlock(const ir_node *n) {
+ir_node *get_irn_MacroBlock(const ir_node *n)
+{
if (! is_Block(n)) {
n = get_nodes_block(n);
/* if the Block is Bad, do NOT try to get it's MB, it will fail. */
}
/* returns the graph of a Block. */
-ir_graph *(get_Block_irg)(const ir_node *block) {
+ir_graph *(get_Block_irg)(const ir_node *block)
+{
return _get_Block_irg(block);
}
-ir_entity *create_Block_entity(ir_node *block) {
+ir_entity *create_Block_entity(ir_node *block)
+{
ir_entity *entity;
assert(is_Block(block));
return entity;
}
-ir_entity *get_Block_entity(const ir_node *block) {
+ir_entity *get_Block_entity(const ir_node *block)
+{
assert(is_Block(block));
return block->attr.block.entity;
}
return block->attr.block.entity != NULL;
}
-ir_node *(get_Block_phis)(const ir_node *block) {
+ir_node *(get_Block_phis)(const ir_node *block)
+{
return _get_Block_phis(block);
}
-void (set_Block_phis)(ir_node *block, ir_node *phi) {
+void (set_Block_phis)(ir_node *block, ir_node *phi)
+{
_set_Block_phis(block, phi);
}
-void (add_Block_phi)(ir_node *block, ir_node *phi) {
+void (add_Block_phi)(ir_node *block, ir_node *phi)
+{
_add_Block_phi(block, phi);
}
/* Get the Block mark (single bit). */
-unsigned (get_Block_mark)(const ir_node *block) {
+unsigned (get_Block_mark)(const ir_node *block)
+{
return _get_Block_mark(block);
}
/* Set the Block mark (single bit). */
-void (set_Block_mark)(ir_node *block, unsigned mark) {
+void (set_Block_mark)(ir_node *block, unsigned mark)
+{
_set_Block_mark(block, mark);
}
-int get_End_n_keepalives(const ir_node *end) {
+int get_End_n_keepalives(const ir_node *end)
+{
assert(is_End(end));
return (get_irn_arity(end) - END_KEEPALIVE_OFFSET);
}
-ir_node *get_End_keepalive(const ir_node *end, int pos) {
+ir_node *get_End_keepalive(const ir_node *end, int pos)
+{
assert(is_End(end));
return get_irn_n(end, pos + END_KEEPALIVE_OFFSET);
}
-void add_End_keepalive(ir_node *end, ir_node *ka) {
+void add_End_keepalive(ir_node *end, ir_node *ka)
+{
assert(is_End(end));
add_irn_n(end, ka);
}
-void set_End_keepalive(ir_node *end, int pos, ir_node *ka) {
+void set_End_keepalive(ir_node *end, int pos, ir_node *ka)
+{
assert(is_End(end));
set_irn_n(end, pos + END_KEEPALIVE_OFFSET, ka);
}
/* Set new keep-alives */
-void set_End_keepalives(ir_node *end, int n, ir_node *in[]) {
+void set_End_keepalives(ir_node *end, int n, ir_node *in[])
+{
int i;
ir_graph *irg = get_irn_irg(end);
}
/* Set new keep-alives from old keep-alives, skipping irn */
-void remove_End_keepalive(ir_node *end, ir_node *irn) {
+void remove_End_keepalive(ir_node *end, ir_node *irn)
+{
int n = get_End_n_keepalives(end);
int i, idx;
ir_graph *irg;
}
/* remove Bads, NoMems and doublets from the keep-alive set */
-void remove_End_Bads_and_doublets(ir_node *end) {
+void remove_End_Bads_and_doublets(ir_node *end)
+{
pset_new_t keeps;
int idx, n = get_End_n_keepalives(end);
ir_graph *irg;
pset_new_destroy(&keeps);
}
-void free_End(ir_node *end) {
+void free_End(ir_node *end)
+{
assert(is_End(end));
end->kind = k_BAD;
DEL_ARR_F(end->in);
}
/* Return the target address of an IJmp */
-ir_node *get_IJmp_target(const ir_node *ijmp) {
+ir_node *get_IJmp_target(const ir_node *ijmp)
+{
assert(is_IJmp(ijmp));
return get_irn_n(ijmp, 0);
}
/** Sets the target address of an IJmp */
-void set_IJmp_target(ir_node *ijmp, ir_node *tgt) {
+void set_IJmp_target(ir_node *ijmp, ir_node *tgt)
+{
assert(is_IJmp(ijmp));
set_irn_n(ijmp, 0, tgt);
}
ir_node *
-get_Cond_selector(const ir_node *node) {
+get_Cond_selector(const ir_node *node)
+{
assert(is_Cond(node));
return get_irn_n(node, 0);
}
void
-set_Cond_selector(ir_node *node, ir_node *selector) {
+set_Cond_selector(ir_node *node, ir_node *selector)
+{
assert(is_Cond(node));
set_irn_n(node, 0, selector);
}
long
-get_Cond_default_proj(const ir_node *node) {
+get_Cond_default_proj(const ir_node *node)
+{
assert(is_Cond(node));
return node->attr.cond.default_proj;
}
-void set_Cond_default_proj(ir_node *node, long defproj) {
+void set_Cond_default_proj(ir_node *node, long defproj)
+{
assert(is_Cond(node));
node->attr.cond.default_proj = defproj;
}
ir_node *
-get_Return_mem(const ir_node *node) {
+get_Return_mem(const ir_node *node)
+{
assert(is_Return(node));
return get_irn_n(node, 0);
}
void
-set_Return_mem(ir_node *node, ir_node *mem) {
+set_Return_mem(ir_node *node, ir_node *mem)
+{
assert(is_Return(node));
set_irn_n(node, 0, mem);
}
int
-get_Return_n_ress(const ir_node *node) {
+get_Return_n_ress(const ir_node *node)
+{
assert(is_Return(node));
return (get_irn_arity(node) - RETURN_RESULT_OFFSET);
}
ir_node **
-get_Return_res_arr(ir_node *node) {
+get_Return_res_arr(ir_node *node)
+{
assert(is_Return(node));
if (get_Return_n_ress(node) > 0)
return (ir_node **)&(get_irn_in(node)[1 + RETURN_RESULT_OFFSET]);
/*
void
-set_Return_n_res(ir_node *node, int results) {
+set_Return_n_res(ir_node *node, int results)
+{
assert(is_Return(node));
}
*/
ir_node *
-get_Return_res(const ir_node *node, int pos) {
+get_Return_res(const ir_node *node, int pos)
+{
assert(is_Return(node));
assert(get_Return_n_ress(node) > pos);
return get_irn_n(node, pos + RETURN_RESULT_OFFSET);
}
void
-set_Return_res(ir_node *node, int pos, ir_node *res){
+set_Return_res(ir_node *node, int pos, ir_node *res)
+{
assert(is_Return(node));
set_irn_n(node, pos + RETURN_RESULT_OFFSET, res);
}
-tarval *(get_Const_tarval)(const ir_node *node) {
+tarval *(get_Const_tarval)(const ir_node *node)
+{
return _get_Const_tarval(node);
}
void
-set_Const_tarval(ir_node *node, tarval *con) {
+set_Const_tarval(ir_node *node, tarval *con)
+{
assert(is_Const(node));
node->attr.con.tv = con;
}
-int (is_Const_null)(const ir_node *node) {
+int (is_Const_null)(const ir_node *node)
+{
return _is_Const_null(node);
}
-int (is_Const_one)(const ir_node *node) {
+int (is_Const_one)(const ir_node *node)
+{
return _is_Const_one(node);
}
-int (is_Const_all_one)(const ir_node *node) {
+int (is_Const_all_one)(const ir_node *node)
+{
return _is_Const_all_one(node);
}
be mode of node. For tarvals from entities type must be pointer to
entity type. */
ir_type *
-get_Const_type(ir_node *node) {
+get_Const_type(ir_node *node)
+{
assert(is_Const(node));
return node->attr.con.tp;
}
void
-set_Const_type(ir_node *node, ir_type *tp) {
+set_Const_type(ir_node *node, ir_type *tp)
+{
assert(is_Const(node));
if (tp != firm_unknown_type) {
assert(is_atomic_type(tp));
symconst_kind
-get_SymConst_kind(const ir_node *node) {
+get_SymConst_kind(const ir_node *node)
+{
assert(is_SymConst(node));
return node->attr.symc.kind;
}
void
-set_SymConst_kind(ir_node *node, symconst_kind kind) {
+set_SymConst_kind(ir_node *node, symconst_kind kind)
+{
assert(is_SymConst(node));
node->attr.symc.kind = kind;
}
ir_type *
-get_SymConst_type(const ir_node *node) {
+get_SymConst_type(const ir_node *node)
+{
/* the cast here is annoying, but we have to compensate for
the skip_tip() */
ir_node *irn = (ir_node *)node;
}
void
-set_SymConst_type(ir_node *node, ir_type *tp) {
+set_SymConst_type(ir_node *node, ir_type *tp)
+{
assert(is_SymConst(node) &&
(SYMCONST_HAS_TYPE(get_SymConst_kind(node))));
node->attr.symc.sym.type_p = tp;
}
ident *
-get_SymConst_name(const ir_node *node) {
+get_SymConst_name(const ir_node *node)
+{
assert(is_SymConst(node) && SYMCONST_HAS_ID(get_SymConst_kind(node)));
return node->attr.symc.sym.ident_p;
}
void
-set_SymConst_name(ir_node *node, ident *name) {
+set_SymConst_name(ir_node *node, ident *name)
+{
assert(is_SymConst(node) && SYMCONST_HAS_ID(get_SymConst_kind(node)));
node->attr.symc.sym.ident_p = name;
}
/* Only to access SymConst of kind symconst_addr_ent. Else assertion: */
-ir_entity *get_SymConst_entity(const ir_node *node) {
+ir_entity *get_SymConst_entity(const ir_node *node)
+{
assert(is_SymConst(node) && SYMCONST_HAS_ENT(get_SymConst_kind(node)));
return node->attr.symc.sym.entity_p;
}
-void set_SymConst_entity(ir_node *node, ir_entity *ent) {
+void set_SymConst_entity(ir_node *node, ir_entity *ent)
+{
assert(is_SymConst(node) && SYMCONST_HAS_ENT(get_SymConst_kind(node)));
node->attr.symc.sym.entity_p = ent;
}
-ir_enum_const *get_SymConst_enum(const ir_node *node) {
+ir_enum_const *get_SymConst_enum(const ir_node *node)
+{
assert(is_SymConst(node) && SYMCONST_HAS_ENUM(get_SymConst_kind(node)));
return node->attr.symc.sym.enum_p;
}
-void set_SymConst_enum(ir_node *node, ir_enum_const *ec) {
+void set_SymConst_enum(ir_node *node, ir_enum_const *ec)
+{
assert(is_SymConst(node) && SYMCONST_HAS_ENUM(get_SymConst_kind(node)));
node->attr.symc.sym.enum_p = ec;
}
union symconst_symbol
-get_SymConst_symbol(const ir_node *node) {
+get_SymConst_symbol(const ir_node *node)
+{
assert(is_SymConst(node));
return node->attr.symc.sym;
}
void
-set_SymConst_symbol(ir_node *node, union symconst_symbol sym) {
+set_SymConst_symbol(ir_node *node, union symconst_symbol sym)
+{
assert(is_SymConst(node));
node->attr.symc.sym = sym;
}
ir_type *
-get_SymConst_value_type(ir_node *node) {
+get_SymConst_value_type(ir_node *node)
+{
assert(is_SymConst(node));
return node->attr.symc.tp;
}
void
-set_SymConst_value_type(ir_node *node, ir_type *tp) {
+set_SymConst_value_type(ir_node *node, ir_type *tp)
+{
assert(is_SymConst(node));
node->attr.symc.tp = tp;
}
ir_node *
-get_Sel_mem(const ir_node *node) {
+get_Sel_mem(const ir_node *node)
+{
assert(is_Sel(node));
return get_irn_n(node, 0);
}
void
-set_Sel_mem(ir_node *node, ir_node *mem) {
+set_Sel_mem(ir_node *node, ir_node *mem)
+{
assert(is_Sel(node));
set_irn_n(node, 0, mem);
}
ir_node *
-get_Sel_ptr(const ir_node *node) {
+get_Sel_ptr(const ir_node *node)
+{
assert(is_Sel(node));
return get_irn_n(node, 1);
}
void
-set_Sel_ptr(ir_node *node, ir_node *ptr) {
+set_Sel_ptr(ir_node *node, ir_node *ptr)
+{
assert(is_Sel(node));
set_irn_n(node, 1, ptr);
}
int
-get_Sel_n_indexs(const ir_node *node) {
+get_Sel_n_indexs(const ir_node *node)
+{
assert(is_Sel(node));
return (get_irn_arity(node) - SEL_INDEX_OFFSET);
}
ir_node **
-get_Sel_index_arr(ir_node *node) {
+get_Sel_index_arr(ir_node *node)
+{
assert(is_Sel(node));
if (get_Sel_n_indexs(node) > 0)
return (ir_node **)& get_irn_in(node)[SEL_INDEX_OFFSET + 1];
}
ir_node *
-get_Sel_index(const ir_node *node, int pos) {
+get_Sel_index(const ir_node *node, int pos)
+{
assert(is_Sel(node));
return get_irn_n(node, pos + SEL_INDEX_OFFSET);
}
void
-set_Sel_index(ir_node *node, int pos, ir_node *index) {
+set_Sel_index(ir_node *node, int pos, ir_node *index)
+{
assert(is_Sel(node));
set_irn_n(node, pos + SEL_INDEX_OFFSET, index);
}
ir_entity *
-get_Sel_entity(const ir_node *node) {
+get_Sel_entity(const ir_node *node)
+{
assert(is_Sel(node));
return node->attr.sel.entity;
}
/* need a version without const to prevent warning */
-static ir_entity *_get_Sel_entity(ir_node *node) {
+static ir_entity *_get_Sel_entity(ir_node *node)
+{
return get_Sel_entity(node);
}
void
-set_Sel_entity(ir_node *node, ir_entity *ent) {
+set_Sel_entity(ir_node *node, ir_entity *ent)
+{
assert(is_Sel(node));
node->attr.sel.entity = ent;
}
ir_node *
-get_Call_mem(const ir_node *node) {
+get_Call_mem(const ir_node *node)
+{
assert(is_Call(node));
return get_irn_n(node, 0);
}
void
-set_Call_mem(ir_node *node, ir_node *mem) {
+set_Call_mem(ir_node *node, ir_node *mem)
+{
assert(is_Call(node));
set_irn_n(node, 0, mem);
}
ir_node *
-get_Call_ptr(const ir_node *node) {
+get_Call_ptr(const ir_node *node)
+{
assert(is_Call(node));
return get_irn_n(node, 1);
}
void
-set_Call_ptr(ir_node *node, ir_node *ptr) {
+set_Call_ptr(ir_node *node, ir_node *ptr)
+{
assert(is_Call(node));
set_irn_n(node, 1, ptr);
}
ir_node **
-get_Call_param_arr(ir_node *node) {
+get_Call_param_arr(ir_node *node)
+{
assert(is_Call(node));
return &get_irn_in(node)[CALL_PARAM_OFFSET + 1];
}
int
-get_Call_n_params(const ir_node *node) {
+get_Call_n_params(const ir_node *node)
+{
assert(is_Call(node));
return (get_irn_arity(node) - CALL_PARAM_OFFSET);
}
ir_node *
-get_Call_param(const ir_node *node, int pos) {
+get_Call_param(const ir_node *node, int pos)
+{
assert(is_Call(node));
return get_irn_n(node, pos + CALL_PARAM_OFFSET);
}
void
-set_Call_param(ir_node *node, int pos, ir_node *param) {
+set_Call_param(ir_node *node, int pos, ir_node *param)
+{
assert(is_Call(node));
set_irn_n(node, pos + CALL_PARAM_OFFSET, param);
}
ir_type *
-get_Call_type(ir_node *node) {
+get_Call_type(ir_node *node)
+{
assert(is_Call(node));
return node->attr.call.type;
}
void
-set_Call_type(ir_node *node, ir_type *tp) {
+set_Call_type(ir_node *node, ir_type *tp)
+{
assert(is_Call(node));
assert((get_unknown_type() == tp) || is_Method_type(tp));
node->attr.call.type = tp;
}
unsigned
-get_Call_tail_call(const ir_node *node) {
+get_Call_tail_call(const ir_node *node)
+{
assert(is_Call(node));
return node->attr.call.tail_call;
}
void
-set_Call_tail_call(ir_node *node, unsigned tail_call) {
+set_Call_tail_call(ir_node *node, unsigned tail_call)
+{
assert(is_Call(node));
node->attr.call.tail_call = tail_call != 0;
}
ir_node *
-get_Builtin_mem(const ir_node *node) {
+get_Builtin_mem(const ir_node *node)
+{
assert(is_Builtin(node));
return get_irn_n(node, 0);
}
void
-set_Builin_mem(ir_node *node, ir_node *mem) {
+set_Builin_mem(ir_node *node, ir_node *mem)
+{
assert(is_Builtin(node));
set_irn_n(node, 0, mem);
}
ir_builtin_kind
-get_Builtin_kind(const ir_node *node) {
+get_Builtin_kind(const ir_node *node)
+{
assert(is_Builtin(node));
return node->attr.builtin.kind;
}
void
-set_Builtin_kind(ir_node *node, ir_builtin_kind kind) {
+set_Builtin_kind(ir_node *node, ir_builtin_kind kind)
+{
assert(is_Builtin(node));
node->attr.builtin.kind = kind;
}
ir_node **
-get_Builtin_param_arr(ir_node *node) {
+get_Builtin_param_arr(ir_node *node)
+{
assert(is_Builtin(node));
return &get_irn_in(node)[BUILDIN_PARAM_OFFSET + 1];
}
int
-get_Builtin_n_params(const ir_node *node) {
+get_Builtin_n_params(const ir_node *node)
+{
assert(is_Builtin(node));
return (get_irn_arity(node) - BUILDIN_PARAM_OFFSET);
}
ir_node *
-get_Builtin_param(const ir_node *node, int pos) {
+get_Builtin_param(const ir_node *node, int pos)
+{
assert(is_Builtin(node));
return get_irn_n(node, pos + BUILDIN_PARAM_OFFSET);
}
void
-set_Builtin_param(ir_node *node, int pos, ir_node *param) {
+set_Builtin_param(ir_node *node, int pos, ir_node *param)
+{
assert(is_Builtin(node));
set_irn_n(node, pos + BUILDIN_PARAM_OFFSET, param);
}
ir_type *
-get_Builtin_type(ir_node *node) {
+get_Builtin_type(ir_node *node)
+{
assert(is_Builtin(node));
return node->attr.builtin.type;
}
void
-set_Builtin_type(ir_node *node, ir_type *tp) {
+set_Builtin_type(ir_node *node, ir_type *tp)
+{
assert(is_Builtin(node));
assert((get_unknown_type() == tp) || is_Method_type(tp));
node->attr.builtin.type = tp;
}
/* Returns a human readable string for the ir_builtin_kind. */
-const char *get_builtin_kind_name(ir_builtin_kind kind) {
+const char *get_builtin_kind_name(ir_builtin_kind kind)
+{
#define X(a) case a: return #a;
switch (kind) {
X(ir_bk_trap);
}
-int Call_has_callees(const ir_node *node) {
+int Call_has_callees(const ir_node *node)
+{
assert(is_Call(node));
return ((get_irg_callee_info_state(get_irn_irg(node)) != irg_callee_info_none) &&
(node->attr.call.callee_arr != NULL));
}
-int get_Call_n_callees(const ir_node *node) {
+int get_Call_n_callees(const ir_node *node)
+{
assert(is_Call(node) && node->attr.call.callee_arr);
return ARR_LEN(node->attr.call.callee_arr);
}
-ir_entity *get_Call_callee(const ir_node *node, int pos) {
+ir_entity *get_Call_callee(const ir_node *node, int pos)
+{
assert(pos >= 0 && pos < get_Call_n_callees(node));
return node->attr.call.callee_arr[pos];
}
-void set_Call_callee_arr(ir_node *node, const int n, ir_entity ** arr) {
+void set_Call_callee_arr(ir_node *node, const int n, ir_entity ** arr)
+{
assert(is_Call(node));
if (node->attr.call.callee_arr == NULL || get_Call_n_callees(node) != n) {
node->attr.call.callee_arr = NEW_ARR_D(ir_entity *, current_ir_graph->obst, n);
memcpy(node->attr.call.callee_arr, arr, n * sizeof(ir_entity *));
}
-void remove_Call_callee_arr(ir_node *node) {
+void remove_Call_callee_arr(ir_node *node)
+{
assert(is_Call(node));
node->attr.call.callee_arr = NULL;
}
-ir_node *get_CallBegin_ptr(const ir_node *node) {
+ir_node *get_CallBegin_ptr(const ir_node *node)
+{
assert(is_CallBegin(node));
return get_irn_n(node, 0);
}
-void set_CallBegin_ptr(ir_node *node, ir_node *ptr) {
+void set_CallBegin_ptr(ir_node *node, ir_node *ptr)
+{
assert(is_CallBegin(node));
set_irn_n(node, 0, ptr);
}
-ir_node *get_CallBegin_call(const ir_node *node) {
+ir_node *get_CallBegin_call(const ir_node *node)
+{
assert(is_CallBegin(node));
return node->attr.callbegin.call;
}
-void set_CallBegin_call(ir_node *node, ir_node *call) {
+void set_CallBegin_call(ir_node *node, ir_node *call)
+{
assert(is_CallBegin(node));
node->attr.callbegin.call = call;
}
* Returns non-zero if a Call is surely a self-recursive Call.
* Beware: if this functions returns 0, the call might be self-recursive!
*/
-int is_self_recursive_Call(const ir_node *call) {
+int is_self_recursive_Call(const ir_node *call)
+{
const ir_node *callee = get_Call_ptr(call);
if (is_SymConst_addr_ent(callee)) {
UNOP(Conv)
UNOP(Cast)
-int get_Div_no_remainder(const ir_node *node) {
+int get_Div_no_remainder(const ir_node *node)
+{
assert(is_Div(node));
return node->attr.divmod.no_remainder;
}
-void set_Div_no_remainder(ir_node *node, int no_remainder) {
+void set_Div_no_remainder(ir_node *node, int no_remainder)
+{
assert(is_Div(node));
node->attr.divmod.no_remainder = no_remainder;
}
-int get_Conv_strict(const ir_node *node) {
+int get_Conv_strict(const ir_node *node)
+{
assert(is_Conv(node));
return node->attr.conv.strict;
}
-void set_Conv_strict(ir_node *node, int strict_flag) {
+void set_Conv_strict(ir_node *node, int strict_flag)
+{
assert(is_Conv(node));
node->attr.conv.strict = (char)strict_flag;
}
ir_type *
-get_Cast_type(ir_node *node) {
+get_Cast_type(ir_node *node)
+{
assert(is_Cast(node));
return node->attr.cast.type;
}
void
-set_Cast_type(ir_node *node, ir_type *to_tp) {
+set_Cast_type(ir_node *node, ir_type *to_tp)
+{
assert(is_Cast(node));
node->attr.cast.type = to_tp;
}
*
* Returns true if the Cast node casts a class type to a super type.
*/
-int is_Cast_upcast(ir_node *node) {
+int is_Cast_upcast(ir_node *node)
+{
ir_type *totype = get_Cast_type(node);
ir_type *fromtype = get_irn_typeinfo_type(get_Cast_op(node));
*
* Returns true if the Cast node casts a class type to a sub type.
*/
-int is_Cast_downcast(ir_node *node) {
+int is_Cast_downcast(ir_node *node)
+{
ir_type *totype = get_Cast_type(node);
ir_type *fromtype = get_irn_typeinfo_type(get_Cast_op(node));
}
ir_node *
-get_unop_op(const ir_node *node) {
+get_unop_op(const ir_node *node)
+{
if (node->op->opar == oparity_unary)
return get_irn_n(node, node->op->op_index);
}
void
-set_unop_op(ir_node *node, ir_node *op) {
+set_unop_op(ir_node *node, ir_node *op)
+{
if (node->op->opar == oparity_unary)
set_irn_n(node, node->op->op_index, op);
}
ir_node *
-get_binop_left(const ir_node *node) {
+get_binop_left(const ir_node *node)
+{
assert(node->op->opar == oparity_binary);
return get_irn_n(node, node->op->op_index);
}
void
-set_binop_left(ir_node *node, ir_node *left) {
+set_binop_left(ir_node *node, ir_node *left)
+{
assert(node->op->opar == oparity_binary);
set_irn_n(node, node->op->op_index, left);
}
ir_node *
-get_binop_right(const ir_node *node) {
+get_binop_right(const ir_node *node)
+{
assert(node->op->opar == oparity_binary);
return get_irn_n(node, node->op->op_index + 1);
}
void
-set_binop_right(ir_node *node, ir_node *right) {
+set_binop_right(ir_node *node, ir_node *right)
+{
assert(node->op->opar == oparity_binary);
set_irn_n(node, node->op->op_index + 1, right);
}
-int is_Phi0(const ir_node *n) {
+int is_Phi0(const ir_node *n)
+{
assert(n);
return ((get_irn_op(n) == op_Phi) &&
}
ir_node **
-get_Phi_preds_arr(ir_node *node) {
+get_Phi_preds_arr(ir_node *node)
+{
assert(node->op == op_Phi);
return (ir_node **)&(get_irn_in(node)[1]);
}
int
-get_Phi_n_preds(const ir_node *node) {
+get_Phi_n_preds(const ir_node *node)
+{
assert(is_Phi(node) || is_Phi0(node));
return (get_irn_arity(node));
}
/*
-void set_Phi_n_preds(ir_node *node, int n_preds) {
+void set_Phi_n_preds(ir_node *node, int n_preds)
+{
assert(node->op == op_Phi);
}
*/
ir_node *
-get_Phi_pred(const ir_node *node, int pos) {
+get_Phi_pred(const ir_node *node, int pos)
+{
assert(is_Phi(node) || is_Phi0(node));
return get_irn_n(node, pos);
}
void
-set_Phi_pred(ir_node *node, int pos, ir_node *pred) {
+set_Phi_pred(ir_node *node, int pos, ir_node *pred)
+{
assert(is_Phi(node) || is_Phi0(node));
set_irn_n(node, pos, pred);
}
-ir_node *(get_Phi_next)(const ir_node *phi) {
+ir_node *(get_Phi_next)(const ir_node *phi)
+{
return _get_Phi_next(phi);
}
-void (set_Phi_next)(ir_node *phi, ir_node *next) {
+void (set_Phi_next)(ir_node *phi, ir_node *next)
+{
_set_Phi_next(phi, next);
}
-int is_memop(const ir_node *node) {
+int is_memop(const ir_node *node)
+{
ir_opcode code = get_irn_opcode(node);
return (code == iro_Load || code == iro_Store);
}
-ir_node *get_memop_mem(const ir_node *node) {
+ir_node *get_memop_mem(const ir_node *node)
+{
assert(is_memop(node));
return get_irn_n(node, 0);
}
-void set_memop_mem(ir_node *node, ir_node *mem) {
+void set_memop_mem(ir_node *node, ir_node *mem)
+{
assert(is_memop(node));
set_irn_n(node, 0, mem);
}
-ir_node *get_memop_ptr(const ir_node *node) {
+ir_node *get_memop_ptr(const ir_node *node)
+{
assert(is_memop(node));
return get_irn_n(node, 1);
}
-void set_memop_ptr(ir_node *node, ir_node *ptr) {
+void set_memop_ptr(ir_node *node, ir_node *ptr)
+{
assert(is_memop(node));
set_irn_n(node, 1, ptr);
}
ir_node *
-get_Load_mem(const ir_node *node) {
+get_Load_mem(const ir_node *node)
+{
assert(is_Load(node));
return get_irn_n(node, 0);
}
void
-set_Load_mem(ir_node *node, ir_node *mem) {
+set_Load_mem(ir_node *node, ir_node *mem)
+{
assert(is_Load(node));
set_irn_n(node, 0, mem);
}
ir_node *
-get_Load_ptr(const ir_node *node) {
+get_Load_ptr(const ir_node *node)
+{
assert(is_Load(node));
return get_irn_n(node, 1);
}
void
-set_Load_ptr(ir_node *node, ir_node *ptr) {
+set_Load_ptr(ir_node *node, ir_node *ptr)
+{
assert(is_Load(node));
set_irn_n(node, 1, ptr);
}
ir_mode *
-get_Load_mode(const ir_node *node) {
+get_Load_mode(const ir_node *node)
+{
assert(is_Load(node));
return node->attr.load.mode;
}
void
-set_Load_mode(ir_node *node, ir_mode *mode) {
+set_Load_mode(ir_node *node, ir_mode *mode)
+{
assert(is_Load(node));
node->attr.load.mode = mode;
}
ir_volatility
-get_Load_volatility(const ir_node *node) {
+get_Load_volatility(const ir_node *node)
+{
assert(is_Load(node));
return node->attr.load.volatility;
}
void
-set_Load_volatility(ir_node *node, ir_volatility volatility) {
+set_Load_volatility(ir_node *node, ir_volatility volatility)
+{
assert(is_Load(node));
node->attr.load.volatility = volatility;
}
ir_align
-get_Load_align(const ir_node *node) {
+get_Load_align(const ir_node *node)
+{
assert(is_Load(node));
return node->attr.load.aligned;
}
void
-set_Load_align(ir_node *node, ir_align align) {
+set_Load_align(ir_node *node, ir_align align)
+{
assert(is_Load(node));
node->attr.load.aligned = align;
}
ir_node *
-get_Store_mem(const ir_node *node) {
+get_Store_mem(const ir_node *node)
+{
assert(is_Store(node));
return get_irn_n(node, 0);
}
void
-set_Store_mem(ir_node *node, ir_node *mem) {
+set_Store_mem(ir_node *node, ir_node *mem)
+{
assert(is_Store(node));
set_irn_n(node, 0, mem);
}
ir_node *
-get_Store_ptr(const ir_node *node) {
+get_Store_ptr(const ir_node *node)
+{
assert(is_Store(node));
return get_irn_n(node, 1);
}
void
-set_Store_ptr(ir_node *node, ir_node *ptr) {
+set_Store_ptr(ir_node *node, ir_node *ptr)
+{
assert(is_Store(node));
set_irn_n(node, 1, ptr);
}
ir_node *
-get_Store_value(const ir_node *node) {
+get_Store_value(const ir_node *node)
+{
assert(is_Store(node));
return get_irn_n(node, 2);
}
void
-set_Store_value(ir_node *node, ir_node *value) {
+set_Store_value(ir_node *node, ir_node *value)
+{
assert(is_Store(node));
set_irn_n(node, 2, value);
}
ir_volatility
-get_Store_volatility(const ir_node *node) {
+get_Store_volatility(const ir_node *node)
+{
assert(is_Store(node));
return node->attr.store.volatility;
}
void
-set_Store_volatility(ir_node *node, ir_volatility volatility) {
+set_Store_volatility(ir_node *node, ir_volatility volatility)
+{
assert(is_Store(node));
node->attr.store.volatility = volatility;
}
ir_align
-get_Store_align(const ir_node *node) {
+get_Store_align(const ir_node *node)
+{
assert(is_Store(node));
return node->attr.store.aligned;
}
void
-set_Store_align(ir_node *node, ir_align align) {
+set_Store_align(ir_node *node, ir_align align)
+{
assert(is_Store(node));
node->attr.store.aligned = align;
}
ir_node *
-get_Alloc_mem(const ir_node *node) {
+get_Alloc_mem(const ir_node *node)
+{
assert(is_Alloc(node));
return get_irn_n(node, 0);
}
void
-set_Alloc_mem(ir_node *node, ir_node *mem) {
+set_Alloc_mem(ir_node *node, ir_node *mem)
+{
assert(is_Alloc(node));
set_irn_n(node, 0, mem);
}
ir_node *
-get_Alloc_size(const ir_node *node) {
+get_Alloc_size(const ir_node *node)
+{
assert(is_Alloc(node));
return get_irn_n(node, 1);
}
void
-set_Alloc_size(ir_node *node, ir_node *size) {
+set_Alloc_size(ir_node *node, ir_node *size)
+{
assert(is_Alloc(node));
set_irn_n(node, 1, size);
}
ir_type *
-get_Alloc_type(ir_node *node) {
+get_Alloc_type(ir_node *node)
+{
assert(is_Alloc(node));
return node->attr.alloc.type;
}
void
-set_Alloc_type(ir_node *node, ir_type *tp) {
+set_Alloc_type(ir_node *node, ir_type *tp)
+{
assert(is_Alloc(node));
node->attr.alloc.type = tp;
}
ir_where_alloc
-get_Alloc_where(const ir_node *node) {
+get_Alloc_where(const ir_node *node)
+{
assert(is_Alloc(node));
return node->attr.alloc.where;
}
void
-set_Alloc_where(ir_node *node, ir_where_alloc where) {
+set_Alloc_where(ir_node *node, ir_where_alloc where)
+{
assert(is_Alloc(node));
node->attr.alloc.where = where;
}
ir_node *
-get_Free_mem(const ir_node *node) {
+get_Free_mem(const ir_node *node)
+{
assert(is_Free(node));
return get_irn_n(node, 0);
}
void
-set_Free_mem(ir_node *node, ir_node *mem) {
+set_Free_mem(ir_node *node, ir_node *mem)
+{
assert(is_Free(node));
set_irn_n(node, 0, mem);
}
ir_node *
-get_Free_ptr(const ir_node *node) {
+get_Free_ptr(const ir_node *node)
+{
assert(is_Free(node));
return get_irn_n(node, 1);
}
void
-set_Free_ptr(ir_node *node, ir_node *ptr) {
+set_Free_ptr(ir_node *node, ir_node *ptr)
+{
assert(is_Free(node));
set_irn_n(node, 1, ptr);
}
ir_node *
-get_Free_size(const ir_node *node) {
+get_Free_size(const ir_node *node)
+{
assert(is_Free(node));
return get_irn_n(node, 2);
}
void
-set_Free_size(ir_node *node, ir_node *size) {
+set_Free_size(ir_node *node, ir_node *size)
+{
assert(is_Free(node));
set_irn_n(node, 2, size);
}
ir_type *
-get_Free_type(ir_node *node) {
+get_Free_type(ir_node *node)
+{
assert(is_Free(node));
return node->attr.free.type;
}
void
-set_Free_type(ir_node *node, ir_type *tp) {
+set_Free_type(ir_node *node, ir_type *tp)
+{
assert(is_Free(node));
node->attr.free.type = tp;
}
ir_where_alloc
-get_Free_where(const ir_node *node) {
+get_Free_where(const ir_node *node)
+{
assert(is_Free(node));
return node->attr.free.where;
}
void
-set_Free_where(ir_node *node, ir_where_alloc where) {
+set_Free_where(ir_node *node, ir_where_alloc where)
+{
assert(is_Free(node));
node->attr.free.where = where;
}
-ir_node **get_Sync_preds_arr(ir_node *node) {
+ir_node **get_Sync_preds_arr(ir_node *node)
+{
assert(is_Sync(node));
return (ir_node **)&(get_irn_in(node)[1]);
}
-int get_Sync_n_preds(const ir_node *node) {
+int get_Sync_n_preds(const ir_node *node)
+{
assert(is_Sync(node));
return (get_irn_arity(node));
}
/*
-void set_Sync_n_preds(ir_node *node, int n_preds) {
+void set_Sync_n_preds(ir_node *node, int n_preds)
+{
assert(is_Sync(node));
}
*/
-ir_node *get_Sync_pred(const ir_node *node, int pos) {
+ir_node *get_Sync_pred(const ir_node *node, int pos)
+{
assert(is_Sync(node));
return get_irn_n(node, pos);
}
-void set_Sync_pred(ir_node *node, int pos, ir_node *pred) {
+void set_Sync_pred(ir_node *node, int pos, ir_node *pred)
+{
assert(is_Sync(node));
set_irn_n(node, pos, pred);
}
/* Add a new Sync predecessor */
-void add_Sync_pred(ir_node *node, ir_node *pred) {
+void add_Sync_pred(ir_node *node, ir_node *pred)
+{
assert(is_Sync(node));
add_irn_n(node, pred);
}
/* Returns the source language type of a Proj node. */
-ir_type *get_Proj_type(ir_node *n) {
+ir_type *get_Proj_type(ir_node *n)
+{
ir_type *tp = firm_unknown_type;
ir_node *pred = get_Proj_pred(n);
}
ir_node *
-get_Proj_pred(const ir_node *node) {
+get_Proj_pred(const ir_node *node)
+{
assert(is_Proj(node));
return get_irn_n(node, 0);
}
void
-set_Proj_pred(ir_node *node, ir_node *pred) {
+set_Proj_pred(ir_node *node, ir_node *pred)
+{
assert(is_Proj(node));
set_irn_n(node, 0, pred);
}
long
-get_Proj_proj(const ir_node *node) {
+get_Proj_proj(const ir_node *node)
+{
#ifdef INTERPROCEDURAL_VIEW
ir_opcode code = get_irn_opcode(node);
}
void
-set_Proj_proj(ir_node *node, long proj) {
+set_Proj_proj(ir_node *node, long proj)
+{
#ifdef INTERPROCEDURAL_VIEW
ir_opcode code = get_irn_opcode(node);
}
/* Returns non-zero if a node is a routine parameter. */
-int (is_arg_Proj)(const ir_node *node) {
+int (is_arg_Proj)(const ir_node *node)
+{
return _is_arg_Proj(node);
}
ir_node **
-get_Tuple_preds_arr(ir_node *node) {
+get_Tuple_preds_arr(ir_node *node)
+{
assert(is_Tuple(node));
return (ir_node **)&(get_irn_in(node)[1]);
}
int
-get_Tuple_n_preds(const ir_node *node) {
+get_Tuple_n_preds(const ir_node *node)
+{
assert(is_Tuple(node));
return get_irn_arity(node);
}
/*
void set_Tuple_n_preds(ir_node *node, int n_preds)
{
	assert(is_Tuple(node));
}
*/

/* Return the pos'th predecessor of a Tuple node. */
ir_node *get_Tuple_pred(const ir_node *node, int pos)
{
	assert(is_Tuple(node));
	return get_irn_n(node, pos);
}

/* Set the pos'th predecessor of a Tuple node. */
void set_Tuple_pred(ir_node *node, int pos, ir_node *pred)
{
	assert(is_Tuple(node));
	set_irn_n(node, pos, pred);
}
/* Return the operand (slot 0) of an Id node. */
ir_node *get_Id_pred(const ir_node *node)
{
	assert(is_Id(node));
	return get_irn_n(node, 0);
}

/* Set the operand (slot 0) of an Id node. */
void set_Id_pred(ir_node *node, ir_node *pred)
{
	assert(is_Id(node));
	set_irn_n(node, 0, pred);
}
-ir_node *get_Confirm_value(const ir_node *node) {
+ir_node *get_Confirm_value(const ir_node *node)
+{
assert(is_Confirm(node));
return get_irn_n(node, 0);
}
-void set_Confirm_value(ir_node *node, ir_node *value) {
+void set_Confirm_value(ir_node *node, ir_node *value)
+{
assert(is_Confirm(node));
set_irn_n(node, 0, value);
}
-ir_node *get_Confirm_bound(const ir_node *node) {
+ir_node *get_Confirm_bound(const ir_node *node)
+{
assert(is_Confirm(node));
return get_irn_n(node, 1);
}
-void set_Confirm_bound(ir_node *node, ir_node *bound) {
+void set_Confirm_bound(ir_node *node, ir_node *bound)
+{
assert(is_Confirm(node));
set_irn_n(node, 0, bound);
}
-pn_Cmp get_Confirm_cmp(const ir_node *node) {
+pn_Cmp get_Confirm_cmp(const ir_node *node)
+{
assert(is_Confirm(node));
return node->attr.confirm.cmp;
}
-void set_Confirm_cmp(ir_node *node, pn_Cmp cmp) {
+void set_Confirm_cmp(ir_node *node, pn_Cmp cmp)
+{
assert(is_Confirm(node));
node->attr.confirm.cmp = cmp;
}
/* Return the operand of a Filter node.
 * NOTE: touches node->in directly instead of going through get_irn_n. */
ir_node *get_Filter_pred(ir_node *node)
{
	assert(is_Filter(node));
	return node->in[1];
}

/* Set the operand of a Filter node (direct in-array write). */
void set_Filter_pred(ir_node *node, ir_node *pred)
{
	assert(is_Filter(node));
	node->in[1] = pred;
}

/* Return the projection number of a Filter node. */
long get_Filter_proj(ir_node *node)
{
	assert(is_Filter(node));
	return node->attr.filter.proj;
}

/* Set the projection number of a Filter node. */
void set_Filter_proj(ir_node *node, long proj)
{
	assert(is_Filter(node));
	node->attr.filter.proj = proj;
}
/* Don't use get_irn_arity, get_irn_n in implementation as access
shall work independent of view!!! */
-void set_Filter_cg_pred_arr(ir_node *node, int arity, ir_node ** in) {
+void set_Filter_cg_pred_arr(ir_node *node, int arity, ir_node ** in)
+{
assert(is_Filter(node));
if (node->attr.filter.in_cg == NULL || arity != ARR_LEN(node->attr.filter.in_cg) - 1) {
ir_graph *irg = get_irn_irg(node);
memcpy(node->attr.filter.in_cg + 1, in, sizeof(ir_node *) * arity);
}
-void set_Filter_cg_pred(ir_node * node, int pos, ir_node * pred) {
+void set_Filter_cg_pred(ir_node * node, int pos, ir_node * pred)
+{
assert(is_Filter(node) && node->attr.filter.in_cg &&
0 <= pos && pos < ARR_LEN(node->attr.filter.in_cg) - 1);
node->attr.filter.in_cg[pos + 1] = pred;
}
-int get_Filter_n_cg_preds(ir_node *node) {
+int get_Filter_n_cg_preds(ir_node *node)
+{
assert(is_Filter(node) && node->attr.filter.in_cg);
return (ARR_LEN(node->attr.filter.in_cg) - 1);
}
-ir_node *get_Filter_cg_pred(ir_node *node, int pos) {
+ir_node *get_Filter_cg_pred(ir_node *node, int pos)
+{
int arity;
assert(is_Filter(node) && node->attr.filter.in_cg &&
0 <= pos);
}
/* Mux support */
-ir_node *get_Mux_sel(const ir_node *node) {
+ir_node *get_Mux_sel(const ir_node *node)
+{
assert(is_Mux(node));
return node->in[1];
}
-void set_Mux_sel(ir_node *node, ir_node *sel) {
+void set_Mux_sel(ir_node *node, ir_node *sel)
+{
assert(is_Mux(node));
node->in[1] = sel;
}
-ir_node *get_Mux_false(const ir_node *node) {
+ir_node *get_Mux_false(const ir_node *node)
+{
assert(is_Mux(node));
return node->in[2];
}
-void set_Mux_false(ir_node *node, ir_node *ir_false) {
+void set_Mux_false(ir_node *node, ir_node *ir_false)
+{
assert(is_Mux(node));
node->in[2] = ir_false;
}
-ir_node *get_Mux_true(const ir_node *node) {
+ir_node *get_Mux_true(const ir_node *node)
+{
assert(is_Mux(node));
return node->in[3];
}
-void set_Mux_true(ir_node *node, ir_node *ir_true) {
+void set_Mux_true(ir_node *node, ir_node *ir_true)
+{
assert(is_Mux(node));
node->in[3] = ir_true;
}
/* CopyB support */
-ir_node *get_CopyB_mem(const ir_node *node) {
+ir_node *get_CopyB_mem(const ir_node *node)
+{
assert(is_CopyB(node));
return get_irn_n(node, 0);
}
-void set_CopyB_mem(ir_node *node, ir_node *mem) {
+void set_CopyB_mem(ir_node *node, ir_node *mem)
+{
assert(node->op == op_CopyB);
set_irn_n(node, 0, mem);
}
-ir_node *get_CopyB_dst(const ir_node *node) {
+ir_node *get_CopyB_dst(const ir_node *node)
+{
assert(is_CopyB(node));
return get_irn_n(node, 1);
}
-void set_CopyB_dst(ir_node *node, ir_node *dst) {
+void set_CopyB_dst(ir_node *node, ir_node *dst)
+{
assert(is_CopyB(node));
set_irn_n(node, 1, dst);
}
-ir_node *get_CopyB_src(const ir_node *node) {
+ir_node *get_CopyB_src(const ir_node *node)
+{
assert(is_CopyB(node));
return get_irn_n(node, 2);
}
-void set_CopyB_src(ir_node *node, ir_node *src) {
+void set_CopyB_src(ir_node *node, ir_node *src)
+{
assert(is_CopyB(node));
set_irn_n(node, 2, src);
}
-ir_type *get_CopyB_type(ir_node *node) {
+ir_type *get_CopyB_type(ir_node *node)
+{
assert(is_CopyB(node));
return node->attr.copyb.type;
}
-void set_CopyB_type(ir_node *node, ir_type *data_type) {
+void set_CopyB_type(ir_node *node, ir_type *data_type)
+{
assert(is_CopyB(node) && data_type);
node->attr.copyb.type = data_type;
}
ir_type *
-get_InstOf_type(ir_node *node) {
+get_InstOf_type(ir_node *node)
+{
assert(node->op == op_InstOf);
return node->attr.instof.type;
}
void
-set_InstOf_type(ir_node *node, ir_type *type) {
+set_InstOf_type(ir_node *node, ir_type *type)
+{
assert(node->op == op_InstOf);
node->attr.instof.type = type;
}
ir_node *
-get_InstOf_store(const ir_node *node) {
+get_InstOf_store(const ir_node *node)
+{
assert(node->op == op_InstOf);
return get_irn_n(node, 0);
}
void
-set_InstOf_store(ir_node *node, ir_node *obj) {
+set_InstOf_store(ir_node *node, ir_node *obj)
+{
assert(node->op == op_InstOf);
set_irn_n(node, 0, obj);
}
ir_node *
-get_InstOf_obj(const ir_node *node) {
+get_InstOf_obj(const ir_node *node)
+{
assert(node->op == op_InstOf);
return get_irn_n(node, 1);
}
void
-set_InstOf_obj(ir_node *node, ir_node *obj) {
+set_InstOf_obj(ir_node *node, ir_node *obj)
+{
assert(node->op == op_InstOf);
set_irn_n(node, 1, obj);
}
/* Raise support: slot 0 = memory, slot 1 = exception object pointer. */
ir_node *get_Raise_mem(const ir_node *node)
{
	assert(is_Raise(node));
	return get_irn_n(node, 0);
}

void set_Raise_mem(ir_node *node, ir_node *mem)
{
	assert(is_Raise(node));
	set_irn_n(node, 0, mem);
}

ir_node *get_Raise_exo_ptr(const ir_node *node)
{
	assert(is_Raise(node));
	return get_irn_n(node, 1);
}

void set_Raise_exo_ptr(ir_node *node, ir_node *exo_ptr)
{
	assert(is_Raise(node));
	set_irn_n(node, 1, exo_ptr);
}
/* Bound support */
/* Returns the memory input of a Bound operation. */
-ir_node *get_Bound_mem(const ir_node *bound) {
+ir_node *get_Bound_mem(const ir_node *bound)
+{
assert(is_Bound(bound));
return get_irn_n(bound, 0);
}
-void set_Bound_mem(ir_node *bound, ir_node *mem) {
+void set_Bound_mem(ir_node *bound, ir_node *mem)
+{
assert(is_Bound(bound));
set_irn_n(bound, 0, mem);
}
/* Returns the index input of a Bound operation. */
-ir_node *get_Bound_index(const ir_node *bound) {
+ir_node *get_Bound_index(const ir_node *bound)
+{
assert(is_Bound(bound));
return get_irn_n(bound, 1);
}
-void set_Bound_index(ir_node *bound, ir_node *idx) {
+void set_Bound_index(ir_node *bound, ir_node *idx)
+{
assert(is_Bound(bound));
set_irn_n(bound, 1, idx);
}
/* Returns the lower bound input of a Bound operation. */
-ir_node *get_Bound_lower(const ir_node *bound) {
+ir_node *get_Bound_lower(const ir_node *bound)
+{
assert(is_Bound(bound));
return get_irn_n(bound, 2);
}
-void set_Bound_lower(ir_node *bound, ir_node *lower) {
+void set_Bound_lower(ir_node *bound, ir_node *lower)
+{
assert(is_Bound(bound));
set_irn_n(bound, 2, lower);
}
/* Returns the upper bound input of a Bound operation. */
-ir_node *get_Bound_upper(const ir_node *bound) {
+ir_node *get_Bound_upper(const ir_node *bound)
+{
assert(is_Bound(bound));
return get_irn_n(bound, 3);
}
-void set_Bound_upper(ir_node *bound, ir_node *upper) {
+void set_Bound_upper(ir_node *bound, ir_node *upper)
+{
assert(is_Bound(bound));
set_irn_n(bound, 3, upper);
}
/* Return the operand of a Pin node. */
-ir_node *get_Pin_op(const ir_node *pin) {
+ir_node *get_Pin_op(const ir_node *pin)
+{
assert(is_Pin(pin));
return get_irn_n(pin, 0);
}
-void set_Pin_op(ir_node *pin, ir_node *node) {
+void set_Pin_op(ir_node *pin, ir_node *node)
+{
assert(is_Pin(pin));
set_irn_n(pin, 0, node);
}
/* Return the assembler text of an ASM pseudo node. */
-ident *get_ASM_text(const ir_node *node) {
+ident *get_ASM_text(const ir_node *node)
+{
assert(is_ASM(node));
return node->attr.assem.asm_text;
}
/* Return the number of input constraints for an ASM node. */
-int get_ASM_n_input_constraints(const ir_node *node) {
+int get_ASM_n_input_constraints(const ir_node *node)
+{
assert(is_ASM(node));
return ARR_LEN(node->attr.assem.inputs);
}
/* Return the input constraints for an ASM node. This is a flexible array. */
-const ir_asm_constraint *get_ASM_input_constraints(const ir_node *node) {
+const ir_asm_constraint *get_ASM_input_constraints(const ir_node *node)
+{
assert(is_ASM(node));
return node->attr.assem.inputs;
}
/* Return the number of output constraints for an ASM node. */
-int get_ASM_n_output_constraints(const ir_node *node) {
+int get_ASM_n_output_constraints(const ir_node *node)
+{
assert(is_ASM(node));
return ARR_LEN(node->attr.assem.outputs);
}
/* Return the output constraints for an ASM node. */
-const ir_asm_constraint *get_ASM_output_constraints(const ir_node *node) {
+const ir_asm_constraint *get_ASM_output_constraints(const ir_node *node)
+{
assert(is_ASM(node));
return node->attr.assem.outputs;
}
/* Return the number of clobbered registers for an ASM node. */
-int get_ASM_n_clobbers(const ir_node *node) {
+int get_ASM_n_clobbers(const ir_node *node)
+{
assert(is_ASM(node));
return ARR_LEN(node->attr.assem.clobber);
}
/* Return the list of clobbered registers for an ASM node. */
-ident **get_ASM_clobbers(const ir_node *node) {
+ident **get_ASM_clobbers(const ir_node *node)
+{
assert(is_ASM(node));
return node->attr.assem.clobber;
}
/* returns the graph of a node */
ir_graph *
-get_irn_irg(const ir_node *node) {
+get_irn_irg(const ir_node *node)
+{
/*
* Do not use get_nodes_Block() here, because this
* will check the pinned state.
/*----------------------------------------------------------------*/
ir_node *
-skip_Proj(ir_node *node) {
+skip_Proj(ir_node *node)
+{
/* don't assert node !!! */
if (node == NULL)
return NULL;
}
const ir_node *
-skip_Proj_const(const ir_node *node) {
+skip_Proj_const(const ir_node *node)
+{
/* don't assert node !!! */
if (node == NULL)
return NULL;
}
ir_node *
-skip_Tuple(ir_node *node) {
+skip_Tuple(ir_node *node)
+{
ir_node *pred;
ir_op *op;
}
/* returns operand of node if node is a Cast */
-ir_node *skip_Cast(ir_node *node) {
+ir_node *skip_Cast(ir_node *node)
+{
if (is_Cast(node))
return get_Cast_op(node);
return node;
}
/* returns operand of node if node is a Cast */
-const ir_node *skip_Cast_const(const ir_node *node) {
+const ir_node *skip_Cast_const(const ir_node *node)
+{
if (is_Cast(node))
return get_Cast_op(node);
return node;
}
/* returns operand of node if node is a Pin */
-ir_node *skip_Pin(ir_node *node) {
+ir_node *skip_Pin(ir_node *node)
+{
if (is_Pin(node))
return get_Pin_op(node);
return node;
}
/* returns operand of node if node is a Confirm */
-ir_node *skip_Confirm(ir_node *node) {
+ir_node *skip_Confirm(ir_node *node)
+{
if (is_Confirm(node))
return get_Confirm_value(node);
return node;
}
/* skip all high-level ops */
-ir_node *skip_HighLevel_ops(ir_node *node) {
+ir_node *skip_HighLevel_ops(ir_node *node)
+{
while (is_op_highlevel(get_irn_op(node))) {
node = get_irn_n(node, 0);
}
* Moreover, it CANNOT be switched off using get_opt_normalize() ...
*/
ir_node *
-skip_Id(ir_node *node) {
+skip_Id(ir_node *node)
+{
ir_node *pred;
/* don't assert node !!! */
}
}
-void skip_Id_and_store(ir_node **node) {
+void skip_Id_and_store(ir_node **node)
+{
ir_node *n = *node;
if (!n || (n->op != op_Id)) return;
}
/* Returns true if the operation manipulates control flow. */
-int is_cfop(const ir_node *node) {
+int is_cfop(const ir_node *node)
+{
return is_op_cfopcode(get_irn_op(node));
}
/* Returns true if the operation manipulates interprocedural control flow:
CallBegin, EndReg, EndExcept */
-int is_ip_cfop(const ir_node *node) {
+int is_ip_cfop(const ir_node *node)
+{
return is_ip_cfopcode(get_irn_op(node));
}
/* Returns true if the operation can change the control flow because
of an exception. */
int
-is_fragile_op(const ir_node *node) {
+is_fragile_op(const ir_node *node)
+{
return is_op_fragile(get_irn_op(node));
}
/* Returns the memory operand of fragile operations. */
-ir_node *get_fragile_op_mem(ir_node *node) {
+ir_node *get_fragile_op_mem(ir_node *node)
+{
assert(node && is_fragile_op(node));
switch (get_irn_opcode(node)) {
}
/* Returns the result mode of a Div operation. */
-ir_mode *get_divop_resmod(const ir_node *node) {
+ir_mode *get_divop_resmod(const ir_node *node)
+{
switch (get_irn_opcode(node)) {
case iro_Quot : return get_Quot_resmode(node);
case iro_DivMod: return get_DivMod_resmode(node);
}
/* Returns true if the operation is a forking control flow operation. */
-int (is_irn_forking)(const ir_node *node) {
+int (is_irn_forking)(const ir_node *node)
+{
return _is_irn_forking(node);
}
-void (copy_node_attr)(const ir_node *old_node, ir_node *new_node) {
+void (copy_node_attr)(const ir_node *old_node, ir_node *new_node)
+{
_copy_node_attr(old_node, new_node);
}
/* Return the type associated with the value produced by n
* if the node remarks this type as it is the case for
* Cast, Const, SymConst and some Proj nodes. */
-ir_type *(get_irn_type)(ir_node *node) {
+ir_type *(get_irn_type)(ir_node *node)
+{
return _get_irn_type(node);
}
/* Return the type attribute of a node n (SymConst, Call, Alloc, Free,
Cast) or NULL.*/
-ir_type *(get_irn_type_attr)(ir_node *node) {
+ir_type *(get_irn_type_attr)(ir_node *node)
+{
return _get_irn_type_attr(node);
}
/* Return the entity attribute of a node n (SymConst, Sel) or NULL. */
-ir_entity *(get_irn_entity_attr)(ir_node *node) {
+ir_entity *(get_irn_entity_attr)(ir_node *node)
+{
return _get_irn_entity_attr(node);
}
/* Returns non-zero for constant-like nodes. */
-int (is_irn_constlike)(const ir_node *node) {
+int (is_irn_constlike)(const ir_node *node)
+{
return _is_irn_constlike(node);
}
* Returns non-zero for nodes that are allowed to have keep-alives and
* are neither Block nor PhiM.
*/
-int (is_irn_keep)(const ir_node *node) {
+int (is_irn_keep)(const ir_node *node)
+{
return _is_irn_keep(node);
}
/*
* Returns non-zero for nodes that are always placed in the start block.
*/
-int (is_irn_start_block_placed)(const ir_node *node) {
+int (is_irn_start_block_placed)(const ir_node *node)
+{
return _is_irn_start_block_placed(node);
}
/* Returns non-zero for nodes that are machine operations. */
-int (is_irn_machine_op)(const ir_node *node) {
+int (is_irn_machine_op)(const ir_node *node)
+{
return _is_irn_machine_op(node);
}
/* Returns non-zero for nodes that are machine operands. */
-int (is_irn_machine_operand)(const ir_node *node) {
+int (is_irn_machine_operand)(const ir_node *node)
+{
return _is_irn_machine_operand(node);
}
/* Returns non-zero for nodes that have the n'th user machine flag set. */
-int (is_irn_machine_user)(const ir_node *node, unsigned n) {
+int (is_irn_machine_user)(const ir_node *node, unsigned n)
+{
return _is_irn_machine_user(node, n);
}
/* Returns non-zero for nodes that are CSE neutral to its users. */
-int (is_irn_cse_neutral)(const ir_node *node) {
+int (is_irn_cse_neutral)(const ir_node *node)
+{
return _is_irn_cse_neutral(node);
}
/* Gets the string representation of the jump prediction .*/
-const char *get_cond_jmp_predicate_name(cond_jmp_predicate pred) {
+const char *get_cond_jmp_predicate_name(cond_jmp_predicate pred)
+{
#define X(a) case a: return #a;
switch (pred) {
X(COND_JMP_PRED_NONE);
}
/* Returns the conditional jump prediction of a Cond node. */
-cond_jmp_predicate (get_Cond_jmp_pred)(const ir_node *cond) {
+cond_jmp_predicate (get_Cond_jmp_pred)(const ir_node *cond)
+{
return _get_Cond_jmp_pred(cond);
}
/* Sets a new conditional jump prediction. */
-void (set_Cond_jmp_pred)(ir_node *cond, cond_jmp_predicate pred) {
+void (set_Cond_jmp_pred)(ir_node *cond, cond_jmp_predicate pred)
+{
_set_Cond_jmp_pred(cond, pred);
}
/** the get_type operation must be always implemented and return a firm type */
-static ir_type *get_Default_type(ir_node *n) {
+static ir_type *get_Default_type(ir_node *n)
+{
(void) n;
return get_unknown_type();
}
/* Sets the get_type operation for an ir_op_ops. */
-ir_op_ops *firm_set_default_get_type(ir_opcode code, ir_op_ops *ops) {
+ir_op_ops *firm_set_default_get_type(ir_opcode code, ir_op_ops *ops)
+{
switch (code) {
case iro_Const: ops->get_type = get_Const_type; break;
case iro_SymConst: ops->get_type = get_SymConst_value_type; break;
}
/** Return the attribute type of a SymConst node if exists */
-static ir_type *get_SymConst_attr_type(ir_node *self) {
+static ir_type *get_SymConst_attr_type(ir_node *self)
+{
symconst_kind kind = get_SymConst_kind(self);
if (SYMCONST_HAS_TYPE(kind))
return get_SymConst_type(self);
}
/** Return the attribute entity of a SymConst node if exists */
-static ir_entity *get_SymConst_attr_entity(ir_node *self) {
+static ir_entity *get_SymConst_attr_entity(ir_node *self)
+{
symconst_kind kind = get_SymConst_kind(self);
if (SYMCONST_HAS_ENT(kind))
return get_SymConst_entity(self);
}
/** the get_type_attr operation must be always implemented */
-static ir_type *get_Null_type(ir_node *n) {
+static ir_type *get_Null_type(ir_node *n)
+{
(void) n;
return firm_unknown_type;
}
/* Sets the get_type operation for an ir_op_ops. */
-ir_op_ops *firm_set_default_get_type_attr(ir_opcode code, ir_op_ops *ops) {
+ir_op_ops *firm_set_default_get_type_attr(ir_opcode code, ir_op_ops *ops)
+{
switch (code) {
case iro_SymConst: ops->get_type_attr = get_SymConst_attr_type; break;
case iro_Call: ops->get_type_attr = get_Call_type; break;
}
/** the get_entity_attr operation must be always implemented */
-static ir_entity *get_Null_ent(ir_node *n) {
+static ir_entity *get_Null_ent(ir_node *n)
+{
(void) n;
return NULL;
}
/* Sets the get_type operation for an ir_op_ops. */
-ir_op_ops *firm_set_default_get_entity_attr(ir_opcode code, ir_op_ops *ops) {
+ir_op_ops *firm_set_default_get_entity_attr(ir_opcode code, ir_op_ops *ops)
+{
switch (code) {
case iro_SymConst: ops->get_entity_attr = get_SymConst_attr_entity; break;
case iro_Sel: ops->get_entity_attr = _get_Sel_entity; break;
}
/* Sets the debug information of a node. */
-void (set_irn_dbg_info)(ir_node *n, dbg_info *db) {
+void (set_irn_dbg_info)(ir_node *n, dbg_info *db)
+{
_set_irn_dbg_info(n, db);
}
*
* @param n The node.
*/
-dbg_info *(get_irn_dbg_info)(const ir_node *n) {
+dbg_info *(get_irn_dbg_info)(const ir_node *n)
+{
return _get_irn_dbg_info(n);
}
/* checks whether a node represents a global address */
-int is_Global(const ir_node *node) {
+int is_Global(const ir_node *node)
+{
return is_SymConst_addr_ent(node);
}
/* returns the entity of a global address */
-ir_entity *get_Global_entity(const ir_node *node) {
+ir_entity *get_Global_entity(const ir_node *node)
+{
return get_SymConst_entity(node);
}
/*
* Calculate a hash value of a node.
*/
-unsigned firm_default_hash(const ir_node *node) {
+unsigned firm_default_hash(const ir_node *node)
+{
unsigned h;
int i, irn_arity;
* Copies all attributes stored in the old node to the new node.
* Assumes both have the same opcode and sufficient size.
*/
-void default_copy_attr(const ir_node *old_node, ir_node *new_node) {
+void default_copy_attr(const ir_node *old_node, ir_node *new_node)
+{
unsigned size = firm_add_node_size;
assert(get_irn_op(old_node) == get_irn_op(new_node));
* Copies all Call attributes stored in the old node to the new node.
*/
static void
-call_copy_attr(const ir_node *old_node, ir_node *new_node) {
+call_copy_attr(const ir_node *old_node, ir_node *new_node)
+{
default_copy_attr(old_node, new_node);
remove_Call_callee_arr(new_node);
} /* call_copy_attr */
* Copies all Block attributes stored in the old node to the new node.
*/
static void
-block_copy_attr(const ir_node *old_node, ir_node *new_node) {
+block_copy_attr(const ir_node *old_node, ir_node *new_node)
+{
ir_graph *irg = current_ir_graph;
default_copy_attr(old_node, new_node);
* Copies all phi attributes stored in old node to the new node
*/
static void
-phi_copy_attr(const ir_node *old_node, ir_node *new_node) {
+phi_copy_attr(const ir_node *old_node, ir_node *new_node)
+{
ir_graph *irg = current_ir_graph;
default_copy_attr(old_node, new_node);
* Copies all filter attributes stored in old node to the new node
*/
static void
-filter_copy_attr(const ir_node *old_node, ir_node *new_node) {
+filter_copy_attr(const ir_node *old_node, ir_node *new_node)
+{
ir_graph *irg = current_ir_graph;
default_copy_attr(old_node, new_node);
* Copies all ASM attributes stored in old node to the new node
*/
static void
-ASM_copy_attr(const ir_node *old_node, ir_node *new_node) {
+ASM_copy_attr(const ir_node *old_node, ir_node *new_node)
+{
ir_graph *irg = current_ir_graph;
default_copy_attr(old_node, new_node);
* @return
* The operations.
*/
-static ir_op_ops *firm_set_default_copy_attr(ir_opcode code, ir_op_ops *ops) {
+static ir_op_ops *firm_set_default_copy_attr(ir_opcode code, ir_op_ops *ops)
+{
switch(code) {
case iro_Call:
ops->copy_attr = call_copy_attr;
return res;
} /* new_ir_op */
-void free_ir_op(ir_op *code) {
+void free_ir_op(ir_op *code)
+{
hook_free_ir_op(code);
remove_irp_opcode(code);
} /* free_ir_op */
/* Returns the string for the opcode. */
-const char *get_op_name (const ir_op *op) {
+const char *get_op_name (const ir_op *op)
+{
return get_id_str(op->name);
} /* get_op_name */
-unsigned (get_op_code)(const ir_op *op){
+unsigned (get_op_code)(const ir_op *op)
+{
return _get_op_code(op);
} /* get_op_code */
-ident *(get_op_ident)(const ir_op *op){
+ident *(get_op_ident)(const ir_op *op)
+{
return _get_op_ident(op);
} /* get_op_ident */
-const char *get_op_pin_state_name(op_pin_state s) {
+const char *get_op_pin_state_name(op_pin_state s)
+{
switch(s) {
#define XXX(s) case s: return #s
XXX(op_pin_state_floats);
return "<none>";
} /* get_op_pin_state_name */
-op_pin_state (get_op_pinned)(const ir_op *op) {
+op_pin_state (get_op_pinned)(const ir_op *op)
+{
return _get_op_pinned(op);
} /* get_op_pinned */
/* Sets op_pin_state_pinned in the opcode. Setting it to floating has no effect
for Phi, Block and control flow nodes. */
-void set_op_pinned(ir_op *op, op_pin_state pinned) {
+void set_op_pinned(ir_op *op, op_pin_state pinned)
+{
if (op == op_Block || op == op_Phi || is_op_cfopcode(op)) return;
op->pin_state = pinned;
} /* set_op_pinned */
/* retrieve the next free opcode */
-unsigned get_next_ir_opcode(void) {
+unsigned get_next_ir_opcode(void)
+{
return next_iro++;
} /* get_next_ir_opcode */
/* Returns the next free n IR opcode number, allows to register a bunch of user ops */
-unsigned get_next_ir_opcodes(unsigned num) {
+unsigned get_next_ir_opcodes(unsigned num)
+{
unsigned base = next_iro;
next_iro += num;
return base;
} /* get_next_ir_opcodes */
/* Returns the generic function pointer from an ir operation. */
-op_func (get_generic_function_ptr)(const ir_op *op) {
+op_func (get_generic_function_ptr)(const ir_op *op)
+{
return _get_generic_function_ptr(op);
} /* get_generic_function_ptr */
/* Store a generic function pointer into an ir operation. */
-void (set_generic_function_ptr)(ir_op *op, op_func func) {
+void (set_generic_function_ptr)(ir_op *op, op_func func)
+{
_set_generic_function_ptr(op, func);
} /* set_generic_function_ptr */
/* Returns the ir_op_ops of an ir_op. */
-const ir_op_ops *(get_op_ops)(const ir_op *op) {
+const ir_op_ops *(get_op_ops)(const ir_op *op)
+{
return _get_op_ops(op);
} /* get_op_ops */
-irop_flags get_op_flags(const ir_op *op) {
+irop_flags get_op_flags(const ir_op *op)
+{
return op->flags;
}
/**
* Returns the tarval of a Const node or tarval_bad for all other nodes.
*/
-static tarval *default_value_of(const ir_node *n) {
+static tarval *default_value_of(const ir_node *n)
+{
if (is_Const(n))
return get_Const_tarval(n); /* might return tarval_bad */
else
value_of_func value_of_ptr = default_value_of;
/* * Set a new value_of function. */
-void set_value_of_func(value_of_func func) {
+void set_value_of_func(value_of_func func)
+{
if (func != NULL)
value_of_ptr = func;
else
/**
* Return the value of a Constant.
*/
-static tarval *computed_value_Const(const ir_node *n) {
+static tarval *computed_value_Const(const ir_node *n)
+{
return get_Const_tarval(n);
} /* computed_value_Const */
/**
* Return the value of a 'sizeof', 'alignof' or 'offsetof' SymConst.
*/
-static tarval *computed_value_SymConst(const ir_node *n) {
+static tarval *computed_value_SymConst(const ir_node *n)
+{
ir_type *type;
ir_entity *ent;
/**
* Return the value of an Add.
*/
-static tarval *computed_value_Add(const ir_node *n) {
+static tarval *computed_value_Add(const ir_node *n)
+{
ir_node *a = get_Add_left(n);
ir_node *b = get_Add_right(n);
* Return the value of a Sub.
* Special case: a - a
*/
-static tarval *computed_value_Sub(const ir_node *n) {
+static tarval *computed_value_Sub(const ir_node *n)
+{
ir_mode *mode = get_irn_mode(n);
ir_node *a = get_Sub_left(n);
ir_node *b = get_Sub_right(n);
* Return the value of a Carry.
* Special : a op 0, 0 op b
*/
-static tarval *computed_value_Carry(const ir_node *n) {
+static tarval *computed_value_Carry(const ir_node *n)
+{
ir_node *a = get_binop_left(n);
ir_node *b = get_binop_right(n);
ir_mode *m = get_irn_mode(n);
* Return the value of a Borrow.
* Special : a op 0
*/
-static tarval *computed_value_Borrow(const ir_node *n) {
+static tarval *computed_value_Borrow(const ir_node *n)
+{
ir_node *a = get_binop_left(n);
ir_node *b = get_binop_right(n);
ir_mode *m = get_irn_mode(n);
/**
* Return the value of an unary Minus.
*/
-static tarval *computed_value_Minus(const ir_node *n) {
+static tarval *computed_value_Minus(const ir_node *n)
+{
ir_node *a = get_Minus_op(n);
tarval *ta = value_of(a);
/**
* Return the value of a Mul.
*/
-static tarval *computed_value_Mul(const ir_node *n) {
+static tarval *computed_value_Mul(const ir_node *n)
+{
ir_node *a = get_Mul_left(n);
ir_node *b = get_Mul_right(n);
ir_mode *mode;
/**
* Return the value of an Abs.
*/
-static tarval *computed_value_Abs(const ir_node *n) {
+static tarval *computed_value_Abs(const ir_node *n)
+{
ir_node *a = get_Abs_op(n);
tarval *ta = value_of(a);
* Return the value of an And.
* Special case: a & 0, 0 & b
*/
-static tarval *computed_value_And(const ir_node *n) {
+static tarval *computed_value_And(const ir_node *n)
+{
ir_node *a = get_And_left(n);
ir_node *b = get_And_right(n);
* Return the value of an Or.
* Special case: a | 1...1, 1...1 | b
*/
-static tarval *computed_value_Or(const ir_node *n) {
+static tarval *computed_value_Or(const ir_node *n)
+{
ir_node *a = get_Or_left(n);
ir_node *b = get_Or_right(n);
/**
* Return the value of an Eor.
*/
-static tarval *computed_value_Eor(const ir_node *n) {
+static tarval *computed_value_Eor(const ir_node *n)
+{
ir_node *a = get_Eor_left(n);
ir_node *b = get_Eor_right(n);
/**
* Return the value of a Not.
*/
-static tarval *computed_value_Not(const ir_node *n) {
+static tarval *computed_value_Not(const ir_node *n)
+{
ir_node *a = get_Not_op(n);
tarval *ta = value_of(a);
/**
* Return the value of a Shl.
*/
-static tarval *computed_value_Shl(const ir_node *n) {
+static tarval *computed_value_Shl(const ir_node *n)
+{
ir_node *a = get_Shl_left(n);
ir_node *b = get_Shl_right(n);
/**
* Return the value of a Shr.
*/
-static tarval *computed_value_Shr(const ir_node *n) {
+static tarval *computed_value_Shr(const ir_node *n)
+{
ir_node *a = get_Shr_left(n);
ir_node *b = get_Shr_right(n);
/**
* Return the value of a Shrs.
*/
-static tarval *computed_value_Shrs(const ir_node *n) {
+static tarval *computed_value_Shrs(const ir_node *n)
+{
ir_node *a = get_Shrs_left(n);
ir_node *b = get_Shrs_right(n);
/**
* Return the value of a Rotl.
*/
-static tarval *computed_value_Rotl(const ir_node *n) {
+static tarval *computed_value_Rotl(const ir_node *n)
+{
ir_node *a = get_Rotl_left(n);
ir_node *b = get_Rotl_right(n);
/**
* Return the value of a Conv.
*/
-static tarval *computed_value_Conv(const ir_node *n) {
+static tarval *computed_value_Conv(const ir_node *n)
+{
ir_node *a = get_Conv_op(n);
tarval *ta = value_of(a);
* Calculate the value of a Mux: can be evaluated, if the
* sel and the right input are known.
*/
-static tarval *computed_value_Mux(const ir_node *n) {
+static tarval *computed_value_Mux(const ir_node *n)
+{
ir_node *sel = get_Mux_sel(n);
tarval *ts = value_of(sel);
* Calculate the value of a Confirm: can be evaluated,
* if it has the form Confirm(x, '=', Const).
*/
-static tarval *computed_value_Confirm(const ir_node *n) {
+static tarval *computed_value_Confirm(const ir_node *n)
+{
/*
* Beware: we might produce Phi(Confirm(x == true), Confirm(x == false)).
* Do NOT optimize them away (jump threading wants them), so wait until
* only 1 is used.
* There are several case where we can evaluate a Cmp node, see later.
*/
-static tarval *computed_value_Proj_Cmp(const ir_node *n) {
+static tarval *computed_value_Proj_Cmp(const ir_node *n)
+{
ir_node *cmp = get_Proj_pred(n);
ir_node *left = get_Cmp_left(cmp);
ir_node *right = get_Cmp_right(cmp);
/**
* Return the value of a floating point Quot.
*/
-static tarval *do_computed_value_Quot(const ir_node *a, const ir_node *b) {
+static tarval *do_computed_value_Quot(const ir_node *a, const ir_node *b)
+{
tarval *ta = value_of(a);
tarval *tb = value_of(b);
* Calculate the value of an integer Div of two nodes.
* Special case: 0 / b
*/
-static tarval *do_computed_value_Div(const ir_node *a, const ir_node *b) {
+static tarval *do_computed_value_Div(const ir_node *a, const ir_node *b)
+{
tarval *ta = value_of(a);
tarval *tb;
const ir_node *dummy;
* Calculate the value of an integer Mod of two nodes.
* Special case: a % 1
*/
-static tarval *do_computed_value_Mod(const ir_node *a, const ir_node *b) {
+static tarval *do_computed_value_Mod(const ir_node *a, const ir_node *b)
+{
tarval *ta = value_of(a);
tarval *tb = value_of(b);
/**
* Return the value of a Proj(DivMod).
*/
-static tarval *computed_value_Proj_DivMod(const ir_node *n) {
+static tarval *computed_value_Proj_DivMod(const ir_node *n)
+{
long proj_nr = get_Proj_proj(n);
/* compute either the Div or the Mod part */
/**
* Return the value of a Proj(Div).
*/
-static tarval *computed_value_Proj_Div(const ir_node *n) {
+static tarval *computed_value_Proj_Div(const ir_node *n)
+{
long proj_nr = get_Proj_proj(n);
if (proj_nr == pn_Div_res) {
/**
* Return the value of a Proj(Mod).
*/
-static tarval *computed_value_Proj_Mod(const ir_node *n) {
+static tarval *computed_value_Proj_Mod(const ir_node *n)
+{
long proj_nr = get_Proj_proj(n);
if (proj_nr == pn_Mod_res) {
/**
* Return the value of a Proj(Quot).
*/
-static tarval *computed_value_Proj_Quot(const ir_node *n) {
+static tarval *computed_value_Proj_Quot(const ir_node *n)
+{
long proj_nr = get_Proj_proj(n);
if (proj_nr == pn_Quot_res) {
/**
* Return the value of a Proj.
*/
-static tarval *computed_value_Proj(const ir_node *proj) {
+static tarval *computed_value_Proj(const ir_node *proj)
+{
ir_node *n = get_Proj_pred(proj);
if (n->op->ops.computed_value_Proj != NULL)
*
* @param n The node to be evaluated
*/
-tarval *computed_value(const ir_node *n) {
+tarval *computed_value(const ir_node *n)
+{
if(mode_is_int(get_irn_mode(n)) && n->vrp.valid && tarval_is_all_one(
tarval_or(n->vrp.bits_set, n->vrp.bits_not_set))) {
return n->vrp.bits_set;
* Returns an equivalent node for a Jmp, a Bad :-)
* Of course this only happens if the Block of the Jmp is dead.
*/
-static ir_node *equivalent_node_Jmp(ir_node *n) {
+static ir_node *equivalent_node_Jmp(ir_node *n)
+{
ir_node *oldn = n;
/* unreachable code elimination */
* Optimize operations that are commutative and have neutral 0,
* so a op 0 = 0 op a = a.
*/
-static ir_node *equivalent_node_neutral_zero(ir_node *n) {
+static ir_node *equivalent_node_neutral_zero(ir_node *n)
+{
ir_node *oldn = n;
ir_node *a = get_binop_left(n);
/**
* Eor is commutative and has neutral 0.
*/
-static ir_node *equivalent_node_Eor(ir_node *n) {
+static ir_node *equivalent_node_Eor(ir_node *n)
+{
ir_node *oldn = n;
ir_node *a;
ir_node *b;
* Beware: The Mode of an Add may be different than the mode of its
* predecessors, so we could not return a predecessors in all cases.
*/
-static ir_node *equivalent_node_Add(ir_node *n) {
+static ir_node *equivalent_node_Add(ir_node *n)
+{
ir_node *oldn = n;
ir_node *left, *right;
ir_mode *mode = get_irn_mode(n);
* optimize operations that are not commutative but have neutral 0 on left,
* so a op 0 = a.
*/
-static ir_node *equivalent_node_left_zero(ir_node *n) {
+static ir_node *equivalent_node_left_zero(ir_node *n)
+{
ir_node *oldn = n;
ir_node *a = get_binop_left(n);
* Beware: The Mode of a Sub may be different than the mode of its
* predecessors, so we could not return a predecessors in all cases.
*/
-static ir_node *equivalent_node_Sub(ir_node *n) {
+static ir_node *equivalent_node_Sub(ir_node *n)
+{
ir_node *oldn = n;
ir_node *b;
ir_mode *mode = get_irn_mode(n);
* We handle it anyway here but the better way would be a
* flag. This would be needed for Pascal for instance.
*/
-static ir_node *equivalent_node_idempotent_unop(ir_node *n) {
+static ir_node *equivalent_node_idempotent_unop(ir_node *n)
+{
ir_node *oldn = n;
ir_node *pred = get_unop_op(n);
/**
* Optimize a * 1 = 1 * a = a.
*/
-static ir_node *equivalent_node_Mul(ir_node *n) {
+static ir_node *equivalent_node_Mul(ir_node *n)
+{
ir_node *oldn = n;
ir_node *a = get_Mul_left(n);
/**
* Use algebraic simplification a | a = a | 0 = 0 | a = a.
*/
-static ir_node *equivalent_node_Or(ir_node *n) {
+static ir_node *equivalent_node_Or(ir_node *n)
+{
ir_node *oldn = n;
ir_node *a = get_Or_left(n);
/**
* Optimize a & 0b1...1 = 0b1...1 & a = a & a = (a|X) & a = a.
*/
-static ir_node *equivalent_node_And(ir_node *n) {
+static ir_node *equivalent_node_And(ir_node *n)
+{
ir_node *oldn = n;
ir_node *a = get_And_left(n);
/**
* Try to remove useless Conv's:
*/
-static ir_node *equivalent_node_Conv(ir_node *n) {
+static ir_node *equivalent_node_Conv(ir_node *n)
+{
ir_node *oldn = n;
ir_node *a = get_Conv_op(n);
* A Cast may be removed if the type of the previous node
* is already the type of the Cast.
*/
-static ir_node *equivalent_node_Cast(ir_node *n) {
+static ir_node *equivalent_node_Cast(ir_node *n)
+{
ir_node *oldn = n;
ir_node *pred = get_Cast_op(n);
* - fold Phi-nodes, iff they have only one predecessor except
* themselves.
*/
-static ir_node *equivalent_node_Phi(ir_node *n) {
+static ir_node *equivalent_node_Phi(ir_node *n)
+{
int i, n_preds;
ir_node *oldn = n;
* - fold Sync-nodes, iff they have only one predecessor except
* themselves.
*/
-static ir_node *equivalent_node_Sync(ir_node *n) {
+static ir_node *equivalent_node_Sync(ir_node *n)
+{
int arity = get_Sync_n_preds(n);
int i;
/**
* Optimize Proj(Tuple).
*/
-static ir_node *equivalent_node_Proj_Tuple(ir_node *proj) {
+static ir_node *equivalent_node_Proj_Tuple(ir_node *proj)
+{
ir_node *oldn = proj;
ir_node *tuple = get_Proj_pred(proj);
/**
* Optimize a / 1 = a.
*/
-static ir_node *equivalent_node_Proj_Div(ir_node *proj) {
+static ir_node *equivalent_node_Proj_Div(ir_node *proj)
+{
ir_node *oldn = proj;
ir_node *div = get_Proj_pred(proj);
ir_node *b = get_Div_right(div);
/**
* Optimize a / 1.0 = a.
*/
-static ir_node *equivalent_node_Proj_Quot(ir_node *proj) {
+static ir_node *equivalent_node_Proj_Quot(ir_node *proj)
+{
ir_node *oldn = proj;
ir_node *quot = get_Proj_pred(proj);
ir_node *b = get_Quot_right(quot);
/**
* Optimize a / 1 = a.
*/
-static ir_node *equivalent_node_Proj_DivMod(ir_node *proj) {
+static ir_node *equivalent_node_Proj_DivMod(ir_node *proj)
+{
ir_node *oldn = proj;
ir_node *divmod = get_Proj_pred(proj);
ir_node *b = get_DivMod_right(divmod);
/**
* Optimize CopyB(mem, x, x) into a Nop.
*/
-static ir_node *equivalent_node_Proj_CopyB(ir_node *proj) {
+static ir_node *equivalent_node_Proj_CopyB(ir_node *proj)
+{
ir_node *oldn = proj;
ir_node *copyb = get_Proj_pred(proj);
ir_node *a = get_CopyB_dst(copyb);
/**
* Optimize Bounds(idx, idx, upper) into idx.
*/
-static ir_node *equivalent_node_Proj_Bound(ir_node *proj) {
+static ir_node *equivalent_node_Proj_Bound(ir_node *proj)
+{
ir_node *oldn = proj;
ir_node *bound = get_Proj_pred(proj);
ir_node *idx = get_Bound_index(bound);
/**
* Optimize an Exception Proj(Load) with a non-null address.
*/
-static ir_node *equivalent_node_Proj_Load(ir_node *proj) {
+static ir_node *equivalent_node_Proj_Load(ir_node *proj)
+{
if (get_opt_ldst_only_null_ptr_exceptions()) {
if (get_irn_mode(proj) == mode_X) {
ir_node *load = get_Proj_pred(proj);
/**
* Optimize an Exception Proj(Store) with a non-null address.
*/
-static ir_node *equivalent_node_Proj_Store(ir_node *proj) {
+static ir_node *equivalent_node_Proj_Store(ir_node *proj)
+{
if (get_opt_ldst_only_null_ptr_exceptions()) {
if (get_irn_mode(proj) == mode_X) {
ir_node *store = get_Proj_pred(proj);
* Does all optimizations on nodes that must be done on its Proj's
* because of creating new nodes.
*/
-static ir_node *equivalent_node_Proj(ir_node *proj) {
+static ir_node *equivalent_node_Proj(ir_node *proj)
+{
ir_node *n = get_Proj_pred(proj);
if (get_irn_mode(proj) == mode_X) {
/**
* Remove Id's.
*/
-static ir_node *equivalent_node_Id(ir_node *n) {
+static ir_node *equivalent_node_Id(ir_node *n)
+{
ir_node *oldn = n;
do {
* Remove Confirm nodes if setting is on.
* Replace Confirms(x, '=', Constlike) by Constlike.
*/
-static ir_node *equivalent_node_Confirm(ir_node *n) {
+static ir_node *equivalent_node_Confirm(ir_node *n)
+{
ir_node *pred = get_Confirm_value(n);
pn_Cmp pnc = get_Confirm_cmp(n);
* If a node returns a Tuple we can not just skip it. If the size of the
* in array fits, we transform n into a tuple (e.g., Div).
*/
-ir_node *equivalent_node(ir_node *n) {
+ir_node *equivalent_node(ir_node *n)
+{
if (n->op->ops.equivalent_node)
return n->op->ops.equivalent_node(n);
return n;
* Returns non-zero if a node is a Phi node
* with all predecessors constant.
*/
-static int is_const_Phi(ir_node *n) {
+static int is_const_Phi(ir_node *n)
+{
int i;
if (! is_Phi(n) || get_irn_arity(n) == 0)
*
* @return a new Phi node if the conversion was successful, NULL else
*/
-static ir_node *apply_binop_on_phi(ir_node *phi, tarval *other, eval_func eval, ir_mode *mode, int left) {
+static ir_node *apply_binop_on_phi(ir_node *phi, tarval *other, eval_func eval, ir_mode *mode, int left)
+{
tarval *tv;
void **res;
ir_node *pred;
*
* @return a new Phi node if the conversion was successful, NULL else
*/
-static ir_node *apply_binop_on_2_phis(ir_node *a, ir_node *b, eval_func eval, ir_mode *mode) {
+static ir_node *apply_binop_on_2_phis(ir_node *a, ir_node *b, eval_func eval, ir_mode *mode)
+{
tarval *tv_l, *tv_r, *tv;
void **res;
ir_node *pred;
*
* @return a new Phi node if the conversion was successful, NULL else
*/
-static ir_node *apply_unop_on_phi(ir_node *phi, tarval *(*eval)(tarval *)) {
+static ir_node *apply_unop_on_phi(ir_node *phi, tarval *(*eval)(tarval *))
+{
tarval *tv;
void **res;
ir_node *pred;
*
* @return a new Phi node if the conversion was successful, NULL else
*/
-static ir_node *apply_conv_on_phi(ir_node *phi, ir_mode *mode) {
+static ir_node *apply_conv_on_phi(ir_node *phi, ir_mode *mode)
+{
tarval *tv;
void **res;
ir_node *pred;
* SubP(P, ConvIs(Iu)), SubP(P, ConvIu(Is)).
* If possible, remove the Conv's.
*/
-static ir_node *transform_node_AddSub(ir_node *n) {
+static ir_node *transform_node_AddSub(ir_node *n)
+{
ir_mode *mode = get_irn_mode(n);
if (mode_is_reference(mode)) {
* Transform Add(a,-b) into Sub(a,b).
* Reassociation might fold this further.
*/
-static ir_node *transform_node_Add(ir_node *n) {
+static ir_node *transform_node_Add(ir_node *n)
+{
ir_mode *mode;
ir_node *a, *b, *c, *oldn = n;
/**
* returns -cnst or NULL if impossible
*/
-static ir_node *const_negate(ir_node *cnst) {
+static ir_node *const_negate(ir_node *cnst)
+{
tarval *tv = tarval_neg(get_Const_tarval(cnst));
dbg_info *dbgi = get_irn_dbg_info(cnst);
ir_graph *irg = get_irn_irg(cnst);
* Sub(x, Add(x, a)) -> -a
* Sub(x, Const) -> Add(x, -Const)
*/
-static ir_node *transform_node_Sub(ir_node *n) {
+static ir_node *transform_node_Sub(ir_node *n)
+{
ir_mode *mode;
ir_node *oldn = n;
ir_node *a, *b, *c;
* Several transformation done on n*n=2n bits mul.
* These transformations must be done here because new nodes may be produced.
*/
-static ir_node *transform_node_Mul2n(ir_node *n, ir_mode *mode) {
+static ir_node *transform_node_Mul2n(ir_node *n, ir_mode *mode)
+{
ir_node *oldn = n;
ir_node *a = get_Mul_left(n);
ir_node *b = get_Mul_right(n);
* Do constant evaluation of Phi nodes.
* Do architecture dependent optimizations on Mul nodes
*/
-static ir_node *transform_node_Mul(ir_node *n) {
+static ir_node *transform_node_Mul(ir_node *n)
+{
ir_node *c, *oldn = n;
ir_mode *mode = get_irn_mode(n);
ir_node *a = get_Mul_left(n);
/**
* Transform a Div Node.
*/
-static ir_node *transform_node_Div(ir_node *n) {
+static ir_node *transform_node_Div(ir_node *n)
+{
ir_mode *mode = get_Div_resmode(n);
ir_node *a = get_Div_left(n);
ir_node *b = get_Div_right(n);
/**
* Transform a Mod node.
*/
-static ir_node *transform_node_Mod(ir_node *n) {
+static ir_node *transform_node_Mod(ir_node *n)
+{
ir_mode *mode = get_Mod_resmode(n);
ir_node *a = get_Mod_left(n);
ir_node *b = get_Mod_right(n);
/**
* Transform a DivMod node.
*/
-static ir_node *transform_node_DivMod(ir_node *n) {
+static ir_node *transform_node_DivMod(ir_node *n)
+{
const ir_node *dummy;
ir_node *a = get_DivMod_left(n);
ir_node *b = get_DivMod_right(n);
/**
* Optimize x / c to x * (1/c)
*/
-static ir_node *transform_node_Quot(ir_node *n) {
+static ir_node *transform_node_Quot(ir_node *n)
+{
ir_mode *mode = get_Quot_resmode(n);
ir_node *oldn = n;
* Optimize Abs(x) into -x if x is Confirmed <= 0
* Optimize Abs(-x) into Abs(x)
*/
-static ir_node *transform_node_Abs(ir_node *n) {
+static ir_node *transform_node_Abs(ir_node *n)
+{
ir_node *c, *oldn = n;
ir_node *a = get_Abs_op(n);
ir_mode *mode;
*
* For == and != can be handled in Proj(Cmp)
*/
-static ir_node *transform_node_Cmp(ir_node *n) {
+static ir_node *transform_node_Cmp(ir_node *n)
+{
ir_node *oldn = n;
ir_node *left = get_Cmp_left(n);
ir_node *right = get_Cmp_right(n);
* Replace the Cond by a Jmp if it branches on a constant
* condition.
*/
-static ir_node *transform_node_Cond(ir_node *n) {
+static ir_node *transform_node_Cond(ir_node *n)
+{
ir_node *jmp;
ir_node *a = get_Cond_selector(n);
/**
* Transform an And.
*/
-static ir_node *transform_node_And(ir_node *n) {
+static ir_node *transform_node_And(ir_node *n)
+{
ir_node *c, *oldn = n;
ir_node *a = get_And_left(n);
ir_node *b = get_And_right(n);
/**
* Transform an Eor.
*/
-static ir_node *transform_node_Eor(ir_node *n) {
+static ir_node *transform_node_Eor(ir_node *n)
+{
ir_node *c, *oldn = n;
ir_node *a = get_Eor_left(n);
ir_node *b = get_Eor_right(n);
/**
* Transform a Not.
*/
-static ir_node *transform_node_Not(ir_node *n) {
+static ir_node *transform_node_Not(ir_node *n)
+{
ir_node *c, *oldn = n;
ir_node *a = get_Not_op(n);
ir_mode *mode = get_irn_mode(n);
* -(a >>s (size-1)) = a >>u (size-1)
* -(a * const) -> a * -const
*/
-static ir_node *transform_node_Minus(ir_node *n) {
+static ir_node *transform_node_Minus(ir_node *n)
+{
ir_node *c, *oldn = n;
ir_node *a = get_Minus_op(n);
ir_mode *mode;
/**
* Transform a Cast_type(Const) into a new Const_type
*/
-static ir_node *transform_node_Cast(ir_node *n) {
+static ir_node *transform_node_Cast(ir_node *n)
+{
ir_node *oldn = n;
ir_node *pred = get_Cast_op(n);
ir_type *tp = get_irn_type(n);
/**
* Transform a Proj(Load) with a non-null address.
*/
-static ir_node *transform_node_Proj_Load(ir_node *proj) {
+static ir_node *transform_node_Proj_Load(ir_node *proj)
+{
if (get_opt_ldst_only_null_ptr_exceptions()) {
if (get_irn_mode(proj) == mode_X) {
ir_node *load = get_Proj_pred(proj);
/**
* Transform a Proj(Store) with a non-null address.
*/
-static ir_node *transform_node_Proj_Store(ir_node *proj) {
+static ir_node *transform_node_Proj_Store(ir_node *proj)
+{
if (get_opt_ldst_only_null_ptr_exceptions()) {
if (get_irn_mode(proj) == mode_X) {
ir_node *store = get_Proj_pred(proj);
* Transform a Proj(Div) with a non-zero value.
* Removes the exceptions and routes the memory to the NoMem node.
*/
-static ir_node *transform_node_Proj_Div(ir_node *proj) {
+static ir_node *transform_node_Proj_Div(ir_node *proj)
+{
ir_node *div = get_Proj_pred(proj);
ir_node *b = get_Div_right(div);
ir_node *res, *new_mem;
* Transform a Proj(Mod) with a non-zero value.
* Removes the exceptions and routes the memory to the NoMem node.
*/
-static ir_node *transform_node_Proj_Mod(ir_node *proj) {
+static ir_node *transform_node_Proj_Mod(ir_node *proj)
+{
ir_node *mod = get_Proj_pred(proj);
ir_node *b = get_Mod_right(mod);
ir_node *res, *new_mem;
* Transform a Proj(DivMod) with a non-zero value.
* Removes the exceptions and routes the memory to the NoMem node.
*/
-static ir_node *transform_node_Proj_DivMod(ir_node *proj) {
+static ir_node *transform_node_Proj_DivMod(ir_node *proj)
+{
ir_node *divmod = get_Proj_pred(proj);
ir_node *b = get_DivMod_right(divmod);
ir_node *res, *new_mem;
/**
* Optimizes jump tables (CondIs or CondIu) by removing all impossible cases.
*/
-static ir_node *transform_node_Proj_Cond(ir_node *proj) {
+static ir_node *transform_node_Proj_Cond(ir_node *proj)
+{
if (get_opt_unreachable_code()) {
ir_node *n = get_Proj_pred(proj);
ir_node *b = get_Cond_selector(n);
/**
* Create a 0 constant of given mode.
*/
-static ir_node *create_zero_const(ir_mode *mode) {
+static ir_node *create_zero_const(ir_mode *mode)
+{
tarval *tv = get_mode_null(mode);
ir_node *cnst = new_Const(tv);
/**
* Normalizes and optimizes Cmp nodes.
*/
-static ir_node *transform_node_Proj_Cmp(ir_node *proj) {
+static ir_node *transform_node_Proj_Cmp(ir_node *proj)
+{
ir_node *n = get_Proj_pred(proj);
ir_node *left = get_Cmp_left(n);
ir_node *right = get_Cmp_right(n);
/**
* Optimize CopyB(mem, x, x) into a Nop.
*/
-static ir_node *transform_node_Proj_CopyB(ir_node *proj) {
+static ir_node *transform_node_Proj_CopyB(ir_node *proj)
+{
ir_node *copyb = get_Proj_pred(proj);
ir_node *a = get_CopyB_dst(copyb);
ir_node *b = get_CopyB_src(copyb);
/**
* Optimize Bounds(idx, idx, upper) into idx.
*/
-static ir_node *transform_node_Proj_Bound(ir_node *proj) {
+static ir_node *transform_node_Proj_Bound(ir_node *proj)
+{
ir_node *oldn = proj;
ir_node *bound = get_Proj_pred(proj);
ir_node *idx = get_Bound_index(bound);
* Does all optimizations on nodes that must be done on its Proj's
* because of creating new nodes.
*/
-static ir_node *transform_node_Proj(ir_node *proj) {
+static ir_node *transform_node_Proj(ir_node *proj)
+{
ir_node *n = get_Proj_pred(proj);
if (n->op->ops.transform_node_Proj)
/**
* Move Confirms down through Phi nodes.
*/
-static ir_node *transform_node_Phi(ir_node *phi) {
+static ir_node *transform_node_Phi(ir_node *phi)
+{
int i, n;
ir_mode *mode = get_irn_mode(phi);
* Returns the operands of a commutative bin-op, if one operand is
* a const, it is returned as the second one.
*/
-static void get_comm_Binop_Ops(ir_node *binop, ir_node **a, ir_node **c) {
+static void get_comm_Binop_Ops(ir_node *binop, ir_node **a, ir_node **c)
+{
ir_node *op_a = get_binop_left(binop);
ir_node *op_b = get_binop_right(binop);
* AND c1 ===> OR if (c1 | c2) == 0x111..11
* OR
*/
-static ir_node *transform_node_Or_bf_store(ir_node *or) {
+static ir_node *transform_node_Or_bf_store(ir_node *or)
+{
ir_node *and, *c1;
ir_node *or_l, *c2;
ir_node *and_l, *c3;
/**
* Optimize an Or(shl(x, c), shr(x, bits - c)) into a Rotl
*/
-static ir_node *transform_node_Or_Rotl(ir_node *or) {
+static ir_node *transform_node_Or_Rotl(ir_node *or)
+{
ir_mode *mode = get_irn_mode(or);
ir_node *shl, *shr, *block;
ir_node *irn, *x, *c1, *c2, *v, *sub, *n, *rotval;
/**
* Transform an Or.
*/
-static ir_node *transform_node_Or(ir_node *n) {
+static ir_node *transform_node_Or(ir_node *n)
+{
ir_node *c, *oldn = n;
ir_node *a = get_Or_left(n);
ir_node *b = get_Or_right(n);
*
* Should be moved to reassociation?
*/
-static ir_node *transform_node_shift(ir_node *n) {
+static ir_node *transform_node_shift(ir_node *n)
+{
ir_node *left, *right;
ir_mode *mode;
tarval *tv1, *tv2, *res;
* - Shl, Shr, Shrs, rotl instead of >>
* (with a special case for Or/Xor + Shrs)
*/
-static ir_node *transform_node_bitop_shift(ir_node *n) {
+static ir_node *transform_node_bitop_shift(ir_node *n)
+{
ir_node *left;
ir_node *right = get_binop_right(n);
ir_mode *mode = get_irn_mode(n);
* (x >> c1) << c2 <=> x OP (c2-c1) & ((-1 >> c1) << c2)
* (also with x >>s c1 when c1>=c2)
*/
-static ir_node *transform_node_shl_shr(ir_node *n) {
+static ir_node *transform_node_shl_shr(ir_node *n)
+{
ir_node *left;
ir_node *right = get_binop_right(n);
ir_node *x;
/**
* Transform a Shr.
*/
-static ir_node *transform_node_Shr(ir_node *n) {
+static ir_node *transform_node_Shr(ir_node *n)
+{
ir_node *c, *oldn = n;
ir_node *left = get_Shr_left(n);
ir_node *right = get_Shr_right(n);
/**
* Transform a Shrs.
*/
-static ir_node *transform_node_Shrs(ir_node *n) {
+static ir_node *transform_node_Shrs(ir_node *n)
+{
ir_node *c, *oldn = n;
ir_node *a = get_Shrs_left(n);
ir_node *b = get_Shrs_right(n);
/**
* Transform a Shl.
*/
-static ir_node *transform_node_Shl(ir_node *n) {
+static ir_node *transform_node_Shl(ir_node *n)
+{
ir_node *c, *oldn = n;
ir_node *a = get_Shl_left(n);
ir_node *b = get_Shl_right(n);
/**
* Transform a Rotl.
*/
-static ir_node *transform_node_Rotl(ir_node *n) {
+static ir_node *transform_node_Rotl(ir_node *n)
+{
ir_node *c, *oldn = n;
ir_node *a = get_Rotl_left(n);
ir_node *b = get_Rotl_right(n);
/**
* Transform a Conv.
*/
-static ir_node *transform_node_Conv(ir_node *n) {
+static ir_node *transform_node_Conv(ir_node *n)
+{
ir_node *c, *oldn = n;
ir_mode *mode = get_irn_mode(n);
ir_node *a = get_Conv_op(n);
* Remove dead blocks and nodes in dead blocks
* in keep alive list. We do not generate a new End node.
*/
-static ir_node *transform_node_End(ir_node *n) {
+static ir_node *transform_node_End(ir_node *n)
+{
int i, j, n_keepalives = get_End_n_keepalives(n);
ir_node **in;
/**
* Optimize a Mux into some simpler cases.
*/
-static ir_node *transform_node_Mux(ir_node *n) {
+static ir_node *transform_node_Mux(ir_node *n)
+{
ir_node *oldn = n, *sel = get_Mux_sel(n);
ir_mode *mode = get_irn_mode(n);
ir_node *t = get_Mux_true(n);
* optimize Sync nodes that have other syncs as input: we simply add the inputs
* of the other sync to our own inputs
*/
-static ir_node *transform_node_Sync(ir_node *n) {
+static ir_node *transform_node_Sync(ir_node *n)
+{
int arity = get_Sync_n_preds(n);
int i;
/**
* optimize a trampoline Call into a direct Call
*/
-static ir_node *transform_node_Call(ir_node *call) {
+static ir_node *transform_node_Call(ir_node *call)
+{
ir_node *callee = get_Call_ptr(call);
ir_node *adr, *mem, *res, *bl, **in;
ir_type *ctp, *mtp, *tp;
* transformations _do_ generate new nodes, and thus the old node must
* not be freed even if the equivalent node isn't the old one.
*/
-static ir_node *transform_node(ir_node *n) {
+static ir_node *transform_node(ir_node *n)
+{
ir_node *oldn;
/*
#define N_IR_NODES 512
/** Compares the attributes of two Const nodes. */
-static int node_cmp_attr_Const(ir_node *a, ir_node *b) {
+static int node_cmp_attr_Const(ir_node *a, ir_node *b)
+{
return (get_Const_tarval(a) != get_Const_tarval(b))
|| (get_Const_type(a) != get_Const_type(b));
} /* node_cmp_attr_Const */
/** Compares the attributes of two Proj nodes. */
-static int node_cmp_attr_Proj(ir_node *a, ir_node *b) {
+static int node_cmp_attr_Proj(ir_node *a, ir_node *b)
+{
return get_irn_proj_attr(a) != get_irn_proj_attr(b);
} /* node_cmp_attr_Proj */
/** Compares the attributes of two Filter nodes. */
-static int node_cmp_attr_Filter(ir_node *a, ir_node *b) {
+static int node_cmp_attr_Filter(ir_node *a, ir_node *b)
+{
return get_Filter_proj(a) != get_Filter_proj(b);
} /* node_cmp_attr_Filter */
/** Compares the attributes of two Alloc nodes. */
-static int node_cmp_attr_Alloc(ir_node *a, ir_node *b) {
+static int node_cmp_attr_Alloc(ir_node *a, ir_node *b)
+{
const alloc_attr *pa = get_irn_alloc_attr(a);
const alloc_attr *pb = get_irn_alloc_attr(b);
return (pa->where != pb->where) || (pa->type != pb->type);
} /* node_cmp_attr_Alloc */
/** Compares the attributes of two Free nodes. */
-static int node_cmp_attr_Free(ir_node *a, ir_node *b) {
+static int node_cmp_attr_Free(ir_node *a, ir_node *b)
+{
const free_attr *pa = get_irn_free_attr(a);
const free_attr *pb = get_irn_free_attr(b);
return (pa->where != pb->where) || (pa->type != pb->type);
} /* node_cmp_attr_Free */
/** Compares the attributes of two SymConst nodes. */
-static int node_cmp_attr_SymConst(ir_node *a, ir_node *b) {
+static int node_cmp_attr_SymConst(ir_node *a, ir_node *b)
+{
const symconst_attr *pa = get_irn_symconst_attr(a);
const symconst_attr *pb = get_irn_symconst_attr(b);
return (pa->kind != pb->kind)
} /* node_cmp_attr_SymConst */
/** Compares the attributes of two Call nodes. */
-static int node_cmp_attr_Call(ir_node *a, ir_node *b) {
+static int node_cmp_attr_Call(ir_node *a, ir_node *b)
+{
const call_attr *pa = get_irn_call_attr(a);
const call_attr *pb = get_irn_call_attr(b);
return (pa->type != pb->type)
} /* node_cmp_attr_Call */
/** Compares the attributes of two Sel nodes. */
-static int node_cmp_attr_Sel(ir_node *a, ir_node *b) {
+static int node_cmp_attr_Sel(ir_node *a, ir_node *b)
+{
const ir_entity *a_ent = get_Sel_entity(a);
const ir_entity *b_ent = get_Sel_entity(b);
#if 0
} /* node_cmp_attr_Sel */
/** Compares the attributes of two Phi nodes. */
-static int node_cmp_attr_Phi(ir_node *a, ir_node *b) {
+static int node_cmp_attr_Phi(ir_node *a, ir_node *b)
+{
/* we can only enter this function if both nodes have the same number of inputs,
hence it is enough to check if one of them is a Phi0 */
if (is_Phi0(a)) {
} /* node_cmp_attr_Phi */
/** Compares the attributes of two Conv nodes. */
-static int node_cmp_attr_Conv(ir_node *a, ir_node *b) {
+static int node_cmp_attr_Conv(ir_node *a, ir_node *b)
+{
return get_Conv_strict(a) != get_Conv_strict(b);
} /* node_cmp_attr_Conv */
/** Compares the attributes of two Cast nodes. */
-static int node_cmp_attr_Cast(ir_node *a, ir_node *b) {
+static int node_cmp_attr_Cast(ir_node *a, ir_node *b)
+{
return get_Cast_type(a) != get_Cast_type(b);
} /* node_cmp_attr_Cast */
/** Compares the attributes of two Load nodes. */
-static int node_cmp_attr_Load(ir_node *a, ir_node *b) {
+static int node_cmp_attr_Load(ir_node *a, ir_node *b)
+{
if (get_Load_volatility(a) == volatility_is_volatile ||
get_Load_volatility(b) == volatility_is_volatile)
/* NEVER do CSE on volatile Loads */
} /* node_cmp_attr_Load */
/** Compares the attributes of two Store nodes. */
-static int node_cmp_attr_Store(ir_node *a, ir_node *b) {
+static int node_cmp_attr_Store(ir_node *a, ir_node *b)
+{
/* do not CSE Stores with different alignment. Be conservative. */
if (get_Store_align(a) != get_Store_align(b))
return 1;
} /* node_cmp_attr_Store */
/** Compares two exception attributes */
-static int node_cmp_exception(ir_node *a, ir_node *b) {
+static int node_cmp_exception(ir_node *a, ir_node *b)
+{
const except_attr *ea = get_irn_except_attr(a);
const except_attr *eb = get_irn_except_attr(b);
#define node_cmp_attr_Bound node_cmp_exception
/** Compares the attributes of two Div nodes. */
-static int node_cmp_attr_Div(ir_node *a, ir_node *b) {
+static int node_cmp_attr_Div(ir_node *a, ir_node *b)
+{
const divmod_attr *ma = get_irn_divmod_attr(a);
const divmod_attr *mb = get_irn_divmod_attr(b);
return ma->exc.pin_state != mb->exc.pin_state ||
} /* node_cmp_attr_Div */
/** Compares the attributes of two DivMod nodes. */
-static int node_cmp_attr_DivMod(ir_node *a, ir_node *b) {
+static int node_cmp_attr_DivMod(ir_node *a, ir_node *b)
+{
const divmod_attr *ma = get_irn_divmod_attr(a);
const divmod_attr *mb = get_irn_divmod_attr(b);
return ma->exc.pin_state != mb->exc.pin_state ||
} /* node_cmp_attr_DivMod */
/** Compares the attributes of two Mod nodes. */
-static int node_cmp_attr_Mod(ir_node *a, ir_node *b) {
+static int node_cmp_attr_Mod(ir_node *a, ir_node *b)
+{
const divmod_attr *ma = get_irn_divmod_attr(a);
const divmod_attr *mb = get_irn_divmod_attr(b);
return ma->exc.pin_state != mb->exc.pin_state ||
} /* node_cmp_attr_Mod */
/** Compares the attributes of two Quot nodes. */
-static int node_cmp_attr_Quot(ir_node *a, ir_node *b) {
+static int node_cmp_attr_Quot(ir_node *a, ir_node *b)
+{
const divmod_attr *ma = get_irn_divmod_attr(a);
const divmod_attr *mb = get_irn_divmod_attr(b);
return ma->exc.pin_state != mb->exc.pin_state ||
} /* node_cmp_attr_Quot */
/** Compares the attributes of two Confirm nodes. */
-static int node_cmp_attr_Confirm(ir_node *a, ir_node *b) {
+static int node_cmp_attr_Confirm(ir_node *a, ir_node *b)
+{
/* no need to compare the bound, as this is an input */
return (get_Confirm_cmp(a) != get_Confirm_cmp(b));
} /* node_cmp_attr_Confirm */
/** Compares the attributes of two Builtin nodes. */
-static int node_cmp_attr_Builtin(ir_node *a, ir_node *b) {
+static int node_cmp_attr_Builtin(ir_node *a, ir_node *b)
+{
const builtin_attr *ma = get_irn_builtin_attr(a);
const builtin_attr *mb = get_irn_builtin_attr(b);
} /* node_cmp_attr_Builtin */
/** Compares the attributes of two ASM nodes. */
-static int node_cmp_attr_ASM(ir_node *a, ir_node *b) {
+static int node_cmp_attr_ASM(ir_node *a, ir_node *b)
+{
int i, n;
const ir_asm_constraint *ca;
const ir_asm_constraint *cb;
* Compare function for two nodes in the value table. Gets two
* nodes as parameters. Returns 0 if the nodes are a Common Sub Expression.
*/
-int identities_cmp(const void *elt, const void *key) {
+int identities_cmp(const void *elt, const void *key)
+{
ir_node *a = (ir_node *)elt;
ir_node *b = (ir_node *)key;
int i, irn_arity_a;
*
* @param node The IR-node
*/
-unsigned ir_node_hash(const ir_node *node) {
+unsigned ir_node_hash(const ir_node *node)
+{
return node->op->ops.hash(node);
} /* ir_node_hash */
-pset *new_identities(void) {
+pset *new_identities(void)
+{
return new_pset(identities_cmp, N_IR_NODES);
} /* new_identities */
-void del_identities(pset *value_table) {
+void del_identities(pset *value_table)
+{
del_pset(value_table);
} /* del_identities */
/* Normalize a node by putting constants (and operands with larger
* node index) on the right (operator side). */
-void ir_normalize_node(ir_node *n) {
+void ir_normalize_node(ir_node *n)
+{
if (is_op_commutative(get_irn_op(n))) {
ir_node *l = get_binop_left(n);
ir_node *r = get_binop_right(n);
* dominance info here: We known, that one block must dominate the other and
* following the only block input will allow to find it.
*/
-static void update_known_irn(ir_node *known_irn, const ir_node *new_ir_node) {
+static void update_known_irn(ir_node *known_irn, const ir_node *new_ir_node)
+{
ir_node *known_blk, *new_block, *block, *mbh;
if (get_opt_global_cse()) {
* @return a node that computes the same value as n or n if no such
* node could be found
*/
-ir_node *identify_remember(pset *value_table, ir_node *n) {
+ir_node *identify_remember(pset *value_table, ir_node *n)
+{
ir_node *nn = NULL;
if (!value_table) return n;
* @param value_table The value table
* @param n The node to lookup
*/
-static inline ir_node *identify_cons(pset *value_table, ir_node *n) {
+static inline ir_node *identify_cons(pset *value_table, ir_node *n)
+{
ir_node *old = n;
n = identify_remember(value_table, n);
} /* identify_cons */
/* Add a node to the identities value table. */
-void add_identities(pset *value_table, ir_node *node) {
+void add_identities(pset *value_table, ir_node *node)
+{
if (get_opt_cse() && is_no_Block(node))
identify_remember(value_table, node);
} /* add_identities */
/* Visit each node in the value table of a graph. */
-void visit_all_identities(ir_graph *irg, irg_walk_func visit, void *env) {
+void visit_all_identities(ir_graph *irg, irg_walk_func visit, void *env)
+{
ir_node *node;
ir_graph *rem = current_ir_graph;
* Garbage in, garbage out. If a node has a dead input, i.e., the
* Bad node is input to the node, return the Bad node.
*/
-static ir_node *gigo(ir_node *node) {
+static ir_node *gigo(ir_node *node)
+{
int i, irn_arity;
ir_op *op = get_irn_op(node);
*
* current_ir_graph must be set to the graph of the node!
*/
-ir_node *optimize_node(ir_node *n) {
+ir_node *optimize_node(ir_node *n)
+{
tarval *tv;
ir_node *oldn = n;
ir_opcode iro = get_irn_opcode(n);
* nodes lying on the obstack. Remove these by a dead node elimination,
* i.e., a copying garbage collection.
*/
-ir_node *optimize_in_place_2(ir_node *n) {
+ir_node *optimize_in_place_2(ir_node *n)
+{
tarval *tv;
ir_node *oldn = n;
ir_opcode iro = get_irn_opcode(n);
/**
* Wrapper for external use, set proper status bits after optimization.
*/
-ir_node *optimize_in_place(ir_node *n) {
+ir_node *optimize_in_place(ir_node *n)
+{
/* Handle graph state */
assert(get_irg_phase_state(current_ir_graph) != phase_building);
/**
* Calculate a hash value of a Const node.
*/
-static unsigned hash_Const(const ir_node *node) {
+static unsigned hash_Const(const ir_node *node)
+{
unsigned h;
/* special value for const, as they only differ in their tarval. */
/**
* Calculate a hash value of a SymConst node.
*/
-static unsigned hash_SymConst(const ir_node *node) {
+static unsigned hash_SymConst(const ir_node *node)
+{
unsigned h;
/* all others are pointers */
/*
* Sets the default operation for an ir_ops.
*/
-ir_op_ops *firm_set_default_operations(ir_opcode code, ir_op_ops *ops) {
+ir_op_ops *firm_set_default_operations(ir_opcode code, ir_op_ops *ops)
+{
ops = firm_set_default_hash(code, ops);
ops = firm_set_default_computed_value(code, ops);
ops = firm_set_default_equivalent_node(code, ops);
/**
* Wrapper for running void function(ir_graph *irg) as an ir_graph pass.
*/
-static int void_graph_wrapper(ir_graph *irg, void *context) {
+static int void_graph_wrapper(ir_graph *irg, void *context)
+{
void (*function)(ir_graph *irg) = context;
function(irg);
return 0;
/**
* Wrapper for running void function(ir_graph *irg) as an ir_graph pass.
*/
-static int int_graph_wrapper(ir_graph *irg, void *context) {
+static int int_graph_wrapper(ir_graph *irg, void *context)
+{
int (*function)(ir_graph *irg) = context;
return function(irg);
} /* int_graph_wrapper */
} /* def_graph_pass_constructor */
/* set the run parallel property */
-void ir_graph_pass_set_parallel(ir_graph_pass_t *pass, int flag) {
+void ir_graph_pass_set_parallel(ir_graph_pass_t *pass, int flag)
+{
pass->run_parallel = flag != 0;
} /* ir_graph_pass_set_parallel */
/**
* Wrapper for running void function(void) as an ir_prog pass.
*/
-static int void_prog_wrapper(ir_prog *irp, void *context) {
+static int void_prog_wrapper(ir_prog *irp, void *context)
+{
void (*function)(void) = context;
(void)irp;
/**
* Wrapper for the call_function pass.
*/
-static int call_function_wrapper(ir_prog *irp, void *context) {
+static int call_function_wrapper(ir_prog *irp, void *context)
+{
struct pass_t *pass = context;
(void)irp;
}
}
-ir_node *phase_get_first_node(const ir_phase *phase) {
+ir_node *phase_get_first_node(const ir_phase *phase)
+{
unsigned i;
for (i = 0; i < phase->n_data_ptr; ++i)
return NULL;
}
-ir_node *phase_get_next_node(const ir_phase *phase, ir_node *start) {
+ir_node *phase_get_next_node(const ir_phase *phase, ir_node *start)
+{
unsigned i;
for (i = get_irn_idx(start) + 1; i < phase->n_data_ptr; ++i)
/**
* Compare two execcount_t entries.
*/
-static int cmp_execcount(const void *a, const void *b, size_t size) {
+static int cmp_execcount(const void *a, const void *b, size_t size)
+{
const execcount_t *ea = a;
const execcount_t *eb = b;
(void) size;
/**
* Block walker, count number of blocks.
*/
-static void block_counter(ir_node * bb, void * data) {
+static void block_counter(ir_node * bb, void * data)
+{
unsigned int *count = data;
(void) bb;
*count = *count + 1;
/**
* Return the number of blocks in the given graph.
*/
-static unsigned int count_blocks(ir_graph *irg) {
+static unsigned int count_blocks(ir_graph *irg)
+{
unsigned int count = 0;
irg_block_walk_graph(irg, block_counter, NULL, &count);
// minimal execution frequency (an execfreq of 0 confuses algos)
static const double MIN_EXECFREQ = 0.00001;
-static void initialize_execfreq(ir_node *block, void *data) {
+static void initialize_execfreq(ir_node *block, void *data)
+{
initialize_execfreq_env_t *env = data;
double freq;
}
/* initializes ir_prog. Constructs only the basic lists. */
-void init_irprog_1(void) {
+void init_irprog_1(void)
+{
irp = new_incomplete_ir_prog();
}
/* Completes ir_prog. */
-void init_irprog_2(void) {
+void init_irprog_2(void)
+{
(void)complete_ir_prog(irp, INITAL_PROG_NAME);
}
/* Create a new ir prog. Automatically called by init_firm through
init_irprog. */
-ir_prog *new_ir_prog(const char *name) {
+ir_prog *new_ir_prog(const char *name)
+{
return complete_ir_prog(new_incomplete_ir_prog(), name);
}
/* Access the main routine of the compiled program. */
-ir_graph *get_irp_main_irg(void) {
+ir_graph *get_irp_main_irg(void)
+{
assert(irp);
return irp->main_irg;
}
-void set_irp_main_irg(ir_graph *main_irg) {
+void set_irp_main_irg(ir_graph *main_irg)
+{
assert(irp);
irp->main_irg = main_irg;
}
remove_irp_type(new_type);
}
-ir_type *(get_glob_type)(void) {
+ir_type *(get_glob_type)(void)
+{
return _get_glob_type();
}
-ir_type *(get_tls_type)(void) {
+ir_type *(get_tls_type)(void)
+{
return _get_tls_type();
}
/* Adds irg to the list of ir graphs in irp. */
-void add_irp_irg(ir_graph *irg) {
+void add_irp_irg(ir_graph *irg)
+{
assert(irg != NULL);
assert(irp && irp->graphs);
ARR_APP1(ir_graph *, irp->graphs, irg);
}
/* Removes irg from the list of irgs, shrinks the list by one. */
-void remove_irp_irg_from_list(ir_graph *irg){
+void remove_irp_irg_from_list(ir_graph *irg)
+{
int i, l, found = 0;
assert(irg);
}
/* Removes irg from the list of irgs, shrinks the list by one. */
-void remove_irp_irg(ir_graph *irg){
+void remove_irp_irg(ir_graph *irg)
+{
free_ir_graph(irg);
remove_irp_irg_from_list(irg);
}
-int (get_irp_n_irgs)(void) {
+int (get_irp_n_irgs)(void)
+{
return _get_irp_n_irgs();
}
-ir_graph *(get_irp_irg)(int pos){
+ir_graph *(get_irp_irg)(int pos)
+{
return _get_irp_irg(pos);
}
-int get_irp_last_idx(void) {
+int get_irp_last_idx(void)
+{
return irp->max_irg_idx;
}
-void set_irp_irg(int pos, ir_graph *irg) {
+void set_irp_irg(int pos, ir_graph *irg)
+{
assert(irp && irg);
assert(pos < (ARR_LEN(irp->graphs)));
irp->graphs[pos] = irg;
}
/* Gets the number of graphs _and_ pseudo graphs. */
-int get_irp_n_allirgs(void) {
+int get_irp_n_allirgs(void)
+{
/* We cannot call get_irp_n_irgs, as we would end up in a recursion ... */
return ARR_LEN(irp->graphs) + get_irp_n_pseudo_irgs();
}
/* Returns the ir graph at position pos of all graphs (including
pseudo graphs). Visits first graphs, then pseudo graphs. */
-ir_graph *get_irp_allirg(int pos) {
+ir_graph *get_irp_allirg(int pos)
+{
int n_irgs = ARR_LEN(irp->graphs);
assert(0 <= pos);
if (pos < n_irgs) {
}
/* Adds type to the list of types in irp. */
-void add_irp_type(ir_type *typ) {
+void add_irp_type(ir_type *typ)
+{
assert(typ != NULL);
assert(irp);
ARR_APP1(ir_type *, irp->types, typ);
}
/* Remove type from the list of types in irp. */
-void remove_irp_type(ir_type *typ) {
+void remove_irp_type(ir_type *typ)
+{
int i;
assert(typ);
}
}
-int (get_irp_n_types) (void) {
+int (get_irp_n_types) (void)
+{
return _get_irp_n_types();
}
-ir_type *(get_irp_type) (int pos) {
+ir_type *(get_irp_type) (int pos)
+{
return _get_irp_type(pos);
}
-void set_irp_type(int pos, ir_type *typ) {
+void set_irp_type(int pos, ir_type *typ)
+{
assert(irp && typ);
assert(pos < (ARR_LEN((irp)->types)));
irp->types[pos] = typ;
}
/* Returns the number of all modes in the irp. */
-int (get_irp_n_modes)(void) {
+int (get_irp_n_modes)(void)
+{
return _get_irp_n_modes();
}
/* Returns the mode at position pos in the irp. */
-ir_mode *(get_irp_mode)(int pos) {
+ir_mode *(get_irp_mode)(int pos)
+{
return _get_irp_mode(pos);
}
/* Adds mode to the list of modes in irp. */
-void add_irp_mode(ir_mode *mode) {
+void add_irp_mode(ir_mode *mode)
+{
assert(mode != NULL);
assert(irp);
ARR_APP1(ir_mode *, irp->modes, mode);
}
/* Adds opcode to the list of opcodes in irp. */
-void add_irp_opcode(ir_op *opcode) {
+void add_irp_opcode(ir_op *opcode)
+{
int len;
size_t code;
assert(opcode != NULL);
}
/* Removes opcode from the list of opcodes and shrinks the list by one. */
-void remove_irp_opcode(ir_op *opcode) {
+void remove_irp_opcode(ir_op *opcode)
+{
assert((int) opcode->code < ARR_LEN(irp->opcodes));
irp->opcodes[opcode->code] = NULL;
}
/* Returns the number of all opcodes in the irp. */
-int (get_irp_n_opcodes)(void) {
+int (get_irp_n_opcodes)(void)
+{
return _get_irp_n_opcodes();
}
/* Returns the opcode at position pos in the irp. */
-ir_op *(get_irp_opcode)(int pos) {
+ir_op *(get_irp_opcode)(int pos)
+{
return _get_irp_opcode(pos);
}
/* Sets the generic function pointer of all opcodes to NULL */
-void clear_irp_opcodes_generic_func(void) {
+void clear_irp_opcodes_generic_func(void)
+{
int i;
for (i = get_irp_n_opcodes() - 1; i >= 0; --i) {
}
/*- File name / executable name or the like -*/
-void set_irp_prog_name(ident *name) {
+void set_irp_prog_name(ident *name)
+{
irp->name = name;
}
-int irp_prog_name_is_set(void) {
+int irp_prog_name_is_set(void)
+{
return irp->name != new_id_from_str(INITAL_PROG_NAME);
}
-ident *get_irp_ident(void) {
+ident *get_irp_ident(void)
+{
return irp->name;
}
-const char *get_irp_name(void) {
+const char *get_irp_name(void)
+{
return get_id_str(irp->name);
}
-ir_graph *(get_const_code_irg)(void) {
+ir_graph *(get_const_code_irg)(void)
+{
return _get_const_code_irg();
}
-irg_phase_state get_irp_phase_state(void) {
+irg_phase_state get_irp_phase_state(void)
+{
return irp->phase_state;
}
-void set_irp_phase_state(irg_phase_state s) {
+void set_irp_phase_state(irg_phase_state s)
+{
irp->phase_state = s;
}
/**
* Wrapper for setting the state of a whole ir_prog.
*/
-static int set_irp_phase_state_wrapper(ir_prog *irp, void *context) {
+static int set_irp_phase_state_wrapper(ir_prog *irp, void *context)
+{
struct pass_t *pass = context;
irg_phase_state state = pass->state;
int i;
return 0;
}
-ir_prog_pass_t *set_irp_phase_state_pass(const char *name, irg_phase_state state) {
+ir_prog_pass_t *set_irp_phase_state_pass(const char *name, irg_phase_state state)
+{
struct pass_t *pass = XMALLOCZ(struct pass_t);
def_prog_pass_constructor(
return &pass->pass;
}
-irg_outs_state get_irp_ip_outs_state(void) {
+irg_outs_state get_irp_ip_outs_state(void)
+{
return irp->outs_state;
}
-void set_irp_ip_outs_inconsistent(void) {
+void set_irp_ip_outs_inconsistent(void)
+{
irp->outs_state = outs_inconsistent;
}
-void set_irp_ip_outedges(ir_node ** ip_outedges) {
+void set_irp_ip_outedges(ir_node ** ip_outedges)
+{
irp->ip_outedges = ip_outedges;
}
-ir_node** get_irp_ip_outedges(void) {
+ir_node** get_irp_ip_outedges(void)
+{
return irp->ip_outedges;
}
-irg_callee_info_state get_irp_callee_info_state(void) {
+irg_callee_info_state get_irp_callee_info_state(void)
+{
return irp->callee_info_state;
}
-void set_irp_callee_info_state(irg_callee_info_state s) {
+void set_irp_callee_info_state(irg_callee_info_state s)
+{
irp->callee_info_state = s;
}
/* Returns a new, unique exception region number. */
-ir_exc_region_t (get_irp_next_region_nr)(void) {
+ir_exc_region_t (get_irp_next_region_nr)(void)
+{
return _get_irp_next_region_nr();
}
/* Returns a new, unique label number. */
-ir_label_t (get_irp_next_label_nr)(void) {
+ir_label_t (get_irp_next_label_nr)(void)
+{
return _get_irp_next_label_nr();
}
/* Add a new global asm include */
-void add_irp_asm(ident *asm_string) {
+void add_irp_asm(ident *asm_string)
+{
ARR_APP1(ident *, irp->global_asms, asm_string);
}
/* Return the number of global asm includes. */
-int get_irp_n_asms(void) {
+int get_irp_n_asms(void)
+{
return ARR_LEN(irp->global_asms);
}
/* Return the global asm include at position pos. */
-ident *get_irp_asm(int pos) {
+ident *get_irp_asm(int pos)
+{
assert(0 <= pos && pos < get_irp_n_asms());
return irp->global_asms[pos];
}
#ifndef NDEBUG
-void irp_reserve_resources(ir_prog *irp, ir_resources_t resources) {
+void irp_reserve_resources(ir_prog *irp, ir_resources_t resources)
+{
assert((resources & ~IR_RESOURCE_GLOBAL_MASK) == 0);
assert((irp->reserved_resources & resources) == 0);
irp->reserved_resources |= resources;
}
-void irp_free_resources(ir_prog *irp, ir_resources_t resources) {
+void irp_free_resources(ir_prog *irp, ir_resources_t resources)
+{
assert((irp->reserved_resources & resources) == resources);
irp->reserved_resources &= ~resources;
}
-ir_resources_t irp_resources_reserved(const ir_prog *irp) {
+ir_resources_t irp_resources_reserved(const ir_prog *irp)
+{
return irp->reserved_resources;
}
#endif
* Post-walker: prepare the graph nodes for a new SSA construction cycle by allocating
* new arrays.
*/
-static void prepare_nodes(ir_node *irn, void *env) {
+static void prepare_nodes(ir_node *irn, void *env)
+{
(void)env;
switch (get_irn_opcode(irn)) {
* again and set_value()/get_value() and mature_block() can be used
* to construct new values.
*/
-void ssa_cons_start(ir_graph *irg, int n_loc) {
+void ssa_cons_start(ir_graph *irg, int n_loc)
+{
/* for now we support only phase_high graphs */
assert(irg->phase_state == phase_high);
/**
* mature all immature Blocks.
*/
-static void finish_block(ir_node *block, void *env) {
+static void finish_block(ir_node *block, void *env)
+{
(void)env;
if (!get_Block_matured(block))
* Finalize the (restarted) SSA construction. Matures all blocks that are
* not matured yet and reset the graph state to phase_high.
*/
-void ssa_cons_finish(ir_graph *irg) {
+void ssa_cons_finish(ir_graph *irg)
+{
irg_block_walk_graph(irg, NULL, finish_block, NULL);
irg_finalize_cons(irg);
}
const char *firm_vrfy_failure_msg;
/* enable verification of Load/Store entities */
-void vrfy_enable_entity_tests(int enable) {
+void vrfy_enable_entity_tests(int enable)
+{
vrfy_entities = enable;
}
/**
* little helper for NULL modes
*/
-static const char *get_mode_name_ex(ir_mode *mode) {
+static const char *get_mode_name_ex(ir_mode *mode)
+{
if (! mode)
return "<no mode>";
return get_mode_name(mode);
*
* @param node the node caused the failure
*/
-static void show_entity_failure(ir_node *node) {
+static void show_entity_failure(ir_node *node)
+{
ir_graph *irg = get_irn_irg(node);
if (last_irg_error == irg)
/**
* Prints a failure for a Node
*/
-static void show_node_failure(ir_node *n) {
+static void show_node_failure(ir_node *n)
+{
show_entity_failure(n);
fprintf(stderr, " node %ld %s%s\n" ,
get_irn_node_nr(n),
/**
* Prints a failure message for a binop
*/
-static void show_binop_failure(ir_node *n, const char *text) {
+static void show_binop_failure(ir_node *n, const char *text)
+{
ir_node *left = get_binop_left(n);
ir_node *right = get_binop_right(n);
/**
* Prints a failure message for an unop
*/
-static void show_unop_failure(ir_node *n, const char *text) {
+static void show_unop_failure(ir_node *n, const char *text)
+{
ir_node *op = get_unop_op(n);
show_entity_failure(n);
/**
* Prints a failure message for an op with 3 operands
*/
-static void show_triop_failure(ir_node *n, const char *text) {
+static void show_triop_failure(ir_node *n, const char *text)
+{
ir_node *op0 = get_irn_n(n, 0);
ir_node *op1 = get_irn_n(n, 1);
ir_node *op2 = get_irn_n(n, 2);
/**
* Prints a failure message for a proj
*/
-static void show_proj_failure(ir_node *n) {
+static void show_proj_failure(ir_node *n)
+{
ir_node *op = get_Proj_pred(n);
int proj = get_Proj_proj(n);
/**
* Prints a failure message for a proj from Start
*/
-static void show_proj_mode_failure(ir_node *n, ir_type *ty) {
+static void show_proj_mode_failure(ir_node *n, ir_type *ty)
+{
long proj = get_Proj_proj(n);
ir_mode *m = get_type_mode(ty);
char type_name[256];
/**
* Prints a failure message for a proj
*/
-static void show_proj_failure_ent(ir_node *n, ir_entity *ent) {
+static void show_proj_failure_ent(ir_node *n, ir_entity *ent)
+{
ir_node *op = get_Proj_pred(n);
int proj = get_Proj_proj(n);
ir_mode *m = get_type_mode(get_entity_type(ent));
/**
* Show a node and a graph
*/
-static void show_node_on_graph(ir_graph *irg, ir_node *n) {
+static void show_node_on_graph(ir_graph *irg, ir_node *n)
+{
ir_fprintf(stderr, "\nFIRM: irn_vrfy_irg() of %+F, node %+F\n", irg, n);
}
/**
* Show call parameters
*/
-static void show_call_param(ir_node *n, ir_type *mt) {
+static void show_call_param(ir_node *n, ir_type *mt)
+{
int i;
char type_name[256];
ir_print_type(type_name, sizeof(type_name), mt);
/**
* Show return modes
*/
-static void show_return_modes(ir_graph *irg, ir_node *n, ir_type *mt, int i) {
+static void show_return_modes(ir_graph *irg, ir_node *n, ir_type *mt, int i)
+{
ir_entity *ent = get_irg_entity(irg);
show_entity_failure(n);
/**
* Show return number of results
*/
-static void show_return_nres(ir_graph *irg, ir_node *n, ir_type *mt) {
+static void show_return_nres(ir_graph *irg, ir_node *n, ir_type *mt)
+{
ir_entity *ent = get_irg_entity(irg);
show_entity_failure(n);
/**
* Show Phi input
*/
-static void show_phi_failure(ir_node *phi, ir_node *pred, int pos) {
+static void show_phi_failure(ir_node *phi, ir_node *pred, int pos)
+{
(void) pos;
show_entity_failure(phi);
fprintf(stderr, " Phi node %ld has mode %s different from predeccessor node %ld mode %s\n",
/**
* Show Phi inputs
*/
-static void show_phi_inputs(ir_node *phi, ir_node *block) {
+static void show_phi_inputs(ir_node *phi, ir_node *block)
+{
show_entity_failure(phi);
fprintf(stderr, " Phi node %ld has %d inputs, its Block %ld has %d\n",
get_irn_node_nr(phi), get_irn_arity(phi),
*
* @param ptr the node representing the address
*/
-static ir_entity *get_ptr_entity(ir_node *ptr) {
+static ir_entity *get_ptr_entity(ir_node *ptr)
+{
if (is_Sel(ptr)) {
return get_Sel_entity(ptr);
} else if (is_SymConst_addr_ent(ptr)) {
/**
* verify a Proj(Start) node
*/
-static int verify_node_Proj_Start(ir_node *n, ir_node *p) {
+static int verify_node_Proj_Start(ir_node *n, ir_node *p)
+{
ir_mode *mode = get_irn_mode(p);
long proj = get_Proj_proj(p);
(void) n;
/**
* verify a Proj(Cond) node
*/
-static int verify_node_Proj_Cond(ir_node *pred, ir_node *p) {
+static int verify_node_Proj_Cond(ir_node *pred, ir_node *p)
+{
ir_mode *mode = get_irn_mode(p);
long proj = get_Proj_proj(p);
/**
* verify a Proj(Raise) node
*/
-static int verify_node_Proj_Raise(ir_node *n, ir_node *p) {
+static int verify_node_Proj_Raise(ir_node *n, ir_node *p)
+{
ir_mode *mode = get_irn_mode(p);
long proj = get_Proj_proj(p);
(void) n;
/**
* verify a Proj(InstOf) node
*/
-static int verify_node_Proj_InstOf(ir_node *n, ir_node *p) {
+static int verify_node_Proj_InstOf(ir_node *n, ir_node *p)
+{
ir_mode *mode = get_irn_mode(p);
long proj = get_Proj_proj(p);
(void) n;
/**
* verify a Proj(Call) node
*/
-static int verify_node_Proj_Call(ir_node *n, ir_node *p) {
+static int verify_node_Proj_Call(ir_node *n, ir_node *p)
+{
ir_mode *mode = get_irn_mode(p);
long proj = get_Proj_proj(p);
/**
* verify a Proj(Quot) node
*/
-static int verify_node_Proj_Quot(ir_node *n, ir_node *p) {
+static int verify_node_Proj_Quot(ir_node *n, ir_node *p)
+{
ir_mode *mode = get_irn_mode(p);
long proj = get_Proj_proj(p);
/**
* verify a Proj(DivMod) node
*/
-static int verify_node_Proj_DivMod(ir_node *n, ir_node *p) {
+static int verify_node_Proj_DivMod(ir_node *n, ir_node *p)
+{
ir_mode *mode = get_irn_mode(p);
long proj = get_Proj_proj(p);
/**
* verify a Proj(Div) node
*/
-static int verify_node_Proj_Div(ir_node *n, ir_node *p) {
+static int verify_node_Proj_Div(ir_node *n, ir_node *p)
+{
ir_mode *mode = get_irn_mode(p);
long proj = get_Proj_proj(p);
/**
* verify a Proj(Mod) node
*/
-static int verify_node_Proj_Mod(ir_node *n, ir_node *p) {
+static int verify_node_Proj_Mod(ir_node *n, ir_node *p)
+{
ir_mode *mode = get_irn_mode(p);
long proj = get_Proj_proj(p);
/**
* verify a Proj(Cmp) node
*/
-static int verify_node_Proj_Cmp(ir_node *n, ir_node *p) {
+static int verify_node_Proj_Cmp(ir_node *n, ir_node *p)
+{
ir_mode *mode = get_irn_mode(p);
long proj = get_Proj_proj(p);
(void) n;
/**
* verify a Proj(Load) node
*/
-static int verify_node_Proj_Load(ir_node *n, ir_node *p) {
+static int verify_node_Proj_Load(ir_node *n, ir_node *p)
+{
ir_mode *mode = get_irn_mode(p);
long proj = get_Proj_proj(p);
/**
* verify a Proj(Store) node
*/
-static int verify_node_Proj_Store(ir_node *n, ir_node *p) {
+static int verify_node_Proj_Store(ir_node *n, ir_node *p)
+{
ir_mode *mode = get_irn_mode(p);
long proj = get_Proj_proj(p);
/**
* verify a Proj(Alloc) node
*/
-static int verify_node_Proj_Alloc(ir_node *n, ir_node *p) {
+static int verify_node_Proj_Alloc(ir_node *n, ir_node *p)
+{
ir_mode *mode = get_irn_mode(p);
long proj = get_Proj_proj(p);
(void) n;
/**
* verify a Proj(Proj) node
*/
-static int verify_node_Proj_Proj(ir_node *pred, ir_node *p) {
+static int verify_node_Proj_Proj(ir_node *pred, ir_node *p)
+{
ir_mode *mode = get_irn_mode(p);
long proj = get_Proj_proj(p);
long nr = get_Proj_proj(pred);
/**
* verify a Proj(Tuple) node
*/
-static int verify_node_Proj_Tuple(ir_node *n, ir_node *p) {
+static int verify_node_Proj_Tuple(ir_node *n, ir_node *p)
+{
(void) n;
(void) p;
/* We don't test */
/**
* verify a Proj(CallBegin) node
*/
-static int verify_node_Proj_CallBegin(ir_node *n, ir_node *p) {
+static int verify_node_Proj_CallBegin(ir_node *n, ir_node *p)
+{
(void) n;
(void) p;
return 1;
/**
* verify a Proj(EndReg) node
*/
-static int verify_node_Proj_EndReg(ir_node *n, ir_node *p) {
+static int verify_node_Proj_EndReg(ir_node *n, ir_node *p)
+{
(void) n;
(void) p;
#ifdef INTERPROCEDURAL_VIEW
/**
* verify a Proj(EndExcept) node
*/
-static int verify_node_Proj_EndExcept(ir_node *n, ir_node *p) {
+static int verify_node_Proj_EndExcept(ir_node *n, ir_node *p)
+{
(void) n;
(void) p;
#ifdef INTERPROCEDURAL_VIEW
/**
* verify a Proj(CopyB) node
*/
-static int verify_node_Proj_CopyB(ir_node *n, ir_node *p) {
+static int verify_node_Proj_CopyB(ir_node *n, ir_node *p)
+{
ir_mode *mode = get_irn_mode(p);
long proj = get_Proj_proj(p);
/**
* verify a Proj(Bound) node
*/
-static int verify_node_Proj_Bound(ir_node *n, ir_node *p) {
+static int verify_node_Proj_Bound(ir_node *n, ir_node *p)
+{
ir_mode *mode = get_irn_mode(p);
long proj = get_Proj_proj(p);
* verify a Proj node
*/
static int
-verify_node_Proj(ir_node *p, ir_graph *irg) {
+verify_node_Proj(ir_node *p, ir_graph *irg)
+{
ir_node *pred;
ir_op *op;
/**
* verify a Block node
*/
-static int verify_node_Block(ir_node *n, ir_graph *irg) {
+static int verify_node_Block(ir_node *n, ir_graph *irg)
+{
int i;
ir_node *mb = get_Block_MacroBlock(n);
/**
* verify a Start node
*/
-static int verify_node_Start(ir_node *n, ir_graph *irg) {
+static int verify_node_Start(ir_node *n, ir_graph *irg)
+{
ir_mode *mymode = get_irn_mode(n);
(void) irg;
/**
* verify a Jmp node
*/
-static int verify_node_Jmp(ir_node *n, ir_graph *irg) {
+static int verify_node_Jmp(ir_node *n, ir_graph *irg)
+{
ir_mode *mymode = get_irn_mode(n);
(void) irg;
/**
* verify an IJmp node
*/
-static int verify_node_IJmp(ir_node *n, ir_graph *irg) {
+static int verify_node_IJmp(ir_node *n, ir_graph *irg)
+{
ir_mode *mymode = get_irn_mode(n);
ir_mode *op1mode = get_irn_mode(get_IJmp_target(n));
(void) irg;
/**
* verify a Break node
*/
-static int verify_node_Break(ir_node *n, ir_graph *irg) {
+static int verify_node_Break(ir_node *n, ir_graph *irg)
+{
ir_mode *mymode = get_irn_mode(n);
(void) irg;
/**
* verify a Cond node
*/
-static int verify_node_Cond(ir_node *n, ir_graph *irg) {
+static int verify_node_Cond(ir_node *n, ir_graph *irg)
+{
ir_mode *mymode = get_irn_mode(n);
ir_mode *op1mode = get_irn_mode(get_Cond_selector(n));
(void) irg;
/**
* verify a Return node
*/
-static int verify_node_Return(ir_node *n, ir_graph *irg) {
+static int verify_node_Return(ir_node *n, ir_graph *irg)
+{
int i;
ir_mode *mymode = get_irn_mode(n);
ir_mode *mem_mode = get_irn_mode(get_Return_mem(n));
/**
* verify a Raise node
*/
-static int verify_node_Raise(ir_node *n, ir_graph *irg) {
+static int verify_node_Raise(ir_node *n, ir_graph *irg)
+{
ir_mode *mymode = get_irn_mode(n);
ir_mode *op1mode = get_irn_mode(get_Raise_mem(n));
ir_mode *op2mode = get_irn_mode(get_Raise_exo_ptr(n));
/**
* verify a Const node
*/
-static int verify_node_Const(ir_node *n, ir_graph *irg) {
+static int verify_node_Const(ir_node *n, ir_graph *irg)
+{
ir_mode *mymode = get_irn_mode(n);
(void) irg;
/**
* verify a Sel node
*/
-static int verify_node_Sel(ir_node *n, ir_graph *irg) {
+static int verify_node_Sel(ir_node *n, ir_graph *irg)
+{
int i;
ir_mode *mymode = get_irn_mode(n);
ir_mode *op1mode = get_irn_mode(get_Sel_mem(n));
/**
* verify an InstOf node
*/
-static int verify_node_InstOf(ir_node *n, ir_graph *irg) {
+static int verify_node_InstOf(ir_node *n, ir_graph *irg)
+{
ir_mode *mymode = get_irn_mode(n);
ir_mode *op1mode = get_irn_mode(get_InstOf_obj(n));
(void) irg;
/**
* Check if the pinned state is right.
*/
-static int verify_right_pinned(ir_node *n) {
+static int verify_right_pinned(ir_node *n)
+{
ir_node *mem;
if (get_irn_pinned(n) == op_pin_state_pinned)
/**
* verify a Call node
*/
-static int verify_node_Call(ir_node *n, ir_graph *irg) {
+static int verify_node_Call(ir_node *n, ir_graph *irg)
+{
ir_mode *mymode = get_irn_mode(n);
ir_mode *op1mode = get_irn_mode(get_Call_mem(n));
ir_mode *op2mode = get_irn_mode(get_Call_ptr(n));
/**
* verify an Add node
*/
-static int verify_node_Add(ir_node *n, ir_graph *irg) {
+static int verify_node_Add(ir_node *n, ir_graph *irg)
+{
ir_mode *mymode = get_irn_mode(n);
ir_mode *op1mode = get_irn_mode(get_Add_left(n));
ir_mode *op2mode = get_irn_mode(get_Add_right(n));
/**
* verify a Sub node
*/
-static int verify_node_Sub(ir_node *n, ir_graph *irg) {
+static int verify_node_Sub(ir_node *n, ir_graph *irg)
+{
ir_mode *mymode = get_irn_mode(n);
ir_mode *op1mode = get_irn_mode(get_Sub_left(n));
ir_mode *op2mode = get_irn_mode(get_Sub_right(n));
/**
* verify a Minus node
*/
-static int verify_node_Minus(ir_node *n, ir_graph *irg) {
+static int verify_node_Minus(ir_node *n, ir_graph *irg)
+{
ir_mode *mymode = get_irn_mode(n);
ir_mode *op1mode = get_irn_mode(get_Minus_op(n));
(void) irg;
/**
* verify a Mul node
*/
-static int verify_node_Mul(ir_node *n, ir_graph *irg) {
+static int verify_node_Mul(ir_node *n, ir_graph *irg)
+{
ir_mode *mymode = get_irn_mode(n);
ir_mode *op1mode = get_irn_mode(get_Mul_left(n));
ir_mode *op2mode = get_irn_mode(get_Mul_right(n));
/**
* verify a Mulh node
*/
-static int verify_node_Mulh(ir_node *n, ir_graph *irg) {
+static int verify_node_Mulh(ir_node *n, ir_graph *irg)
+{
ir_mode *mymode = get_irn_mode(n);
ir_mode *op1mode = get_irn_mode(get_Mulh_left(n));
ir_mode *op2mode = get_irn_mode(get_Mulh_right(n));
/**
* verify a Quot node
*/
-static int verify_node_Quot(ir_node *n, ir_graph *irg) {
+static int verify_node_Quot(ir_node *n, ir_graph *irg)
+{
ir_mode *mymode = get_irn_mode(n);
ir_mode *op1mode = get_irn_mode(get_Quot_mem(n));
ir_mode *op2mode = get_irn_mode(get_Quot_left(n));
/**
* verify a DivMod node
*/
-static int verify_node_DivMod(ir_node *n, ir_graph *irg) {
+static int verify_node_DivMod(ir_node *n, ir_graph *irg)
+{
ir_mode *mymode = get_irn_mode(n);
ir_mode *op1mode = get_irn_mode(get_DivMod_mem(n));
ir_mode *op2mode = get_irn_mode(get_DivMod_left(n));
/**
* verify a Div node
*/
-static int verify_node_Div(ir_node *n, ir_graph *irg) {
+static int verify_node_Div(ir_node *n, ir_graph *irg)
+{
ir_mode *mymode = get_irn_mode(n);
ir_mode *op1mode = get_irn_mode(get_Div_mem(n));
ir_mode *op2mode = get_irn_mode(get_Div_left(n));
/**
* verify a Mod node
*/
-static int verify_node_Mod(ir_node *n, ir_graph *irg) {
+static int verify_node_Mod(ir_node *n, ir_graph *irg)
+{
ir_mode *mymode = get_irn_mode(n);
ir_mode *op1mode = get_irn_mode(get_Mod_mem(n));
ir_mode *op2mode = get_irn_mode(get_Mod_left(n));
/**
* verify an Abs node
*/
-static int verify_node_Abs(ir_node *n, ir_graph *irg) {
+static int verify_node_Abs(ir_node *n, ir_graph *irg)
+{
ir_mode *mymode = get_irn_mode(n);
ir_mode *op1mode = get_irn_mode(get_Abs_op(n));
(void) irg;
/**
* verify a logical And, Or, Eor node
*/
-static int verify_node_Logic(ir_node *n, ir_graph *irg) {
+static int verify_node_Logic(ir_node *n, ir_graph *irg)
+{
ir_mode *mymode = get_irn_mode(n);
ir_mode *op1mode = get_irn_mode(get_binop_left(n));
ir_mode *op2mode = get_irn_mode(get_binop_right(n));
/**
* verify a Not node
*/
-static int verify_node_Not(ir_node *n, ir_graph *irg) {
+static int verify_node_Not(ir_node *n, ir_graph *irg)
+{
ir_mode *mymode = get_irn_mode(n);
ir_mode *op1mode = get_irn_mode(get_Not_op(n));
(void) irg;
/**
* verify a Cmp node
*/
-static int verify_node_Cmp(ir_node *n, ir_graph *irg) {
+static int verify_node_Cmp(ir_node *n, ir_graph *irg)
+{
ir_mode *mymode = get_irn_mode(n);
ir_mode *op1mode = get_irn_mode(get_Cmp_left(n));
ir_mode *op2mode = get_irn_mode(get_Cmp_right(n));
/**
* verify a Shift node
*/
-static int verify_node_Shift(ir_node *n, ir_graph *irg) {
+static int verify_node_Shift(ir_node *n, ir_graph *irg)
+{
ir_mode *mymode = get_irn_mode(n);
ir_mode *op1mode = get_irn_mode(get_binop_left(n));
ir_mode *op2mode = get_irn_mode(get_binop_right(n));
/**
* verify a Rotl node
*/
-static int verify_node_Rotl(ir_node *n, ir_graph *irg) {
+static int verify_node_Rotl(ir_node *n, ir_graph *irg)
+{
ir_mode *mymode = get_irn_mode(n);
ir_mode *op1mode = get_irn_mode(get_Rotl_left(n));
ir_mode *op2mode = get_irn_mode(get_Rotl_right(n));
/**
* verify a Conv node
*/
-static int verify_node_Conv(ir_node *n, ir_graph *irg) {
+static int verify_node_Conv(ir_node *n, ir_graph *irg)
+{
ir_mode *mymode = get_irn_mode(n);
ir_mode *op1mode = get_irn_mode(get_Conv_op(n));
(void) irg;
/**
* verify a Cast node
*/
-static int verify_node_Cast(ir_node *n, ir_graph *irg) {
+static int verify_node_Cast(ir_node *n, ir_graph *irg)
+{
ir_mode *mymode = get_irn_mode(n);
ir_mode *op1mode = get_irn_mode(get_Cast_op(n));
(void) irg;
/**
* verify a Phi node
*/
-static int verify_node_Phi(ir_node *n, ir_graph *irg) {
+static int verify_node_Phi(ir_node *n, ir_graph *irg)
+{
ir_mode *mymode = get_irn_mode(n);
ir_node *block = get_nodes_block(n);
int i;
/**
* verify a Filter node
*/
-static int verify_node_Filter(ir_node *n, ir_graph *irg) {
+static int verify_node_Filter(ir_node *n, ir_graph *irg)
+{
(void) n;
(void) irg;
#ifdef INTERPROCEDURAL_VIEW
/**
* verify a Load node
*/
-static int verify_node_Load(ir_node *n, ir_graph *irg) {
+static int verify_node_Load(ir_node *n, ir_graph *irg)
+{
ir_mode *mymode = get_irn_mode(n);
ir_mode *op1mode = get_irn_mode(get_Load_mem(n));
ir_mode *op2mode = get_irn_mode(get_Load_ptr(n));
/**
* verify a Store node
*/
-static int verify_node_Store(ir_node *n, ir_graph *irg) {
+static int verify_node_Store(ir_node *n, ir_graph *irg)
+{
ir_entity *target;
ir_mode *mymode = get_irn_mode(n);
/**
* verify an Alloc node
*/
-static int verify_node_Alloc(ir_node *n, ir_graph *irg) {
+static int verify_node_Alloc(ir_node *n, ir_graph *irg)
+{
ir_mode *mymode = get_irn_mode(n);
ir_mode *op1mode = get_irn_mode(get_Alloc_mem(n));
ir_mode *op2mode = get_irn_mode(get_Alloc_size(n));
/**
* verify a Free node
*/
-static int verify_node_Free(ir_node *n, ir_graph *irg) {
+static int verify_node_Free(ir_node *n, ir_graph *irg)
+{
ir_mode *mymode = get_irn_mode(n);
ir_mode *op1mode = get_irn_mode(get_Free_mem(n));
ir_mode *op2mode = get_irn_mode(get_Free_ptr(n));
/**
* verify a Sync node
*/
-static int verify_node_Sync(ir_node *n, ir_graph *irg) {
+static int verify_node_Sync(ir_node *n, ir_graph *irg)
+{
int i;
ir_mode *mymode = get_irn_mode(n);
(void) irg;
/**
* verify a Confirm node
*/
-static int verify_node_Confirm(ir_node *n, ir_graph *irg) {
+static int verify_node_Confirm(ir_node *n, ir_graph *irg)
+{
ir_mode *mymode = get_irn_mode(n);
ir_mode *op1mode = get_irn_mode(get_Confirm_value(n));
ir_mode *op2mode = get_irn_mode(get_Confirm_bound(n));
/**
* verify a Mux node
*/
-static int verify_node_Mux(ir_node *n, ir_graph *irg) {
+static int verify_node_Mux(ir_node *n, ir_graph *irg)
+{
ir_mode *mymode = get_irn_mode(n);
ir_mode *op1mode = get_irn_mode(get_Mux_sel(n));
ir_mode *op2mode = get_irn_mode(get_Mux_true(n));
/**
* verify a CopyB node
*/
-static int verify_node_CopyB(ir_node *n, ir_graph *irg) {
+static int verify_node_CopyB(ir_node *n, ir_graph *irg)
+{
ir_mode *mymode = get_irn_mode(n);
ir_mode *op1mode = get_irn_mode(get_CopyB_mem(n));
ir_mode *op2mode = get_irn_mode(get_CopyB_dst(n));
/**
* verify a Bound node
*/
-static int verify_node_Bound(ir_node *n, ir_graph *irg) {
+static int verify_node_Bound(ir_node *n, ir_graph *irg)
+{
ir_mode *mymode = get_irn_mode(n);
ir_mode *op1mode = get_irn_mode(get_Bound_mem(n));
ir_mode *op2mode = get_irn_mode(get_Bound_index(n));
*
* @return non-zero on success, 0 on dominance error
*/
-static int check_dominance_for_node(ir_node *use) {
+static int check_dominance_for_node(ir_node *use)
+{
if (is_Block(use)) {
ir_node *mbh = get_Block_MacroBlock(use);
}
/* Tests the modes of n and its predecessors. */
-int irn_vrfy_irg(ir_node *n, ir_graph *irg) {
+int irn_vrfy_irg(ir_node *n, ir_graph *irg)
+{
int i;
ir_op *op;
return 1;
}
-int irn_vrfy(ir_node *n) {
+int irn_vrfy(ir_node *n)
+{
#ifdef DEBUG_libfirm
return irn_vrfy_irg(n, current_ir_graph);
#else
/**
* Walker to check every node
*/
-static void vrfy_wrap(ir_node *node, void *env) {
+static void vrfy_wrap(ir_node *node, void *env)
+{
int *res = env;
*res = irn_vrfy_irg(node, current_ir_graph);
}
* Walker to check every node including SSA property.
* Only called if dominance info is available.
*/
-static void vrfy_wrap_ssa(ir_node *node, void *env) {
+static void vrfy_wrap_ssa(ir_node *node, void *env)
+{
int *res = env;
*res = irn_vrfy_irg(node, current_ir_graph);
* Graph must be in state "op_pin_state_pinned".
* If dominance info is available, check the SSA property.
*/
-int irg_verify(ir_graph *irg, unsigned flags) {
+int irg_verify(ir_graph *irg, unsigned flags)
+{
int res = 1;
#ifdef DEBUG_libfirm
ir_graph *rem;
/**
* Wrapper to irg_verify to be run as an ir_graph pass.
*/
-static int irg_verify_wrapper(ir_graph *irg, void *context) {
+static int irg_verify_wrapper(ir_graph *irg, void *context)
+{
struct pass_t *pass = context;
irg_verify(irg, pass->flags);
/* do NOT rerun the pass if verify is ok :-) */
}
/* Creates an ir_graph pass for irg_verify(). */
-ir_graph_pass_t *irg_verify_pass(const char *name, unsigned flags) {
+ir_graph_pass_t *irg_verify_pass(const char *name, unsigned flags)
+{
struct pass_t *pass = XMALLOCZ(struct pass_t);
def_graph_pass_constructor(
}
/* create a verify pass */
-int irn_vrfy_irg_dump(ir_node *n, ir_graph *irg, const char **bad_string) {
+int irn_vrfy_irg_dump(ir_node *n, ir_graph *irg, const char **bad_string)
+{
int res;
firm_verification_t old = get_node_verification_mode();
/**
* Pre-Walker: check Bad predecessors of node.
*/
-static void check_bads(ir_node *node, void *env) {
+static void check_bads(ir_node *node, void *env)
+{
vrfy_bad_env_t *venv = env;
int i, arity = get_irn_arity(node);
/*
* verify occurrence of bad nodes
*/
-int irg_vrfy_bads(ir_graph *irg, int flags) {
+int irg_vrfy_bads(ir_graph *irg, int flags)
+{
vrfy_bad_env_t env;
env.flags = flags;
/*
* set the default verify operation
*/
-void firm_set_default_verifyer(ir_opcode code, ir_op_ops *ops) {
+void firm_set_default_verifyer(ir_opcode code, ir_op_ops *ops)
+{
#define CASE(a) \
case iro_##a: \
ops->verify_node = verify_node_##a; \
/* Returns the number of pseudo graphs in the program. */
-int get_irp_n_pseudo_irgs(void) {
+int get_irp_n_pseudo_irgs(void)
+{
assert (irp && irp->pseudo_graphs);
return ARR_LEN(irp->pseudo_graphs);
}
/* Returns the pos'th pseudo graph in the program. */
-ir_graph *get_irp_pseudo_irg(int pos) {
+ir_graph *get_irp_pseudo_irg(int pos)
+{
assert(0 <= pos && pos <= get_irp_n_pseudo_irgs());
return irp->pseudo_graphs[pos];
}
-void add_irp_pseudo_irg(ir_graph *irg) {
+void add_irp_pseudo_irg(ir_graph *irg)
+{
assert (irp && irp->pseudo_graphs);
ARR_APP1(ir_graph *, irp->pseudo_graphs, irg);
}
* optimized. Pseudo graphs are kept in a separate graph list in irprog.
*/
ir_graph *
-new_pseudo_ir_graph(ir_entity *ent, int n_loc) {
+new_pseudo_ir_graph(ir_entity *ent, int n_loc)
+{
ir_graph *res = new_r_ir_graph(ent, n_loc);
add_irp_pseudo_irg(res); /* remember this graph global. */
return res;
}
/* Returns non-zero if ir_graph is a pseudo graph. */
-int is_pseudo_ir_graph(ir_graph *irg) {
+int is_pseudo_ir_graph(ir_graph *irg)
+{
int i, n_pseudo_irgs;
assert(irg && "nothing here");
static int visit_pseudo_irgs = 0;
-void set_visit_pseudo_irgs(int x) {
+void set_visit_pseudo_irgs(int x)
+{
visit_pseudo_irgs = x;
}
-int get_visit_pseudo_irgs(void) {
+int get_visit_pseudo_irgs(void)
+{
return visit_pseudo_irgs;
}
iterator->valueset = valueset;
}
-ir_node *ir_valueset_iterator_next(ir_valueset_iterator_t *iterator, ir_node **expr) {
+ir_node *ir_valueset_iterator_next(ir_valueset_iterator_t *iterator, ir_node **expr)
+{
ir_valueset_entry_t *entry;
if (iterator->iter == &iterator->valueset->elem_list) {
return entry->value;
}
-void ir_valueset_remove_iterator(ir_valueset_t *valueset, ir_valueset_iterator_t *iterator) {
+void ir_valueset_remove_iterator(ir_valueset_t *valueset, ir_valueset_iterator_t *iterator)
+{
ir_valueset_entry_t *rem = list_entry(iterator->iter->prev, ir_valueset_entry_t, list);
ir_valueset_remove(valueset, rem->value);
* @param call A Call node.
* @param env The environment.
*/
-static cl_entry *get_Call_entry(ir_node *call, wlk_env *env) {
+static cl_entry *get_Call_entry(ir_node *call, wlk_env *env)
+{
cl_entry *res = get_irn_link(call);
if (res == NULL) {
cl_entry *res = OALLOC(&env->obst, cl_entry);
* @param adr the address
* @param pEnt points to the base entity if any
*/
-static ir_node *find_base_adr(ir_node *ptr, ir_entity **pEnt) {
+static ir_node *find_base_adr(ir_node *ptr, ir_entity **pEnt)
+{
ir_entity *ent = NULL;
assert(mode_is_reference(get_irn_mode(ptr)));
/**
* Check if a given pointer represents non-local memory.
*/
-static void check_ptr(ir_node *ptr, wlk_env *env) {
+static void check_ptr(ir_node *ptr, wlk_env *env)
+{
ir_storage_class_class_t sc;
ir_entity *ent;
* If a non-alias free memory access is found, reset the alias free
* flag.
*/
-static void fix_args_and_collect_calls(ir_node *n, void *ctx) {
+static void fix_args_and_collect_calls(ir_node *n, void *ctx)
+{
wlk_env *env = ctx;
int i;
ir_type *ctp;
* return values) to be 1 (C, C++) in almost all cases, so ignore the
* linear search complexity here.
*/
-static void do_copy_return_opt(ir_node *n, void *ctx) {
+static void do_copy_return_opt(ir_node *n, void *ctx)
+{
cr_pair *arr = ctx;
int i;
* @param irg the graph
* @param env the environment
*/
-static void fix_call_list(ir_graph *irg, wlk_env *env) {
+static void fix_call_list(ir_graph *irg, wlk_env *env)
+{
const lower_params_t *lp = env->params;
cl_entry *p;
ir_node *call, **new_in;
* @param lp lowering parameters
* @param tp The type.
*/
-static int must_be_lowered(const lower_params_t *lp, ir_type *tp) {
+static int must_be_lowered(const lower_params_t *lp, ir_type *tp)
+{
int i, n_ress;
ir_type *res_tp;
/**
* lower a CopyB node.
*/
-static void lower_copyb_nodes(ir_node *irn, unsigned mode_bytes) {
+static void lower_copyb_nodes(ir_node *irn, unsigned mode_bytes)
+{
ir_graph *irg = current_ir_graph;
unsigned size;
unsigned offset;
/**
* Post-Walker: find small CopyB nodes.
*/
-static void find_copyb_nodes(ir_node *irn, void *ctx) {
+static void find_copyb_nodes(ir_node *irn, void *ctx)
+{
walk_env_t *env = ctx;
ir_type *tp;
unsigned size;
/**
 * Get a primitive type for a mode.
*/
-static ir_type *get_primitive_type(ir_mode *mode) {
+static ir_type *get_primitive_type(ir_mode *mode)
+{
pmap_entry *entry = pmap_find(prim_types, mode);
ir_type *tp;
char buf[64];
/**
* Create a method type for a Conv emulation from imode to omode.
*/
-static ir_type *get_conv_type(ir_mode *imode, ir_mode *omode, lower_env_t *env) {
+static ir_type *get_conv_type(ir_mode *imode, ir_mode *omode, lower_env_t *env)
+{
conv_tp_entry_t key, *entry;
ir_type *mtd;
/**
* Translate a Constant: create two.
*/
-static void lower_Const(ir_node *node, ir_mode *mode, lower_env_t *env) {
+static void lower_Const(ir_node *node, ir_mode *mode, lower_env_t *env)
+{
tarval *tv, *tv_l, *tv_h;
ir_node *low, *high;
dbg_info *dbg = get_irn_dbg_info(node);
/**
* Translate a Load: create two.
*/
-static void lower_Load(ir_node *node, ir_mode *mode, lower_env_t *env) {
+static void lower_Load(ir_node *node, ir_mode *mode, lower_env_t *env)
+{
ir_mode *low_mode = env->params->low_unsigned;
ir_graph *irg = current_ir_graph;
ir_node *adr = get_Load_ptr(node);
/**
* Translate a Store: create two.
*/
-static void lower_Store(ir_node *node, ir_mode *mode, lower_env_t *env) {
+static void lower_Store(ir_node *node, ir_mode *mode, lower_env_t *env)
+{
ir_graph *irg;
ir_node *block, *adr, *mem;
ir_node *low, *high, *irn, *proj;
*
* Create an intrinsic Call.
*/
-static void lower_Div(ir_node *node, ir_mode *mode, lower_env_t *env) {
+static void lower_Div(ir_node *node, ir_mode *mode, lower_env_t *env)
+{
ir_node *block, *irn, *call, *proj;
ir_node *in[4];
ir_mode *opmode;
*
* Create an intrinsic Call.
*/
-static void lower_Mod(ir_node *node, ir_mode *mode, lower_env_t *env) {
+static void lower_Mod(ir_node *node, ir_mode *mode, lower_env_t *env)
+{
ir_node *block, *proj, *irn, *call;
ir_node *in[4];
ir_mode *opmode;
*
* Create two intrinsic Calls.
*/
-static void lower_DivMod(ir_node *node, ir_mode *mode, lower_env_t *env) {
+static void lower_DivMod(ir_node *node, ir_mode *mode, lower_env_t *env)
+{
ir_node *block, *proj, *irn, *mem, *callDiv, *callMod;
ir_node *resDiv = NULL;
ir_node *resMod = NULL;
*
* Create an intrinsic Call.
*/
-static void lower_Binop(ir_node *node, ir_mode *mode, lower_env_t *env) {
+static void lower_Binop(ir_node *node, ir_mode *mode, lower_env_t *env)
+{
ir_node *block, *irn;
ir_node *in[4];
dbg_info *dbg;
*
* Create an intrinsic Call.
*/
-static void lower_Shiftop(ir_node *node, ir_mode *mode, lower_env_t *env) {
+static void lower_Shiftop(ir_node *node, ir_mode *mode, lower_env_t *env)
+{
ir_node *block, *irn;
ir_node *in[3];
dbg_info *dbg;
/**
* Translate a Shr and handle special cases.
*/
-static void lower_Shr(ir_node *node, ir_mode *mode, lower_env_t *env) {
+static void lower_Shr(ir_node *node, ir_mode *mode, lower_env_t *env)
+{
ir_node *right = get_Shr_right(node);
ir_graph *irg = current_ir_graph;
/**
* Translate a Shl and handle special cases.
*/
-static void lower_Shl(ir_node *node, ir_mode *mode, lower_env_t *env) {
+static void lower_Shl(ir_node *node, ir_mode *mode, lower_env_t *env)
+{
ir_node *right = get_Shl_right(node);
ir_graph *irg = current_ir_graph;
/**
* Translate a Shrs and handle special cases.
*/
-static void lower_Shrs(ir_node *node, ir_mode *mode, lower_env_t *env) {
+static void lower_Shrs(ir_node *node, ir_mode *mode, lower_env_t *env)
+{
ir_node *right = get_Shrs_right(node);
ir_graph *irg = current_ir_graph;
/**
* Rebuild Rotl nodes into Or(Shl, Shr) and prepare all nodes.
*/
-static void prepare_links_and_handle_rotl(ir_node *node, void *env) {
+static void prepare_links_and_handle_rotl(ir_node *node, void *env)
+{
lower_env_t *lenv = env;
if (is_Rotl(node)) {
/**
* Translate a special case Rotl(x, sizeof(w)).
*/
-static void lower_Rotl(ir_node *node, ir_mode *mode, lower_env_t *env) {
+static void lower_Rotl(ir_node *node, ir_mode *mode, lower_env_t *env)
+{
ir_node *right = get_Rotl_right(node);
ir_node *left = get_Rotl_left(node);
ir_node *h, *l;
*
* Create an intrinsic Call.
*/
-static void lower_Unop(ir_node *node, ir_mode *mode, lower_env_t *env) {
+static void lower_Unop(ir_node *node, ir_mode *mode, lower_env_t *env)
+{
ir_node *block, *irn;
ir_node *in[2];
dbg_info *dbg;
*
* Create two logical Nots.
*/
-static void lower_Not(ir_node *node, ir_mode *mode, lower_env_t *env) {
+static void lower_Not(ir_node *node, ir_mode *mode, lower_env_t *env)
+{
ir_node *block, *irn;
ir_node *op_l, *op_h;
dbg_info *dbg;
/**
* Translate a Cond.
*/
-static void lower_Cond(ir_node *node, ir_mode *mode, lower_env_t *env) {
+static void lower_Cond(ir_node *node, ir_mode *mode, lower_env_t *env)
+{
ir_node *cmp, *left, *right, *block;
ir_node *sel = get_Cond_selector(node);
ir_mode *m = get_irn_mode(sel);
*
* @return the lowered type
*/
-static ir_type *lower_mtp(ir_type *mtp, lower_env_t *env) {
+static ir_type *lower_mtp(ir_type *mtp, lower_env_t *env)
+{
pmap_entry *entry;
ident *lid;
ir_type *res, *value_type;
/**
* Translate a Return.
*/
-static void lower_Return(ir_node *node, ir_mode *mode, lower_env_t *env) {
+static void lower_Return(ir_node *node, ir_mode *mode, lower_env_t *env)
+{
ir_graph *irg = current_ir_graph;
ir_entity *ent = get_irg_entity(irg);
ir_type *mtp = get_entity_type(ent);
/**
* Translate the parameters.
*/
-static void lower_Start(ir_node *node, ir_mode *mode, lower_env_t *env) {
+static void lower_Start(ir_node *node, ir_mode *mode, lower_env_t *env)
+{
ir_graph *irg = get_irn_irg(node);
ir_entity *ent = get_irg_entity(irg);
ir_type *tp = get_entity_type(ent);
/**
* Translate a Call.
*/
-static void lower_Call(ir_node *node, ir_mode *mode, lower_env_t *env) {
+static void lower_Call(ir_node *node, ir_mode *mode, lower_env_t *env)
+{
ir_type *tp = get_Call_type(node);
ir_type *call_tp;
ir_node **in, *proj, *results;
/**
* Translate an Unknown into two.
*/
-static void lower_Unknown(ir_node *node, ir_mode *mode, lower_env_t *env) {
+static void lower_Unknown(ir_node *node, ir_mode *mode, lower_env_t *env)
+{
int idx = get_irn_idx(node);
ir_graph *irg = get_irn_irg(node);
ir_mode *low_mode = env->params->low_unsigned;
*
* First step: just create two templates
*/
-static void lower_Phi(ir_node *phi, ir_mode *mode, lower_env_t *env) {
+static void lower_Phi(ir_node *phi, ir_mode *mode, lower_env_t *env)
+{
ir_mode *mode_l = env->params->low_unsigned;
ir_graph *irg = get_irn_irg(phi);
ir_node *block, *unk_l, *unk_h, *phi_l, *phi_h;
/**
* Translate a Mux.
*/
-static void lower_Mux(ir_node *mux, ir_mode *mode, lower_env_t *env) {
+static void lower_Mux(ir_node *mux, ir_mode *mode, lower_env_t *env)
+{
ir_node *block, *val;
ir_node *true_l, *true_h, *false_l, *false_h, *sel;
dbg_info *dbg;
/**
* Translate an ASM node.
*/
-static void lower_ASM(ir_node *asmn, ir_mode *mode, lower_env_t *env) {
+static void lower_ASM(ir_node *asmn, ir_mode *mode, lower_env_t *env)
+{
ir_mode *his = env->params->high_signed;
ir_mode *hiu = env->params->high_unsigned;
int i;
/**
* Translate a Sel node.
*/
-static void lower_Sel(ir_node *sel, ir_mode *mode, lower_env_t *env) {
+static void lower_Sel(ir_node *sel, ir_mode *mode, lower_env_t *env)
+{
(void) mode;
/* we must only lower value parameter Sels if we change the
/**
* check for opcodes that must always be lowered.
*/
-static int always_lower(ir_opcode code) {
+static int always_lower(ir_opcode code)
+{
switch (code) {
case iro_ASM:
case iro_Proj:
/**
* lower boolean Proj(Cmp)
*/
-static ir_node *lower_boolean_Proj_Cmp(ir_node *proj, ir_node *cmp, lower_env_t *env) {
+static ir_node *lower_boolean_Proj_Cmp(ir_node *proj, ir_node *cmp, lower_env_t *env)
+{
int lidx, ridx;
ir_node *l, *r, *low, *high, *t, *res;
pn_Cmp pnc;
/**
* Compare two op_mode_entry_t's.
*/
-static int cmp_op_mode(const void *elt, const void *key, size_t size) {
+static int cmp_op_mode(const void *elt, const void *key, size_t size)
+{
const op_mode_entry_t *e1 = elt;
const op_mode_entry_t *e2 = key;
(void) size;
/**
* Compare two conv_tp_entry_t's.
*/
-static int cmp_conv_tp(const void *elt, const void *key, size_t size) {
+static int cmp_conv_tp(const void *elt, const void *key, size_t size)
+{
const conv_tp_entry_t *e1 = elt;
const conv_tp_entry_t *e2 = key;
(void) size;
/**
* Enter a lowering function into an ir_op.
*/
-static void enter_lower_func(ir_op *op, lower_func func) {
+static void enter_lower_func(ir_op *op, lower_func func)
+{
op->ops.generic = (op_func)func;
} /* enter_lower_func */
*
* @param mtp the method type
*/
-static int mtp_must_to_lowered(ir_type *mtp, lower_env_t *env) {
+static int mtp_must_to_lowered(ir_type *mtp, lower_env_t *env)
+{
int i, n_params;
n_params = get_method_n_params(mtp);
return 0;
}
-ir_prog_pass_t *lower_dw_ops_pass(const char *name, const lwrdw_param_t *param) {
+ir_prog_pass_t *lower_dw_ops_pass(const char *name, const lwrdw_param_t *param)
+{
struct pass_t *pass = XMALLOCZ(struct pass_t);
pass->param = param;
/**
* Lower a Sel node. Do not touch Sels accessing entities on the frame type.
*/
-static void lower_sel(ir_node *sel) {
+static void lower_sel(ir_node *sel)
+{
ir_graph *irg = current_ir_graph;
ir_entity *ent;
ir_node *newn, *cnst, *index, *ptr, *bl;
/**
 * Lower all possible SymConst nodes.
*/
-static void lower_symconst(ir_node *symc) {
+static void lower_symconst(ir_node *symc)
+{
ir_node *newn;
ir_type *tp;
ir_entity *ent;
*
 * @param size the size in bits
*/
-static int is_integral_size(int size) {
+static int is_integral_size(int size)
+{
/* must be a 2^n */
if (size & (size-1))
return 0;
* @param proj the Proj(result) node
* @param load the Load node
*/
-static void lower_bitfields_loads(ir_node *proj, ir_node *load) {
+static void lower_bitfields_loads(ir_node *proj, ir_node *load)
+{
ir_node *sel = get_Load_ptr(load);
ir_node *block, *n_proj, *res, *ptr;
ir_entity *ent;
*
* @todo: It adds a load which may produce an exception!
*/
-static void lower_bitfields_stores(ir_node *store) {
+static void lower_bitfields_stores(ir_node *store)
+{
ir_node *sel = get_Store_ptr(store);
ir_node *ptr, *value;
ir_entity *ent;
/**
* Lowers unaligned Loads.
*/
-static void lower_unaligned_Load(ir_node *load) {
+static void lower_unaligned_Load(ir_node *load)
+{
(void) load;
/* NYI */
}
/**
* Lowers unaligned Stores
*/
-static void lower_unaligned_Store(ir_node *store) {
+static void lower_unaligned_Store(ir_node *store)
+{
(void) store;
/* NYI */
}
/**
* Wrapper for running lower_highlevel_graph() as an ir_graph pass.
*/
-static int lower_highlevel_graph_wrapper(ir_graph *irg, void *context) {
+static int lower_highlevel_graph_wrapper(ir_graph *irg, void *context)
+{
struct pass_t *pass = context;
lower_highlevel_graph(irg, pass->lower_bitfields);
return 0;
} /* lower_highlevel_graph_wrapper */
-ir_graph_pass_t *lower_highlevel_graph_pass(const char *name, int lower_bitfields) {
+ir_graph_pass_t *lower_highlevel_graph_pass(const char *name, int lower_bitfields)
+{
struct pass_t *pass = XMALLOCZ(struct pass_t);
pass->lower_bitfields = lower_bitfields;
/*
* does the same as lower_highlevel() for all nodes on the const code irg
*/
-void lower_const_code(void) {
+void lower_const_code(void)
+{
walk_const_code(NULL, lower_irnode, NULL);
} /* lower_const_code */
-ir_prog_pass_t *lower_const_code_pass(const char *name) {
+ir_prog_pass_t *lower_const_code_pass(const char *name)
+{
return def_prog_pass(name ? name : "lower_const_code", lower_const_code);
}
* Replace Sel nodes by address computation. Also resolves array access.
* Handle Bitfields by added And/Or calculations.
*/
-void lower_highlevel(int lower_bitfields) {
+void lower_highlevel(int lower_bitfields)
+{
int i, n;
n = get_irp_n_irgs();
/**
* walker: call all mapper functions
*/
-static void call_mapper(ir_node *node, void *env) {
+static void call_mapper(ir_node *node, void *env)
+{
walker_env_t *wenv = env;
ir_op *op = get_irn_op(node);
} /* call_mapper */
/* Go through all graphs and map calls to intrinsic functions. */
-unsigned lower_intrinsics(i_record *list, int length, int part_block_used) {
+unsigned lower_intrinsics(i_record *list, int length, int part_block_used)
+{
int i, n_ops = get_irp_n_opcodes();
ir_graph *irg;
pmap *c_map = pmap_create_ex(length);
* @param reg_jmp new regular control flow, if NULL, a Jmp will be used
* @param exc_jmp new exception control flow, if reg_jmp == NULL, a Bad will be used
*/
-static void replace_call(ir_node *irn, ir_node *call, ir_node *mem, ir_node *reg_jmp, ir_node *exc_jmp) {
+static void replace_call(ir_node *irn, ir_node *call, ir_node *mem, ir_node *reg_jmp, ir_node *exc_jmp)
+{
ir_node *block = get_nodes_block(call);
if (reg_jmp == NULL) {
} /* replace_call */
/* A mapper for the integer abs. */
-int i_mapper_abs(ir_node *call, void *ctx) {
+int i_mapper_abs(ir_node *call, void *ctx)
+{
ir_node *mem = get_Call_mem(call);
ir_node *block = get_nodes_block(call);
ir_node *op = get_Call_param(call, 0);
} /* i_mapper_abs */
/* A mapper for the integer bswap. */
-int i_mapper_bswap(ir_node *call, void *ctx) {
+int i_mapper_bswap(ir_node *call, void *ctx)
+{
ir_node *mem = get_Call_mem(call);
ir_node *block = get_nodes_block(call);
ir_node *op = get_Call_param(call, 0);
} /* i_mapper_bswap */
/* A mapper for the alloca() function. */
-int i_mapper_alloca(ir_node *call, void *ctx) {
+int i_mapper_alloca(ir_node *call, void *ctx)
+{
ir_node *mem = get_Call_mem(call);
ir_node *block = get_nodes_block(call);
ir_node *op = get_Call_param(call, 0);
} /* i_mapper_alloca */
/* A mapper for the floating point sqrt. */
-int i_mapper_sqrt(ir_node *call, void *ctx) {
+int i_mapper_sqrt(ir_node *call, void *ctx)
+{
ir_node *mem;
tarval *tv;
ir_node *op = get_Call_param(call, 0);
} /* i_mapper_sqrt */
/* A mapper for the floating point cbrt. */
-int i_mapper_cbrt(ir_node *call, void *ctx) {
+int i_mapper_cbrt(ir_node *call, void *ctx)
+{
ir_node *mem;
tarval *tv;
ir_node *op = get_Call_param(call, 0);
} /* i_mapper_cbrt */
/* A mapper for the floating point pow. */
-int i_mapper_pow(ir_node *call, void *ctx) {
+int i_mapper_pow(ir_node *call, void *ctx)
+{
dbg_info *dbg;
ir_node *mem;
ir_node *left = get_Call_param(call, 0);
} /* i_mapper_pow */
/* A mapper for the floating point exp. */
-int i_mapper_exp(ir_node *call, void *ctx) {
+int i_mapper_exp(ir_node *call, void *ctx)
+{
ir_node *val = get_Call_param(call, 0);
(void) ctx;
/**
* A mapper for mapping f(0.0) to 0.0.
*/
-static int i_mapper_zero_to_zero(ir_node *call, void *ctx, int reason) {
+static int i_mapper_zero_to_zero(ir_node *call, void *ctx, int reason)
+{
ir_node *val = get_Call_param(call, 0);
(void) ctx;
/**
* A mapper for mapping f(1.0) to 0.0.
*/
-static int i_mapper_one_to_zero(ir_node *call, void *ctx, int reason) {
+static int i_mapper_one_to_zero(ir_node *call, void *ctx, int reason)
+{
ir_node *val = get_Call_param(call, 0);
(void) ctx;
* f(-x) = f(x).
* f(0.0) = 1.0
*/
-static int i_mapper_symmetric_zero_to_one(ir_node *call, void *ctx, int reason) {
+static int i_mapper_symmetric_zero_to_one(ir_node *call, void *ctx, int reason)
+{
ir_node *val = get_Call_param(call, 0);
(void) ctx;
} /* i_mapper_symmetric_zero_to_one */
/* A mapper for the floating point log. */
-int i_mapper_log(ir_node *call, void *ctx) {
+int i_mapper_log(ir_node *call, void *ctx)
+{
/* log(1.0) = 0.0 */
return i_mapper_one_to_zero(call, ctx, FS_OPT_RTS_LOG);
} /* i_mapper_log */
/* A mapper for the floating point sin. */
-int i_mapper_sin(ir_node *call, void *ctx) {
+int i_mapper_sin(ir_node *call, void *ctx)
+{
/* sin(0.0) = 0.0 */
return i_mapper_zero_to_zero(call, ctx, FS_OPT_RTS_SIN);
} /* i_mapper_sin */
/* A mapper for the floating point cos. */
-int i_mapper_cos(ir_node *call, void *ctx) {
+int i_mapper_cos(ir_node *call, void *ctx)
+{
/* cos(0.0) = 1.0, cos(-x) = x */
return i_mapper_symmetric_zero_to_one(call, ctx, FS_OPT_RTS_COS);
} /* i_mapper_cos */
/* A mapper for the floating point tan. */
-int i_mapper_tan(ir_node *call, void *ctx) {
+int i_mapper_tan(ir_node *call, void *ctx)
+{
/* tan(0.0) = 0.0 */
return i_mapper_zero_to_zero(call, ctx, FS_OPT_RTS_TAN);
} /* i_mapper_tan */
/* A mapper for the floating point asin. */
-int i_mapper_asin(ir_node *call, void *ctx) {
+int i_mapper_asin(ir_node *call, void *ctx)
+{
/* asin(0.0) = 0.0 */
return i_mapper_zero_to_zero(call, ctx, FS_OPT_RTS_ASIN);
} /* i_mapper_asin */
/* A mapper for the floating point acos. */
-int i_mapper_acos(ir_node *call, void *ctx) {
+int i_mapper_acos(ir_node *call, void *ctx)
+{
/* acos(1.0) = 0.0 */
return i_mapper_one_to_zero(call, ctx, FS_OPT_RTS_ACOS);
} /* i_mapper_acos */
/* A mapper for the floating point atan. */
-int i_mapper_atan(ir_node *call, void *ctx) {
+int i_mapper_atan(ir_node *call, void *ctx)
+{
/* atan(0.0) = 0.0 */
return i_mapper_zero_to_zero(call, ctx, FS_OPT_RTS_ATAN);
} /* i_mapper_atan */
/* A mapper for the floating point sinh. */
-int i_mapper_sinh(ir_node *call, void *ctx) {
+int i_mapper_sinh(ir_node *call, void *ctx)
+{
/* sinh(0.0) = 0.0 */
return i_mapper_zero_to_zero(call, ctx, FS_OPT_RTS_SINH);
} /* i_mapper_sinh */
/* A mapper for the floating point cosh. */
-int i_mapper_cosh(ir_node *call, void *ctx) {
+int i_mapper_cosh(ir_node *call, void *ctx)
+{
/* cosh(0.0) = 1.0, cosh(-x) = x */
return i_mapper_symmetric_zero_to_one(call, ctx, FS_OPT_RTS_COSH);
} /* i_mapper_cosh */
/* A mapper for the floating point tanh. */
-int i_mapper_tanh(ir_node *call, void *ctx) {
+int i_mapper_tanh(ir_node *call, void *ctx)
+{
/* tanh(0.0) = 0.0 */
return i_mapper_zero_to_zero(call, ctx, FS_OPT_RTS_TANH);
} /* i_mapper_tanh */
* @return a Const node containing the strlen() result or NULL
* if the evaluation fails
*/
-static ir_node *eval_strlen(ir_entity *ent, ir_type *res_tp) {
+static ir_node *eval_strlen(ir_entity *ent, ir_type *res_tp)
+{
ir_type *tp = get_entity_type(ent);
ir_mode *mode;
ir_initializer_t *initializer;
} /* eval_strlen */
/* A mapper for strlen */
-int i_mapper_strlen(ir_node *call, void *ctx) {
+int i_mapper_strlen(ir_node *call, void *ctx)
+{
ir_node *s = get_Call_param(call, 0);
ir_entity *ent = get_const_entity(s);
* @return a Const node containing the strcmp() result or NULL
* if the evaluation fails
*/
-static ir_node *eval_strcmp(ir_entity *left, ir_entity *right, ir_type *res_tp) {
+static ir_node *eval_strcmp(ir_entity *left, ir_entity *right, ir_type *res_tp)
+{
ir_type *tp;
ir_mode *mode;
int i, n, n_r, res;
*
* @return non-zero if ent represents the empty string
*/
-static int is_empty_string(ir_entity *ent) {
+static int is_empty_string(ir_entity *ent)
+{
ir_type *tp = get_entity_type(ent);
ir_mode *mode;
ir_node *irn;
} /* is_empty_string */
/* A mapper for strcmp */
-int i_mapper_strcmp(ir_node *call, void *ctx) {
+int i_mapper_strcmp(ir_node *call, void *ctx)
+{
ir_node *left = get_Call_param(call, 0);
ir_node *right = get_Call_param(call, 1);
ir_node *irn = NULL;
} /* i_mapper_strcmp */
/* A mapper for strncmp */
-int i_mapper_strncmp(ir_node *call, void *ctx) {
+int i_mapper_strncmp(ir_node *call, void *ctx)
+{
ir_node *left = get_Call_param(call, 0);
ir_node *right = get_Call_param(call, 1);
ir_node *len = get_Call_param(call, 2);
} /* i_mapper_strncmp */
/* A mapper for strcpy */
-int i_mapper_strcpy(ir_node *call, void *ctx) {
+int i_mapper_strcpy(ir_node *call, void *ctx)
+{
ir_node *dst = get_Call_param(call, 0);
ir_node *src = get_Call_param(call, 1);
(void) ctx;
} /* i_mapper_strcpy */
/* A mapper for memcpy */
-int i_mapper_memcpy(ir_node *call, void *ctx) {
+int i_mapper_memcpy(ir_node *call, void *ctx)
+{
ir_node *dst = get_Call_param(call, 0);
ir_node *src = get_Call_param(call, 1);
ir_node *len = get_Call_param(call, 2);
} /* i_mapper_memcpy */
/* A mapper for mempcpy */
-int i_mapper_mempcpy(ir_node *call, void *ctx) {
+int i_mapper_mempcpy(ir_node *call, void *ctx)
+{
ir_node *dst = get_Call_param(call, 0);
ir_node *src = get_Call_param(call, 1);
ir_node *len = get_Call_param(call, 2);
} /* i_mapper_mempcpy */
/* A mapper for memmove */
-int i_mapper_memmove(ir_node *call, void *ctx) {
+int i_mapper_memmove(ir_node *call, void *ctx)
+{
ir_node *dst = get_Call_param(call, 0);
ir_node *src = get_Call_param(call, 1);
ir_node *len = get_Call_param(call, 2);
} /* i_mapper_memmove */
/* A mapper for memset */
-int i_mapper_memset(ir_node *call, void *ctx) {
+int i_mapper_memset(ir_node *call, void *ctx)
+{
ir_node *len = get_Call_param(call, 2);
(void) ctx;
} /* i_mapper_memset */
/* A mapper for memcmp */
-int i_mapper_memcmp(ir_node *call, void *ctx) {
+int i_mapper_memcmp(ir_node *call, void *ctx)
+{
ir_node *left = get_Call_param(call, 0);
ir_node *right = get_Call_param(call, 1);
ir_node *len = get_Call_param(call, 2);
/**
* Returns the result mode of a node.
*/
-static ir_mode *get_irn_res_mode(ir_node *node) {
+static ir_mode *get_irn_res_mode(ir_node *node)
+{
switch (get_irn_opcode(node)) {
case iro_Load: return get_Load_mode(node);
case iro_Quot: return get_Quot_resmode(node);
#define LMAX(a, b) ((a) > (b) ? (a) : (b))
/* A mapper for mapping unsupported instructions to runtime calls. */
-int i_mapper_RuntimeCall(ir_node *node, runtime_rt *rt) {
+int i_mapper_RuntimeCall(ir_node *node, runtime_rt *rt)
+{
int i, j, arity, first, n_param, n_res;
long n_proj;
ir_type *mtp;
return 0;
}
-ir_graph_pass_t *lower_mux_pass(const char *name, lower_mux_callback *cb_func) {
+ir_graph_pass_t *lower_mux_pass(const char *name, lower_mux_callback *cb_func)
+{
struct pass_t *pass = XMALLOCZ(struct pass_t);
pass->cb_func = cb_func;
/**
* Wrapper for running lower_switch() as a pass.
*/
-static int pass_wrapper(ir_graph *irg, void *context) {
+static int pass_wrapper(ir_graph *irg, void *context)
+{
struct pass_t *pass = context;
lower_switch(irg, pass->spare_size);
}
/* creates a pass for lower_switch */
-ir_graph_pass_t *lower_switch_pass(const char *name, unsigned spare_size) {
+ir_graph_pass_t *lower_switch_pass(const char *name, unsigned spare_size)
+{
struct pass_t *pass = XMALLOCZ(struct pass_t);
pass->spare_size = spare_size;
* This can be done, if block contains no Phi node that depends on
* different inputs idx_i and idx_j.
*/
-static int can_fuse_block_inputs(const ir_node *block, int idx_i, int idx_j) {
+static int can_fuse_block_inputs(const ir_node *block, int idx_i, int idx_j)
+{
const ir_node *phi;
for (phi = get_Block_phis(block); phi != NULL; phi = get_Phi_next(phi)) {
* Under the preposition that we have a chain of blocks from
* from_block to to_block, collapse them all into to_block.
*/
-static void move_nodes_to_block(ir_node *jmp, ir_node *to_block) {
+static void move_nodes_to_block(ir_node *jmp, ir_node *to_block)
+{
ir_node *new_jmp = NULL;
ir_node *block, *next_block;
* Note that the simple case that Block has only these two
* predecessors are already handled in equivalent_node_Block().
*/
-static int remove_senseless_conds(ir_node *bl) {
+static int remove_senseless_conds(ir_node *bl)
+{
int i, j;
int n = get_Block_n_cfgpreds(bl);
int changed = 0;
* Therefore we also optimize at control flow operations, depending
* how we first reach the Block.
*/
-static void merge_blocks(ir_node *node, void *ctx) {
+static void merge_blocks(ir_node *node, void *ctx)
+{
int i;
ir_node *new_block;
merge_env *env = ctx;
*
* Must be run in the post walker.
*/
-static void remove_unreachable_blocks_and_conds(ir_node *block, void *env) {
+static void remove_unreachable_blocks_and_conds(ir_node *block, void *env)
+{
int i;
int *changed = env;
* Links all Proj nodes to their predecessors.
* Collects all switch-Conds in a list.
*/
-static void collect_nodes(ir_node *n, void *ctx) {
+static void collect_nodes(ir_node *n, void *ctx)
+{
ir_opcode code = get_irn_opcode(n);
merge_env *env = ctx;
}
/** Returns true if pred is predecessor of block. */
-static int is_pred_of(ir_node *pred, ir_node *b) {
+static int is_pred_of(ir_node *pred, ir_node *b)
+{
int i;
for (i = get_Block_n_cfgpreds(b) - 1; i >= 0; --i) {
* To perform the test for pos, we must regard predecessors before pos
* as already removed.
**/
-static int test_whether_dispensable(ir_node *b, int pos) {
+static int test_whether_dispensable(ir_node *b, int pos)
+{
int i, j, n_preds = 1;
ir_node *pred = get_Block_cfgpred_block(b, pos);
* @@@ It is negotiable whether we should do this ... there might end up a copy
* from the Phi in the loop when removing the Phis.
*/
-static void optimize_blocks(ir_node *b, void *ctx) {
+static void optimize_blocks(ir_node *b, void *ctx)
+{
int i, j, k, n, max_preds, n_preds, p_preds = -1;
ir_node *pred, *phi, *next;
ir_node **in;
* Block walker: optimize all blocks using the default optimizations.
* This removes Blocks that with only a Jmp predecessor.
*/
-static void remove_simple_blocks(ir_node *block, void *ctx) {
+static void remove_simple_blocks(ir_node *block, void *ctx)
+{
ir_node *new_blk = equivalent_node(block);
merge_env *env = ctx;
*
* Expects all Proj's linked to the cond node
*/
-static int handle_switch_cond(ir_node *cond) {
+static int handle_switch_cond(ir_node *cond)
+{
ir_node *sel = get_Cond_selector(cond);
ir_node *proj1 = get_irn_link(cond);
* We use the mark flag to mark removable blocks in the first
* phase.
*/
-void optimize_cf(ir_graph *irg) {
+void optimize_cf(ir_graph *irg)
+{
int i, j, n, changed;
ir_node **in = NULL;
ir_node *cond, *end = get_irg_end(irg);
/**
* Check a partition.
*/
-static void check_partition(const partition_t *T) {
+static void check_partition(const partition_t *T)
+{
node_t *node;
unsigned n = 0;
/**
* check that all leader nodes in the partition have the same opcode.
*/
-static void check_opcode(const partition_t *Z) {
+static void check_opcode(const partition_t *Z)
+{
node_t *node;
opcode_key_t key;
int first = 1;
}
} /* check_opcode */
-static void check_all_partitions(environment_t *env) {
+static void check_all_partitions(environment_t *env)
+{
#ifdef DEBUG_libfirm
partition_t *P;
node_t *node;
/**
* Check list.
*/
-static void do_check_list(const node_t *list, int ofs, const partition_t *Z) {
+static void do_check_list(const node_t *list, int ofs, const partition_t *Z)
+{
#ifndef NDEBUG
const node_t *e;
/**
* Check a local list.
*/
-static void check_list(const node_t *list, const partition_t *Z) {
+static void check_list(const node_t *list, const partition_t *Z)
+{
do_check_list(list, offsetof(node_t, next), Z);
} /* check_list */
/**
* Dump partition to output.
*/
-static void dump_partition(const char *msg, const partition_t *part) {
+static void dump_partition(const char *msg, const partition_t *part)
+{
const node_t *node;
int first = 1;
lattice_elem_t type = get_partition_type(part);
/**
* Dumps a list.
*/
-static void do_dump_list(const char *msg, const node_t *node, int ofs) {
+static void do_dump_list(const char *msg, const node_t *node, int ofs)
+{
const node_t *p;
int first = 1;
/**
* Dumps a race list.
*/
-static void dump_race_list(const char *msg, const node_t *list) {
+static void dump_race_list(const char *msg, const node_t *list)
+{
do_dump_list(msg, list, offsetof(node_t, race_next));
} /* dump_race_list */
/**
* Dumps a local list.
*/
-static void dump_list(const char *msg, const node_t *list) {
+static void dump_list(const char *msg, const node_t *list)
+{
do_dump_list(msg, list, offsetof(node_t, next));
} /* dump_list */
/**
* Dump all partitions.
*/
-static void dump_all_partitions(const environment_t *env) {
+static void dump_all_partitions(const environment_t *env)
+{
const partition_t *P;
DB((dbg, LEVEL_2, "All partitions\n===============\n"));
/**
* Dump a split list.
*/
-static void dump_split_list(const partition_t *list) {
+static void dump_split_list(const partition_t *list)
+{
const partition_t *p;
DB((dbg, LEVEL_2, "Split by %s produced = {\n", what_reason));
/**
* Dump partition and type for a node.
*/
-static int dump_partition_hook(FILE *F, ir_node *n, ir_node *local) {
+static int dump_partition_hook(FILE *F, ir_node *n, ir_node *local)
+{
ir_node *irn = local != NULL ? local : n;
node_t *node = get_irn_node(irn);
/**
* Verify that a type transition is monotone
*/
-static void verify_type(const lattice_elem_t old_type, node_t *node) {
+static void verify_type(const lattice_elem_t old_type, node_t *node)
+{
if (old_type.tv == node->type.tv) {
/* no change */
return;
/**
* Compare two pointer values of a listmap.
*/
-static int listmap_cmp_ptr(const void *elt, const void *key, size_t size) {
+static int listmap_cmp_ptr(const void *elt, const void *key, size_t size)
+{
const listmap_entry_t *e1 = elt;
const listmap_entry_t *e2 = key;
*
* @param map the listmap
*/
-static void listmap_init(listmap_t *map) {
+static void listmap_init(listmap_t *map)
+{
map->map = new_set(listmap_cmp_ptr, 16);
map->values = NULL;
} /* listmap_init */
*
* @param map the listmap
*/
-static void listmap_term(listmap_t *map) {
+static void listmap_term(listmap_t *map)
+{
del_set(map->map);
} /* listmap_term */
*
* @return the associated listmap entry for the given id
*/
-static listmap_entry_t *listmap_find(listmap_t *map, void *id) {
+static listmap_entry_t *listmap_find(listmap_t *map, void *id)
+{
listmap_entry_t key, *entry;
key.id = id;
*
* @return a hash value for the given opcode map entry
*/
-static unsigned opcode_hash(const opcode_key_t *entry) {
+static unsigned opcode_hash(const opcode_key_t *entry)
+{
return (entry->mode - (ir_mode *)0) * 9 + entry->code + entry->u.proj * 3 + HASH_PTR(entry->u.ptr) + entry->arity;
} /* opcode_hash */
/**
* Compare two entries in the opcode map.
*/
-static int cmp_opcode(const void *elt, const void *key, size_t size) {
+static int cmp_opcode(const void *elt, const void *key, size_t size)
+{
const opcode_key_t *o1 = elt;
const opcode_key_t *o2 = key;
/**
* Compare two Def-Use edges for input position.
*/
-static int cmp_def_use_edge(const void *a, const void *b) {
+static int cmp_def_use_edge(const void *a, const void *b)
+{
const ir_def_use_edge *ea = a;
const ir_def_use_edge *eb = b;
/**
* We need the Def-Use edges sorted.
*/
-static void sort_irn_outs(node_t *node) {
+static void sort_irn_outs(node_t *node)
+{
ir_node *irn = node->node;
int n_outs = get_irn_n_outs(irn);
*
* @return the associated type of this node
*/
-static inline lattice_elem_t get_node_type(const ir_node *irn) {
+static inline lattice_elem_t get_node_type(const ir_node *irn)
+{
return get_irn_node(irn)->type;
} /* get_node_type */
*
* @return the associated type of this node
*/
-static inline tarval *get_node_tarval(const ir_node *irn) {
+static inline tarval *get_node_tarval(const ir_node *irn)
+{
lattice_elem_t type = get_node_type(irn);
if (is_tarval(type.tv))
/**
* Add a partition to the worklist.
*/
-static inline void add_to_worklist(partition_t *X, environment_t *env) {
+static inline void add_to_worklist(partition_t *X, environment_t *env)
+{
assert(X->on_worklist == 0);
DB((dbg, LEVEL_2, "Adding part%d to worklist\n", X->nr));
X->wl_next = env->worklist;
*
* @return a newly allocated partition
*/
-static inline partition_t *new_partition(environment_t *env) {
+static inline partition_t *new_partition(environment_t *env)
+{
partition_t *part = OALLOC(&env->obst, partition_t);
INIT_LIST_HEAD(&part->Leader);
/**
* Get the first node from a partition.
*/
-static inline node_t *get_first_node(const partition_t *X) {
+static inline node_t *get_first_node(const partition_t *X)
+{
return list_entry(X->Leader.next, node_t, node_list);
} /* get_first_node */
*
* @return the type of the first element of the partition
*/
-static inline lattice_elem_t get_partition_type(const partition_t *X) {
+static inline lattice_elem_t get_partition_type(const partition_t *X)
+{
const node_t *first = get_first_node(X);
return first->type;
} /* get_partition_type */
*
* @return the created node
*/
-static node_t *create_partition_node(ir_node *irn, partition_t *part, environment_t *env) {
+static node_t *create_partition_node(ir_node *irn, partition_t *part, environment_t *env)
+{
/* create a partition node and place it in the partition */
node_t *node = OALLOC(&env->obst, node_t);
* Pre-Walker, initialize all Nodes' type to U or top and place
* all nodes into the TOP partition.
*/
-static void create_initial_partitions(ir_node *irn, void *ctx) {
+static void create_initial_partitions(ir_node *irn, void *ctx)
+{
environment_t *env = ctx;
partition_t *part = env->initial;
node_t *node;
/**
* Post-Walker, collect all Block-Phi lists, set Cond.
*/
-static void init_block_phis(ir_node *irn, void *ctx) {
+static void init_block_phis(ir_node *irn, void *ctx)
+{
(void) ctx;
if (is_Phi(irn)) {
* @param y a node
* @param env the environment
*/
-static inline void add_to_touched(node_t *y, environment_t *env) {
+static inline void add_to_touched(node_t *y, environment_t *env)
+{
if (y->on_touched == 0) {
partition_t *part = y->part;
* @param y the node
* @param env the environment
*/
-static void add_to_cprop(node_t *y, environment_t *env) {
+static void add_to_cprop(node_t *y, environment_t *env)
+{
ir_node *irn;
/* Add y to y.partition.cprop. */
* @param Z_prime the Z' partition, a previous part of Z
* @param env the environment
*/
-static void update_worklist(partition_t *Z, partition_t *Z_prime, environment_t *env) {
+static void update_worklist(partition_t *Z, partition_t *Z_prime, environment_t *env)
+{
if (Z->on_worklist || Z_prime->n_leader < Z->n_leader) {
add_to_worklist(Z_prime, env);
} else {
*
* @param x the node
*/
-static void move_edges_to_leader(node_t *x) {
+static void move_edges_to_leader(node_t *x)
+{
ir_node *irn = x->node;
int i, j, k;
*
* @return a new partition containing the nodes of g
*/
-static partition_t *split_no_followers(partition_t *Z, node_t *g, environment_t *env) {
+static partition_t *split_no_followers(partition_t *Z, node_t *g, environment_t *env)
+{
partition_t *Z_prime;
node_t *node;
unsigned n = 0;
*
* @param n the node
*/
-static void follower_to_leader(node_t *n) {
+static void follower_to_leader(node_t *n)
+{
assert(n->is_follower == 1);
DB((dbg, LEVEL_2, "%+F make the follower -> leader transition\n", n->node));
* @param irn the node to check
* @param input number of the input
*/
-static int is_real_follower(const ir_node *irn, int input) {
+static int is_real_follower(const ir_node *irn, int input)
+{
node_t *pred;
switch (get_irn_opcode(irn)) {
/**
* Do one step in the race.
*/
-static int step(step_env *env) {
+static int step(step_env *env)
+{
node_t *n;
if (env->initial != NULL) {
*
* @param list the list
*/
-static int clear_flags(node_t *list) {
+static int clear_flags(node_t *list)
+{
int res = 0;
node_t *n;
*
* @return a new partition containing the nodes of gg
*/
-static partition_t *split(partition_t **pX, node_t *gg, environment_t *env) {
+static partition_t *split(partition_t **pX, node_t *gg, environment_t *env)
+{
partition_t *X = *pX;
partition_t *X_prime;
list_head tmp;
*
* @return non-zero if the i'th input of the given Phi node is live
*/
-static int is_live_input(ir_node *phi, int i) {
+static int is_live_input(ir_node *phi, int i)
+{
if (i >= 0) {
ir_node *block = get_nodes_block(phi);
ir_node *pred = get_Block_cfgpred(block, i);
/**
* Return non-zero if a type is a constant.
*/
-static int is_constant_type(lattice_elem_t type) {
+static int is_constant_type(lattice_elem_t type)
+{
if (type.tv != tarval_bottom && type.tv != tarval_top)
return 1;
return 0;
*
* @param type the type to check
*/
-static int type_is_neither_top_nor_const(const lattice_elem_t type) {
+static int type_is_neither_top_nor_const(const lattice_elem_t type)
+{
if (is_tarval(type.tv)) {
if (type.tv == tarval_top)
return 0;
* @param idx the index of the def_use edge to evaluate
* @param env the environment
*/
-static void collect_touched(list_head *list, int idx, environment_t *env) {
+static void collect_touched(list_head *list, int idx, environment_t *env)
+{
node_t *x, *y;
int end_idx = env->end_idx;
* @param list the list which contains the nodes that must be evaluated
* @param env the environment
*/
-static void collect_commutative_touched(list_head *list, environment_t *env) {
+static void collect_commutative_touched(list_head *list, environment_t *env)
+{
node_t *x, *y;
list_for_each_entry(node_t, x, list, node_list) {
*
* @param env the environment
*/
-static void cause_splits(environment_t *env) {
+static void cause_splits(environment_t *env)
+{
partition_t *X, *Z, *N;
int idx;
} /* split_by_what */
/** lambda n.(n.type) */
-static void *lambda_type(const node_t *node, environment_t *env) {
+static void *lambda_type(const node_t *node, environment_t *env)
+{
(void)env;
return node->type.tv;
} /* lambda_type */
/** lambda n.(n.opcode) */
-static void *lambda_opcode(const node_t *node, environment_t *env) {
+static void *lambda_opcode(const node_t *node, environment_t *env)
+{
opcode_key_t key, *entry;
ir_node *irn = node->node;
} /* lambda_opcode */
/** lambda n.(n[i].partition) */
-static void *lambda_partition(const node_t *node, environment_t *env) {
+static void *lambda_partition(const node_t *node, environment_t *env)
+{
ir_node *skipped = skip_Proj(node->node);
ir_node *pred;
node_t *p;
} /* lambda_partition */
/** lambda n.(n[i].partition) for commutative nodes */
-static void *lambda_commutative_partition(const node_t *node, environment_t *env) {
+static void *lambda_commutative_partition(const node_t *node, environment_t *env)
+{
ir_node *irn = node->node;
ir_node *skipped = skip_Proj(irn);
ir_node *pred, *left, *right;
* Returns true if a type is a constant (and NOT Top
* or Bottom).
*/
-static int is_con(const lattice_elem_t type) {
+static int is_con(const lattice_elem_t type)
+{
/* be conservative */
if (is_tarval(type.tv))
return tarval_is_constant(type.tv);
* @param X the partition to split
* @param env the environment
*/
-static void split_by(partition_t *X, environment_t *env) {
+static void split_by(partition_t *X, environment_t *env)
+{
partition_t *I, *P = NULL;
int input;
*
* @param node the node
*/
-static void default_compute(node_t *node) {
+static void default_compute(node_t *node)
+{
int i;
ir_node *irn = node->node;
*
* @param node the node
*/
-static void compute_Block(node_t *node) {
+static void compute_Block(node_t *node)
+{
int i;
ir_node *block = node->node;
*
* @param node the node
*/
-static void compute_Bad(node_t *node) {
+static void compute_Bad(node_t *node)
+{
/* Bad nodes ALWAYS compute Top */
node->type.tv = tarval_top;
} /* compute_Bad */
*
* @param node the node
*/
-static void compute_Unknown(node_t *node) {
+static void compute_Unknown(node_t *node)
+{
/* While Unknown nodes should compute Top this is dangerous:
* a Top input to a Cond would lead to BOTH control flows unreachable.
* While this is correct in the given semantics, it would destroy the Firm
*
* @param node the node
*/
-static void compute_Jmp(node_t *node) {
+static void compute_Jmp(node_t *node)
+{
node_t *block = get_irn_node(get_nodes_block(node->node));
node->type = block->type;
*
* @param node the node
*/
-static void compute_Return(node_t *node) {
+static void compute_Return(node_t *node)
+{
/* The Return node is NOT dead if it is in a reachable block.
* This is already checked in compute(). so we can return
* Reachable here. */
*
* @param node the node
*/
-static void compute_End(node_t *node) {
+static void compute_End(node_t *node)
+{
/* the End node is NOT dead of course */
node->type.tv = tarval_reachable;
} /* compute_End */
*
* @param node the node
*/
-static void compute_Call(node_t *node) {
+static void compute_Call(node_t *node)
+{
/*
* A Call computes always bottom, even if it has Unknown
* predecessors.
*
* @param node the node
*/
-static void compute_SymConst(node_t *node) {
+static void compute_SymConst(node_t *node)
+{
ir_node *irn = node->node;
node_t *block = get_irn_node(get_nodes_block(irn));
*
* @param node the node
*/
-static void compute_Phi(node_t *node) {
+static void compute_Phi(node_t *node)
+{
int i;
ir_node *phi = node->node;
lattice_elem_t type;
*
* @param node the node
*/
-static void compute_Add(node_t *node) {
+static void compute_Add(node_t *node)
+{
ir_node *sub = node->node;
node_t *l = get_irn_node(get_Add_left(sub));
node_t *r = get_irn_node(get_Add_right(sub));
*
* @param node the node
*/
-static void compute_Sub(node_t *node) {
+static void compute_Sub(node_t *node)
+{
ir_node *sub = node->node;
node_t *l = get_irn_node(get_Sub_left(sub));
node_t *r = get_irn_node(get_Sub_right(sub));
*
* @param node the node
*/
-static void compute_Eor(node_t *node) {
+static void compute_Eor(node_t *node)
+{
ir_node *eor = node->node;
node_t *l = get_irn_node(get_Eor_left(eor));
node_t *r = get_irn_node(get_Eor_right(eor));
*
* @param node the node
*/
-static void compute_Cmp(node_t *node) {
+static void compute_Cmp(node_t *node)
+{
ir_node *cmp = node->node;
node_t *l = get_irn_node(get_Cmp_left(cmp));
node_t *r = get_irn_node(get_Cmp_right(cmp));
* @param node the node
* @param cond the predecessor Cmp node
*/
-static void compute_Proj_Cmp(node_t *node, ir_node *cmp) {
+static void compute_Proj_Cmp(node_t *node, ir_node *cmp)
+{
ir_node *proj = node->node;
node_t *l = get_irn_node(get_Cmp_left(cmp));
node_t *r = get_irn_node(get_Cmp_right(cmp));
* @param node the node
* @param cond the predecessor Cond node
*/
-static void compute_Proj_Cond(node_t *node, ir_node *cond) {
+static void compute_Proj_Cond(node_t *node, ir_node *cond)
+{
ir_node *proj = node->node;
long pnc = get_Proj_proj(proj);
ir_node *sel = get_Cond_selector(cond);
*
* @param node the node
*/
-static void compute_Proj(node_t *node) {
+static void compute_Proj(node_t *node)
+{
ir_node *proj = node->node;
ir_mode *mode = get_irn_mode(proj);
node_t *block = get_irn_node(get_nodes_block(skip_Proj(proj)));
*
* @param node the node
*/
-static void compute_Confirm(node_t *node) {
+static void compute_Confirm(node_t *node)
+{
ir_node *confirm = node->node;
node_t *pred = get_irn_node(get_Confirm_value(confirm));
*
* @param node the node
*/
-static void compute(node_t *node) {
+static void compute(node_t *node)
+{
ir_node *irn = node->node;
compute_func func;
/**
* Calculates the Identity for Phi nodes
*/
-static node_t *identity_Phi(node_t *node) {
+static node_t *identity_Phi(node_t *node)
+{
ir_node *phi = node->node;
ir_node *block = get_nodes_block(phi);
node_t *n_part = NULL;
/**
* Calculates the Identity for commutative 0 neutral nodes.
*/
-static node_t *identity_comm_zero_binop(node_t *node) {
+static node_t *identity_comm_zero_binop(node_t *node)
+{
ir_node *op = node->node;
node_t *a = get_irn_node(get_binop_left(op));
node_t *b = get_irn_node(get_binop_right(op));
/**
* Calculates the Identity for Shift nodes.
*/
-static node_t *identity_shift(node_t *node) {
+static node_t *identity_shift(node_t *node)
+{
ir_node *op = node->node;
node_t *b = get_irn_node(get_binop_right(op));
ir_mode *mode = get_irn_mode(b->node);
/**
* Calculates the Identity for Mul nodes.
*/
-static node_t *identity_Mul(node_t *node) {
+static node_t *identity_Mul(node_t *node)
+{
ir_node *op = node->node;
node_t *a = get_irn_node(get_Mul_left(op));
node_t *b = get_irn_node(get_Mul_right(op));
/**
* Calculates the Identity for Sub nodes.
*/
-static node_t *identity_Sub(node_t *node) {
+static node_t *identity_Sub(node_t *node)
+{
ir_node *sub = node->node;
node_t *b = get_irn_node(get_Sub_right(sub));
ir_mode *mode = get_irn_mode(sub);
/**
* Calculates the Identity for And nodes.
*/
-static node_t *identity_And(node_t *node) {
+static node_t *identity_And(node_t *node)
+{
ir_node *and = node->node;
node_t *a = get_irn_node(get_And_left(and));
node_t *b = get_irn_node(get_And_right(and));
/**
* Calculates the Identity for Confirm nodes.
*/
-static node_t *identity_Confirm(node_t *node) {
+static node_t *identity_Confirm(node_t *node)
+{
ir_node *confirm = node->node;
/* a Confirm is always a Copy */
/**
* Calculates the Identity for Mux nodes.
*/
-static node_t *identity_Mux(node_t *node) {
+static node_t *identity_Mux(node_t *node)
+{
ir_node *mux = node->node;
node_t *t = get_irn_node(get_Mux_true(mux));
node_t *f = get_irn_node(get_Mux_false(mux));
/**
* Calculates the Identity for nodes.
*/
-static node_t *identity(node_t *node) {
+static node_t *identity(node_t *node)
+{
ir_node *irn = node->node;
switch (get_irn_opcode(irn)) {
* Node follower is a (new) follower of leader, segregate Leader
* out edges.
*/
-static void segregate_def_use_chain_1(const ir_node *follower, node_t *leader) {
+static void segregate_def_use_chain_1(const ir_node *follower, node_t *leader)
+{
ir_node *l = leader->node;
int j, i, n = get_irn_n_outs(l);
*
* @param follower the follower IR node
*/
-static void segregate_def_use_chain(const ir_node *follower) {
+static void segregate_def_use_chain(const ir_node *follower)
+{
int i;
for (i = get_irn_arity(follower) - 1; i >= 0; --i) {
*
* @param env the environment
*/
-static void propagate(environment_t *env) {
+static void propagate(environment_t *env)
+{
partition_t *X, *Y;
node_t *x;
lattice_elem_t old_type;
*
* @param irn the node
*/
-static ir_node *get_leader(node_t *node) {
+static ir_node *get_leader(node_t *node)
+{
partition_t *part = node->part;
if (part->n_leader > 1 || node->is_follower) {
/**
* Returns non-zero if a mode_T node has only one reachable output.
*/
-static int only_one_reachable_proj(ir_node *n) {
+static int only_one_reachable_proj(ir_node *n)
+{
int i, k = 0;
for (i = get_irn_n_outs(n) - 1; i >= 0; --i) {
* @param pred the control flow exit
* @param block the destination block
*/
-static int can_exchange(ir_node *pred, ir_node *block) {
+static int can_exchange(ir_node *pred, ir_node *block)
+{
if (is_Start(pred) || has_Block_entity(block))
return 0;
else if (is_Jmp(pred))
* Block Post-Walker, apply the analysis results on control flow by
* shortening Phi's and Block inputs.
*/
-static void apply_cf(ir_node *block, void *ctx) {
+static void apply_cf(ir_node *block, void *ctx)
+{
environment_t *env = ctx;
node_t *node = get_irn_node(block);
int i, j, k, n;
* AddP(x, NULL) is a follower of x, but with different mode.
* Fix it here.
*/
-static void exchange_leader(ir_node *irn, ir_node *leader) {
+static void exchange_leader(ir_node *irn, ir_node *leader)
+{
ir_mode *mode = get_irn_mode(irn);
if (mode != get_irn_mode(leader)) {
/* The conv is a no-op, so we are free to place it
* the Def-Use edges for this purpose, as they still
* reflect the situation.
*/
-static int all_users_are_dead(const ir_node *irn) {
+static int all_users_are_dead(const ir_node *irn)
+{
int i, n = get_irn_n_outs(irn);
for (i = 1; i <= n; ++i) {
* Walker: Find reachable mode_M nodes that have only
* unreachable users. These nodes must be kept later.
*/
-static void find_kept_memory(ir_node *irn, void *ctx) {
+static void find_kept_memory(ir_node *irn, void *ctx)
+{
environment_t *env = ctx;
node_t *node, *block;
/**
* Post-Walker, apply the analysis results;
*/
-static void apply_result(ir_node *irn, void *ctx) {
+static void apply_result(ir_node *irn, void *ctx)
+{
environment_t *env = ctx;
node_t *node = get_irn_node(irn);
/**
* Fix the keep-alives by deleting unreachable ones.
*/
-static void apply_end(ir_node *end, environment_t *env) {
+static void apply_end(ir_node *end, environment_t *env)
+{
int i, j, n = get_End_n_keepalives(end);
ir_node **in;
/**
* sets the generic functions to compute.
*/
-static void set_compute_functions(void) {
+static void set_compute_functions(void)
+{
int i;
/* set the default compute function */
/**
* Add memory keeps.
*/
-static void add_memory_keeps(ir_node **kept_memory, int len) {
+static void add_memory_keeps(ir_node **kept_memory, int len)
+{
ir_node *end = get_irg_end(current_ir_graph);
int i;
ir_nodeset_t set;
ir_nodeset_destroy(&set);
} /* add_memory_keeps */
-void combo(ir_graph *irg) {
+void combo(ir_graph *irg)
+{
environment_t env;
ir_node *initial_bl;
node_t *start;
* @param n IR node
* @param env Environment of walker.
*/
-static void walk_critical_cf_edges(ir_node *n, void *env) {
+static void walk_critical_cf_edges(ir_node *n, void *env)
+{
int arity, i;
ir_node *pre, *block, *jmp;
cf_env *cenv = env;
} /* n is a multi-entry block */
}
-void remove_critical_cf_edges_ex(ir_graph *irg, int ignore_exception_edges) {
+void remove_critical_cf_edges_ex(ir_graph *irg, int ignore_exception_edges)
+{
cf_env env;
env.ignore_exc_edges = (char)ignore_exception_edges;
}
}
-void remove_critical_cf_edges(ir_graph *irg) {
+void remove_critical_cf_edges(ir_graph *irg)
+{
remove_critical_cf_edges_ex(irg, 1);
}
* returns an Alloc node if the node adr Select
* from one
*/
-static ir_node *is_depend_alloc(ir_node *adr) {
+static ir_node *is_depend_alloc(ir_node *adr)
+{
ir_node *alloc;
if (!is_Sel(adr))
* determine if a value calculated by n "escapes", i.e.
* is stored somewhere we could not track
*/
-static int can_escape(ir_node *n) {
+static int can_escape(ir_node *n)
+{
int i, j, k;
/* should always be pointer mode or we made some mistake */
* Walker: Collect all calls to const and pure functions
* to lists. Collect all Proj(Call) nodes into a Proj list.
*/
-static void collect_const_and_pure_calls(ir_node *node, void *env) {
+static void collect_const_and_pure_calls(ir_node *node, void *env)
+{
env_t *ctx = env;
ir_node *call, *ptr;
ir_entity *ent;
* @param irg the graph that contained calls to pure functions
* @param ctx context
*/
-static void fix_const_call_lists(ir_graph *irg, env_t *ctx) {
+static void fix_const_call_lists(ir_graph *irg, env_t *ctx)
+{
ir_node *call, *next, *mem, *proj;
int exc_changed = 0;
ir_graph *rem = current_ir_graph;
* Walker: Collect all calls to nothrow functions
* to lists. Collect all Proj(Call) nodes into a Proj list.
*/
-static void collect_nothrow_calls(ir_node *node, void *env) {
+static void collect_nothrow_calls(ir_node *node, void *env)
+{
env_t *ctx = env;
ir_node *call, *ptr;
ir_entity *ent;
* @param call_list the list of all call sites of const functions
* @param proj_list the list of all memory/exception Proj's of this call sites
*/
-static void fix_nothrow_call_list(ir_graph *irg, ir_node *call_list, ir_node *proj_list) {
+static void fix_nothrow_call_list(ir_graph *irg, ir_node *call_list, ir_node *proj_list)
+{
ir_node *call, *next, *proj;
int exc_changed = 0;
ir_graph *rem = current_ir_graph;
/**
* Calculate the bigger property of two. Handle the temporary flag right.
*/
-static unsigned max_property(unsigned a, unsigned b) {
+static unsigned max_property(unsigned a, unsigned b)
+{
unsigned r, t = (a | b) & mtp_temporary;
a &= ~mtp_temporary;
b &= ~mtp_temporary;
* mtp_property_pure if only Loads and const/pure calls detected
* mtp_no_property else
*/
-static unsigned _follow_mem(ir_node *node) {
+static unsigned _follow_mem(ir_node *node)
+{
unsigned m, mode = mtp_property_const;
ir_node *ptr;
int i;
* mtp_property_pure if only Loads and const/pure calls detected
* mtp_no_property else
*/
-static unsigned follow_mem(ir_node *node, unsigned mode) {
+static unsigned follow_mem(ir_node *node, unsigned mode)
+{
unsigned m;
m = _follow_mem(node);
* @param irg the graph to check
* @param top if set, this is the top call
*/
-static unsigned check_const_or_pure_function(ir_graph *irg, int top) {
+static unsigned check_const_or_pure_function(ir_graph *irg, int top)
+{
ir_node *end, *endbl;
int j;
unsigned prop = get_irg_additional_properties(irg);
*
* @param ctx context
*/
-static void handle_const_Calls(env_t *ctx) {
+static void handle_const_Calls(env_t *ctx)
+{
int i;
ctx->n_calls_SymConst = 0;
*
* @param ctx context
*/
-static void handle_nothrow_Calls(env_t *ctx) {
+static void handle_nothrow_Calls(env_t *ctx)
+{
int i;
ctx->n_calls_SymConst = 0;
*
* @param node the node to check
*/
-static int is_malloc_call_result(const ir_node *node) {
+static int is_malloc_call_result(const ir_node *node)
+{
if (is_Alloc(node) && get_Alloc_where(node) == heap_alloc) {
/* Firm style high-level allocation */
return 1;
/**
* Update a property depending on a call property.
*/
-static unsigned update_property(unsigned orig_prop, unsigned call_prop) {
+static unsigned update_property(unsigned orig_prop, unsigned call_prop)
+{
unsigned t = (orig_prop | call_prop) & mtp_temporary;
unsigned r = orig_prop & call_prop;
return r | t;
/**
* Check if a node is stored.
*/
-static int is_stored(const ir_node *n) {
+static int is_stored(const ir_node *n)
+{
const ir_edge_t *edge;
const ir_node *ptr;
*
* return ~mtp_property_malloc if return values are stored, ~0 else
*/
-static unsigned check_stored_result(ir_graph *irg) {
+static unsigned check_stored_result(ir_graph *irg)
+{
ir_node *end_blk = get_irg_end_block(irg);
int i, j;
unsigned res = ~0;
* @param irg the graph to check
* @param top if set, this is the top call
*/
-static unsigned check_nothrow_or_malloc(ir_graph *irg, int top) {
+static unsigned check_nothrow_or_malloc(ir_graph *irg, int top)
+{
ir_node *end_blk = get_irg_end_block(irg);
ir_entity *ent;
ir_type *mtp;
* When a function was detected as "const", it might be moved out of loops.
* This might be dangerous if the graph can contain endless loops.
*/
-static void check_for_possible_endless_loops(ir_graph *irg) {
+static void check_for_possible_endless_loops(ir_graph *irg)
+{
ir_loop *root_loop;
assure_cf_loop(irg);
} /* optimize_funccalls */
/* initialize the funccall optimization */
-void firm_init_funccalls(void) {
+void firm_init_funccalls(void)
+{
FIRM_DBG_REGISTER(dbg, "firm.opt.funccalls");
} /* firm_init_funccalls */
*
* @param block the block
*/
-static block_info *get_block_info(ir_node *block) {
+static block_info *get_block_info(ir_node *block)
+{
return get_irn_link(block);
} /* get_block_info */
* @param block the block
* @param env the environment
*/
-static void alloc_blk_info(ir_node *block, pre_env *env) {
+static void alloc_blk_info(ir_node *block, pre_env *env)
+{
block_info *info = OALLOC(env->obst, block_info);
set_irn_link(block, info);
*
* @param n the node
*/
-static int is_nice_value(ir_node *n) {
+static int is_nice_value(ir_node *n)
+{
ir_mode *mode;
while (is_Proj(n))
* @param txt a text to describe the set
* @param block the owner block of the set
*/
-static void dump_value_set(ir_valueset_t *set, char *txt, ir_node *block) {
+static void dump_value_set(ir_valueset_t *set, char *txt, ir_node *block)
+{
ir_valueset_iterator_t iter;
ir_node *value, *expr;
int i;
* Topological walker. Allocates block info for every block and place nodes in topological
* order into the nodes set.
*/
-static void topo_walker(ir_node *irn, void *ctx) {
+static void topo_walker(ir_node *irn, void *ctx)
+{
pre_env *env = ctx;
ir_node *block;
block_info *info;
* @param block the block
* @param set a value set, containing the already processed predecessors
*/
-static int is_clean_in_block(ir_node *n, ir_node *block, ir_valueset_t *set) {
+static int is_clean_in_block(ir_node *n, ir_node *block, ir_valueset_t *set)
+{
int i;
if (is_Phi(n))
* @param block the block
* @param ctx the walker environment
*/
-static void compute_antic(ir_node *block, void *ctx) {
+static void compute_antic(ir_node *block, void *ctx)
+{
pre_env *env = ctx;
block_info *succ_info;
block_info *info = get_block_info(block);
* @param irn the node
* @param ctx the walker environment
*/
-static void eliminate(ir_node *irn, void *ctx) {
+static void eliminate(ir_node *irn, void *ctx)
+{
pre_env *env = ctx;
if (is_no_Block(irn)) {
*
* @param pairs list of elimination pairs
*/
-static void eliminate_nodes(elim_pair *pairs) {
+static void eliminate_nodes(elim_pair *pairs)
+{
elim_pair *p;
for (p = pairs; p != NULL; p = p->next) {
/**
* Walker: adds Call operations to a head's link list.
*/
-static void collect_call(ir_node *node, void *env) {
+static void collect_call(ir_node *node, void *env)
+{
ir_node *head = env;
if (is_Call(node)) {
}
}
-static void split_critical_edge(ir_node *block, int pos) {
+static void split_critical_edge(ir_node *block, int pos)
+{
ir_graph *irg = get_irn_irg(block);
ir_node *in[1];
ir_node *new_block;
/**
* get the Load/Store info of a node
*/
-static ldst_info_t *get_ldst_info(ir_node *node, struct obstack *obst) {
+static ldst_info_t *get_ldst_info(ir_node *node, struct obstack *obst)
+{
ldst_info_t *info = get_irn_link(node);
if (! info) {
/**
* get the Block info of a node
*/
-static block_info_t *get_block_info(ir_node *node, struct obstack *obst) {
+static block_info_t *get_block_info(ir_node *node, struct obstack *obst)
+{
block_info_t *info = get_irn_link(node);
if (! info) {
/**
* Return the Selection index of a Sel node from dimension n
*/
-static long get_Sel_array_index_long(ir_node *n, int dim) {
+static long get_Sel_array_index_long(ir_node *n, int dim)
+{
ir_node *index = get_Sel_index(n, dim);
assert(is_Const(index));
return get_tarval_long(get_Const_tarval(index));
* @param depth current depth in steps upward from the root
* of the address
*/
-static compound_graph_path *rec_get_accessed_path(ir_node *ptr, int depth) {
+static compound_graph_path *rec_get_accessed_path(ir_node *ptr, int depth)
+{
compound_graph_path *res = NULL;
ir_entity *root, *field, *ent;
int path_len, pos, idx;
* Returns an access path or NULL. The access path is only
* valid, if the graph is in phase_high and _no_ address computation is used.
*/
-static compound_graph_path *get_accessed_path(ir_node *ptr) {
+static compound_graph_path *get_accessed_path(ir_node *ptr)
+{
compound_graph_path *gr = rec_get_accessed_path(ptr, 0);
return gr;
} /* get_accessed_path */
long index;
} path_entry;
-static ir_node *rec_find_compound_ent_value(ir_node *ptr, path_entry *next) {
+static ir_node *rec_find_compound_ent_value(ir_node *ptr, path_entry *next)
+{
path_entry entry, *p;
ir_entity *ent, *field;
ir_initializer_t *initializer;
return NULL;
}
-static ir_node *find_compound_ent_value(ir_node *ptr) {
+static ir_node *find_compound_ent_value(ir_node *ptr)
+{
return rec_find_compound_ent_value(ptr, NULL);
}
/**
* Update a Load that may have lost its users.
*/
-static void handle_load_update(ir_node *load) {
+static void handle_load_update(ir_node *load)
+{
ldst_info_t *info = get_irn_link(load);
/* do NOT touch volatile loads for now */
* A use of an address node has vanished. Check if this was a Proj
* node and update the counters.
*/
-static void reduce_adr_usage(ir_node *ptr) {
+static void reduce_adr_usage(ir_node *ptr)
+{
if (is_Proj(ptr)) {
if (get_irn_n_edges(ptr) <= 0) {
/* this Proj is dead now */
* Check, if an already existing value of mode old_mode can be converted
* into the needed one new_mode without loss.
*/
-static int can_use_stored_value(ir_mode *old_mode, ir_mode *new_mode) {
+static int can_use_stored_value(ir_mode *old_mode, ir_mode *new_mode)
+{
if (old_mode == new_mode)
return 1;
/**
* Check whether a Call is at least pure, ie. does only read memory.
*/
-static unsigned is_Call_pure(ir_node *call) {
+static unsigned is_Call_pure(ir_node *call)
+{
ir_type *call_tp = get_Call_type(call);
unsigned prop = get_method_additional_properties(call_tp);
*
* INC_MASTER() must be called before dive into
*/
-static unsigned follow_Mem_chain(ir_node *load, ir_node *curr) {
+static unsigned follow_Mem_chain(ir_node *load, ir_node *curr)
+{
unsigned res = 0;
ldst_info_t *info = get_irn_link(load);
ir_node *pred;
* Check if we can replace the load by a given const from
* the const code irg.
*/
-ir_node *can_replace_load_by_const(const ir_node *load, ir_node *c) {
+ir_node *can_replace_load_by_const(const ir_node *load, ir_node *c)
+{
ir_mode *c_mode = get_irn_mode(c);
ir_mode *l_mode = get_Load_mode(load);
ir_node *res = NULL;
*
* INC_MASTER() must be called before dive into
*/
-static unsigned follow_Mem_chain_for_Store(ir_node *store, ir_node *curr) {
+static unsigned follow_Mem_chain_for_Store(ir_node *store, ir_node *curr)
+{
unsigned res = 0;
ldst_info_t *info = get_irn_link(store);
ir_node *pred;
*
* @param store the Store node
*/
-static unsigned optimize_store(ir_node *store) {
+static unsigned optimize_store(ir_node *store)
+{
ir_node *ptr;
ir_node *mem;
ir_entity *entity;
/**
* walker, do the optimizations
*/
-static void do_load_store_optimize(ir_node *n, void *env) {
+static void do_load_store_optimize(ir_node *n, void *env)
+{
walk_env_t *wenv = env;
switch (get_irn_opcode(n)) {
/**
* Gets the node_entry of a node
*/
-static node_entry *get_irn_ne(ir_node *irn, loop_env *env) {
+static node_entry *get_irn_ne(ir_node *irn, loop_env *env)
+{
ir_phase *ph = &env->ph;
node_entry *e = phase_get_irn_data(&env->ph, irn);
* @param env the loop environment
* @param n the node to push
*/
-static void push(loop_env *env, ir_node *n) {
+static void push(loop_env *env, ir_node *n)
+{
node_entry *e;
if (env->tos == ARR_LEN(env->stack)) {
*
* @return The topmost node
*/
-static ir_node *pop(loop_env *env) {
+static ir_node *pop(loop_env *env)
+{
ir_node *n = env->stack[--env->tos];
node_entry *e = get_irn_ne(n, env);
* @param irn the node to check
* @param header_block the header block of the induction variable
*/
-static int is_rc(ir_node *irn, ir_node *header_block) {
+static int is_rc(ir_node *irn, ir_node *header_block)
+{
ir_node *block = get_nodes_block(irn);
return (block != header_block) && block_dominates(block, header_block);
/**
* Compare two avail entries.
*/
-static int cmp_avail_entry(const void *elt, const void *key, size_t size) {
+static int cmp_avail_entry(const void *elt, const void *key, size_t size)
+{
const avail_entry_t *a = elt;
const avail_entry_t *b = key;
(void) size;
/**
* Calculate the hash value of an avail entry.
*/
-static unsigned hash_cache_entry(const avail_entry_t *entry) {
+static unsigned hash_cache_entry(const avail_entry_t *entry)
+{
return get_irn_idx(entry->ptr) * 9 + HASH_PTR(entry->mode);
} /* hash_cache_entry */
* @param pscc the loop described by an SCC
* @param env the loop environment
*/
-static void move_loads_out_of_loops(scc *pscc, loop_env *env) {
+static void move_loads_out_of_loops(scc *pscc, loop_env *env)
+{
ir_node *phi, *load, *next, *other, *next_other;
ir_entity *ent;
int j;
* @param pscc the SCC
* @param env the loop environment
*/
-static void process_loop(scc *pscc, loop_env *env) {
+static void process_loop(scc *pscc, loop_env *env)
+{
ir_node *irn, *next, *header = NULL;
node_entry *b, *h = NULL;
int j, only_phi, num_outside, process = 0;
* @param pscc the SCC
* @param env the loop environment
*/
-static void process_scc(scc *pscc, loop_env *env) {
+static void process_scc(scc *pscc, loop_env *env)
+{
ir_node *head = pscc->head;
node_entry *e = get_irn_ne(head, env);
* @param irg the graph to process
* @param env the loop environment
*/
-static void do_dfs(ir_graph *irg, loop_env *env) {
+static void do_dfs(ir_graph *irg, loop_env *env)
+{
ir_graph *rem = current_ir_graph;
ir_node *endblk, *end;
int i;
/**
* Initialize new phase data. We do this always explicit, so return NULL here
*/
-static void *init_loop_data(ir_phase *ph, const ir_node *irn, void *data) {
+static void *init_loop_data(ir_phase *ph, const ir_node *irn, void *data)
+{
(void)ph;
(void)irn;
(void)data;
*
* @param irg the graph
*/
-static int optimize_loops(ir_graph *irg) {
+static int optimize_loops(ir_graph *irg)
+{
loop_env env;
env.stack = NEW_ARR_F(ir_node *, 128);
/*
* do the load store optimization
*/
-int optimize_load_store(ir_graph *irg) {
+int optimize_load_store(ir_graph *irg)
+{
walk_env_t env;
FIRM_DBG_REGISTER(dbg, "firm.opt.ldstopt");
/**
* Creates object on the heap, and adds it to a linked list to free it later.
*/
-static node_info *new_node_info(void) {
+static node_info *new_node_info(void)
+{
node_info *l = XMALLOCZ(node_info);
l->freelistnext = link_node_state_list;
link_node_state_list = l;
}
/* Returns the number of blocks in a loop. */
-int get_loop_n_blocks(ir_loop *loop) {
+int get_loop_n_blocks(ir_loop *loop)
+{
int elements, e;
int blocks = 0;
elements = get_loop_n_elements(loop);
* A block belongs to the chain if a condition branches out of the loop.
* Returns 1 if the given block belongs to the condition chain.
*/
-static unsigned find_condition_chains(ir_node *block) {
+static unsigned find_condition_chains(ir_node *block)
+{
const ir_edge_t *edge;
unsigned mark = 0;
int nodes_n = 0;
}
-ir_graph_pass_t *loop_inversion_pass(const char *name) {
+ir_graph_pass_t *loop_inversion_pass(const char *name)
+{
return def_graph_pass(name ? name : "loop_inversion", do_loop_inversion);
}
-ir_graph_pass_t *loop_unroll_pass(const char *name) {
+ir_graph_pass_t *loop_unroll_pass(const char *name)
+{
return def_graph_pass(name ? name : "loop_unroll", do_loop_unrolling);
}
-ir_graph_pass_t *loop_peeling_pass(const char *name) {
+ir_graph_pass_t *loop_peeling_pass(const char *name)
+{
return def_graph_pass(name ? name : "loop_peeling", do_loop_peeling);
}
/**
* Dump partition to output.
*/
-static void dump_partition(const char *msg, const partition_t *part) {
+static void dump_partition(const char *msg, const partition_t *part)
+{
const block_t *block;
int first = 1;
/**
* Dumps a list.
*/
-static void dump_list(const char *msg, const block_t *block) {
+static void dump_list(const char *msg, const block_t *block)
+{
const block_t *p;
int first = 1;
/**
* Compare two pointer values of a listmap.
*/
-static int listmap_cmp_ptr(const void *elt, const void *key, size_t size) {
+static int listmap_cmp_ptr(const void *elt, const void *key, size_t size)
+{
const listmap_entry_t *e1 = elt;
const listmap_entry_t *e2 = key;
*
* @param map the listmap
*/
-static void listmap_init(listmap_t *map) {
+static void listmap_init(listmap_t *map)
+{
map->map = new_set(listmap_cmp_ptr, 16);
map->values = NULL;
} /* listmap_init */
*
* @param map the listmap
*/
-static void listmap_term(listmap_t *map) {
+static void listmap_term(listmap_t *map)
+{
del_set(map->map);
} /* listmap_term */
*
* @return the associated listmap entry for the given id
*/
-static listmap_entry_t *listmap_find(listmap_t *map, void *id) {
+static listmap_entry_t *listmap_find(listmap_t *map, void *id)
+{
listmap_entry_t key, *entry;
key.id = id;
*
* @return a hash value for the given opcode map entry
*/
-static unsigned opcode_hash(const opcode_key_t *entry) {
+static unsigned opcode_hash(const opcode_key_t *entry)
+{
/* assume long >= int */
return (entry->mode - (ir_mode *)0) * 9 + entry->code + entry->u.proj * 3 + HASH_PTR(entry->u.addr) + entry->arity;
} /* opcode_hash */
/**
* Compare two entries in the opcode map.
*/
-static int cmp_opcode(const void *elt, const void *key, size_t size) {
+static int cmp_opcode(const void *elt, const void *key, size_t size)
+{
const opcode_key_t *o1 = elt;
const opcode_key_t *o2 = key;
* @param meet_block the control flow meet block of this partition
* @param env the environment
*/
-static partition_t *create_partition(ir_node *meet_block, environment_t *env) {
+static partition_t *create_partition(ir_node *meet_block, environment_t *env)
+{
partition_t *part = OALLOC(&env->obst, partition_t);
INIT_LIST_HEAD(&part->blocks);
* @param partition the partition to add to
* @param env the environment
*/
-static block_t *create_block(ir_node *block, int meet_input, partition_t *partition, environment_t *env) {
+static block_t *create_block(ir_node *block, int meet_input, partition_t *partition, environment_t *env)
+{
block_t *bl = OALLOC(&env->obst, block_t);
set_irn_link(block, bl);
* @param block the block to add to
* @param env the environment
*/
-static node_t *create_node(ir_node *irn, block_t *block, environment_t *env) {
+static node_t *create_node(ir_node *irn, block_t *block, environment_t *env)
+{
node_t *node = OALLOC(&env->obst, node_t);
node->node = irn;
* @param idx the index of the block input in node's predecessors
* @param env the environment
*/
-static void add_pair(block_t *block, ir_node *irn, int idx, environment_t *env) {
+static void add_pair(block_t *block, ir_node *irn, int idx, environment_t *env)
+{
pair_t *pair = OALLOC(&env->obst, pair_t);
pair->next = block->input_pairs;
* @param phi the Phi node
* @param env the environment
*/
-static void add_phi(block_t *block, ir_node *phi, environment_t *env) {
+static void add_phi(block_t *block, ir_node *phi, environment_t *env)
+{
phi_t *node = OALLOC(&env->obst, phi_t);
node->next = block->phis;
/**
* Creates an opcode from a node.
*/
-static opcode_key_t *opcode(const node_t *node, environment_t *env) {
+static opcode_key_t *opcode(const node_t *node, environment_t *env)
+{
opcode_key_t key, *entry;
ir_node *irn = node->node;
*
* @return a new partition containing the nodes of g
*/
-static partition_t *split(partition_t *Z, block_t *g, environment_t *env) {
+static partition_t *split(partition_t *Z, block_t *g, environment_t *env)
+{
partition_t *Z_prime;
block_t *block;
unsigned n = 0;
/**
* Return non-zero if pred should be tread as a input node.
*/
-static int is_input_node(ir_node *pred, ir_node *irn, int index) {
+static int is_input_node(ir_node *pred, ir_node *irn, int index)
+{
/* for now, do NOT turn direct calls into indirect one */
if (index != 1)
return 1;
* @param part the partition
* @param env the environment
*/
-static void propagate_blocks(partition_t *part, environment_t *env) {
+static void propagate_blocks(partition_t *part, environment_t *env)
+{
block_t *ready_blocks = NULL;
unsigned n_ready = 0;
block_t *bl, *next;
*
* @param env the environment
*/
-static void propagate(environment_t *env) {
+static void propagate(environment_t *env)
+{
partition_t *part, *next;
list_for_each_entry_safe(partition_t, part, next, &env->partitions, part_list) {
/**
* Map a block to the phi[block->input] live-trough.
*/
-static void *live_throughs(const block_t *bl, const ir_node *phi) {
+static void *live_throughs(const block_t *bl, const ir_node *phi)
+{
ir_node *input = get_Phi_pred(phi, bl->meet_input);
/* If this input is inside our block, this
* @param part the partition
* @param env the environment
*/
-void propagate_blocks_live_troughs(partition_t *part, environment_t *env) {
+void propagate_blocks_live_troughs(partition_t *part, environment_t *env)
+{
const ir_node *meet_block = part->meet_block;
block_t *bl, *next;
listmap_t map;
*
* @param env the environment
*/
-void propagate_live_troughs(environment_t *env) {
+void propagate_live_troughs(environment_t *env)
+{
partition_t *part, *next;
list_for_each_entry_safe(partition_t, part, next, &env->partitions, part_list) {
*
* @param part the partition to process
*/
-static void apply(ir_graph *irg, partition_t *part) {
+static void apply(ir_graph *irg, partition_t *part)
+{
block_t *repr = list_entry(part->blocks.next, block_t, block_list);
block_t *bl;
ir_node *block, *end, *meet_block, *p, *next;
* @param end_block the end block
* @param env the environment
*/
-static void partition_for_end_block(ir_node *end_block, environment_t *env) {
+static void partition_for_end_block(ir_node *end_block, environment_t *env)
+{
partition_t *part = create_partition(end_block, env);
ir_node *end;
int i;
* @param n_preds number of elements in preds
* @param env the environment
*/
-static void partition_for_block(ir_node *block, pred_t preds[], int n_preds, environment_t *env) {
+static void partition_for_block(ir_node *block, pred_t preds[], int n_preds, environment_t *env)
+{
partition_t *part = create_partition(block, env);
int i;
* Walker: clear the links of all block phi lists and normal
* links.
*/
-static void clear_phi_links(ir_node *irn, void *env) {
+static void clear_phi_links(ir_node *irn, void *env)
+{
(void) env;
if (is_Block(irn)) {
set_Block_phis(irn, NULL);
/**
* Walker, detect live-out nodes.
*/
-static void find_liveouts(ir_node *irn, void *ctx) {
+static void find_liveouts(ir_node *irn, void *ctx)
+{
environment_t *env = ctx;
ir_node **live_outs = env->live_outs;
ir_node *this_block;
/**
* Check if the current block is the meet block of a its predecessors.
*/
-static void check_for_cf_meet(ir_node *block, void *ctx) {
+static void check_for_cf_meet(ir_node *block, void *ctx)
+{
environment_t *env = ctx;
int i, k, n;
pred_t *preds;
/**
* Compare two nodes for root ordering.
*/
-static int cmp_nodes(const void *a, const void *b) {
+static int cmp_nodes(const void *a, const void *b)
+{
const ir_node *const *pa = a;
const ir_node *const *pb = b;
const ir_node *irn_a = *pa;
/**
* Add the roots to all blocks.
*/
-static void add_roots(ir_graph *irg, environment_t *env) {
+static void add_roots(ir_graph *irg, environment_t *env)
+{
unsigned idx, n = get_irg_last_idx(irg);
ir_node **live_outs = env->live_outs;
block_t *bl;
#endif /* GENERAL_SHAPE */
/* Combines congruent end blocks into one. */
-int shape_blocks(ir_graph *irg) {
+int shape_blocks(ir_graph *irg)
+{
ir_graph *rem;
environment_t env;
partition_t *part;
return res;
} /* shape_blocks */
-ir_graph_pass_t *shape_blocks_pass(const char *name) {
+ir_graph_pass_t *shape_blocks_pass(const char *name)
+{
return def_graph_pass_ret(name ? name : "shape_blocks", shape_blocks);
} /* shape_blocks_pass */
* This is a often needed case, so we handle here Confirm
* nodes too.
*/
-int value_not_zero(const ir_node *n, ir_node_cnst_ptr *confirm) {
+int value_not_zero(const ir_node *n, ir_node_cnst_ptr *confirm)
+{
#define RET_ON(x) if (x) { *confirm = n; return 1; }; break
tarval *tv;
* - A SymConst(entity) is NEVER a NULL pointer
* - Confirms are evaluated
*/
-int value_not_null(const ir_node *n, ir_node_cnst_ptr *confirm) {
+int value_not_null(const ir_node *n, ir_node_cnst_ptr *confirm)
+{
tarval *tv;
*confirm = NULL;
* If the mode of the value did not honor signed zeros, else
* check for >= 0 or < 0.
*/
-value_classify_sign classify_value_sign(ir_node *n) {
+value_classify_sign classify_value_sign(ir_node *n)
+{
tarval *tv, *c;
ir_mode *mode;
pn_Cmp cmp, ncmp;
* @return the filled interval or NULL if no interval
* can be created (happens only on floating point
*/
-static interval_t *get_interval_from_tv(interval_t *iv, tarval *tv) {
+static interval_t *get_interval_from_tv(interval_t *iv, tarval *tv)
+{
ir_mode *mode = get_tarval_mode(tv);
if (tv == tarval_bad) {
* @return the filled interval or NULL if no interval
* can be created (happens only on floating point
*/
-static interval_t *get_interval(interval_t *iv, ir_node *bound, pn_Cmp pnc) {
+static interval_t *get_interval(interval_t *iv, ir_node *bound, pn_Cmp pnc)
+{
ir_mode *mode = get_irn_mode(bound);
tarval *tv = value_of(bound);
* tarval_b_true or tarval_b_false it it can be evaluated,
* tarval_bad else
*/
-static tarval *(compare_iv)(const interval_t *l_iv, const interval_t *r_iv, pn_Cmp pnc) {
+static tarval *(compare_iv)(const interval_t *l_iv, const interval_t *r_iv, pn_Cmp pnc)
+{
pn_Cmp res;
unsigned flags;
tarval *tv_true = tarval_b_true, *tv_false = tarval_b_false;
/**
* Returns non-zero, if a given relation is transitive.
*/
-static int is_transitive(pn_Cmp pnc) {
+static int is_transitive(pn_Cmp pnc)
+{
return (pn_Cmp_False < pnc && pnc < pn_Cmp_Lg);
} /* is_transitive */
* @param right the right operand of the Cmp
* @param pnc the compare relation
*/
-tarval *computed_value_Cmp_Confirm(ir_node *cmp, ir_node *left, ir_node *right, pn_Cmp pnc) {
+tarval *computed_value_Cmp_Confirm(ir_node *cmp, ir_node *left, ir_node *right, pn_Cmp pnc)
+{
ir_node *l_bound;
pn_Cmp l_pnc, res_pnc, neg_pnc;
interval_t l_iv, r_iv;
* @param len length of the string buffer
* @param iv the interval
*/
-static int iv_snprintf(char *buf, size_t len, const interval_t *iv) {
+static int iv_snprintf(char *buf, size_t len, const interval_t *iv)
+{
char smin[64], smax[64];
if (iv) {
* @param r_iv the right interval
* @param pnc the compare relation
*/
-static void print_iv_cmp(const interval_t *l_iv, const interval_t *r_iv, pn_Cmp pnc) {
+static void print_iv_cmp(const interval_t *l_iv, const interval_t *r_iv, pn_Cmp pnc)
+{
char sl[128], sr[128];
iv_snprintf(sl, sizeof(sl), l_iv);
* @param r_iv the right interval
* @param pnc the compare relation
*/
-static tarval *compare_iv_dbg(const interval_t *l_iv, const interval_t *r_iv, pn_Cmp pnc) {
+static tarval *compare_iv_dbg(const interval_t *l_iv, const interval_t *r_iv, pn_Cmp pnc)
+{
tarval *tv = (compare_iv)(l_iv, r_iv, pnc);
if (tv == tarval_bad)
* Optimize the frame type of an irg by removing
* never touched entities.
*/
-void opt_frame_irg(ir_graph *irg) {
+void opt_frame_irg(ir_graph *irg)
+{
ir_type *frame_tp = get_irg_frame_type(irg);
ir_entity *ent, *list;
ir_node *frame, *sel;
* in a Block.
*/
static inline int
-compute_new_arity(ir_node *b) {
+compute_new_arity(ir_node *b)
+{
int i, res, irn_arity;
int irg_v, block_v;
*
* Note: Also used for loop unrolling.
*/
-static void copy_node(ir_node *n, void *env) {
+static void copy_node(ir_node *n, void *env)
+{
ir_node *nn, *block;
int new_arity;
ir_op *op = get_irn_op(n);
* Copies new predecessors of old node to new node remembered in link.
* Spare the Bad predecessors of Phi and Block nodes.
*/
-static void copy_preds(ir_node *n, void *env) {
+static void copy_preds(ir_node *n, void *env)
+{
ir_node *nn, *block;
int i, j, irn_arity;
(void) env;
* @param irg the graph to be copied
* @param copy_node_nr If non-zero, the node number will be copied
*/
-static void copy_graph(ir_graph *irg, int copy_node_nr) {
+static void copy_graph(ir_graph *irg, int copy_node_nr)
+{
ir_node *oe, *ne, *ob, *nb, *om, *nm; /* old end, new end, old bad, new bad, old NoMem, new NoMem */
ir_node *ka; /* keep alive */
int i, irn_arity;
* @param copy_node_nr If non-zero, the node number will be copied
*/
static void
-copy_graph_env(int copy_node_nr) {
+copy_graph_env(int copy_node_nr)
+{
ir_graph *irg = current_ir_graph;
ir_node *old_end, *new_anchor;
int i;
* Adds all new nodes to a new hash table for CSE. Does not
* perform CSE, so the hash table might contain common subexpressions.
*/
-void dead_node_elimination(ir_graph *irg) {
+void dead_node_elimination(ir_graph *irg)
+{
ir_graph *rem;
#ifdef INTERPROCEDURAL_VIEW
int rem_ipview = get_interprocedural_view();
#endif
}
-ir_graph_pass_t *dead_node_elimination_pass(const char *name) {
+ir_graph_pass_t *dead_node_elimination_pass(const char *name)
+{
return def_graph_pass(name ? name : "dce", dead_node_elimination);
}
* If block has bad predecessors, create a new in array without bad preds.
* Otherwise let in array untouched.
*/
-static void relink_bad_block_predecessors(ir_node *n, void *env) {
+static void relink_bad_block_predecessors(ir_node *n, void *env)
+{
ir_node **new_in, *irn;
int i, new_irn_n, old_irn_arity, new_irn_arity = 0;
(void) env;
* function of Phi's Block. If this block has bad predecessors, relink preds
* of the Phi-node.
*/
-static void relink_bad_predecessors(ir_node *n, void *env) {
+static void relink_bad_predecessors(ir_node *n, void *env)
+{
ir_node *block, **old_in;
int i, old_irn_arity, new_irn_arity;
* to the link field and sets a new in array if arity of predecessors
* changes).
*/
-void remove_bad_predecessors(ir_graph *irg) {
+void remove_bad_predecessors(ir_graph *irg)
+{
panic("Fix backedge handling first");
irg_walk_graph(irg, firm_clear_link, relink_bad_predecessors, NULL);
}
ir_node **place;
} survive_dce_list_t;
-static void dead_node_hook(void *context, ir_graph *irg, int start) {
+static void dead_node_hook(void *context, ir_graph *irg, int start)
+{
survive_dce_t *sd = context;
(void) irg;
/**
* Hook called when dead node elimination replaces old by nw.
*/
-static void dead_node_subst_hook(void *context, ir_graph *irg, ir_node *old, ir_node *nw) {
+static void dead_node_subst_hook(void *context, ir_graph *irg, ir_node *old, ir_node *nw)
+{
survive_dce_t *sd = context;
survive_dce_list_t *list = pmap_get(sd->places, old);
(void) irg;
/**
* Make a new Survive DCE environment.
*/
-survive_dce_t *new_survive_dce(void) {
+survive_dce_t *new_survive_dce(void)
+{
survive_dce_t *res = XMALLOC(survive_dce_t);
obstack_init(&res->obst);
res->places = pmap_create();
/**
* Free a Survive DCE environment.
*/
-void free_survive_dce(survive_dce_t *sd) {
+void free_survive_dce(survive_dce_t *sd)
+{
obstack_free(&sd->obst, NULL);
pmap_destroy(sd->places);
unregister_hook(hook_dead_node_elim, &sd->dead_node_elim);
* @param sd The Survive DCE environment.
* @param place The address of the node pointer.
*/
-void survive_dce_register_irn(survive_dce_t *sd, ir_node **place) {
+void survive_dce_register_irn(survive_dce_t *sd, ir_node **place)
+{
if (*place != NULL) {
ir_node *irn = *place;
survive_dce_list_t *curr = pmap_get(sd->places, irn);
* inlined procedure. The new entities must be in the link field of
* the entities.
*/
-static void copy_node_inline(ir_node *n, void *env) {
+static void copy_node_inline(ir_node *n, void *env)
+{
ir_node *nn;
ir_type *frame_tp = (ir_type *)env;
* Copies new predecessors of old node and move constants to
* the Start Block.
*/
-static void copy_preds_inline(ir_node *n, void *env) {
+static void copy_preds_inline(ir_node *n, void *env)
+{
ir_node *nn;
copy_preds(n, env);
/**
* Walker: checks if P_value_arg_base is used.
*/
-static void find_addr(ir_node *node, void *env) {
+static void find_addr(ir_node *node, void *env)
+{
int *allow_inline = env;
if (is_Sel(node)) {
ir_graph *irg = current_ir_graph;
*
* check these conditions here
*/
-static int can_inline(ir_node *call, ir_graph *called_graph) {
+static int can_inline(ir_node *call, ir_graph *called_graph)
+{
ir_type *call_type = get_Call_type(call);
int params, ress, i, res;
assert(is_Method_type(call_type));
};
/* Inlines a method at the given call site. */
-int inline_method(ir_node *call, ir_graph *called_graph) {
+int inline_method(ir_node *call, ir_graph *called_graph)
+{
ir_node *pre_call;
ir_node *post_call, *post_bl;
ir_node *in[pn_Start_max];
/**
* Walker: Collect all calls to known graphs inside a graph.
*/
-static void collect_calls(ir_node *call, void *env) {
+static void collect_calls(ir_node *call, void *env)
+{
(void) env;
if (is_Call(call)) {
ir_graph *called_irg = get_call_called_irg(call);
* Methods where the obstack containing the firm graph is smaller than
* size are inlined.
*/
-void inline_small_irgs(ir_graph *irg, int size) {
+void inline_small_irgs(ir_graph *irg, int size)
+{
ir_graph *rem = current_ir_graph;
inline_env_t env;
call_entry *entry;
/**
* Wrapper to run inline_small_irgs() as a pass.
*/
-static int inline_small_irgs_wrapper(ir_graph *irg, void *context) {
+static int inline_small_irgs_wrapper(ir_graph *irg, void *context)
+{
struct inline_small_irgs_pass_t *pass = context;
inline_small_irgs(irg, pass->size);
}
/* create a pass for inline_small_irgs() */
-ir_graph_pass_t *inline_small_irgs_pass(const char *name, int size) {
+ir_graph_pass_t *inline_small_irgs_pass(const char *name, int size)
+{
struct inline_small_irgs_pass_t *pass =
XMALLOCZ(struct inline_small_irgs_pass_t);
/**
* Allocate a new environment for inlining.
*/
-static inline_irg_env *alloc_inline_irg_env(void) {
+static inline_irg_env *alloc_inline_irg_env(void)
+{
inline_irg_env *env = OALLOC(&temp_obst, inline_irg_env);
INIT_LIST_HEAD(&env->calls);
env->local_weights = NULL;
* post-walker: collect all calls in the inline-environment
* of a graph and sum some statistics.
*/
-static void collect_calls2(ir_node *call, void *ctx) {
+static void collect_calls2(ir_node *call, void *ctx)
+{
wenv_t *env = ctx;
inline_irg_env *x = env->x;
ir_opcode code = get_irn_opcode(call);
* Returns TRUE if the number of callers is 0 in the irg's environment,
* hence this irg is a leave.
*/
-inline static int is_leave(ir_graph *irg) {
+inline static int is_leave(ir_graph *irg)
+{
inline_irg_env *env = get_irg_link(irg);
return env->n_call_nodes == 0;
}
* Returns TRUE if the number of nodes in the callee is
* smaller then size in the irg's environment.
*/
-inline static int is_smaller(ir_graph *callee, unsigned size) {
+inline static int is_smaller(ir_graph *callee, unsigned size)
+{
inline_irg_env *env = get_irg_link(callee);
return env->n_nodes < size;
}
* @param src source environment
* @param loop_depth the loop depth of the call that is replaced by the src list
*/
-static void append_call_list(inline_irg_env *dst, inline_irg_env *src, int loop_depth) {
+static void append_call_list(inline_irg_env *dst, inline_irg_env *src, int loop_depth)
+{
call_entry *entry, *nentry;
/* Note that the src list points to Call nodes in the inlined graph, but
/**
* Wrapper to run inline_leave_functions() as a ir_prog pass.
*/
-static int inline_leave_functions_wrapper(ir_prog *irp, void *context) {
+static int inline_leave_functions_wrapper(ir_prog *irp, void *context)
+{
struct inline_leave_functions_pass_t *pass = context;
(void)irp;
/**
* Calculate the parameter weights for transmitting the address of a local variable.
*/
-static unsigned calc_method_local_weight(ir_node *arg) {
+static unsigned calc_method_local_weight(ir_node *arg)
+{
int i, j, k;
unsigned v, weight = 0;
/**
* Calculate the parameter weights for transmitting the address of a local variable.
*/
-static void analyze_irg_local_weights(inline_irg_env *env, ir_graph *irg) {
+static void analyze_irg_local_weights(inline_irg_env *env, ir_graph *irg)
+{
ir_entity *ent = get_irg_entity(irg);
ir_type *mtp;
int nparams, i, proj_nr;
* After inlining, the local variable might be transformed into a
* SSA variable by scalar_replacement().
*/
-static unsigned get_method_local_adress_weight(ir_graph *callee, int pos) {
+static unsigned get_method_local_adress_weight(ir_graph *callee, int pos)
+{
inline_irg_env *env = get_irg_link(callee);
if (env->local_weights != NULL) {
/**
* Callgraph walker, collect all visited graphs.
*/
-static void callgraph_walker(ir_graph *irg, void *data) {
+static void callgraph_walker(ir_graph *irg, void *data)
+{
(void) data;
irgs[last_irg++] = irg;
}
*
* @return the list of graphs.
*/
-static ir_graph **create_irg_list(void) {
+static ir_graph **create_irg_list(void)
+{
ir_entity **free_methods;
int arr_len;
int n_irgs = get_irp_n_irgs();
/**
* Wrapper to run inline_functions() as a ir_prog pass.
*/
-static int inline_functions_wrapper(ir_prog *irp, void *context) {
+static int inline_functions_wrapper(ir_prog *irp, void *context)
+{
struct inline_functions_pass_t *pass = context;
(void)irp;
inline_functions_wrapper);
}
-void firm_init_inline(void) {
+void firm_init_inline(void)
+{
FIRM_DBG_REGISTER(dbg, "firm.opt.inline");
}
*
* @param ldst environment
*/
-static void dump_block_list(ldst_env *env) {
+static void dump_block_list(ldst_env *env)
+{
block_t *entry;
memop_t *op;
int i;
* @param bl current block
* @param s name of the set
*/
-static void dump_curr(block_t *bl, const char *s) {
+static void dump_curr(block_t *bl, const char *s)
+{
unsigned end = env.rbs_size - 1;
unsigned pos;
int i;
} /* dump_curr */
#else
-static void dump_block_list(ldst_env *env) {
+static void dump_block_list(ldst_env *env)
+{
(void) env;
}
-static void dump_curr(block_t *bl, const char *s) {
+static void dump_curr(block_t *bl, const char *s)
+{
(void) bl;
(void) s;
}
#endif /* DEBUG_libfirm */
/** Get the block entry for a block node */
-static block_t *get_block_entry(const ir_node *block) {
+static block_t *get_block_entry(const ir_node *block)
+{
assert(is_Block(block));
return get_irn_link(block);
} /* get_block_entry */
/** Get the memop entry for a memory operation node */
-static memop_t *get_irn_memop(const ir_node *irn) {
+static memop_t *get_irn_memop(const ir_node *irn)
+{
assert(! is_Block(irn));
return get_irn_link(irn);
} /* get_irn_memop */
* @param post post walker function
* @param ctx context parameter for the walker functions
*/
-static void walk_memory(ir_node *irn, irg_walk_func *pre, irg_walk_func *post, void *ctx) {
+static void walk_memory(ir_node *irn, irg_walk_func *pre, irg_walk_func *post, void *ctx)
+{
int i;
ir_mode *mode;
* @param post post walker function
* @param ctx context parameter for the walker functions
*/
-static void walk_memory_irg(ir_graph *irg, irg_walk_func pre, irg_walk_func post, void *ctx) {
+static void walk_memory_irg(ir_graph *irg, irg_walk_func pre, irg_walk_func post, void *ctx)
+{
inc_irg_visited(irg);
ir_reserve_resources(irg, IR_RESOURCE_IRN_VISITED);
*
* @return the allocated id
*/
-static unsigned register_address(ir_node *adr) {
+static unsigned register_address(ir_node *adr)
+{
address_entry *entry;
/* skip Confirms and Casts */
* @param block the block
* @param pos the position of the predecessor in block
*/
-static ir_node *phi_translate(ir_node *address, const ir_node *block, int pos) {
+static ir_node *phi_translate(ir_node *address, const ir_node *block, int pos)
+{
if (is_Phi(address) && get_nodes_block(address) == block)
address = get_Phi_pred(address, pos);
return address;
* Walker: allocate an block entry for every block
* and register all potential addresses.
*/
-static void prepare_blocks(ir_node *irn, void *ctx) {
+static void prepare_blocks(ir_node *irn, void *ctx)
+{
(void)ctx;
if (is_Block(irn)) {
/**
* Post-Walker, link in all Phi's
*/
-static void link_phis(ir_node *irn, void *ctx) {
+static void link_phis(ir_node *irn, void *ctx)
+{
(void)ctx;
if (is_Phi(irn)) {
/**
* Block walker: creates the inverse post-order list for the CFG.
*/
-static void inverse_post_order(ir_node *block, void *ctx) {
+static void inverse_post_order(ir_node *block, void *ctx)
+{
block_t *entry = get_block_entry(block);
(void)ctx;
/**
* Block walker: create backward links for the memops of a block.
*/
-static void collect_backward(ir_node *block, void *ctx) {
+static void collect_backward(ir_node *block, void *ctx)
+{
block_t *entry = get_block_entry(block);
memop_t *last, *op;
*
* @return the allocated memop
*/
-static memop_t *alloc_memop(ir_node *irn) {
+static memop_t *alloc_memop(ir_node *irn)
+{
memop_t *m = OALLOC(&env.obst, memop_t);
m->value.address = NULL;
* @param op the memop to clone
* @param phi the Phi-node representing the new value
*/
-static memop_t *clone_memop_phi(memop_t *op, ir_node *phi) {
+static memop_t *clone_memop_phi(memop_t *op, ir_node *phi)
+{
memop_t *m = OALLOC(&env.obst, memop_t);
m->value = op->value;
*
* return a bitset of mtp_property_const and mtp_property_pure
*/
-static unsigned get_Call_memory_properties(ir_node *call) {
+static unsigned get_Call_memory_properties(ir_node *call)
+{
ir_type *call_tp = get_Call_type(call);
unsigned prop = get_method_additional_properties(call_tp);
*
* @return an entity or NULL
*/
-static ir_entity *find_constant_entity(ir_node *ptr) {
+static ir_entity *find_constant_entity(ir_node *ptr)
+{
for (;;) {
if (is_SymConst(ptr) && get_SymConst_kind(ptr) == symconst_addr_ent) {
return get_SymConst_entity(ptr);
/**
* Return the Selection index of a Sel node from dimension n
*/
-static long get_Sel_array_index_long(ir_node *n, int dim) {
+static long get_Sel_array_index_long(ir_node *n, int dim)
+{
ir_node *index = get_Sel_index(n, dim);
assert(is_Const(index));
return get_tarval_long(get_Const_tarval(index));
* @param depth current depth in steps upward from the root
* of the address
*/
-static compound_graph_path *rec_get_accessed_path(ir_node *ptr, int depth) {
+static compound_graph_path *rec_get_accessed_path(ir_node *ptr, int depth)
+{
compound_graph_path *res = NULL;
ir_entity *root, *field, *ent;
int path_len, pos, idx;
* Returns an access path or NULL. The access path is only
* valid, if the graph is in phase_high and _no_ address computation is used.
*/
-static compound_graph_path *get_accessed_path(ir_node *ptr) {
+static compound_graph_path *get_accessed_path(ir_node *ptr)
+{
compound_graph_path *gr = rec_get_accessed_path(ptr, 0);
return gr;
} /* get_accessed_path */
long index;
} path_entry;
-static ir_node *rec_find_compound_ent_value(ir_node *ptr, path_entry *next) {
+static ir_node *rec_find_compound_ent_value(ir_node *ptr, path_entry *next)
+{
path_entry entry, *p;
ir_entity *ent, *field;
ir_initializer_t *initializer;
return NULL;
} /* rec_find_compound_ent_value */
-static ir_node *find_compound_ent_value(ir_node *ptr) {
+static ir_node *find_compound_ent_value(ir_node *ptr)
+{
return rec_find_compound_ent_value(ptr, NULL);
} /* find_compound_ent_value */
*
* @param op the Load memop
*/
-static void mark_replace_load(memop_t *op, ir_node *def) {
+static void mark_replace_load(memop_t *op, ir_node *def)
+{
op->replace = def;
op->flags |= FLAG_KILLED_NODE;
env.changed = 1;
*
* @param op the Store memop
*/
-static void mark_remove_store(memop_t *op) {
+static void mark_remove_store(memop_t *op)
+{
op->flags |= FLAG_KILLED_NODE;
env.changed = 1;
} /* mark_remove_store */
*
* @param m the memop
*/
-static void update_Load_memop(memop_t *m) {
+static void update_Load_memop(memop_t *m)
+{
int i;
ir_node *load = m->node;
ir_node *ptr;
*
* @param m the memop
*/
-static void update_Store_memop(memop_t *m) {
+static void update_Store_memop(memop_t *m)
+{
int i;
ir_node *store = m->node;
ir_node *adr = get_Store_ptr(store);
*
* @param m the memop
*/
-static void update_Call_memop(memop_t *m) {
+static void update_Call_memop(memop_t *m)
+{
ir_node *call = m->node;
unsigned prop = get_Call_memory_properties(call);
int i;
*
* @param m the memop
*/
-static void update_DivOp_memop(memop_t *m) {
+static void update_DivOp_memop(memop_t *m)
+{
ir_node *div = m->node;
int i;
*
* @param m the memop
*/
-static void update_Phi_memop(memop_t *m) {
+static void update_Phi_memop(memop_t *m)
+{
	/* the Phi is its own mem */
m->mem = m->node;
} /* update_Phi_memop */
/**
* Memory walker: collect all memory ops and build topological lists.
*/
-static void collect_memops(ir_node *irn, void *ctx) {
+static void collect_memops(ir_node *irn, void *ctx)
+{
memop_t *op;
ir_node *block;
block_t *entry;
* not exists in the set or cannot be converted into
* the requested mode
*/
-static memop_t *find_address(const value_t *value) {
+static memop_t *find_address(const value_t *value)
+{
if (rbitset_is_set(env.curr_set, value->id)) {
memop_t *res = env.curr_id_2_memop[value->id];
*
* @param bl the block
*/
-static memop_t *find_address_avail(const block_t *bl, unsigned id, const ir_mode *mode) {
+static memop_t *find_address_avail(const block_t *bl, unsigned id, const ir_mode *mode)
+{
if (rbitset_is_set(bl->avail_out, id)) {
memop_t *res = bl->id_2_memop_avail[id];
/**
* Kill all addresses from the current set.
*/
-static void kill_all(void) {
+static void kill_all(void)
+{
rbitset_clear_all(env.curr_set, env.rbs_size);
/* set sentinel */
*
* @param value the Store value
*/
-static void kill_memops(const value_t *value) {
+static void kill_memops(const value_t *value)
+{
unsigned end = env.rbs_size - 1;
unsigned pos;
*
* @param op the memory op
*/
-static void add_memop(memop_t *op) {
+static void add_memop(memop_t *op)
+{
rbitset_set(env.curr_set, op->value.id);
env.curr_id_2_memop[op->value.id] = op;
} /* add_memop */
* @param bl the block
* @param op the memory op
*/
-static void add_memop_avail(block_t *bl, memop_t *op) {
+static void add_memop_avail(block_t *bl, memop_t *op)
+{
rbitset_set(bl->avail_out, op->value.id);
bl->id_2_memop_avail[op->value.id] = op;
} /* add_memop_avail */
* @param from the original mode
* @param to the destination mode
*/
-static int can_convert_to(const ir_mode *from, const ir_mode *to) {
+static int can_convert_to(const ir_mode *from, const ir_mode *to)
+{
if (get_mode_arithmetic(from) == irma_twos_complement &&
get_mode_arithmetic(to) == irma_twos_complement &&
get_mode_size_bits(from) == get_mode_size_bits(to))
* @return the possible converted node or NULL
* if the conversion is not possible
*/
-static ir_node *conv_to(ir_node *irn, ir_mode *mode) {
+static ir_node *conv_to(ir_node *irn, ir_mode *mode)
+{
ir_mode *other = get_irn_mode(irn);
if (other != mode) {
/* different modes: check if conversion is possible without changing the bits */
*
* @param value the value whose address is updated
*/
-static void update_address(value_t *value) {
+static void update_address(value_t *value)
+{
if (is_Proj(value->address)) {
ir_node *load = get_Proj_pred(value->address);
*
* @param bl the block
*/
-static void calc_gen_kill_avail(block_t *bl) {
+static void calc_gen_kill_avail(block_t *bl)
+{
memop_t *op;
ir_node *def;
*
* @param block the block
*/
-static void forward_avail(block_t *bl) {
+static void forward_avail(block_t *bl)
+{
/* fill the data from the current block */
env.curr_id_2_memop = bl->id_2_memop_avail;
env.curr_set = bl->avail_out;
*
* @return non-zero if the set has changed since last iteration
*/
-static int backward_antic(block_t *bl) {
+static int backward_antic(block_t *bl)
+{
memop_t *op;
ir_node *block = bl->block;
int n = get_Block_n_cfg_outs(block);
*
* @param op the Load memop
*/
-static void replace_load(memop_t *op) {
+static void replace_load(memop_t *op)
+{
ir_node *load = op->node;
ir_node *def = skip_Id(op->replace);
ir_node *proj;
*
* @param op the Store memop
*/
-static void remove_store(memop_t *op) {
+static void remove_store(memop_t *op)
+{
ir_node *store = op->node;
ir_node *proj;
*
* @param bl the block
*/
-static void do_replacements(block_t *bl) {
+static void do_replacements(block_t *bl)
+{
memop_t *op;
for (op = bl->memop_forward; op != NULL; op = op->next) {
/**
* Calculate the Avail_out sets for all basic blocks.
*/
-static void calcAvail(void) {
+static void calcAvail(void)
+{
memop_t **tmp_memop = env.curr_id_2_memop;
unsigned *tmp_set = env.curr_set;
block_t *bl;
/**
* Calculate the Antic_in sets for all basic blocks.
*/
-static void calcAntic(void) {
+static void calcAntic(void)
+{
int i, need_iter;
/* calculate antic_out */
*
* @param bl the block
*/
-static ir_node *find_last_memory(block_t *bl) {
+static ir_node *find_last_memory(block_t *bl)
+{
for (;;) {
if (bl->memop_backward != NULL) {
return bl->memop_backward->mem;
* @param omem the old memory IR-node
* @param nmem the new memory IR-node
*/
-static void reroute_all_mem_users(ir_node *omem, ir_node *nmem) {
+static void reroute_all_mem_users(ir_node *omem, ir_node *nmem)
+{
int i;
for (i = get_irn_n_outs(omem) - 1; i >= 0; --i) {
* @param nmem the new memory IR-node
* @param pass_bl the block the memory must pass
*/
-static void reroute_mem_through(ir_node *omem, ir_node *nmem, ir_node *pass_bl) {
+static void reroute_mem_through(ir_node *omem, ir_node *nmem, ir_node *pass_bl)
+{
int i, j, n = get_irn_n_outs(omem);
ir_def_use_edge *edges = NEW_ARR_D(ir_def_use_edge, &env.obst, n + 1);
/**
* insert Loads, making partly redundant Loads fully redundant
*/
-static int insert_Load(block_t *bl) {
+static int insert_Load(block_t *bl)
+{
ir_node *block = bl->block;
int i, n = get_Block_n_cfgpreds(block);
unsigned end = env.rbs_size - 1;
/**
* Insert Loads upwards.
*/
-static void insert_Loads_upwards(void) {
+static void insert_Loads_upwards(void)
+{
int i, need_iter;
block_t *bl;
*
* @param irg the graph to operate on
*/
-static void kill_unreachable_blocks(ir_graph *irg) {
+static void kill_unreachable_blocks(ir_graph *irg)
+{
block_t *bl;
ir_node **ins;
int changed = 0;
}
} /* kill_unreachable_blocks */
-int opt_ldst(ir_graph *irg) {
+int opt_ldst(ir_graph *irg)
+{
block_t *bl;
ir_graph *rem = current_ir_graph;
/**
* Compare two LFTR edges.
*/
-static int LFTR_cmp(const void *e1, const void *e2, size_t size) {
+static int LFTR_cmp(const void *e1, const void *e2, size_t size)
+{
const LFTR_edge *l1 = e1;
const LFTR_edge *l2 = e2;
(void) size;
*
* @param src the source node of the transition
*/
-static LFTR_edge *LFTR_find(ir_node *src, iv_env *env) {
+static LFTR_edge *LFTR_find(ir_node *src, iv_env *env)
+{
LFTR_edge key;
key.src = src;
* @param rc the region const used in the transition
* @param env the environment
*/
-static void LFTR_add(ir_node *src, ir_node *dst, ir_opcode code, ir_node *rc, iv_env *env) {
+static void LFTR_add(ir_node *src, ir_node *dst, ir_opcode code, ir_node *rc, iv_env *env)
+{
LFTR_edge key;
key.src = src;
* @param irn the node
* @param env the environment
*/
-static node_entry *get_irn_ne(ir_node *irn, iv_env *env) {
+static node_entry *get_irn_ne(ir_node *irn, iv_env *env)
+{
node_entry *e = get_irn_link(irn);
if (e == NULL) {
* @param iv any node of the induction variable
* @param env the environment
*/
-static scc *get_iv_scc(ir_node *iv, iv_env *env) {
+static scc *get_iv_scc(ir_node *iv, iv_env *env)
+{
node_entry *e = get_irn_ne(iv, env);
return e->pscc;
} /* get_iv_scc */
*
* @returns the header if it is one, NULL else
*/
-static ir_node *is_iv(ir_node *irn, iv_env *env) {
+static ir_node *is_iv(ir_node *irn, iv_env *env)
+{
return get_irn_ne(irn, env)->header;
} /* is_iv */
* @param irn the node to check
* @param header_block the header block of the induction variable
*/
-static int is_rc(ir_node *irn, ir_node *header_block) {
+static int is_rc(ir_node *irn, ir_node *header_block)
+{
ir_node *block = get_nodes_block(irn);
return (block != header_block) && block_dominates(block, header_block);
/**
* Set compare function for the quad set.
*/
-static int quad_cmp(const void *e1, const void *e2, size_t size) {
+static int quad_cmp(const void *e1, const void *e2, size_t size)
+{
const quadruple_t *c1 = e1;
const quadruple_t *c2 = e2;
(void) size;
*
* @return the already reduced node or NULL if this operation is not yet reduced
*/
-static ir_node *search(ir_opcode code, ir_node *op1, ir_node *op2, iv_env *env) {
+static ir_node *search(ir_opcode code, ir_node *op1, ir_node *op2, iv_env *env)
+{
quadruple_t key, *entry;
key.code = code;
* @param result the result of the reduced operation
* @param env the environment
*/
-static void add(ir_opcode code, ir_node *op1, ir_node *op2, ir_node *result, iv_env *env) {
+static void add(ir_opcode code, ir_node *op1, ir_node *op2, ir_node *result, iv_env *env)
+{
quadruple_t key;
key.code = code;
* that either block1 dominates block2 or vice versa. So, just return
* the "smaller" one.
*/
-static ir_node *find_location(ir_node *block1, ir_node *block2) {
+static ir_node *find_location(ir_node *block1, ir_node *block2)
+{
if (block_dominates(block1, block2))
return block2;
assert(block_dominates(block2, block1));
*
* @return the newly created node
*/
-static ir_node *do_apply(ir_opcode code, dbg_info *db, ir_node *op1, ir_node *op2, ir_mode *mode) {
+static ir_node *do_apply(ir_opcode code, dbg_info *db, ir_node *op1, ir_node *op2, ir_mode *mode)
+{
ir_node *result;
ir_node *block = find_location(get_nodes_block(op1), get_nodes_block(op2));
*
* @return the newly created node
*/
-static ir_node *apply(ir_node *header, ir_node *orig, ir_node *op1, ir_node *op2, iv_env *env) {
+static ir_node *apply(ir_node *header, ir_node *orig, ir_node *op1, ir_node *op2, iv_env *env)
+{
ir_opcode code = get_irn_opcode(orig);
ir_node *result = search(code, op1, op2, env);
*
* @return the reduced node
*/
-static ir_node *reduce(ir_node *orig, ir_node *iv, ir_node *rc, iv_env *env) {
+static ir_node *reduce(ir_node *orig, ir_node *iv, ir_node *rc, iv_env *env)
+{
ir_opcode code = get_irn_opcode(orig);
ir_node *result = search(code, iv, rc, env);
/**
* Update the scc for a newly created IV.
*/
-static void update_scc(ir_node *iv, node_entry *e, iv_env *env) {
+static void update_scc(ir_node *iv, node_entry *e, iv_env *env)
+{
scc *pscc = e->pscc;
ir_node *header = e->header;
waitq *wq = new_waitq();
* @param rc the region constant
* @param env the environment
*/
-static int replace(ir_node *irn, ir_node *iv, ir_node *rc, iv_env *env) {
+static int replace(ir_node *irn, ir_node *iv, ir_node *rc, iv_env *env)
+{
ir_node *result;
DB((dbg, LEVEL_2, " Replacing %+F\n", irn));
/**
* check if a given node is a mul with 2, 4, 8
*/
-static int is_x86_shift_const(ir_node *mul) {
+static int is_x86_shift_const(ir_node *mul)
+{
ir_node *rc;
if (! is_Mul(mul))
* @param iv any node of the induction variable
* @param env the environment
*/
-static int is_counter_iv(ir_node *iv, iv_env *env) {
+static int is_counter_iv(ir_node *iv, iv_env *env)
+{
node_entry *e = get_irn_ne(iv, env);
scc *pscc = e->pscc;
ir_node *have_init = NULL;
* @return non-zero if the register pressure is estimated
* to not increase, zero else
*/
-static int check_users_for_reg_pressure(ir_node *iv, iv_env *env) {
+static int check_users_for_reg_pressure(ir_node *iv, iv_env *env)
+{
ir_node *irn, *header;
ir_node *have_user = NULL;
ir_node *have_cmp = NULL;
*
* @return non-zero if irn should be Replace'd
*/
-static int check_replace(ir_node *irn, iv_env *env) {
+static int check_replace(ir_node *irn, iv_env *env)
+{
ir_node *left, *right, *iv, *rc;
ir_op *op = get_irn_op(irn);
ir_opcode code = get_op_code(op);
* @param pscc a SCC
* @param env the environment
*/
-static void classify_iv(scc *pscc, iv_env *env) {
+static void classify_iv(scc *pscc, iv_env *env)
+{
ir_node *irn, *next, *header = NULL;
node_entry *b, *h = NULL;
int j, only_phi, num_outside;
* @param pscc the SCC
* @param env the environment
*/
-static void process_scc(scc *pscc, iv_env *env) {
+static void process_scc(scc *pscc, iv_env *env)
+{
ir_node *head = pscc->head;
node_entry *e = get_irn_link(head);
* @param pscc an SCC that consists of Phi nodes only
* @param env the environment
*/
-static void remove_phi_cycle(scc *pscc, iv_env *env) {
+static void remove_phi_cycle(scc *pscc, iv_env *env)
+{
ir_node *irn, *next;
int j;
ir_node *out_rc;
* @param pscc the SCC
* @param env the environment
*/
-static void process_phi_only_scc(scc *pscc, iv_env *env) {
+static void process_phi_only_scc(scc *pscc, iv_env *env)
+{
ir_node *head = pscc->head;
node_entry *e = get_irn_link(head);
* @param env the environment
* @param n the node to push
*/
-static void push(iv_env *env, ir_node *n) {
+static void push(iv_env *env, ir_node *n)
+{
node_entry *e;
if (env->tos == ARR_LEN(env->stack)) {
*
* @return The topmost node
*/
-static ir_node *pop(iv_env *env) {
+static ir_node *pop(iv_env *env)
+{
ir_node *n = env->stack[--env->tos];
node_entry *e = get_irn_ne(n, env);
* @param irn start at this node
* @param env the environment
*/
-static void dfs(ir_node *irn, iv_env *env) {
+static void dfs(ir_node *irn, iv_env *env)
+{
int i, n;
node_entry *node = get_irn_ne(irn, env);
* @param irg the graph to process
* @param env the environment
*/
-static void do_dfs(ir_graph *irg, iv_env *env) {
+static void do_dfs(ir_graph *irg, iv_env *env)
+{
ir_graph *rem = current_ir_graph;
ir_node *end = get_irg_end(irg);
int i;
/**
* Post-block-walker: assign the post-order number.
*/
-static void assign_po(ir_node *block, void *ctx) {
+static void assign_po(ir_node *block, void *ctx)
+{
iv_env *env = ctx;
node_entry *e = get_irn_ne(block, env);
* In the current implementation only the last edge is stored, so
* only one chain exists. That's why we might miss some opportunities.
*/
-static ir_node *applyOneEdge(ir_node *iv, ir_node *rc, LFTR_edge *e, iv_env *env) {
+static ir_node *applyOneEdge(ir_node *iv, ir_node *rc, LFTR_edge *e, iv_env *env)
+{
if (env->osr_flags & osr_flag_lftr_with_ov_check) {
tarval *tv_l, *tv_r, *tv, *tv_init, *tv_incr, *tv_end;
tarval_int_overflow_mode_t ovmode;
* @return the translated region constant or NULL
* if the translation was not possible
*/
-static ir_node *applyEdges(ir_node **pIV, ir_node *rc, iv_env *env) {
+static ir_node *applyEdges(ir_node **pIV, ir_node *rc, iv_env *env)
+{
ir_node *iv = *pIV;
if (env->osr_flags & osr_flag_lftr_with_ov_check) {
/* overflow can only be decided for Consts */
* Walker, finds Cmp(iv, rc) or Cmp(rc, iv)
* and tries to optimize them.
*/
-static void do_lftr(ir_node *cmp, void *ctx) {
+static void do_lftr(ir_node *cmp, void *ctx)
+{
iv_env *env = ctx;
ir_node *left, *right, *liv, *riv;
ir_node *iv, *rc;
* @param irg the graph that should be optimized
* @param env the IV environment
*/
-static void lftr(ir_graph *irg, iv_env *env) {
+static void lftr(ir_graph *irg, iv_env *env)
+{
irg_walk_graph(irg, NULL, do_lftr, env);
} /* lftr */
* Pre-walker: set all node links to NULL and fix the
* block of Proj nodes.
*/
-static void clear_and_fix(ir_node *irn, void *env) {
+static void clear_and_fix(ir_node *irn, void *env)
+{
int *moved = env;
set_irn_link(irn, NULL);
/* Remove any Phi cycles with only one real input. */
-void remove_phi_cycles(ir_graph *irg) {
+void remove_phi_cycles(ir_graph *irg)
+{
iv_env env;
ir_graph *rem;
int projs_moved;
/**
* Post-walker: fix Add and Sub nodes that where results of I<->P conversions.
*/
-static void fix_adds_and_subs(ir_node *irn, void *ctx) {
+static void fix_adds_and_subs(ir_node *irn, void *ctx)
+{
(void) ctx;
if (is_Add(irn)) {
} /* fix_adds_and_subs */
/* Performs Operator Strength Reduction for the passed graph. */
-void opt_osr(ir_graph *irg, unsigned flags) {
+void opt_osr(ir_graph *irg, unsigned flags)
+{
iv_env env;
ir_graph *rem;
int edges;
/**
* Wrapper for running opt_osr() as an ir_graph pass.
*/
-static int pass_wrapper(ir_graph *irg, void *context) {
+static int pass_wrapper(ir_graph *irg, void *context)
+{
struct pass_t *pass = context;
opt_osr(irg, pass->flags);
return 0;
*
 * The default implementation checks for Alloc nodes only.
*/
-ir_type *default_firm_get_Alloc(ir_node *n) {
+ir_type *default_firm_get_Alloc(ir_node *n)
+{
n = skip_Proj(n);
if (is_Alloc(n)) {
return get_Alloc_type(n);
static get_Alloc_func firm_get_Alloc = default_firm_get_Alloc;
/** Set a new get_Alloc_func and returns the old one. */
-get_Alloc_func firm_set_Alloc_func(get_Alloc_func newf) {
+get_Alloc_func firm_set_Alloc_func(get_Alloc_func newf)
+{
get_Alloc_func old = firm_get_Alloc;
firm_get_Alloc = newf;
return old;
* If we find a dynamic type this means that the pointer always points
* to an object of this type during runtime. We resolved polymorphy.
*/
-static ir_type *get_dynamic_type(ir_node *ptr) {
+static ir_type *get_dynamic_type(ir_node *ptr)
+{
ir_type *tp;
/* skip Cast and Confirm nodes */
/*
* Transform Sel[method] to SymC[method] if possible.
*/
-ir_node *transform_node_Sel(ir_node *node) {
+ir_node *transform_node_Sel(ir_node *node)
+{
ir_node *new_node, *ptr;
ir_type *dyn_tp;
ir_entity *ent = get_Sel_entity(node);
* a tuple, or replace the Projs of the load.
* Therefore we call this optimization in ldstopt().
*/
-ir_node *transform_polymorph_Load(ir_node *load) {
+ir_node *transform_polymorph_Load(ir_node *load)
+{
ir_node *new_node = NULL;
ir_node *field_ptr, *ptr;
ir_entity *ent;
*
* @return zero if they are identically, non-zero else
*/
-static int entry_cmp(const void *elt, const void *key) {
+static int entry_cmp(const void *elt, const void *key)
+{
const entry_t *e1 = elt;
const entry_t *e2 = key;
*
* @param entry The element to be hashed.
*/
-static int hash_entry(const entry_t *entry) {
+static int hash_entry(const entry_t *entry)
+{
return HASH_PTR(entry->q.ent) ^ HASH_PTR(entry->q.tv) ^ (entry->q.pos * 9);
}
/**
* Free memory associated with a quadruplet.
*/
-static void kill_entry(entry_t *entry) {
+static void kill_entry(entry_t *entry)
+{
if (entry->q.calls) {
DEL_ARR_F(entry->q.calls);
entry->q.calls = NULL;
* @param callee The entity of the callee
* @param hmap The quadruple-set containing the calls with constant parameters
*/
-static void process_call(ir_node *call, ir_entity *callee, q_set *hmap) {
+static void process_call(ir_node *call, ir_entity *callee, q_set *hmap)
+{
ir_type *mtp;
entry_t *key, *entry;
ir_node *call_param;
* @param call A ir_node to be checked.
* @param env The quadruple-set containing the calls with constant parameters
*/
-static void collect_irg_calls(ir_node *call, void *env) {
+static void collect_irg_calls(ir_node *call, void *env)
+{
q_set *hmap = env;
ir_node *call_ptr;
ir_entity *callee;
* @param pos The "pos" from our quadruplet.
* @param nr A counter for the clones.
*/
-static ident *get_clone_ident(ident *id, int pos, unsigned nr) {
+static ident *get_clone_ident(ident *id, int pos, unsigned nr)
+{
char clone_postfix[32];
snprintf(clone_postfix, sizeof(clone_postfix), "_cl_%d_%u", pos, nr);
* @param irn A node from the original method graph.
* @param env The clone graph.
*/
-static void copy_nodes(ir_node *irn, void *env) {
+static void copy_nodes(ir_node *irn, void *env)
+{
ir_node *arg, *irg_args, *irn_copy;
int proj_nr;
ir_graph *clone_irg = env;
* The copied nodes are set as link of their original nodes. The links of
* "irn" predecessors are the predecessors of copied node.
*/
-static void set_preds(ir_node *irn, void *env) {
+static void set_preds(ir_node *irn, void *env)
+{
int i;
ir_node *irn_copy, *pred, *arg;
ir_graph *clone_irg = env;
* @param irg irg that must be cloned.
* @param pos The position of the argument.
*/
-static ir_node *get_irg_arg(ir_graph *irg, int pos) {
+static ir_node *get_irg_arg(ir_graph *irg, int pos)
+{
ir_node *irg_args = get_irg_args(irg), *arg = NULL;
int i;
* @param ent The entity of the method that must be cloned.
* @param q Our quadruplet.
*/
-static void create_clone_proc_irg(ir_entity *ent, quadruple_t *q) {
+static void create_clone_proc_irg(ir_entity *ent, quadruple_t *q)
+{
ir_graph *method_irg, *clone_irg;
ir_node *arg, *const_arg;
* @param ent The entity of the clone.
* @param nr A pointer to the counter of clones.
**/
-static void change_entity_type(quadruple_t *q, ir_entity *ent) {
+static void change_entity_type(quadruple_t *q, ir_entity *ent)
+{
ir_type *mtp, *new_mtp, *tp;
int i, j, n_params, n_ress;
* @param new_entity The entity of the cloned function.
* @param pos The position of the replaced parameter of this call.
**/
-static ir_node *new_cl_Call(ir_node *call, ir_entity *new_entity, int pos) {
+static ir_node *new_cl_Call(ir_node *call, ir_entity *new_entity, int pos)
+{
ir_node **in;
ir_type *mtp;
int i, n_params, new_params = 0;
* @param cloned_ent The entity of the new function that must be called
* from the new Call.
*/
-static void exchange_calls(quadruple_t *q, ir_entity *cloned_ent) {
+static void exchange_calls(quadruple_t *q, ir_entity *cloned_ent)
+{
int pos = q->pos;
ir_node *new_call, *call;
int i;
* We save one instruction in every caller and param_weight instructions
* in the callee.
*/
-static float calculate_weight(const entry_t *entry) {
+static float calculate_weight(const entry_t *entry)
+{
return ARR_LEN(entry->q.calls) *
(float)(get_method_param_weight(entry->q.ent, entry->q.pos) + 1);
}
* the next cloned entity may get invalid, so we have to check
* them and may even update the list of heavy uses.
*/
-static void reorder_weights(q_set *hmap, float threshold) {
+static void reorder_weights(q_set *hmap, float threshold)
+{
entry_t **adr, *p, *entry;
int i, len;
ir_entity *callee;
* call(..., Const, ...). If the weight is bigger than threshold,
* clone the entity and fix the calls.
*/
-void proc_cloning(float threshold) {
+void proc_cloning(float threshold)
+{
entry_t *entry = NULL, *p;
ir_graph *irg;
int i;
/**
* Wrapper to run proc_cloning() as an ir_prog pass.
*/
-static int proc_cloning_wrapper(ir_prog *irp, void *context) {
+static int proc_cloning_wrapper(ir_prog *irp, void *context)
+{
struct pass_t *pass = context;
(void)irp;
}
/* create a ir_prog pass */
-ir_prog_pass_t *proc_cloning_pass(const char *name, float threshold) {
+ir_prog_pass_t *proc_cloning_pass(const char *name, float threshold)
+{
struct pass_t *pass = XMALLOCZ(struct pass_t);
pass->threshold = threshold;
/**
* Reassociate Shl. We transform Shl(x, const) into Mul's if possible.
*/
-static int reassoc_Shl(ir_node **node) {
+static int reassoc_Shl(ir_node **node)
+{
ir_node *n = *node;
ir_node *c = get_Shl_right(n);
ir_node *x, *blk, *irn;
*
* If the earliest block is the start block, return curr_blk instead
*/
-static ir_node *earliest_block(ir_node *a, ir_node *b, ir_node *curr_blk) {
+static ir_node *earliest_block(ir_node *a, ir_node *b, ir_node *curr_blk)
+{
ir_node *blk_a = get_nodes_block(a);
ir_node *blk_b = get_nodes_block(b);
ir_node *res;
* Handling SymConsts as const might be not a good idea for all
* architectures ...
*/
-static int is_constant_expr(ir_node *irn) {
+static int is_constant_expr(ir_node *irn)
+{
ir_op *op;
switch (get_irn_opcode(irn)) {
/**
* Apply distributive Law for Mul and Add/Sub
*/
-static int reverse_rule_distributive(ir_node **node) {
+static int reverse_rule_distributive(ir_node **node)
+{
ir_node *n = *node;
ir_node *left = get_binop_left(n);
ir_node *right = get_binop_right(n);
/**
* Move Constants towards the root.
*/
-static int move_consts_up(ir_node **node) {
+static int move_consts_up(ir_node **node)
+{
ir_node *n = *node;
ir_op *op;
ir_node *l, *r, *a, *b, *c, *blk, *irn, *in[2];
/**
* Apply the rules in reverse order, removing code that was not collapsed
*/
-static void reverse_rules(ir_node *node, void *env) {
+static void reverse_rules(ir_node *node, void *env)
+{
walker_t *wenv = env;
ir_mode *mode = get_irn_mode(node);
int res;
} /* optimize_reassociation */
/* create a pass for the reassociation */
-ir_graph_pass_t *optimize_reassociation_pass(const char *name) {
+ir_graph_pass_t *optimize_reassociation_pass(const char *name)
+{
return def_graph_pass_ret(name ? name : "reassoc", optimize_reassociation);
} /* optimize_reassociation_pass */
* res = c;
* return res;
*/
-void normalize_one_return(ir_graph *irg) {
+void normalize_one_return(ir_graph *irg)
+{
ir_node *endbl = get_irg_end_block(irg);
int i, j, k, n, last_idx, n_rets, n_ret_vals = -1;
unsigned char *returns;
* All predecessors of the Return block must be Jmp's of course, or we
* cannot move it up, so we add blocks if needed.
*/
-static int can_move_ret(ir_node *ret) {
+static int can_move_ret(ir_node *ret)
+{
ir_node *retbl = get_nodes_block(ret);
int i, n = get_irn_arity(ret);
* else
* return c;
*/
-void normalize_n_returns(ir_graph *irg) {
+void normalize_n_returns(ir_graph *irg)
+{
int i, j, n, n_rets, n_finals, n_ret_vals;
ir_node *list = NULL;
ir_node *final = NULL;
*
* @return 0 if they are identically
*/
-static int path_cmp(const void *elt, const void *key, size_t size) {
+static int path_cmp(const void *elt, const void *key, size_t size)
+{
const path_t *p1 = elt;
const path_t *p2 = key;
(void) size;
*
* @return 0 if they are identically
*/
-static int ent_cmp(const void *elt, const void *key, size_t size) {
+static int ent_cmp(const void *elt, const void *key, size_t size)
+{
const scalars_t *c1 = elt;
const scalars_t *c2 = key;
(void) size;
/**
* Calculate a hash value for a path.
*/
-static unsigned path_hash(const path_t *path) {
+static unsigned path_hash(const path_t *path)
+{
unsigned hash = 0;
unsigned i;
*
* @param sel the Sel node that will be checked
*/
-static int is_const_sel(ir_node *sel) {
+static int is_const_sel(ir_node *sel)
+{
int i, n = get_Sel_n_indexs(sel);
for (i = 0; i < n; ++i) {
* @param mode the mode of the Load/Store
* @param ent_mode the mode of the accessed entity
*/
-static int check_load_store_mode(ir_mode *mode, ir_mode *ent_mode) {
+static int check_load_store_mode(ir_mode *mode, ir_mode *ent_mode)
+{
if (ent_mode != mode) {
if (ent_mode == NULL ||
get_mode_size_bits(ent_mode) != get_mode_size_bits(mode) ||
* @param ent the entity that will be scalar replaced
* @param sel a Sel node that selects some fields of this entity
*/
-static int link_all_leave_sels(ir_entity *ent, ir_node *sel) {
+static int link_all_leave_sels(ir_entity *ent, ir_node *sel)
+{
int i, is_leave = 1;
for (i = get_irn_n_outs(sel) - 1; i >= 0; --i) {
* @return non-zero if at least one entity could be replaced
* potentially
*/
-static int find_possible_replacements(ir_graph *irg) {
+static int find_possible_replacements(ir_graph *irg)
+{
ir_node *irg_frame;
ir_type *frame_tp;
int i, j, k, static_link_arg;
* @param sel the Sel node
* @param len the length of the path so far
*/
-static path_t *find_path(ir_node *sel, unsigned len) {
+static path_t *find_path(ir_node *sel, unsigned len)
+{
int pos, i, n;
path_t *res;
ir_node *pred = get_Sel_ptr(sel);
/**
* topological post-walker.
*/
-static void topologic_walker(ir_node *node, void *ctx) {
+static void topologic_walker(ir_node *node, void *ctx)
+{
env_t *env = ctx;
ir_node *adr, *block, *mem, *val;
ir_mode *mode;
* @param modes A flexible array, containing all the modes of
* the value numbers.
*/
-static void do_scalar_replacements(pset *sels, int nvals, ir_mode **modes) {
+static void do_scalar_replacements(pset *sels, int nvals, ir_mode **modes)
+{
env_t env;
ssa_cons_start(current_ir_graph, nvals);
*
* @param irg The current ir graph.
*/
-int scalar_replacement_opt(ir_graph *irg) {
+int scalar_replacement_opt(ir_graph *irg)
+{
unsigned nvals;
int i;
scalars_t key, *value;
return res;
}
-ir_graph_pass_t *scalar_replacement_opt_pass(const char *name) {
+ir_graph_pass_t *scalar_replacement_opt_pass(const char *name)
+{
return def_graph_pass_ret(name ? name : "scalar_rep",
scalar_replacement_opt);
}
-void firm_init_scalar_replace(void) {
+void firm_init_scalar_replace(void)
+{
FIRM_DBG_REGISTER(dbg, "firm.opt.scalar_replace");
}
/**
* walker for collecting data, fills a collect_t environment
*/
-static void collect_data(ir_node *node, void *env) {
+static void collect_data(ir_node *node, void *env)
+{
collect_t *data = env;
ir_node *pred;
ir_op *op;
* @param rets linked list of all rets
* @param n_tail_calls number of tail-recursion calls
*/
-static void do_opt_tail_rec(ir_graph *irg, tr_env *env) {
+static void do_opt_tail_rec(ir_graph *irg, tr_env *env)
+{
ir_node *end_block = get_irg_end_block(irg);
ir_node *block, *jmp, *call, *calls;
ir_node **in;
*
* @return non-zero if it's ok to do tail recursion
*/
-static int check_lifetime_of_locals(ir_graph *irg) {
+static int check_lifetime_of_locals(ir_graph *irg)
+{
ir_node *irg_frame;
int i;
ir_type *frame_tp = get_irg_frame_type(irg);
/**
* Examine irn and detect the recursion variant.
*/
-static tail_rec_variants find_variant(ir_node *irn, ir_node *call) {
+static tail_rec_variants find_variant(ir_node *irn, ir_node *call)
+{
ir_node *a, *b;
tail_rec_variants va, vb, res;
/*
* convert simple tail-calls into loops
*/
-int opt_tail_rec_irg(ir_graph *irg) {
+int opt_tail_rec_irg(ir_graph *irg)
+{
tr_env env;
ir_node *end_block;
int i, n_ress, n_tail_calls = 0;
/*
* optimize tail recursion away
*/
-void opt_tail_recursion(void) {
+void opt_tail_recursion(void)
+{
int i;
int n_opt_applications = 0;
ir_graph *irg;
* Find a pointer type to a given type.
* Uses and updates trouts if available.
*/
-static ir_type *default_gen_pointer_type_to(ir_type *tp) {
+static ir_type *default_gen_pointer_type_to(ir_type *tp)
+{
ir_type *res = NULL;
if (get_trouts_state() == outs_consistent) {
if (get_type_n_pointertypes_to(tp) > 0) {
}
/** Return a type that is a depth times pointer to type. */
-static ir_type *pointerize_type(ir_type *tp, int depth) {
+static ir_type *pointerize_type(ir_type *tp, int depth)
+{
for (; depth > 0; --depth) {
tp = gen_pointer_type_to(tp);
}
}
-static ir_node *normalize_values_type(ir_type *totype, ir_node *pred) {
+static ir_node *normalize_values_type(ir_type *totype, ir_node *pred)
+{
ir_type *fromtype = get_irn_typeinfo_type(pred);
ir_node *new_cast = pred;
int ref_depth = 0;
/**
* Post-Walker.
*/
-static void normalize_irn_class_cast(ir_node *n, void *env) {
+static void normalize_irn_class_cast(ir_node *n, void *env)
+{
ir_node *res;
(void) env;
if (is_Cast(n)) {
}
-static void pure_normalize_irg_class_casts(ir_graph *irg) {
+static void pure_normalize_irg_class_casts(ir_graph *irg)
+{
assert(get_irg_class_cast_state(irg) != ir_class_casts_any &&
"Cannot normalize irregular casts.");
if (get_irg_class_cast_state(irg) == ir_class_casts_normalized) {
}
-void normalize_irg_class_casts(ir_graph *irg, gen_pointer_type_to_func gppt_fct) {
+void normalize_irg_class_casts(ir_graph *irg, gen_pointer_type_to_func gppt_fct)
+{
assert(get_irp_typeinfo_state() == ir_typeinfo_consistent);
if (gppt_fct) gen_pointer_type_to = gppt_fct;
gen_pointer_type_to = default_gen_pointer_type_to;
}
-void normalize_irp_class_casts(gen_pointer_type_to_func gppt_fct) {
+void normalize_irp_class_casts(gen_pointer_type_to_func gppt_fct)
+{
int i;
if (gppt_fct) gen_pointer_type_to = gppt_fct;
*
* @return 1 if the cast was changed
*/
-static int cancel_out_casts(ir_node *cast) {
+static int cancel_out_casts(ir_node *cast)
+{
ir_node *orig, *pred = get_Cast_op(cast);
ir_type *tp_cast, *tp_pred, *tp_orig;
int ref_depth = 0;
*
 * @return 1 if Casts were removed
*/
-static int concretize_selected_entity(ir_node *sel) {
+static int concretize_selected_entity(ir_node *sel)
+{
ir_node *cast, *ptr = get_Sel_ptr(sel);
ir_type *orig_tp, *cast_tp;
ir_entity *new_ent, *sel_ent;
*
 * @return 1 if Casts were removed
*/
-static int remove_Cmp_Null_cast(ir_node *cmp) {
+static int remove_Cmp_Null_cast(ir_node *cmp)
+{
ir_node *cast, *null, *new_null;
int cast_pos, null_pos;
ir_type *fromtype;
/**
* Post-Walker: Optimize class casts (mostly by trying to remove them)
*/
-static void irn_optimize_class_cast(ir_node *n, void *env) {
+static void irn_optimize_class_cast(ir_node *n, void *env)
+{
int *changed = env;
if (is_Cast(n))
*changed |= remove_Cmp_Null_cast(n);
}
-void optimize_class_casts(void) {
+void optimize_class_casts(void)
+{
int changed;
if (get_irp_typeinfo_state() != ir_typeinfo_consistent)
n_casts_removed, n_sels_concretized));
}
-void firm_init_class_casts_opt(void) {
+void firm_init_class_casts_opt(void)
+{
FIRM_DBG_REGISTER(dbg, "firm.opt.tropt");
}
/**
 * calculates the dual logarithm (base-2 logarithm) of |value|
*/
-static unsigned log2abs(long value) {
+static unsigned log2abs(long value)
+{
unsigned res = 0;
if (value < 0)
/**
* classify the value of a float tarval
*/
-static float_classify_t classify_float_value(tarval *tv) {
+static float_classify_t classify_float_value(tarval *tv)
+{
ir_mode *mode = get_tarval_mode(tv);
if (tv == get_mode_null(mode))
}
/* return a human readable name for a float classification */
-const char *stat_fc_name(float_classify_t classification) {
+const char *stat_fc_name(float_classify_t classification)
+{
switch (classification) {
case STAT_FC_0: return "0.0";
case STAT_FC_1: return "1.0";
}
/* clears the const statistics for a new snapshot */
-void stat_const_clear(stat_info_t *status) {
+void stat_const_clear(stat_info_t *status)
+{
size_t i;
for (i = 0; i < ARR_SIZE(status->const_info.int_bits_count); ++i)
}
/* initialize the Const statistic. */
-void stat_init_const_cnt(stat_info_t *status) {
+void stat_init_const_cnt(stat_info_t *status)
+{
(void) status;
/* currently nothing */
}
/**
* Allocate a new DAG entry.
*/
-static dag_entry_t *new_dag_entry(dag_env_t *dag_env, ir_node *node) {
+static dag_entry_t *new_dag_entry(dag_env_t *dag_env, ir_node *node)
+{
dag_entry_t *entry = OALLOC(&dag_env->obst, dag_entry_t);
entry->num_nodes = 1;
/**
* calculates a hash value for an address
*/
-static unsigned addr_hash(const void *object) {
+static unsigned addr_hash(const void *object)
+{
return HASH_PTR(object);
}
/**
* calculates a hash value for an integer
*/
-static unsigned int_hash(const void *object) {
+static unsigned int_hash(const void *object)
+{
return (unsigned)PTR_TO_INT(object);
}
/**
* compare function for integer distribution tables
*/
-static int int_cmp_fun(const void *elt, const void *key) {
+static int int_cmp_fun(const void *elt, const void *key)
+{
const distrib_entry_t *p1 = elt;
const distrib_entry_t *p2 = key;
/*
* create a new distribution table
*/
-distrib_tbl_t *stat_new_distrib_tbl(pset_cmp_fun cmp_func, distrib_hash_fun hash_func) {
+distrib_tbl_t *stat_new_distrib_tbl(pset_cmp_fun cmp_func, distrib_hash_fun hash_func)
+{
distrib_tbl_t *res = XMALLOC(distrib_tbl_t);
obstack_init(&res->cnts);
/*
* create a new distribution table for an integer distribution
*/
-distrib_tbl_t *stat_new_int_distrib_tbl(void) {
+distrib_tbl_t *stat_new_int_distrib_tbl(void)
+{
distrib_tbl_t *res = stat_new_distrib_tbl(int_cmp_fun, int_hash);
if (res)
/*
* destroy a distribution table
*/
-void stat_delete_distrib_tbl(distrib_tbl_t *tbl) {
+void stat_delete_distrib_tbl(distrib_tbl_t *tbl)
+{
if (tbl) {
/* free all entries */
obstack_free(&tbl->cnts, NULL);
/**
 * Returns the associated distrib_entry_t for an object
*/
-static distrib_entry_t *distrib_get_entry(distrib_tbl_t *tbl, const void *object) {
+static distrib_entry_t *distrib_get_entry(distrib_tbl_t *tbl, const void *object)
+{
distrib_entry_t key;
distrib_entry_t *elem;
/*
* adds a new object count into the distribution table
*/
-void stat_add_distrib_tbl(distrib_tbl_t *tbl, const void *object, const counter_t *cnt) {
+void stat_add_distrib_tbl(distrib_tbl_t *tbl, const void *object, const counter_t *cnt)
+{
distrib_entry_t *elem = distrib_get_entry(tbl, object);
cnt_add(&elem->cnt, cnt);
/*
* adds a new key count into the integer distribution table
*/
-void stat_add_int_distrib_tbl(distrib_tbl_t *tbl, int key, const counter_t *cnt) {
+void stat_add_int_distrib_tbl(distrib_tbl_t *tbl, int key, const counter_t *cnt)
+{
stat_add_distrib_tbl(tbl, INT_TO_PTR(key), cnt);
}
/*
* increases object count by one
*/
-void stat_inc_distrib_tbl(distrib_tbl_t *tbl, const void *object) {
+void stat_inc_distrib_tbl(distrib_tbl_t *tbl, const void *object)
+{
distrib_entry_t *elem = distrib_get_entry(tbl, object);
cnt_inc(&elem->cnt);
/*
* increases key count by one
*/
-void stat_inc_int_distrib_tbl(distrib_tbl_t *tbl, int key) {
+void stat_inc_int_distrib_tbl(distrib_tbl_t *tbl, int key)
+{
stat_inc_distrib_tbl(tbl, INT_TO_PTR(key));
}
* inserts a new object with count 0 into the distribution table
* if object is already present, nothing happens
*/
-void stat_insert_distrib_tbl(distrib_tbl_t *tbl, const void *object) {
+void stat_insert_distrib_tbl(distrib_tbl_t *tbl, const void *object)
+{
/* executed for side effect */
(void)distrib_get_entry(tbl, object);
}
* inserts a new key with count 0 into the integer distribution table
* if key is already present, nothing happens
*/
-void stat_insert_int_distrib_tbl(distrib_tbl_t *tbl, int key) {
+void stat_insert_int_distrib_tbl(distrib_tbl_t *tbl, int key)
+{
stat_insert_distrib_tbl(tbl, INT_TO_PTR(key));
}
/*
* returns the sum over all counters in a distribution table
*/
-int stat_get_count_distrib_tbl(distrib_tbl_t *tbl) {
+int stat_get_count_distrib_tbl(distrib_tbl_t *tbl)
+{
distrib_entry_t *entry;
counter_t cnt = ZERO_CNT;
/*
* calculates the mean value of a distribution
*/
-double stat_calc_mean_distrib_tbl(distrib_tbl_t *tbl) {
+double stat_calc_mean_distrib_tbl(distrib_tbl_t *tbl)
+{
distrib_entry_t *entry;
unsigned count;
double sum;
/*
* calculates the average value of a distribution
*/
-double stat_calc_avg_distrib_tbl(distrib_tbl_t *tbl) {
+double stat_calc_avg_distrib_tbl(distrib_tbl_t *tbl)
+{
distrib_entry_t *entry;
unsigned count = 0;
double sum = 0.0;
/**
* iterates over all entries in a distribution table
*/
-void stat_iterate_distrib_tbl(const distrib_tbl_t *tbl, eval_distrib_entry_fun eval, void *env) {
+void stat_iterate_distrib_tbl(const distrib_tbl_t *tbl, eval_distrib_entry_fun eval, void *env)
+{
distrib_entry_t *entry;
foreach_pset(tbl->hash_map, entry)
/**
* Compare two elements of the opcode hash.
*/
-static int opcode_cmp(const void *elt, const void *key) {
+static int opcode_cmp(const void *elt, const void *key)
+{
const node_entry_t *e1 = elt;
const node_entry_t *e2 = key;
/**
* Compare two elements of the graph hash.
*/
-static int graph_cmp(const void *elt, const void *key) {
+static int graph_cmp(const void *elt, const void *key)
+{
const graph_entry_t *e1 = elt;
const graph_entry_t *e2 = key;
/**
* Compare two elements of the optimization hash.
*/
-static int opt_cmp(const void *elt, const void *key) {
+static int opt_cmp(const void *elt, const void *key)
+{
const opt_entry_t *e1 = elt;
const opt_entry_t *e2 = key;
/**
* Compare two elements of the block/extbb hash.
*/
-static int block_cmp(const void *elt, const void *key) {
+static int block_cmp(const void *elt, const void *key)
+{
const block_entry_t *e1 = elt;
const block_entry_t *e2 = key;
/**
* Compare two elements of the be_block hash.
*/
-static int be_block_cmp(const void *elt, const void *key) {
+static int be_block_cmp(const void *elt, const void *key)
+{
const be_block_entry_t *e1 = elt;
const be_block_entry_t *e2 = key;
/**
* Compare two elements of reg pressure hash.
*/
-static int reg_pressure_cmp(const void *elt, const void *key) {
+static int reg_pressure_cmp(const void *elt, const void *key)
+{
const reg_pressure_entry_t *e1 = elt;
const reg_pressure_entry_t *e2 = key;
/**
* Compare two elements of the perm_stat hash.
*/
-static int perm_stat_cmp(const void *elt, const void *key) {
+static int perm_stat_cmp(const void *elt, const void *key)
+{
const perm_stat_entry_t *e1 = elt;
const perm_stat_entry_t *e2 = key;
/**
* Compare two elements of the perm_class hash.
*/
-static int perm_class_cmp(const void *elt, const void *key) {
+static int perm_class_cmp(const void *elt, const void *key)
+{
const perm_class_entry_t *e1 = elt;
const perm_class_entry_t *e2 = key;
/**
* Compare two elements of the ir_op hash.
*/
-static int opcode_cmp_2(const void *elt, const void *key) {
+static int opcode_cmp_2(const void *elt, const void *key)
+{
const ir_op *e1 = elt;
const ir_op *e2 = key;
/**
* Compare two elements of the address_mark set.
*/
-static int address_mark_cmp(const void *elt, const void *key, size_t size) {
+static int address_mark_cmp(const void *elt, const void *key, size_t size)
+{
const address_mark_entry_t *e1 = elt;
const address_mark_entry_t *e2 = key;
(void) size;
/**
* Clear all counter in a node_entry_t.
*/
-static void opcode_clear_entry(node_entry_t *elem) {
+static void opcode_clear_entry(node_entry_t *elem)
+{
cnt_clr(&elem->cnt_alive);
cnt_clr(&elem->new_node);
cnt_clr(&elem->into_Id);
* @param op the IR operation
* @param hmap a hash map containing ir_op* -> node_entry_t*
*/
-static node_entry_t *opcode_get_entry(const ir_op *op, hmap_node_entry_t *hmap) {
+static node_entry_t *opcode_get_entry(const ir_op *op, hmap_node_entry_t *hmap)
+{
node_entry_t key;
node_entry_t *elem;
* @param code the IR opcode
* @param hmap the hash map containing opcode -> ir_op*
*/
-static ir_op *opcode_find_entry(ir_opcode code, hmap_ir_op *hmap) {
+static ir_op *opcode_find_entry(ir_opcode code, hmap_ir_op *hmap)
+{
ir_op key;
key.code = code;
* @param elem the graph entry
 * @param all if non-zero, clears all counters, else leaves accumulated ones
*/
-static void graph_clear_entry(graph_entry_t *elem, int all) {
+static void graph_clear_entry(graph_entry_t *elem, int all)
+{
int i;
/* clear accumulated / non-accumulated counter */
/**
* Clear all counter in an opt_entry_t.
*/
-static void opt_clear_entry(opt_entry_t *elem) {
+static void opt_clear_entry(opt_entry_t *elem)
+{
cnt_clr(&elem->count);
} /* opt_clear_entry */
/**
* clears all counter in a block_entry_t
*/
-static void block_clear_entry(block_entry_t *elem) {
+static void block_clear_entry(block_entry_t *elem)
+{
int i;
for (i = 0; i < _bcnt_last; ++i)
/**
* clears all sets in perm_class_entry_t
*/
-static void perm_class_clear_entry(perm_class_entry_t *elem) {
+static void perm_class_clear_entry(perm_class_entry_t *elem)
+{
if (elem->perm_stat)
del_pset(elem->perm_stat);
/**
* clears all sets in perm_stat_entry_t
*/
-static void perm_stat_clear_entry(perm_stat_entry_t *elem) {
+static void perm_stat_clear_entry(perm_stat_entry_t *elem)
+{
if (elem->chains)
stat_delete_distrib_tbl(elem->chains);
/**
 * Clear the optimization counters.
*/
-static void clear_optimization_counter(void) {
+static void clear_optimization_counter(void)
+{
int i;
for (i = 0; i < FS_OPT_MAX; ++i)
cnt_clr(&status->num_opts[i]);
* Calculates how many arguments of the call are const, updates
* param distribution.
*/
-static void analyse_params_of_Call(graph_entry_t *graph, ir_node *call) {
+static void analyse_params_of_Call(graph_entry_t *graph, ir_node *call)
+{
int i, num_const_args = 0, num_local_adr = 0;
int n = get_Call_n_params(call);
*
* @return the base address.
*/
-static ir_node *find_base_adr(ir_node *sel) {
+static ir_node *find_base_adr(ir_node *sel)
+{
ir_node *ptr = get_Sel_ptr(sel);
while (is_Sel(ptr)) {
/**
* Update info on Load/Store address statistics.
*/
-static void stat_update_address(ir_node *node, graph_entry_t *graph) {
+static void stat_update_address(ir_node *node, graph_entry_t *graph)
+{
ir_opcode opc = get_irn_opcode(node);
ir_node *base;
ir_graph *irg;
/**
* Walker for reachable nodes count for graphs on the wait_q.
*/
-static void update_node_stat_2(ir_node *node, void *env) {
+static void update_node_stat_2(ir_node *node, void *env)
+{
graph_entry_t *graph = env;
	/* check for properties that depend on calls like recursion/leaf/indirect call */
/**
* Get the current address mark.
*/
-static unsigned get_adr_mark(graph_entry_t *graph, ir_node *node) {
+static unsigned get_adr_mark(graph_entry_t *graph, ir_node *node)
+{
address_mark_entry_t *value = set_find(graph->address_mark, &node, sizeof(*value), HASH_PTR(node));
return value ? value->mark : 0;
/**
* Set the current address mark.
*/
-static void set_adr_mark(graph_entry_t *graph, ir_node *node, unsigned val) {
+static void set_adr_mark(graph_entry_t *graph, ir_node *node, unsigned val)
+{
address_mark_entry_t *value = set_insert(graph->address_mark, &node, sizeof(*value), HASH_PTR(node));
value->mark = val;
/**
* Return the "operational" mode of a Firm node.
*/
-static ir_mode *get_irn_op_mode(ir_node *node) {
+static ir_mode *get_irn_op_mode(ir_node *node)
+{
switch (get_irn_opcode(node)) {
case iro_Load:
return get_Load_mode(node);
* calling it in the post of an outs walk. This should work even in cycles,
* while the normal pre-walk will not.
*/
-static void mark_address_calc(ir_node *node, void *env) {
+static void mark_address_calc(ir_node *node, void *env)
+{
graph_entry_t *graph = env;
ir_mode *mode = get_irn_op_mode(node);
int i, n;
* calling it in the post of an outs walk. This should work even in cycles,
* while the normal pre-walk will not.
*/
-static void count_adr_ops(ir_node *node, void *env) {
+static void count_adr_ops(ir_node *node, void *env)
+{
graph_entry_t *graph = env;
unsigned mark = get_adr_mark(graph, node);
/**
* Register a dumper.
*/
-static void stat_register_dumper(const dumper_t *dumper) {
+static void stat_register_dumper(const dumper_t *dumper)
+{
dumper_t *p = XMALLOC(dumper_t);
memcpy(p, dumper, sizeof(*p));
/**
* Dumps the statistics of an IR graph.
*/
-static void stat_dump_graph(graph_entry_t *entry) {
+static void stat_dump_graph(graph_entry_t *entry)
+{
dumper_t *dumper;
for (dumper = status->dumper; dumper; dumper = dumper->next) {
/**
* Calls all registered dumper functions.
*/
-static void stat_dump_registered(graph_entry_t *entry) {
+static void stat_dump_registered(graph_entry_t *entry)
+{
dumper_t *dumper;
for (dumper = status->dumper; dumper; dumper = dumper->next) {
/**
* Dumps a constant table.
*/
-static void stat_dump_consts(const constant_info_t *tbl) {
+static void stat_dump_consts(const constant_info_t *tbl)
+{
dumper_t *dumper;
for (dumper = status->dumper; dumper; dumper = dumper->next) {
/**
* Dumps the parameter distribution
*/
-static void stat_dump_param_tbl(const distrib_tbl_t *tbl, graph_entry_t *global) {
+static void stat_dump_param_tbl(const distrib_tbl_t *tbl, graph_entry_t *global)
+{
dumper_t *dumper;
for (dumper = status->dumper; dumper; dumper = dumper->next) {
/**
* Dumps the optimization counter
*/
-static void stat_dump_opt_cnt(const counter_t *tbl, unsigned len) {
+static void stat_dump_opt_cnt(const counter_t *tbl, unsigned len)
+{
dumper_t *dumper;
for (dumper = status->dumper; dumper; dumper = dumper->next) {
/**
* Initialize the dumper.
*/
-static void stat_dump_init(const char *name) {
+static void stat_dump_init(const char *name)
+{
dumper_t *dumper;
for (dumper = status->dumper; dumper; dumper = dumper->next) {
/**
* Finish the dumper.
*/
-static void stat_dump_finish(void) {
+static void stat_dump_finish(void)
+{
dumper_t *dumper;
for (dumper = status->dumper; dumper; dumper = dumper->next) {
/**
* Register an additional function for all dumper.
*/
-void stat_register_dumper_func(dump_graph_FUNC func) {
+void stat_register_dumper_func(dump_graph_FUNC func)
+{
dumper_t *dumper;
for (dumper = status->dumper; dumper; dumper = dumper->next) {
/*
* Helper: get an ir_op from an opcode.
*/
-ir_op *stat_get_op_from_opcode(ir_opcode code) {
+ir_op *stat_get_op_from_opcode(ir_opcode code)
+{
return opcode_find_entry(code, status->ir_op_hash);
} /* stat_get_op_from_opcode */
* @param ctx the hook context
* @param op the new IR opcode that was created.
*/
-static void stat_new_ir_op(void *ctx, ir_op *op) {
+static void stat_new_ir_op(void *ctx, ir_op *op)
+{
(void) ctx;
if (! status->stat_options)
return;
* @param ctx the hook context
* @param op the IR opcode that is freed
*/
-static void stat_free_ir_op(void *ctx, ir_op *op) {
+static void stat_free_ir_op(void *ctx, ir_op *op)
+{
(void) ctx;
(void) op;
if (! status->stat_options)
* @param irg the IR graph on which the node is created
* @param node the new IR node that was created
*/
-static void stat_new_node(void *ctx, ir_graph *irg, ir_node *node) {
+static void stat_new_node(void *ctx, ir_graph *irg, ir_node *node)
+{
(void) ctx;
(void) irg;
if (! status->stat_options)
* @param ctx the hook context
* @param node the IR node that will be turned into an ID
*/
-static void stat_turn_into_id(void *ctx, ir_node *node) {
+static void stat_turn_into_id(void *ctx, ir_node *node)
+{
(void) ctx;
if (! status->stat_options)
return;
* @param ctx the hook context
* @param node the IR node that was normalized
*/
-static void stat_normalize(void *ctx, ir_node *node) {
+static void stat_normalize(void *ctx, ir_node *node)
+{
(void) ctx;
if (! status->stat_options)
return;
* @param irg the new IR graph that was created
* @param ent the entity of this graph
*/
-static void stat_new_graph(void *ctx, ir_graph *irg, ir_entity *ent) {
+static void stat_new_graph(void *ctx, ir_graph *irg, ir_entity *ent)
+{
(void) ctx;
if (! status->stat_options)
return;
* in our hash maps, only a flag is set which prevents this
* information from being changed, it's "frozen" from now.
*/
-static void stat_free_graph(void *ctx, ir_graph *irg) {
+static void stat_free_graph(void *ctx, ir_graph *irg)
+{
(void) ctx;
if (! status->stat_options)
return;
* @param hmap the hash map containing ir_op* -> opt_entry_t*
* @param kind the optimization kind
*/
-static void removed_due_opt(ir_node *n, hmap_opt_entry_t *hmap, hook_opt_kind kind) {
+static void removed_due_opt(ir_node *n, hmap_opt_entry_t *hmap, hook_opt_kind kind)
+{
opt_entry_t *entry;
ir_op *op = stat_get_irn_op(n);
* @param ctx the hook context
* @param flag if non-zero, reassociation is started else stopped
*/
-static void stat_reassociate(void *ctx, int flag) {
+static void stat_reassociate(void *ctx, int flag)
+{
(void) ctx;
if (! status->stat_options)
return;
* @param ctx the hook context
* @param node the IR node that will be lowered
*/
-static void stat_lower(void *ctx, ir_node *node) {
+static void stat_lower(void *ctx, ir_node *node)
+{
(void) ctx;
if (! status->stat_options)
return;
*
* @param ctx the hook context
*/
-static void stat_tail_rec(void *ctx, ir_graph *irg, int n_calls) {
+static void stat_tail_rec(void *ctx, ir_graph *irg, int n_calls)
+{
(void) ctx;
if (! status->stat_options)
return;
*
* @param ctx the hook context
*/
-static void stat_strength_red(void *ctx, ir_graph *irg, ir_node *strong) {
+static void stat_strength_red(void *ctx, ir_graph *irg, ir_node *strong)
+{
(void) ctx;
if (! status->stat_options)
return;
*
* @param ctx the hook context
*/
-static void stat_dead_node_elim(void *ctx, ir_graph *irg, int start) {
+static void stat_dead_node_elim(void *ctx, ir_graph *irg, int start)
+{
(void) ctx;
(void) irg;
if (! status->stat_options)
*
* @param ctx the hook context
*/
-static void stat_arch_dep_replace_mul_with_shifts(void *ctx, ir_node *mul) {
+static void stat_arch_dep_replace_mul_with_shifts(void *ctx, ir_node *mul)
+{
(void) ctx;
if (! status->stat_options)
return;
* @param ctx the hook context
* @param node the division node that will be optimized
*/
-static void stat_arch_dep_replace_division_by_const(void *ctx, ir_node *node) {
+static void stat_arch_dep_replace_division_by_const(void *ctx, ir_node *node)
+{
(void) ctx;
if (! status->stat_options)
return;
/**
 * Wrapper to run stat_dump_snapshot() as an ir_prog wrapper.
*/
-static int stat_dump_snapshot_wrapper(ir_prog *irp, void *context) {
+static int stat_dump_snapshot_wrapper(ir_prog *irp, void *context)
+{
struct pass_t *pass = context;
(void)irp;
/**
* Frees all dumper structures.
*/
-static void stat_term_dumper(void) {
+static void stat_term_dumper(void)
+{
dumper_t *dumper, *next_dumper;
for (dumper = status->dumper; dumper; /* iteration done in loop body */ ) {
/* Terminates the statistics module, frees all memory. */
-void stat_term(void) {
+void stat_term(void)
+{
if (status != (stat_info_t *)&status_disable) {
obstack_free(&status->be_data, NULL);
obstack_free(&status->cnts, NULL);
} /* stat_term */
/* returns 1 if statistics were initialized, 0 otherwise */
-int stat_is_active(void) {
+int stat_is_active(void)
+{
return status != (stat_info_t *)&status_disable;
} /* stat_is_active */
/**
 * Compare two patterns by their occurrence counters.
*/
-static int pattern_count_cmp(const void *elt, const void *key) {
+static int pattern_count_cmp(const void *elt, const void *key)
+{
int cmp;
pattern_entry_t **e1 = (pattern_entry_t **)elt;
/**
 * Compare two patterns by their pattern hash.
*/
-static int pattern_cmp(const void *elt, const void *key) {
+static int pattern_cmp(const void *elt, const void *key)
+{
const pattern_entry_t *e1 = elt;
const pattern_entry_t *e2 = key;
int diff = e1->len - e2->len;
* @param data a buffer address
* @param len the length of the data buffer
*/
-static void init_buf(CODE_BUFFER *buf, BYTE *data, unsigned len) {
+static void init_buf(CODE_BUFFER *buf, BYTE *data, unsigned len)
+{
buf->start =
buf->next = data;
buf->end = data + len;
*
* The hash value for the buffer content is updated.
*/
-static inline void put_byte(CODE_BUFFER *buf, BYTE byte) {
+static inline void put_byte(CODE_BUFFER *buf, BYTE byte)
+{
if (buf->next < buf->end) {
*buf->next++ = byte;
buf->hash = (buf->hash * 9) ^ byte;
*
* @return the length of the buffer content
*/
-static unsigned buf_lenght(const CODE_BUFFER *buf) {
+static unsigned buf_lenght(const CODE_BUFFER *buf)
+{
return buf->next - buf->start;
} /* buf_lenght */
*
* @return the start address of the buffer content
*/
-static const BYTE *buf_content(const CODE_BUFFER *buf) {
+static const BYTE *buf_content(const CODE_BUFFER *buf)
+{
return buf->start;
} /* buf_content */
*
* @return the hash value of the buffer content
*/
-static unsigned buf_hash(const CODE_BUFFER *buf) {
+static unsigned buf_hash(const CODE_BUFFER *buf)
+{
return buf->hash;
} /* buf_hash */
*
* @param buf the code buffer
*/
-static unsigned buf_overrun(const CODE_BUFFER *buf) {
+static unsigned buf_overrun(const CODE_BUFFER *buf)
+{
return buf->overrun;
} /* buf_overrun */
*
* @return the next byte from the code buffer
*/
-static inline BYTE look_byte(CODE_BUFFER *buf) {
+static inline BYTE look_byte(CODE_BUFFER *buf)
+{
if (buf->next < buf->end)
return *buf->next;
return VLC_TAG_END;
*
* @return the next byte from the code buffer
*/
-static inline BYTE get_byte(CODE_BUFFER *buf) {
+static inline BYTE get_byte(CODE_BUFFER *buf)
+{
if (buf->next < buf->end)
return *buf->next++;
return VLC_TAG_END;
* @param buf the code buffer
* @param code the code to be written into the buffer
*/
-static void put_code(CODE_BUFFER *buf, unsigned code) {
+static void put_code(CODE_BUFFER *buf, unsigned code)
+{
if (code < BITS(7)) {
put_byte(buf, VLC_7BIT | code);
} else if (code < BITS(6 + 8)) {
*
* @return next 32bit value from the code buffer
*/
-static unsigned get_code(CODE_BUFFER *buf) {
+static unsigned get_code(CODE_BUFFER *buf)
+{
unsigned code = get_byte(buf);
if (code < VLC_14BIT)
* @param buf the code buffer
* @param tag the tag to write to the code buffer
*/
-static void put_tag(CODE_BUFFER *buf, BYTE tag) {
+static void put_tag(CODE_BUFFER *buf, BYTE tag)
+{
assert(tag >= VLC_TAG_FIRST && "invalid tag");
put_byte(buf, tag);
*
* @return the next tag in the code buffer
*/
-static BYTE next_tag(CODE_BUFFER *buf) {
+static BYTE next_tag(CODE_BUFFER *buf)
+{
BYTE b = look_byte(buf);
if (b >= VLC_TAG_FIRST)
/**
* Compare two addresses.
*/
-static int addr_cmp(const void *p1, const void *p2, size_t size) {
+static int addr_cmp(const void *p1, const void *p2, size_t size)
+{
const addr_entry_t *e1 = p1;
const addr_entry_t *e2 = p2;
(void) size;
*
* @return reached depth
*/
-static int _encode_node(ir_node *node, int max_depth, codec_env_t *env) {
+static int _encode_node(ir_node *node, int max_depth, codec_env_t *env)
+{
addr_entry_t entry, *r_entry;
set_entry *s_entry;
int i, preds;
*
* @return The depth of the encoded graph (without cycles)
*/
-static int encode_node(ir_node *node, CODE_BUFFER *buf, int max_depth) {
+static int encode_node(ir_node *node, CODE_BUFFER *buf, int max_depth)
+{
codec_env_t env;
int res;
/**
* Decode an IR-node, recursive walker.
*/
-static void _decode_node(unsigned parent, int position, codec_env_t *env) {
+static void _decode_node(unsigned parent, int position, codec_env_t *env)
+{
unsigned code;
unsigned op_code;
unsigned mode_code = 0;
/**
* Decode an IR-node.
*/
-static void decode_node(BYTE *b, unsigned len, pattern_dumper_t *dump) {
+static void decode_node(BYTE *b, unsigned len, pattern_dumper_t *dump)
+{
codec_env_t env;
CODE_BUFFER buf;
unsigned code, options = 0;
* If the code content was never seen before, a new pattern_entry is created
* and returned.
*/
-static pattern_entry_t *pattern_get_entry(CODE_BUFFER *buf, pset *set) {
+static pattern_entry_t *pattern_get_entry(CODE_BUFFER *buf, pset *set)
+{
pattern_entry_t *key, *elem;
unsigned len = buf_lenght(buf);
unsigned hash;
*
* @note Single node patterns are ignored
*/
-static void count_pattern(CODE_BUFFER *buf, int depth) {
+static void count_pattern(CODE_BUFFER *buf, int depth)
+{
pattern_entry_t *entry;
/* ignore single node pattern (i.e. constants) */
/**
* Pre-walker for nodes pattern calculation.
*/
-static void calc_nodes_pattern(ir_node *node, void *ctx) {
+static void calc_nodes_pattern(ir_node *node, void *ctx)
+{
pattern_env_t *env = ctx;
BYTE buffer[PATTERN_STORE_SIZE];
CODE_BUFFER buf;
*
* @param fname filename for storage
*/
-static void store_pattern(const char *fname) {
+static void store_pattern(const char *fname)
+{
FILE *f;
pattern_entry_t *entry;
int i, count = pset_count(status->pattern_hash);
*
* @param fname filename
*/
-static HASH_MAP(pattern_entry_t) *read_pattern(const char *fname) {
+static HASH_MAP(pattern_entry_t) *read_pattern(const char *fname)
+{
FILE *f;
pattern_entry_t *entry, tmp;
int i, count;
*
* @param fname name of the VCG file to create
*/
-static void pattern_output(const char *fname) {
+static void pattern_output(const char *fname)
+{
pattern_entry_t *entry;
pattern_entry_t **pattern_arr;
pattern_dumper_t *dump;
/*
* Calculates the pattern history.
*/
-void stat_calc_pattern_history(ir_graph *irg) {
+void stat_calc_pattern_history(ir_graph *irg)
+{
pattern_env_t env;
unsigned i;
/*
* Initializes the pattern history.
*/
-void stat_init_pattern_history(int enable) {
+void stat_init_pattern_history(int enable)
+{
HASH_MAP(pattern_entry_t) *pattern_hash = NULL;
status->enable = enable;
/*
* Finish the pattern history.
*/
-void stat_finish_pattern_history(const char *fname) {
+void stat_finish_pattern_history(const char *fname)
+{
(void) fname;
if (! status->enable)
return;
/**
* Return the name of an optimization.
*/
-static const char *get_opt_name(int index) {
+static const char *get_opt_name(int index)
+{
assert(index < (int) ARR_SIZE(opt_names) && "index out of range");
assert((int) opt_names[index].kind == index && "opt_names broken");
return opt_names[index].name;
} /* simple_dump_be_block_reg_pressure */
/** prints a distribution entry */
-static void simple_dump_distrib_entry(const distrib_entry_t *entry, void *env) {
+static void simple_dump_distrib_entry(const distrib_entry_t *entry, void *env)
+{
dumper_t *dmp = env;
fprintf(dmp->f, "%12d", cnt_to_uint(&entry->cnt));
} /* simple_dump_distrib_entry */
/**
* Adds the counter for given entry to another distribution table.
*/
-static void add_distrib_entry(const distrib_entry_t *entry, void *env) {
+static void add_distrib_entry(const distrib_entry_t *entry, void *env)
+{
distrib_tbl_t *sum_tbl = env;
stat_add_int_distrib_tbl(sum_tbl, PTR_TO_INT(entry->object), &entry->cnt);
/**
* Dumps a line of the parameter table
*/
-static void dump_tbl_line(const distrib_entry_t *entry, void *env) {
+static void dump_tbl_line(const distrib_entry_t *entry, void *env)
+{
dumper_t *dmp = env;
fprintf(dmp->f, "%d : %u\n", PTR_TO_INT(entry->object), cnt_to_uint(&entry->cnt));
/**
* dumps the parameter distribution table
*/
-static void simple_dump_param_tbl(dumper_t *dmp, const distrib_tbl_t *tbl, graph_entry_t *global) {
+static void simple_dump_param_tbl(dumper_t *dmp, const distrib_tbl_t *tbl, graph_entry_t *global)
+{
fprintf(dmp->f, "\nCall parameter Information:\n");
fprintf(dmp->f, "---------------------\n");
/**
* dumps the optimization counter table
*/
-static void simple_dump_opt_cnt(dumper_t *dmp, const counter_t *tbl, unsigned len) {
+static void simple_dump_opt_cnt(dumper_t *dmp, const counter_t *tbl, unsigned len)
+{
unsigned i;
fprintf(dmp->f, "\nOptimization counts:\n");
/**
* initialize the simple dumper
*/
-static void simple_init(dumper_t *dmp, const char *name) {
+static void simple_init(dumper_t *dmp, const char *name)
+{
char fname[2048];
snprintf(fname, sizeof(fname), "%s.txt", name);
/**
* finishes the simple dumper
*/
-static void simple_finish(dumper_t *dmp) {
+static void simple_finish(dumper_t *dmp)
+{
if (dmp->f)
fclose(dmp->f);
dmp->f = NULL;
/**
* dumps the parameter distribution table
*/
-static void csv_dump_param_tbl(dumper_t *dmp, const distrib_tbl_t *tbl, graph_entry_t *global) {
+static void csv_dump_param_tbl(dumper_t *dmp, const distrib_tbl_t *tbl, graph_entry_t *global)
+{
(void) dmp;
(void) tbl;
(void) global;
/**
* dumps the optimization counter
*/
-static void csv_dump_opt_cnt(dumper_t *dmp, const counter_t *tbl, unsigned len) {
+static void csv_dump_opt_cnt(dumper_t *dmp, const counter_t *tbl, unsigned len)
+{
(void) dmp;
(void) tbl;
(void) len;
/**
* Get the block entry or allocate one if not yet assigned.
*/
-static block_entry_t *get_block_entry(ir_node *block) {
+static block_entry_t *get_block_entry(ir_node *block)
+{
block_entry_t *entry = get_irn_link(block);
if (entry == NULL) {
return entry;
}
-static void add_entry(ir_node ***arr, ir_node *irn) {
+static void add_entry(ir_node ***arr, ir_node *irn)
+{
ir_node **list = *arr;
int i;
ARR_APP1(ir_node *, *arr, irn);
}
-static void add_live_in(ir_node *block, ir_node *irn) {
+static void add_live_in(ir_node *block, ir_node *irn)
+{
block_entry_t *entry = get_block_entry(block);
add_entry(&entry->live_ins, irn);
}
-static void add_live_out(ir_node *block, ir_node *irn) {
+static void add_live_out(ir_node *block, ir_node *irn)
+{
block_entry_t *entry = get_block_entry(block);
add_entry(&entry->live_outs, irn);
* @param def The node (value).
* @param block The block to mark the value live out of.
*/
-static void live_end_at_block(ir_node *def, ir_node *block) {
+static void live_end_at_block(ir_node *def, ir_node *block)
+{
add_live_out(block, def);
if (is_irn_constlike(def)) {
* Calculate the live-in and live out of blocks for datab nodes.
* Use it to estimate register pressure.
*/
-void stat_liveness(ir_graph *irg) {
+void stat_liveness(ir_graph *irg)
+{
environment_t genv;
block_entry_t *p;
/**
* Add an entity to it's already set owner type.
*/
-static inline void insert_entity_in_owner(ir_entity *ent) {
+static inline void insert_entity_in_owner(ir_entity *ent)
+{
ir_type *owner = ent->owner;
switch (get_type_tpop_code(owner)) {
case tpo_class:
} /* new_rd_entity */
ir_entity *
-new_d_entity(ir_type *owner, ident *name, ir_type *type, dbg_info *db) {
+new_d_entity(ir_type *owner, ident *name, ir_type *type, dbg_info *db)
+{
ir_entity *res;
assert(is_compound_type(owner));
} /* new_d_entity */
ir_entity *
-new_entity(ir_type *owner, ident *name, ir_type *type) {
+new_entity(ir_type *owner, ident *name, ir_type *type)
+{
return new_d_entity(owner, name, type, NULL);
} /* new_entity */
* owner of the old entity, else returns the old entity.
*/
ir_entity *
-copy_entity_own(ir_entity *old, ir_type *new_owner) {
+copy_entity_own(ir_entity *old, ir_type *new_owner)
+{
ir_entity *newe;
assert(is_entity(old));
assert(is_compound_type(new_owner));
} /* copy_entity_own */
ir_entity *
-copy_entity_name(ir_entity *old, ident *new_name) {
+copy_entity_name(ir_entity *old, ident *new_name)
+{
ir_entity *newe;
assert(old && old->kind == k_entity);
} /* copy_entity_name */
void
-free_entity(ir_entity *ent) {
+free_entity(ir_entity *ent)
+{
assert(ent && ent->kind == k_entity);
free_entity_attrs(ent);
ent->kind = k_BAD;
/* Outputs a unique number for this node */
long
-get_entity_nr(const ir_entity *ent) {
+get_entity_nr(const ir_entity *ent)
+{
assert(ent && ent->kind == k_entity);
#ifdef DEBUG_libfirm
return ent->nr;
}
void
-set_entity_owner(ir_entity *ent, ir_type *owner) {
+set_entity_owner(ir_entity *ent, ir_type *owner)
+{
assert(is_entity(ent));
assert(is_compound_type(owner));
ent->owner = owner;
}
/* Checks if an entity is compiler generated */
-int (is_entity_compiler_generated)(const ir_entity *ent) {
+int (is_entity_compiler_generated)(const ir_entity *ent)
+{
return _is_entity_compiler_generated(ent);
} /* is_entity_compiler_generated */
/* Sets/resets the compiler generated flag */
-void (set_entity_compiler_generated)(ir_entity *ent, int flag) {
+void (set_entity_compiler_generated)(ir_entity *ent, int flag)
+{
_set_entity_compiler_generated(ent, flag);
} /* set_entity_compiler_generated */
-ir_entity_usage (get_entity_usage)(const ir_entity *ent) {
+ir_entity_usage (get_entity_usage)(const ir_entity *ent)
+{
return _get_entity_usage(ent);
}
-void (set_entity_usage)(ir_entity *ent, ir_entity_usage flags) {
+void (set_entity_usage)(ir_entity *ent, ir_entity_usage flags)
+{
_set_entity_usage(ent, flags);
}
/* Returns true if the node is representable as code on
* const_code_irg. */
-int is_irn_const_expression(ir_node *n) {
+int is_irn_const_expression(ir_node *n)
+{
ir_mode *m;
/* we are in danger iff an exception will arise. TODO: be more precisely,
* Copies a firm subgraph that complies to the restrictions for
* constant expressions to current_block in current_ir_graph.
*/
-ir_node *copy_const_value(dbg_info *dbg, ir_node *n) {
+ir_node *copy_const_value(dbg_info *dbg, ir_node *n)
+{
ir_node *nn;
ir_mode *m;
}
void
-free_tpop(const tp_op *tpop) {
+free_tpop(const tp_op *tpop)
+{
xfree((void *)tpop);
}
#define C TP_OP_FLAG_COMPOUND
#define ID(s) new_id_from_chars(s, sizeof(s) - 1)
-void init_tpop(void) {
+void init_tpop(void)
+{
type_class = new_tpop(tpo_class , ID("class"), C, sizeof (cls_attr), &class_ops);
type_struct = new_tpop(tpo_struct , ID("struct"), C, sizeof (stc_attr), &struct_ops);
type_method = new_tpop(tpo_method , ID("method"), 0, sizeof (mtd_attr), &method_ops);
/* Finalize the tpop module.
* Frees all type opcodes. */
-void finish_tpop(void) {
+void finish_tpop(void)
+{
free_tpop(type_class ); type_class = NULL;
free_tpop(type_struct ); type_struct = NULL;
free_tpop(type_method ); type_method = NULL;
}
/* Returns the string for the tp_opcode. */
-const char *get_tpop_name(const tp_op *op) {
+const char *get_tpop_name(const tp_op *op)
+{
return get_id_str(op->name);
}
-tp_opcode (get_tpop_code)(const tp_op *op) {
+tp_opcode (get_tpop_code)(const tp_op *op)
+{
return _get_tpop_code(op);
}
/* returns the attribute size of the operator. */
-int (get_tpop_attr_size)(const tp_op *op) {
+int (get_tpop_attr_size)(const tp_op *op)
+{
return _get_tpop_attr_size(op);
}
/* Resolve implicit inheritance. */
/* ----------------------------------------------------------------------- */
-ident *default_mangle_inherited_name(const ir_entity *super, const ir_type *clss) {
+ident *default_mangle_inherited_name(const ir_entity *super, const ir_type *clss)
+{
return id_mangle_u(new_id_from_str("inh"), id_mangle_u(get_class_ident(clss), get_entity_ident(super)));
}
*
* Resolves the implicit inheritance supplied by firm.
*/
-void resolve_inheritance(mangle_inherited_name_func *mfunc) {
+void resolve_inheritance(mangle_inherited_name_func *mfunc)
+{
if (!mfunc)
mfunc = default_mangle_inherited_name;
class_walk_super2sub(copy_entities_from_superclass, NULL, (void *)&mfunc);
/* adding the infix 'trans_'. */
/* ----------------------------------------------------------------------- */
-void set_irp_inh_transitive_closure_state(inh_transitive_closure_state s) {
+void set_irp_inh_transitive_closure_state(inh_transitive_closure_state s)
+{
irp->inh_trans_closure_state = s;
}
-void invalidate_irp_inh_transitive_closure_state(void) {
+void invalidate_irp_inh_transitive_closure_state(void)
+{
if (irp->inh_trans_closure_state == inh_transitive_closure_valid)
irp->inh_trans_closure_state = inh_transitive_closure_invalid;
}
-inh_transitive_closure_state get_irp_inh_transitive_closure_state(void) {
+inh_transitive_closure_state get_irp_inh_transitive_closure_state(void)
+{
return irp->inh_trans_closure_state;
}
-static void assert_valid_state(void) {
+static void assert_valid_state(void)
+{
assert(irp->inh_trans_closure_state == inh_transitive_closure_valid ||
irp->inh_trans_closure_state == inh_transitive_closure_invalid);
}
/**
* Compare two tr_inh_trans_tp entries.
*/
-static int tr_inh_trans_cmp(const void *e1, const void *e2, size_t size) {
+static int tr_inh_trans_cmp(const void *e1, const void *e2, size_t size)
+{
const tr_inh_trans_tp *ef1 = e1;
const tr_inh_trans_tp *ef2 = e2;
(void) size;
/**
* calculate the hash value of an tr_inh_trans_tp
*/
-static inline unsigned int tr_inh_trans_hash(const tr_inh_trans_tp *v) {
+static inline unsigned int tr_inh_trans_hash(const tr_inh_trans_tp *v)
+{
return HASH_PTR(v->kind);
}
/* This always completes successfully. */
-static tr_inh_trans_tp *get_firm_kind_entry(const firm_kind *k) {
+static tr_inh_trans_tp *get_firm_kind_entry(const firm_kind *k)
+{
tr_inh_trans_tp a, *found;
a.kind = k;
return found;
}
-static pset *get_entity_map(const ir_entity *ent, dir d) {
+static pset *get_entity_map(const ir_entity *ent, dir d)
+{
tr_inh_trans_tp *found;
assert(is_entity(ent));
return found->directions[d];
}
-static pset *get_type_map(const ir_type *tp, dir d) {
+static pset *get_type_map(const ir_type *tp, dir d)
+{
tr_inh_trans_tp *found;
assert(is_type(tp));
* If it is marked with master_flag_visited it is fully processed.
*
* Well, we still miss some candidates ... */
-static void compute_down_closure(ir_type *tp) {
+static void compute_down_closure(ir_type *tp)
+{
pset *myset, *subset;
int i, n_subtypes, n_members, n_supertypes;
ir_visited_t master_visited = get_master_type_visited();
}
}
-static void compute_up_closure(ir_type *tp) {
+static void compute_up_closure(ir_type *tp)
+{
pset *myset, *subset;
int i, n_subtypes, n_members, n_supertypes;
ir_visited_t master_visited = get_master_type_visited();
*
* This function walks over the ir (O(#types+#entities)) to compute the
* transitive closure. */
-void compute_inh_transitive_closure(void) {
+void compute_inh_transitive_closure(void)
+{
int i, n_types = get_irp_n_types();
free_inh_transitive_closure();
}
/** Free memory occupied by the transitive closure information. */
-void free_inh_transitive_closure(void) {
+void free_inh_transitive_closure(void)
+{
if (tr_inh_trans_set) {
tr_inh_trans_tp *elt;
for (elt = set_first(tr_inh_trans_set); elt; elt = set_next(tr_inh_trans_set)) {
/* - subtype ------------------------------------------------------------- */
-ir_type *get_class_trans_subtype_first(const ir_type *tp) {
+ir_type *get_class_trans_subtype_first(const ir_type *tp)
+{
assert_valid_state();
return pset_first(get_type_map(tp, d_down));
}
-ir_type *get_class_trans_subtype_next(const ir_type *tp) {
+ir_type *get_class_trans_subtype_next(const ir_type *tp)
+{
assert_valid_state();
return pset_next(get_type_map(tp, d_down));
}
-int is_class_trans_subtype(const ir_type *tp, const ir_type *subtp) {
+int is_class_trans_subtype(const ir_type *tp, const ir_type *subtp)
+{
assert_valid_state();
return (pset_find_ptr(get_type_map(tp, d_down), subtp) != NULL);
}
/* - supertype ----------------------------------------------------------- */
-ir_type *get_class_trans_supertype_first(const ir_type *tp) {
+ir_type *get_class_trans_supertype_first(const ir_type *tp)
+{
assert_valid_state();
return pset_first(get_type_map(tp, d_up));
}
-ir_type *get_class_trans_supertype_next(const ir_type *tp) {
+ir_type *get_class_trans_supertype_next(const ir_type *tp)
+{
assert_valid_state();
return pset_next(get_type_map(tp, d_up));
}
/* - overwrittenby ------------------------------------------------------- */
-ir_entity *get_entity_trans_overwrittenby_first(const ir_entity *ent) {
+ir_entity *get_entity_trans_overwrittenby_first(const ir_entity *ent)
+{
assert_valid_state();
return pset_first(get_entity_map(ent, d_down));
}
-ir_entity *get_entity_trans_overwrittenby_next(const ir_entity *ent) {
+ir_entity *get_entity_trans_overwrittenby_next(const ir_entity *ent)
+{
assert_valid_state();
return pset_next(get_entity_map(ent, d_down));
}
/** Iterate over all transitive overwritten entities. */
-ir_entity *get_entity_trans_overwrites_first(const ir_entity *ent) {
+ir_entity *get_entity_trans_overwrites_first(const ir_entity *ent)
+{
assert_valid_state();
return pset_first(get_entity_map(ent, d_up));
}
-ir_entity *get_entity_trans_overwrites_next(const ir_entity *ent) {
+ir_entity *get_entity_trans_overwrites_next(const ir_entity *ent)
+{
assert_valid_state();
return pset_next(get_entity_map(ent, d_up));
}
/* ----------------------------------------------------------------------- */
/** Returns true if low is subclass of high. */
-static int check_is_SubClass_of(ir_type *low, ir_type *high) {
+static int check_is_SubClass_of(ir_type *low, ir_type *high)
+{
int i, n_subtypes;
/* depth first search from high downwards. */
}
/* Returns true if low is subclass of high. */
-int is_SubClass_of(ir_type *low, ir_type *high) {
+int is_SubClass_of(ir_type *low, ir_type *high)
+{
assert(is_Class_type(low) && is_Class_type(high));
if (low == high) return 1;
* many as possible). If the remaining types are both class types
* and subclasses, returns true, else false. Can also be called with
* two class types. */
-int is_SubClass_ptr_of(ir_type *low, ir_type *high) {
+int is_SubClass_ptr_of(ir_type *low, ir_type *high)
+{
while (is_Pointer_type(low) && is_Pointer_type(high)) {
low = get_pointer_points_to_type(low);
high = get_pointer_points_to_type(high);
return 0;
}
-int is_overwritten_by(ir_entity *high, ir_entity *low) {
+int is_overwritten_by(ir_entity *high, ir_entity *low)
+{
int i, n_overwrittenby;
assert(is_entity(low) && is_entity(high));
*
* Need two routines because I want to assert the result.
*/
-static ir_entity *do_resolve_ent_polymorphy(ir_type *dynamic_class, ir_entity *static_ent) {
+static ir_entity *do_resolve_ent_polymorphy(ir_type *dynamic_class, ir_entity *static_ent)
+{
int i, n_overwrittenby;
if (get_entity_owner(static_ent) == dynamic_class) return static_ent;
* dynamic type are given.
* Search downwards in overwritten tree.
*/
-ir_entity *resolve_ent_polymorphy(ir_type *dynamic_class, ir_entity *static_ent) {
+ir_entity *resolve_ent_polymorphy(ir_type *dynamic_class, ir_entity *static_ent)
+{
ir_entity *res;
assert(static_ent && is_entity(static_ent));
/* - State handling. ----------------------------------------- */
-void set_irg_class_cast_state(ir_graph *irg, ir_class_cast_state s) {
+void set_irg_class_cast_state(ir_graph *irg, ir_class_cast_state s)
+{
if (get_irp_class_cast_state() > s)
set_irp_class_cast_state(s);
irg->class_cast_state = s;
}
-ir_class_cast_state get_irg_class_cast_state(const ir_graph *irg) {
+ir_class_cast_state get_irg_class_cast_state(const ir_graph *irg)
+{
return irg->class_cast_state;
}
-void set_irp_class_cast_state(ir_class_cast_state s) {
+void set_irp_class_cast_state(ir_class_cast_state s)
+{
#ifndef NDEBUG
int i;
for (i = get_irp_n_irgs() - 1; i >= 0; --i)
irp->class_cast_state = s;
}
-ir_class_cast_state get_irp_class_cast_state(void) {
+ir_class_cast_state get_irp_class_cast_state(void)
+{
return irp->class_cast_state;
}
-const char *get_class_cast_state_string(ir_class_cast_state s) {
+const char *get_class_cast_state_string(ir_class_cast_state s)
+{
#define X(a) case a: return #a
switch(s) {
X(ir_class_casts_any);
/**
* Walker: check Casts.
*/
-static void verify_irn_class_cast_state(ir_node *n, void *env) {
+static void verify_irn_class_cast_state(ir_node *n, void *env)
+{
ccs_env *ccs = (ccs_env *)env;
ir_class_cast_state this_state = ir_class_casts_any;
ir_type *fromtype, *totype;
}
/** Verify that the graph meets requirements of state set. */
-void verify_irg_class_cast_state(ir_graph *irg) {
+void verify_irg_class_cast_state(ir_graph *irg)
+{
ccs_env env;
FIRM_DBG_REGISTER(dbg, "firm.tr.inheritance");
* Show diagnostic if an entity overwrites another one not
* in direct superclasses.
*/
-static void show_ent_not_supertp(ir_entity *ent, ir_entity *ovw) {
+static void show_ent_not_supertp(ir_entity *ent, ir_entity *ovw)
+{
ir_type *owner = get_entity_owner(ent);
ir_type *ov_own = get_entity_owner(ovw);
int i;
/**
* Show diagnostic if an entity overwrites a wrong number of things.
*/
-static void show_ent_overwrite_cnt(ir_entity *ent) {
+static void show_ent_overwrite_cnt(ir_entity *ent)
+{
ir_type *owner = get_entity_owner(ent);
int i, j, k, found, show_stp = 0;
/**
* Check a class
*/
-static int check_class(ir_type *tp) {
+static int check_class(ir_type *tp)
+{
int i, j, k;
int found;
/**
* Check an array.
*/
-static int check_array(ir_type *tp) {
+static int check_array(ir_type *tp)
+{
int i, n_dim = get_array_n_dimensions(tp);
for (i = 0; i < n_dim; ++i) {
/**
* Check a primitive.
*/
-static int check_primitive(ir_type *tp) {
+static int check_primitive(ir_type *tp)
+{
ASSERT_AND_RET_DBG(
is_mode(get_type_mode(tp)),
"Primitive type without mode",
* return
* 0 if no error encountered
*/
-int check_type(ir_type *tp) {
+int check_type(ir_type *tp)
+{
switch (get_type_tpop_code(tp)) {
case tpo_class:
return check_class(tp);
/**
* checks the visited flag
*/
-static int check_visited_flag(ir_graph *irg, ir_node *n) {
+static int check_visited_flag(ir_graph *irg, ir_node *n)
+{
ASSERT_AND_RET_DBG(
get_irn_visited(n) <= get_irg_visited(irg),
"Visited flag of node is larger than that of corresponding irg.",
/**
* called by the walker
*/
-static void on_irg_storage(ir_node *n, void *env) {
+static void on_irg_storage(ir_node *n, void *env)
+{
struct myenv *myenv = env;
/* We also test whether the setting of the visited flag is legal. */
/*
* check types and entities
*/
-static void check_tore(type_or_ent tore, void *env) {
+static void check_tore(type_or_ent tore, void *env)
+{
int *res = env;
assert(tore.ent);
if (is_type(tore.typ)) {
}
ir_type *firm_unknown_type;
-ir_type *get_unknown_type(void) {
+ir_type *get_unknown_type(void)
+{
return firm_unknown_type;
}
tp->mode = mode;
}
-void set_class_size(ir_type *tp, unsigned size) {
+void set_class_size(ir_type *tp, unsigned size)
+{
tp->size = size;
}
DEBUG_ONLY(static firm_dbg_module_t *dbg;)
-static void do_finalization(type_or_ent tore, void *env) {
+static void do_finalization(type_or_ent tore, void *env)
+{
ir_type *glob_tp = env;
if (is_type(tore.typ)) {
* After this is done, all classes and entities that are not overridden
* anymore have the final property set.
*/
-void types_calc_finalization(void) {
+void types_calc_finalization(void)
+{
if (! get_opt_closed_world())
return;
/** Check whether node contains types or entities as an attribute.
If so start a walk over that information. */
-static void start_type_walk(ir_node *node, void *ctx) {
+static void start_type_walk(ir_node *node, void *ctx)
+{
type_walk_env *env = ctx;
type_walk_func *pre;
type_walk_func *post;
}
/* walker: walks over all types */
-void type_walk(type_walk_func *pre, type_walk_func *post, void *env) {
+void type_walk(type_walk_func *pre, type_walk_func *post, void *env)
+{
int i, n_types = get_irp_n_types();
type_or_ent cont;
irp_free_resources(irp, IR_RESOURCE_TYPE_VISITED);
}
-void type_walk_prog(type_walk_func *pre, type_walk_func *post, void *env) {
+void type_walk_prog(type_walk_func *pre, type_walk_func *post, void *env)
+{
int i, n_irgs = get_irp_n_irgs();
type_or_ent cont;
static int fc_exact = 1;
#if 0
-static void fail_char(const char *str, unsigned int len, int pos) {
+static void fail_char(const char *str, unsigned int len, int pos)
+{
if (*(str+pos))
printf("ERROR: Unexpected character '%c'\n", *(str + pos));
else
#endif
/** pack machine-like */
-static void *pack(const fp_value *int_float, void *packed) {
+static void *pack(const fp_value *int_float, void *packed)
+{
char *shift_val;
char *temp;
fp_value *val_buffer;
*
* @return non-zero if result is exact
*/
-static int normalize(const fp_value *in_val, fp_value *out_val, int sticky) {
+static int normalize(const fp_value *in_val, fp_value *out_val, int sticky)
+{
int exact = 1;
int hsb;
char lsb, guard, round, round_dir = 0;
/**
* calculate a + b, where a is the value with the bigger exponent
*/
-static void _fadd(const fp_value *a, const fp_value *b, fp_value *result) {
+static void _fadd(const fp_value *a, const fp_value *b, fp_value *result)
+{
char *temp;
char *exp_diff;
/**
* calculate a * b
*/
-static void _fmul(const fp_value *a, const fp_value *b, fp_value *result) {
+static void _fmul(const fp_value *a, const fp_value *b, fp_value *result)
+{
int sticky;
char *temp;
char res_sign;
/**
* calculate a / b
*/
-static void _fdiv(const fp_value *a, const fp_value *b, fp_value *result) {
+static void _fdiv(const fp_value *a, const fp_value *b, fp_value *result)
+{
int sticky;
char *temp, *dividend;
char res_sign;
}
#if 0
-static void _power_of_ten(int exp, ieee_descriptor_t *desc, char *result) {
+static void _power_of_ten(int exp, ieee_descriptor_t *desc, char *result)
+{
char *build;
char *temp;
*
* This does not clip to any integer range.
*/
-static void _trunc(const fp_value *a, fp_value *result) {
+static void _trunc(const fp_value *a, fp_value *result)
+{
/*
* When exponent == 0 all bits left of the radix point
* are the integral part of the value. For 15bit exp_size
/********
* functions defined in fltcalc.h
********/
-const void *fc_get_buffer(void) {
+const void *fc_get_buffer(void)
+{
return calc_buffer;
}
-int fc_get_buffer_length(void) {
+int fc_get_buffer_length(void)
+{
return calc_buffer_size;
}
-void *fc_val_from_str(const char *str, unsigned int len, const ieee_descriptor_t *desc, void *result) {
+void *fc_val_from_str(const char *str, unsigned int len, const ieee_descriptor_t *desc, void *result)
+{
#if 0
enum {
START,
#endif
}
-fp_value *fc_val_from_ieee754(LLDBL l, const ieee_descriptor_t *desc, fp_value *result) {
+fp_value *fc_val_from_ieee754(LLDBL l, const ieee_descriptor_t *desc, fp_value *result)
+{
char *temp;
int bias_res, bias_val, mant_val;
value_t srcval;
return result;
}
-LLDBL fc_val_to_ieee754(const fp_value *val) {
+LLDBL fc_val_to_ieee754(const fp_value *val)
+{
fp_value *value;
fp_value *temp = NULL;
return buildval.d;
}
-fp_value *fc_cast(const fp_value *value, const ieee_descriptor_t *desc, fp_value *result) {
+fp_value *fc_cast(const fp_value *value, const ieee_descriptor_t *desc, fp_value *result)
+{
char *temp;
int exp_offset, val_bias, res_bias;
return result;
}
-fp_value *fc_get_max(const ieee_descriptor_t *desc, fp_value *result) {
+fp_value *fc_get_max(const ieee_descriptor_t *desc, fp_value *result)
+{
if (result == NULL) result = calc_buffer;
result->desc.exponent_size = desc->exponent_size;
return result;
}
-fp_value *fc_get_min(const ieee_descriptor_t *desc, fp_value *result) {
+fp_value *fc_get_min(const ieee_descriptor_t *desc, fp_value *result)
+{
if (result == NULL) result = calc_buffer;
fc_get_max(desc, result);
return result;
}
-fp_value *fc_get_snan(const ieee_descriptor_t *desc, fp_value *result) {
+fp_value *fc_get_snan(const ieee_descriptor_t *desc, fp_value *result)
+{
if (result == NULL) result = calc_buffer;
result->desc.exponent_size = desc->exponent_size;
return result;
}
-fp_value *fc_get_qnan(const ieee_descriptor_t *desc, fp_value *result) {
+fp_value *fc_get_qnan(const ieee_descriptor_t *desc, fp_value *result)
+{
if (result == NULL) result = calc_buffer;
result->desc.exponent_size = desc->exponent_size;
return result;
}
-fp_value *fc_get_minusinf(const ieee_descriptor_t *desc, fp_value *result) {
+fp_value *fc_get_minusinf(const ieee_descriptor_t *desc, fp_value *result)
+{
if (result == NULL) result = calc_buffer;
fc_get_plusinf(desc, result);
return result;
}
-int fc_comp(const fp_value *val_a, const fp_value *val_b) {
+int fc_comp(const fp_value *val_a, const fp_value *val_b)
+{
int mul = 1;
/*
}
}
-int fc_is_zero(const fp_value *a) {
+int fc_is_zero(const fp_value *a)
+{
return a->desc.clss == ZERO;
}
-int fc_is_negative(const fp_value *a) {
+int fc_is_negative(const fp_value *a)
+{
return a->sign;
}
-int fc_is_inf(const fp_value *a) {
+int fc_is_inf(const fp_value *a)
+{
return a->desc.clss == INF;
}
-int fc_is_nan(const fp_value *a) {
+int fc_is_nan(const fp_value *a)
+{
return a->desc.clss == NAN;
}
-int fc_is_subnormal(const fp_value *a) {
+int fc_is_subnormal(const fp_value *a)
+{
return a->desc.clss == SUBNORMAL;
}
-char *fc_print(const fp_value *val, char *buf, int buflen, unsigned base) {
+char *fc_print(const fp_value *val, char *buf, int buflen, unsigned base)
+{
char *mul_1;
LLDBL flt_val;
return buf;
}
-unsigned char fc_sub_bits(const fp_value *value, unsigned num_bits, unsigned byte_ofs) {
+unsigned char fc_sub_bits(const fp_value *value, unsigned num_bits, unsigned byte_ofs)
+{
/* this is used to cache the packed version of the value */
static char *packed_value = NULL;
}
/* Returns non-zero if the mantissa is zero, i.e. 1.0Exxx */
-int fc_zero_mantissa(const fp_value *value) {
+int fc_zero_mantissa(const fp_value *value)
+{
return sc_get_lowest_set_bit(_mant(value)) == ROUNDING_BITS + value->desc.mantissa_size;
}
/* Returns the exponent of a value. */
-int fc_get_exponent(const fp_value *value) {
+int fc_get_exponent(const fp_value *value)
+{
int exp_bias = (1 << (value->desc.exponent_size - 1)) - 1;
return sc_val_to_long(_exp(value)) - exp_bias;
}
/* Return non-zero if a given value can be converted lossless into another precision */
-int fc_can_lossless_conv_to(const fp_value *value, const ieee_descriptor_t *desc) {
+int fc_can_lossless_conv_to(const fp_value *value, const ieee_descriptor_t *desc)
+{
int v;
int exp_bias;
}
-fc_rounding_mode_t fc_set_rounding_mode(fc_rounding_mode_t mode) {
+fc_rounding_mode_t fc_set_rounding_mode(fc_rounding_mode_t mode)
+{
if (mode == FC_TONEAREST || mode == FC_TOPOSITIVE || mode == FC_TONEGATIVE || mode == FC_TOZERO)
rounding_mode = mode;
return rounding_mode;
}
-fc_rounding_mode_t fc_get_rounding_mode(void) {
+fc_rounding_mode_t fc_get_rounding_mode(void)
+{
return rounding_mode;
}
-void init_fltcalc(int precision) {
+void init_fltcalc(int precision)
+{
if (calc_buffer == NULL) {
/* does nothing if already init */
if (precision == 0) precision = FC_DEFAULT_PRECISION;
}
}
-void finish_fltcalc (void) {
+void finish_fltcalc (void)
+{
free(calc_buffer); calc_buffer = NULL;
}
#endif
/* definition of interface functions */
-fp_value *fc_add(const fp_value *a, const fp_value *b, fp_value *result) {
+fp_value *fc_add(const fp_value *a, const fp_value *b, fp_value *result)
+{
if (result == NULL) result = calc_buffer;
TRACEPRINTF(("%s ", fc_print(a, buffer, sizeof(buffer), FC_PACKED)));
return result;
}
-fp_value *fc_sub(const fp_value *a, const fp_value *b, fp_value *result) {
+fp_value *fc_sub(const fp_value *a, const fp_value *b, fp_value *result)
+{
fp_value *temp;
if (result == NULL) result = calc_buffer;
return result;
}
-fp_value *fc_mul(const fp_value *a, const fp_value *b, fp_value *result) {
+fp_value *fc_mul(const fp_value *a, const fp_value *b, fp_value *result)
+{
if (result == NULL) result = calc_buffer;
TRACEPRINTF(("%s ", fc_print(a, buffer, sizeof(buffer), FC_PACKED)));
return result;
}
-fp_value *fc_div(const fp_value *a, const fp_value *b, fp_value *result) {
+fp_value *fc_div(const fp_value *a, const fp_value *b, fp_value *result)
+{
if (result == NULL) result = calc_buffer;
TRACEPRINTF(("%s ", fc_print(a, buffer, sizeof(buffer), FC_PACKED)));
return result;
}
-fp_value *fc_neg(const fp_value *a, fp_value *result) {
+fp_value *fc_neg(const fp_value *a, fp_value *result)
+{
if (result == NULL) result = calc_buffer;
TRACEPRINTF(("- %s ", fc_print(a, buffer, sizeof(buffer), FC_PACKED)));
return result;
}
-fp_value *fc_int(const fp_value *a, fp_value *result) {
+fp_value *fc_int(const fp_value *a, fp_value *result)
+{
if (result == NULL) result = calc_buffer;
TRACEPRINTF(("%s ", fc_print(a, buffer, sizeof(buffer), FC_PACKED)));
return result;
}
-fp_value *fc_rnd(const fp_value *a, fp_value *result) {
+fp_value *fc_rnd(const fp_value *a, fp_value *result)
+{
if (result == NULL) result = calc_buffer;
(void) a;
}
-unsigned fc_set_immediate_precision(unsigned bits) {
+unsigned fc_set_immediate_precision(unsigned bits)
+{
unsigned old = immediate_prec;
immediate_prec = bits;
return old;
}
-int fc_is_exact(void) {
+int fc_is_exact(void)
+{
return fc_exact;
}
/**
* implements the bitwise NOT operation
*/
-static void do_bitnot(const char *val, char *buffer) {
+static void do_bitnot(const char *val, char *buffer)
+{
int counter;
for (counter = 0; counter<calc_buffer_size; counter++)
/**
* implements the bitwise OR operation
*/
-static void do_bitor(const char *val1, const char *val2, char *buffer) {
+static void do_bitor(const char *val1, const char *val2, char *buffer)
+{
int counter;
for (counter = 0; counter<calc_buffer_size; counter++)
/**
* implements the bitwise eXclusive OR operation
*/
-static void do_bitxor(const char *val1, const char *val2, char *buffer) {
+static void do_bitxor(const char *val1, const char *val2, char *buffer)
+{
int counter;
for (counter = 0; counter<calc_buffer_size; counter++)
/**
* implements the bitwise AND operation
*/
-static void do_bitand(const char *val1, const char *val2, char *buffer) {
+static void do_bitand(const char *val1, const char *val2, char *buffer)
+{
int counter;
for (counter = 0; counter<calc_buffer_size; counter++)
* @todo This implementation is wrong, as it returns the highest bit of the buffer
* NOT the highest bit depending on the real mode
*/
-static int do_sign(const char *val) {
+static int do_sign(const char *val)
+{
return (val[calc_buffer_size-1] <= SC_7) ? (1) : (-1);
}
/**
* returns non-zero if bit at position pos is set
*/
-static int do_bit(const char *val, int pos) {
+static int do_bit(const char *val, int pos)
+{
int bit = pos & 3;
int nibble = pos >> 2;
/**
* Implements a fast ADD + 1
*/
-static void do_inc(const char *val, char *buffer) {
+static void do_inc(const char *val, char *buffer)
+{
int counter = 0;
while (counter++ < calc_buffer_size) {
/**
* Implements a unary MINUS
*/
-static void do_negate(const char *val, char *buffer) {
+static void do_negate(const char *val, char *buffer)
+{
do_bitnot(val, buffer);
do_inc(buffer, buffer);
}
* @todo The implementation of carry is wrong, as it is the
* calc_buffer_size carry, not the mode depending
*/
-static void do_add(const char *val1, const char *val2, char *buffer) {
+static void do_add(const char *val1, const char *val2, char *buffer)
+{
int counter;
const char *add1, *add2;
char carry = SC_0;
/**
* Implements a binary SUB
*/
-static void do_sub(const char *val1, const char *val2, char *buffer) {
+static void do_sub(const char *val1, const char *val2, char *buffer)
+{
char *temp_buffer = alloca(calc_buffer_size); /* intermediate buffer to hold -val2 */
do_negate(val2, temp_buffer);
/**
* Implements a binary MUL
*/
-static void do_mul(const char *val1, const char *val2, char *buffer) {
+static void do_mul(const char *val1, const char *val2, char *buffer)
+{
char *temp_buffer; /* result buffer */
char *neg_val1; /* abs of val1 */
char *neg_val2; /* abs of val2 */
/**
* Shift the buffer to left and add a 4 bit digit
*/
-static void do_push(const char digit, char *buffer) {
+static void do_push(const char digit, char *buffer)
+{
int counter;
for (counter = calc_buffer_size - 2; counter >= 0; counter--) {
*
* Note: This is MOST slow
*/
-static void do_divmod(const char *rDividend, const char *divisor, char *quot, char *rem) {
+static void do_divmod(const char *rDividend, const char *divisor, char *quot, char *rem)
+{
const char *dividend = rDividend;
const char *minus_divisor;
char *neg_val1;
*
* @todo Assertions seems to be wrong
*/
-static void do_shl(const char *val1, char *buffer, long shift_cnt, int bitsize, unsigned is_signed) {
+static void do_shl(const char *val1, char *buffer, long shift_cnt, int bitsize, unsigned is_signed)
+{
const char *shl;
char shift;
char carry = SC_0;
*
* @todo Assertions seems to be wrong
*/
-static void do_shr(const char *val1, char *buffer, long shift_cnt, int bitsize, unsigned is_signed, int signed_shift) {
+static void do_shr(const char *val1, char *buffer, long shift_cnt, int bitsize, unsigned is_signed, int signed_shift)
+{
const char *shrs;
char sign;
char msd;
* Implements a Rotate Left.
* positive: low-order -> high order, negative other direction
*/
-static void do_rotl(const char *val1, char *buffer, long offset, int radius, unsigned is_signed) {
+static void do_rotl(const char *val1, char *buffer, long offset, int radius, unsigned is_signed)
+{
char *temp1, *temp2;
temp1 = alloca(calc_buffer_size);
temp2 = alloca(calc_buffer_size);
/*****************************************************************************
* public functions, declared in strcalc.h
*****************************************************************************/
-const void *sc_get_buffer(void) {
+const void *sc_get_buffer(void)
+{
return (void*)calc_buffer;
}
-int sc_get_buffer_length(void) {
+int sc_get_buffer_length(void)
+{
return calc_buffer_size;
}
/**
* Do sign extension if the mode is signed, otherwise to zero extension.
*/
-void sign_extend(void *buffer, ir_mode *mode) {
+void sign_extend(void *buffer, ir_mode *mode)
+{
char *calc_buffer = buffer;
int bits = get_mode_size_bits(mode) - 1;
int nibble = bits >> 2;
}
/* FIXME doesn't check for overflows */
-void sc_val_from_str(const char *str, unsigned int len, void *buffer, ir_mode *mode) {
+void sc_val_from_str(const char *str, unsigned int len, void *buffer, ir_mode *mode)
+{
const char *orig_str = str;
unsigned int orig_len = len;
sign_extend(calc_buffer, mode);
}
-void sc_val_from_long(long value, void *buffer) {
+void sc_val_from_long(long value, void *buffer)
+{
char *pos;
char sign, is_minlong;
}
}
-void sc_val_from_ulong(unsigned long value, void *buffer) {
+void sc_val_from_ulong(unsigned long value, void *buffer)
+{
unsigned char *pos;
if (buffer == NULL) buffer = calc_buffer;
}
}
-long sc_val_to_long(const void *val) {
+long sc_val_to_long(const void *val)
+{
int i;
long l = 0;
return l;
}
-void sc_min_from_bits(unsigned int num_bits, unsigned int sign, void *buffer) {
+void sc_min_from_bits(unsigned int num_bits, unsigned int sign, void *buffer)
+{
char *pos;
int i, bits;
*pos++ = SC_F;
}
-void sc_max_from_bits(unsigned int num_bits, unsigned int sign, void *buffer) {
+void sc_max_from_bits(unsigned int num_bits, unsigned int sign, void *buffer)
+{
char* pos;
int i, bits;
*pos++ = SC_0;
}
-void sc_truncate(unsigned int num_bits, void *buffer) {
+void sc_truncate(unsigned int num_bits, void *buffer)
+{
char *cbuffer = buffer;
char *pos = cbuffer + (num_bits / 4);
char *end = cbuffer + calc_buffer_size;
*pos = SC_0;
}
-int sc_comp(const void* value1, const void* value2) {
+int sc_comp(const void* value1, const void* value2)
+{
int counter = calc_buffer_size - 1;
const char *val1 = (const char *)value1;
const char *val2 = (const char *)value2;
return (val1[counter] > val2[counter]) ? (1) : (-1);
}
-int sc_get_highest_set_bit(const void *value) {
+int sc_get_highest_set_bit(const void *value)
+{
const char *val = (const char*)value;
int high, counter;
return high;
}
-int sc_get_lowest_set_bit(const void *value) {
+int sc_get_lowest_set_bit(const void *value)
+{
const char *val = (const char*)value;
int low, counter;
return -1;
}
-int sc_get_bit_at(const void *value, unsigned pos) {
+int sc_get_bit_at(const void *value, unsigned pos)
+{
const char *val = value;
unsigned nibble = pos >> 2;
val[nibble] |= SHIFT(pos & 3);
}
-int sc_is_zero(const void *value) {
+int sc_is_zero(const void *value)
+{
const char* val = (const char *)value;
int counter;
return 1;
}
-int sc_is_negative(const void *value) {
+int sc_is_negative(const void *value)
+{
return do_sign(value) == -1;
}
-int sc_had_carry(void) {
+int sc_had_carry(void)
+{
return carry_flag;
}
-unsigned char sc_sub_bits(const void *value, int len, unsigned byte_ofs) {
+unsigned char sc_sub_bits(const void *value, int len, unsigned byte_ofs)
+{
const char *val = (const char *)value;
int nibble_ofs = 2 * byte_ofs;
unsigned char res;
* convert to a string
* FIXME: Doesn't check buffer bounds
*/
-const char *sc_print(const void *value, unsigned bits, enum base_t base, int signed_mode) {
+const char *sc_print(const void *value, unsigned bits, enum base_t base, int signed_mode)
+{
static const char big_digits[] = "0123456789ABCDEF";
static const char small_digits[] = "0123456789abcdef";
return pos;
}
-void init_strcalc(int precision) {
+void init_strcalc(int precision)
+{
if (calc_buffer == NULL) {
if (precision <= 0) precision = SC_DEFAULT_PRECISION;
}
-void finish_strcalc(void) {
+void finish_strcalc(void)
+{
free(calc_buffer); calc_buffer = NULL;
free(output_buffer); output_buffer = NULL;
}
-int sc_get_precision(void) {
+int sc_get_precision(void)
+{
return bit_pattern_size;
}
-void sc_add(const void *value1, const void *value2, void *buffer) {
+void sc_add(const void *value1, const void *value2, void *buffer)
+{
CLEAR_BUFFER(calc_buffer);
carry_flag = 0;
}
}
-void sc_sub(const void *value1, const void *value2, void *buffer) {
+void sc_sub(const void *value1, const void *value2, void *buffer)
+{
CLEAR_BUFFER(calc_buffer);
carry_flag = 0;
}
}
-void sc_neg(const void *value1, void *buffer) {
+void sc_neg(const void *value1, void *buffer)
+{
carry_flag = 0;
DEBUGPRINTF_COMPUTATION(("- %s ->", sc_print_hex(value1)));
}
}
-void sc_and(const void *value1, const void *value2, void *buffer) {
+void sc_and(const void *value1, const void *value2, void *buffer)
+{
CLEAR_BUFFER(calc_buffer);
carry_flag = 0;
}
}
-void sc_or(const void *value1, const void *value2, void *buffer) {
+void sc_or(const void *value1, const void *value2, void *buffer)
+{
CLEAR_BUFFER(calc_buffer);
carry_flag = 0;
}
}
-void sc_xor(const void *value1, const void *value2, void *buffer) {
+void sc_xor(const void *value1, const void *value2, void *buffer)
+{
CLEAR_BUFFER(calc_buffer);
carry_flag = 0;
}
}
-void sc_not(const void *value1, void *buffer) {
+void sc_not(const void *value1, void *buffer)
+{
CLEAR_BUFFER(calc_buffer);
carry_flag = 0;
}
}
-void sc_mul(const void *value1, const void *value2, void *buffer) {
+void sc_mul(const void *value1, const void *value2, void *buffer)
+{
CLEAR_BUFFER(calc_buffer);
carry_flag = 0;
}
}
-void sc_div(const void *value1, const void *value2, void *buffer) {
+void sc_div(const void *value1, const void *value2, void *buffer)
+{
/* temp buffer holding unused result of divmod */
char *unused_res = alloca(calc_buffer_size);
}
}
-void sc_mod(const void *value1, const void *value2, void *buffer) {
+void sc_mod(const void *value1, const void *value2, void *buffer)
+{
/* temp buffer holding unused result of divmod */
char *unused_res = alloca(calc_buffer_size);
}
}
-void sc_divmod(const void *value1, const void *value2, void *div_buffer, void *mod_buffer) {
+void sc_divmod(const void *value1, const void *value2, void *div_buffer, void *mod_buffer)
+{
CLEAR_BUFFER(calc_buffer);
carry_flag = 0;
}
-void sc_shlI(const void *val1, long shift_cnt, int bitsize, int sign, void *buffer) {
+void sc_shlI(const void *val1, long shift_cnt, int bitsize, int sign, void *buffer)
+{
carry_flag = 0;
DEBUGPRINTF_COMPUTATION(("%s << %ld ", sc_print_hex(value1), shift_cnt));
}
}
-void sc_shl(const void *val1, const void *val2, int bitsize, int sign, void *buffer) {
+void sc_shl(const void *val1, const void *val2, int bitsize, int sign, void *buffer)
+{
long offset = sc_val_to_long(val2);
sc_shlI(val1, offset, bitsize, sign, buffer);
}
-void sc_shrI(const void *val1, long shift_cnt, int bitsize, int sign, void *buffer) {
+void sc_shrI(const void *val1, long shift_cnt, int bitsize, int sign, void *buffer)
+{
carry_flag = 0;
DEBUGPRINTF_COMPUTATION(("%s >>u %ld ", sc_print_hex(value1), shift_cnt));
}
}
-void sc_shr(const void *val1, const void *val2, int bitsize, int sign, void *buffer) {
+void sc_shr(const void *val1, const void *val2, int bitsize, int sign, void *buffer)
+{
long shift_cnt = sc_val_to_long(val2);
sc_shrI(val1, shift_cnt, bitsize, sign, buffer);
}
-void sc_shrs(const void *val1, const void *val2, int bitsize, int sign, void *buffer) {
+void sc_shrs(const void *val1, const void *val2, int bitsize, int sign, void *buffer)
+{
long offset = sc_val_to_long(val2);
carry_flag = 0;
}
}
-void sc_rotl(const void *val1, const void *val2, int bitsize, int sign, void *buffer) {
+void sc_rotl(const void *val1, const void *val2, int bitsize, int sign, void *buffer)
+{
long offset = sc_val_to_long(val2);
carry_flag = 0;
}
}
-void sc_zero(void *buffer) {
+void sc_zero(void *buffer)
+{
if (buffer == NULL)
buffer = calc_buffer;
CLEAR_BUFFER(buffer);
#endif /* NDEBUG */
/** Hash a tarval. */
-static int hash_tv(tarval *tv) {
+static int hash_tv(tarval *tv)
+{
return (PTR_TO_INT(tv->value) ^ PTR_TO_INT(tv->mode)) + tv->length;
}
/** Hash a value. Treat it as a byte array. */
-static int hash_val(const void *value, unsigned int length) {
+static int hash_val(const void *value, unsigned int length)
+{
unsigned int i;
unsigned int hash = 0;
return hash;
}
-static int cmp_tv(const void *p1, const void *p2, size_t n) {
+static int cmp_tv(const void *p1, const void *p2, size_t n)
+{
const tarval *tv1 = p1;
const tarval *tv2 = p2;
(void) n;
}
/** finds tarval with value/mode or creates new tarval */
-static tarval *get_tarval(const void *value, int length, ir_mode *mode) {
+static tarval *get_tarval(const void *value, int length, ir_mode *mode)
+{
tarval tv;
tv.kind = k_tarval;
/**
* get the float descriptor for given mode.
*/
-static const ieee_descriptor_t *get_descriptor(const ir_mode *mode) {
+static const ieee_descriptor_t *get_descriptor(const ir_mode *mode)
+{
switch (get_mode_size_bits(mode)) {
case 16: return &half_desc;
case 32: return &single_desc;
/*
* helper function, create a tarval from long
*/
-tarval *new_tarval_from_long(long l, ir_mode *mode) {
+tarval *new_tarval_from_long(long l, ir_mode *mode)
+{
assert(mode);
switch (get_mode_sort(mode)) {
}
/* returns non-zero if can be converted to long */
-int tarval_is_long(tarval *tv) {
+int tarval_is_long(tarval *tv)
+{
if (!mode_is_int(tv->mode) && !mode_is_reference(tv->mode))
return 0;
}
/* this might overflow the machine's long, so use only with small values */
-long get_tarval_long(tarval* tv) {
+long get_tarval_long(tarval* tv)
+{
assert(tarval_is_long(tv) && "tarval too big to fit in long");
return sc_val_to_long(tv->value);
}
-tarval *new_tarval_from_double(long double d, ir_mode *mode) {
+tarval *new_tarval_from_double(long double d, ir_mode *mode)
+{
const ieee_descriptor_t *desc;
assert(mode && (get_mode_sort(mode) == irms_float_number));
}
/* returns non-zero if can be converted to double */
-int tarval_is_double(tarval *tv) {
+int tarval_is_double(tarval *tv)
+{
assert(tv);
return (get_mode_sort(tv->mode) == irms_float_number);
}
-long double get_tarval_double(tarval *tv) {
+long double get_tarval_double(tarval *tv)
+{
assert(tarval_is_double(tv));
return fc_val_to_ieee754(tv->value);
*/
/* get the mode of the tarval */
-ir_mode *(get_tarval_mode)(const tarval *tv) {
+ir_mode *(get_tarval_mode)(const tarval *tv)
+{
return _get_tarval_mode(tv);
}
* therefore the irmode functions should be preferred to the functions below.
*/
-tarval *(get_tarval_bad)(void) {
+tarval *(get_tarval_bad)(void)
+{
return _get_tarval_bad();
}
-tarval *(get_tarval_undefined)(void) {
+tarval *(get_tarval_undefined)(void)
+{
return _get_tarval_undefined();
}
-tarval *(get_tarval_b_false)(void) {
+tarval *(get_tarval_b_false)(void)
+{
return _get_tarval_b_false();
}
-tarval *(get_tarval_b_true)(void) {
+tarval *(get_tarval_b_true)(void)
+{
return _get_tarval_b_true();
}
-tarval *(get_tarval_reachable)(void) {
+tarval *(get_tarval_reachable)(void)
+{
return _get_tarval_reachable();
}
-tarval *(get_tarval_unreachable)(void) {
+tarval *(get_tarval_unreachable)(void)
+{
return _get_tarval_unreachable();
}
-tarval *get_tarval_max(ir_mode *mode) {
+tarval *get_tarval_max(ir_mode *mode)
+{
const ieee_descriptor_t *desc;
assert(mode);
return tarval_bad;
}
-tarval *get_tarval_min(ir_mode *mode) {
+tarval *get_tarval_min(ir_mode *mode)
+{
const ieee_descriptor_t *desc;
assert(mode);
/** The bit pattern for the pointer NULL */
static long _null_value = 0;
-tarval *get_tarval_null(ir_mode *mode) {
+tarval *get_tarval_null(ir_mode *mode)
+{
assert(mode);
if (get_mode_n_vector_elems(mode) > 1) {
return tarval_bad;
}
-tarval *get_tarval_one(ir_mode *mode) {
+tarval *get_tarval_one(ir_mode *mode)
+{
assert(mode);
if (get_mode_n_vector_elems(mode) > 1)
return tarval_bad;
}
-tarval *get_tarval_all_one(ir_mode *mode) {
+tarval *get_tarval_all_one(ir_mode *mode)
+{
assert(mode);
if (get_mode_n_vector_elems(mode) > 1)
return tarval_bad;
}
-int tarval_is_constant(tarval *tv) {
+int tarval_is_constant(tarval *tv)
+{
int num_res = sizeof(reserved_tv) / sizeof(reserved_tv[0]);
/* reserved tarvals are NOT constants. Note that although
return (tv < &reserved_tv[2] || tv > &reserved_tv[num_res - 1]);
}
-tarval *get_tarval_minus_one(ir_mode *mode) {
+tarval *get_tarval_minus_one(ir_mode *mode)
+{
assert(mode);
if (get_mode_n_vector_elems(mode) > 1)
return tarval_bad;
}
-tarval *get_tarval_nan(ir_mode *mode) {
+tarval *get_tarval_nan(ir_mode *mode)
+{
const ieee_descriptor_t *desc;
assert(mode);
panic("mode %F does not support NaN value", mode);
}
-tarval *get_tarval_plus_inf(ir_mode *mode) {
+tarval *get_tarval_plus_inf(ir_mode *mode)
+{
assert(mode);
if (get_mode_n_vector_elems(mode) > 1)
panic("vector arithmetic not implemented yet");
panic("mode %F does not support +inf value", mode);
}
-tarval *get_tarval_minus_inf(ir_mode *mode) {
+tarval *get_tarval_minus_inf(ir_mode *mode)
+{
assert(mode);
if (get_mode_n_vector_elems(mode) > 1)
/*
* test if negative number, 1 means 'yes'
*/
-int tarval_is_negative(tarval *a) {
+int tarval_is_negative(tarval *a)
+{
if (get_mode_n_vector_elems(a->mode) > 1)
panic("vector arithmetic not implemented yet");
/*
* test if null, 1 means 'yes'
*/
-int tarval_is_null(tarval *a) {
+int tarval_is_null(tarval *a)
+{
return
a != tarval_bad &&
a == get_mode_null(get_tarval_mode(a));
/*
* test if one, 1 means 'yes'
*/
-int tarval_is_one(tarval *a) {
+int tarval_is_one(tarval *a)
+{
return
a != tarval_bad &&
a == get_mode_one(get_tarval_mode(a));
}
-int tarval_is_all_one(tarval *tv) {
+int tarval_is_all_one(tarval *tv)
+{
return
tv != tarval_bad &&
tv == get_mode_all_one(get_tarval_mode(tv));
/*
* test if one, 1 means 'yes'
*/
-int tarval_is_minus_one(tarval *a) {
+int tarval_is_minus_one(tarval *a)
+{
return
a != tarval_bad &&
a == get_mode_minus_one(get_tarval_mode(a));
/*
* comparison
*/
-pn_Cmp tarval_cmp(tarval *a, tarval *b) {
+pn_Cmp tarval_cmp(tarval *a, tarval *b)
+{
carry_flag = -1;
if (a == tarval_bad || b == tarval_bad) {
/*
* convert to other mode
*/
-tarval *tarval_convert_to(tarval *src, ir_mode *dst_mode) {
+tarval *tarval_convert_to(tarval *src, ir_mode *dst_mode)
+{
char *buffer;
fp_value *res;
const ieee_descriptor_t *desc;
/*
* bitwise negation
*/
-tarval *tarval_not(tarval *a) {
+tarval *tarval_not(tarval *a)
+{
char *buffer;
carry_flag = -1;
/*
* arithmetic negation
*/
-tarval *tarval_neg(tarval *a) {
+tarval *tarval_neg(tarval *a)
+{
char *buffer;
assert(mode_is_num(a->mode)); /* negation only for numerical values */
/*
* addition
*/
-tarval *tarval_add(tarval *a, tarval *b) {
+tarval *tarval_add(tarval *a, tarval *b)
+{
char *buffer;
carry_flag = -1;
/*
* subtraction
*/
-tarval *tarval_sub(tarval *a, tarval *b, ir_mode *dst_mode) {
+tarval *tarval_sub(tarval *a, tarval *b, ir_mode *dst_mode)
+{
char *buffer;
carry_flag = -1;
/*
* multiplication
*/
-tarval *tarval_mul(tarval *a, tarval *b) {
+tarval *tarval_mul(tarval *a, tarval *b)
+{
char *buffer;
assert(a->mode == b->mode);
/*
* floating point division
*/
-tarval *tarval_quo(tarval *a, tarval *b) {
+tarval *tarval_quo(tarval *a, tarval *b)
+{
assert((a->mode == b->mode) && mode_is_float(a->mode));
carry_flag = -1;
* integer division
* overflow is impossible, but look out for division by zero
*/
-tarval *tarval_div(tarval *a, tarval *b) {
+tarval *tarval_div(tarval *a, tarval *b)
+{
assert((a->mode == b->mode) && mode_is_int(a->mode));
carry_flag = -1;
* remainder
* overflow is impossible, but look out for division by zero
*/
-tarval *tarval_mod(tarval *a, tarval *b) {
+tarval *tarval_mod(tarval *a, tarval *b)
+{
assert((a->mode == b->mode) && mode_is_int(a->mode));
carry_flag = -1;
* integer division AND remainder
* overflow is impossible, but look out for division by zero
*/
-tarval *tarval_divmod(tarval *a, tarval *b, tarval **mod) {
+tarval *tarval_divmod(tarval *a, tarval *b, tarval **mod)
+{
int len = sc_get_buffer_length();
char *div_res = alloca(len);
char *mod_res = alloca(len);
/*
* absolute value
*/
-tarval *tarval_abs(tarval *a) {
+tarval *tarval_abs(tarval *a)
+{
char *buffer;
carry_flag = -1;
/*
* bitwise and
*/
-tarval *tarval_and(tarval *a, tarval *b) {
+tarval *tarval_and(tarval *a, tarval *b)
+{
assert(a->mode == b->mode);
/* works even for vector modes */
/*
* bitwise or
*/
-tarval *tarval_or(tarval *a, tarval *b) {
+tarval *tarval_or(tarval *a, tarval *b)
+{
assert(a->mode == b->mode);
/* works even for vector modes */
/*
* bitwise exclusive or (xor)
*/
-tarval *tarval_eor(tarval *a, tarval *b) {
+tarval *tarval_eor(tarval *a, tarval *b)
+{
assert((a->mode == b->mode));
/* works even for vector modes */
/*
* bitwise left shift
*/
-tarval *tarval_shl(tarval *a, tarval *b) {
+tarval *tarval_shl(tarval *a, tarval *b)
+{
char *temp_val = NULL;
assert(mode_is_int(a->mode) && mode_is_int(b->mode));
/*
* bitwise unsigned right shift
*/
-tarval *tarval_shr(tarval *a, tarval *b) {
+tarval *tarval_shr(tarval *a, tarval *b)
+{
char *temp_val = NULL;
assert(mode_is_int(a->mode) && mode_is_int(b->mode));
/*
* bitwise signed right shift
*/
-tarval *tarval_shrs(tarval *a, tarval *b) {
+tarval *tarval_shrs(tarval *a, tarval *b)
+{
char *temp_val = NULL;
assert(mode_is_int(a->mode) && mode_is_int(b->mode));
/*
* bitwise rotation to left
*/
-tarval *tarval_rotl(tarval *a, tarval *b) {
+tarval *tarval_rotl(tarval *a, tarval *b)
+{
char *temp_val = NULL;
assert(mode_is_int(a->mode) && mode_is_int(b->mode));
/*
* carry flag of the last operation
*/
-int tarval_carry(void) {
+int tarval_carry(void)
+{
if (carry_flag == -1)
panic("Carry undefined for the last operation");
return carry_flag;
/*
* Output of tarvals
*/
-int tarval_snprintf(char *buf, size_t len, tarval *tv) {
+int tarval_snprintf(char *buf, size_t len, tarval *tv)
+{
static const tarval_mode_info default_info = { TVO_NATIVE, NULL, NULL };
const char *str;
/**
* Output of tarvals to stdio.
*/
-int tarval_printf(tarval *tv) {
+int tarval_printf(tarval *tv)
+{
char buf[1024];
int res;
return res;
}
-char *get_tarval_bitpattern(tarval *tv) {
+char *get_tarval_bitpattern(tarval *tv)
+{
int i, j, pos = 0;
int n = get_mode_size_bits(tv->mode);
int bytes = (n + 7) / 8;
/*
* access to the bitpattern
*/
-unsigned char get_tarval_sub_bits(tarval *tv, unsigned byte_ofs) {
+unsigned char get_tarval_sub_bits(tarval *tv, unsigned byte_ofs)
+{
switch (get_mode_arithmetic(tv->mode)) {
case irma_twos_complement:
return sc_sub_bits(tv->value, get_mode_size_bits(tv->mode), byte_ofs);
*
* Returns zero on success.
*/
-int set_tarval_mode_output_option(ir_mode *mode, const tarval_mode_info *modeinfo) {
+int set_tarval_mode_output_option(ir_mode *mode, const tarval_mode_info *modeinfo)
+{
assert(mode);
mode->tv_priv = modeinfo;
*
* This functions returns the mode info of a given mode.
*/
-const tarval_mode_info *get_tarval_mode_output_option(ir_mode *mode) {
+const tarval_mode_info *get_tarval_mode_output_option(ir_mode *mode)
+{
assert(mode);
return mode->tv_priv;
* Returns non-zero if a given (integer) tarval has only one single bit
* set.
*/
-int tarval_is_single_bit(tarval *tv) {
+int tarval_is_single_bit(tarval *tv)
+{
int i, l;
int bits;
* Returns non-zero if the mantissa of a floating point IEEE-754
* tarval is zero (i.e. 1.0Exxx)
*/
-int tarval_ieee754_zero_mantissa(tarval *tv) {
+int tarval_ieee754_zero_mantissa(tarval *tv)
+{
assert(get_mode_arithmetic(tv->mode) == irma_ieee754);
return fc_zero_mantissa(tv->value);
}
/* Returns the exponent of a floating point IEEE-754 tarval. */
-int tarval_ieee754_get_exponent(tarval *tv) {
+int tarval_ieee754_get_exponent(tarval *tv)
+{
assert(get_mode_arithmetic(tv->mode) == irma_ieee754);
return fc_get_exponent(tv->value);
}
* Check if the tarval can be converted to the given mode without
* precision loss.
*/
-int tarval_ieee754_can_conv_lossless(tarval *tv, ir_mode *mode) {
+int tarval_ieee754_can_conv_lossless(tarval *tv, ir_mode *mode)
+{
const ieee_descriptor_t *desc = get_descriptor(mode);
return fc_can_lossless_conv_to(tv->value, desc);
}
/* Set the immediate precision for IEEE-754 results. */
-unsigned tarval_ieee754_set_immediate_precision(unsigned bits) {
+unsigned tarval_ieee754_set_immediate_precision(unsigned bits)
+{
return fc_set_immediate_precision(bits);
}
/* Returns non-zero if the result of the last IEEE-754 operation was exact. */
-unsigned tarval_ieee754_get_exact(void) {
+unsigned tarval_ieee754_get_exact(void)
+{
return fc_is_exact();
}
/* Return the size of the mantissa in bits (including possible
implicit bits) for the given mode. */
-unsigned tarval_ieee754_get_mantissa_size(const ir_mode *mode) {
+unsigned tarval_ieee754_get_mantissa_size(const ir_mode *mode)
+{
const ieee_descriptor_t *desc;
assert(get_mode_arithmetic(mode) == irma_ieee754);
}
/* check if its the a floating point NaN */
-int tarval_is_NaN(tarval *tv) {
+int tarval_is_NaN(tarval *tv)
+{
if (! mode_is_float(tv->mode))
return 0;
return fc_is_nan(tv->value);
}
/* check if its the a floating point +inf */
-int tarval_is_plus_inf(tarval *tv) {
+int tarval_is_plus_inf(tarval *tv)
+{
if (! mode_is_float(tv->mode))
return 0;
return fc_is_inf(tv->value) && !fc_is_negative(tv->value);
}
/* check if its the a floating point -inf */
-int tarval_is_minus_inf(tarval *tv) {
+int tarval_is_minus_inf(tarval *tv)
+{
if (! mode_is_float(tv->mode))
return 0;
return fc_is_inf(tv->value) && fc_is_negative(tv->value);
}
/* check if the tarval represents a finite value */
-int tarval_is_finite(tarval *tv) {
+int tarval_is_finite(tarval *tv)
+{
if (mode_is_float(tv->mode))
return !fc_is_nan(tv->value) && !fc_is_inf(tv->value);
return 1;
/*
* Sets the overflow mode for integer operations.
*/
-void tarval_set_integer_overflow_mode(tarval_int_overflow_mode_t ov_mode) {
+void tarval_set_integer_overflow_mode(tarval_int_overflow_mode_t ov_mode)
+{
int_overflow_mode = ov_mode;
}
/* Get the overflow mode for integer operations. */
-tarval_int_overflow_mode_t tarval_get_integer_overflow_mode(void) {
+tarval_int_overflow_mode_t tarval_get_integer_overflow_mode(void)
+{
return int_overflow_mode;
}
/* Enable/Disable floating point constant folding. */
-void tarval_enable_fp_ops(int enable) {
+void tarval_enable_fp_ops(int enable)
+{
no_float = !enable;
}
-int tarval_fp_ops_enabled(void) {
+int tarval_fp_ops_enabled(void)
+{
return !no_float;
}
/*
* Initialization of the tarval module: called before init_mode()
*/
-void init_tarval_1(long null_value, int support_quad_precision) {
+void init_tarval_1(long null_value, int support_quad_precision)
+{
/* if these assertion fail, tarval_is_constant() will follow ... */
assert(tarval_b_false == &reserved_tv[0] && "b_false MUST be the first reserved tarval!");
assert(tarval_b_true == &reserved_tv[1] && "b_true MUST be the second reserved tarval!");
/*
* Initialization of the tarval module: called after init_mode()
*/
-void init_tarval_2(void) {
+void init_tarval_2(void)
+{
tarval_bad->kind = k_tarval;
tarval_bad->mode = mode_BAD;
tarval_bad->value = INT_TO_PTR(resid_tarval_bad);
}
/* free all memory occupied by tarval. */
-void finish_tarval(void) {
+void finish_tarval(void)
+{
finish_strcalc();
finish_fltcalc();
del_set(tarvals); tarvals = NULL;
del_set(values); values = NULL;
}
-int (is_tarval)(const void *thing) {
+int (is_tarval)(const void *thing)
+{
return _is_tarval(thing);
}
#define ADD_ADR(p, off) ((void *)((char *)(p) + (off)))
/** debug output */
-static void debug(char *fmt, ...) {
+static void debug(char *fmt, ...)
+{
va_list ap;
char buf[1024];
/**
* return the size of a firm object
*/
-int get_firm_object_size(firm_kind kind) {
+int get_firm_object_size(firm_kind kind)
+{
switch (kind) {
case k_entity: /* an entity */
return sizeof(ir_entity);
/**
* Format an ident
*/
-HRESULT format_ident(DEBUGHELPER *pHelper, const void *address, char *pResult, size_t max) {
+HRESULT format_ident(DEBUGHELPER *pHelper, const void *address, char *pResult, size_t max)
+{
set_entry *data = NULL;
set_entry id;
size_t len, slen;
*
* @param type the address of the type in debuggee's space
*/
-static HRESULT is_global_type(DEBUGHELPER *pHelper, const void *type, int *flag) {
+static HRESULT is_global_type(DEBUGHELPER *pHelper, const void *type, int *flag)
+{
ir_type tp;
*flag = 0;
/**
* format an entity
*/
-static HRESULT format_entity(DEBUGHELPER *pHelper, int nBase, const void *addr, char *pResult, size_t max, int top) {
+static HRESULT format_entity(DEBUGHELPER *pHelper, int nBase, const void *addr, char *pResult, size_t max, int top)
+{
ir_entity ent;
ir_type owner;
char name[256];
/**
* format a type
*/
-static HRESULT format_type(DEBUGHELPER *pHelper, int nBase, const void *addr, char *pResult, size_t max, int top) {
+static HRESULT format_type(DEBUGHELPER *pHelper, int nBase, const void *addr, char *pResult, size_t max, int top)
+{
ir_type tp;
char name[256];
/**
* format an irg
*/
-static HRESULT format_irg(DEBUGHELPER *pHelper, int nBase, const void *addr, char *pResult, size_t max, int top) {
+static HRESULT format_irg(DEBUGHELPER *pHelper, int nBase, const void *addr, char *pResult, size_t max, int top)
+{
ir_graph irg;
char name[256];
/**
* format an ir_op
*/
-HRESULT format_op(DEBUGHELPER *pHelper, const void *addr, char *pResult, size_t max) {
+HRESULT format_op(DEBUGHELPER *pHelper, const void *addr, char *pResult, size_t max)
+{
ir_op op;
if (copy_from_debuggee(addr, pHelper, &op, sizeof(op)) != S_OK)
/**
* format an ir_mode
*/
-static HRESULT format_mode(DEBUGHELPER *pHelper, const void *addr, char *pResult, size_t max) {
+static HRESULT format_mode(DEBUGHELPER *pHelper, const void *addr, char *pResult, size_t max)
+{
ir_mode mode;
if (copy_from_debuggee(addr, pHelper, &mode, sizeof(mode)) != S_OK)
/**
* format an ir_node
*/
-static HRESULT format_node(DEBUGHELPER *pHelper, int nBase, const void *addr, char *pResult, size_t max, int top) {
+static HRESULT format_node(DEBUGHELPER *pHelper, int nBase, const void *addr, char *pResult, size_t max, int top)
+{
ir_node n;
char name[256];
ir_op op;
/**
* format an extended block
*/
-static HRESULT format_extblk(DEBUGHELPER *pHelper, int nBase, const void *addr, char *pResult, size_t max){
+static HRESULT format_extblk(DEBUGHELPER *pHelper, int nBase, const void *addr, char *pResult, size_t max)
+{
ir_extblk extbb;
ir_arr_descr blocks;
ir_node *blks = NULL;
} /* format_pdeq */
/** show the first 2 units */
-static HRESULT fill_bits(DEBUGHELPER *pHelper, bitset_t *bs, char *pResult) {
+static HRESULT fill_bits(DEBUGHELPER *pHelper, bitset_t *bs, char *pResult)
+{
bitset_pos_t i, units = bs->units;
int l = 0, o = 0, breaked = 0;
unsigned j;
* Get the initial address. As the interface allows only 32 bit
* transmitted, new I/F must be asked for 64bit support.
*/
-static void *GetInitialAddress(DWORD dwAddress, DEBUGHELPER *pHelper) {
+static void *GetInitialAddress(DWORD dwAddress, DEBUGHELPER *pHelper)
+{
if (pHelper->dwVersion < 0x20000) {
/* VC 6.0 access */
return (void *)dwAddress;