#include "xmalloc.h"
/* Pointer Double Ended Queue */
-#define PDEQ_MAGIC1 FOURCC('P','D','E','1')
-#define PDEQ_MAGIC2 FOURCC('P','D','E','2')
+#define PDEQ_MAGIC1 FOURCC('P','D','E','1')
+#define PDEQ_MAGIC2 FOURCC('P','D','E','2')
/** Size of pdeq block cache. */
#define TUNE_NSAVED_PDEQS 16
*/
struct pdeq {
#ifndef NDEBUG
- unsigned magic; /**< debug magic */
+ unsigned magic; /**< debug magic */
#endif
- pdeq *l_end, *r_end; /**< left and right ends of the queue */
- pdeq *l, *r; /**< left and right neighbor */
- int n; /**< number of elements in the current chunk */
- int p; /**< the read/write pointer */
- const void *data[1]; /**< storage for elements */
+ pdeq *l_end, *r_end; /**< left and right ends of the queue */
+ pdeq *l, *r; /**< left and right neighbor */
+ int n; /**< number of elements in the current chunk */
+ int p; /**< the read/write pointer */
+ const void *data[1]; /**< storage for elements */
};
static inline void free_pdeq_block (pdeq *p)
{
#ifndef NDEBUG
- p->magic = 0xbadf00d1;
+ p->magic = 0xbadf00d1;
#endif
- if (pdeqs_cached < TUNE_NSAVED_PDEQS) {
- pdeq_block_cache[pdeqs_cached++] = p;
- } else {
- xfree (p);
- }
+ if (pdeqs_cached < TUNE_NSAVED_PDEQS) {
+ pdeq_block_cache[pdeqs_cached++] = p;
+ } else {
+ xfree (p);
+ }
}
/**
*/
static inline pdeq *alloc_pdeq_block (void)
{
- pdeq *p;
- if (TUNE_NSAVED_PDEQS && pdeqs_cached) {
- p = pdeq_block_cache[--pdeqs_cached];
- } else {
- p = xmalloc(PREF_MALLOC_SIZE);
- }
- return p;
+ pdeq *p;
+ if (TUNE_NSAVED_PDEQS && pdeqs_cached) {
+ p = pdeq_block_cache[--pdeqs_cached];
+ } else {
+ p = xmalloc(PREF_MALLOC_SIZE);
+ }
+ return p;
}
*/
void _pdeq_vrfy(pdeq *dq)
{
- pdeq *q;
-
- assert ( dq
- && (dq->magic == PDEQ_MAGIC1)
- && (dq->l_end && dq->r_end));
- q = dq->l_end;
- while (q) {
- assert ( ((q == dq) || (q->magic == PDEQ_MAGIC2))
- && ((q == dq->l_end) ^ (q->l != NULL))
- && ((q == dq->r_end) ^ (q->r != NULL))
- && (!q->l || (q == q->l->r))
- && ((q->n >= 0) && (q->n <= NDATA))
- && ((q == dq->l_end) || (q == dq->r_end) || (q->n == NDATA))
- && ((q->p >= 0) && (q->p < NDATA)));
- q = q->r;
- }
+ pdeq *q;
+
+ assert ( dq
+ && (dq->magic == PDEQ_MAGIC1)
+ && (dq->l_end && dq->r_end));
+ q = dq->l_end;
+ while (q) {
+ assert ( ((q == dq) || (q->magic == PDEQ_MAGIC2))
+ && ((q == dq->l_end) ^ (q->l != NULL))
+ && ((q == dq->r_end) ^ (q->r != NULL))
+ && (!q->l || (q == q->l->r))
+ && ((q->n >= 0) && (q->n <= NDATA))
+ && ((q == dq->l_end) || (q == dq->r_end) || (q->n == NDATA))
+ && ((q->p >= 0) && (q->p < NDATA)));
+ q = q->r;
+ }
}
#endif
/* Creates a new double ended pointer list. */
pdeq *new_pdeq(void)
{
- pdeq *dq;
+ pdeq *dq;
- dq = alloc_pdeq_block();
+ dq = alloc_pdeq_block();
#ifndef NDEBUG
- dq->magic = PDEQ_MAGIC1;
+ dq->magic = PDEQ_MAGIC1;
#endif
- dq->l_end = dq->r_end = dq;
- dq->l = dq->r = NULL;
- dq->n = dq->p = 0;
+ dq->l_end = dq->r_end = dq;
+ dq->l = dq->r = NULL;
+ dq->n = dq->p = 0;
- VRFY(dq);
- return dq;
+ VRFY(dq);
+ return dq;
}
/* Creates a new double ended pointer list and puts an initial pointer element in. */
pdeq *new_pdeq1(const void *x)
{
- return pdeq_putr(new_pdeq(), x);
+ return pdeq_putr(new_pdeq(), x);
}
/* Delete a double ended pointer list. */
void del_pdeq(pdeq *dq)
{
- pdeq *q, *qq;
+ pdeq *q, *qq;
- VRFY(dq);
+ VRFY(dq);
- q = dq->l_end; /* left end of chain */
- /* pdeq trunk empty, but !pdeq_empty() ==> trunk not in chain */
- if (dq->n == 0 && dq->l_end != dq ) {
- free_pdeq_block(dq);
- }
+ q = dq->l_end; /* left end of chain */
+ /* pdeq trunk empty, but !pdeq_empty() ==> trunk not in chain */
+ if (dq->n == 0 && dq->l_end != dq ) {
+ free_pdeq_block(dq);
+ }
- /* Free all blocks in the pdeq chain */
- do {
- qq = q->r;
- free_pdeq_block(q);
- } while ((q = qq));
+ /* Free all blocks in the pdeq chain */
+ do {
+ qq = q->r;
+ free_pdeq_block(q);
+ } while ((q = qq));
}
/* Checks if a list is empty. */
int pdeq_empty(pdeq *dq)
{
- VRFY(dq);
- return dq->l_end->n == 0;
+ VRFY(dq);
+ return dq->l_end->n == 0;
}
/* Returns the length of a double ended pointer list. */
int pdeq_len(pdeq *dq)
{
- int n;
- pdeq *q;
+ int n;
+ pdeq *q;
- VRFY(dq);
+ VRFY(dq);
- n = 0;
- q = dq->l_end;
- do {
- n += q->n;
- q = q->r;
- } while (q);
+ n = 0;
+ q = dq->l_end;
+ do {
+ n += q->n;
+ q = q->r;
+ } while (q);
- return n;
+ return n;
}
/* Add a pointer to the right side of a double ended pointer list. */
pdeq *pdeq_putr(pdeq *dq, const void *x)
{
- pdeq *rdq;
- int n;
+ pdeq *rdq;
+ int n;
- VRFY(dq);
+ VRFY(dq);
- rdq = dq->r_end;
- if (rdq->n >= NDATA) { /* tailblock full */
- pdeq *ndq;
+ rdq = dq->r_end;
+ if (rdq->n >= NDATA) { /* tailblock full */
+ pdeq *ndq;
- ndq = dq; /* try to reuse trunk, but ... */
- if (dq->n) { /* ... if trunk used */
- /* allocate and init new block */
- ndq = alloc_pdeq_block();
+ ndq = dq; /* try to reuse trunk, but ... */
+ if (dq->n) { /* ... if trunk used */
+ /* allocate and init new block */
+ ndq = alloc_pdeq_block();
#ifndef NDEBUG
- ndq->magic = PDEQ_MAGIC2;
+ ndq->magic = PDEQ_MAGIC2;
#endif
- ndq->l_end = ndq->r_end = NULL;
- }
+ ndq->l_end = ndq->r_end = NULL;
+ }
- ndq->r = NULL;
- ndq->l = rdq; rdq->r = ndq;
- ndq->n = 0; ndq->p = 0;
- dq->r_end = ndq;
- rdq = ndq;
- }
+ ndq->r = NULL;
+ ndq->l = rdq; rdq->r = ndq;
+ ndq->n = 0; ndq->p = 0;
+ dq->r_end = ndq;
+ rdq = ndq;
+ }
- n = rdq->n++ + rdq->p;
- if (n >= NDATA) n -= NDATA;
+ n = rdq->n++ + rdq->p;
+ if (n >= NDATA) n -= NDATA;
- rdq->data[n] = x;
+ rdq->data[n] = x;
- VRFY(dq);
- return dq;
+ VRFY(dq);
+ return dq;
}
/* Add a pointer to the left side of a double ended pointer list. */
pdeq *pdeq_putl(pdeq *dq, const void *x)
{
- pdeq *ldq;
- int p;
+ pdeq *ldq;
+ int p;
- VRFY(dq);
+ VRFY(dq);
- ldq = dq->l_end;
- if (ldq->n >= NDATA) { /* headblock full */
- pdeq *ndq;
+ ldq = dq->l_end;
+ if (ldq->n >= NDATA) { /* headblock full */
+ pdeq *ndq;
- ndq = dq; /* try to reuse trunk, but ... */
- if (dq->n) { /* ... if trunk used */
- /* allocate and init new block */
- ndq = alloc_pdeq_block();
+ ndq = dq; /* try to reuse trunk, but ... */
+ if (dq->n) { /* ... if trunk used */
+ /* allocate and init new block */
+ ndq = alloc_pdeq_block();
#ifndef NDEBUG
- ndq->magic = PDEQ_MAGIC2;
+ ndq->magic = PDEQ_MAGIC2;
#endif
- ndq->l_end = ndq->r_end = NULL;
- }
+ ndq->l_end = ndq->r_end = NULL;
+ }
- ndq->l = NULL;
- ndq->r = ldq; ldq->l = ndq;
- ndq->n = 0; ndq->p = 0;
- dq->l_end = ndq;
- ldq = ndq;
- }
+ ndq->l = NULL;
+ ndq->r = ldq; ldq->l = ndq;
+ ndq->n = 0; ndq->p = 0;
+ dq->l_end = ndq;
+ ldq = ndq;
+ }
- ldq->n++;
- p = ldq->p - 1;
- if (p < 0) p += NDATA;
- ldq->p = p;
+ ldq->n++;
+ p = ldq->p - 1;
+ if (p < 0) p += NDATA;
+ ldq->p = p;
- ldq->data[p] = x;
+ ldq->data[p] = x;
- VRFY(dq);
- return dq;
+ VRFY(dq);
+ return dq;
}
/* Retrieve a pointer from the right side of a double ended pointer list. */
void *pdeq_getr(pdeq *dq)
{
- pdeq *rdq;
- const void *x;
- int n;
-
- VRFY(dq);
- assert(dq->l_end->n);
-
- rdq = dq->r_end;
- n = rdq->p + --rdq->n;
- if (n >= NDATA) n -= NDATA;
- x = rdq->data[n];
-
- if (rdq->n == 0) {
- if (rdq->l) {
- dq->r_end = rdq->l;
- rdq->l->r = NULL;
- rdq->l = NULL;
- } else {
- dq->r_end = dq->l_end = dq;
- }
- if (dq != rdq) {
- free_pdeq_block(rdq);
- }
- }
-
- VRFY(dq);
- return (void *)x;
+ pdeq *rdq;
+ const void *x;
+ int n;
+
+ VRFY(dq);
+ assert(dq->l_end->n);
+
+ rdq = dq->r_end;
+ n = rdq->p + --rdq->n;
+ if (n >= NDATA) n -= NDATA;
+ x = rdq->data[n];
+
+ if (rdq->n == 0) {
+ if (rdq->l) {
+ dq->r_end = rdq->l;
+ rdq->l->r = NULL;
+ rdq->l = NULL;
+ } else {
+ dq->r_end = dq->l_end = dq;
+ }
+ if (dq != rdq) {
+ free_pdeq_block(rdq);
+ }
+ }
+
+ VRFY(dq);
+ return (void *)x;
}
/* Retrieve a pointer from the left side of a double ended pointer list. */
void *pdeq_getl(pdeq *dq)
{
- pdeq *ldq;
- const void *x;
- int p;
-
- VRFY(dq);
- assert(dq->l_end->n);
-
- ldq = dq->l_end;
- p = ldq->p;
- x = ldq->data[p];
- if (++p >= NDATA) p = 0;
- ldq->p = p;
-
- if (--ldq->n == 0) {
- if (ldq->r) {
- dq->l_end = ldq->r;
- ldq->r->l = NULL;
- ldq->r = NULL;
- } else {
- dq->l_end = dq->r_end = dq;
- }
- if (dq != ldq) {
- free_pdeq_block(ldq);
- }
- }
-
- VRFY(dq);
- return (void *)x;
+ pdeq *ldq;
+ const void *x;
+ int p;
+
+ VRFY(dq);
+ assert(dq->l_end->n);
+
+ ldq = dq->l_end;
+ p = ldq->p;
+ x = ldq->data[p];
+ if (++p >= NDATA) p = 0;
+ ldq->p = p;
+
+ if (--ldq->n == 0) {
+ if (ldq->r) {
+ dq->l_end = ldq->r;
+ ldq->r->l = NULL;
+ ldq->r = NULL;
+ } else {
+ dq->l_end = dq->r_end = dq;
+ }
+ if (dq != ldq) {
+ free_pdeq_block(ldq);
+ }
+ }
+
+ VRFY(dq);
+ return (void *)x;
}
/*
*/
int pdeq_contains(pdeq *dq, const void *x)
{
- pdeq *q;
+ pdeq *q;
- VRFY(dq);
+ VRFY(dq);
- q = dq->l_end;
- do {
- int p, ep;
+ q = dq->l_end;
+ do {
+ int p, ep;
- p = q->p; ep = p + q->n;
+ p = q->p; ep = p + q->n;
- if (ep > NDATA) {
- do {
- if (q->data[p] == x) return 1;
- } while (++p < NDATA);
- p = 0;
- ep -= NDATA;
- }
+ if (ep > NDATA) {
+ do {
+ if (q->data[p] == x) return 1;
+ } while (++p < NDATA);
+ p = 0;
+ ep -= NDATA;
+ }
- while (p < ep) {
- if (q->data[p++] == x) return 1;
- }
+ while (p < ep) {
+ if (q->data[p++] == x) return 1;
+ }
- q = q->r;
- } while (q);
+ q = q->r;
+ } while (q);
- return 0;
+ return 0;
}
/*
*/
void *pdeq_search(pdeq *dq, cmp_fun cmp, const void *key)
{
- pdeq *q;
- int p;
+ pdeq *q;
+ int p;
- VRFY(dq);
+ VRFY(dq);
- q = dq->l_end;
- do {
- int ep;
+ q = dq->l_end;
+ do {
+ int ep;
- p = q->p; ep = p + q->n;
+ p = q->p; ep = p + q->n;
- if (ep > NDATA) {
- do {
- if (!cmp (q->data[p], key)) return (void *)q->data[p-1];
- } while (++p < NDATA);
- p = 0;
- ep -= NDATA;
- }
+ if (ep > NDATA) {
+ do {
+ if (!cmp (q->data[p], key)) return (void *)q->data[p-1];
+ } while (++p < NDATA);
+ p = 0;
+ ep -= NDATA;
+ }
- while (p < ep) {
- if (!cmp (q->data[p++], key)) return (void *)q->data[p-1];
- }
+ while (p < ep) {
+ if (!cmp (q->data[p++], key)) return (void *)q->data[p-1];
+ }
- q = q->r;
- } while (q);
+ q = q->r;
+ } while (q);
- return NULL;
+ return NULL;
}
/*
*/
void **pdeq_copyl(pdeq *dq, const void **dst)
{
- pdeq *q;
- const void **d = dst;
+ pdeq *q;
+ const void **d = dst;
- VRFY(dq);
+ VRFY(dq);
- q = dq->l_end;
- while (q) {
- int p, n;
+ q = dq->l_end;
+ while (q) {
+ int p, n;
- p = q->p; n = q->n;
+ p = q->p; n = q->n;
- if (n + p > NDATA) {
- int nn = NDATA - p;
- memcpy((void *) d, &q->data[p], nn * sizeof(void *)); d += nn;
- p = 0; n -= nn;
- }
+ if (n + p > NDATA) {
+ int nn = NDATA - p;
+ memcpy((void *) d, &q->data[p], nn * sizeof(void *)); d += nn;
+ p = 0; n -= nn;
+ }
- memcpy((void *) d, &q->data[p], n * sizeof(void *)); d += n;
+ memcpy((void *) d, &q->data[p], n * sizeof(void *)); d += n;
- q = q->r;
- }
+ q = q->r;
+ }
- return (void **)dst;
+ return (void **)dst;
}
/*
*/
void **pdeq_copyr(pdeq *dq, const void **dst)
{
- pdeq *q;
- const void **d = dst;
+ pdeq *q;
+ const void **d = dst;
- VRFY(dq);
+ VRFY(dq);
- q = dq->r_end;
- while (q) {
- int p, i;
+ q = dq->r_end;
+ while (q) {
+ int p, i;
- p = q->p; i = q->n + p - 1;
- if (i >= NDATA) {
- i -= NDATA;
- do *d++ = q->data[i]; while (--i >= 0);
- i = NDATA - 1;
- }
+ p = q->p; i = q->n + p - 1;
+ if (i >= NDATA) {
+ i -= NDATA;
+ do *d++ = q->data[i]; while (--i >= 0);
+ i = NDATA - 1;
+ }
- do *d++ = q->data[i]; while (--i >= p);
+ do *d++ = q->data[i]; while (--i >= p);
- q = q->l;
- }
+ q = q->l;
+ }
- return (void **)dst;
+ return (void **)dst;
}
list->first_element = newElement;
}
- element->prev = newElement;
+ element->prev = newElement;
++list->element_count;
}
struct pmap {
- int dummy; /* dummy entry */
+ int dummy; /* dummy entry */
};
/* Creates a new empty map with an initial number of slots. */
pmap *pmap_create_ex(int slots)
{
- return (pmap *)new_set(pmap_entry_cmp, slots);
+ return (pmap *)new_set(pmap_entry_cmp, slots);
}
pmap *pmap_create(void)
#include "obst.h"
-#define SEGMENT_SIZE_SHIFT 8
-#define SEGMENT_SIZE (1 << SEGMENT_SIZE_SHIFT)
-#define DIRECTORY_SIZE_SHIFT 8
-#define DIRECTORY_SIZE (1 << DIRECTORY_SIZE_SHIFT)
-#define MAX_LOAD_FACTOR 4
+#define SEGMENT_SIZE_SHIFT 8
+#define SEGMENT_SIZE (1 << SEGMENT_SIZE_SHIFT)
+#define DIRECTORY_SIZE_SHIFT 8
+#define DIRECTORY_SIZE (1 << DIRECTORY_SIZE_SHIFT)
+#define MAX_LOAD_FACTOR 4
typedef struct element {
- struct element *chain; /**< for chaining Elements */
- MANGLEP (entry) entry;
+ struct element *chain; /**< for chaining Elements */
+ MANGLEP (entry) entry;
} Element, *Segment;
struct SET {
- unsigned p; /**< Next bucket to be split */
- unsigned maxp; /**< upper bound on p during expansion */
- unsigned nkey; /**< current # keys */
- unsigned nseg; /**< current # segments */
- Segment *dir[DIRECTORY_SIZE];
- MANGLEP(cmp_fun) cmp; /**< function comparing entries */
- unsigned iter_i, iter_j;
- Element *iter_tail; /**< non-NULL while iterating over elts */
+ unsigned p; /**< Next bucket to be split */
+ unsigned maxp; /**< upper bound on p during expansion */
+ unsigned nkey; /**< current # keys */
+ unsigned nseg; /**< current # segments */
+ Segment *dir[DIRECTORY_SIZE];
+ MANGLEP(cmp_fun) cmp; /**< function comparing entries */
+ unsigned iter_i, iter_j;
+ Element *iter_tail; /**< non-NULL while iterating over elts */
#ifdef PSET
- Element *free_list; /**< list of free Elements */
+ Element *free_list; /**< list of free Elements */
#endif
- struct obstack obst; /**< obstack for allocation all data */
+ struct obstack obst; /**< obstack for allocation all data */
#ifdef STATS
- int naccess, ncollision, ndups;
- int max_chain_len;
+ int naccess, ncollision, ndups;
+ int max_chain_len;
#endif
#ifdef DEBUG
- const char *tag; /**< an optionally tag for distinguishing sets */
+ const char *tag; /**< an optional tag for distinguishing sets */
#endif
};
void MANGLEP(stats) (SET *table)
{
- int nfree = 0;
+ int nfree = 0;
#ifdef PSET
- Element *q = table->free_list;
- while (q) { q = q->chain; ++nfree; }
+ Element *q = table->free_list;
+ while (q) { q = q->chain; ++nfree; }
#endif
- printf (" accesses collisions keys duplicates longest wasted\n%12d%12d%12d%12d%12d%12d\n",
- table->naccess, table->ncollision, table->nkey, table->ndups, table->max_chain_len, nfree);
+ printf (" accesses collisions keys duplicates longest wasted\n%12d%12d%12d%12d%12d%12d\n",
+ table->naccess, table->ncollision, table->nkey, table->ndups, table->max_chain_len, nfree);
}
static inline void stat_chain_len(SET *table, int chain_len)
{
- table->ncollision += chain_len;
- if (table->max_chain_len < chain_len) table->max_chain_len = chain_len;
+ table->ncollision += chain_len;
+ if (table->max_chain_len < chain_len) table->max_chain_len = chain_len;
}
# define stat_access(table) (++(table)->naccess)
void MANGLEP(describe) (SET *table)
{
- unsigned i, j, collide;
- Element *ptr;
- Segment *seg;
-
- printf ("p=%u maxp=%u nkey=%u nseg=%u\n",
- table->p, table->maxp, table->nkey, table->nseg);
- for (i = 0; i < table->nseg; i++) {
- seg = table->dir[i];
- for (j = 0; j < SEGMENT_SIZE; j++) {
- collide = 0;
- ptr = seg[j];
- while (ptr) {
- if (collide) printf ("<%3d>", collide);
- else printf ("table");
- printf ("[%d][%3d]: %u %p\n", i, j, ptr->entry.hash, (void *)ptr->entry.dptr);
- ptr = ptr->chain;
- collide++;
- }
- }
- }
+ unsigned i, j, collide;
+ Element *ptr;
+ Segment *seg;
+
+ printf ("p=%u maxp=%u nkey=%u nseg=%u\n",
+ table->p, table->maxp, table->nkey, table->nseg);
+ for (i = 0; i < table->nseg; i++) {
+ seg = table->dir[i];
+ for (j = 0; j < SEGMENT_SIZE; j++) {
+ collide = 0;
+ ptr = seg[j];
+ while (ptr) {
+ if (collide) printf ("<%3d>", collide);
+ else printf ("table");
+ printf ("[%d][%3d]: %u %p\n", i, j, ptr->entry.hash, (void *)ptr->entry.dptr);
+ ptr = ptr->chain;
+ collide++;
+ }
+ }
+ }
#ifdef STATS
- MANGLEP(stats)(table);
+ MANGLEP(stats)(table);
#endif
}
SET *(PMANGLE(new)) (MANGLEP(cmp_fun) cmp, int nslots)
{
- int i;
- SET *table = XMALLOC(SET);
-
- if (nslots > SEGMENT_SIZE * DIRECTORY_SIZE)
- nslots = DIRECTORY_SIZE;
- else {
- assert (nslots >= 0);
- /* Adjust nslots up to next power of 2, minimum SEGMENT_SIZE */
- for (i = SEGMENT_SIZE; i < nslots; i <<= 1) {
+ int i;
+ SET *table = XMALLOC(SET);
+
+ if (nslots > SEGMENT_SIZE * DIRECTORY_SIZE)
+ nslots = DIRECTORY_SIZE;
+ else {
+ assert (nslots >= 0);
+ /* Adjust nslots up to next power of 2, minimum SEGMENT_SIZE */
+ for (i = SEGMENT_SIZE; i < nslots; i <<= 1) {
+ }
+ nslots = i >> SEGMENT_SIZE_SHIFT;
}
- nslots = i >> SEGMENT_SIZE_SHIFT;
- }
- table->nseg = table->p = table->nkey = 0;
- table->maxp = nslots << SEGMENT_SIZE_SHIFT;
- table->cmp = cmp;
- table->iter_tail = NULL;
+ table->nseg = table->p = table->nkey = 0;
+ table->maxp = nslots << SEGMENT_SIZE_SHIFT;
+ table->cmp = cmp;
+ table->iter_tail = NULL;
#ifdef PSET
- table->free_list = NULL;
+ table->free_list = NULL;
#endif
- obstack_init (&table->obst);
+ obstack_init (&table->obst);
- /* Make segments */
- for (i = 0; i < nslots; ++i) {
- table->dir[i] = OALLOCNZ(&table->obst, Segment, SEGMENT_SIZE);
- table->nseg++;
- }
+ /* Make segments */
+ for (i = 0; i < nslots; ++i) {
+ table->dir[i] = OALLOCNZ(&table->obst, Segment, SEGMENT_SIZE);
+ table->nseg++;
+ }
#ifdef STATS
- table->naccess = table->ncollision = table->ndups = 0;
- table->max_chain_len = 0;
+ table->naccess = table->ncollision = table->ndups = 0;
+ table->max_chain_len = 0;
#endif
#ifdef DEBUG
- table->tag = MANGLEP(tag);
+ table->tag = MANGLEP(tag);
#endif
- return table;
+ return table;
}
void PMANGLE(del) (SET *table)
{
#ifdef DEBUG
- MANGLEP(tag) = table->tag;
+ MANGLEP(tag) = table->tag;
#endif
- obstack_free (&table->obst, NULL);
- xfree (table);
+ obstack_free (&table->obst, NULL);
+ xfree (table);
}
int MANGLEP(count) (SET *table)
{
- return table->nkey;
+ return table->nkey;
}
/*
*/
static inline int iter_step(SET *table)
{
- if (++table->iter_j >= SEGMENT_SIZE) {
- table->iter_j = 0;
- if (++table->iter_i >= table->nseg) {
- table->iter_i = 0;
- return 0;
- }
- }
- return 1;
+ if (++table->iter_j >= SEGMENT_SIZE) {
+ table->iter_j = 0;
+ if (++table->iter_i >= table->nseg) {
+ table->iter_i = 0;
+ return 0;
+ }
+ }
+ return 1;
}
/*
*/
void * MANGLEP(first) (SET *table)
{
- assert (!table->iter_tail);
- table->iter_i = 0;
- table->iter_j = 0;
- while (!table->dir[table->iter_i][table->iter_j]) {
- if (!iter_step (table)) return NULL;
- }
- table->iter_tail = table->dir[table->iter_i][table->iter_j];
- assert (table->iter_tail->entry.dptr);
- return table->iter_tail->entry.dptr;
+ assert (!table->iter_tail);
+ table->iter_i = 0;
+ table->iter_j = 0;
+ while (!table->dir[table->iter_i][table->iter_j]) {
+ if (!iter_step (table)) return NULL;
+ }
+ table->iter_tail = table->dir[table->iter_i][table->iter_j];
+ assert (table->iter_tail->entry.dptr);
+ return table->iter_tail->entry.dptr;
}
/*
*/
void *MANGLEP(next) (SET *table)
{
- if (!table->iter_tail)
- return NULL;
-
- /* follow collision chain */
- table->iter_tail = table->iter_tail->chain;
- if (!table->iter_tail) {
- /* go to next segment */
- do {
- if (!iter_step (table)) return NULL;
- } while (!table->dir[table->iter_i][table->iter_j]);
- table->iter_tail = table->dir[table->iter_i][table->iter_j];
- }
- assert (table->iter_tail->entry.dptr);
- return table->iter_tail->entry.dptr;
+ if (!table->iter_tail)
+ return NULL;
+
+ /* follow collision chain */
+ table->iter_tail = table->iter_tail->chain;
+ if (!table->iter_tail) {
+ /* go to next segment */
+ do {
+ if (!iter_step (table)) return NULL;
+ } while (!table->dir[table->iter_i][table->iter_j]);
+ table->iter_tail = table->dir[table->iter_i][table->iter_j];
+ }
+ assert (table->iter_tail->entry.dptr);
+ return table->iter_tail->entry.dptr;
}
void MANGLEP(break) (SET *table)
{
- table->iter_tail = NULL;
+ table->iter_tail = NULL;
}
/*
*/
static inline unsigned Hash(SET *table, unsigned h)
{
- unsigned address;
- address = h & (table->maxp - 1); /* h % table->maxp */
- if (address < (unsigned)table->p)
- address = h & ((table->maxp << 1) - 1); /* h % (2*table->maxp) */
- return address;
+ unsigned address;
+ address = h & (table->maxp - 1); /* h % table->maxp */
+ if (address < (unsigned)table->p)
+ address = h & ((table->maxp << 1) - 1); /* h % (2*table->maxp) */
+ return address;
}
/*
*/
static inline int loaded(SET *table)
{
- return ( ++table->nkey
- > (table->nseg << SEGMENT_SIZE_SHIFT) * MAX_LOAD_FACTOR);
+ return ( ++table->nkey
+ > (table->nseg << SEGMENT_SIZE_SHIFT) * MAX_LOAD_FACTOR);
}
/*
*/
static void expand_table(SET *table)
{
- unsigned NewAddress;
- int OldSegmentIndex, NewSegmentIndex;
- int OldSegmentDir, NewSegmentDir;
- Segment *OldSegment;
- Segment *NewSegment;
- Element *Current;
- Element **Previous;
- Element **LastOfNew;
-
- if (table->maxp + table->p < (DIRECTORY_SIZE << SEGMENT_SIZE_SHIFT)) {
- /* Locate the bucket to be split */
- OldSegmentDir = table->p >> SEGMENT_SIZE_SHIFT;
- OldSegment = table->dir[OldSegmentDir];
- OldSegmentIndex = table->p & (SEGMENT_SIZE-1);
-
- /* Expand address space; if necessary create a new segment */
- NewAddress = table->maxp + table->p;
- NewSegmentDir = NewAddress >> SEGMENT_SIZE_SHIFT;
- NewSegmentIndex = NewAddress & (SEGMENT_SIZE-1);
- if (NewSegmentIndex == 0) {
- table->dir[NewSegmentDir] = OALLOCNZ(&table->obst, Segment, SEGMENT_SIZE);
- table->nseg++;
- }
- NewSegment = table->dir[NewSegmentDir];
-
- /* Adjust state variables */
- table->p++;
- if (table->p == table->maxp) {
- table->maxp <<= 1; /* table->maxp *= 2 */
- table->p = 0;
- }
-
- /* Relocate records to the new bucket */
- Previous = &OldSegment[OldSegmentIndex];
- Current = *Previous;
- LastOfNew = &NewSegment[NewSegmentIndex];
- *LastOfNew = NULL;
- while (Current != NULL) {
- if (Hash (table, Current->entry.hash) == NewAddress) {
- /* move to new chain */
- *LastOfNew = Current;
- *Previous = Current->chain;
- LastOfNew = &Current->chain;
- Current = Current->chain;
- *LastOfNew = NULL;
- } else {
- /* leave on old chain */
- Previous = &Current->chain;
- Current = Current->chain;
- }
- }
- }
+ unsigned NewAddress;
+ int OldSegmentIndex, NewSegmentIndex;
+ int OldSegmentDir, NewSegmentDir;
+ Segment *OldSegment;
+ Segment *NewSegment;
+ Element *Current;
+ Element **Previous;
+ Element **LastOfNew;
+
+ if (table->maxp + table->p < (DIRECTORY_SIZE << SEGMENT_SIZE_SHIFT)) {
+ /* Locate the bucket to be split */
+ OldSegmentDir = table->p >> SEGMENT_SIZE_SHIFT;
+ OldSegment = table->dir[OldSegmentDir];
+ OldSegmentIndex = table->p & (SEGMENT_SIZE-1);
+
+ /* Expand address space; if necessary create a new segment */
+ NewAddress = table->maxp + table->p;
+ NewSegmentDir = NewAddress >> SEGMENT_SIZE_SHIFT;
+ NewSegmentIndex = NewAddress & (SEGMENT_SIZE-1);
+ if (NewSegmentIndex == 0) {
+ table->dir[NewSegmentDir] = OALLOCNZ(&table->obst, Segment, SEGMENT_SIZE);
+ table->nseg++;
+ }
+ NewSegment = table->dir[NewSegmentDir];
+
+ /* Adjust state variables */
+ table->p++;
+ if (table->p == table->maxp) {
+ table->maxp <<= 1; /* table->maxp *= 2 */
+ table->p = 0;
+ }
+
+ /* Relocate records to the new bucket */
+ Previous = &OldSegment[OldSegmentIndex];
+ Current = *Previous;
+ LastOfNew = &NewSegment[NewSegmentIndex];
+ *LastOfNew = NULL;
+ while (Current != NULL) {
+ if (Hash (table, Current->entry.hash) == NewAddress) {
+ /* move to new chain */
+ *LastOfNew = Current;
+ *Previous = Current->chain;
+ LastOfNew = &Current->chain;
+ Current = Current->chain;
+ *LastOfNew = NULL;
+ } else {
+ /* leave on old chain */
+ Previous = &Current->chain;
+ Current = Current->chain;
+ }
+ }
+ }
}
void * MANGLE(_,_search) (SET *table,
- const void *key,
+ const void *key,
#ifndef PSET
- size_t size,
+ size_t size,
#endif
- unsigned hash,
- MANGLE(_,_action) action)
+ unsigned hash,
+ MANGLE(_,_action) action)
{
- unsigned h;
- Segment *CurrentSegment;
- int SegmentIndex;
- MANGLEP(cmp_fun) cmp = table->cmp;
- Segment q;
- int chain_len = 0;
-
- assert (table);
- assert (key);
+ unsigned h;
+ Segment *CurrentSegment;
+ int SegmentIndex;
+ MANGLEP(cmp_fun) cmp = table->cmp;
+ Segment q;
+ int chain_len = 0;
+
+ assert (table);
+ assert (key);
#ifdef DEBUG
- MANGLEP(tag) = table->tag;
+ MANGLEP(tag) = table->tag;
#endif
- stat_access (table);
-
- /* Find collision chain */
- h = Hash (table, hash);
- SegmentIndex = h & (SEGMENT_SIZE-1);
- CurrentSegment = table->dir[h >> SEGMENT_SIZE_SHIFT];
- assert (CurrentSegment != NULL);
- q = CurrentSegment[SegmentIndex];
-
- /* Follow collision chain */
- while (q && !EQUAL (cmp, q, key, size)) {
- q = q->chain;
- ++chain_len;
- }
+ stat_access (table);
+
+ /* Find collision chain */
+ h = Hash (table, hash);
+ SegmentIndex = h & (SEGMENT_SIZE-1);
+ CurrentSegment = table->dir[h >> SEGMENT_SIZE_SHIFT];
+ assert (CurrentSegment != NULL);
+ q = CurrentSegment[SegmentIndex];
+
+ /* Follow collision chain */
+ while (q && !EQUAL (cmp, q, key, size)) {
+ q = q->chain;
+ ++chain_len;
+ }
- stat_chain_len (table, chain_len);
+ stat_chain_len (table, chain_len);
- if (!q && (action != MANGLE(_,_find))) { /* not found, insert */
- assert (!table->iter_tail && "insert an element into a set that is iterated");
+ if (!q && (action != MANGLE(_,_find))) { /* not found, insert */
+ assert (!table->iter_tail && "insert an element into a set that is iterated");
- if (CurrentSegment[SegmentIndex]) stat_dup (table);
+ if (CurrentSegment[SegmentIndex]) stat_dup (table);
#ifdef PSET
- if (table->free_list) {
- q = table->free_list;
- table->free_list = table->free_list->chain;
- } else {
- q = OALLOC(&table->obst, Element);
- }
- q->entry.dptr = (void *)key;
+ if (table->free_list) {
+ q = table->free_list;
+ table->free_list = table->free_list->chain;
+ } else {
+ q = OALLOC(&table->obst, Element);
+ }
+ q->entry.dptr = (void *)key;
#else
- obstack_blank (&table->obst, offsetof (Element, entry.dptr));
- if (action == _set_hinsert0)
- obstack_grow0 (&table->obst, key, size);
- else
- obstack_grow (&table->obst, key, size);
- q = obstack_finish (&table->obst);
- q->entry.size = size;
+ obstack_blank (&table->obst, offsetof (Element, entry.dptr));
+ if (action == _set_hinsert0)
+ obstack_grow0 (&table->obst, key, size);
+ else
+ obstack_grow (&table->obst, key, size);
+ q = obstack_finish (&table->obst);
+ q->entry.size = size;
#endif
- q->chain = CurrentSegment[SegmentIndex];
- q->entry.hash = hash;
- CurrentSegment[SegmentIndex] = q;
+ q->chain = CurrentSegment[SegmentIndex];
+ q->entry.hash = hash;
+ CurrentSegment[SegmentIndex] = q;
- if (loaded (table)) {
- expand_table(table); /* doesn't affect q */
- }
- }
+ if (loaded (table)) {
+ expand_table(table); /* doesn't affect q */
+ }
+ }
- if (!q) return NULL;
+ if (!q) return NULL;
#ifdef PSET
- if (action == _pset_hinsert) return &q->entry;
+ if (action == _pset_hinsert) return &q->entry;
#else
- if (action == _set_hinsert || action == _set_hinsert0) return &q->entry;
+ if (action == _set_hinsert || action == _set_hinsert0) return &q->entry;
#endif
- return q->entry.dptr;
+ return q->entry.dptr;
}
void *pset_remove(SET *table, const void *key, unsigned hash)
{
- unsigned h;
- Segment *CurrentSegment;
- int SegmentIndex;
- pset_cmp_fun cmp = table->cmp;
- Segment *p;
- Segment q;
- int chain_len = 0;
-
- assert (table && !table->iter_tail);
- stat_access (table);
-
- /* Find collision chain */
- h = Hash (table, hash);
- SegmentIndex = h & (SEGMENT_SIZE-1);
- CurrentSegment = table->dir[h >> SEGMENT_SIZE_SHIFT];
- assert (CurrentSegment != NULL);
- p = &CurrentSegment[SegmentIndex];
-
- /* Follow collision chain */
- while (!EQUAL (cmp, *p, key, size)) {
- p = &(*p)->chain;
- assert (*p);
- ++chain_len;
- }
-
- stat_chain_len (table, chain_len);
-
- q = *p;
-
- if (q == table->iter_tail) {
- /* removing current element */
- table->iter_tail = q->chain;
- if (!table->iter_tail) {
- /* go to next segment */
- do {
- if (!iter_step (table))
- break;
- } while (!table->dir[table->iter_i][table->iter_j]);
- table->iter_tail = table->dir[table->iter_i][table->iter_j];
- }
- }
-
- *p = (*p)->chain;
- q->chain = table->free_list;
- table->free_list = q;
- --table->nkey;
-
- return q->entry.dptr;
+ unsigned h;
+ Segment *CurrentSegment;
+ int SegmentIndex;
+ pset_cmp_fun cmp = table->cmp;
+ Segment *p;
+ Segment q;
+ int chain_len = 0;
+
+ assert (table && !table->iter_tail);
+ stat_access (table);
+
+ /* Find collision chain */
+ h = Hash (table, hash);
+ SegmentIndex = h & (SEGMENT_SIZE-1);
+ CurrentSegment = table->dir[h >> SEGMENT_SIZE_SHIFT];
+ assert (CurrentSegment != NULL);
+ p = &CurrentSegment[SegmentIndex];
+
+ /* Follow collision chain */
+ while (!EQUAL (cmp, *p, key, size)) {
+ p = &(*p)->chain;
+ assert (*p);
+ ++chain_len;
+ }
+
+ stat_chain_len (table, chain_len);
+
+ q = *p;
+
+ if (q == table->iter_tail) {
+ /* removing current element */
+ table->iter_tail = q->chain;
+ if (!table->iter_tail) {
+ /* go to next segment */
+ do {
+ if (!iter_step (table))
+ break;
+ } while (!table->dir[table->iter_i][table->iter_j]);
+ table->iter_tail = table->dir[table->iter_i][table->iter_j];
+ }
+ }
+
+ *p = (*p)->chain;
+ q->chain = table->free_list;
+ table->free_list = q;
+ --table->nkey;
+
+ return q->entry.dptr;
}
void *(pset_find) (SET *se, const void *key, unsigned hash)
{
- return pset_find (se, key, hash);
+ return pset_find (se, key, hash);
}
void *(pset_insert) (SET *se, const void *key, unsigned hash)
{
- return pset_insert (se, key, hash);
+ return pset_insert (se, key, hash);
}
-MANGLEP(entry) *
+ MANGLEP(entry) *
(pset_hinsert) (SET *se, const void *key, unsigned hash)
{
- return pset_hinsert (se, key, hash);
+ return pset_hinsert (se, key, hash);
}
void pset_insert_pset_ptr(pset *target, pset *src)
{
- void *elt;
- for (elt = pset_first(src); elt; elt = pset_next(src)) {
- pset_insert_ptr(target, elt);
- }
+ void *elt;
+ for (elt = pset_first(src); elt; elt = pset_next(src)) {
+ pset_insert_ptr(target, elt);
+ }
}
#else /* !PSET */
void *(set_find) (set *se, const void *key, size_t size, unsigned hash)
{
- return set_find (se, key, size, hash);
+ return set_find (se, key, size, hash);
}
void *(set_insert) (set *se, const void *key, size_t size, unsigned hash)
{
- return set_insert (se, key, size, hash);
+ return set_insert (se, key, size, hash);
}
set_entry *(set_hinsert) (set *se, const void *key, size_t size, unsigned hash)
{
- return set_hinsert (se, key, size, hash);
+ return set_hinsert (se, key, size, hash);
}
#endif /* !PSET */
const char *s, *style;
int weight;
-#define XXX(e) case DFS_EDGE_ ## e: s = #e; break
+#define XXX(e) case DFS_EDGE_ ## e: s = #e; break
switch (edge->kind) {
XXX(FWD);
XXX(CROSS);
#endif
-#define EPSILON 1e-5
+#define EPSILON 1e-5
#define UNDEF(x) (fabs(x) < EPSILON)
#define SEIDEL_TOLERANCE 1e-7
}
assert(other_blk);
- /*
+ /*
* Note the special case here: if block is a then, there might be no else
* block. In that case the other_block is the user_blk itself and pred_block
* is the cond_block ...
tmp_dom_info *u;
if (is_Bad(pred) || (get_Block_dom_pre_num (pred) == -1))
- continue; /* control-dead */
+ continue; /* control-dead */
u = dom_eval (&tdi_list[get_Block_dom_pre_num(pred)]);
if (u->semi < w->semi) w->semi = u->semi;
tmp_dom_info *u;
if (!is_Block(pred) || get_Block_dom_pre_num(pred) == -1)
- continue; /* control-dead */
+ continue; /* control-dead */
u = dom_eval (&tdi_list[get_Block_dom_pre_num(pred)]);
if (u->semi < w->semi) w->semi = u->semi;
tmp_dom_info *u;
if (get_Block_postdom_pre_num (succ) == -1)
- continue; /* endless-loop */
+ continue; /* endless-loop */
u = dom_eval (&tdi_list[get_Block_postdom_pre_num(succ)]);
if (u->semi < w->semi) w->semi = u->semi;
ir_node *best_succ = NULL;
double best_execfreq = -1;
- /*
+ /*
More than two successors means we have a jump table.
we cannot include a jump target into the current extended
basic block, so create a new one here.
*/
void compute_extbb_execfreqs(ir_graph *irg, ir_exec_freq *execfreqs)
{
- env_t env;
+ env_t env;
ir_extblk *extbb, *next;
ir_node *endblock;
bitset_t *red_reachable; /**< Holds all id's if blocks reachable
in the CFG modulo back edges. */
- bitset_t *be_tgt_reach; /**< target blocks of back edges whose
+ bitset_t *be_tgt_reach; /**< target blocks of back edges whose
sources are reachable from this block
in the reduced graph. */
} bl_info_t;
/** An entry in the relation cache. */
typedef struct mem_disambig_entry {
- const ir_node *adr1; /**< The first address. */
- const ir_mode *mode1; /**< The first address mode. */
- const ir_node *adr2; /**< The second address. */
- const ir_mode *mode2; /**< The second address mode. */
+ const ir_node *adr1; /**< The first address. */
+ const ir_mode *mode1; /**< The first address mode. */
+ const ir_node *adr2; /**< The second address. */
+ const ir_mode *mode2; /**< The second address mode. */
ir_alias_relation result; /**< The alias relation result. */
} mem_disambig_entry;
-#define HASH_ENTRY(adr1, adr2) (HASH_PTR(adr1) ^ HASH_PTR(adr2))
+#define HASH_ENTRY(adr1, adr2) (HASH_PTR(adr1) ^ HASH_PTR(adr2))
/**
* Compare two relation cache entries.
* @file
* @brief analyze graph to provide value range information
* @author Jonas Fietz
- * @version $Id$
+ * @version $Id$
*/
#include "config.h"
static void emit_amd64_SymConst(const ir_node *irn)
{
const amd64_SymConst_attr_t *attr = get_amd64_SymConst_attr_const(irn);
-// sym_or_tv_t key, *entry;
-// unsigned label;
-//
-// key.u.id = get_entity_ld_ident(attr->entity);
-// key.is_ident = 1;
-// key.label = 0;
-// entry = (sym_or_tv_t *)set_insert(sym_or_tv, &key, sizeof(key), HASH_PTR(key.u.generic));
-// if (entry->label == 0) {
-// /* allocate a label */
-// entry->label = get_unique_label();
-// }
-// label = entry->label;
+#if 0
+ sym_or_tv_t key, *entry;
+ unsigned label;
+
+ key.u.id = get_entity_ld_ident(attr->entity);
+ key.is_ident = 1;
+ key.label = 0;
+ entry = (sym_or_tv_t *)set_insert(sym_or_tv, &key, sizeof(key), HASH_PTR(key.u.generic));
+ if (entry->label == 0) {
+ /* allocate a label */
+ entry->label = get_unique_label();
+ }
+ label = entry->label;
+#endif
be_emit_cstring("\tmov $");
be_gas_emit_entity(attr->entity);
*/
static void amd64_dump_node(FILE *F, ir_node *n, dump_reason_t reason)
{
- ir_mode *mode = NULL;
+ ir_mode *mode = NULL;
switch (reason) {
case dump_node_opcode_txt:
is_unsigned = !mode_is_signed(cmp_mode);
new_op1 = be_transform_node(op1);
-// new_op1 = gen_extension(dbgi, block, new_op1, cmp_mode);
+ /* new_op1 = gen_extension(dbgi, block, new_op1, cmp_mode); */
new_op2 = be_transform_node(op2);
-// new_op2 = gen_extension(dbgi, block, new_op2, cmp_mode);
+ /* new_op2 = gen_extension(dbgi, block, new_op2, cmp_mode); */
return new_bd_amd64_Cmp(dbgi, block, new_op1, new_op2, false,
is_unsigned);
}
return new_bd_amd64_Jcc(dbgi, block, flag_node, get_Proj_proj(selector));
}
-///**
-// * Create an And that will zero out upper bits.
-// *
-// * @param dbgi debug info
-// * @param block the basic block
-// * @param op the original node
-// * param src_bits number of lower bits that will remain
-// */
-//static ir_node *gen_zero_extension(dbg_info *dbgi, ir_node *block, ir_node *op,
-// int src_bits)
-//{
-// if (src_bits == 8) {
-// return new_bd_arm_And_imm(dbgi, block, op, 0xFF, 0);
-// } else if (src_bits == 16) {
-// ir_node *lshift = new_bd_arm_Mov_reg_shift_imm(dbgi, block, op, ARM_SHF_LSL_IMM, 16);
-// ir_node *rshift = new_bd_arm_Mov_reg_shift_imm(dbgi, block, lshift, ARM_SHF_LSR_IMM, 16);
-// return rshift;
-// } else {
-// panic("zero extension only supported for 8 and 16 bits");
-// }
-//}
-//
-///**
-// * Generate code for a sign extension.
-// */
-//static ir_node *gen_sign_extension(dbg_info *dbgi, ir_node *block, ir_node *op,
-// int src_bits)
-//{
-// int shift_width = 32 - src_bits;
-// ir_node *lshift_node = new_bd_arm_Mov_reg_shift_imm(dbgi, block, op, ARM_SHF_LSL_IMM, shift_width);
-// ir_node *rshift_node = new_bd_arm_Mov_reg_shift_imm(dbgi, block, lshift_node, ARM_SHF_ASR_IMM, shift_width);
-// return rshift_node;
-//}
-//
-//static ir_node *gen_extension(dbg_info *dbgi, ir_node *block, ir_node *op,
-// ir_mode *orig_mode)
-//{
-// int bits = get_mode_size_bits(orig_mode);
-// if (bits == 32)
-// return op;
-//
-// if (mode_is_signed(orig_mode)) {
-// return gen_sign_extension(dbgi, block, op, bits);
-// } else {
-// return gen_zero_extension(dbgi, block, op, bits);
-// }
-//}
-//
-///**
-// * returns true if it is assured, that the upper bits of a node are "clean"
-// * which means for a 16 or 8 bit value, that the upper bits in the register
-// * are 0 for unsigned and a copy of the last significant bit for signed
-// * numbers.
-// */
-//static bool upper_bits_clean(ir_node *transformed_node, ir_mode *mode)
-//{
-// (void) transformed_node;
-// (void) mode;
-// /* TODO */
-// return false;
-//}
+#if 0
+/**
+ * Create an And that will zero out upper bits.
+ *
+ * @param dbgi debug info
+ * @param block the basic block
+ * @param op the original node
+ * @param src_bits number of lower bits that will remain
+ */
+static ir_node *gen_zero_extension(dbg_info *dbgi, ir_node *block, ir_node *op,
+ int src_bits)
+{
+ if (src_bits == 8) {
+ return new_bd_arm_And_imm(dbgi, block, op, 0xFF, 0);
+ } else if (src_bits == 16) {
+ ir_node *lshift = new_bd_arm_Mov_reg_shift_imm(dbgi, block, op, ARM_SHF_LSL_IMM, 16);
+ ir_node *rshift = new_bd_arm_Mov_reg_shift_imm(dbgi, block, lshift, ARM_SHF_LSR_IMM, 16);
+ return rshift;
+ } else {
+ panic("zero extension only supported for 8 and 16 bits");
+ }
+}
+
+/**
+ * Generate code for a sign extension.
+ */
+static ir_node *gen_sign_extension(dbg_info *dbgi, ir_node *block, ir_node *op,
+ int src_bits)
+{
+ int shift_width = 32 - src_bits;
+ ir_node *lshift_node = new_bd_arm_Mov_reg_shift_imm(dbgi, block, op, ARM_SHF_LSL_IMM, shift_width);
+ ir_node *rshift_node = new_bd_arm_Mov_reg_shift_imm(dbgi, block, lshift_node, ARM_SHF_ASR_IMM, shift_width);
+ return rshift_node;
+}
+
+static ir_node *gen_extension(dbg_info *dbgi, ir_node *block, ir_node *op,
+ ir_mode *orig_mode)
+{
+ int bits = get_mode_size_bits(orig_mode);
+ if (bits == 32)
+ return op;
+
+ if (mode_is_signed(orig_mode)) {
+ return gen_sign_extension(dbgi, block, op, bits);
+ } else {
+ return gen_zero_extension(dbgi, block, op, bits);
+ }
+}
+
+/**
+ * returns true if it is assured, that the upper bits of a node are "clean"
+ * which means for a 16 or 8 bit value, that the upper bits in the register
+ * are 0 for unsigned and a copy of the most significant bit for signed
+ * numbers.
+ */
+static bool upper_bits_clean(ir_node *transformed_node, ir_mode *mode)
+{
+ (void) transformed_node;
+ (void) mode;
+ /* TODO */
+ return false;
+}
+#endif
/**
* Change some phi modes
return new_bd_amd64_Conv(dbgi, block, new_op, min_mode);
- //if (upper_bits_clean(new_op, min_mode)) {
- // return new_op;
- //}
+#if 0
+ if (upper_bits_clean(new_op, min_mode)) {
+ return new_op;
+ }
- //if (mode_is_signed(min_mode)) {
- // return gen_sign_extension(dbg, block, new_op, min_bits);
- //} else {
- // return gen_zero_extension(dbg, block, new_op, min_bits);
- //}
+ if (mode_is_signed(min_mode)) {
+ return gen_sign_extension(dbg, block, new_op, min_bits);
+ } else {
+ return gen_zero_extension(dbg, block, new_op, min_bits);
+ }
+#endif
}
}
}
set_irn_pinned(new_load, get_irn_pinned(node));
+#if 0
/* check for special case: the loaded value might not be used */
-// if (be_get_Proj_for_pn(node, pn_Load_res) == NULL) {
-// /* add a result proj and a Keep to produce a pseudo use */
-// ir_node *proj = new_r_Proj(new_load, mode_Iu, pn_amd64_Load_res);
-// be_new_Keep(block, 1, &proj);
-// }
+ if (be_get_Proj_for_pn(node, pn_Load_res) == NULL) {
+ /* add a result proj and a Keep to produce a pseudo use */
+ ir_node *proj = new_r_Proj(new_load, mode_Iu, pn_amd64_Load_res);
+ be_new_Keep(block, 1, &proj);
+ }
+#endif
return new_load;
}
}
} else if (is_Load(pred)) {
return gen_Proj_Load(node);
-// } else if (be_is_SubSP(pred)) {
-// //panic("gen_Proj not implemented for SubSP");
-// return gen_Proj_be_SubSP(node);
-// } else if (be_is_AddSP(pred)) {
-// //panic("gen_Proj not implemented for AddSP");
-// return gen_Proj_be_AddSP(node);
-// } else if (is_Cmp(pred)) {
-// //panic("gen_Proj not implemented for Cmp");
-// return gen_Proj_Cmp(node);
-// } else if (is_Div(pred)) {
-// return gen_Proj_Div(node);
+#if 0
+ } else if (be_is_SubSP(pred)) {
+ //panic("gen_Proj not implemented for SubSP");
+ return gen_Proj_be_SubSP(node);
+ } else if (be_is_AddSP(pred)) {
+ //panic("gen_Proj not implemented for AddSP");
+ return gen_Proj_be_AddSP(node);
+ } else if (is_Cmp(pred)) {
+ //panic("gen_Proj not implemented for Cmp");
+ return gen_Proj_Cmp(node);
+ } else if (is_Div(pred)) {
+ return gen_Proj_Div(node);
+#endif
} else if (is_Start(pred)) {
-// /*
-// if (proj == pn_Start_X_initial_exec) {
-// ir_node *block = get_nodes_block(pred);
-// ir_node *jump;
-//
-// // we exchange the ProjX with a jump
-// block = be_transform_node(block);
-// jump = new_rd_Jmp(dbgi, block);
-// return jump;
-// }
-//
-// if (node == get_irg_anchor(irg, anchor_tls)) {
-// return gen_Proj_tls(node);
-// }
-// */
-// } else {
-// ir_node *new_pred = be_transform_node(pred);
-// ir_mode *mode = get_irn_mode(node);
-// if (mode_needs_gp_reg(mode)) {
-// ir_node *new_proj = new_r_Proj(new_pred, mode_Iu, get_Proj_proj(node));
-// new_proj->node_nr = node->node_nr;
-// return new_proj;
-// }
+#if 0
+ if (node == get_irg_anchor(irg, anchor_tls)) {
+ return gen_Proj_tls(node);
+ }
+ } else {
+ ir_node *new_pred = be_transform_node(pred);
+ ir_mode *mode = get_irn_mode(node);
+ if (mode_needs_gp_reg(mode)) {
+ ir_node *new_proj = new_r_Proj(new_pred, mode_Iu, get_Proj_proj(node));
+ new_proj->node_nr = node->node_nr;
+ return new_proj;
+ }
+#endif
}
return be_duplicate_node(node);
fprintf(F, "%s", get_irn_opname(n));
if (arm_has_symconst_attr(n)) {
- const arm_SymConst_attr_t *attr = get_arm_SymConst_attr_const(n);
+ const arm_SymConst_attr_t *attr = get_arm_SymConst_attr_const(n);
if (attr->entity != NULL) {
fputc(' ', F);
fputs(get_entity_name(attr->entity), F);
fputc('\n', F);
}
if (arm_has_symconst_attr(n)) {
- const arm_SymConst_attr_t *attr = get_arm_SymConst_attr_const(n);
+ const arm_SymConst_attr_t *attr = get_arm_SymConst_attr_const(n);
fprintf(F, "entity = ");
if (attr->entity != NULL) {
{
static ir_settings_arch_dep_t ad = {
1, /* allow subs */
- 1, /* Muls are fast enough on ARM but ... */
+ 1, /* Muls are fast enough on ARM but ... */
31, /* ... one shift would be possibly better */
NULL, /* no evaluator function */
0, /* SMUL is needed, only in Arch M */
typedef struct be_abi_call_arg_t {
unsigned is_res : 1; /**< 1: the call argument is a return value. 0: it's a call parameter. */
unsigned in_reg : 1; /**< 1: this argument is transmitted in registers. */
- unsigned on_stack : 1; /**< 1: this argument is transmitted on the stack. */
+ unsigned on_stack : 1; /**< 1: this argument is transmitted on the stack. */
unsigned callee : 1; /**< 1: someone called us. 0: We call another function */
int pos;
* @param call the abi call
* @param is_res true for call results, false for call arguments
* @param pos position of the argument
- * @param callee context type - if we are callee or caller
+ * @param callee context type - if we are callee or caller
*/
static be_abi_call_arg_t *get_call_arg(be_abi_call_t *call, int is_res, int pos, int callee)
{
static int algo = BLOCKSCHED_GREEDY;
static const lc_opt_enum_int_items_t blockschedalgo_items[] = {
- { "naiv", BLOCKSCHED_NAIV },
+ { "naiv", BLOCKSCHED_NAIV },
{ "greedy", BLOCKSCHED_GREEDY },
#ifdef WITH_ILP
{ "ilp", BLOCKSCHED_ILP },
be_chordal_env_t *chordal_env;
pset *pre_colored; /**< Set of precolored nodes. */
- bitset_t *live; /**< A liveness bitset. */
+ bitset_t *live; /**< A liveness bitset. */
bitset_t *tmp_colors; /**< An auxiliary bitset which is as long as the number of colors in the class. */
- bitset_t *colors; /**< The color mask. */
+ bitset_t *colors; /**< The color mask. */
bitset_t *in_colors; /**< Colors used by live in values. */
int colors_n; /**< The number of colors. */
} be_chordal_alloc_env_t;
DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
/* Make a fourcc for border checking. */
-#define BORDER_FOURCC FOURCC('B', 'O', 'R', 'D')
-
+#define BORDER_FOURCC FOURCC('B', 'O', 'R', 'D')
int has_reg_class(const be_chordal_env_t *env, const ir_node *irn)
{
*/
static void post_spill(post_spill_env_t *pse, int iteration)
{
- be_chordal_env_t *chordal_env = &pse->cenv;
- ir_graph *irg = pse->irg;
- ir_exec_freq *exec_freq = be_get_irg_exec_freq(irg);
- int colors_n = arch_register_class_n_regs(chordal_env->cls);
- int allocatable_regs
+ be_chordal_env_t *chordal_env = &pse->cenv;
+ ir_graph *irg = pse->irg;
+ ir_exec_freq *exec_freq = be_get_irg_exec_freq(irg);
+ int colors_n = arch_register_class_n_regs(chordal_env->cls);
+ int allocatable_regs
= colors_n - be_put_ignore_regs(irg, chordal_env->cls, NULL);
/* some special classes contain only ignore regs, no work to be done */
set *changed_nodes; /**< contains node_stat_t's. */
} qnode_t;
-static pset *pinned_global; /**< optimized nodes should not be altered any more */
+static pset *pinned_global; /**< optimized nodes should not be altered any more */
static inline int nodes_interfere(const be_chordal_env_t *env, const ir_node *a, const ir_node *b)
{
co2_irn_t *touched_next;
col_t tmp_col;
col_t orig_col;
- int last_color_change;
+ int last_color_change;
bitset_t *adm_cache;
unsigned fixed : 1;
unsigned tmp_fixed : 1;
int n_memb;
int n_constr;
int max_degree;
- int ticks;
+ int ticks;
double freedom;
co2_cloud_irn_t *master;
co2_cloud_irn_t *mst_root;
* - Clique-star constraints
*
*
- * \min \sum_{ (i,j) \in Q } w_ij y_ij
+ * \min \sum_{ (i,j) \in Q } w_ij y_ij
*
- * \sum_c x_nc = 1 n \in N, c \in C
+ * \sum_c x_nc = 1 n \in N, c \in C
*
- * x_nc = 0 n \in N, c \not\in C(n)
+ * x_nc = 0 n \in N, c \not\in C(n)
*
- * \sum x_nc <= 1 x_nc \in Clique \in AllCliques, c \in C
+ * \sum x_nc <= 1 x_nc \in Clique \in AllCliques, c \in C
*
- * \sum_{e \in p} y_e >= 1 p \in P path constraints
+ * \sum_{e \in p} y_e >= 1 p \in P path constraints
*
- * \sum_{e \in cs} y_e >= |cs| - 1 cs \in CP clique-star constraints
+ * \sum_{e \in cs} y_e >= |cs| - 1 cs \in CP clique-star constraints
*
- * x_nc, y_ij \in N, w_ij \in R^+
+ * x_nc, y_ij \in N, w_ij \in R^+
*/
#include "config.h"
I_BLOCKS,
/* phi nodes */
- I_PHI_CNT, /* number of phi nodes */
- I_PHI_ARG_CNT, /* number of arguments of phis */
- I_PHI_ARG_SELF, /* number of arguments of phis being the phi itself */
- I_PHI_ARG_CONST, /* number of arguments of phis being consts */
- I_PHI_ARG_PRED, /* ... being defined in a cf-pred */
- I_PHI_ARG_GLOB, /* ... being defined elsewhere */
+ I_PHI_CNT, /* number of phi nodes */
+ I_PHI_ARG_CNT, /* number of arguments of phis */
+ I_PHI_ARG_SELF, /* number of arguments of phis being the phi itself */
+ I_PHI_ARG_CONST, /* number of arguments of phis being consts */
+ I_PHI_ARG_PRED, /* ... being defined in a cf-pred */
+ I_PHI_ARG_GLOB, /* ... being defined elsewhere */
I_PHI_ARITY_S,
I_PHI_ARITY_E = I_PHI_ARITY_S+MAX_ARITY,
/* copy nodes */
- I_CPY_CNT, /* number of copynodes */
+ I_CPY_CNT, /* number of copynodes */
/* phi classes */
- I_CLS_CNT, /* number of phi classes */
- I_CLS_IF_FREE, /* number of pc having no interference */
- I_CLS_IF_MAX, /* number of possible interferences in all classes */
- I_CLS_IF_CNT, /* number of actual interferences in all classes */
+ I_CLS_CNT, /* number of phi classes */
+ I_CLS_IF_FREE, /* number of pc having no interference */
+ I_CLS_IF_MAX, /* number of possible interferences in all classes */
+ I_CLS_IF_CNT, /* number of actual interferences in all classes */
I_CLS_SIZE_S,
I_CLS_SIZE_E = I_CLS_SIZE_S+MAX_CLS_SIZE,
I_CLS_PHIS_S,
/* all of them are external set */
/* ilp values */
- I_HEUR_TIME, /* solving time in milli seconds */
- I_ILP_TIME, /* solving time in milli seconds */
+ I_HEUR_TIME, /* solving time in milli seconds */
+ I_ILP_TIME, /* solving time in milli seconds */
I_ILP_VARS,
I_ILP_CSTR,
- I_ILP_ITER, /* number of simplex iterations */
+ I_ILP_ITER, /* number of simplex iterations */
/* copy instructions */
- I_COPIES_MAX, /* max possible costs of copies*/
- I_COPIES_INIT, /* number of copies in initial allocation */
- I_COPIES_HEUR, /* number of copies after heuristic */
- I_COPIES_5SEC, /* number of copies after ilp with max n sec */
- I_COPIES_30SEC, /* number of copies after ilp with max n sec */
- I_COPIES_OPT, /* number of copies after ilp */
- I_COPIES_IF, /* number of copies inevitable due to root-arg-interf */
+ I_COPIES_MAX, /* max possible costs of copies*/
+ I_COPIES_INIT, /* number of copies in initial allocation */
+ I_COPIES_HEUR, /* number of copies after heuristic */
+ I_COPIES_5SEC, /* number of copies after ilp with max n sec */
+ I_COPIES_30SEC, /* number of copies after ilp with max n sec */
+ I_COPIES_OPT, /* number of copies after ilp */
+ I_COPIES_IF, /* number of copies inevitable due to root-arg-interf */
ASIZE
};
/**
* @return 1 if the block at pos @p pos removed a critical edge
- * 0 else
+ * 0 else
*/
static inline int was_edge_critical(const ir_node *bl, int pos)
{
assert(fragment->offset >= offset);
nops = fragment->offset - offset;
if (nops > 0) {
- unsigned char *nopbuffer = obstack_alloc(&code_fragment_obst, nops);
- interface->create_nops(nopbuffer, nops);
- emit(output, nopbuffer, nops);
+ unsigned char *nopbuffer = obstack_alloc(&code_fragment_obst, nops);
+ interface->create_nops(nopbuffer, nops);
+ emit(output, nopbuffer, nops);
offset = fragment->offset;
obstack_free(&code_fragment_obst, nopbuffer);
}
if (vals[k].v.value != NULL) {
emit_atomic_init(env, vals[k].v.value);
elem_size = get_mode_size_bytes(get_irn_mode(vals[k].v.value));
- } else {
- elem_size = 0;
- }
+ } else {
+ elem_size = 0;
+ }
} else if (vals[k].kind == TARVAL) {
tarval *tv = vals[k].v.tarval;
size_t size = get_mode_size_bytes(get_tarval_mode(tv));
if (is_Phi(irn)) {
/*
- Phi functions are scheduled immediately, since they only
- transfer data flow from the predecessors to this block.
+ Phi functions are scheduled immediately, since they only
+ transfer data flow from the predecessors to this block.
*/
add_to_sched(&be, irn);
} else if (be_is_Start(irn)) {
static struct {
be_lv_t *lv; /**< The liveness object. */
- ir_node *def; /**< The node (value). */
+ ir_node *def; /**< The node (value). */
ir_node *def_block; /**< The block of def. */
bitset_t *visited; /**< A set were all visited blocks are recorded. */
} re;
}
if ((
- flags & ASM_CONSTRAINT_FLAG_MODIFIER_WRITE &&
- flags & ASM_CONSTRAINT_FLAG_MODIFIER_NO_WRITE
+ flags & ASM_CONSTRAINT_FLAG_MODIFIER_WRITE &&
+ flags & ASM_CONSTRAINT_FLAG_MODIFIER_NO_WRITE
) || (
- flags & ASM_CONSTRAINT_FLAG_MODIFIER_READ &&
- flags & ASM_CONSTRAINT_FLAG_MODIFIER_NO_READ
+ flags & ASM_CONSTRAINT_FLAG_MODIFIER_READ &&
+ flags & ASM_CONSTRAINT_FLAG_MODIFIER_NO_READ
)) {
flags |= ASM_CONSTRAINT_FLAG_INVALID;
}
if (old_info->out_infos != NULL) {
unsigned n_outs = ARR_LEN(old_info->out_infos);
/* need dynamic out infos? */
- if (be_is_Barrier(new_node) || be_is_Perm(new_node)) {
+ if (be_is_Barrier(new_node) || be_is_Perm(new_node)) {
new_info->out_infos = NEW_ARR_F(reg_out_info_t, n_outs);
} else {
new_info->out_infos = NEW_ARR_D(reg_out_info_t, obst, n_outs);
typedef struct be_pbqp_alloc_env_t {
pbqp *pbqp_inst; /**< PBQP instance for register allocation */
ir_graph *irg; /**< The graph under examination. */
- const arch_register_class_t *cls; /**< Current processed register class */
+ const arch_register_class_t *cls; /**< Current processed register class */
be_lv_t *lv;
bitset_t *ignored_regs;
pbqp_matrix *ife_matrix_template;
static void create_pbqp_coloring_instance(ir_node *block, void *data)
{
- be_pbqp_alloc_env_t *pbqp_alloc_env = data;
- be_lv_t *lv = pbqp_alloc_env->lv;
- const arch_register_class_t *cls = pbqp_alloc_env->cls;
+ be_pbqp_alloc_env_t *pbqp_alloc_env = data;
+ be_lv_t *lv = pbqp_alloc_env->lv;
+ const arch_register_class_t *cls = pbqp_alloc_env->cls;
plist_t *rpeo = pbqp_alloc_env->rpeo;
pbqp *pbqp_inst = pbqp_alloc_env->pbqp_inst;
plist_t *temp_list = plist_new();
int *assignment = ALLOCAN(int, cls->n_regs);
#else
unsigned *restr_nodes = pbqp_alloc_env->restr_nodes;
- pqueue_t *restr_nodes_queue = new_pqueue();
- pqueue_t *queue = new_pqueue();
- plist_t *sorted_list = plist_new();
+ pqueue_t *restr_nodes_queue = new_pqueue();
+ pqueue_t *queue = new_pqueue();
+ plist_t *sorted_list = plist_new();
ir_node *last_element = NULL;
#endif
* start handling constraints from there.
*/
for (irn = sched_first(block); !sched_is_end(irn);) {
- int silent_old = silent; /* store old silent value */
+ int silent_old = silent; /* store old silent value */
if (be_is_Barrier(irn))
- silent = !silent; /* toggle silent flag */
+ silent = !silent; /* toggle silent flag */
be_insn_t *insn = chordal_scan_insn(env, irn);
irn = insn->next_insn;
/* initialize pbqp allocation data structure */
- pbqp_alloc_env.pbqp_inst = alloc_pbqp(get_irg_last_idx(irg)); /* initialize pbqp instance */
+ pbqp_alloc_env.pbqp_inst = alloc_pbqp(get_irg_last_idx(irg)); /* initialize pbqp instance */
pbqp_alloc_env.cls = cls;
pbqp_alloc_env.irg = irg;
pbqp_alloc_env.lv = lv;
pbqp_alloc_env.restr_nodes = XMALLOCNZ(unsigned, get_irg_last_idx(irg));
pbqp_alloc_env.ife_edge_num = XMALLOCNZ(unsigned, get_irg_last_idx(irg));
pbqp_alloc_env.env = env;
- be_put_ignore_regs(irg, cls, pbqp_alloc_env.ignored_regs); /* get ignored registers */
+ be_put_ignore_regs(irg, cls, pbqp_alloc_env.ignored_regs); /* get ignored registers */
/* create costs matrix template for interference edges */
#if PRINT_RPEO
plist_element_t *elements;
foreach_plist(pbqp_alloc_env.rpeo, elements) {
- pbqp_node *node = elements->data;
+ pbqp_node *node = elements->data;
printf(" %d(%lu);", node->index, get_idx_irn(irg, node->index)->node_nr);
}
printf("\n");
/**
* Kill the Barrier nodes for better peephole optimization.
*/
-static void kill_barriers(ir_graph *irg)
+static void kill_barriers(ir_graph *irg)
{
ir_node *end_blk = get_irg_end_block(irg);
ir_node *start_blk = get_irg_start_block(irg);
* @p v A variable to put the current value in
* @p i An integer for internal use
*/
-#define workset_foreach(ws, v, i) \
+#define workset_foreach(ws, v, i) \
for (i=0; v=(i < ws->len) ? ws->vals[i].node : NULL, i < ws->len; ++i)
typedef struct block_info_t {
unsigned i;
int in;
unsigned ws_count;
- int free_slots, free_pressure_slots;
+ int free_slots, free_pressure_slots;
unsigned pressure;
int arity;
workset_t **pred_worksets;
} loc_t;
typedef struct workset_t {
- int len; /**< current length */
- loc_t vals[0]; /**< inlined array of the values/distances in this working set */
+ int len; /**< current length */
+ loc_t vals[0]; /**< inlined array of the values/distances in this working set */
} workset_t;
typedef struct belady_env_t {
ir_node **blocks; /**< Array of all blocks. */
int n_blocks; /**< Number of blocks in the graph. */
- int n_regs; /**< number of regs in this reg-class */
- workset_t *ws; /**< the main workset used while processing a block. ob-allocated */
- ir_node *instr; /**< current instruction */
+ int n_regs; /**< number of regs in this reg-class */
+ workset_t *ws; /**< the main workset used while processing a block. ob-allocated */
+ ir_node *instr; /**< current instruction */
int instr_nr; /**< current instruction number (relative to block start) */
- spill_env_t *senv; /**< see bespill.h */
+ spill_env_t *senv; /**< see bespill.h */
bitset_t *spilled; /**< bitset to keep all the irns which have already been spilled. */
ir_nodeset_t *extra_spilled; /** All nodes for which a special spill location has been computed. */
} belady_env_t;
* @p v A variable to put the current value in
* @p i An integer for internal use
*/
-#define workset_foreach(ws, v, i) for (i=0; \
+#define workset_foreach(ws, v, i) for (i=0; \
v=(i < ws->len) ? ws->vals[i].irn : NULL, i < ws->len; \
++i)
}
}
-#define get_current_use(bi, irn) phase_get_irn_data(&(bi)->next_uses, (irn))
+#define get_current_use(bi, irn) phase_get_irn_data(&(bi)->next_uses, (irn))
static inline void advance_current_use(block_info_t *bi, const ir_node *irn)
{
#include "beirg.h"
#include "bearch.h"
-#define DBG_COALESCING 1
-#define DBG_INTERFERENCES 2
+#define DBG_COALESCING 1
+#define DBG_INTERFERENCES 2
DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
* @file
* @brief implementation of the spill/reload placement abstraction layer
* @author Daniel Grund, Sebastian Hack, Matthias Braun
- * @date 29.09.2005
+ * @date 29.09.2005
* @version $Id$
*/
#include "config.h"
const arch_env_t *arch_env = be_get_irg_arch_env(irg);
spill_env_t *env = XMALLOC(spill_env_t);
- env->spills = new_set(cmp_spillinfo, 1024);
+ env->spills = new_set(cmp_spillinfo, 1024);
env->irg = irg;
env->arch_env = arch_env;
ir_nodeset_init(&env->mem_phis);
}
void be_add_reload_on_edge(spill_env_t *env, ir_node *to_spill, ir_node *block,
- int pos, const arch_register_class_t *reload_cls,
+ int pos, const arch_register_class_t *reload_cls,
int allow_remat)
{
ir_node *before = get_block_insertion_point(block, pos);
int pos = get_edge_src_pos(edge);
ir_node *def;
- if (env->ignore_uses != NULL &&
+ if (env->ignore_uses != NULL &&
ir_nodeset_contains(env->ignore_uses, use))
continue;
if (is_Anchor(use) || is_End(use))
* Adjusts the register allocation for the (new) phi-operands
* and insert duplicates iff necessary.
*/
-static void set_regs_or_place_dupls_walker(ir_node *bl, void *data)
+static void set_regs_or_place_dupls_walker(ir_node *bl, void *data)
{
be_chordal_env_t *chordal_env = data;
be_lv_t *lv = be_get_irg_liveness(chordal_env->irg);
if (!is_cfop(last)) {
last = sched_next(last);
/* last node must be a cfop, only exception is the start block */
- assert(last == get_irg_start_block(get_irn_irg(block)));
+ assert(last == get_irg_start_block(get_irn_irg(block)));
}
return last;
pred_info->end_state, need_state));
if (pred_info->end_state != need_state) {
- ir_node *insert_point = get_end_of_block_insertion_point(pred);
+ ir_node *insert_point = get_end_of_block_insertion_point(pred);
DBG((dbg, LEVEL_3, " Creating reload for %+F\n", need_state));
cycles.
*/
if (is_ia32_use_frame(irn) || (
- is_ia32_NoReg_GP(get_irn_n(irn, n_ia32_base)) &&
- is_ia32_NoReg_GP(get_irn_n(irn, n_ia32_index))
+ is_ia32_NoReg_GP(get_irn_n(irn, n_ia32_base)) &&
+ is_ia32_NoReg_GP(get_irn_n(irn, n_ia32_index))
)) {
cost += 5;
} else {
#define IA32_EMIT2(a,b) op_ia32_##a->ops.generic = (op_func)emit_ia32_##b
#define IA32_EMIT(a) IA32_EMIT2(a,a)
#define EMIT(a) op_##a->ops.generic = (op_func)emit_##a
-#define IGN(a) op_##a->ops.generic = (op_func)emit_Nothing
+#define IGN(a) op_##a->ops.generic = (op_func)emit_Nothing
#define BE_EMIT(a) op_be_##a->ops.generic = (op_func)emit_be_##a
-#define BE_IGN(a) op_be_##a->ops.generic = (op_func)emit_Nothing
+#define BE_IGN(a) op_be_##a->ops.generic = (op_func)emit_Nothing
/* first clear the generic function pointer for all ops */
clear_irp_opcodes_generic_func();
}
/* only optimize up to 48 stores behind IncSPs */
-#define MAXPUSH_OPTIMIZE 48
+#define MAXPUSH_OPTIMIZE 48
/**
* Tries to create Push's from IncSP, Store combinations.
#define ENT_DFP_ABS "C_ia32_dfp_abs"
#define ENT_ULL_BIAS "C_ia32_ull_bias"
-#define mode_vfp (ia32_reg_classes[CLASS_ia32_vfp].mode)
+#define mode_vfp (ia32_reg_classes[CLASS_ia32_vfp].mode)
#define mode_xmm (ia32_reg_classes[CLASS_ia32_xmm].mode)
DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
return new_node;
}
-static ir_node *create_lea_from_address(dbg_info *dbgi, ir_node *block,
+static ir_node *create_lea_from_address(dbg_info *dbgi, ir_node *block,
ia32_address_t *addr)
{
ir_node *base, *index, *res;
ir_node *block = get_nodes_block(node);
ir_node *new_block = be_transform_node(block);
dbg_info *dbgi = get_irn_dbg_info(node);
- ir_node *sel = get_Cond_selector(node);
+ ir_node *sel = get_Cond_selector(node);
ir_mode *sel_mode = get_irn_mode(sel);
ir_node *flags = NULL;
ir_node *new_node;
ir_node *sub, *res, *flags, *block;
res = gen_binop(node, get_Bound_index(node), get_Bound_upper(node),
- new_bd_ia32_Sub, match_mode_neutral | match_am | match_immediate);
+ new_bd_ia32_Sub,
+ match_mode_neutral | match_am | match_immediate);
block = get_nodes_block(res);
if (! is_Proj(res)) {
match_mode_neutral);
if (is_Proj(lowered)) {
- lowered = get_Proj_pred(lowered);
+ lowered = get_Proj_pred(lowered);
} else {
assert(is_ia32_Add(lowered));
set_irn_mode(lowered, mode_T);
match_am | match_immediate | match_mode_neutral);
if (is_Proj(lowered)) {
- lowered = get_Proj_pred(lowered);
+ lowered = get_Proj_pred(lowered);
} else {
assert(is_ia32_Sub(lowered));
set_irn_mode(lowered, mode_T);
/* the unop index */
#define UNOP_IDX 0
-#define MASK_TOS(x) ((x) & (N_x87_REGS - 1))
+#define MASK_TOS(x) ((x) & (N_x87_REGS - 1))
/** the debug handle */
DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
x87_state *end; /**< state at the end or NULL if not assigned */
} blk_state;
-#define PTR_TO_BLKSTATE(p) ((blk_state *)(p))
+#define PTR_TO_BLKSTATE(p) ((blk_state *)(p))
/** liveness bitset for vfp registers. */
typedef unsigned char vfp_liveness;
} /* vfp_liveness_end_of_block */
/** get the register mask from an arch_register */
-#define REGMASK(reg) (1 << (arch_register_get_index(reg)))
+#define REGMASK(reg) (1 << (arch_register_get_index(reg)))
/**
* Return a bitset of argument registers which are live at the end of a node.
}
#define GEN_BINOP(op) _GEN_BINOP(op, op)
-#define GEN_BINOPR(op) _GEN_BINOP(op, op##r)
+#define GEN_BINOPR(op) _GEN_BINOP(op, op##r)
#define GEN_LOAD(op) \
static int sim_##op(x87_state *state, ir_node *n) { \
be_dbg_set_dbg_info(get_irn_dbg_info(node));
(*func) (node);
} else {
- panic("No emit handler for node %+F (graph %+F)\n", node,
+ panic("No emit handler for node %+F (graph %+F)\n", node,
current_ir_graph);
}
}
return *lexer.curr_pos++;
} /* next_char */
-#define unput() if (lexer.curr_pos < lexer.end_pos) --lexer.curr_pos
+#define unput() if (lexer.curr_pos < lexer.end_pos) --lexer.curr_pos
#undef MIN
#define MIN(a, b) (a) < (b) ? (a) : (b)
size_t prefix_len = get_id_strlen(prefix);
if (prefix_len > get_id_strlen(id))
return 0;
- return 0 == memcmp(get_id_str(prefix), get_id_str(id), prefix_len);
+ return 0 == memcmp(get_id_str(prefix), get_id_str(id), prefix_len);
}
int id_is_suffix(ident *suffix, ident *id)
instruction *Ns;
unsigned t;
- if (R[0] == 0) { /* Case 1 */
+ if (R[0] == 0) { /* Case 1 */
t = R[1] > IMAX(env->max_S, R[1]);
R[1] -= t;
Ns = decompose_mul(env, &R[1], r - 1, N);
return emit_LEA(env, env->root, Ns, t);
- } else if (R[0] <= env->max_S) { /* Case 2 */
+ } else if (R[0] <= env->max_S) { /* Case 2 */
t = R[0];
R[1] += t;
Ns = decompose_mul(env, &R[1], r - 1, N);
ir_node *new_rd_Const(dbg_info *db, ir_graph *irg, tarval *con)
{
ir_node *res;
-//#ifdef USE_ORIGINAL
ir_graph *rem = current_ir_graph;
current_ir_graph = irg;
res = new_bd_Const_type(db, con, firm_unknown_type);
current_ir_graph = rem;
-//#else
-// res = new_rd_Const_type(db, irg, con, firm_unknown_type);
-//#endif
return res;
} /* new_rd_Const */
is not needed.
Note: We MUST consider Bad nodes, else we might get data flow cycles in dead loops! */
known = res;
- for (i = ins - 1; i >= 0; --i) {
+ for (i = ins - 1; i >= 0; --i) {
assert(in[i]);
in[i] = skip_Id(in[i]); /* increases the number of freed Phis. */
/* DISABLE - don't do this optimization
ENABLE - lets see, if there is a better graph */
-#define ON (-1)
-#define OFF (0)
+#define ON -1
+#define OFF 0
#define FLAG(name, value, def) (irf_##name & def) |
#define E_FLAG(name, value, def) FLAG(name, value, def)
{
switch (initializer->kind) {
case IR_INITIALIZER_CONST:
- irg_walk(initializer->consti.value, env->pre, env->post, env->env);
+ irg_walk(initializer->consti.value, env->pre, env->post, env->env);
return;
case IR_INITIALIZER_TARVAL:
case IR_INITIALIZER_NULL:
ctx.env = env;
if (pre != NULL && post != NULL)
- dom_tree_walk_irg(current_ir_graph, dom_block_visit_both, NULL, &ctx);
+ dom_tree_walk_irg(current_ir_graph, dom_block_visit_both, NULL, &ctx);
else if (pre != NULL)
- dom_tree_walk_irg(current_ir_graph, dom_block_visit_pre, NULL, &ctx);
+ dom_tree_walk_irg(current_ir_graph, dom_block_visit_pre, NULL, &ctx);
else if (post != NULL)
- dom_tree_walk_irg(current_ir_graph, dom_block_visit_post, NULL, &ctx);
+ dom_tree_walk_irg(current_ir_graph, dom_block_visit_post, NULL, &ctx);
}
/**
fprintf(env->file, "] { ");
- switch (opcode) {
- #include "gen_irio_export.inl"
+ switch (opcode) {
+#include "gen_irio_export.inl"
}
fputs("}\n", env->file);
}
return n;
} else if (right == b) {
if (mode != get_irn_mode(left)) {
- /* This Sub is an effective Cast */
+ /* This Sub is an effective Cast */
left = new_r_Conv(get_nodes_block(n), left, mode);
}
n = left;
n = new_r_Minus(get_nodes_block(n), left, l_mode);
if (mode != l_mode) {
- /* This Sub is an effective Cast */
+ /* This Sub is an effective Cast */
n = new_r_Conv(get_nodes_block(n), n, mode);
}
DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_ADD_SUB);
/*
* UpConv(x) REL 0 ==> x REL 0
* Don't do this for float values as it's unclear whether it is a
- * win. (on the other side it makes detection/creation of fabs hard)
+ * win. (on the other side it makes detection/creation of fabs hard)
*/
if (get_mode_size_bits(mode) > get_mode_size_bits(op_mode) &&
((proj_nr == pn_Cmp_Eq || proj_nr == pn_Cmp_Lg) ||
* Generates a new irg which calls the initializer
*
* Pseudocode:
- * void __firmprof_initializer(void) { __init_firmprof(ent_filename, bblock_id, bblock_counts, n_blocks); }
+ * void __firmprof_initializer(void) { __init_firmprof(ent_filename, bblock_id, bblock_counts, n_blocks); }
*/
static ir_graph *gen_initializer_irg(ir_entity *ent_filename, ir_entity *bblock_id, ir_entity *bblock_counts, int n_blocks)
{
++wd->id;
}
-#define IDENT(x) new_id_from_chars(x, sizeof(x) - 1)
+#define IDENT(x) new_id_from_chars(x, sizeof(x) - 1)
ir_graph *ir_profile_instrument(const char *filename, unsigned flags)
{
for (i = get_Block_n_cfgpreds(n) - 1; i >= 0; --i) {
ir_node *pred = get_Block_cfgpred(n, i);
ASSERT_AND_RET(
- is_Bad(pred) || (get_irn_mode(pred) == mode_X),
+ is_Bad(pred) || (get_irn_mode(pred) == mode_X),
"Block node must have a mode_X predecessor", 0);
}
#define OPT_DELIM '-'
-#define HELP_TEMPL "%-15s %-10s %-45s"
-#define HELP_TEMPL_VALS HELP_TEMPL " [%s] (%s)"
+#define HELP_TEMPL "%-15s %-10s %-45s"
+#define HELP_TEMPL_VALS HELP_TEMPL " [%s] (%s)"
static struct obstack obst;
set_error(err, lc_opt_err_none, "");
list_add_tail(&ent->list, &lc_get_grp_special(ent->parent)->opts);
- s->type = type;
- s->value = val;
- s->cb = cb;
+ s->type = type;
+ s->value = val;
+ s->cb = cb;
s->dump = dump;
s->dump_vals = dump_vals;
- s->length = length;
+ s->length = length;
return ent;
}
* from a file.
*/
if (arg[0] == '@') {
- size_t n = strcspn(&arg[1], " \t\n");
- char *fname = alloca(n + 1);
+ size_t n = strcspn(&arg[1], " \t\n");
+ char *fname = alloca(n + 1);
FILE *f;
strncpy(fname, &arg[1], n);
char buf[256];
lc_opt_entry_t *opt = arg->v_ptr;
- const char *s = buf;
- size_t res = 0;
+ const char *s = buf;
+ size_t res = 0;
switch (occ->conversion) {
case 'V':
#include "lc_opts_t.h"
#include "lc_opts_enum.h"
-static const char *delim = " \t|,";
+static const char *delim = " \t|,";
#define DECL_CB(N, op) \
int lc_opt_enum_ ## N ## _cb(LC_UNUSED(const char *name), LC_UNUSED(lc_opt_type_t type), void *data, size_t len, ...) \
{ \
- lc_opt_enum_ ## N ## _var_t *var = data; \
- const lc_opt_enum_ ## N ## _items_t *items = var->items; \
+ lc_opt_enum_ ## N ## _var_t *var = data; \
+ const lc_opt_enum_ ## N ## _items_t *items = var->items; \
\
va_list args; \
char *s, *tmp; \
#define DECL_DUMP(T, N, cond) \
int lc_opt_enum_ ## N ## _dump(char *buf, size_t n, LC_UNUSED(const char *name), LC_UNUSED(lc_opt_type_t type), void *data, LC_UNUSED(size_t len)) \
{ \
- lc_opt_enum_ ## N ## _var_t *var = data; \
- const lc_opt_enum_ ## N ## _items_t *items = var->items; \
- const char *prefix = ""; \
+ lc_opt_enum_ ## N ## _var_t *var = data; \
+ const lc_opt_enum_ ## N ## _items_t *items = var->items; \
+ const char *prefix = ""; \
TYPE(value) = *var->value; \
int i; \
size_t l = strlen(buf); \
#define DECL_DUMP_VALS(T, N) \
int lc_opt_enum_ ## N ## _dump_vals(char *buf, size_t n, LC_UNUSED(const char *name), LC_UNUSED(lc_opt_type_t type), void *data, LC_UNUSED(size_t len)) \
{ \
- lc_opt_enum_ ## N ## _var_t *var = data; \
- const lc_opt_enum_ ## N ## _items_t *items = var->items; \
- const char *prefix = ""; \
+ lc_opt_enum_ ## N ## _var_t *var = data; \
+ const lc_opt_enum_ ## N ## _items_t *items = var->items; \
+ const char *prefix = ""; \
int i; \
size_t l = strlen(buf); \
\
static ir_mode *get_ir_mode(unsigned bytes)
{
switch (bytes) {
- case 1: return mode_Bu;
+ case 1: return mode_Bu;
case 2: return mode_Hu;
case 4: return mode_Iu;
case 8: return mode_Lu;
} /* if */
} /* for */
- if (results) { /* there are results */
+ if (results) { /* there are results */
int rem = get_optimize();
/* switch off optimization for new Proj nodes or they might be CSE'ed
int shift_count_down = bits - bf_bits;
if (shift_count_up) {
- res = new_r_Shl(block, res, new_Const_long(mode_Iu, shift_count_up), mode);
+ res = new_r_Shl(block, res, new_Const_long(mode_Iu, shift_count_up), mode);
}
if (shift_count_down) {
res = new_r_Shrs(block, res, new_Const_long(mode_Iu, shift_count_down), mode);
unsigned mask = ((unsigned)-1) >> (bits - bf_bits);
if (shift_count_down) {
- res = new_r_Shr(block, res, new_Const_long(mode_Iu, shift_count_down), mode);
+ res = new_r_Shr(block, res, new_Const_long(mode_Iu, shift_count_down), mode);
}
if (bits != bf_bits) {
res = new_r_And(block, res, new_Const_long(mode, mask), mode);
ir_graph *irg = get_irn_irg(node);
ir_node *one = new_rd_Const(dbgi, irg, tv_one);
- return new_rd_Eor(dbgi, block, node, one, mode);
+ return new_rd_Eor(dbgi, block, node, one, mode);
}
static ir_node *create_convb(ir_node *node)
{
register struct _obstack_chunk *old_chunk = h->chunk;
register struct _obstack_chunk *new_chunk;
- register long new_size;
+ register long new_size;
register long obj_size = h->next_free - h->object_base;
register long i;
long already;
int _obstack_allocated_p(struct obstack *h, void *obj)
{
- register struct _obstack_chunk *lp; /* below addr of any objects in this chunk */
- register struct _obstack_chunk *plp; /* point to previous chunk if any */
+ register struct _obstack_chunk *lp; /* below addr of any objects in this chunk */
+ register struct _obstack_chunk *plp; /* point to previous chunk if any */
lp = (h)->chunk;
/* We use >= rather than > since the object cannot be exactly at
void obstack_free(struct obstack *h, void *obj)
{
- register struct _obstack_chunk *lp; /* below addr of any objects in this chunk */
- register struct _obstack_chunk *plp; /* point to previous chunk if any */
+ register struct _obstack_chunk *lp; /* below addr of any objects in this chunk */
+ register struct _obstack_chunk *plp; /* point to previous chunk if any */
lp = h->chunk;
/* We use >= because there cannot be an object at the beginning of a chunk.
{
register struct _obstack_chunk *old_chunk = h->chunk;
register struct _obstack_chunk *new_chunk;
- register long new_size;
+ register long new_size;
register long obj_size = h->next_free - h->object_base;
register long i;
long already;
struct obstack *h;
POINTER obj;
{
- register struct _obstack_chunk *lp; /* below addr of any objects in this chunk */
- register struct _obstack_chunk *plp; /* point to previous chunk if any */
+ register struct _obstack_chunk *lp; /* below addr of any objects in this chunk */
+ register struct _obstack_chunk *plp; /* point to previous chunk if any */
lp = (h)->chunk;
/* We use >= rather than > since the object cannot be exactly at
struct obstack *h;
POINTER obj;
{
- register struct _obstack_chunk *lp; /* below addr of any objects in this chunk */
- register struct _obstack_chunk *plp; /* point to previous chunk if any */
+ register struct _obstack_chunk *lp; /* below addr of any objects in this chunk */
+ register struct _obstack_chunk *plp; /* point to previous chunk if any */
lp = h->chunk;
/* We use >= because there cannot be an object at the beginning of a chunk.
struct obstack *h;
POINTER obj;
{
- register struct _obstack_chunk *lp; /* below addr of any objects in this chunk */
- register struct _obstack_chunk *plp; /* point to previous chunk if any */
+ register struct _obstack_chunk *lp; /* below addr of any objects in this chunk */
+ register struct _obstack_chunk *plp; /* point to previous chunk if any */
lp = h->chunk;
/* We use >= because there cannot be an object at the beginning of a chunk.
#endif /* 0 */
-#endif /* !ELIDE_CODE */
+#endif /* !ELIDE_CODE */
} /* fix_nothrow_call_list */
/* marking */
-#define SET_IRG_READY(irg) rbitset_set(ready_set, get_irg_idx(irg))
+#define SET_IRG_READY(irg) rbitset_set(ready_set, get_irg_idx(irg))
#define IS_IRG_READY(irg) rbitset_is_set(ready_set, get_irg_idx(irg))
#define SET_IRG_BUSY(irg) rbitset_set(busy_set, get_irg_idx(irg))
#define CLEAR_IRG_BUSY(irg) rbitset_clear(busy_set, get_irg_idx(irg))
if (entity_visited(entity))
continue;
- DB((dbg, LEVEL_1, " freeing method %+F\n", entity));
+ DB((dbg, LEVEL_1, " freeing method %+F\n", entity));
remove_irp_irg(irg);
}
for (i = 0; i < n_keep; ++i) {
marked[i] = keep_arr[i];
set_entity_link(marked[i], MARK);
- DB((dbg, LEVEL_1, " method %+F kept alive.\n", marked[i]));
+ DB((dbg, LEVEL_1, " method %+F kept alive.\n", marked[i]));
}
for (i = 0; i < ARR_LEN(marked); ++i) {
if (get_entity_link(ent) == MARK)
continue;
- DB((dbg, LEVEL_1, " freeing method %+F\n", ent));
+ DB((dbg, LEVEL_1, " freeing method %+F\n", ent));
remove_irp_irg(irg);
free_entity(ent);
#endif
#undef IMAX
-#define IMAX(a,b) ((a) > (b) ? (a) : (b))
+#define IMAX(a,b) ((a) > (b) ? (a) : (b))
-#define MAX_PROJ IMAX(IMAX(pn_Load_max, pn_Store_max), pn_Call_max)
+#define MAX_PROJ IMAX(IMAX(pn_Load_max, pn_Store_max), pn_Call_max)
enum changes_t {
DF_CHANGED = 1, /**< data flow changed */
ir_node *l = get_Sub_left(ptr);
ir_node *r = get_Sub_right(ptr);
- if (get_irn_mode(l) == get_irn_mode(ptr) && is_Const(r))
+ if (get_irn_mode(l) == get_irn_mode(ptr) && is_Const(r))
ptr = l;
else
return NULL;
/** A scc. */
typedef struct scc {
- ir_node *head; /**< the head of the list */
+ ir_node *head; /**< the head of the list */
} scc;
/** A node entry. */
/* Commandline parameters */
typedef struct loop_opt_params_t {
- unsigned max_loop_size; /* Maximum number of nodes */
- int depth_adaption; /* Loop nest depth adaption */
- unsigned allowed_calls; /* Number of calls allowed */
- unsigned count_phi:1; /* Count phi nodes */
- unsigned count_proj:1; /* Count projections */
+ unsigned max_loop_size; /* Maximum number of nodes */
+ int depth_adaption; /* Loop nest depth adaption */
+ unsigned allowed_calls; /* Number of calls allowed */
+ unsigned count_phi:1; /* Count phi nodes */
+ unsigned count_proj:1; /* Count projections */
- unsigned max_cc_size; /* Maximum condition chain size */
+ unsigned max_cc_size; /* Maximum condition chain size */
unsigned allow_const_unrolling:1;
unsigned allow_invar_unrolling:1;
/* Loop analysis informations */
typedef struct loop_info_t {
- unsigned nodes; /* node count */
- unsigned ld_st; /* load and store nodes */
- unsigned calls; /* number of calls */
- unsigned cf_outs; /* number of cf edges which leave the loop */
- entry_edge cf_out; /* single loop leaving cf edge */
- int be_src_pos; /* position of the single own backedge in the head */
+ unsigned nodes; /* node count */
+ unsigned ld_st; /* load and store nodes */
+ unsigned calls; /* number of calls */
+ unsigned cf_outs; /* number of cf edges which leave the loop */
+ entry_edge cf_out; /* single loop leaving cf edge */
+ int be_src_pos; /* position of the single own backedge in the head */
/* for inversion */
- unsigned cc_size; /* nodes in the condition chain */
+ unsigned cc_size; /* nodes in the condition chain */
/* for unrolling */
- unsigned max_unroll; /* Number of unrolls satisfying max_loop_size */
- unsigned exit_cond; /* 1 if condition==true exits the loop. */
- unsigned latest_value:1; /* 1 if condition is checked against latest counter value */
- unsigned needs_backedge:1; /* 0 if loop is completely unrolled */
- unsigned decreasing:1; /* Step operation is_Sub, or step is<0 */
+ unsigned max_unroll; /* Number of unrolls satisfying max_loop_size */
+ unsigned exit_cond; /* 1 if condition==true exits the loop. */
+ unsigned latest_value:1; /* 1 if condition is checked against latest counter value */
+ unsigned needs_backedge:1; /* 0 if loop is completely unrolled */
+ unsigned decreasing:1; /* Step operation is_Sub, or step is<0 */
/* IV informations of a simple loop */
ir_node *start_val;
ir_node *iteration_phi;
ir_node *add;
- tarval *count_tar; /* Number of loop iterations */
+ tarval *count_tar; /* Number of loop iterations */
- ir_node *duff_cond; /* Duff mod */
- unrolling_kind_flag unroll_kind; /* constant or invariant unrolling */
+ ir_node *duff_cond; /* Duff mod */
+ unrolling_kind_flag unroll_kind; /* constant or invariant unrolling */
} loop_info_t;
/* Information about the current loop */
* Order of ins is important for later usage.
*/
static void copy_walk(ir_node *node, walker_condition *walk_condition,
- ir_loop *set_loop)
+ ir_loop *set_loop)
{
int i;
int arity;
* / A* B / |
* / /\ / ? |
* / C* => D |
- * / D Head |
+ * / D Head |
* / A \_|
* C
*/
ir_node **ins;
ir_node **phis;
ir_node *phi, *next;
- ir_node *head_cp = get_inversion_copy(loop_head);
- int arity = get_irn_arity(head_cp);
- int backedges = get_backedge_n(head_cp, 0);
- int new_arity = arity - backedges;
+ ir_node *head_cp = get_inversion_copy(loop_head);
+ int arity = get_irn_arity(head_cp);
+ int backedges = get_backedge_n(head_cp, 0);
+ int new_arity = arity - backedges;
int pos;
int i;
ir_node **ins;
ir_node *phi, *next;
ir_node **phis;
- int arity = get_irn_arity(loop_head);
- int backedges = get_backedge_n(loop_head, 0);
- int new_arity = backedges;
+ int arity = get_irn_arity(loop_head);
+ int backedges = get_backedge_n(loop_head, 0);
+ int new_arity = backedges;
int pos;
int i;
if (! is_own_backedge(be_block, i)) {
ins[c] = get_irn_n(node, i);
++c;
- }
- /* } else {
+#if 0
+ } else {
ir_node *pred = get_inr_n(node, i);
if (! is_in_loop(pred)) {
ins[c] = pred;
++c;
}
- }*/
+#endif
+ }
}
return new_r_Phi(get_nodes_block(node), c, ins, get_irn_mode(node));
static unsigned get_unroll_decision_invariant(void)
{
- ir_node *projres, *loop_condition, *iteration_path;
- unsigned success, is_latest_val;
- tarval *start_tar, *step_tar;
- ir_mode *mode;
+ ir_node *projres, *loop_condition, *iteration_path;
+ unsigned success, is_latest_val;
+ tarval *start_tar, *step_tar;
+ ir_mode *mode;
/* RETURN if loop is not 'simple' */
projres = is_simple_loop();
/* TODO split. */
static unsigned get_unroll_decision_constant(void)
{
- ir_node *projres, *loop_condition, *iteration_path;
- unsigned success, is_latest_val;
- tarval *start_tar, *end_tar, *step_tar, *diff_tar, *count_tar, *stepped;
- pn_Cmp proj_proj, norm_proj;
- ir_mode *mode;
+ ir_node *projres, *loop_condition, *iteration_path;
+ unsigned success, is_latest_val;
+ tarval *start_tar, *end_tar, *step_tar, *diff_tar, *count_tar, *stepped;
+ pn_Cmp proj_proj, norm_proj;
+ ir_mode *mode;
/* RETURN if loop is not 'simple' */
projres = is_simple_loop();
/* Calls might be removed. */
set_trouts_inconsistent();
-
- // dump_ir_block_graph(irg, "-after");
}
for (bl = env.all_blocks; bl != NULL; bl = bl->all_next) {
#ifdef DEBUG_CONFIRM
-#define compare_iv(l_iv, r_iv, pnc) compare_iv_dbg(l_iv, r_iv, pnc)
+#define compare_iv(l_iv, r_iv, pnc) compare_iv_dbg(l_iv, r_iv, pnc)
/* forward */
static tarval *compare_iv_dbg(const interval_t *l_iv, const interval_t *r_iv, pn_Cmp pnc);
for (op = entry->memop_forward; op != NULL; op = op->next) {
if (i == 0) {
DB((dbg, LEVEL_2, "\n\t"));
- } DB((dbg, LEVEL_2, "%+F", op->node));
+ }
+ DB((dbg, LEVEL_2, "%+F", op->node));
if ((op->flags & FLAG_KILL_ALL) == FLAG_KILL_ALL)
DB((dbg, LEVEL_2, "X"));
else if (op->flags & FLAG_KILL_ALL)
ir_node *l = get_Sub_left(ptr);
ir_node *r = get_Sub_right(ptr);
- if (get_irn_mode(l) == get_irn_mode(ptr) && is_Const(r))
+ if (get_irn_mode(l) == get_irn_mode(ptr) && is_Const(r))
ptr = l;
else
return NULL;
current_ir_graph = irg;
FIRM_DBG_REGISTER(dbg, "firm.opt.ldst");
-// firm_dbg_set_mask(dbg, -1);
DB((dbg, LEVEL_1, "\nDoing Load/Store optimization on %+F\n", irg));
/* we need landing pads */
remove_critical_cf_edges(irg);
-// dump_ir_block_graph(irg, "-XXX");
-
if (get_opt_alias_analysis()) {
assure_irg_entity_usage_computed(irg);
assure_irp_globals_entity_usage_computed();
memset(bl->id_2_memop_antic, 0, env.rbs_size * sizeof(bl->id_2_memop_antic[0]));
}
-// dump_block_list(&env);
(void) dump_block_list;
calcAvail();
ir_nodemap_destroy(&env.adr_map);
obstack_free(&env.obst, NULL);
-// dump_ir_block_graph(irg, "-YYY");
-
#ifdef DEBUG_libfirm
DEL_ARR_F(env.id_2_address);
#endif
/** A scc. */
typedef struct scc {
- ir_node *head; /**< the head of the list */
+ ir_node *head; /**< the head of the list */
tarval *init; /**< the init value iff only one exists. */
tarval *incr; /**< the induction variable increment if only a single const exists. */
unsigned code; /**< == iro_Add if +incr, iro_Sub if -incr, 0 if not analysed, iro_Bad else */
* There might be more than one edge here. This is rather bad
* because we currently store only one.
*/
-// assert(LFTR_find(src, env) == NULL);
set_insert(env->lftr_edges, &key, sizeof(key), HASH_PTR(src));
} /* LFTR_add */
/* ---------------------------------------------------------------------------------- */
/** Marks the begin of a statistic (hook) function. */
-#define STAT_ENTER ++status->recursive
+#define STAT_ENTER ++status->recursive
/** Marks the end of a statistic (hook) functions. */
-#define STAT_LEAVE --status->recursive
+#define STAT_LEAVE --status->recursive
/** Allows to enter a statistic function only when we are not already in a hook. */
-#define STAT_ENTER_SINGLE do { if (status->recursive > 0) return; ++status->recursive; } while (0)
+#define STAT_ENTER_SINGLE do { if (status->recursive > 0) return; ++status->recursive; } while (0)
/**
* global status
ir_node *other_block = get_nodes_block(pred);
block_entry_t *b_entry_other = block_get_entry(&graph->recalc_cnts, get_irn_node_nr(other_block), graph->block_hash);
- cnt_inc(&b_entry->cnt[bcnt_in_edges]); /* an edge coming from another block */
+ cnt_inc(&b_entry->cnt[bcnt_in_edges]); /* an edge coming from another block */
cnt_inc(&b_entry_other->cnt[bcnt_out_edges]);
} /* for */
return;
other_block = get_nodes_block(pred);
if (other_block == block)
- cnt_inc(&b_entry->cnt[bcnt_edges]); /* a in block edge */
+ cnt_inc(&b_entry->cnt[bcnt_edges]); /* a in block edge */
else {
block_entry_t *b_entry_other = block_get_entry(&graph->recalc_cnts, get_irn_node_nr(other_block), graph->block_hash);
- cnt_inc(&b_entry->cnt[bcnt_in_edges]); /* an edge coming from another block */
+ cnt_inc(&b_entry->cnt[bcnt_in_edges]); /* an edge coming from another block */
cnt_inc(&b_entry_other->cnt[bcnt_out_edges]);
} /* if */
} /* for */
if (extbb != other_extbb) {
extbb_entry_t *eb_entry_other = block_get_entry(&graph->recalc_cnts, get_extbb_node_nr(other_extbb), graph->extbb_hash);
- cnt_inc(&eb_entry->cnt[bcnt_in_edges]); /* an edge coming from another extbb */
+ cnt_inc(&eb_entry->cnt[bcnt_in_edges]); /* an edge coming from another extbb */
cnt_inc(&eb_entry_other->cnt[bcnt_out_edges]);
} /* if */
} /* for */
ir_extblk *other_extbb = get_nodes_extbb(pred);
if (other_extbb == extbb)
- cnt_inc(&eb_entry->cnt[bcnt_edges]); /* a in extbb edge */
+ cnt_inc(&eb_entry->cnt[bcnt_edges]); /* a in extbb edge */
else {
extbb_entry_t *eb_entry_other = block_get_entry(&graph->recalc_cnts, get_extbb_node_nr(other_extbb), graph->extbb_hash);
- cnt_inc(&eb_entry->cnt[bcnt_in_edges]); /* an edge coming from another extbb */
+ cnt_inc(&eb_entry->cnt[bcnt_in_edges]); /* an edge coming from another extbb */
cnt_inc(&eb_entry_other->cnt[bcnt_out_edges]);
} /* if */
} /* for */
} /* if */
/* other parameter */
cnt_inc(&graph->cnt[gcnt_param_adr]);
-end_parameter: ;
+end_parameter: ;
} else {
/* unknown Pointer access */
cnt_inc(&graph->cnt[gcnt_other_adr]);
typedef unsigned char BYTE;
/** Maximum size of the pattern store. */
-#define PATTERN_STORE_SIZE 2048
+#define PATTERN_STORE_SIZE 2048
/**
if (last_node == NULL)
return 0;
- return get_entity_offset_bits_remainder(last_node);
+ return get_entity_offset_bits_remainder(last_node);
}
int get_compound_ent_n_values(const ir_entity *ent)
*/
void types_calc_finalization(void)
{
- if (! get_opt_closed_world())
- return;
+ if (! get_opt_closed_world())
+ return;
FIRM_DBG_REGISTER(dbg, "firm.tr.finalization");
struct fp_value {
ieee_descriptor_t desc;
char sign;
- char value[1]; /* exp[value_size] + mant[value_size] */
+ char value[1]; /* exp[value_size] + mant[value_size] */
};
#define _exp(a) &((a)->value[0])