#include <stdlib.h>
#include <string.h>
#include <pthread.h>

/* The constants and struct below are referenced by the code but their
 * definitions are not part of this excerpt; the values here are assumptions
 * chosen so the file compiles on its own. */
#define N 1000000
#define SH_COUNT 300
#define MAX_SZ 500

struct foo {
    pthread_mutex_t lock;
    void *mem;
};

/* Allocate many 4000-byte blocks, then free all but every 150th one,
 * leaving the heap sparsely occupied. */
void bench_malloc_sparse() {
    void *p[10000];    /* array length assumed; the code only uses sizeof p/sizeof *p */
    size_t i;
    for (i=0; i<sizeof p/sizeof *p; i++) {
        p[i] = malloc(4000);
        memset(p[i], 0, 4000);
    }
    for (i=0; i<sizeof p/sizeof *p; i++)
        if (i%150) free(p[i]);
}
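
/* Same allocation pattern as above, but free every block except the last
 * one, in allocation order. */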
void bench_malloc_bubble() {
    void *p[10000];    /* array length assumed, as in bench_malloc_sparse */
    size_t i;
    for (i=0; i<sizeof p/sizeof *p; i++) {
        p[i] = malloc(4000);
        memset(p[i], 0, 4000);
    }
    for (i=0; i<sizeof p/sizeof *p-1; i++)
        free(p[i]);
}
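
/* N tiny allocations of 16 to 64 bytes each, released afterwards. */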
void bench_malloc_tiny1() {
    void **p = malloc(N * sizeof *p);
    size_t i;
    for (i=0; i<N; i++) {
        p[i] = malloc((i%4+1)*16);
    }
    /* the free pass is not in the excerpt; in-order frees assumed */
    for (i=0; i<N; i++) {
        free(p[i]);
    }
    free(p);
}
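
/* Same tiny allocations, but freed in a large-stride pseudo-random order. */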
void bench_malloc_tiny2() {
    void **p = malloc(N * sizeof *p);
    size_t i;
    for (i=0; i<N; i++) {
        p[i] = malloc((i%4+1)*16);
    }
    if (N>1) for (i=1; i; i = (i+1999)%N)
        free(p[i]);
    free(p[0]);    /* not shown in the excerpt; index 0 is skipped by the stride loop */
    free(p);
}
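
/* Bigger blocks, 16 KiB to 64 KiB each, freed in allocation order. */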
void bench_malloc_big1() {
    void *p[2000];    /* array length assumed */
    size_t i;
    for (i=0; i<sizeof p/sizeof *p; i++) {
        p[i] = malloc((i%4+1)*16384);
    }
    for (i=0; i<sizeof p/sizeof *p; i++) {
        free(p[i]);
    }
}
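
/* Same big blocks, freed in a large-stride order as in tiny2. */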
void bench_malloc_big2() {
    void *p[2000];    /* array length assumed */
    size_t i;
    for (i=0; i<sizeof p/sizeof *p; i++) {
        p[i] = malloc((i%4+1)*16384);
    }
    if (N>1) for (i=1; i; i = (i+1999)%(sizeof p/sizeof *p))
        free(p[i]);
    free(p[0]);    /* not shown in the excerpt; index 0 is skipped by the stride loop */
}
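
/* Minimal linear congruential PRNG; each stress thread keeps its own state word. */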
static unsigned rng(unsigned *r)
{
    return *r = *r * 1103515245 + 12345;
}
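
/* Worker loop shared by the threaded benchmarks: pick a random slot, empty
 * it under its lock, free whatever it held, then try to install a freshly
 * allocated block of random size. */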
static void *stress(void *arg)
{
    /* the loop-body lines missing from the excerpt are reconstructed from
     * the surrounding lock/install logic */
    struct foo *foo = arg;
    unsigned r = (unsigned)pthread_self();
    int i, j;
    size_t sz;
    void *p;

    for (i=0; i<N; i++) {
        j = rng(&r) % SH_COUNT;
        sz = rng(&r) % MAX_SZ;
        pthread_mutex_lock(&foo[j].lock);
        p = foo[j].mem;    /* take whatever the slot currently holds */
        foo[j].mem = 0;
        pthread_mutex_unlock(&foo[j].lock);
        free(p);
        if (!p) {
            p = malloc(sz);
            pthread_mutex_lock(&foo[j].lock);
            if (!foo[j].mem) foo[j].mem = p, p = 0;
            pthread_mutex_unlock(&foo[j].lock);
            free(p);    /* another thread refilled the slot first; discard ours */
        }
    }
    return 0;
}
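
/* Two threads operate on the same slot array, so blocks allocated by one
 * thread are routinely freed by the other (cross-thread free traffic). */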
void bench_malloc_thread_stress() {
    struct foo foo[SH_COUNT] = {{0}};
    pthread_t td1, td2;
    void *res;

    pthread_create(&td1, 0, stress, foo);
    pthread_create(&td2, 0, stress, foo);
    pthread_join(td1, &res);
    pthread_join(td2, &res);
}
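
/* Same workload, but each thread gets its own slot array, so allocations
 * and frees stay thread-local. */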
void bench_malloc_thread_local() {
    struct foo foo1[SH_COUNT] = {{0}};
    struct foo foo2[SH_COUNT] = {{0}};
    pthread_t td1, td2;
    void *res;

    pthread_create(&td1, 0, stress, foo1);
    pthread_create(&td2, 0, stress, foo2);
    pthread_join(td1, &res);
    pthread_join(td2, &res);
}
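
/* Hypothetical standalone driver, not part of the original source: the real
 * harness that invokes these bench_* functions lives elsewhere. The
 * BENCH_DEMO_MAIN guard and run() helper are made up for this sketch.
 * Build this file alone with something like:
 *     cc -O2 -pthread -DBENCH_DEMO_MAIN <this file> */
#ifdef BENCH_DEMO_MAIN
#include <stdio.h>
#include <time.h>

static void run(const char *name, void (*f)()) {
    struct timespec t0, t1;
    clock_gettime(CLOCK_MONOTONIC, &t0);
    f();
    clock_gettime(CLOCK_MONOTONIC, &t1);
    printf("%-20s %8.1f ms\n", name,
           (t1.tv_sec - t0.tv_sec)*1e3 + (t1.tv_nsec - t0.tv_nsec)/1e6);
}

int main() {
    run("malloc_sparse", bench_malloc_sparse);
    run("malloc_tiny1", bench_malloc_tiny1);
    run("malloc_big1", bench_malloc_big1);
    run("malloc_thread_stress", bench_malloc_thread_stress);
    return 0;
}
#endif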