/* Benchmark: sparse heap pattern.  Allocate an array of blocks, touch each
 * one, then free all but every 150th — leaving long-lived allocations
 * scattered through an otherwise-freed heap.
 * NOTE(review): this is an elided excerpt; the declaration of p[] and the
 * malloc call itself are on lines not visible here — presumably each p[i]
 * is a ~4000-byte allocation, to match the memset below. Verify upstream. */
7 void bench_malloc_sparse(int N) {
10 for (i=0; i<sizeof p/sizeof *p; i++) {
/* Touch the whole block so its pages are actually committed. */
12 memset(p[i], 0, 4000);
14 for (i=0; i<sizeof p/sizeof *p; i++)
/* Keep every 150th block (i%150==0) live; free the other 149 of each 150. */
15 if (i%150) free(p[i]);
/* Benchmark: "bubble" pattern.  Allocate and touch an array of blocks, then
 * free all but the last one (loop bound is count-1), so a single surviving
 * allocation at the top of the heap pins it and prevents it from shrinking.
 * NOTE(review): elided excerpt — the p[] declaration, the malloc call, and
 * the free-loop body are on lines not visible here; confirm against the
 * full source. */
18 void bench_malloc_bubble(int N) {
21 for (i=0; i<sizeof p/sizeof *p; i++) {
/* Touch the block so the pages are actually committed. */
23 memset(p[i], 0, 4000);
/* Bound is count-1: the final element is intentionally never freed. */
25 for (i=0; i<sizeof p/sizeof *p-1; i++)
/* Benchmark: many tiny allocations, sizes cycling 16/32/48/64 bytes.
 * Variant 1 — NOTE(review): the free loop (lines after 33) is elided here;
 * presumably this variant frees in allocation order, in contrast to
 * bench_malloc_tiny2's strided order below. Verify against full source. */
29 void bench_malloc_tiny1(int N) {
/* Pointer table sized by the caller-supplied iteration count N. */
30 void **p = malloc(N * sizeof *p);
/* Size pattern: (i%4+1)*16 => 16, 32, 48, 64 bytes, repeating. */
33 p[i] = malloc((i%4+1)*16);
/* Benchmark: many tiny allocations (16..64 bytes), freed in a strided
 * pseudo-random order rather than allocation order.
 * NOTE(review): elided excerpt — the loop body performing the free on the
 * strided index is not visible here. */
41 void bench_malloc_tiny2(int N) {
42 void **p = malloc(N * sizeof *p);
/* Size pattern: (i%4+1)*16 => 16, 32, 48, 64 bytes, repeating. */
45 p[i] = malloc((i%4+1)*16);
/* Walk indices 1, 1+1999, ... mod N; stops when the walk returns to 0.
 * 1999 is prime, so this visits every nonzero index unless N is a
 * multiple of 1999. Guarded by N>1 to avoid i%1 == 0 immediately. */
47 if (N>1) for (i=1; i; i = (i+1999)%N)
/* Benchmark: large allocations, sizes cycling 16K/32K/48K/64K, freed in
 * allocation order.
 * NOTE(review): elided excerpt — the p[] declaration and the bodies of the
 * two loops (touch and free, presumably) are on lines not visible here. */
53 void bench_malloc_big1(int N) {
56 for (i=0; i<sizeof p/sizeof *p; i++) {
/* Size pattern: (i%4+1)*16384 => 16KiB, 32KiB, 48KiB, 64KiB, repeating. */
57 p[i] = malloc((i%4+1)*16384);
59 for (i=0; i<sizeof p/sizeof *p; i++) {
/* Benchmark: large allocations (16K..64K), freed in a strided pseudo-random
 * order — the big-block counterpart of bench_malloc_tiny2.
 * NOTE(review): elided excerpt — the p[] declaration and the strided loop's
 * body (the free) are on lines not visible here. */
64 void bench_malloc_big2(int N) {
67 for (i=0; i<sizeof p/sizeof *p; i++) {
/* Size pattern: (i%4+1)*16384 => 16KiB..64KiB, repeating. */
68 p[i] = malloc((i%4+1)*16384);
/* Stride-1999 walk over the index space, stopping when it wraps to 0;
 * here the modulus is the table size rather than N (cf. tiny2). */
70 if (N>1) for (i=1; i; i = (i+1999)%(sizeof p/sizeof *p))
/* Minimal linear congruential PRNG: advances *r in place and returns the
 * new state. The multiplier/increment (1103515245, 12345) are the classic
 * constants from the C standard's example rand() implementation. Unsigned
 * arithmetic, so overflow wraps — no UB. Not for cryptographic use. */
86 static unsigned rng(unsigned *r)
88 return *r = *r * 1103515245 + 12345;
/* Worker thread body for the malloc stress benchmarks: repeatedly picks a
 * random slot in the shared foo[] table and a random size, and exchanges
 * allocations with that slot under its per-slot mutex.
 * arg: pointer to an array of SH_COUNT struct foo, shared with 0+ peers.
 * NOTE(review): elided excerpt — struct foo's definition, the declarations
 * of i/j/sz/p, the allocation of p, and the code between the two locked
 * sections are on lines not visible here; N appears to be a global or
 * macro iteration count. Verify against the full source. */
93 static void *stress(void *arg)
95 struct foo *foo = arg;
/* Seed the per-thread PRNG from the thread id so threads diverge.
 * NOTE(review): casting pthread_t to unsigned is nonportable — POSIX does
 * not require pthread_t to be an arithmetic type. Works on common
 * platforms; flag for portability review. */
96 unsigned r = (unsigned)pthread_self();
101 for (i=0; i<N; i++) {
/* Random shared slot and random allocation size for this iteration. */
102 j = rng(&r) % SH_COUNT;
103 sz = rng(&r) % MAX_SZ;
/* First critical section: per-slot lock; the slot manipulation between
 * lock and unlock (line 105-106) is elided here. */
104 pthread_mutex_lock(&foo[j].lock);
107 pthread_mutex_unlock(&foo[j].lock);
/* Second critical section: if the slot is empty, donate our block p to it
 * (comma expression stores p then clears it, transferring ownership). */
111 pthread_mutex_lock(&foo[j].lock);
112 if (!foo[j].mem) foo[j].mem = p, p = 0;
113 pthread_mutex_unlock(&foo[j].lock);
/* Benchmark: two threads hammering the SAME shared slot table, so the
 * allocator sees cross-thread contention and cross-thread frees.
 * NOTE(review): elided excerpt — the declarations of td1/td2/res and any
 * use of parameter n (lines 122-125) are not visible here; the cleanup
 * after the joins (line 130+) is also elided. pthread_create/join return
 * values are unchecked — acceptable for a benchmark harness. */
120 void bench_malloc_thread_stress(int n) {
/* Zero-initialize every slot (mem pointers null, mutexes zeroed —
 * assumes PTHREAD_MUTEX_INITIALIZER-compatible zero init; verify). */
121 struct foo foo[SH_COUNT] = {{0}};
/* Both workers share the same foo[] table -> contended case. */
126 pthread_create(&td1, 0, stress, foo);
127 pthread_create(&td2, 0, stress, foo);
128 pthread_join(td1, &res);
129 pthread_join(td2, &res);
/* Benchmark: two threads each working on their OWN slot table — the
 * uncontended counterpart of bench_malloc_thread_stress, isolating the
 * allocator's per-thread fast path.
 * NOTE(review): elided excerpt — the declarations of td1/td2/res and any
 * use of parameter n (lines 135-138) are not visible here, nor is the
 * cleanup after the joins. */
132 void bench_malloc_thread_local(int n) {
/* One zero-initialized slot table per worker -> no cross-thread sharing. */
133 struct foo foo1[SH_COUNT] = {{0}};
134 struct foo foo2[SH_COUNT] = {{0}};
139 pthread_create(&td1, 0, stress, foo1);
140 pthread_create(&td2, 0, stress, foo2);
141 pthread_join(td1, &res);
142 pthread_join(td2, &res);