/* test for gslice cross thread allocation/free
 * Copyright (C) 2006 Stefan Westerfeld
 * Copyright (C) 2007 Tim Janik
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <glib.h>
#include <stdlib.h>
#include <unistd.h>
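
/* Test dimensions: number of worker threads, slice allocations performed
 * per thread, and the maximum size in bytes of each allocated block.
 */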
#define N_THREADS       8
#define N_ALLOCS        50000
#define MAX_BLOCK_SIZE  64
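
/* Per-thread bookkeeping.  Any thread may append a freshly allocated block
 * to another thread's to_free queue (guarded by to_free_mutex); the owning
 * thread later releases the queued blocks, so slices are routinely freed by
 * a different thread than the one that allocated them.
 */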
struct ThreadData
{
  int      thread_id;
  GThread *gthread;

  GMutex  *to_free_mutex;
  void    *to_free[N_THREADS * N_ALLOCS];
  int      bytes_to_free[N_THREADS * N_ALLOCS];
  int      n_to_free;
  int      n_freed;
} tdata[N_THREADS];
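
/* Worker: performs N_ALLOCS iterations.  Each iteration allocates a slice of
 * random size, fills it with random bytes, queues it on a randomly chosen
 * thread's to_free list, and frees at most one block that other threads have
 * queued for this thread.  Occasional yields/sleeps perturb scheduling.
 */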
void*
thread_func (void *arg)
{
  struct ThreadData *td = arg;
  int i;
  // g_print ("Thread %d starting\n", td->thread_id);
  for (i = 0; i < N_ALLOCS; i++)
    {
      if (rand() % (N_ALLOCS / 20) == 0)
        g_print ("%c", 'a' - 1 + td->thread_id);

      /* allocate block of random size and randomly fill */
      int bytes = rand() % MAX_BLOCK_SIZE + 1;
      char *mem = g_slice_alloc (bytes);
      int f;
      for (f = 0; f < bytes; f++)
        mem[f] = rand();

      /* associate block with random thread */
      int t = rand() % N_THREADS;
      g_mutex_lock (tdata[t].to_free_mutex);
      tdata[t].to_free[tdata[t].n_to_free] = mem;
      tdata[t].bytes_to_free[tdata[t].n_to_free] = bytes;
      tdata[t].n_to_free++;
      g_mutex_unlock (tdata[t].to_free_mutex);

      /* shuffle thread execution order every once in a while */
      if (rand() % 97 == 0)
        {
          if (rand() % 2)
            g_thread_yield(); /* concurrent shuffling for single core */
          else
            g_usleep (1000);  /* concurrent shuffling for multi core */
        }

      /* free a block associated with this thread */
      g_mutex_lock (td->to_free_mutex);
      if (td->n_to_free > 0)
        {
          td->n_to_free--;
          g_slice_free1 (td->bytes_to_free[td->n_to_free], td->to_free[td->n_to_free]);
          td->n_freed++;
        }
      g_mutex_unlock (td->to_free_mutex);
    }

  return NULL;
}
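
/* Entry point: initializes per-thread state and mutexes, spawns N_THREADS
 * joinable workers, waits for all of them to finish, then reports how many
 * blocks each thread freed and how many remain queued on its list.
 */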
int
main()
{
  int t;

  g_thread_init (NULL);

  for (t = 0; t < N_THREADS; t++)
    {
      tdata[t].thread_id = t + 1;
      tdata[t].n_to_free = 0;
      tdata[t].n_freed = 0;
      tdata[t].to_free_mutex = g_mutex_new();
    }
  g_print ("Starting %d threads for concurrent GSlice usage...\n", N_THREADS);
  for (t = 0; t < N_THREADS; t++)
    {
      tdata[t].gthread = g_thread_create (thread_func, &tdata[t], TRUE, NULL);
      g_assert (tdata[t].gthread != NULL);
    }
  for (t = 0; t < N_THREADS; t++)
    {
      g_thread_join (tdata[t].gthread);
    }
  g_print ("\n");
  for (t = 0; t < N_THREADS; t++)
    {
      g_print ("Thread %d: %d blocks freed, %d blocks not freed\n",
               tdata[t].thread_id, tdata[t].n_freed, tdata[t].n_to_free);
    }
  return 0;
}