/* Copyright (C) 2000-2019 Red Hat, Inc.
   This file is part of elfutils.
   Written by Srdan Milakovic <sm108@rice.edu>, 2019.
   Derived from Ulrich Drepper <drepper@redhat.com>, 2000.

   This file is free software; you can redistribute it and/or modify
   it under the terms of either

     * the GNU Lesser General Public License as published by the Free
       Software Foundation; either version 3 of the License, or (at
       your option) any later version

   or

     * the GNU General Public License as published by the Free
       Software Foundation; either version 2 of the License, or (at
       your option) any later version

   or both in parallel, as here.

   elfutils is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received copies of the GNU General Public License and
   the GNU Lesser General Public License along with this program.  If
   not, see <http://www.gnu.org/licenses/>.  */

#include <assert.h>
#include <stdlib.h>
#include <system.h>
#include <pthread.h>

/* Before including this file the following macros must be defined:

   NAME      name of the hash table structure.
   TYPE      data type of the hash table entries.
   HASHTYPE  integer type of the hash values stored in the table.
 */

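/* A sketch of what an instantiation might look like (the macro values
   here are purely illustrative; the real ones are supplied by the file
   that includes this one):

     #define NAME     example_htab
     #define TYPE     void *
     #define HASHTYPE uintptr_t
     ...then include this file.
 */
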
static size_t
lookup (NAME *htab, HASHTYPE hval)
{
  /* First hash function: simply take the modulus, but prevent zero.  Small
     values can skip the division, which helps performance when this is
     common.  */
  size_t idx = 1 + (hval < htab->size ? hval : hval % htab->size);

  HASHTYPE hash;

  hash = atomic_load_explicit(&htab->table[idx].hashval,
                              memory_order_acquire);
  if (hash == hval)
    return idx;
  else if (hash == 0)
    return 0;

  /* Second hash function as suggested in [Knuth].  */
  HASHTYPE second_hash = 1 + hval % (htab->size - 2);

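  /* Probe by repeatedly stepping idx backwards by second_hash, wrapping
     within [1, size].  Because size is prime and second_hash lies in
     [1, size - 2], the sequence eventually visits every slot.  */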
  for (;;)
    {
      if (idx <= second_hash)
        idx = htab->size + idx - second_hash;
      else
        idx -= second_hash;

      hash = atomic_load_explicit(&htab->table[idx].hashval,
                                  memory_order_acquire);
      if (hash == hval)
        return idx;
      else if (hash == 0)
        return 0;
    }
}

static int
insert_helper (NAME *htab, HASHTYPE hval, TYPE val)
{
  /* First hash function: simply take the modulus, but prevent zero.  Small
     values can skip the division, which helps performance when this is
     common.  */
  size_t idx = 1 + (hval < htab->size ? hval : hval % htab->size);

  TYPE val_ptr;
  HASHTYPE hash;

  hash = atomic_load_explicit(&htab->table[idx].hashval,
                              memory_order_acquire);
  if (hash == hval)
    return -1;
  else if (hash == 0)
    {
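      /* The slot looks empty.  Try to claim it by CAS'ing val_ptr from
         NULL to val; on failure val_ptr receives the value stored by
         the thread that won the race.  */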
      val_ptr = NULL;
      atomic_compare_exchange_strong_explicit(&htab->table[idx].val_ptr,
                                              (uintptr_t *) &val_ptr,
                                              (uintptr_t) val,
                                              memory_order_acquire,
                                              memory_order_acquire);

      if (val_ptr == NULL)
        {
          atomic_store_explicit(&htab->table[idx].hashval, hval,
                                memory_order_release);
          return 0;
        }
      else
        {
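          /* Another thread claimed this slot first.  Wait until it
             publishes its hash value, then treat the slot as a
             collision (or as a duplicate if the hashes match).  */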
          do
            {
              hash = atomic_load_explicit(&htab->table[idx].hashval,
                                          memory_order_acquire);
            }
          while (hash == 0);
          if (hash == hval)
            return -1;
        }
    }

  /* Second hash function as suggested in [Knuth].  */
  HASHTYPE second_hash = 1 + hval % (htab->size - 2);

  for (;;)
    {
      if (idx <= second_hash)
        idx = htab->size + idx - second_hash;
      else
        idx -= second_hash;

      hash = atomic_load_explicit(&htab->table[idx].hashval,
                                  memory_order_acquire);
      if (hash == hval)
        return -1;
      else if (hash == 0)
        {
          /* Same slot-claiming protocol as above.  */
          val_ptr = NULL;
          atomic_compare_exchange_strong_explicit(&htab->table[idx].val_ptr,
                                                  (uintptr_t *) &val_ptr,
                                                  (uintptr_t) val,
                                                  memory_order_acquire,
                                                  memory_order_acquire);

          if (val_ptr == NULL)
            {
              atomic_store_explicit(&htab->table[idx].hashval, hval,
                                    memory_order_release);
              return 0;
            }
          else
            {
              do
                {
                  hash = atomic_load_explicit(&htab->table[idx].hashval,
                                              memory_order_acquire);
                }
              while (hash == 0);
              if (hash == hval)
                return -1;
            }
        }
    }
}

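/* The resize state is a single atomic word: the low STATE_BITS bits
   hold one of the four states below and the remaining bits count the
   threads currently helping with the resize.  The state values are
   chosen so that bit 0 is set exactly while a resize is in progress
   (ALLOCATING_MEMORY and MOVING_DATA), which is what
   IS_NO_RESIZE_OR_CLEANING tests.  */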
#define NO_RESIZING 0u
#define ALLOCATING_MEMORY 1u
#define MOVING_DATA 3u
#define CLEANING 2u

#define STATE_BITS 2u
#define STATE_INCREMENT (1u << STATE_BITS)
#define STATE_MASK (STATE_INCREMENT - 1)
#define GET_STATE(A) ((A) & STATE_MASK)

#define IS_NO_RESIZE_OR_CLEANING(A) (((A) & 0x1u) == 0)

#define GET_ACTIVE_WORKERS(A) ((A) >> STATE_BITS)

#define INITIALIZATION_BLOCK_SIZE 256
#define MOVE_BLOCK_SIZE 256
#define CEIL(A, B) (((A) + (B) - 1) / (B))

/* Initialize the records of the new table and move the entries over
   from the old one.  The work is split into blocks and can be shared
   with other threads that call this function concurrently.  */
static void
resize_helper (NAME *htab, int blocking)
{
  size_t num_old_blocks = CEIL(htab->old_size, MOVE_BLOCK_SIZE);
  size_t num_new_blocks = CEIL(htab->size, INITIALIZATION_BLOCK_SIZE);

  size_t my_block;
  size_t num_finished_blocks = 0;

  while ((my_block = atomic_fetch_add_explicit(&htab->next_init_block, 1,
                                                memory_order_acquire))
                                                    < num_new_blocks)
    {
      size_t record_it = my_block * INITIALIZATION_BLOCK_SIZE;
      size_t record_end = (my_block + 1) * INITIALIZATION_BLOCK_SIZE;
      if (record_end > htab->size)
          record_end = htab->size;

      while (record_it++ != record_end)
        {
          atomic_init(&htab->table[record_it].hashval, (uintptr_t) NULL);
          atomic_init(&htab->table[record_it].val_ptr, (uintptr_t) NULL);
        }

      num_finished_blocks++;
    }

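  /* Publish how many blocks we initialized, then wait until every
     block is done: a simple barrier built from the two counters.  */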
  atomic_fetch_add_explicit(&htab->num_initialized_blocks,
                            num_finished_blocks, memory_order_release);
  while (atomic_load_explicit(&htab->num_initialized_blocks,
                              memory_order_acquire) != num_new_blocks);

  /* All blocks are initialized; start moving.  */
  num_finished_blocks = 0;
  while ((my_block = atomic_fetch_add_explicit(&htab->next_move_block, 1,
                                                memory_order_acquire))
                                                    < num_old_blocks)
    {
      size_t record_it = my_block * MOVE_BLOCK_SIZE;
      size_t record_end = (my_block + 1) * MOVE_BLOCK_SIZE;
      if (record_end > htab->old_size)
          record_end = htab->old_size;

      while (record_it++ != record_end)
        {
          TYPE val_ptr = (TYPE) atomic_load_explicit(
              &htab->old_table[record_it].val_ptr,
              memory_order_acquire);
          if (val_ptr == NULL)
              continue;

          HASHTYPE hashval = atomic_load_explicit(
              &htab->old_table[record_it].hashval,
              memory_order_acquire);
          assert(hashval);

          insert_helper(htab, hashval, val_ptr);
        }

      num_finished_blocks++;
    }

  atomic_fetch_add_explicit(&htab->num_moved_blocks, num_finished_blocks,
                            memory_order_release);

  /* Only the master blocks until every entry has moved; workers may
     leave as soon as their own blocks are done.  */
  if (blocking)
      while (atomic_load_explicit(&htab->num_moved_blocks,
                                  memory_order_acquire) != num_old_blocks);
}

static void
resize_master (NAME *htab)
{
  htab->old_size = htab->size;
  htab->old_table = htab->table;

  htab->size = next_prime(htab->size * 2);
  htab->table = malloc((1 + htab->size) * sizeof(htab->table[0]));
  assert(htab->table);

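  /* atomic_fetch_xor with OLD ^ NEW flips the low state bits from OLD
     to NEW while leaving the worker count in the high bits intact.  */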
  /* Change state from ALLOCATING_MEMORY to MOVING_DATA.  */
  atomic_fetch_xor_explicit(&htab->resizing_state,
                            ALLOCATING_MEMORY ^ MOVING_DATA,
                            memory_order_release);

  resize_helper(htab, 1);

  /* Change state from MOVING_DATA to CLEANING.  */
  size_t resize_state = atomic_fetch_xor_explicit(&htab->resizing_state,
                                                  MOVING_DATA ^ CLEANING,
                                                  memory_order_acq_rel);
  while (GET_ACTIVE_WORKERS(resize_state) != 0)
      resize_state = atomic_load_explicit(&htab->resizing_state,
                                          memory_order_acquire);

  /* There are no more active workers; reset the block counters for the
     next resize.  */
  atomic_store_explicit(&htab->next_init_block, 0, memory_order_relaxed);
  atomic_store_explicit(&htab->num_initialized_blocks, 0,
                        memory_order_relaxed);

  atomic_store_explicit(&htab->next_move_block, 0, memory_order_relaxed);
  atomic_store_explicit(&htab->num_moved_blocks, 0, memory_order_relaxed);

  free(htab->old_table);

  /* Change state to NO_RESIZING.  */
  atomic_fetch_xor_explicit(&htab->resizing_state, CLEANING ^ NO_RESIZING,
                            memory_order_relaxed);
}

static void
resize_worker (NAME *htab)
{
  size_t resize_state = atomic_load_explicit(&htab->resizing_state,
                                              memory_order_acquire);

  /* If the resize has already finished there is nothing to help with.  */
  if (IS_NO_RESIZE_OR_CLEANING(resize_state))
      return;

  /* Register as a worker and check whether the resize finished in the
     meantime.  */
  resize_state = atomic_fetch_add_explicit(&htab->resizing_state,
                                            STATE_INCREMENT,
                                            memory_order_acquire);
  if (IS_NO_RESIZE_OR_CLEANING(resize_state))
    {
      atomic_fetch_sub_explicit(&htab->resizing_state, STATE_INCREMENT,
                                memory_order_relaxed);
      return;
    }

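  /* While memory is being allocated the master may still be swapping
     the table pointers, so there is nothing a worker can safely do
     yet.  */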
  /* Wait while the new table is being allocated.  */
  while (GET_STATE(resize_state) == ALLOCATING_MEMORY)
      resize_state = atomic_load_explicit(&htab->resizing_state,
                                          memory_order_acquire);

  /* Check if the resize is done.  */
  assert(GET_STATE(resize_state) != NO_RESIZING);
  if (GET_STATE(resize_state) == CLEANING)
    {
      atomic_fetch_sub_explicit(&htab->resizing_state, STATE_INCREMENT,
                                memory_order_relaxed);
      return;
    }

  resize_helper(htab, 0);

  /* Deregister worker.  */
  atomic_fetch_sub_explicit(&htab->resizing_state, STATE_INCREMENT,
                            memory_order_release);
}


int
#define INIT(name) _INIT (name)
#define _INIT(name) \
  name##_init
INIT(NAME) (NAME *htab, size_t init_size)
{
  /* We need the size to be a prime.  */
  init_size = next_prime (init_size);

  /* Initialize the data structure.  */
  htab->size = init_size;
  atomic_init(&htab->filled, 0);
  atomic_init(&htab->resizing_state, 0);

  atomic_init(&htab->next_init_block, 0);
  atomic_init(&htab->num_initialized_blocks, 0);

  atomic_init(&htab->next_move_block, 0);
  atomic_init(&htab->num_moved_blocks, 0);

  pthread_rwlock_init(&htab->resize_rwl, NULL);

  htab->table = (void *) malloc ((init_size + 1) * sizeof (htab->table[0]));
  if (htab->table == NULL)
      return -1;

  for (size_t i = 0; i <= init_size; i++)
    {
      atomic_init(&htab->table[i].hashval, (uintptr_t) NULL);
      atomic_init(&htab->table[i].val_ptr, (uintptr_t) NULL);
    }

  return 0;
}


int
#define FREE(name) _FREE (name)
#define _FREE(name) \
name##_free
FREE(NAME) (NAME *htab)
{
  pthread_rwlock_destroy(&htab->resize_rwl);
  free (htab->table);
  return 0;
}


int
#define INSERT(name) _INSERT (name)
#define _INSERT(name) \
name##_insert
INSERT(NAME) (NAME *htab, HASHTYPE hval, TYPE data)
{
  int incremented = 0;

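  /* Retry until we hold the read lock with enough free space.  The
     fill counter is incremented only once, even across retries; if the
     insert later turns out to be a duplicate it is decremented again
     at the end.  */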
  for (;;)
    {
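      /* tryrdlock fails only while a resize holds the write lock, so
         instead of blocking, help the resize along.  */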
      while (pthread_rwlock_tryrdlock(&htab->resize_rwl) != 0)
          resize_worker(htab);

      size_t filled;
      if (!incremented)
        {
          filled = atomic_fetch_add_explicit(&htab->filled, 1,
                                              memory_order_acquire);
          incremented = 1;
        }
      else
        {
          filled = atomic_load_explicit(&htab->filled,
                                        memory_order_acquire);
        }

      if (100 * filled > 90 * htab->size)
        {
          /* The table is more than 90% full.  Resize it.  */

          size_t resizing_state = atomic_load_explicit(&htab->resizing_state,
                                                        memory_order_acquire);
          if (resizing_state == 0 &&
              atomic_compare_exchange_strong_explicit(&htab->resizing_state,
                                                      &resizing_state,
                                                      ALLOCATING_MEMORY,
                                                      memory_order_acquire,
                                                      memory_order_acquire))
            {
              /* Master thread: the CAS from NO_RESIZING to
                 ALLOCATING_MEMORY elected this thread to run the
                 resize.  */
              pthread_rwlock_unlock(&htab->resize_rwl);

              pthread_rwlock_wrlock(&htab->resize_rwl);
              resize_master(htab);
              pthread_rwlock_unlock(&htab->resize_rwl);
            }
          else
            {
              /* Worker thread.  */
              pthread_rwlock_unlock(&htab->resize_rwl);
              resize_worker(htab);
            }
        }
      else
        {
          /* Lock acquired and no resize is needed.  */
          break;
        }
    }

  int ret_val = insert_helper(htab, hval, data);
  if (ret_val == -1)
      atomic_fetch_sub_explicit(&htab->filled, 1, memory_order_relaxed);
  pthread_rwlock_unlock(&htab->resize_rwl);
  return ret_val;
}


TYPE
#define FIND(name) _FIND (name)
#define _FIND(name) \
  name##_find
FIND(NAME) (NAME *htab, HASHTYPE hval)
{
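  /* If a resize is in progress, help it finish instead of blocking on
     the read lock.  */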
  while (pthread_rwlock_tryrdlock(&htab->resize_rwl) != 0)
      resize_worker(htab);

  size_t idx;

  /* Make the hash value nonzero: zero marks an empty slot.  */
  hval = hval ?: 1;
  idx = lookup(htab, hval);

  if (idx == 0)
    {
      pthread_rwlock_unlock(&htab->resize_rwl);
      return NULL;
    }

  /* Get a copy of the value before releasing the lock.  */
  TYPE ret_val = (TYPE) atomic_load_explicit(&htab->table[idx].val_ptr,
                                             memory_order_relaxed);

  pthread_rwlock_unlock(&htab->resize_rwl);
  return ret_val;
}
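
/* A sketch of using the generated interface, again with the
   hypothetical instantiation NAME = example_htab, TYPE = void *,
   HASHTYPE = uintptr_t from above (hash values must be nonzero, since
   zero marks an empty slot):

     example_htab htab;
     example_htab_init (&htab, 1021);
     example_htab_insert (&htab, hash, data);    returns -1 on duplicate
     void *found = example_htab_find (&htab, hash);
     example_htab_free (&htab);
 */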