/*
   Unix SMB/CIFS implementation.

   trivial database library

   Copyright (C) Anton Blanchard 2001

   ** NOTE! The following LGPL license applies to the tdb
   ** library. This does NOT imply that all of Samba is released
   ** under the LGPL

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
*/
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <fcntl.h>
#include <errno.h>
#include <sys/stat.h>
#include <time.h>
#include <signal.h>
#include "tdb.h"
#include "spinlock.h"

#define DEBUG

#ifdef USE_SPINLOCKS

/*
 * ARCH SPECIFIC
 */

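/* Each architecture branch below supplies the same four primitives used by
 * the generic code further down: __spin_trylock() returns 0 on success or
 * EBUSY if the lock is held, __spin_unlock() releases the lock,
 * __spin_lock_init() puts the lock into its unlocked state, and
 * __spin_is_locked() reports whether the lock is currently held. */
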
#if defined(SPARC_SPINLOCKS)

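/* ldstub atomically loads the lock byte and stores 0xff into it; a zero
 * result means the lock was previously free and has just been acquired. */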
static inline int __spin_trylock(spinlock_t *lock)
{
	unsigned int result;

	asm volatile("ldstub [%1], %0"
		: "=r" (result)
		: "r" (lock)
		: "memory");

	return (result == 0) ? 0 : EBUSY;
}

static inline void __spin_unlock(spinlock_t *lock)
{
	asm volatile("":::"memory");
	*lock = 0;
}

static inline void __spin_lock_init(spinlock_t *lock)
{
	*lock = 0;
}

static inline int __spin_is_locked(spinlock_t *lock)
{
	return (*lock != 0);
}

#elif defined(POWERPC_SPINLOCKS)

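/* lwarx/stwcx. form a load-reserved/store-conditional pair: the store only
 * succeeds if the lock word is untouched since the load, so the loop retries
 * until it atomically changes 0 to 1; isync orders the accesses that follow. */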
static inline int __spin_trylock(spinlock_t *lock)
{
	unsigned int result;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%1\n\
	cmpwi	0,%0,0\n\
	li	%0,0\n\
	bne-	2f\n\
	li	%0,1\n\
	stwcx.	%0,0,%1\n\
	bne-	1b\n\
	isync\n\
2:"	: "=&r"(result)
	: "r"(lock)
	: "cr0", "memory");

	return (result == 1) ? 0 : EBUSY;
}

static inline void __spin_unlock(spinlock_t *lock)
{
	asm volatile("eieio":::"memory");
	*lock = 0;
}

static inline void __spin_lock_init(spinlock_t *lock)
{
	*lock = 0;
}

static inline int __spin_is_locked(spinlock_t *lock)
{
	return (*lock != 0);
}

#elif defined(INTEL_SPINLOCKS)

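/* Note the inverted sense on x86: the lock word is 1 when free and 0 when
 * held.  xchgl atomically swaps 0 into the lock and returns the old value,
 * so a positive old value means the lock has just been acquired. */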
static inline int __spin_trylock(spinlock_t *lock)
{
	int oldval;

	asm volatile("xchgl %0,%1"
		: "=r" (oldval), "=m" (*lock)
		: "0" (0)
		: "memory");

	return oldval > 0 ? 0 : EBUSY;
}

static inline void __spin_unlock(spinlock_t *lock)
{
	asm volatile("":::"memory");
	*lock = 1;
}

static inline void __spin_lock_init(spinlock_t *lock)
{
	*lock = 1;
}

static inline int __spin_is_locked(spinlock_t *lock)
{
	return (*lock != 1);
}

#elif defined(MIPS_SPINLOCKS) && defined(sgi) && (_COMPILER_VERSION >= 730)

/* Implement spinlocks on IRIX using the MIPSPro atomic fetch operations. See
 * sync(3) for the details of the intrinsic operations.
 *
 * "sgi" and "_COMPILER_VERSION" are always defined by MIPSPro.
 */

#ifdef STANDALONE

/* MIPSPro 7.3 has "__inline" as an extension, but not "inline". */
#define inline __inline

#endif /* STANDALONE */

/* Returns 0 if the lock is acquired, EBUSY otherwise. */
static inline int __spin_trylock(spinlock_t *lock)
{
	unsigned int val;
	val = __lock_test_and_set(lock, 1);
	return val == 0 ? 0 : EBUSY;
}

static inline void __spin_unlock(spinlock_t *lock)
{
	__lock_release(lock);
}

static inline void __spin_lock_init(spinlock_t *lock)
{
	__lock_release(lock);
}

/* Returns 1 if the lock is held, 0 otherwise. */
static inline int __spin_is_locked(spinlock_t *lock)
{
	unsigned int val;
	val = __add_and_fetch(lock, 0);
	return val;
}

#elif defined(MIPS_SPINLOCKS)

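/* Generic MIPS version using ll (load linked) and sc (store conditional):
 * sc only succeeds if the lock word is unchanged since the ll, so the
 * trylock loop either sees the lock already held or atomically sets it to 1. */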
static inline unsigned int load_linked(unsigned long addr)
{
	unsigned int res;

	__asm__ __volatile__("ll\t%0,(%1)"
		: "=r" (res)
		: "r" (addr));

	return res;
}

static inline unsigned int store_conditional(unsigned long addr, unsigned int value)
{
	unsigned int res;

	__asm__ __volatile__("sc\t%0,(%2)"
		: "=r" (res)
		: "0" (value), "r" (addr));
	return res;
}

static inline int __spin_trylock(spinlock_t *lock)
{
	unsigned int mw;

	do {
		mw = load_linked((unsigned long)lock);
		if (mw)
			return EBUSY;
	} while (!store_conditional((unsigned long)lock, 1));

	asm volatile("":::"memory");

	return 0;
}

static inline void __spin_unlock(spinlock_t *lock)
{
	asm volatile("":::"memory");
	*lock = 0;
}

static inline void __spin_lock_init(spinlock_t *lock)
{
	*lock = 0;
}

static inline int __spin_is_locked(spinlock_t *lock)
{
	return (*lock != 0);
}

#else
#error Need to implement spinlock code in spinlock.c
#endif

/*
 * OS SPECIFIC
 */

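/* Give up the CPU while a lock is contended: either sched_yield() or a
 * nanosleep of just over 2ms so the kernel really does put us to sleep. */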
static void yield_cpu(void)
{
	struct timespec tm;

#ifdef USE_SCHED_YIELD
	sched_yield();
#else
	/* Linux will busy loop for delays < 2ms on real time tasks */
	tm.tv_sec = 0;
	tm.tv_nsec = 2000000L + 1;
	nanosleep(&tm, NULL);
#endif
}

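/* Report whether we are on a multi-processor machine, where brief busy
 * waiting is worthwhile before yielding the CPU. */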
static int this_is_smp(void)
{
#if defined(HAVE_SYSCONF) && defined(SYSCONF_SC_NPROC_ONLN)
	return (sysconf(_SC_NPROC_ONLN) > 1) ? 1 : 0;
#else
	return 0;
#endif
}

/*
 * GENERIC
 */

static int smp_machine = 0;

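/* Acquire a raw spinlock: keep retrying the trylock, busy-waiting for up to
 * MAX_BUSY_LOOPS iterations on SMP while the lock stays held, then yielding
 * the CPU between attempts. */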
static inline void __spin_lock(spinlock_t *lock)
{
	int ntries = 0;

	while(__spin_trylock(lock)) {
		while(__spin_is_locked(lock)) {
			if (smp_machine && ntries++ < MAX_BUSY_LOOPS)
				continue;
			yield_cpu();
		}
	}
}

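/* The reader/writer locks below are built on the raw spinlock: "count" holds
 * the number of active readers, and a writer sets the RWLOCK_BIAS bit only
 * when count is zero, so readers and the single writer exclude each other. */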
static void __read_lock(tdb_rwlock_t *rwlock)
{
	int ntries = 0;

	while(1) {
		__spin_lock(&rwlock->lock);

		if (!(rwlock->count & RWLOCK_BIAS)) {
			rwlock->count++;
			__spin_unlock(&rwlock->lock);
			return;
		}

		__spin_unlock(&rwlock->lock);

		while(rwlock->count & RWLOCK_BIAS) {
			if (smp_machine && ntries++ < MAX_BUSY_LOOPS)
				continue;
			yield_cpu();
		}
	}
}

static void __write_lock(tdb_rwlock_t *rwlock)
{
	int ntries = 0;

	while(1) {
		__spin_lock(&rwlock->lock);

		if (rwlock->count == 0) {
			rwlock->count |= RWLOCK_BIAS;
			__spin_unlock(&rwlock->lock);
			return;
		}

		__spin_unlock(&rwlock->lock);

		while(rwlock->count != 0) {
			if (smp_machine && ntries++ < MAX_BUSY_LOOPS)
				continue;
			yield_cpu();
		}
	}
}

static void __write_unlock(tdb_rwlock_t *rwlock)
{
	__spin_lock(&rwlock->lock);

#ifdef DEBUG
	if (!(rwlock->count & RWLOCK_BIAS))
		fprintf(stderr, "bug: write_unlock\n");
#endif

	rwlock->count &= ~RWLOCK_BIAS;
	__spin_unlock(&rwlock->lock);
}

static void __read_unlock(tdb_rwlock_t *rwlock)
{
	__spin_lock(&rwlock->lock);

#ifdef DEBUG
	if (!rwlock->count)
		fprintf(stderr, "bug: read_unlock\n");

	if (rwlock->count & RWLOCK_BIAS)
		fprintf(stderr, "bug: read_unlock\n");
#endif

	rwlock->count--;
	__spin_unlock(&rwlock->lock);
}

/* TDB SPECIFIC */

/* lock a list in the database. list -1 is the alloc list */
int tdb_spinlock(TDB_CONTEXT *tdb, int list, int rw_type)
{
	tdb_rwlock_t *rwlocks;

	if (!tdb->map_ptr) return -1;
	rwlocks = (tdb_rwlock_t *)((char *)tdb->map_ptr + tdb->header.rwlocks);

	switch(rw_type) {
	case F_RDLCK:
		__read_lock(&rwlocks[list+1]);
		break;

	case F_WRLCK:
		__write_lock(&rwlocks[list+1]);
		break;

	default:
		return TDB_ERRCODE(TDB_ERR_LOCK, -1);
	}
	return 0;
}

/* unlock the database. */
int tdb_spinunlock(TDB_CONTEXT *tdb, int list, int rw_type)
{
	tdb_rwlock_t *rwlocks;

	if (!tdb->map_ptr) return -1;
	rwlocks = (tdb_rwlock_t *)((char *)tdb->map_ptr + tdb->header.rwlocks);

	switch(rw_type) {
	case F_RDLCK:
		__read_unlock(&rwlocks[list+1]);
		break;

	case F_WRLCK:
		__write_unlock(&rwlocks[list+1]);
		break;

	default:
		return TDB_ERRCODE(TDB_ERR_LOCK, -1);
	}

	return 0;
}

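/* Create the spinlock area for a new database: one rwlock per hash chain
 * plus one for the alloc list, written out at the current end of the file. */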
int tdb_create_rwlocks(int fd, unsigned int hash_size)
{
	unsigned size, i;
	tdb_rwlock_t *rwlocks;

	size = TDB_SPINLOCK_SIZE(hash_size);
	rwlocks = malloc(size);
	if (!rwlocks)
		return -1;

	for(i = 0; i < hash_size+1; i++) {
		__spin_lock_init(&rwlocks[i].lock);
		rwlocks[i].count = 0;
	}

	/* Write it out (appending to end) */
	if (write(fd, rwlocks, size) != size) {
		free(rwlocks);
		return -1;
	}
	smp_machine = this_is_smp();
	free(rwlocks);
	return 0;
}

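/* Reinitialise every rwlock in the mmapped spinlock area of an already-open
 * database, clearing any lock state that may have been left behind. */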
int tdb_clear_spinlocks(TDB_CONTEXT *tdb)
{
	tdb_rwlock_t *rwlocks;
	unsigned i;

	if (tdb->header.rwlocks == 0) return 0;
	if (!tdb->map_ptr) return -1;

	/* We're mmapped here */
	rwlocks = (tdb_rwlock_t *)((char *)tdb->map_ptr + tdb->header.rwlocks);
	for(i = 0; i < tdb->header.hash_size+1; i++) {
		__spin_lock_init(&rwlocks[i].lock);
		rwlocks[i].count = 0;
	}
	return 0;
}
#else
int tdb_create_rwlocks(int fd, unsigned int hash_size) { return 0; }
int tdb_spinlock(TDB_CONTEXT *tdb, int list, int rw_type) { return -1; }
int tdb_spinunlock(TDB_CONTEXT *tdb, int list, int rw_type) { return -1; }

/* Non-spinlock version: remove spinlock pointer */
int tdb_clear_spinlocks(TDB_CONTEXT *tdb)
{
	tdb_off off = (tdb_off)((char *)&tdb->header.rwlocks
				- (char *)&tdb->header);

	tdb->header.rwlocks = 0;
	if (lseek(tdb->fd, off, SEEK_SET) != off
	    || write(tdb->fd, (void *)&tdb->header.rwlocks,
		     sizeof(tdb->header.rwlocks))
	    != sizeof(tdb->header.rwlocks))
		return -1;
	return 0;
}
#endif