#ifndef _LINUX_PERCPU_COUNTER_H
#define _LINUX_PERCPU_COUNTER_H
/*
 * A simple "approximate counter" for use in ext2 and ext3 superblocks.
 *
 * WARNING: these things are HUGE.  4 kbytes per counter on 32-way P4.
 */
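/*
 * Minimal usage sketch (illustrative only; free_blocks, threshold and
 * accurate below are made-up names, not part of this interface):
 *
 *	struct percpu_counter free_blocks;
 *	s64 accurate;
 *
 *	percpu_counter_init(&free_blocks, 0);
 *	percpu_counter_mod(&free_blocks, 128);
 *	if (percpu_counter_read_positive(&free_blocks) < threshold)
 *		accurate = percpu_counter_sum(&free_blocks);
 *	percpu_counter_destroy(&free_blocks);
 *
 * percpu_counter_read*() is a cheap, approximate read of the shared count;
 * percpu_counter_sum() walks every cpu's delta and is accurate but slow.
 */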

#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/threads.h>
#include <linux/percpu.h>
#include <linux/types.h>

#ifdef CONFIG_SMP

struct percpu_counter {
	spinlock_t lock;
	s64 count;
	s32 *counters;
};

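/*
 * Per-cpu deltas are folded into fbc->count once their magnitude reaches
 * FBC_BATCH (by percpu_counter_mod()), so a plain read of fbc->count is
 * off by at most about NR_CPUS * FBC_BATCH.
 */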
#if NR_CPUS >= 16
#define FBC_BATCH	(NR_CPUS*2)
#else
#define FBC_BATCH	(NR_CPUS*4)
#endif

static inline void percpu_counter_init(struct percpu_counter *fbc, s64 amount)
{
	spin_lock_init(&fbc->lock);
	fbc->count = amount;
	fbc->counters = alloc_percpu(s32);
}

static inline void percpu_counter_destroy(struct percpu_counter *fbc)
{
	free_percpu(fbc->counters);
}

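/*
 * percpu_counter_mod() updates this cpu's local delta and takes fbc->lock
 * only when that delta reaches FBC_BATCH; percpu_counter_sum() returns
 * fbc->count plus every cpu's outstanding delta, taken under the lock, so
 * it is accurate but much slower than percpu_counter_read().
 */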
void percpu_counter_mod(struct percpu_counter *fbc, s32 amount);
s64 percpu_counter_sum(struct percpu_counter *fbc);

static inline s64 percpu_counter_read(struct percpu_counter *fbc)
{
	return fbc->count;
}

/*
 * It is possible for percpu_counter_read() to return a small negative
 * number for a counter which should never be negative.
 */
static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
{
	s64 ret = fbc->count;

	barrier();		/* Prevent reloads of fbc->count */
	if (ret >= 0)
		return ret;
	return 1;
}

#else
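/*
 * On UP there is nothing to batch: the counter is a plain s64 and every
 * read is exact.
 */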

struct percpu_counter {
	s64 count;
};

static inline void percpu_counter_init(struct percpu_counter *fbc, s64 amount)
{
	fbc->count = amount;
}

static inline void percpu_counter_destroy(struct percpu_counter *fbc)
{
}

static inline void
percpu_counter_mod(struct percpu_counter *fbc, s32 amount)
{
	preempt_disable();
	fbc->count += amount;
	preempt_enable();
}

static inline s64 percpu_counter_read(struct percpu_counter *fbc)
{
	return fbc->count;
}

static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
{
	return fbc->count;
}

static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
{
	return percpu_counter_read_positive(fbc);
}

#endif	/* CONFIG_SMP */
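/*
 * Convenience wrappers shared by the SMP and UP variants.
 */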

static inline void percpu_counter_inc(struct percpu_counter *fbc)
{
	percpu_counter_mod(fbc, 1);
}

static inline void percpu_counter_dec(struct percpu_counter *fbc)
{
	percpu_counter_mod(fbc, -1);
}

#endif /* _LINUX_PERCPU_COUNTER_H */