// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/page-flags.h>
#include <asm/bug.h>
#include "misc.h"
#include "ctree.h"
#include "extent_io.h"
#include "locking.h"

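/*
 * Lock accounting helpers, only compiled in with CONFIG_BTRFS_DEBUG.  They
 * sanity check the spinning/blocking reader and writer counters kept in the
 * extent buffer; without the config option they become empty stubs.
 */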
#ifdef CONFIG_BTRFS_DEBUG
static void btrfs_assert_spinning_writers_get(struct extent_buffer *eb)
{
	WARN_ON(eb->spinning_writers);
	eb->spinning_writers++;
}

static void btrfs_assert_spinning_writers_put(struct extent_buffer *eb)
{
	WARN_ON(eb->spinning_writers != 1);
	eb->spinning_writers--;
}

static void btrfs_assert_no_spinning_writers(struct extent_buffer *eb)
{
	WARN_ON(eb->spinning_writers);
}

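/*
 * The reader counters below are atomics because concurrent readers update
 * them without holding the write lock; the writer counters above are plain
 * integers since updates to them are serialized by the write lock itself.
 */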
static void btrfs_assert_spinning_readers_get(struct extent_buffer *eb)
{
	atomic_inc(&eb->spinning_readers);
}

static void btrfs_assert_spinning_readers_put(struct extent_buffer *eb)
{
	WARN_ON(atomic_read(&eb->spinning_readers) == 0);
	atomic_dec(&eb->spinning_readers);
}

static void btrfs_assert_tree_read_locks_get(struct extent_buffer *eb)
{
	atomic_inc(&eb->read_locks);
}

static void btrfs_assert_tree_read_locks_put(struct extent_buffer *eb)
{
	atomic_dec(&eb->read_locks);
}

static void btrfs_assert_tree_read_locked(struct extent_buffer *eb)
{
	BUG_ON(!atomic_read(&eb->read_locks));
}

static void btrfs_assert_tree_write_locks_get(struct extent_buffer *eb)
{
	eb->write_locks++;
}

static void btrfs_assert_tree_write_locks_put(struct extent_buffer *eb)
{
	eb->write_locks--;
}

void btrfs_assert_tree_locked(struct extent_buffer *eb)
{
	BUG_ON(!eb->write_locks);
}

#else
static void btrfs_assert_spinning_writers_get(struct extent_buffer *eb) { }
static void btrfs_assert_spinning_writers_put(struct extent_buffer *eb) { }
static void btrfs_assert_no_spinning_writers(struct extent_buffer *eb) { }
static void btrfs_assert_spinning_readers_put(struct extent_buffer *eb) { }
static void btrfs_assert_spinning_readers_get(struct extent_buffer *eb) { }
static void btrfs_assert_tree_read_locked(struct extent_buffer *eb) { }
static void btrfs_assert_tree_read_locks_get(struct extent_buffer *eb) { }
static void btrfs_assert_tree_read_locks_put(struct extent_buffer *eb) { }
void btrfs_assert_tree_locked(struct extent_buffer *eb) { }
static void btrfs_assert_tree_write_locks_get(struct extent_buffer *eb) { }
static void btrfs_assert_tree_write_locks_put(struct extent_buffer *eb) { }
#endif

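/*
 * Switch an already held read lock from spinning to blocking: account the
 * caller as a blocking reader and drop the rwlock, so the thread may sleep
 * while writers wait on the blocking_readers count instead of spinning on
 * the rwlock.
 */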
void btrfs_set_lock_blocking_read(struct extent_buffer *eb)
{
	trace_btrfs_set_lock_blocking_read(eb);
	/*
	 * No lock is required.  The lock owner may change if we have a read
	 * lock, but it won't change to or away from us.  If we have the write
	 * lock, we are the owner and it'll never change.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner)
		return;
	btrfs_assert_tree_read_locked(eb);
	atomic_inc(&eb->blocking_readers);
	btrfs_assert_spinning_readers_put(eb);
	read_unlock(&eb->lock);
}

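/*
 * Switch an already held write lock from spinning to blocking: raise
 * blocking_writers and drop the rwlock, keeping new lockers out while the
 * owner may sleep.  A typical sequence (illustrative only, not taken from a
 * particular caller) is:
 *
 *	btrfs_tree_lock(eb);
 *	btrfs_set_lock_blocking_write(eb);
 *	... work that may sleep ...
 *	btrfs_tree_unlock(eb);
 */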
void btrfs_set_lock_blocking_write(struct extent_buffer *eb)
{
	trace_btrfs_set_lock_blocking_write(eb);
	/*
	 * No lock is required.  The lock owner may change if we have a read
	 * lock, but it won't change to or away from us.  If we have the write
	 * lock, we are the owner and it'll never change.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner)
		return;
	if (eb->blocking_writers == 0) {
		btrfs_assert_spinning_writers_put(eb);
		btrfs_assert_tree_locked(eb);
		eb->blocking_writers++;
		write_unlock(&eb->lock);
	}
}

/*
 * take a spinning read lock.  This will wait for any blocking
 * writers
 */
void btrfs_tree_read_lock(struct extent_buffer *eb)
{
	u64 start_ns = 0;

	if (trace_btrfs_tree_read_lock_enabled())
		start_ns = ktime_get_ns();
again:
	read_lock(&eb->lock);
	BUG_ON(eb->blocking_writers == 0 &&
	       current->pid == eb->lock_owner);
	if (eb->blocking_writers && current->pid == eb->lock_owner) {
		/*
		 * This extent is already write-locked by our thread. We allow
		 * an additional read lock to be added because it's for the same
		 * thread. btrfs_find_all_roots() depends on this as it may be
		 * called on a partly (write-)locked tree.
		 */
		BUG_ON(eb->lock_nested);
		eb->lock_nested = true;
		read_unlock(&eb->lock);
		trace_btrfs_tree_read_lock(eb, start_ns);
		return;
	}
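	/*
	 * A writer has gone blocking: drop the spinning read lock, sleep
	 * until all blocking writers are gone and retry from the start.
	 */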
	if (eb->blocking_writers) {
		read_unlock(&eb->lock);
		wait_event(eb->write_lock_wq,
			   eb->blocking_writers == 0);
		goto again;
	}
	btrfs_assert_tree_read_locks_get(eb);
	btrfs_assert_spinning_readers_get(eb);
	trace_btrfs_tree_read_lock(eb, start_ns);
}

/*
 * take a spinning read lock.
 * returns 1 if we get the read lock and 0 if we don't
 * this won't wait for blocking writers
 */
int btrfs_tree_read_lock_atomic(struct extent_buffer *eb)
{
	if (eb->blocking_writers)
		return 0;

	read_lock(&eb->lock);
	if (eb->blocking_writers) {
		read_unlock(&eb->lock);
		return 0;
	}
	btrfs_assert_tree_read_locks_get(eb);
	btrfs_assert_spinning_readers_get(eb);
	trace_btrfs_tree_read_lock_atomic(eb);
	return 1;
}

/*
 * returns 1 if we get the read lock and 0 if we don't
 * this won't wait for blocking writers
 */
int btrfs_try_tree_read_lock(struct extent_buffer *eb)
{
	if (eb->blocking_writers)
		return 0;

	if (!read_trylock(&eb->lock))
		return 0;

	if (eb->blocking_writers) {
		read_unlock(&eb->lock);
		return 0;
	}
	btrfs_assert_tree_read_locks_get(eb);
	btrfs_assert_spinning_readers_get(eb);
	trace_btrfs_try_tree_read_lock(eb);
	return 1;
}

/*
 * returns 1 if we get the write lock and 0 if we don't
 * this won't wait for blocking writers or readers
 */
int btrfs_try_tree_write_lock(struct extent_buffer *eb)
{
	if (eb->blocking_writers || atomic_read(&eb->blocking_readers))
		return 0;

	write_lock(&eb->lock);
	if (eb->blocking_writers || atomic_read(&eb->blocking_readers)) {
		write_unlock(&eb->lock);
		return 0;
	}
	btrfs_assert_tree_write_locks_get(eb);
	btrfs_assert_spinning_writers_get(eb);
	eb->lock_owner = current->pid;
	trace_btrfs_try_tree_write_lock(eb);
	return 1;
}

/*
 * drop a spinning read lock
 */
void btrfs_tree_read_unlock(struct extent_buffer *eb)
{
	trace_btrfs_tree_read_unlock(eb);
	/*
	 * if we're nested, we have the write lock.  No new locking
	 * is needed as long as we are the lock owner.
	 * The write unlock will do a barrier for us, and the lock_nested
	 * field only matters to the lock owner.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner) {
		eb->lock_nested = false;
		return;
	}
	btrfs_assert_tree_read_locked(eb);
	btrfs_assert_spinning_readers_put(eb);
	btrfs_assert_tree_read_locks_put(eb);
	read_unlock(&eb->lock);
}

/*
 * drop a blocking read lock
 */
void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb)
{
	trace_btrfs_tree_read_unlock_blocking(eb);
	/*
	 * if we're nested, we have the write lock.  No new locking
	 * is needed as long as we are the lock owner.
	 * The write unlock will do a barrier for us, and the lock_nested
	 * field only matters to the lock owner.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner) {
		eb->lock_nested = false;
		return;
	}
	btrfs_assert_tree_read_locked(eb);
	WARN_ON(atomic_read(&eb->blocking_readers) == 0);
	/* atomic_dec_and_test implies a barrier */
	if (atomic_dec_and_test(&eb->blocking_readers))
		cond_wake_up_nomb(&eb->read_lock_wq);
	btrfs_assert_tree_read_locks_put(eb);
}

/*
 * take a spinning write lock.  This will wait for both blocking
 * readers and writers
 */
void btrfs_tree_lock(struct extent_buffer *eb)
{
	u64 start_ns = 0;

	if (trace_btrfs_tree_lock_enabled())
		start_ns = ktime_get_ns();

	WARN_ON(eb->lock_owner == current->pid);
again:
	wait_event(eb->read_lock_wq, atomic_read(&eb->blocking_readers) == 0);
	wait_event(eb->write_lock_wq, eb->blocking_writers == 0);
	write_lock(&eb->lock);
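	/*
	 * The wait_event() checks above run without the lock held, so new
	 * blocking readers or writers may have appeared before write_lock()
	 * succeeded; recheck under the lock and retry if so.
	 */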
	if (atomic_read(&eb->blocking_readers) || eb->blocking_writers) {
		write_unlock(&eb->lock);
		goto again;
	}
	btrfs_assert_spinning_writers_get(eb);
	btrfs_assert_tree_write_locks_get(eb);
	eb->lock_owner = current->pid;
	trace_btrfs_tree_lock(eb, start_ns);
}

/*
 * drop a spinning or a blocking write lock.
 */
void btrfs_tree_unlock(struct extent_buffer *eb)
{
	int blockers = eb->blocking_writers;

	BUG_ON(blockers > 1);

	btrfs_assert_tree_locked(eb);
	trace_btrfs_tree_unlock(eb);
	eb->lock_owner = 0;
	btrfs_assert_tree_write_locks_put(eb);

	if (blockers) {
		btrfs_assert_no_spinning_writers(eb);
		eb->blocking_writers--;
		/*
		 * We need to order modifying blocking_writers above with
		 * actually waking up the sleepers to ensure they see the
		 * updated value of blocking_writers
		 */
		cond_wake_up(&eb->write_lock_wq);
	} else {
		btrfs_assert_spinning_writers_put(eb);
		write_unlock(&eb->lock);
	}
}