Lines Matching +full:1 +full:eb
 * The extent buffer locks (also called tree locks) manage access to eb data
 * (i.e. the members of eb).
121 static inline void btrfs_assert_spinning_writers_get(struct extent_buffer *eb) in btrfs_assert_spinning_writers_get() argument
123 WARN_ON(eb->spinning_writers); in btrfs_assert_spinning_writers_get()
124 eb->spinning_writers++; in btrfs_assert_spinning_writers_get()
127 static inline void btrfs_assert_spinning_writers_put(struct extent_buffer *eb) in btrfs_assert_spinning_writers_put() argument
129 WARN_ON(eb->spinning_writers != 1); in btrfs_assert_spinning_writers_put()
130 eb->spinning_writers--; in btrfs_assert_spinning_writers_put()
133 static inline void btrfs_assert_no_spinning_writers(struct extent_buffer *eb) in btrfs_assert_no_spinning_writers() argument
135 WARN_ON(eb->spinning_writers); in btrfs_assert_no_spinning_writers()
138 static inline void btrfs_assert_spinning_readers_get(struct extent_buffer *eb) in btrfs_assert_spinning_readers_get() argument
140 atomic_inc(&eb->spinning_readers); in btrfs_assert_spinning_readers_get()
143 static inline void btrfs_assert_spinning_readers_put(struct extent_buffer *eb) in btrfs_assert_spinning_readers_put() argument
145 WARN_ON(atomic_read(&eb->spinning_readers) == 0); in btrfs_assert_spinning_readers_put()
146 atomic_dec(&eb->spinning_readers); in btrfs_assert_spinning_readers_put()
149 static inline void btrfs_assert_tree_read_locks_get(struct extent_buffer *eb) in btrfs_assert_tree_read_locks_get() argument
151 atomic_inc(&eb->read_locks); in btrfs_assert_tree_read_locks_get()
154 static inline void btrfs_assert_tree_read_locks_put(struct extent_buffer *eb) in btrfs_assert_tree_read_locks_put() argument
156 atomic_dec(&eb->read_locks); in btrfs_assert_tree_read_locks_put()
159 static inline void btrfs_assert_tree_read_locked(struct extent_buffer *eb) in btrfs_assert_tree_read_locked() argument
161 BUG_ON(!atomic_read(&eb->read_locks)); in btrfs_assert_tree_read_locked()
164 static inline void btrfs_assert_tree_write_locks_get(struct extent_buffer *eb) in btrfs_assert_tree_write_locks_get() argument
166 eb->write_locks++; in btrfs_assert_tree_write_locks_get()
169 static inline void btrfs_assert_tree_write_locks_put(struct extent_buffer *eb) in btrfs_assert_tree_write_locks_put() argument
171 eb->write_locks--; in btrfs_assert_tree_write_locks_put()
/* Debug accounting disabled: all lock-state assertions become no-ops. */
static void btrfs_assert_spinning_writers_get(struct extent_buffer *eb) { }
static void btrfs_assert_spinning_writers_put(struct extent_buffer *eb) { }
static void btrfs_assert_no_spinning_writers(struct extent_buffer *eb) { }
static void btrfs_assert_spinning_readers_put(struct extent_buffer *eb) { }
static void btrfs_assert_spinning_readers_get(struct extent_buffer *eb) { }
static void btrfs_assert_tree_read_locked(struct extent_buffer *eb) { }
static void btrfs_assert_tree_read_locks_get(struct extent_buffer *eb) { }
static void btrfs_assert_tree_read_locks_put(struct extent_buffer *eb) { }
static void btrfs_assert_tree_write_locks_get(struct extent_buffer *eb) { }
static void btrfs_assert_tree_write_locks_put(struct extent_buffer *eb) { }
196 void btrfs_set_lock_blocking_read(struct extent_buffer *eb) in btrfs_set_lock_blocking_read() argument
198 trace_btrfs_set_lock_blocking_read(eb); in btrfs_set_lock_blocking_read()
204 if (eb->lock_recursed && current->pid == eb->lock_owner) in btrfs_set_lock_blocking_read()
206 btrfs_assert_tree_read_locked(eb); in btrfs_set_lock_blocking_read()
207 atomic_inc(&eb->blocking_readers); in btrfs_set_lock_blocking_read()
208 btrfs_assert_spinning_readers_put(eb); in btrfs_set_lock_blocking_read()
209 read_unlock(&eb->lock); in btrfs_set_lock_blocking_read()
220 void btrfs_set_lock_blocking_write(struct extent_buffer *eb) in btrfs_set_lock_blocking_write() argument
222 trace_btrfs_set_lock_blocking_write(eb); in btrfs_set_lock_blocking_write()
228 if (eb->lock_recursed && current->pid == eb->lock_owner) in btrfs_set_lock_blocking_write()
230 if (eb->blocking_writers == 0) { in btrfs_set_lock_blocking_write()
231 btrfs_assert_spinning_writers_put(eb); in btrfs_set_lock_blocking_write()
232 btrfs_assert_tree_locked(eb); in btrfs_set_lock_blocking_write()
233 WRITE_ONCE(eb->blocking_writers, 1); in btrfs_set_lock_blocking_write()
234 write_unlock(&eb->lock); in btrfs_set_lock_blocking_write()
247 void __btrfs_tree_read_lock(struct extent_buffer *eb, enum btrfs_lock_nesting nest, in __btrfs_tree_read_lock() argument
255 read_lock(&eb->lock); in __btrfs_tree_read_lock()
256 BUG_ON(eb->blocking_writers == 0 && in __btrfs_tree_read_lock()
257 current->pid == eb->lock_owner); in __btrfs_tree_read_lock()
258 if (eb->blocking_writers) { in __btrfs_tree_read_lock()
259 if (current->pid == eb->lock_owner) { in __btrfs_tree_read_lock()
268 BUG_ON(eb->lock_recursed); in __btrfs_tree_read_lock()
269 eb->lock_recursed = true; in __btrfs_tree_read_lock()
270 read_unlock(&eb->lock); in __btrfs_tree_read_lock()
271 trace_btrfs_tree_read_lock(eb, start_ns); in __btrfs_tree_read_lock()
274 read_unlock(&eb->lock); in __btrfs_tree_read_lock()
275 wait_event(eb->write_lock_wq, in __btrfs_tree_read_lock()
276 READ_ONCE(eb->blocking_writers) == 0); in __btrfs_tree_read_lock()
279 btrfs_assert_tree_read_locks_get(eb); in __btrfs_tree_read_lock()
280 btrfs_assert_spinning_readers_get(eb); in __btrfs_tree_read_lock()
281 trace_btrfs_tree_read_lock(eb, start_ns); in __btrfs_tree_read_lock()
284 void btrfs_tree_read_lock(struct extent_buffer *eb) in btrfs_tree_read_lock() argument
286 __btrfs_tree_read_lock(eb, BTRFS_NESTING_NORMAL, false); in btrfs_tree_read_lock()
 * Return 1 if the rwlock has been taken, 0 otherwise
295 int btrfs_tree_read_lock_atomic(struct extent_buffer *eb) in btrfs_tree_read_lock_atomic() argument
297 if (READ_ONCE(eb->blocking_writers)) in btrfs_tree_read_lock_atomic()
300 read_lock(&eb->lock); in btrfs_tree_read_lock_atomic()
302 if (READ_ONCE(eb->blocking_writers)) { in btrfs_tree_read_lock_atomic()
303 read_unlock(&eb->lock); in btrfs_tree_read_lock_atomic()
306 btrfs_assert_tree_read_locks_get(eb); in btrfs_tree_read_lock_atomic()
307 btrfs_assert_spinning_readers_get(eb); in btrfs_tree_read_lock_atomic()
308 trace_btrfs_tree_read_lock_atomic(eb); in btrfs_tree_read_lock_atomic()
309 return 1; in btrfs_tree_read_lock_atomic()
 * Return 1 if the rwlock has been taken, 0 otherwise
317 int btrfs_try_tree_read_lock(struct extent_buffer *eb) in btrfs_try_tree_read_lock() argument
319 if (READ_ONCE(eb->blocking_writers)) in btrfs_try_tree_read_lock()
322 if (!read_trylock(&eb->lock)) in btrfs_try_tree_read_lock()
326 if (READ_ONCE(eb->blocking_writers)) { in btrfs_try_tree_read_lock()
327 read_unlock(&eb->lock); in btrfs_try_tree_read_lock()
330 btrfs_assert_tree_read_locks_get(eb); in btrfs_try_tree_read_lock()
331 btrfs_assert_spinning_readers_get(eb); in btrfs_try_tree_read_lock()
332 trace_btrfs_try_tree_read_lock(eb); in btrfs_try_tree_read_lock()
333 return 1; in btrfs_try_tree_read_lock()
 * Return 1 if the rwlock has been taken, 0 otherwise
342 int btrfs_try_tree_write_lock(struct extent_buffer *eb) in btrfs_try_tree_write_lock() argument
344 if (READ_ONCE(eb->blocking_writers) || atomic_read(&eb->blocking_readers)) in btrfs_try_tree_write_lock()
347 write_lock(&eb->lock); in btrfs_try_tree_write_lock()
349 if (READ_ONCE(eb->blocking_writers) || atomic_read(&eb->blocking_readers)) { in btrfs_try_tree_write_lock()
350 write_unlock(&eb->lock); in btrfs_try_tree_write_lock()
353 btrfs_assert_tree_write_locks_get(eb); in btrfs_try_tree_write_lock()
354 btrfs_assert_spinning_writers_get(eb); in btrfs_try_tree_write_lock()
355 eb->lock_owner = current->pid; in btrfs_try_tree_write_lock()
356 trace_btrfs_try_tree_write_lock(eb); in btrfs_try_tree_write_lock()
357 return 1; in btrfs_try_tree_write_lock()
366 void btrfs_tree_read_unlock(struct extent_buffer *eb) in btrfs_tree_read_unlock() argument
368 trace_btrfs_tree_read_unlock(eb); in btrfs_tree_read_unlock()
375 if (eb->lock_recursed && current->pid == eb->lock_owner) { in btrfs_tree_read_unlock()
376 eb->lock_recursed = false; in btrfs_tree_read_unlock()
379 btrfs_assert_tree_read_locked(eb); in btrfs_tree_read_unlock()
380 btrfs_assert_spinning_readers_put(eb); in btrfs_tree_read_unlock()
381 btrfs_assert_tree_read_locks_put(eb); in btrfs_tree_read_unlock()
382 read_unlock(&eb->lock); in btrfs_tree_read_unlock()
392 void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb) in btrfs_tree_read_unlock_blocking() argument
394 trace_btrfs_tree_read_unlock_blocking(eb); in btrfs_tree_read_unlock_blocking()
401 if (eb->lock_recursed && current->pid == eb->lock_owner) { in btrfs_tree_read_unlock_blocking()
402 eb->lock_recursed = false; in btrfs_tree_read_unlock_blocking()
405 btrfs_assert_tree_read_locked(eb); in btrfs_tree_read_unlock_blocking()
406 WARN_ON(atomic_read(&eb->blocking_readers) == 0); in btrfs_tree_read_unlock_blocking()
408 if (atomic_dec_and_test(&eb->blocking_readers)) in btrfs_tree_read_unlock_blocking()
409 cond_wake_up_nomb(&eb->read_lock_wq); in btrfs_tree_read_unlock_blocking()
410 btrfs_assert_tree_read_locks_put(eb); in btrfs_tree_read_unlock_blocking()
419 void __btrfs_tree_lock(struct extent_buffer *eb, enum btrfs_lock_nesting nest) in __btrfs_tree_lock() argument
420 __acquires(&eb->lock) in __btrfs_tree_lock()
427 WARN_ON(eb->lock_owner == current->pid); in __btrfs_tree_lock()
429 wait_event(eb->read_lock_wq, atomic_read(&eb->blocking_readers) == 0); in __btrfs_tree_lock()
430 wait_event(eb->write_lock_wq, READ_ONCE(eb->blocking_writers) == 0); in __btrfs_tree_lock()
431 write_lock(&eb->lock); in __btrfs_tree_lock()
433 if (atomic_read(&eb->blocking_readers) || in __btrfs_tree_lock()
434 READ_ONCE(eb->blocking_writers)) { in __btrfs_tree_lock()
435 write_unlock(&eb->lock); in __btrfs_tree_lock()
438 btrfs_assert_spinning_writers_get(eb); in __btrfs_tree_lock()
439 btrfs_assert_tree_write_locks_get(eb); in __btrfs_tree_lock()
440 eb->lock_owner = current->pid; in __btrfs_tree_lock()
441 trace_btrfs_tree_lock(eb, start_ns); in __btrfs_tree_lock()
444 void btrfs_tree_lock(struct extent_buffer *eb) in btrfs_tree_lock() argument
446 __btrfs_tree_lock(eb, BTRFS_NESTING_NORMAL); in btrfs_tree_lock()
457 void btrfs_tree_unlock(struct extent_buffer *eb) in btrfs_tree_unlock() argument
463 int blockers = eb->blocking_writers; in btrfs_tree_unlock()
465 BUG_ON(blockers > 1); in btrfs_tree_unlock()
467 btrfs_assert_tree_locked(eb); in btrfs_tree_unlock()
468 trace_btrfs_tree_unlock(eb); in btrfs_tree_unlock()
469 eb->lock_owner = 0; in btrfs_tree_unlock()
470 btrfs_assert_tree_write_locks_put(eb); in btrfs_tree_unlock()
473 btrfs_assert_no_spinning_writers(eb); in btrfs_tree_unlock()
475 WRITE_ONCE(eb->blocking_writers, 0); in btrfs_tree_unlock()
481 cond_wake_up(&eb->write_lock_wq); in btrfs_tree_unlock()
483 btrfs_assert_spinning_writers_put(eb); in btrfs_tree_unlock()
484 write_unlock(&eb->lock); in btrfs_tree_unlock()
548 struct extent_buffer *eb; in btrfs_lock_root_node() local
550 while (1) { in btrfs_lock_root_node()
551 eb = btrfs_root_node(root); in btrfs_lock_root_node()
552 btrfs_tree_lock(eb); in btrfs_lock_root_node()
553 if (eb == root->node) in btrfs_lock_root_node()
555 btrfs_tree_unlock(eb); in btrfs_lock_root_node()
556 free_extent_buffer(eb); in btrfs_lock_root_node()
558 return eb; in btrfs_lock_root_node()
570 struct extent_buffer *eb; in __btrfs_read_lock_root_node() local
572 while (1) { in __btrfs_read_lock_root_node()
573 eb = btrfs_root_node(root); in __btrfs_read_lock_root_node()
574 __btrfs_tree_read_lock(eb, BTRFS_NESTING_NORMAL, recurse); in __btrfs_read_lock_root_node()
575 if (eb == root->node) in __btrfs_read_lock_root_node()
577 btrfs_tree_read_unlock(eb); in __btrfs_read_lock_root_node()
578 free_extent_buffer(eb); in __btrfs_read_lock_root_node()
580 return eb; in __btrfs_read_lock_root_node()