1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2007 Oracle.  All rights reserved.
4  */
5 
6 #include <linux/slab.h>
7 #include <linux/blkdev.h>
8 #include <linux/writeback.h>
9 #include <linux/sched/mm.h>
10 #include "misc.h"
11 #include "ctree.h"
12 #include "transaction.h"
13 #include "btrfs_inode.h"
14 #include "extent_io.h"
15 #include "disk-io.h"
16 #include "compression.h"
17 #include "delalloc-space.h"
18 #include "qgroup.h"
19 #include "subpage.h"
20 
21 static struct kmem_cache *btrfs_ordered_extent_cache;
22 
23 static u64 entry_end(struct btrfs_ordered_extent *entry)
24 {
25 	if (entry->file_offset + entry->num_bytes < entry->file_offset)
26 		return (u64)-1;
27 	return entry->file_offset + entry->num_bytes;
28 }
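
A minimal sketch of the overflow clamp in entry_end(), using a hypothetical on-stack extent purely for illustration (real entries come from btrfs_ordered_extent_cache):

/* Illustrative sketch, not part of the upstream file. */
static void entry_end_overflow_example(void)
{
	struct btrfs_ordered_extent oe = {
		.file_offset = (u64)-4096,	/* 4 KiB below the u64 limit */
		.num_bytes = 8192,		/* file_offset + num_bytes wraps */
	};

	/* The clamped end keeps range comparisons monotonic. */
	WARN_ON(entry_end(&oe) != (u64)-1);
}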
29 
30 /* Returns NULL if the insertion worked, or returns the conflicting node
31  * that was already in the tree.
32  */
33 static struct rb_node *tree_insert(struct rb_root *root, u64 file_offset,
34 				   struct rb_node *node)
35 {
36 	struct rb_node **p = &root->rb_node;
37 	struct rb_node *parent = NULL;
38 	struct btrfs_ordered_extent *entry;
39 
40 	while (*p) {
41 		parent = *p;
42 		entry = rb_entry(parent, struct btrfs_ordered_extent, rb_node);
43 
44 		if (file_offset < entry->file_offset)
45 			p = &(*p)->rb_left;
46 		else if (file_offset >= entry_end(entry))
47 			p = &(*p)->rb_right;
48 		else
49 			return parent;
50 	}
51 
52 	rb_link_node(node, parent, p);
53 	rb_insert_color(node, root);
54 	return NULL;
55 }
56 
57 /*
58  * look for a given offset in the tree, and if it can't be found return the
59  * first lesser offset
60  */
61 static struct rb_node *__tree_search(struct rb_root *root, u64 file_offset,
62 				     struct rb_node **prev_ret)
63 {
64 	struct rb_node *n = root->rb_node;
65 	struct rb_node *prev = NULL;
66 	struct rb_node *test;
67 	struct btrfs_ordered_extent *entry;
68 	struct btrfs_ordered_extent *prev_entry = NULL;
69 
70 	while (n) {
71 		entry = rb_entry(n, struct btrfs_ordered_extent, rb_node);
72 		prev = n;
73 		prev_entry = entry;
74 
75 		if (file_offset < entry->file_offset)
76 			n = n->rb_left;
77 		else if (file_offset >= entry_end(entry))
78 			n = n->rb_right;
79 		else
80 			return n;
81 	}
82 	if (!prev_ret)
83 		return NULL;
84 
85 	while (prev && file_offset >= entry_end(prev_entry)) {
86 		test = rb_next(prev);
87 		if (!test)
88 			break;
89 		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
90 				      rb_node);
91 		if (file_offset < entry_end(prev_entry))
92 			break;
93 
94 		prev = test;
95 	}
96 	if (prev)
97 		prev_entry = rb_entry(prev, struct btrfs_ordered_extent,
98 				      rb_node);
99 	while (prev && file_offset < entry_end(prev_entry)) {
100 		test = rb_prev(prev);
101 		if (!test)
102 			break;
103 		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
104 				      rb_node);
105 		prev = test;
106 	}
107 	*prev_ret = prev;
108 	return NULL;
109 }
110 
111 static int range_overlaps(struct btrfs_ordered_extent *entry, u64 file_offset,
112 			  u64 len)
113 {
114 	if (file_offset + len <= entry->file_offset ||
115 	    entry->file_offset + entry->num_bytes <= file_offset)
116 		return 0;
117 	return 1;
118 }
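
The test treats extents as half-open intervals, so ranges that merely touch do not overlap. A small sketch of the boundary behavior (the helper name is hypothetical):

/* Illustrative sketch, not part of the upstream file. */
static void range_overlaps_example(void)
{
	struct btrfs_ordered_extent oe = {
		.file_offset = 4096,
		.num_bytes = 4096,	/* covers [4096, 8192) */
	};

	WARN_ON(range_overlaps(&oe, 0, 4096) != 0);	/* ends at start: no */
	WARN_ON(range_overlaps(&oe, 8191, 1) != 1);	/* last byte: yes */
	WARN_ON(range_overlaps(&oe, 8192, 4096) != 0);	/* begins at end: no */
}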
119 
120 /*
121  * Find the first ordered struct that has this offset, otherwise
122  * the first one less than this offset
123  */
124 static inline struct rb_node *tree_search(struct btrfs_ordered_inode_tree *tree,
125 					  u64 file_offset)
126 {
127 	struct rb_root *root = &tree->tree;
128 	struct rb_node *prev = NULL;
129 	struct rb_node *ret;
130 	struct btrfs_ordered_extent *entry;
131 
132 	if (tree->last) {
133 		entry = rb_entry(tree->last, struct btrfs_ordered_extent,
134 				 rb_node);
135 		if (in_range(file_offset, entry->file_offset, entry->num_bytes))
136 			return tree->last;
137 	}
138 	ret = __tree_search(root, file_offset, &prev);
139 	if (!ret)
140 		ret = prev;
141 	if (ret)
142 		tree->last = ret;
143 	return ret;
144 }
145 
146 /**
147  * Add an ordered extent to the per-inode tree.
148  *
149  * @inode:           Inode that this extent is for.
150  * @file_offset:     Logical offset in file where the extent starts.
151  * @num_bytes:       Logical length of extent in file.
152  * @ram_bytes:       Full length of unencoded data.
153  * @disk_bytenr:     Offset of extent on disk.
154  * @disk_num_bytes:  Size of extent on disk.
155  * @offset:          Offset into unencoded data where file data starts.
156  * @flags:           Flags specifying type of extent (1 << BTRFS_ORDERED_*).
157  * @compress_type:   Compression algorithm used for data.
158  *
159  * Most of these parameters correspond to &struct btrfs_file_extent_item. The
160  * tree is given a single reference on the ordered extent that was inserted.
161  *
162  * Return: 0 or -ENOMEM.
163  */
164 int btrfs_add_ordered_extent(struct btrfs_inode *inode, u64 file_offset,
165 			     u64 num_bytes, u64 ram_bytes, u64 disk_bytenr,
166 			     u64 disk_num_bytes, u64 offset, unsigned flags,
167 			     int compress_type)
168 {
169 	struct btrfs_root *root = inode->root;
170 	struct btrfs_fs_info *fs_info = root->fs_info;
171 	struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
172 	struct rb_node *node;
173 	struct btrfs_ordered_extent *entry;
174 	int ret;
175 	u64 qgroup_rsv = 0;
176 
177 	if (flags &
178 	    ((1 << BTRFS_ORDERED_NOCOW) | (1 << BTRFS_ORDERED_PREALLOC))) {
179 		/* For nocow write, we can release the qgroup rsv right now */
180 		ret = btrfs_qgroup_free_data(inode, NULL, file_offset, num_bytes, &qgroup_rsv);
181 		if (ret < 0)
182 			return ret;
183 		ret = 0;
184 	} else {
185 		/*
186 		 * The ordered extent has reserved qgroup space, release now
187 		 * and pass the reserved number for qgroup_record to free.
188 		 */
189 		ret = btrfs_qgroup_release_data(inode, file_offset, num_bytes, &qgroup_rsv);
190 		if (ret < 0)
191 			return ret;
192 	}
193 	entry = kmem_cache_zalloc(btrfs_ordered_extent_cache, GFP_NOFS);
194 	if (!entry)
195 		return -ENOMEM;
196 
197 	entry->file_offset = file_offset;
198 	entry->num_bytes = num_bytes;
199 	entry->ram_bytes = ram_bytes;
200 	entry->disk_bytenr = disk_bytenr;
201 	entry->disk_num_bytes = disk_num_bytes;
202 	entry->offset = offset;
203 	entry->bytes_left = num_bytes;
204 	entry->inode = igrab(&inode->vfs_inode);
205 	entry->compress_type = compress_type;
206 	entry->truncated_len = (u64)-1;
207 	entry->qgroup_rsv = qgroup_rsv;
208 	entry->physical = (u64)-1;
209 
210 	ASSERT((flags & ~BTRFS_ORDERED_TYPE_FLAGS) == 0);
211 	entry->flags = flags;
212 
213 	percpu_counter_add_batch(&fs_info->ordered_bytes, num_bytes,
214 				 fs_info->delalloc_batch);
215 
216 	/* one ref for the tree */
217 	refcount_set(&entry->refs, 1);
218 	init_waitqueue_head(&entry->wait);
219 	INIT_LIST_HEAD(&entry->list);
220 	INIT_LIST_HEAD(&entry->log_list);
221 	INIT_LIST_HEAD(&entry->root_extent_list);
222 	INIT_LIST_HEAD(&entry->work_list);
223 	init_completion(&entry->completion);
224 
225 	trace_btrfs_ordered_extent_add(inode, entry);
226 
227 	spin_lock_irq(&tree->lock);
228 	node = tree_insert(&tree->tree, file_offset,
229 			   &entry->rb_node);
230 	if (node)
231 		btrfs_panic(fs_info, -EEXIST,
232 				"inconsistency in ordered tree at offset %llu",
233 				file_offset);
234 	spin_unlock_irq(&tree->lock);
235 
236 	spin_lock(&root->ordered_extent_lock);
237 	list_add_tail(&entry->root_extent_list,
238 		      &root->ordered_extents);
239 	root->nr_ordered_extents++;
240 	if (root->nr_ordered_extents == 1) {
241 		spin_lock(&fs_info->ordered_root_lock);
242 		BUG_ON(!list_empty(&root->ordered_root));
243 		list_add_tail(&root->ordered_root, &fs_info->ordered_roots);
244 		spin_unlock(&fs_info->ordered_root_lock);
245 	}
246 	spin_unlock(&root->ordered_extent_lock);
247 
248 	/*
249 	 * We don't need the count_max_extents here, we can assume that all of
250 	 * that work has been done at higher layers, so this is truly the
251 	 * smallest the extent is going to get.
252 	 */
253 	spin_lock(&inode->lock);
254 	btrfs_mod_outstanding_extents(inode, 1);
255 	spin_unlock(&inode->lock);
256 
257 	return 0;
258 }
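
A hedged sketch of a caller, loosely modeled on the uncompressed buffered-write path; the helper name is hypothetical and the qgroup data reservation for the range is assumed to have been made earlier:

/* Illustrative sketch, not part of the upstream file. */
static int record_regular_write(struct btrfs_inode *inode, u64 start,
				u64 len, u64 disk_bytenr)
{
	/* ram_bytes == num_bytes and offset == 0 for unencoded data. */
	return btrfs_add_ordered_extent(inode, start, len, len,
					disk_bytenr, len, 0,
					(1 << BTRFS_ORDERED_REGULAR),
					BTRFS_COMPRESS_NONE);
}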
259 
260 /*
261  * Add a struct btrfs_ordered_sum into the list of checksums to be inserted
262  * when an ordered extent is finished.  If the list covers more than one
263  * ordered extent, it is split across them.
264  */
265 void btrfs_add_ordered_sum(struct btrfs_ordered_extent *entry,
266 			   struct btrfs_ordered_sum *sum)
267 {
268 	struct btrfs_ordered_inode_tree *tree;
269 
270 	tree = &BTRFS_I(entry->inode)->ordered_tree;
271 	spin_lock_irq(&tree->lock);
272 	list_add_tail(&sum->list, &entry->list);
273 	spin_unlock_irq(&tree->lock);
274 }
275 
276 static void finish_ordered_fn(struct btrfs_work *work)
277 {
278 	struct btrfs_ordered_extent *ordered_extent;
279 
280 	ordered_extent = container_of(work, struct btrfs_ordered_extent, work);
281 	btrfs_finish_ordered_io(ordered_extent);
282 }
283 
284 /*
285  * Mark all ordered extents io inside the specified range finished.
286  *
287  * @page:	 The involved page for the operation.
288  *		 For uncompressed buffered IO, the page status also needs to be
289  *		 updated to indicate whether the pending ordered io is finished.
290  *		 Can be NULL for direct IO and compressed write.
291  *		 In those cases the callers guarantee the endio function
292  *		 is not executed twice.
293  *
294  * This function is called for endio, thus the range must have ordered
295  * extent(s) covering it.
296  */
297 void btrfs_mark_ordered_io_finished(struct btrfs_inode *inode,
298 				    struct page *page, u64 file_offset,
299 				    u64 num_bytes, bool uptodate)
300 {
301 	struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
302 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
303 	struct btrfs_workqueue *wq;
304 	struct rb_node *node;
305 	struct btrfs_ordered_extent *entry = NULL;
306 	unsigned long flags;
307 	u64 cur = file_offset;
308 
309 	if (btrfs_is_free_space_inode(inode))
310 		wq = fs_info->endio_freespace_worker;
311 	else
312 		wq = fs_info->endio_write_workers;
313 
314 	if (page)
315 		ASSERT(page->mapping && page_offset(page) <= file_offset &&
316 		       file_offset + num_bytes <= page_offset(page) + PAGE_SIZE);
317 
318 	spin_lock_irqsave(&tree->lock, flags);
319 	while (cur < file_offset + num_bytes) {
320 		u64 entry_end;
321 		u64 end;
322 		u32 len;
323 
324 		node = tree_search(tree, cur);
325 		/* No ordered extents at all */
326 		if (!node)
327 			break;
328 
329 		entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
330 		entry_end = entry->file_offset + entry->num_bytes;
331 		/*
332 		 * |<-- OE --->|  |
333 		 *		  cur
334 		 * Go to next OE.
335 		 */
336 		if (cur >= entry_end) {
337 			node = rb_next(node);
338 			/* No more ordered extents, exit */
339 			if (!node)
340 				break;
341 			entry = rb_entry(node, struct btrfs_ordered_extent,
342 					 rb_node);
343 
344 			/* Go to next ordered extent and continue */
345 			cur = entry->file_offset;
346 			continue;
347 		}
348 		/*
349 		 * |	|<--- OE --->|
350 		 * cur
351 		 * Go to the start of OE.
352 		 */
353 		if (cur < entry->file_offset) {
354 			cur = entry->file_offset;
355 			continue;
356 		}
357 
358 		/*
359 		 * Now we are definitely inside one ordered extent.
360 		 *
361 		 * |<--- OE --->|
362 		 *	|
363 		 *	cur
364 		 */
365 		end = min(entry->file_offset + entry->num_bytes,
366 			  file_offset + num_bytes) - 1;
367 		ASSERT(end + 1 - cur < U32_MAX);
368 		len = end + 1 - cur;
369 
370 		if (page) {
371 			/*
372 			 * Ordered (Private2) bit indicates whether we still
373 			 * have pending io unfinished for the ordered extent.
374 			 *
375 			 * If there's no such bit, we need to skip to next range.
376 			 */
377 			if (!btrfs_page_test_ordered(fs_info, page, cur, len)) {
378 				cur += len;
379 				continue;
380 			}
381 			btrfs_page_clear_ordered(fs_info, page, cur, len);
382 		}
383 
384 		/* Now we're fine to update the accounting */
385 		if (unlikely(len > entry->bytes_left)) {
386 			WARN_ON(1);
387 			btrfs_crit(fs_info,
388 "bad ordered extent accounting, root=%llu ino=%llu OE offset=%llu OE len=%llu to_dec=%u left=%llu",
389 				   inode->root->root_key.objectid,
390 				   btrfs_ino(inode),
391 				   entry->file_offset,
392 				   entry->num_bytes,
393 				   len, entry->bytes_left);
394 			entry->bytes_left = 0;
395 		} else {
396 			entry->bytes_left -= len;
397 		}
398 
399 		if (!uptodate)
400 			set_bit(BTRFS_ORDERED_IOERR, &entry->flags);
401 
402 		/*
403 		 * All the IO of the ordered extent is finished, we need to queue
404 		 * finish_ordered_fn() to be executed.
405 		 */
406 		if (entry->bytes_left == 0) {
407 			set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
408 			cond_wake_up(&entry->wait);
409 			refcount_inc(&entry->refs);
410 			trace_btrfs_ordered_extent_mark_finished(inode, entry);
411 			spin_unlock_irqrestore(&tree->lock, flags);
412 			btrfs_init_work(&entry->work, finish_ordered_fn, NULL, NULL);
413 			btrfs_queue_work(wq, &entry->work);
414 			spin_lock_irqsave(&tree->lock, flags);
415 		}
416 		cur += len;
417 	}
418 	spin_unlock_irqrestore(&tree->lock, flags);
419 }
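
A sketch of how a direct IO completion might use this (hypothetical helper; direct IO passes a NULL page since there is no Ordered page bit to clear):

/* Illustrative sketch, not part of the upstream file. */
static void finish_dio_write(struct btrfs_inode *inode, u64 start, u64 len,
			     bool ok)
{
	/* NULL page: every byte of the range is accounted unconditionally. */
	btrfs_mark_ordered_io_finished(inode, NULL, start, len, ok);
}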
420 
421 /*
422  * Finish IO for one ordered extent across a given range.  The range can only
423  * contain one ordered extent.
424  *
425  * @cached:	 The cached ordered extent. If not NULL, we can skip the tree
426  *               search and use the ordered extent directly.
427  *		 Will also be used to store the finished ordered extent.
428  * @file_offset: File offset for the finished IO
429  * @io_size:	 Length of the finished IO range
430  *
431  * Return true if the ordered extent is finished in the range, and update
432  * @cached.
433  * Return false otherwise.
434  *
435  * NOTE: The range can NOT cross multiple ordered extents.
436  * Thus caller should ensure the range doesn't cross ordered extents.
437  */
438 bool btrfs_dec_test_ordered_pending(struct btrfs_inode *inode,
439 				    struct btrfs_ordered_extent **cached,
440 				    u64 file_offset, u64 io_size)
441 {
442 	struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
443 	struct rb_node *node;
444 	struct btrfs_ordered_extent *entry = NULL;
445 	unsigned long flags;
446 	bool finished = false;
447 
448 	spin_lock_irqsave(&tree->lock, flags);
449 	if (cached && *cached) {
450 		entry = *cached;
451 		goto have_entry;
452 	}
453 
454 	node = tree_search(tree, file_offset);
455 	if (!node)
456 		goto out;
457 
458 	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
459 have_entry:
460 	if (!in_range(file_offset, entry->file_offset, entry->num_bytes))
461 		goto out;
462 
463 	if (io_size > entry->bytes_left)
464 		btrfs_crit(inode->root->fs_info,
465 			   "bad ordered accounting left %llu size %llu",
466 		       entry->bytes_left, io_size);
467 
468 	entry->bytes_left -= io_size;
469 
470 	if (entry->bytes_left == 0) {
471 		/*
472 		 * Ensure only one caller can set the flag and return true
473 		 * accordingly
474 		 */
475 		finished = !test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
476 		/* test_and_set_bit implies a barrier */
477 		cond_wake_up_nomb(&entry->wait);
478 	}
479 out:
480 	if (finished && cached && entry) {
481 		*cached = entry;
482 		refcount_inc(&entry->refs);
483 		trace_btrfs_ordered_extent_dec_test_pending(inode, entry);
484 	}
485 	spin_unlock_irqrestore(&tree->lock, flags);
486 	return finished;
487 }
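
A sketch of the expected calling pattern (hypothetical helper): on success @cached carries a new reference that the caller must drop.

/* Illustrative sketch, not part of the upstream file. */
static void finish_whole_range(struct btrfs_inode *inode, u64 start, u64 len)
{
	struct btrfs_ordered_extent *cached = NULL;

	if (btrfs_dec_test_ordered_pending(inode, &cached, start, len)) {
		/* bytes_left hit zero and BTRFS_ORDERED_IO_DONE is set. */
		btrfs_put_ordered_extent(cached);
	}
}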
488 
489 /*
490  * used to drop a reference on an ordered extent.  This will free
491  * the extent if the last reference is dropped
492  */
493 void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
494 {
495 	struct list_head *cur;
496 	struct btrfs_ordered_sum *sum;
497 
498 	trace_btrfs_ordered_extent_put(BTRFS_I(entry->inode), entry);
499 
500 	if (refcount_dec_and_test(&entry->refs)) {
501 		ASSERT(list_empty(&entry->root_extent_list));
502 		ASSERT(list_empty(&entry->log_list));
503 		ASSERT(RB_EMPTY_NODE(&entry->rb_node));
504 		if (entry->inode)
505 			btrfs_add_delayed_iput(entry->inode);
506 		while (!list_empty(&entry->list)) {
507 			cur = entry->list.next;
508 			sum = list_entry(cur, struct btrfs_ordered_sum, list);
509 			list_del(&sum->list);
510 			kvfree(sum);
511 		}
512 		kmem_cache_free(btrfs_ordered_extent_cache, entry);
513 	}
514 }
515 
516 /*
517  * remove an ordered extent from the tree.  No references are dropped
518  * and waiters are woken up.
519  */
520 void btrfs_remove_ordered_extent(struct btrfs_inode *btrfs_inode,
521 				 struct btrfs_ordered_extent *entry)
522 {
523 	struct btrfs_ordered_inode_tree *tree;
524 	struct btrfs_root *root = btrfs_inode->root;
525 	struct btrfs_fs_info *fs_info = root->fs_info;
526 	struct rb_node *node;
527 	bool pending;
528 	bool freespace_inode;
529 
530 	/*
531 	 * If this is a free space inode the thread has not acquired the ordered
532 	 * extents lockdep map.
533 	 */
534 	freespace_inode = btrfs_is_free_space_inode(btrfs_inode);
535 
536 	btrfs_lockdep_acquire(fs_info, btrfs_trans_pending_ordered);
537 	/* This is paired with btrfs_add_ordered_extent. */
538 	spin_lock(&btrfs_inode->lock);
539 	btrfs_mod_outstanding_extents(btrfs_inode, -1);
540 	spin_unlock(&btrfs_inode->lock);
541 	if (root != fs_info->tree_root) {
542 		u64 release;
543 
544 		if (test_bit(BTRFS_ORDERED_ENCODED, &entry->flags))
545 			release = entry->disk_num_bytes;
546 		else
547 			release = entry->num_bytes;
548 		btrfs_delalloc_release_metadata(btrfs_inode, release,
549 						test_bit(BTRFS_ORDERED_IOERR,
550 							 &entry->flags));
551 	}
552 
553 	percpu_counter_add_batch(&fs_info->ordered_bytes, -entry->num_bytes,
554 				 fs_info->delalloc_batch);
555 
556 	tree = &btrfs_inode->ordered_tree;
557 	spin_lock_irq(&tree->lock);
558 	node = &entry->rb_node;
559 	rb_erase(node, &tree->tree);
560 	RB_CLEAR_NODE(node);
561 	if (tree->last == node)
562 		tree->last = NULL;
563 	set_bit(BTRFS_ORDERED_COMPLETE, &entry->flags);
564 	pending = test_and_clear_bit(BTRFS_ORDERED_PENDING, &entry->flags);
565 	spin_unlock_irq(&tree->lock);
566 
567 	/*
568 	 * The current running transaction is waiting on us, we need to let it
569 	 * know that we're complete and wake it up.
570 	 */
571 	if (pending) {
572 		struct btrfs_transaction *trans;
573 
574 		/*
575 		 * The checks for trans are just a formality, it should be set,
576 		 * but if it isn't we don't want to deref/assert under the spin
577 		 * lock, so be nice and check if trans is set, but ASSERT() so
578 		 * if it isn't set a developer will notice.
579 		 */
580 		spin_lock(&fs_info->trans_lock);
581 		trans = fs_info->running_transaction;
582 		if (trans)
583 			refcount_inc(&trans->use_count);
584 		spin_unlock(&fs_info->trans_lock);
585 
586 		ASSERT(trans || BTRFS_FS_ERROR(fs_info));
587 		if (trans) {
588 			if (atomic_dec_and_test(&trans->pending_ordered))
589 				wake_up(&trans->pending_wait);
590 			btrfs_put_transaction(trans);
591 		}
592 	}
593 
594 	btrfs_lockdep_release(fs_info, btrfs_trans_pending_ordered);
595 
596 	spin_lock(&root->ordered_extent_lock);
597 	list_del_init(&entry->root_extent_list);
598 	root->nr_ordered_extents--;
599 
600 	trace_btrfs_ordered_extent_remove(btrfs_inode, entry);
601 
602 	if (!root->nr_ordered_extents) {
603 		spin_lock(&fs_info->ordered_root_lock);
604 		BUG_ON(list_empty(&root->ordered_root));
605 		list_del_init(&root->ordered_root);
606 		spin_unlock(&fs_info->ordered_root_lock);
607 	}
608 	spin_unlock(&root->ordered_extent_lock);
609 	wake_up(&entry->wait);
610 	if (!freespace_inode)
611 		btrfs_lockdep_release(fs_info, btrfs_ordered_extent);
612 }
613 
614 static void btrfs_run_ordered_extent_work(struct btrfs_work *work)
615 {
616 	struct btrfs_ordered_extent *ordered;
617 
618 	ordered = container_of(work, struct btrfs_ordered_extent, flush_work);
619 	btrfs_start_ordered_extent(ordered, 1);
620 	complete(&ordered->completion);
621 }
622 
623 /*
624  * wait for all the ordered extents in a root.  This is done when balancing
625  * space between drives.
626  */
627 u64 btrfs_wait_ordered_extents(struct btrfs_root *root, u64 nr,
628 			       const u64 range_start, const u64 range_len)
629 {
630 	struct btrfs_fs_info *fs_info = root->fs_info;
631 	LIST_HEAD(splice);
632 	LIST_HEAD(skipped);
633 	LIST_HEAD(works);
634 	struct btrfs_ordered_extent *ordered, *next;
635 	u64 count = 0;
636 	const u64 range_end = range_start + range_len;
637 
638 	mutex_lock(&root->ordered_extent_mutex);
639 	spin_lock(&root->ordered_extent_lock);
640 	list_splice_init(&root->ordered_extents, &splice);
641 	while (!list_empty(&splice) && nr) {
642 		ordered = list_first_entry(&splice, struct btrfs_ordered_extent,
643 					   root_extent_list);
644 
645 		if (range_end <= ordered->disk_bytenr ||
646 		    ordered->disk_bytenr + ordered->disk_num_bytes <= range_start) {
647 			list_move_tail(&ordered->root_extent_list, &skipped);
648 			cond_resched_lock(&root->ordered_extent_lock);
649 			continue;
650 		}
651 
652 		list_move_tail(&ordered->root_extent_list,
653 			       &root->ordered_extents);
654 		refcount_inc(&ordered->refs);
655 		spin_unlock(&root->ordered_extent_lock);
656 
657 		btrfs_init_work(&ordered->flush_work,
658 				btrfs_run_ordered_extent_work, NULL, NULL);
659 		list_add_tail(&ordered->work_list, &works);
660 		btrfs_queue_work(fs_info->flush_workers, &ordered->flush_work);
661 
662 		cond_resched();
663 		spin_lock(&root->ordered_extent_lock);
664 		if (nr != U64_MAX)
665 			nr--;
666 		count++;
667 	}
668 	list_splice_tail(&skipped, &root->ordered_extents);
669 	list_splice_tail(&splice, &root->ordered_extents);
670 	spin_unlock(&root->ordered_extent_lock);
671 
672 	list_for_each_entry_safe(ordered, next, &works, work_list) {
673 		list_del_init(&ordered->work_list);
674 		wait_for_completion(&ordered->completion);
675 		btrfs_put_ordered_extent(ordered);
676 		cond_resched();
677 	}
678 	mutex_unlock(&root->ordered_extent_mutex);
679 
680 	return count;
681 }
682 
683 void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, u64 nr,
684 			     const u64 range_start, const u64 range_len)
685 {
686 	struct btrfs_root *root;
687 	struct list_head splice;
688 	u64 done;
689 
690 	INIT_LIST_HEAD(&splice);
691 
692 	mutex_lock(&fs_info->ordered_operations_mutex);
693 	spin_lock(&fs_info->ordered_root_lock);
694 	list_splice_init(&fs_info->ordered_roots, &splice);
695 	while (!list_empty(&splice) && nr) {
696 		root = list_first_entry(&splice, struct btrfs_root,
697 					ordered_root);
698 		root = btrfs_grab_root(root);
699 		BUG_ON(!root);
700 		list_move_tail(&root->ordered_root,
701 			       &fs_info->ordered_roots);
702 		spin_unlock(&fs_info->ordered_root_lock);
703 
704 		done = btrfs_wait_ordered_extents(root, nr,
705 						  range_start, range_len);
706 		btrfs_put_root(root);
707 
708 		spin_lock(&fs_info->ordered_root_lock);
709 		if (nr != U64_MAX) {
710 			nr -= done;
711 		}
712 	}
713 	list_splice_tail(&splice, &fs_info->ordered_roots);
714 	spin_unlock(&fs_info->ordered_root_lock);
715 	mutex_unlock(&fs_info->ordered_operations_mutex);
716 }
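
A sketch of the "flush everything" form used before operations such as balance (hypothetical helper): U64_MAX disables the count limit and the byte range spans the whole address space.

/* Illustrative sketch, not part of the upstream file. */
static void flush_all_ordered(struct btrfs_fs_info *fs_info)
{
	btrfs_wait_ordered_roots(fs_info, U64_MAX, 0, (u64)-1);
}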
717 
718 /*
719  * Used to start IO or wait for a given ordered extent to finish.
720  *
721  * If wait is one, this effectively waits on page writeback for all the pages
722  * in the extent, and it waits on the io completion code to insert
723  * metadata into the btree corresponding to the extent
724  */
725 void btrfs_start_ordered_extent(struct btrfs_ordered_extent *entry, int wait)
726 {
727 	u64 start = entry->file_offset;
728 	u64 end = start + entry->num_bytes - 1;
729 	struct btrfs_inode *inode = BTRFS_I(entry->inode);
730 	bool freespace_inode;
731 
732 	trace_btrfs_ordered_extent_start(inode, entry);
733 
734 	/*
735 	 * If this is a free space inode do not take the ordered extents lockdep
736 	 * map.
737 	 */
738 	freespace_inode = btrfs_is_free_space_inode(inode);
739 
740 	/*
741 	 * pages in the range can be dirty, clean or writeback.  We
742 	 * start IO on any dirty ones so the wait doesn't stall waiting
743 	 * for the flusher thread to find them
744 	 */
745 	if (!test_bit(BTRFS_ORDERED_DIRECT, &entry->flags))
746 		filemap_fdatawrite_range(inode->vfs_inode.i_mapping, start, end);
747 	if (wait) {
748 		if (!freespace_inode)
749 			btrfs_might_wait_for_event(inode->root->fs_info, btrfs_ordered_extent);
750 		wait_event(entry->wait, test_bit(BTRFS_ORDERED_COMPLETE,
751 						 &entry->flags));
752 	}
753 }
754 
755 /*
756  * Used to wait on ordered extents across a large range of bytes.
757  */
758 int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
759 {
760 	int ret = 0;
761 	int ret_wb = 0;
762 	u64 end;
763 	u64 orig_end;
764 	struct btrfs_ordered_extent *ordered;
765 
766 	if (start + len < start) {
767 		orig_end = INT_LIMIT(loff_t);
768 	} else {
769 		orig_end = start + len - 1;
770 		if (orig_end > INT_LIMIT(loff_t))
771 			orig_end = INT_LIMIT(loff_t);
772 	}
773 
774 	/* start IO across the range first to instantiate any delalloc
775 	 * extents
776 	 */
777 	ret = btrfs_fdatawrite_range(inode, start, orig_end);
778 	if (ret)
779 		return ret;
780 
781 	/*
782 	 * If we have a writeback error don't return immediately. Wait first
783 	 * for any ordered extents that haven't completed yet. This is to make
784 	 * sure no one can dirty the same page ranges and call writepages()
785 	 * before the ordered extents complete - to avoid failures (-EEXIST)
786 	 * when adding the new ordered extents to the ordered tree.
787 	 */
788 	ret_wb = filemap_fdatawait_range(inode->i_mapping, start, orig_end);
789 
790 	end = orig_end;
791 	while (1) {
792 		ordered = btrfs_lookup_first_ordered_extent(BTRFS_I(inode), end);
793 		if (!ordered)
794 			break;
795 		if (ordered->file_offset > orig_end) {
796 			btrfs_put_ordered_extent(ordered);
797 			break;
798 		}
799 		if (ordered->file_offset + ordered->num_bytes <= start) {
800 			btrfs_put_ordered_extent(ordered);
801 			break;
802 		}
803 		btrfs_start_ordered_extent(ordered, 1);
804 		end = ordered->file_offset;
805 		/*
806 		 * If the ordered extent had an error save the error but don't
807 		 * exit without waiting first for all other ordered extents in
808 		 * the range to complete.
809 		 */
810 		if (test_bit(BTRFS_ORDERED_IOERR, &ordered->flags))
811 			ret = -EIO;
812 		btrfs_put_ordered_extent(ordered);
813 		if (end == 0 || end == start)
814 			break;
815 		end--;
816 	}
817 	return ret_wb ? ret_wb : ret;
818 }
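
An fsync-style barrier over the whole file might look like this (hypothetical helper; the start + len overflow is clamped internally to the maximum loff_t):

/* Illustrative sketch, not part of the upstream file. */
static int wait_whole_file(struct inode *inode)
{
	return btrfs_wait_ordered_range(inode, 0, (u64)-1);
}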
819 
820 /*
821  * find an ordered extent corresponding to file_offset.  return NULL if
822  * nothing is found, otherwise take a reference on the extent and return it
823  */
824 struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct btrfs_inode *inode,
825 							 u64 file_offset)
826 {
827 	struct btrfs_ordered_inode_tree *tree;
828 	struct rb_node *node;
829 	struct btrfs_ordered_extent *entry = NULL;
830 	unsigned long flags;
831 
832 	tree = &inode->ordered_tree;
833 	spin_lock_irqsave(&tree->lock, flags);
834 	node = tree_search(tree, file_offset);
835 	if (!node)
836 		goto out;
837 
838 	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
839 	if (!in_range(file_offset, entry->file_offset, entry->num_bytes))
840 		entry = NULL;
841 	if (entry) {
842 		refcount_inc(&entry->refs);
843 		trace_btrfs_ordered_extent_lookup(inode, entry);
844 	}
845 out:
846 	spin_unlock_irqrestore(&tree->lock, flags);
847 	return entry;
848 }
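
Because a successful lookup returns a referenced extent, a simple "is anything pending here" check must pair it with a put (hypothetical helper):

/* Illustrative sketch, not part of the upstream file. */
static bool offset_has_pending_ordered(struct btrfs_inode *inode, u64 offset)
{
	struct btrfs_ordered_extent *ordered;

	ordered = btrfs_lookup_ordered_extent(inode, offset);
	if (!ordered)
		return false;
	btrfs_put_ordered_extent(ordered);
	return true;
}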
849 
850 /* Since the DIO code tries to lock a wide area we need to look for any ordered
851  * extents that exist in the range, rather than just the start of the range.
852  */
853 struct btrfs_ordered_extent *btrfs_lookup_ordered_range(
854 		struct btrfs_inode *inode, u64 file_offset, u64 len)
855 {
856 	struct btrfs_ordered_inode_tree *tree;
857 	struct rb_node *node;
858 	struct btrfs_ordered_extent *entry = NULL;
859 
860 	tree = &inode->ordered_tree;
861 	spin_lock_irq(&tree->lock);
862 	node = tree_search(tree, file_offset);
863 	if (!node) {
864 		node = tree_search(tree, file_offset + len);
865 		if (!node)
866 			goto out;
867 	}
868 
869 	while (1) {
870 		entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
871 		if (range_overlaps(entry, file_offset, len))
872 			break;
873 
874 		if (entry->file_offset >= file_offset + len) {
875 			entry = NULL;
876 			break;
877 		}
878 		entry = NULL;
879 		node = rb_next(node);
880 		if (!node)
881 			break;
882 	}
883 out:
884 	if (entry) {
885 		refcount_inc(&entry->refs);
886 		trace_btrfs_ordered_extent_lookup_range(inode, entry);
887 	}
888 	spin_unlock_irq(&tree->lock);
889 	return entry;
890 }
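
Unlike btrfs_lookup_ordered_extent(), this catches an extent starting anywhere inside the range, which is what a wide DIO lock needs (hypothetical helper):

/* Illustrative sketch, not part of the upstream file. */
static bool range_is_clean(struct btrfs_inode *inode, u64 start, u64 len)
{
	struct btrfs_ordered_extent *ordered;

	ordered = btrfs_lookup_ordered_range(inode, start, len);
	if (!ordered)
		return true;
	btrfs_put_ordered_extent(ordered);
	return false;
}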
891 
892 /*
893  * Adds all ordered extents to the given list. The list ends up sorted by the
894  * file_offset of the ordered extents.
895  */
896 void btrfs_get_ordered_extents_for_logging(struct btrfs_inode *inode,
897 					   struct list_head *list)
898 {
899 	struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
900 	struct rb_node *n;
901 
902 	ASSERT(inode_is_locked(&inode->vfs_inode));
903 
904 	spin_lock_irq(&tree->lock);
905 	for (n = rb_first(&tree->tree); n; n = rb_next(n)) {
906 		struct btrfs_ordered_extent *ordered;
907 
908 		ordered = rb_entry(n, struct btrfs_ordered_extent, rb_node);
909 
910 		if (test_bit(BTRFS_ORDERED_LOGGED, &ordered->flags))
911 			continue;
912 
913 		ASSERT(list_empty(&ordered->log_list));
914 		list_add_tail(&ordered->log_list, list);
915 		refcount_inc(&ordered->refs);
916 		trace_btrfs_ordered_extent_lookup_for_logging(inode, ordered);
917 	}
918 	spin_unlock_irq(&tree->lock);
919 }
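
Each list entry holds a reference, so a consumer drains the list like this (hypothetical helper, mirroring how the log code releases the entries):

/* Illustrative sketch, not part of the upstream file. */
static void drop_logged_ordered_extents(struct list_head *list)
{
	struct btrfs_ordered_extent *ordered, *tmp;

	list_for_each_entry_safe(ordered, tmp, list, log_list) {
		list_del_init(&ordered->log_list);
		btrfs_put_ordered_extent(ordered);
	}
}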
920 
921 /*
922  * lookup and return any extent before 'file_offset'.  NULL is returned
923  * if none is found
924  */
925 struct btrfs_ordered_extent *
926 btrfs_lookup_first_ordered_extent(struct btrfs_inode *inode, u64 file_offset)
927 {
928 	struct btrfs_ordered_inode_tree *tree;
929 	struct rb_node *node;
930 	struct btrfs_ordered_extent *entry = NULL;
931 
932 	tree = &inode->ordered_tree;
933 	spin_lock_irq(&tree->lock);
934 	node = tree_search(tree, file_offset);
935 	if (!node)
936 		goto out;
937 
938 	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
939 	refcount_inc(&entry->refs);
940 	trace_btrfs_ordered_extent_lookup_first(inode, entry);
941 out:
942 	spin_unlock_irq(&tree->lock);
943 	return entry;
944 }
945 
946 /*
947  * Lookup the first ordered extent that overlaps the range
948  * [@file_offset, @file_offset + @len).
949  *
950  * The difference between this and btrfs_lookup_first_ordered_extent() is
951  * that this one won't return any ordered extent that does not overlap the range.
952  * And the difference against btrfs_lookup_ordered_extent() is, this function
953  * ensures the first ordered extent gets returned.
954  */
955 struct btrfs_ordered_extent *btrfs_lookup_first_ordered_range(
956 			struct btrfs_inode *inode, u64 file_offset, u64 len)
957 {
958 	struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
959 	struct rb_node *node;
960 	struct rb_node *cur;
961 	struct rb_node *prev;
962 	struct rb_node *next;
963 	struct btrfs_ordered_extent *entry = NULL;
964 
965 	spin_lock_irq(&tree->lock);
966 	node = tree->tree.rb_node;
967 	/*
968 	 * Here we don't want to use tree_search() which will use tree->last
969 	 * and screw up the search order.
970 	 * And __tree_search() can't return the adjacent ordered extents
971 	 * either, thus here we do our own search.
972 	 */
973 	while (node) {
974 		entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
975 
976 		if (file_offset < entry->file_offset) {
977 			node = node->rb_left;
978 		} else if (file_offset >= entry_end(entry)) {
979 			node = node->rb_right;
980 		} else {
981 			/*
982 			 * Direct hit, got an ordered extent that starts at
983 			 * @file_offset
984 			 */
985 			goto out;
986 		}
987 	}
988 	if (!entry) {
989 		/* Empty tree */
990 		goto out;
991 	}
992 
993 	cur = &entry->rb_node;
994 	/* We got an entry around @file_offset, check adjacent entries */
995 	if (entry->file_offset < file_offset) {
996 		prev = cur;
997 		next = rb_next(cur);
998 	} else {
999 		prev = rb_prev(cur);
1000 		next = cur;
1001 	}
1002 	if (prev) {
1003 		entry = rb_entry(prev, struct btrfs_ordered_extent, rb_node);
1004 		if (range_overlaps(entry, file_offset, len))
1005 			goto out;
1006 	}
1007 	if (next) {
1008 		entry = rb_entry(next, struct btrfs_ordered_extent, rb_node);
1009 		if (range_overlaps(entry, file_offset, len))
1010 			goto out;
1011 	}
1012 	/* No ordered extent in the range */
1013 	entry = NULL;
1014 out:
1015 	if (entry) {
1016 		refcount_inc(&entry->refs);
1017 		trace_btrfs_ordered_extent_lookup_first_range(inode, entry);
1018 	}
1019 
1020 	spin_unlock_irq(&tree->lock);
1021 	return entry;
1022 }
1023 
1024 /*
1025  * btrfs_lock_and_flush_ordered_range - Lock the passed range and ensure all
1026  * pending ordered extents in it are run to completion.
1027  *
1028  * @inode:        Inode whose ordered tree is to be searched
1029  * @start:        Beginning of range to flush
1030  * @end:          Last byte of range to lock
1031  * @cached_state: If passed, will return the extent state responsible for the
1032  * locked range. It's the caller's responsibility to free the cached state.
1033  *
1034  * This function always returns with the given range locked, ensuring after it's
1035  * called no ordered extent can be pending.
1036  */
1037 void btrfs_lock_and_flush_ordered_range(struct btrfs_inode *inode, u64 start,
1038 					u64 end,
1039 					struct extent_state **cached_state)
1040 {
1041 	struct btrfs_ordered_extent *ordered;
1042 	struct extent_state *cache = NULL;
1043 	struct extent_state **cachedp = &cache;
1044 
1045 	if (cached_state)
1046 		cachedp = cached_state;
1047 
1048 	while (1) {
1049 		lock_extent(&inode->io_tree, start, end, cachedp);
1050 		ordered = btrfs_lookup_ordered_range(inode, start,
1051 						     end - start + 1);
1052 		if (!ordered) {
1053 			/*
1054 			 * If no external cached_state has been passed then
1055 			 * decrement the extra ref taken for cachedp since we
1056 			 * aren't exposing it outside of this function
1057 			 */
1058 			if (!cached_state)
1059 				refcount_dec(&cache->refs);
1060 			break;
1061 		}
1062 		unlock_extent(&inode->io_tree, start, end, cachedp);
1063 		btrfs_start_ordered_extent(ordered, 1);
1064 		btrfs_put_ordered_extent(ordered);
1065 	}
1066 }
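
A sketch of the lock/operate/unlock pattern (hypothetical helper; one page is assumed for brevity):

/* Illustrative sketch, not part of the upstream file. */
static void lock_flush_unlock_one_page(struct btrfs_inode *inode, u64 start)
{
	struct extent_state *cached = NULL;
	u64 end = start + PAGE_SIZE - 1;

	btrfs_lock_and_flush_ordered_range(inode, start, end, &cached);
	/* ... the range is locked and free of pending ordered extents ... */
	unlock_extent(&inode->io_tree, start, end, &cached);
}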
1067 
1068 /*
1069  * Lock the passed range and ensure all pending ordered extents in it are run
1070  * to completion in nowait mode.
1071  *
1072  * Return true if btrfs_lookup_ordered_range does not return any extents,
1073  * otherwise false.
1074  */
1075 bool btrfs_try_lock_ordered_range(struct btrfs_inode *inode, u64 start, u64 end)
1076 {
1077 	struct btrfs_ordered_extent *ordered;
1078 
1079 	if (!try_lock_extent(&inode->io_tree, start, end))
1080 		return false;
1081 
1082 	ordered = btrfs_lookup_ordered_range(inode, start, end - start + 1);
1083 	if (!ordered)
1084 		return true;
1085 
1086 	btrfs_put_ordered_extent(ordered);
1087 	unlock_extent(&inode->io_tree, start, end, NULL);
1088 
1089 	return false;
1090 }
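
A sketch of the nowait pattern this supports (hypothetical helper, in the style of RWF_NOWAIT writes):

/* Illustrative sketch, not part of the upstream file. */
static int nowait_prep(struct btrfs_inode *inode, u64 start, u64 end)
{
	if (!btrfs_try_lock_ordered_range(inode, start, end))
		return -EAGAIN;	/* caller retries in a blocking context */
	/* ... range is locked and has no ordered extents ... */
	unlock_extent(&inode->io_tree, start, end, NULL);
	return 0;
}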
1091 
1092 
1093 static int clone_ordered_extent(struct btrfs_ordered_extent *ordered, u64 pos,
1094 				u64 len)
1095 {
1096 	struct inode *inode = ordered->inode;
1097 	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
1098 	u64 file_offset = ordered->file_offset + pos;
1099 	u64 disk_bytenr = ordered->disk_bytenr + pos;
1100 	unsigned long flags = ordered->flags & BTRFS_ORDERED_TYPE_FLAGS;
1101 
1102 	/*
1103 	 * The splitting extent is already counted and will be added again in
1104 	 * btrfs_add_ordered_extent_*(). Subtract len to avoid double counting.
1105 	 */
1106 	percpu_counter_add_batch(&fs_info->ordered_bytes, -len,
1107 				 fs_info->delalloc_batch);
1108 	WARN_ON_ONCE(flags & (1 << BTRFS_ORDERED_COMPRESSED));
1109 	return btrfs_add_ordered_extent(BTRFS_I(inode), file_offset, len, len,
1110 					disk_bytenr, len, 0, flags,
1111 					ordered->compress_type);
1112 }
1113 
1114 int btrfs_split_ordered_extent(struct btrfs_ordered_extent *ordered, u64 pre,
1115 				u64 post)
1116 {
1117 	struct inode *inode = ordered->inode;
1118 	struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
1119 	struct rb_node *node;
1120 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
1121 	int ret = 0;
1122 
1123 	trace_btrfs_ordered_extent_split(BTRFS_I(inode), ordered);
1124 
1125 	spin_lock_irq(&tree->lock);
1126 	/* Remove from tree once */
1127 	node = &ordered->rb_node;
1128 	rb_erase(node, &tree->tree);
1129 	RB_CLEAR_NODE(node);
1130 	if (tree->last == node)
1131 		tree->last = NULL;
1132 
1133 	ordered->file_offset += pre;
1134 	ordered->disk_bytenr += pre;
1135 	ordered->num_bytes -= (pre + post);
1136 	ordered->disk_num_bytes -= (pre + post);
1137 	ordered->bytes_left -= (pre + post);
1138 
1139 	/* Re-insert the node */
1140 	node = tree_insert(&tree->tree, ordered->file_offset, &ordered->rb_node);
1141 	if (node)
1142 		btrfs_panic(fs_info, -EEXIST,
1143 			"zoned: inconsistency in ordered tree at offset %llu",
1144 			    ordered->file_offset);
1145 
1146 	spin_unlock_irq(&tree->lock);
1147 
1148 	if (pre)
1149 		ret = clone_ordered_extent(ordered, 0, pre);
1150 	if (ret == 0 && post)
1151 		ret = clone_ordered_extent(ordered, pre + ordered->disk_num_bytes,
1152 					   post);
1153 
1154 	return ret;
1155 }
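
A worked example of the pre/post arithmetic (hypothetical helper; SZ_* constants come from linux/sizes.h): splitting a 1 MiB ordered extent with pre = post = 64 KiB leaves @ordered describing only the middle 896 KiB, while the two clones cover the ends.

/* Illustrative sketch, not part of the upstream file. */
static int split_one_meg(struct btrfs_ordered_extent *ordered)
{
	/* Assumes ordered->num_bytes == SZ_1M before the call. */
	return btrfs_split_ordered_extent(ordered, SZ_64K, SZ_64K);
}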
1156 
1157 int __init ordered_data_init(void)
1158 {
1159 	btrfs_ordered_extent_cache = kmem_cache_create("btrfs_ordered_extent",
1160 				     sizeof(struct btrfs_ordered_extent), 0,
1161 				     SLAB_MEM_SPREAD,
1162 				     NULL);
1163 	if (!btrfs_ordered_extent_cache)
1164 		return -ENOMEM;
1165 
1166 	return 0;
1167 }
1168 
1169 void __cold ordered_data_exit(void)
1170 {
1171 	kmem_cache_destroy(btrfs_ordered_extent_cache);
1172 }
1173