/* handling of writes to regular files and writing back to the server
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/backing-dev.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include "internal.h"

static int afs_write_back_from_locked_page(struct afs_writeback *wb,
					   struct page *page);

/*
 * mark a page as having been made dirty and thus needing writeback
 */
int afs_set_page_dirty(struct page *page)
{
	_enter("");
	return __set_page_dirty_nobuffers(page);
}

/*
 * unlink a writeback record because its usage has reached zero
 * - must be called with the wb->vnode->writeback_lock held
 */
static void afs_unlink_writeback(struct afs_writeback *wb)
{
	struct afs_writeback *front;
	struct afs_vnode *vnode = wb->vnode;

	list_del_init(&wb->link);
	if (!list_empty(&vnode->writebacks)) {
		/* if an fsync rises to the front of the queue then wake it
		 * up */
		front = list_entry(vnode->writebacks.next,
				   struct afs_writeback, link);
		if (front->state == AFS_WBACK_SYNCING) {
			_debug("wake up sync");
			front->state = AFS_WBACK_COMPLETE;
			wake_up(&front->waitq);
		}
	}
}

/*
 * free a writeback record
 */
static void afs_free_writeback(struct afs_writeback *wb)
{
	_enter("");
	key_put(wb->key);
	kfree(wb);
}

/*
 * dispose of a reference to a writeback record
 */
void afs_put_writeback(struct afs_writeback *wb)
{
	struct afs_vnode *vnode = wb->vnode;

	_enter("{%d}", wb->usage);

	spin_lock(&vnode->writeback_lock);
	if (--wb->usage == 0)
		afs_unlink_writeback(wb);
	else
		wb = NULL;
	spin_unlock(&vnode->writeback_lock);
	if (wb)
		afs_free_writeback(wb);
}

/*
 * partly or wholly fill a page that's under preparation for writing
 */
static int afs_fill_page(struct afs_vnode *vnode, struct key *key,
			 loff_t pos, struct page *page)
{
	loff_t i_size;
	int ret;
	int len;

	_enter(",,%llu", (unsigned long long)pos);

	i_size = i_size_read(&vnode->vfs_inode);
	if (pos + PAGE_SIZE > i_size)
		len = i_size - pos;
	else
		len = PAGE_SIZE;

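	/* fetch the region of the file covered by this page from the server
	 * so that the bytes not overwritten by the caller remain valid */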
	ret = afs_vnode_fetch_data(vnode, key, pos, len, page);
	if (ret < 0) {
		if (ret == -ENOENT) {
			_debug("got NOENT from server"
			       " - marking file deleted and stale");
			set_bit(AFS_VNODE_DELETED, &vnode->flags);
			ret = -ESTALE;
		}
	}

	_leave(" = %d", ret);
	return ret;
}

/*
 * prepare to perform part of a write to a page
 */
int afs_write_begin(struct file *file, struct address_space *mapping,
		    loff_t pos, unsigned len, unsigned flags,
		    struct page **pagep, void **fsdata)
{
	struct afs_writeback *candidate, *wb;
	struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
	struct page *page;
	struct key *key = file->private_data;
	unsigned from = pos & (PAGE_SIZE - 1);
	unsigned to = from + len;
	pgoff_t index = pos >> PAGE_SHIFT;
	int ret;

	_enter("{%x:%u},{%lx},%u,%u",
	       vnode->fid.vid, vnode->fid.vnode, index, from, to);

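	/* allocate a candidate writeback record up front; it is discarded
	 * below if this page can be merged into an existing record */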
	candidate = kzalloc(sizeof(*candidate), GFP_KERNEL);
	if (!candidate)
		return -ENOMEM;
	candidate->vnode = vnode;
	candidate->first = candidate->last = index;
	candidate->offset_first = from;
	candidate->to_last = to;
	INIT_LIST_HEAD(&candidate->link);
	candidate->usage = 1;
	candidate->state = AFS_WBACK_PENDING;
	init_waitqueue_head(&candidate->waitq);

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page) {
		kfree(candidate);
		return -ENOMEM;
	}

	if (!PageUptodate(page) && len != PAGE_SIZE) {
		ret = afs_fill_page(vnode, key, index << PAGE_SHIFT, page);
		if (ret < 0) {
			unlock_page(page);
			put_page(page);
			kfree(candidate);
			_leave(" = %d [prep]", ret);
			return ret;
		}
		SetPageUptodate(page);
	}

	/* page won't leak in error case: it eventually gets cleaned off LRU */
	*pagep = page;

try_again:
	spin_lock(&vnode->writeback_lock);

	/* see if this page is already pending a writeback under a suitable key
	 * - if so we can just join onto that one */
	wb = (struct afs_writeback *) page_private(page);
	if (wb) {
		if (wb->key == key && wb->state == AFS_WBACK_PENDING)
			goto subsume_in_current_wb;
		goto flush_conflicting_wb;
	}

	if (index > 0) {
		/* see if we can find an already pending writeback that we can
		 * append this page to */
		list_for_each_entry(wb, &vnode->writebacks, link) {
			if (wb->last == index - 1 && wb->key == key &&
			    wb->state == AFS_WBACK_PENDING)
				goto append_to_previous_wb;
		}
	}

	list_add_tail(&candidate->link, &vnode->writebacks);
	candidate->key = key_get(key);
	spin_unlock(&vnode->writeback_lock);
	SetPagePrivate(page);
	set_page_private(page, (unsigned long) candidate);
	_leave(" = 0 [new]");
	return 0;

subsume_in_current_wb:
	_debug("subsume");
	ASSERTRANGE(wb->first, <=, index, <=, wb->last);
	if (index == wb->first && from < wb->offset_first)
		wb->offset_first = from;
	if (index == wb->last && to > wb->to_last)
		wb->to_last = to;
	spin_unlock(&vnode->writeback_lock);
	kfree(candidate);
	_leave(" = 0 [sub]");
	return 0;

append_to_previous_wb:
	_debug("append into %lx-%lx", wb->first, wb->last);
	wb->usage++;
	wb->last++;
	wb->to_last = to;
	spin_unlock(&vnode->writeback_lock);
	SetPagePrivate(page);
	set_page_private(page, (unsigned long) wb);
	kfree(candidate);
	_leave(" = 0 [app]");
	return 0;

	/* the page is currently bound to another context, so if it's dirty we
	 * need to flush it before we can use the new context */
flush_conflicting_wb:
	_debug("flush conflict");
	if (wb->state == AFS_WBACK_PENDING)
		wb->state = AFS_WBACK_CONFLICTING;
	spin_unlock(&vnode->writeback_lock);
	if (PageDirty(page)) {
		ret = afs_write_back_from_locked_page(wb, page);
		if (ret < 0) {
			afs_put_writeback(candidate);
			_leave(" = %d", ret);
			return ret;
		}
	}

	/* the page holds a ref on the writeback record */
	afs_put_writeback(wb);
	set_page_private(page, 0);
	ClearPagePrivate(page);
	goto try_again;
}

/*
 * finalise part of a write to a page
 */
int afs_write_end(struct file *file, struct address_space *mapping,
		  loff_t pos, unsigned len, unsigned copied,
		  struct page *page, void *fsdata)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
	loff_t i_size, maybe_i_size;

	_enter("{%x:%u},{%lx}",
	       vnode->fid.vid, vnode->fid.vnode, page->index);

	maybe_i_size = pos + copied;

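	/* if the write extends beyond the current EOF, update the file size,
	 * rechecking under writeback_lock to avoid racing with another
	 * extender */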
	i_size = i_size_read(&vnode->vfs_inode);
	if (maybe_i_size > i_size) {
		spin_lock(&vnode->writeback_lock);
		i_size = i_size_read(&vnode->vfs_inode);
		if (maybe_i_size > i_size)
			i_size_write(&vnode->vfs_inode, maybe_i_size);
		spin_unlock(&vnode->writeback_lock);
	}

	set_page_dirty(page);
	if (PageDirty(page))
		_debug("dirtied");
	unlock_page(page);
	put_page(page);

	return copied;
}

/*
 * kill all the pages in the given range
 */
static void afs_kill_pages(struct afs_vnode *vnode, bool error,
			   pgoff_t first, pgoff_t last)
{
	struct pagevec pv;
	unsigned count, loop;

	_enter("{%x:%u},%lx-%lx",
	       vnode->fid.vid, vnode->fid.vnode, first, last);

	pagevec_init(&pv, 0);

	do {
		_debug("kill %lx-%lx", first, last);

		count = last - first + 1;
		if (count > PAGEVEC_SIZE)
			count = PAGEVEC_SIZE;
		pv.nr = find_get_pages_contig(vnode->vfs_inode.i_mapping,
					      first, count, pv.pages);
		ASSERTCMP(pv.nr, ==, count);

		for (loop = 0; loop < count; loop++) {
			struct page *page = pv.pages[loop];
			ClearPageUptodate(page);
			if (error)
				SetPageError(page);
			if (PageWriteback(page))
				end_page_writeback(page);
			if (page->index >= first)
				first = page->index + 1;
		}

		__pagevec_release(&pv);
	} while (first < last);

	_leave("");
}

/*
 * synchronously write back the locked page and any subsequent non-locked dirty
 * pages also covered by the same writeback record
 */
static int afs_write_back_from_locked_page(struct afs_writeback *wb,
					   struct page *primary_page)
{
	struct page *pages[8], *page;
	unsigned long count;
	unsigned n, offset, to;
	pgoff_t start, first, last;
	int loop, ret;

	_enter(",%lx", primary_page->index);

	count = 1;
	if (!clear_page_dirty_for_io(primary_page))
		BUG();
	if (test_set_page_writeback(primary_page))
		BUG();

	/* find all consecutive lockable dirty pages, stopping when we find a
	 * page that is not immediately lockable, is not dirty or is missing,
	 * or we reach the end of the range */
	start = primary_page->index;
	if (start >= wb->last)
		goto no_more;
	start++;
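	/* gather further pages in batches of up to ARRAY_SIZE(pages), capping
	 * a single store operation at 65536 pages in total */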
	do {
		_debug("more %lx [%lx]", start, count);
		n = wb->last - start + 1;
		if (n > ARRAY_SIZE(pages))
			n = ARRAY_SIZE(pages);
		n = find_get_pages_contig(wb->vnode->vfs_inode.i_mapping,
					  start, n, pages);
		_debug("fgpc %u", n);
		if (n == 0)
			goto no_more;
		if (pages[0]->index != start) {
			do {
				put_page(pages[--n]);
			} while (n > 0);
			goto no_more;
		}

		for (loop = 0; loop < n; loop++) {
			page = pages[loop];
			if (page->index > wb->last)
				break;
			if (!trylock_page(page))
				break;
			if (!PageDirty(page) ||
			    page_private(page) != (unsigned long) wb) {
				unlock_page(page);
				break;
			}
			if (!clear_page_dirty_for_io(page))
				BUG();
			if (test_set_page_writeback(page))
				BUG();
			unlock_page(page);
			put_page(page);
		}
		count += loop;
		if (loop < n) {
			for (; loop < n; loop++)
				put_page(pages[loop]);
			goto no_more;
		}

		start += loop;
	} while (start <= wb->last && count < 65536);

no_more:
	/* we now have a contiguous set of dirty pages, each with writeback set
	 * and the dirty mark cleared; the first page is locked and must remain
	 * so, all the rest are unlocked */
	first = primary_page->index;
	last = first + count - 1;

	offset = (first == wb->first) ? wb->offset_first : 0;
	to = (last == wb->last) ? wb->to_last : PAGE_SIZE;

	_debug("write back %lx[%u..] to %lx[..%u]", first, offset, last, to);

	ret = afs_vnode_store_data(wb, first, last, offset, to);
	if (ret < 0) {
		switch (ret) {
		case -EDQUOT:
		case -ENOSPC:
			mapping_set_error(wb->vnode->vfs_inode.i_mapping, -ENOSPC);
			break;
		case -EROFS:
		case -EIO:
		case -EREMOTEIO:
		case -EFBIG:
		case -ENOENT:
		case -ENOMEDIUM:
		case -ENXIO:
			afs_kill_pages(wb->vnode, true, first, last);
			mapping_set_error(wb->vnode->vfs_inode.i_mapping, -EIO);
			break;
		case -EACCES:
		case -EPERM:
		case -ENOKEY:
		case -EKEYEXPIRED:
		case -EKEYREJECTED:
		case -EKEYREVOKED:
			afs_kill_pages(wb->vnode, false, first, last);
			break;
		default:
			break;
		}
	} else {
		ret = count;
	}

	_leave(" = %d", ret);
	return ret;
}

/*
 * write a page back to the server
 * - the caller locked the page for us
 */
int afs_writepage(struct page *page, struct writeback_control *wbc)
{
	struct afs_writeback *wb;
	int ret;

	_enter("{%lx},", page->index);

	wb = (struct afs_writeback *) page_private(page);
	ASSERT(wb != NULL);

	ret = afs_write_back_from_locked_page(wb, page);
	unlock_page(page);
	if (ret < 0) {
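		/* the failure has already been processed by
		 * afs_write_back_from_locked_page(), so don't pass the error
		 * back to the caller */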
		_leave(" = %d", ret);
		return 0;
	}

	wbc->nr_to_write -= ret;

	_leave(" = 0");
	return 0;
}

/*
 * write a region of pages back to the server
 */
static int afs_writepages_region(struct address_space *mapping,
				 struct writeback_control *wbc,
				 pgoff_t index, pgoff_t end, pgoff_t *_next)
{
	struct afs_writeback *wb;
	struct page *page;
	int ret, n;

	_enter(",,%lx,%lx,", index, end);

	do {
		n = find_get_pages_tag(mapping, &index, PAGECACHE_TAG_DIRTY,
				       1, &page);
		if (!n)
			break;

		_debug("wback %lx", page->index);

		if (page->index > end) {
			*_next = index;
			put_page(page);
			_leave(" = 0 [%lx]", *_next);
			return 0;
		}

		/* at this point we hold neither mapping->tree_lock nor lock on
		 * the page itself: the page may be truncated or invalidated
		 * (changing page->mapping to NULL), or even swizzled back from
		 * swapper_space to tmpfs file mapping
		 */
		lock_page(page);

		if (page->mapping != mapping) {
			unlock_page(page);
			put_page(page);
			continue;
		}

		if (wbc->sync_mode != WB_SYNC_NONE)
			wait_on_page_writeback(page);

		if (PageWriteback(page) || !PageDirty(page)) {
			unlock_page(page);
			put_page(page);
			continue;
		}

		wb = (struct afs_writeback *) page_private(page);
		ASSERT(wb != NULL);

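		/* switch the record to the writing state so that
		 * afs_write_begin() won't merge any further writes into it */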
		spin_lock(&wb->vnode->writeback_lock);
		wb->state = AFS_WBACK_WRITING;
		spin_unlock(&wb->vnode->writeback_lock);

		ret = afs_write_back_from_locked_page(wb, page);
		unlock_page(page);
		put_page(page);
		if (ret < 0) {
			_leave(" = %d", ret);
			return ret;
		}

		wbc->nr_to_write -= ret;

		cond_resched();
	} while (index < end && wbc->nr_to_write > 0);

	*_next = index;
	_leave(" = 0 [%lx]", *_next);
	return 0;
}

/*
 * write some of the pending data back to the server
 */
int afs_writepages(struct address_space *mapping,
		   struct writeback_control *wbc)
{
	pgoff_t start, end, next;
	int ret;

	_enter("");

	if (wbc->range_cyclic) {
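		/* resume from where the last cyclic pass left off, then wrap
		 * round to pick up anything before that point */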
		start = mapping->writeback_index;
		end = -1;
		ret = afs_writepages_region(mapping, wbc, start, end, &next);
		if (start > 0 && wbc->nr_to_write > 0 && ret == 0)
			ret = afs_writepages_region(mapping, wbc, 0, start,
						    &next);
		mapping->writeback_index = next;
	} else if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) {
		end = (pgoff_t)(LLONG_MAX >> PAGE_SHIFT);
		ret = afs_writepages_region(mapping, wbc, 0, end, &next);
		if (wbc->nr_to_write > 0)
			mapping->writeback_index = next;
	} else {
		start = wbc->range_start >> PAGE_SHIFT;
		end = wbc->range_end >> PAGE_SHIFT;
		ret = afs_writepages_region(mapping, wbc, start, end, &next);
	}

	_leave(" = %d", ret);
	return ret;
}

/*
 * completion of write to server
 */
void afs_pages_written_back(struct afs_vnode *vnode, struct afs_call *call)
{
	struct afs_writeback *wb = call->wb;
	struct pagevec pv;
	unsigned count, loop;
	pgoff_t first = call->first, last = call->last;
	bool free_wb;

	_enter("{%x:%u},{%lx-%lx}",
	       vnode->fid.vid, vnode->fid.vnode, first, last);

	ASSERT(wb != NULL);

	pagevec_init(&pv, 0);

	do {
		_debug("done %lx-%lx", first, last);

		count = last - first + 1;
		if (count > PAGEVEC_SIZE)
			count = PAGEVEC_SIZE;
		pv.nr = find_get_pages_contig(call->mapping, first, count,
					      pv.pages);
		ASSERTCMP(pv.nr, ==, count);

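		/* end writeback on each page and detach it from the writeback
		 * record; once the last page has gone, the record itself can
		 * be freed */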
		spin_lock(&vnode->writeback_lock);
		for (loop = 0; loop < count; loop++) {
			struct page *page = pv.pages[loop];
			end_page_writeback(page);
			if (page_private(page) == (unsigned long) wb) {
				set_page_private(page, 0);
				ClearPagePrivate(page);
				wb->usage--;
			}
		}
		free_wb = false;
		if (wb->usage == 0) {
			afs_unlink_writeback(wb);
			free_wb = true;
		}
		spin_unlock(&vnode->writeback_lock);
		first += count;
		if (free_wb) {
			afs_free_writeback(wb);
			wb = NULL;
		}

		__pagevec_release(&pv);
	} while (first <= last);

	_leave("");
}

/*
 * write to an AFS file
 */
ssize_t afs_file_write(struct kiocb *iocb, struct iov_iter *from)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(iocb->ki_filp));
	ssize_t result;
	size_t count = iov_iter_count(from);

	_enter("{%x.%u},{%zu},",
	       vnode->fid.vid, vnode->fid.vnode, count);

	if (IS_SWAPFILE(&vnode->vfs_inode)) {
		printk(KERN_INFO
		       "AFS: Attempt to write to active swap file!\n");
		return -EBUSY;
	}

	if (!count)
		return 0;

	result = generic_file_write_iter(iocb, from);

	_leave(" = %zd", result);
	return result;
}

/*
 * flush the vnode to the fileserver
 */
int afs_writeback_all(struct afs_vnode *vnode)
{
	struct address_space *mapping = vnode->vfs_inode.i_mapping;
	struct writeback_control wbc = {
		.sync_mode	= WB_SYNC_ALL,
		.nr_to_write	= LONG_MAX,
		.range_cyclic	= 1,
	};
	int ret;

	_enter("");

	ret = mapping->a_ops->writepages(mapping, &wbc);
	__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);

	_leave(" = %d", ret);
	return ret;
}

/*
 * flush any dirty pages for this process, and check for write errors.
 * - the return status from this call provides a reliable indication of
 *   whether any write errors occurred for this process.
 */
int afs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct inode *inode = file_inode(file);
	struct afs_writeback *wb, *xwb;
	struct afs_vnode *vnode = AFS_FS_I(inode);
	int ret;

	_enter("{%x:%u},{n=%pD},%d",
	       vnode->fid.vid, vnode->fid.vnode, file,
	       datasync);

	ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (ret)
		return ret;
	inode_lock(inode);

	/* use a writeback record as a marker in the queue - when this reaches
	 * the front of the queue, all the outstanding writes are either
	 * completed or rejected */
	wb = kzalloc(sizeof(*wb), GFP_KERNEL);
	if (!wb) {
		ret = -ENOMEM;
		goto out;
	}
	wb->vnode = vnode;
	wb->first = 0;
	wb->last = -1;
	wb->offset_first = 0;
	wb->to_last = PAGE_SIZE;
	wb->usage = 1;
	wb->state = AFS_WBACK_SYNCING;
	init_waitqueue_head(&wb->waitq);

	spin_lock(&vnode->writeback_lock);
	list_for_each_entry(xwb, &vnode->writebacks, link) {
		if (xwb->state == AFS_WBACK_PENDING)
			xwb->state = AFS_WBACK_CONFLICTING;
	}
	list_add_tail(&wb->link, &vnode->writebacks);
	spin_unlock(&vnode->writeback_lock);

	/* push all the outstanding writebacks to the server */
	ret = afs_writeback_all(vnode);
	if (ret < 0) {
		afs_put_writeback(wb);
		_leave(" = %d [wb]", ret);
		goto out;
	}

	/* wait for the preceding writes to actually complete */
	ret = wait_event_interruptible(wb->waitq,
				       wb->state == AFS_WBACK_COMPLETE ||
				       vnode->writebacks.next == &wb->link);
	afs_put_writeback(wb);
	_leave(" = %d", ret);
out:
	inode_unlock(inode);
	return ret;
}

/*
 * Flush out all outstanding writes on a file opened for writing when it is
 * closed.
 */
int afs_flush(struct file *file, fl_owner_t id)
{
	_enter("");

	if ((file->f_mode & FMODE_WRITE) == 0)
		return 0;

	return vfs_fsync(file, 0);
}

/*
 * notification that a previously read-only page is about to become writable
 * - if it returns an error, the caller will deliver a bus error signal
 */
int afs_page_mkwrite(struct vm_area_struct *vma, struct page *page)
{
	struct afs_vnode *vnode = AFS_FS_I(vma->vm_file->f_mapping->host);

	_enter("{{%x:%u}},{%lx}",
	       vnode->fid.vid, vnode->fid.vnode, page->index);

	/* wait for the page to be written to the cache before we allow it to
	 * be modified */
#ifdef CONFIG_AFS_FSCACHE
	fscache_wait_on_page_write(vnode->cache, page);
#endif

	_leave(" = 0");
	return 0;
}