• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  *  linux/fs/buffer.c
3  *
4  *  Copyright (C) 1991, 1992, 2002  Linus Torvalds
5  */
6 
7 /*
8  * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
9  *
10  * Removed a lot of unnecessary code and simplified things now that
11  * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
12  *
13  * Speed up hash, lru, and free list operations.  Use gfp() for allocating
14  * hash table, use SLAB cache for buffer heads. SMP threading.  -DaveM
15  *
16  * Added 32k buffer block sizes - these are required older ARM systems. - RMK
17  *
18  * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
19  */
20 
21 #include <linux/kernel.h>
22 #include <linux/syscalls.h>
23 #include <linux/fs.h>
24 #include <linux/mm.h>
25 #include <linux/percpu.h>
26 #include <linux/slab.h>
27 #include <linux/capability.h>
28 #include <linux/blkdev.h>
29 #include <linux/file.h>
30 #include <linux/quotaops.h>
31 #include <linux/highmem.h>
32 #include <linux/module.h>
33 #include <linux/writeback.h>
34 #include <linux/hash.h>
35 #include <linux/suspend.h>
36 #include <linux/buffer_head.h>
37 #include <linux/task_io_accounting_ops.h>
38 #include <linux/bio.h>
39 #include <linux/notifier.h>
40 #include <linux/cpu.h>
41 #include <linux/bitops.h>
42 #include <linux/mpage.h>
43 #include <linux/bit_spinlock.h>
44 
45 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
46 
47 #define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)
48 
49 inline void
init_buffer(struct buffer_head * bh,bh_end_io_t * handler,void * private)50 init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
51 {
52 	bh->b_end_io = handler;
53 	bh->b_private = private;
54 }
55 
sync_buffer(void * word)56 static int sync_buffer(void *word)
57 {
58 	struct block_device *bd;
59 	struct buffer_head *bh
60 		= container_of(word, struct buffer_head, b_state);
61 
62 	smp_mb();
63 	bd = bh->b_bdev;
64 	if (bd)
65 		blk_run_address_space(bd->bd_inode->i_mapping);
66 	io_schedule();
67 	return 0;
68 }
69 
__lock_buffer(struct buffer_head * bh)70 void __lock_buffer(struct buffer_head *bh)
71 {
72 	wait_on_bit_lock(&bh->b_state, BH_Lock, sync_buffer,
73 							TASK_UNINTERRUPTIBLE);
74 }
75 EXPORT_SYMBOL(__lock_buffer);
76 
unlock_buffer(struct buffer_head * bh)77 void unlock_buffer(struct buffer_head *bh)
78 {
79 	clear_bit_unlock(BH_Lock, &bh->b_state);
80 	smp_mb__after_clear_bit();
81 	wake_up_bit(&bh->b_state, BH_Lock);
82 }
83 
84 /*
85  * Block until a buffer comes unlocked.  This doesn't stop it
86  * from becoming locked again - you have to lock it yourself
87  * if you want to preserve its state.
88  */
__wait_on_buffer(struct buffer_head * bh)89 void __wait_on_buffer(struct buffer_head * bh)
90 {
91 	wait_on_bit(&bh->b_state, BH_Lock, sync_buffer, TASK_UNINTERRUPTIBLE);
92 }
93 
94 static void
__clear_page_buffers(struct page * page)95 __clear_page_buffers(struct page *page)
96 {
97 	ClearPagePrivate(page);
98 	set_page_private(page, 0);
99 	page_cache_release(page);
100 }
101 
102 
quiet_error(struct buffer_head * bh)103 static int quiet_error(struct buffer_head *bh)
104 {
105 	if (!test_bit(BH_Quiet, &bh->b_state) && printk_ratelimit())
106 		return 0;
107 	return 1;
108 }
109 
110 
buffer_io_error(struct buffer_head * bh)111 static void buffer_io_error(struct buffer_head *bh)
112 {
113 	char b[BDEVNAME_SIZE];
114 	printk(KERN_ERR "Buffer I/O error on device %s, logical block %Lu\n",
115 			bdevname(bh->b_bdev, b),
116 			(unsigned long long)bh->b_blocknr);
117 }
118 
119 /*
120  * End-of-IO handler helper function which does not touch the bh after
121  * unlocking it.
122  * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
123  * a race there is benign: unlock_buffer() only use the bh's address for
124  * hashing after unlocking the buffer, so it doesn't actually touch the bh
125  * itself.
126  */
__end_buffer_read_notouch(struct buffer_head * bh,int uptodate)127 static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate)
128 {
129 	if (uptodate) {
130 		set_buffer_uptodate(bh);
131 	} else {
132 		/* This happens, due to failed READA attempts. */
133 		clear_buffer_uptodate(bh);
134 	}
135 	unlock_buffer(bh);
136 }
137 
138 /*
139  * Default synchronous end-of-IO handler..  Just mark it up-to-date and
140  * unlock the buffer. This is what ll_rw_block uses too.
141  */
end_buffer_read_sync(struct buffer_head * bh,int uptodate)142 void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
143 {
144 	__end_buffer_read_notouch(bh, uptodate);
145 	put_bh(bh);
146 }
147 
end_buffer_write_sync(struct buffer_head * bh,int uptodate)148 void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
149 {
150 	char b[BDEVNAME_SIZE];
151 
152 	if (uptodate) {
153 		set_buffer_uptodate(bh);
154 	} else {
155 		if (!buffer_eopnotsupp(bh) && !quiet_error(bh)) {
156 			buffer_io_error(bh);
157 			printk(KERN_WARNING "lost page write due to "
158 					"I/O error on %s\n",
159 				       bdevname(bh->b_bdev, b));
160 		}
161 		set_buffer_write_io_error(bh);
162 		clear_buffer_uptodate(bh);
163 	}
164 	unlock_buffer(bh);
165 	put_bh(bh);
166 }
167 
168 /*
169  * Write out and wait upon all the dirty data associated with a block
170  * device via its mapping.  Does not take the superblock lock.
171  */
sync_blockdev(struct block_device * bdev)172 int sync_blockdev(struct block_device *bdev)
173 {
174 	int ret = 0;
175 
176 	if (bdev)
177 		ret = filemap_write_and_wait(bdev->bd_inode->i_mapping);
178 	return ret;
179 }
180 EXPORT_SYMBOL(sync_blockdev);
181 
182 /*
183  * Write out and wait upon all dirty data associated with this
184  * device.   Filesystem data as well as the underlying block
185  * device.  Takes the superblock lock.
186  */
fsync_bdev(struct block_device * bdev)187 int fsync_bdev(struct block_device *bdev)
188 {
189 	struct super_block *sb = get_super(bdev);
190 	if (sb) {
191 		int res = fsync_super(sb);
192 		drop_super(sb);
193 		return res;
194 	}
195 	return sync_blockdev(bdev);
196 }
197 
198 /**
199  * freeze_bdev  --  lock a filesystem and force it into a consistent state
200  * @bdev:	blockdevice to lock
201  *
202  * This takes the block device bd_mount_sem to make sure no new mounts
203  * happen on bdev until thaw_bdev() is called.
204  * If a superblock is found on this device, we take the s_umount semaphore
205  * on it to make sure nobody unmounts until the snapshot creation is done.
206  * The reference counter (bd_fsfreeze_count) guarantees that only the last
207  * unfreeze process can unfreeze the frozen filesystem actually when multiple
208  * freeze requests arrive simultaneously. It counts up in freeze_bdev() and
209  * count down in thaw_bdev(). When it becomes 0, thaw_bdev() will unfreeze
210  * actually.
211  */
freeze_bdev(struct block_device * bdev)212 struct super_block *freeze_bdev(struct block_device *bdev)
213 {
214 	struct super_block *sb;
215 	int error = 0;
216 
217 	mutex_lock(&bdev->bd_fsfreeze_mutex);
218 	if (bdev->bd_fsfreeze_count > 0) {
219 		bdev->bd_fsfreeze_count++;
220 		sb = get_super(bdev);
221 		mutex_unlock(&bdev->bd_fsfreeze_mutex);
222 		return sb;
223 	}
224 	bdev->bd_fsfreeze_count++;
225 
226 	down(&bdev->bd_mount_sem);
227 	sb = get_super(bdev);
228 	if (sb && !(sb->s_flags & MS_RDONLY)) {
229 		sb->s_frozen = SB_FREEZE_WRITE;
230 		smp_wmb();
231 
232 		__fsync_super(sb);
233 
234 		sb->s_frozen = SB_FREEZE_TRANS;
235 		smp_wmb();
236 
237 		sync_blockdev(sb->s_bdev);
238 
239 		if (sb->s_op->freeze_fs) {
240 			error = sb->s_op->freeze_fs(sb);
241 			if (error) {
242 				printk(KERN_ERR
243 					"VFS:Filesystem freeze failed\n");
244 				sb->s_frozen = SB_UNFROZEN;
245 				drop_super(sb);
246 				up(&bdev->bd_mount_sem);
247 				bdev->bd_fsfreeze_count--;
248 				mutex_unlock(&bdev->bd_fsfreeze_mutex);
249 				return ERR_PTR(error);
250 			}
251 		}
252 	}
253 
254 	sync_blockdev(bdev);
255 	mutex_unlock(&bdev->bd_fsfreeze_mutex);
256 
257 	return sb;	/* thaw_bdev releases s->s_umount and bd_mount_sem */
258 }
259 EXPORT_SYMBOL(freeze_bdev);
260 
261 /**
262  * thaw_bdev  -- unlock filesystem
263  * @bdev:	blockdevice to unlock
264  * @sb:		associated superblock
265  *
266  * Unlocks the filesystem and marks it writeable again after freeze_bdev().
267  */
thaw_bdev(struct block_device * bdev,struct super_block * sb)268 int thaw_bdev(struct block_device *bdev, struct super_block *sb)
269 {
270 	int error = 0;
271 
272 	mutex_lock(&bdev->bd_fsfreeze_mutex);
273 	if (!bdev->bd_fsfreeze_count) {
274 		mutex_unlock(&bdev->bd_fsfreeze_mutex);
275 		return -EINVAL;
276 	}
277 
278 	bdev->bd_fsfreeze_count--;
279 	if (bdev->bd_fsfreeze_count > 0) {
280 		if (sb)
281 			drop_super(sb);
282 		mutex_unlock(&bdev->bd_fsfreeze_mutex);
283 		return 0;
284 	}
285 
286 	if (sb) {
287 		BUG_ON(sb->s_bdev != bdev);
288 		if (!(sb->s_flags & MS_RDONLY)) {
289 			if (sb->s_op->unfreeze_fs) {
290 				error = sb->s_op->unfreeze_fs(sb);
291 				if (error) {
292 					printk(KERN_ERR
293 						"VFS:Filesystem thaw failed\n");
294 					sb->s_frozen = SB_FREEZE_TRANS;
295 					bdev->bd_fsfreeze_count++;
296 					mutex_unlock(&bdev->bd_fsfreeze_mutex);
297 					return error;
298 				}
299 			}
300 			sb->s_frozen = SB_UNFROZEN;
301 			smp_wmb();
302 			wake_up(&sb->s_wait_unfrozen);
303 		}
304 		drop_super(sb);
305 	}
306 
307 	up(&bdev->bd_mount_sem);
308 	mutex_unlock(&bdev->bd_fsfreeze_mutex);
309 	return 0;
310 }
311 EXPORT_SYMBOL(thaw_bdev);
312 
313 /*
314  * Various filesystems appear to want __find_get_block to be non-blocking.
315  * But it's the page lock which protects the buffers.  To get around this,
316  * we get exclusion from try_to_free_buffers with the blockdev mapping's
317  * private_lock.
318  *
319  * Hack idea: for the blockdev mapping, i_bufferlist_lock contention
320  * may be quite high.  This code could TryLock the page, and if that
321  * succeeds, there is no need to take private_lock. (But if
322  * private_lock is contended then so is mapping->tree_lock).
323  */
324 static struct buffer_head *
__find_get_block_slow(struct block_device * bdev,sector_t block)325 __find_get_block_slow(struct block_device *bdev, sector_t block)
326 {
327 	struct inode *bd_inode = bdev->bd_inode;
328 	struct address_space *bd_mapping = bd_inode->i_mapping;
329 	struct buffer_head *ret = NULL;
330 	pgoff_t index;
331 	struct buffer_head *bh;
332 	struct buffer_head *head;
333 	struct page *page;
334 	int all_mapped = 1;
335 
336 	index = block >> (PAGE_CACHE_SHIFT - bd_inode->i_blkbits);
337 	page = find_get_page(bd_mapping, index);
338 	if (!page)
339 		goto out;
340 
341 	spin_lock(&bd_mapping->private_lock);
342 	if (!page_has_buffers(page))
343 		goto out_unlock;
344 	head = page_buffers(page);
345 	bh = head;
346 	do {
347 		if (bh->b_blocknr == block) {
348 			ret = bh;
349 			get_bh(bh);
350 			goto out_unlock;
351 		}
352 		if (!buffer_mapped(bh))
353 			all_mapped = 0;
354 		bh = bh->b_this_page;
355 	} while (bh != head);
356 
357 	/* we might be here because some of the buffers on this page are
358 	 * not mapped.  This is due to various races between
359 	 * file io on the block device and getblk.  It gets dealt with
360 	 * elsewhere, don't buffer_error if we had some unmapped buffers
361 	 */
362 	if (all_mapped) {
363 		printk("__find_get_block_slow() failed. "
364 			"block=%llu, b_blocknr=%llu\n",
365 			(unsigned long long)block,
366 			(unsigned long long)bh->b_blocknr);
367 		printk("b_state=0x%08lx, b_size=%zu\n",
368 			bh->b_state, bh->b_size);
369 		printk("device blocksize: %d\n", 1 << bd_inode->i_blkbits);
370 	}
371 out_unlock:
372 	spin_unlock(&bd_mapping->private_lock);
373 	page_cache_release(page);
374 out:
375 	return ret;
376 }
377 
378 /* If invalidate_buffers() will trash dirty buffers, it means some kind
379    of fs corruption is going on. Trashing dirty data always imply losing
380    information that was supposed to be just stored on the physical layer
381    by the user.
382 
383    Thus invalidate_buffers in general usage is not allwowed to trash
384    dirty buffers. For example ioctl(FLSBLKBUF) expects dirty data to
385    be preserved.  These buffers are simply skipped.
386 
387    We also skip buffers which are still in use.  For example this can
388    happen if a userspace program is reading the block device.
389 
390    NOTE: In the case where the user removed a removable-media-disk even if
391    there's still dirty data not synced on disk (due a bug in the device driver
392    or due an error of the user), by not destroying the dirty buffers we could
393    generate corruption also on the next media inserted, thus a parameter is
394    necessary to handle this case in the most safe way possible (trying
395    to not corrupt also the new disk inserted with the data belonging to
396    the old now corrupted disk). Also for the ramdisk the natural thing
397    to do in order to release the ramdisk memory is to destroy dirty buffers.
398 
399    These are two special cases. Normal usage imply the device driver
400    to issue a sync on the device (without waiting I/O completion) and
401    then an invalidate_buffers call that doesn't trash dirty buffers.
402 
403    For handling cache coherency with the blkdev pagecache the 'update' case
404    is been introduced. It is needed to re-read from disk any pinned
405    buffer. NOTE: re-reading from disk is destructive so we can do it only
406    when we assume nobody is changing the buffercache under our I/O and when
407    we think the disk contains more recent information than the buffercache.
408    The update == 1 pass marks the buffers we need to update, the update == 2
409    pass does the actual I/O. */
invalidate_bdev(struct block_device * bdev)410 void invalidate_bdev(struct block_device *bdev)
411 {
412 	struct address_space *mapping = bdev->bd_inode->i_mapping;
413 
414 	if (mapping->nrpages == 0)
415 		return;
416 
417 	invalidate_bh_lrus();
418 	invalidate_mapping_pages(mapping, 0, -1);
419 }
420 
421 /*
422  * Kick pdflush then try to free up some ZONE_NORMAL memory.
423  */
free_more_memory(void)424 static void free_more_memory(void)
425 {
426 	struct zone *zone;
427 	int nid;
428 
429 	wakeup_pdflush(1024);
430 	yield();
431 
432 	for_each_online_node(nid) {
433 		(void)first_zones_zonelist(node_zonelist(nid, GFP_NOFS),
434 						gfp_zone(GFP_NOFS), NULL,
435 						&zone);
436 		if (zone)
437 			try_to_free_pages(node_zonelist(nid, GFP_NOFS), 0,
438 						GFP_NOFS);
439 	}
440 }
441 
442 /*
443  * I/O completion handler for block_read_full_page() - pages
444  * which come unlocked at the end of I/O.
445  */
end_buffer_async_read(struct buffer_head * bh,int uptodate)446 static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
447 {
448 	unsigned long flags;
449 	struct buffer_head *first;
450 	struct buffer_head *tmp;
451 	struct page *page;
452 	int page_uptodate = 1;
453 
454 	BUG_ON(!buffer_async_read(bh));
455 
456 	page = bh->b_page;
457 	if (uptodate) {
458 		set_buffer_uptodate(bh);
459 	} else {
460 		clear_buffer_uptodate(bh);
461 		if (!quiet_error(bh))
462 			buffer_io_error(bh);
463 		SetPageError(page);
464 	}
465 
466 	/*
467 	 * Be _very_ careful from here on. Bad things can happen if
468 	 * two buffer heads end IO at almost the same time and both
469 	 * decide that the page is now completely done.
470 	 */
471 	first = page_buffers(page);
472 	local_irq_save(flags);
473 	bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
474 	clear_buffer_async_read(bh);
475 	unlock_buffer(bh);
476 	tmp = bh;
477 	do {
478 		if (!buffer_uptodate(tmp))
479 			page_uptodate = 0;
480 		if (buffer_async_read(tmp)) {
481 			BUG_ON(!buffer_locked(tmp));
482 			goto still_busy;
483 		}
484 		tmp = tmp->b_this_page;
485 	} while (tmp != bh);
486 	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
487 	local_irq_restore(flags);
488 
489 	/*
490 	 * If none of the buffers had errors and they are all
491 	 * uptodate then we can set the page uptodate.
492 	 */
493 	if (page_uptodate && !PageError(page))
494 		SetPageUptodate(page);
495 	unlock_page(page);
496 	return;
497 
498 still_busy:
499 	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
500 	local_irq_restore(flags);
501 	return;
502 }
503 
504 /*
505  * Completion handler for block_write_full_page() - pages which are unlocked
506  * during I/O, and which have PageWriteback cleared upon I/O completion.
507  */
end_buffer_async_write(struct buffer_head * bh,int uptodate)508 static void end_buffer_async_write(struct buffer_head *bh, int uptodate)
509 {
510 	char b[BDEVNAME_SIZE];
511 	unsigned long flags;
512 	struct buffer_head *first;
513 	struct buffer_head *tmp;
514 	struct page *page;
515 
516 	BUG_ON(!buffer_async_write(bh));
517 
518 	page = bh->b_page;
519 	if (uptodate) {
520 		set_buffer_uptodate(bh);
521 	} else {
522 		if (!quiet_error(bh)) {
523 			buffer_io_error(bh);
524 			printk(KERN_WARNING "lost page write due to "
525 					"I/O error on %s\n",
526 			       bdevname(bh->b_bdev, b));
527 		}
528 		set_bit(AS_EIO, &page->mapping->flags);
529 		set_buffer_write_io_error(bh);
530 		clear_buffer_uptodate(bh);
531 		SetPageError(page);
532 	}
533 
534 	first = page_buffers(page);
535 	local_irq_save(flags);
536 	bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
537 
538 	clear_buffer_async_write(bh);
539 	unlock_buffer(bh);
540 	tmp = bh->b_this_page;
541 	while (tmp != bh) {
542 		if (buffer_async_write(tmp)) {
543 			BUG_ON(!buffer_locked(tmp));
544 			goto still_busy;
545 		}
546 		tmp = tmp->b_this_page;
547 	}
548 	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
549 	local_irq_restore(flags);
550 	end_page_writeback(page);
551 	return;
552 
553 still_busy:
554 	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
555 	local_irq_restore(flags);
556 	return;
557 }
558 
559 /*
560  * If a page's buffers are under async readin (end_buffer_async_read
561  * completion) then there is a possibility that another thread of
562  * control could lock one of the buffers after it has completed
563  * but while some of the other buffers have not completed.  This
564  * locked buffer would confuse end_buffer_async_read() into not unlocking
565  * the page.  So the absence of BH_Async_Read tells end_buffer_async_read()
566  * that this buffer is not under async I/O.
567  *
568  * The page comes unlocked when it has no locked buffer_async buffers
569  * left.
570  *
571  * PageLocked prevents anyone starting new async I/O reads any of
572  * the buffers.
573  *
574  * PageWriteback is used to prevent simultaneous writeout of the same
575  * page.
576  *
577  * PageLocked prevents anyone from starting writeback of a page which is
578  * under read I/O (PageWriteback is only ever set against a locked page).
579  */
mark_buffer_async_read(struct buffer_head * bh)580 static void mark_buffer_async_read(struct buffer_head *bh)
581 {
582 	bh->b_end_io = end_buffer_async_read;
583 	set_buffer_async_read(bh);
584 }
585 
mark_buffer_async_write(struct buffer_head * bh)586 void mark_buffer_async_write(struct buffer_head *bh)
587 {
588 	bh->b_end_io = end_buffer_async_write;
589 	set_buffer_async_write(bh);
590 }
591 EXPORT_SYMBOL(mark_buffer_async_write);
592 
593 
594 /*
595  * fs/buffer.c contains helper functions for buffer-backed address space's
596  * fsync functions.  A common requirement for buffer-based filesystems is
597  * that certain data from the backing blockdev needs to be written out for
598  * a successful fsync().  For example, ext2 indirect blocks need to be
599  * written back and waited upon before fsync() returns.
600  *
601  * The functions mark_buffer_inode_dirty(), fsync_inode_buffers(),
602  * inode_has_buffers() and invalidate_inode_buffers() are provided for the
603  * management of a list of dependent buffers at ->i_mapping->private_list.
604  *
605  * Locking is a little subtle: try_to_free_buffers() will remove buffers
606  * from their controlling inode's queue when they are being freed.  But
607  * try_to_free_buffers() will be operating against the *blockdev* mapping
608  * at the time, not against the S_ISREG file which depends on those buffers.
609  * So the locking for private_list is via the private_lock in the address_space
610  * which backs the buffers.  Which is different from the address_space
611  * against which the buffers are listed.  So for a particular address_space,
612  * mapping->private_lock does *not* protect mapping->private_list!  In fact,
613  * mapping->private_list will always be protected by the backing blockdev's
614  * ->private_lock.
615  *
616  * Which introduces a requirement: all buffers on an address_space's
617  * ->private_list must be from the same address_space: the blockdev's.
618  *
619  * address_spaces which do not place buffers at ->private_list via these
620  * utility functions are free to use private_lock and private_list for
621  * whatever they want.  The only requirement is that list_empty(private_list)
622  * be true at clear_inode() time.
623  *
624  * FIXME: clear_inode should not call invalidate_inode_buffers().  The
625  * filesystems should do that.  invalidate_inode_buffers() should just go
626  * BUG_ON(!list_empty).
627  *
628  * FIXME: mark_buffer_dirty_inode() is a data-plane operation.  It should
629  * take an address_space, not an inode.  And it should be called
630  * mark_buffer_dirty_fsync() to clearly define why those buffers are being
631  * queued up.
632  *
633  * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
634  * list if it is already on a list.  Because if the buffer is on a list,
635  * it *must* already be on the right one.  If not, the filesystem is being
636  * silly.  This will save a ton of locking.  But first we have to ensure
637  * that buffers are taken *off* the old inode's list when they are freed
638  * (presumably in truncate).  That requires careful auditing of all
639  * filesystems (do it inside bforget()).  It could also be done by bringing
640  * b_inode back.
641  */
642 
643 /*
644  * The buffer's backing address_space's private_lock must be held
645  */
__remove_assoc_queue(struct buffer_head * bh)646 static void __remove_assoc_queue(struct buffer_head *bh)
647 {
648 	list_del_init(&bh->b_assoc_buffers);
649 	WARN_ON(!bh->b_assoc_map);
650 	if (buffer_write_io_error(bh))
651 		set_bit(AS_EIO, &bh->b_assoc_map->flags);
652 	bh->b_assoc_map = NULL;
653 }
654 
inode_has_buffers(struct inode * inode)655 int inode_has_buffers(struct inode *inode)
656 {
657 	return !list_empty(&inode->i_data.private_list);
658 }
659 
660 /*
661  * osync is designed to support O_SYNC io.  It waits synchronously for
662  * all already-submitted IO to complete, but does not queue any new
663  * writes to the disk.
664  *
665  * To do O_SYNC writes, just queue the buffer writes with ll_rw_block as
666  * you dirty the buffers, and then use osync_inode_buffers to wait for
667  * completion.  Any other dirty buffers which are not yet queued for
668  * write will not be flushed to disk by the osync.
669  */
osync_buffers_list(spinlock_t * lock,struct list_head * list)670 static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
671 {
672 	struct buffer_head *bh;
673 	struct list_head *p;
674 	int err = 0;
675 
676 	spin_lock(lock);
677 repeat:
678 	list_for_each_prev(p, list) {
679 		bh = BH_ENTRY(p);
680 		if (buffer_locked(bh)) {
681 			get_bh(bh);
682 			spin_unlock(lock);
683 			wait_on_buffer(bh);
684 			if (!buffer_uptodate(bh))
685 				err = -EIO;
686 			brelse(bh);
687 			spin_lock(lock);
688 			goto repeat;
689 		}
690 	}
691 	spin_unlock(lock);
692 	return err;
693 }
694 
695 /**
696  * sync_mapping_buffers - write out & wait upon a mapping's "associated" buffers
697  * @mapping: the mapping which wants those buffers written
698  *
699  * Starts I/O against the buffers at mapping->private_list, and waits upon
700  * that I/O.
701  *
702  * Basically, this is a convenience function for fsync().
703  * @mapping is a file or directory which needs those buffers to be written for
704  * a successful fsync().
705  */
sync_mapping_buffers(struct address_space * mapping)706 int sync_mapping_buffers(struct address_space *mapping)
707 {
708 	struct address_space *buffer_mapping = mapping->assoc_mapping;
709 
710 	if (buffer_mapping == NULL || list_empty(&mapping->private_list))
711 		return 0;
712 
713 	return fsync_buffers_list(&buffer_mapping->private_lock,
714 					&mapping->private_list);
715 }
716 EXPORT_SYMBOL(sync_mapping_buffers);
717 
718 /*
719  * Called when we've recently written block `bblock', and it is known that
720  * `bblock' was for a buffer_boundary() buffer.  This means that the block at
721  * `bblock + 1' is probably a dirty indirect block.  Hunt it down and, if it's
722  * dirty, schedule it for IO.  So that indirects merge nicely with their data.
723  */
write_boundary_block(struct block_device * bdev,sector_t bblock,unsigned blocksize)724 void write_boundary_block(struct block_device *bdev,
725 			sector_t bblock, unsigned blocksize)
726 {
727 	struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
728 	if (bh) {
729 		if (buffer_dirty(bh))
730 			ll_rw_block(WRITE, 1, &bh);
731 		put_bh(bh);
732 	}
733 }
734 
mark_buffer_dirty_inode(struct buffer_head * bh,struct inode * inode)735 void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
736 {
737 	struct address_space *mapping = inode->i_mapping;
738 	struct address_space *buffer_mapping = bh->b_page->mapping;
739 
740 	mark_buffer_dirty(bh);
741 	if (!mapping->assoc_mapping) {
742 		mapping->assoc_mapping = buffer_mapping;
743 	} else {
744 		BUG_ON(mapping->assoc_mapping != buffer_mapping);
745 	}
746 	if (!bh->b_assoc_map) {
747 		spin_lock(&buffer_mapping->private_lock);
748 		list_move_tail(&bh->b_assoc_buffers,
749 				&mapping->private_list);
750 		bh->b_assoc_map = mapping;
751 		spin_unlock(&buffer_mapping->private_lock);
752 	}
753 }
754 EXPORT_SYMBOL(mark_buffer_dirty_inode);
755 
756 /*
757  * Mark the page dirty, and set it dirty in the radix tree, and mark the inode
758  * dirty.
759  *
760  * If warn is true, then emit a warning if the page is not uptodate and has
761  * not been truncated.
762  */
__set_page_dirty(struct page * page,struct address_space * mapping,int warn)763 static void __set_page_dirty(struct page *page,
764 		struct address_space *mapping, int warn)
765 {
766 	spin_lock_irq(&mapping->tree_lock);
767 	if (page->mapping) {	/* Race with truncate? */
768 		WARN_ON_ONCE(warn && !PageUptodate(page));
769 
770 		if (mapping_cap_account_dirty(mapping)) {
771 			__inc_zone_page_state(page, NR_FILE_DIRTY);
772 			__inc_bdi_stat(mapping->backing_dev_info,
773 					BDI_RECLAIMABLE);
774 			task_dirty_inc(current);
775 			task_io_account_write(PAGE_CACHE_SIZE);
776 		}
777 		radix_tree_tag_set(&mapping->page_tree,
778 				page_index(page), PAGECACHE_TAG_DIRTY);
779 	}
780 	spin_unlock_irq(&mapping->tree_lock);
781 	__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
782 }
783 
784 /*
785  * Add a page to the dirty page list.
786  *
787  * It is a sad fact of life that this function is called from several places
788  * deeply under spinlocking.  It may not sleep.
789  *
790  * If the page has buffers, the uptodate buffers are set dirty, to preserve
791  * dirty-state coherency between the page and the buffers.  It the page does
792  * not have buffers then when they are later attached they will all be set
793  * dirty.
794  *
795  * The buffers are dirtied before the page is dirtied.  There's a small race
796  * window in which a writepage caller may see the page cleanness but not the
797  * buffer dirtiness.  That's fine.  If this code were to set the page dirty
798  * before the buffers, a concurrent writepage caller could clear the page dirty
799  * bit, see a bunch of clean buffers and we'd end up with dirty buffers/clean
800  * page on the dirty page list.
801  *
802  * We use private_lock to lock against try_to_free_buffers while using the
803  * page's buffer list.  Also use this to protect against clean buffers being
804  * added to the page after it was set dirty.
805  *
806  * FIXME: may need to call ->reservepage here as well.  That's rather up to the
807  * address_space though.
808  */
__set_page_dirty_buffers(struct page * page)809 int __set_page_dirty_buffers(struct page *page)
810 {
811 	int newly_dirty;
812 	struct address_space *mapping = page_mapping(page);
813 
814 	if (unlikely(!mapping))
815 		return !TestSetPageDirty(page);
816 
817 	spin_lock(&mapping->private_lock);
818 	if (page_has_buffers(page)) {
819 		struct buffer_head *head = page_buffers(page);
820 		struct buffer_head *bh = head;
821 
822 		do {
823 			set_buffer_dirty(bh);
824 			bh = bh->b_this_page;
825 		} while (bh != head);
826 	}
827 	newly_dirty = !TestSetPageDirty(page);
828 	spin_unlock(&mapping->private_lock);
829 
830 	if (newly_dirty)
831 		__set_page_dirty(page, mapping, 1);
832 	return newly_dirty;
833 }
834 EXPORT_SYMBOL(__set_page_dirty_buffers);
835 
836 /*
837  * Write out and wait upon a list of buffers.
838  *
839  * We have conflicting pressures: we want to make sure that all
840  * initially dirty buffers get waited on, but that any subsequently
841  * dirtied buffers don't.  After all, we don't want fsync to last
842  * forever if somebody is actively writing to the file.
843  *
844  * Do this in two main stages: first we copy dirty buffers to a
845  * temporary inode list, queueing the writes as we go.  Then we clean
846  * up, waiting for those writes to complete.
847  *
848  * During this second stage, any subsequent updates to the file may end
849  * up refiling the buffer on the original inode's dirty list again, so
850  * there is a chance we will end up with a buffer queued for write but
851  * not yet completed on that list.  So, as a final cleanup we go through
852  * the osync code to catch these locked, dirty buffers without requeuing
853  * any newly dirty buffers for write.
854  */
fsync_buffers_list(spinlock_t * lock,struct list_head * list)855 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
856 {
857 	struct buffer_head *bh;
858 	struct list_head tmp;
859 	struct address_space *mapping;
860 	int err = 0, err2;
861 
862 	INIT_LIST_HEAD(&tmp);
863 
864 	spin_lock(lock);
865 	while (!list_empty(list)) {
866 		bh = BH_ENTRY(list->next);
867 		mapping = bh->b_assoc_map;
868 		__remove_assoc_queue(bh);
869 		/* Avoid race with mark_buffer_dirty_inode() which does
870 		 * a lockless check and we rely on seeing the dirty bit */
871 		smp_mb();
872 		if (buffer_dirty(bh) || buffer_locked(bh)) {
873 			list_add(&bh->b_assoc_buffers, &tmp);
874 			bh->b_assoc_map = mapping;
875 			if (buffer_dirty(bh)) {
876 				get_bh(bh);
877 				spin_unlock(lock);
878 				/*
879 				 * Ensure any pending I/O completes so that
880 				 * ll_rw_block() actually writes the current
881 				 * contents - it is a noop if I/O is still in
882 				 * flight on potentially older contents.
883 				 */
884 				ll_rw_block(SWRITE_SYNC, 1, &bh);
885 				brelse(bh);
886 				spin_lock(lock);
887 			}
888 		}
889 	}
890 
891 	while (!list_empty(&tmp)) {
892 		bh = BH_ENTRY(tmp.prev);
893 		get_bh(bh);
894 		mapping = bh->b_assoc_map;
895 		__remove_assoc_queue(bh);
896 		/* Avoid race with mark_buffer_dirty_inode() which does
897 		 * a lockless check and we rely on seeing the dirty bit */
898 		smp_mb();
899 		if (buffer_dirty(bh)) {
900 			list_add(&bh->b_assoc_buffers,
901 				 &mapping->private_list);
902 			bh->b_assoc_map = mapping;
903 		}
904 		spin_unlock(lock);
905 		wait_on_buffer(bh);
906 		if (!buffer_uptodate(bh))
907 			err = -EIO;
908 		brelse(bh);
909 		spin_lock(lock);
910 	}
911 
912 	spin_unlock(lock);
913 	err2 = osync_buffers_list(lock, list);
914 	if (err)
915 		return err;
916 	else
917 		return err2;
918 }
919 
920 /*
921  * Invalidate any and all dirty buffers on a given inode.  We are
922  * probably unmounting the fs, but that doesn't mean we have already
923  * done a sync().  Just drop the buffers from the inode list.
924  *
925  * NOTE: we take the inode's blockdev's mapping's private_lock.  Which
926  * assumes that all the buffers are against the blockdev.  Not true
927  * for reiserfs.
928  */
invalidate_inode_buffers(struct inode * inode)929 void invalidate_inode_buffers(struct inode *inode)
930 {
931 	if (inode_has_buffers(inode)) {
932 		struct address_space *mapping = &inode->i_data;
933 		struct list_head *list = &mapping->private_list;
934 		struct address_space *buffer_mapping = mapping->assoc_mapping;
935 
936 		spin_lock(&buffer_mapping->private_lock);
937 		while (!list_empty(list))
938 			__remove_assoc_queue(BH_ENTRY(list->next));
939 		spin_unlock(&buffer_mapping->private_lock);
940 	}
941 }
942 EXPORT_SYMBOL(invalidate_inode_buffers);
943 
944 /*
945  * Remove any clean buffers from the inode's buffer list.  This is called
946  * when we're trying to free the inode itself.  Those buffers can pin it.
947  *
948  * Returns true if all buffers were removed.
949  */
remove_inode_buffers(struct inode * inode)950 int remove_inode_buffers(struct inode *inode)
951 {
952 	int ret = 1;
953 
954 	if (inode_has_buffers(inode)) {
955 		struct address_space *mapping = &inode->i_data;
956 		struct list_head *list = &mapping->private_list;
957 		struct address_space *buffer_mapping = mapping->assoc_mapping;
958 
959 		spin_lock(&buffer_mapping->private_lock);
960 		while (!list_empty(list)) {
961 			struct buffer_head *bh = BH_ENTRY(list->next);
962 			if (buffer_dirty(bh)) {
963 				ret = 0;
964 				break;
965 			}
966 			__remove_assoc_queue(bh);
967 		}
968 		spin_unlock(&buffer_mapping->private_lock);
969 	}
970 	return ret;
971 }
972 
973 /*
974  * Create the appropriate buffers when given a page for data area and
975  * the size of each buffer.. Use the bh->b_this_page linked list to
976  * follow the buffers created.  Return NULL if unable to create more
977  * buffers.
978  *
979  * The retry flag is used to differentiate async IO (paging, swapping)
980  * which may not fail from ordinary buffer allocations.
981  */
alloc_page_buffers(struct page * page,unsigned long size,int retry)982 struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
983 		int retry)
984 {
985 	struct buffer_head *bh, *head;
986 	long offset;
987 
988 try_again:
989 	head = NULL;
990 	offset = PAGE_SIZE;
991 	while ((offset -= size) >= 0) {
992 		bh = alloc_buffer_head(GFP_NOFS);
993 		if (!bh)
994 			goto no_grow;
995 
996 		bh->b_bdev = NULL;
997 		bh->b_this_page = head;
998 		bh->b_blocknr = -1;
999 		head = bh;
1000 
1001 		bh->b_state = 0;
1002 		atomic_set(&bh->b_count, 0);
1003 		bh->b_private = NULL;
1004 		bh->b_size = size;
1005 
1006 		/* Link the buffer to its page */
1007 		set_bh_page(bh, page, offset);
1008 
1009 		init_buffer(bh, NULL, NULL);
1010 	}
1011 	return head;
1012 /*
1013  * In case anything failed, we just free everything we got.
1014  */
1015 no_grow:
1016 	if (head) {
1017 		do {
1018 			bh = head;
1019 			head = head->b_this_page;
1020 			free_buffer_head(bh);
1021 		} while (head);
1022 	}
1023 
1024 	/*
1025 	 * Return failure for non-async IO requests.  Async IO requests
1026 	 * are not allowed to fail, so we have to wait until buffer heads
1027 	 * become available.  But we don't want tasks sleeping with
1028 	 * partially complete buffers, so all were released above.
1029 	 */
1030 	if (!retry)
1031 		return NULL;
1032 
1033 	/* We're _really_ low on memory. Now we just
1034 	 * wait for old buffer heads to become free due to
1035 	 * finishing IO.  Since this is an async request and
1036 	 * the reserve list is empty, we're sure there are
1037 	 * async buffer heads in use.
1038 	 */
1039 	free_more_memory();
1040 	goto try_again;
1041 }
1042 EXPORT_SYMBOL_GPL(alloc_page_buffers);
1043 
1044 static inline void
link_dev_buffers(struct page * page,struct buffer_head * head)1045 link_dev_buffers(struct page *page, struct buffer_head *head)
1046 {
1047 	struct buffer_head *bh, *tail;
1048 
1049 	bh = head;
1050 	do {
1051 		tail = bh;
1052 		bh = bh->b_this_page;
1053 	} while (bh);
1054 	tail->b_this_page = head;
1055 	attach_page_buffers(page, head);
1056 }
1057 
1058 /*
1059  * Initialise the state of a blockdev page's buffers.
1060  */
1061 static void
init_page_buffers(struct page * page,struct block_device * bdev,sector_t block,int size)1062 init_page_buffers(struct page *page, struct block_device *bdev,
1063 			sector_t block, int size)
1064 {
1065 	struct buffer_head *head = page_buffers(page);
1066 	struct buffer_head *bh = head;
1067 	int uptodate = PageUptodate(page);
1068 
1069 	do {
1070 		if (!buffer_mapped(bh)) {
1071 			init_buffer(bh, NULL, NULL);
1072 			bh->b_bdev = bdev;
1073 			bh->b_blocknr = block;
1074 			if (uptodate)
1075 				set_buffer_uptodate(bh);
1076 			set_buffer_mapped(bh);
1077 		}
1078 		block++;
1079 		bh = bh->b_this_page;
1080 	} while (bh != head);
1081 }
1082 
1083 /*
1084  * Create the page-cache page that contains the requested block.
1085  *
1086  * This is user purely for blockdev mappings.
1087  */
1088 static struct page *
grow_dev_page(struct block_device * bdev,sector_t block,pgoff_t index,int size)1089 grow_dev_page(struct block_device *bdev, sector_t block,
1090 		pgoff_t index, int size)
1091 {
1092 	struct inode *inode = bdev->bd_inode;
1093 	struct page *page;
1094 	struct buffer_head *bh;
1095 
1096 	page = find_or_create_page(inode->i_mapping, index,
1097 		(mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS)|__GFP_MOVABLE);
1098 	if (!page)
1099 		return NULL;
1100 
1101 	BUG_ON(!PageLocked(page));
1102 
1103 	if (page_has_buffers(page)) {
1104 		bh = page_buffers(page);
1105 		if (bh->b_size == size) {
1106 			init_page_buffers(page, bdev, block, size);
1107 			return page;
1108 		}
1109 		if (!try_to_free_buffers(page))
1110 			goto failed;
1111 	}
1112 
1113 	/*
1114 	 * Allocate some buffers for this page
1115 	 */
1116 	bh = alloc_page_buffers(page, size, 0);
1117 	if (!bh)
1118 		goto failed;
1119 
1120 	/*
1121 	 * Link the page to the buffers and initialise them.  Take the
1122 	 * lock to be atomic wrt __find_get_block(), which does not
1123 	 * run under the page lock.
1124 	 */
1125 	spin_lock(&inode->i_mapping->private_lock);
1126 	link_dev_buffers(page, bh);
1127 	init_page_buffers(page, bdev, block, size);
1128 	spin_unlock(&inode->i_mapping->private_lock);
1129 	return page;
1130 
1131 failed:
1132 	BUG();
1133 	unlock_page(page);
1134 	page_cache_release(page);
1135 	return NULL;
1136 }
1137 
1138 /*
1139  * Create buffers for the specified block device block's page.  If
1140  * that page was dirty, the buffers are set dirty also.
1141  */
1142 static int
grow_buffers(struct block_device * bdev,sector_t block,int size)1143 grow_buffers(struct block_device *bdev, sector_t block, int size)
1144 {
1145 	struct page *page;
1146 	pgoff_t index;
1147 	int sizebits;
1148 
1149 	sizebits = -1;
1150 	do {
1151 		sizebits++;
1152 	} while ((size << sizebits) < PAGE_SIZE);
1153 
1154 	index = block >> sizebits;
1155 
1156 	/*
1157 	 * Check for a block which wants to lie outside our maximum possible
1158 	 * pagecache index.  (this comparison is done using sector_t types).
1159 	 */
1160 	if (unlikely(index != block >> sizebits)) {
1161 		char b[BDEVNAME_SIZE];
1162 
1163 		printk(KERN_ERR "%s: requested out-of-range block %llu for "
1164 			"device %s\n",
1165 			__func__, (unsigned long long)block,
1166 			bdevname(bdev, b));
1167 		return -EIO;
1168 	}
1169 	block = index << sizebits;
1170 	/* Create a page with the proper size buffers.. */
1171 	page = grow_dev_page(bdev, block, index, size);
1172 	if (!page)
1173 		return 0;
1174 	unlock_page(page);
1175 	page_cache_release(page);
1176 	return 1;
1177 }
1178 
1179 static struct buffer_head *
__getblk_slow(struct block_device * bdev,sector_t block,int size)1180 __getblk_slow(struct block_device *bdev, sector_t block, int size)
1181 {
1182 	/* Size must be multiple of hard sectorsize */
1183 	if (unlikely(size & (bdev_hardsect_size(bdev)-1) ||
1184 			(size < 512 || size > PAGE_SIZE))) {
1185 		printk(KERN_ERR "getblk(): invalid block size %d requested\n",
1186 					size);
1187 		printk(KERN_ERR "hardsect size: %d\n",
1188 					bdev_hardsect_size(bdev));
1189 
1190 		dump_stack();
1191 		return NULL;
1192 	}
1193 
1194 	for (;;) {
1195 		struct buffer_head * bh;
1196 		int ret;
1197 
1198 		bh = __find_get_block(bdev, block, size);
1199 		if (bh)
1200 			return bh;
1201 
1202 		ret = grow_buffers(bdev, block, size);
1203 		if (ret < 0)
1204 			return NULL;
1205 		if (ret == 0)
1206 			free_more_memory();
1207 	}
1208 }
1209 
1210 /*
1211  * The relationship between dirty buffers and dirty pages:
1212  *
1213  * Whenever a page has any dirty buffers, the page's dirty bit is set, and
1214  * the page is tagged dirty in its radix tree.
1215  *
1216  * At all times, the dirtiness of the buffers represents the dirtiness of
1217  * subsections of the page.  If the page has buffers, the page dirty bit is
1218  * merely a hint about the true dirty state.
1219  *
1220  * When a page is set dirty in its entirety, all its buffers are marked dirty
1221  * (if the page has buffers).
1222  *
1223  * When a buffer is marked dirty, its page is dirtied, but the page's other
1224  * buffers are not.
1225  *
1226  * Also.  When blockdev buffers are explicitly read with bread(), they
1227  * individually become uptodate.  But their backing page remains not
1228  * uptodate - even if all of its buffers are uptodate.  A subsequent
1229  * block_read_full_page() against that page will discover all the uptodate
1230  * buffers, will set the page uptodate and will perform no I/O.
1231  */
1232 
1233 /**
1234  * mark_buffer_dirty - mark a buffer_head as needing writeout
1235  * @bh: the buffer_head to mark dirty
1236  *
1237  * mark_buffer_dirty() will set the dirty bit against the buffer, then set its
1238  * backing page dirty, then tag the page as dirty in its address_space's radix
1239  * tree and then attach the address_space's inode to its superblock's dirty
1240  * inode list.
1241  *
1242  * mark_buffer_dirty() is atomic.  It takes bh->b_page->mapping->private_lock,
1243  * mapping->tree_lock and the global inode_lock.
1244  */
mark_buffer_dirty(struct buffer_head * bh)1245 void mark_buffer_dirty(struct buffer_head *bh)
1246 {
1247 	WARN_ON_ONCE(!buffer_uptodate(bh));
1248 
1249 	/*
1250 	 * Very *carefully* optimize the it-is-already-dirty case.
1251 	 *
1252 	 * Don't let the final "is it dirty" escape to before we
1253 	 * perhaps modified the buffer.
1254 	 */
1255 	if (buffer_dirty(bh)) {
1256 		smp_mb();
1257 		if (buffer_dirty(bh))
1258 			return;
1259 	}
1260 
1261 	if (!test_set_buffer_dirty(bh)) {
1262 		struct page *page = bh->b_page;
1263 		if (!TestSetPageDirty(page))
1264 			__set_page_dirty(page, page_mapping(page), 0);
1265 	}
1266 }
1267 
1268 /*
1269  * Decrement a buffer_head's reference count.  If all buffers against a page
1270  * have zero reference count, are clean and unlocked, and if the page is clean
1271  * and unlocked then try_to_free_buffers() may strip the buffers from the page
1272  * in preparation for freeing it (sometimes, rarely, buffers are removed from
1273  * a page but it ends up not being freed, and buffers may later be reattached).
1274  */
__brelse(struct buffer_head * buf)1275 void __brelse(struct buffer_head * buf)
1276 {
1277 	if (atomic_read(&buf->b_count)) {
1278 		put_bh(buf);
1279 		return;
1280 	}
1281 	WARN(1, KERN_ERR "VFS: brelse: Trying to free free buffer\n");
1282 }
1283 
1284 /*
1285  * bforget() is like brelse(), except it discards any
1286  * potentially dirty data.
1287  */
__bforget(struct buffer_head * bh)1288 void __bforget(struct buffer_head *bh)
1289 {
1290 	clear_buffer_dirty(bh);
1291 	if (bh->b_assoc_map) {
1292 		struct address_space *buffer_mapping = bh->b_page->mapping;
1293 
1294 		spin_lock(&buffer_mapping->private_lock);
1295 		list_del_init(&bh->b_assoc_buffers);
1296 		bh->b_assoc_map = NULL;
1297 		spin_unlock(&buffer_mapping->private_lock);
1298 	}
1299 	__brelse(bh);
1300 }
1301 
__bread_slow(struct buffer_head * bh)1302 static struct buffer_head *__bread_slow(struct buffer_head *bh)
1303 {
1304 	lock_buffer(bh);
1305 	if (buffer_uptodate(bh)) {
1306 		unlock_buffer(bh);
1307 		return bh;
1308 	} else {
1309 		get_bh(bh);
1310 		bh->b_end_io = end_buffer_read_sync;
1311 		submit_bh(READ, bh);
1312 		wait_on_buffer(bh);
1313 		if (buffer_uptodate(bh))
1314 			return bh;
1315 	}
1316 	brelse(bh);
1317 	return NULL;
1318 }
1319 
1320 /*
1321  * Per-cpu buffer LRU implementation.  To reduce the cost of __find_get_block().
1322  * The bhs[] array is sorted - newest buffer is at bhs[0].  Buffers have their
1323  * refcount elevated by one when they're in an LRU.  A buffer can only appear
1324  * once in a particular CPU's LRU.  A single buffer can be present in multiple
1325  * CPU's LRUs at the same time.
1326  *
1327  * This is a transparent caching front-end to sb_bread(), sb_getblk() and
1328  * sb_find_get_block().
1329  *
1330  * The LRUs themselves only need locking against invalidate_bh_lrus.  We use
1331  * a local interrupt disable for that.
1332  */
1333 
1334 #define BH_LRU_SIZE	8
1335 
1336 struct bh_lru {
1337 	struct buffer_head *bhs[BH_LRU_SIZE];
1338 };
1339 
1340 static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};
1341 
1342 #ifdef CONFIG_SMP
1343 #define bh_lru_lock()	local_irq_disable()
1344 #define bh_lru_unlock()	local_irq_enable()
1345 #else
1346 #define bh_lru_lock()	preempt_disable()
1347 #define bh_lru_unlock()	preempt_enable()
1348 #endif
1349 
check_irqs_on(void)1350 static inline void check_irqs_on(void)
1351 {
1352 #ifdef irqs_disabled
1353 	BUG_ON(irqs_disabled());
1354 #endif
1355 }
1356 
1357 /*
1358  * The LRU management algorithm is dopey-but-simple.  Sorry.
1359  */
bh_lru_install(struct buffer_head * bh)1360 static void bh_lru_install(struct buffer_head *bh)
1361 {
1362 	struct buffer_head *evictee = NULL;
1363 	struct bh_lru *lru;
1364 
1365 	check_irqs_on();
1366 	bh_lru_lock();
1367 	lru = &__get_cpu_var(bh_lrus);
1368 	if (lru->bhs[0] != bh) {
1369 		struct buffer_head *bhs[BH_LRU_SIZE];
1370 		int in;
1371 		int out = 0;
1372 
1373 		get_bh(bh);
1374 		bhs[out++] = bh;
1375 		for (in = 0; in < BH_LRU_SIZE; in++) {
1376 			struct buffer_head *bh2 = lru->bhs[in];
1377 
1378 			if (bh2 == bh) {
1379 				__brelse(bh2);
1380 			} else {
1381 				if (out >= BH_LRU_SIZE) {
1382 					BUG_ON(evictee != NULL);
1383 					evictee = bh2;
1384 				} else {
1385 					bhs[out++] = bh2;
1386 				}
1387 			}
1388 		}
1389 		while (out < BH_LRU_SIZE)
1390 			bhs[out++] = NULL;
1391 		memcpy(lru->bhs, bhs, sizeof(bhs));
1392 	}
1393 	bh_lru_unlock();
1394 
1395 	if (evictee)
1396 		__brelse(evictee);
1397 }
1398 
1399 /*
1400  * Look up the bh in this cpu's LRU.  If it's there, move it to the head.
1401  */
1402 static struct buffer_head *
lookup_bh_lru(struct block_device * bdev,sector_t block,unsigned size)1403 lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size)
1404 {
1405 	struct buffer_head *ret = NULL;
1406 	struct bh_lru *lru;
1407 	unsigned int i;
1408 
1409 	check_irqs_on();
1410 	bh_lru_lock();
1411 	lru = &__get_cpu_var(bh_lrus);
1412 	for (i = 0; i < BH_LRU_SIZE; i++) {
1413 		struct buffer_head *bh = lru->bhs[i];
1414 
1415 		if (bh && bh->b_bdev == bdev &&
1416 				bh->b_blocknr == block && bh->b_size == size) {
1417 			if (i) {
1418 				while (i) {
1419 					lru->bhs[i] = lru->bhs[i - 1];
1420 					i--;
1421 				}
1422 				lru->bhs[0] = bh;
1423 			}
1424 			get_bh(bh);
1425 			ret = bh;
1426 			break;
1427 		}
1428 	}
1429 	bh_lru_unlock();
1430 	return ret;
1431 }
1432 
1433 /*
1434  * Perform a pagecache lookup for the matching buffer.  If it's there, refresh
1435  * it in the LRU and mark it as accessed.  If it is not present then return
1436  * NULL
1437  */
1438 struct buffer_head *
__find_get_block(struct block_device * bdev,sector_t block,unsigned size)1439 __find_get_block(struct block_device *bdev, sector_t block, unsigned size)
1440 {
1441 	struct buffer_head *bh = lookup_bh_lru(bdev, block, size);
1442 
1443 	if (bh == NULL) {
1444 		bh = __find_get_block_slow(bdev, block);
1445 		if (bh)
1446 			bh_lru_install(bh);
1447 	}
1448 	if (bh)
1449 		touch_buffer(bh);
1450 	return bh;
1451 }
1452 EXPORT_SYMBOL(__find_get_block);
1453 
1454 /*
1455  * __getblk will locate (and, if necessary, create) the buffer_head
1456  * which corresponds to the passed block_device, block and size. The
1457  * returned buffer has its reference count incremented.
1458  *
1459  * __getblk() cannot fail - it just keeps trying.  If you pass it an
1460  * illegal block number, __getblk() will happily return a buffer_head
1461  * which represents the non-existent block.  Very weird.
1462  *
1463  * __getblk() will lock up the machine if grow_dev_page's try_to_free_buffers()
1464  * attempt is failing.  FIXME, perhaps?
1465  */
1466 struct buffer_head *
__getblk(struct block_device * bdev,sector_t block,unsigned size)1467 __getblk(struct block_device *bdev, sector_t block, unsigned size)
1468 {
1469 	struct buffer_head *bh = __find_get_block(bdev, block, size);
1470 
1471 	might_sleep();
1472 	if (bh == NULL)
1473 		bh = __getblk_slow(bdev, block, size);
1474 	return bh;
1475 }
1476 EXPORT_SYMBOL(__getblk);
1477 
1478 /*
1479  * Do async read-ahead on a buffer..
1480  */
__breadahead(struct block_device * bdev,sector_t block,unsigned size)1481 void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
1482 {
1483 	struct buffer_head *bh = __getblk(bdev, block, size);
1484 	if (likely(bh)) {
1485 		ll_rw_block(READA, 1, &bh);
1486 		brelse(bh);
1487 	}
1488 }
1489 EXPORT_SYMBOL(__breadahead);
1490 
1491 /**
1492  *  __bread() - reads a specified block and returns the bh
1493  *  @bdev: the block_device to read from
1494  *  @block: number of block
1495  *  @size: size (in bytes) to read
1496  *
1497  *  Reads a specified block, and returns buffer head that contains it.
1498  *  It returns NULL if the block was unreadable.
1499  */
1500 struct buffer_head *
__bread(struct block_device * bdev,sector_t block,unsigned size)1501 __bread(struct block_device *bdev, sector_t block, unsigned size)
1502 {
1503 	struct buffer_head *bh = __getblk(bdev, block, size);
1504 
1505 	if (likely(bh) && !buffer_uptodate(bh))
1506 		bh = __bread_slow(bh);
1507 	return bh;
1508 }
1509 EXPORT_SYMBOL(__bread);
1510 
1511 /*
1512  * invalidate_bh_lrus() is called rarely - but not only at unmount.
1513  * This doesn't race because it runs in each cpu either in irq
1514  * or with preempt disabled.
1515  */
invalidate_bh_lru(void * arg)1516 static void invalidate_bh_lru(void *arg)
1517 {
1518 	struct bh_lru *b = &get_cpu_var(bh_lrus);
1519 	int i;
1520 
1521 	for (i = 0; i < BH_LRU_SIZE; i++) {
1522 		brelse(b->bhs[i]);
1523 		b->bhs[i] = NULL;
1524 	}
1525 	put_cpu_var(bh_lrus);
1526 }
1527 
invalidate_bh_lrus(void)1528 void invalidate_bh_lrus(void)
1529 {
1530 	on_each_cpu(invalidate_bh_lru, NULL, 1);
1531 }
1532 EXPORT_SYMBOL_GPL(invalidate_bh_lrus);
1533 
set_bh_page(struct buffer_head * bh,struct page * page,unsigned long offset)1534 void set_bh_page(struct buffer_head *bh,
1535 		struct page *page, unsigned long offset)
1536 {
1537 	bh->b_page = page;
1538 	BUG_ON(offset >= PAGE_SIZE);
1539 	if (PageHighMem(page))
1540 		/*
1541 		 * This catches illegal uses and preserves the offset:
1542 		 */
1543 		bh->b_data = (char *)(0 + offset);
1544 	else
1545 		bh->b_data = page_address(page) + offset;
1546 }
1547 EXPORT_SYMBOL(set_bh_page);
1548 
1549 /*
1550  * Called when truncating a buffer on a page completely.
1551  */
discard_buffer(struct buffer_head * bh)1552 static void discard_buffer(struct buffer_head * bh)
1553 {
1554 	lock_buffer(bh);
1555 	clear_buffer_dirty(bh);
1556 	bh->b_bdev = NULL;
1557 	clear_buffer_mapped(bh);
1558 	clear_buffer_req(bh);
1559 	clear_buffer_new(bh);
1560 	clear_buffer_delay(bh);
1561 	clear_buffer_unwritten(bh);
1562 	unlock_buffer(bh);
1563 }
1564 
1565 /**
1566  * block_invalidatepage - invalidate part of all of a buffer-backed page
1567  *
1568  * @page: the page which is affected
1569  * @offset: the index of the truncation point
1570  *
1571  * block_invalidatepage() is called when all or part of the page has become
1572  * invalidatedby a truncate operation.
1573  *
1574  * block_invalidatepage() does not have to release all buffers, but it must
1575  * ensure that no dirty buffer is left outside @offset and that no I/O
1576  * is underway against any of the blocks which are outside the truncation
1577  * point.  Because the caller is about to free (and possibly reuse) those
1578  * blocks on-disk.
1579  */
block_invalidatepage(struct page * page,unsigned long offset)1580 void block_invalidatepage(struct page *page, unsigned long offset)
1581 {
1582 	struct buffer_head *head, *bh, *next;
1583 	unsigned int curr_off = 0;
1584 
1585 	BUG_ON(!PageLocked(page));
1586 	if (!page_has_buffers(page))
1587 		goto out;
1588 
1589 	head = page_buffers(page);
1590 	bh = head;
1591 	do {
1592 		unsigned int next_off = curr_off + bh->b_size;
1593 		next = bh->b_this_page;
1594 
1595 		/*
1596 		 * is this block fully invalidated?
1597 		 */
1598 		if (offset <= curr_off)
1599 			discard_buffer(bh);
1600 		curr_off = next_off;
1601 		bh = next;
1602 	} while (bh != head);
1603 
1604 	/*
1605 	 * We release buffers only if the entire page is being invalidated.
1606 	 * The get_block cached value has been unconditionally invalidated,
1607 	 * so real IO is not possible anymore.
1608 	 */
1609 	if (offset == 0)
1610 		try_to_release_page(page, 0);
1611 out:
1612 	return;
1613 }
1614 EXPORT_SYMBOL(block_invalidatepage);
1615 
1616 /*
1617  * We attach and possibly dirty the buffers atomically wrt
1618  * __set_page_dirty_buffers() via private_lock.  try_to_free_buffers
1619  * is already excluded via the page lock.
1620  */
create_empty_buffers(struct page * page,unsigned long blocksize,unsigned long b_state)1621 void create_empty_buffers(struct page *page,
1622 			unsigned long blocksize, unsigned long b_state)
1623 {
1624 	struct buffer_head *bh, *head, *tail;
1625 
1626 	head = alloc_page_buffers(page, blocksize, 1);
1627 	bh = head;
1628 	do {
1629 		bh->b_state |= b_state;
1630 		tail = bh;
1631 		bh = bh->b_this_page;
1632 	} while (bh);
1633 	tail->b_this_page = head;
1634 
1635 	spin_lock(&page->mapping->private_lock);
1636 	if (PageUptodate(page) || PageDirty(page)) {
1637 		bh = head;
1638 		do {
1639 			if (PageDirty(page))
1640 				set_buffer_dirty(bh);
1641 			if (PageUptodate(page))
1642 				set_buffer_uptodate(bh);
1643 			bh = bh->b_this_page;
1644 		} while (bh != head);
1645 	}
1646 	attach_page_buffers(page, head);
1647 	spin_unlock(&page->mapping->private_lock);
1648 }
1649 EXPORT_SYMBOL(create_empty_buffers);
1650 
1651 /*
1652  * We are taking a block for data and we don't want any output from any
1653  * buffer-cache aliases starting from return from that function and
1654  * until the moment when something will explicitly mark the buffer
1655  * dirty (hopefully that will not happen until we will free that block ;-)
1656  * We don't even need to mark it not-uptodate - nobody can expect
1657  * anything from a newly allocated buffer anyway. We used to used
1658  * unmap_buffer() for such invalidation, but that was wrong. We definitely
1659  * don't want to mark the alias unmapped, for example - it would confuse
1660  * anyone who might pick it with bread() afterwards...
1661  *
1662  * Also..  Note that bforget() doesn't lock the buffer.  So there can
1663  * be writeout I/O going on against recently-freed buffers.  We don't
1664  * wait on that I/O in bforget() - it's more efficient to wait on the I/O
1665  * only if we really need to.  That happens here.
1666  */
unmap_underlying_metadata(struct block_device * bdev,sector_t block)1667 void unmap_underlying_metadata(struct block_device *bdev, sector_t block)
1668 {
1669 	struct buffer_head *old_bh;
1670 
1671 	might_sleep();
1672 
1673 	old_bh = __find_get_block_slow(bdev, block);
1674 	if (old_bh) {
1675 		clear_buffer_dirty(old_bh);
1676 		wait_on_buffer(old_bh);
1677 		clear_buffer_req(old_bh);
1678 		__brelse(old_bh);
1679 	}
1680 }
1681 EXPORT_SYMBOL(unmap_underlying_metadata);
1682 
1683 /*
1684  * NOTE! All mapped/uptodate combinations are valid:
1685  *
1686  *	Mapped	Uptodate	Meaning
1687  *
1688  *	No	No		"unknown" - must do get_block()
1689  *	No	Yes		"hole" - zero-filled
1690  *	Yes	No		"allocated" - allocated on disk, not read in
1691  *	Yes	Yes		"valid" - allocated and up-to-date in memory.
1692  *
1693  * "Dirty" is valid only with the last case (mapped+uptodate).
1694  */
1695 
1696 /*
1697  * While block_write_full_page is writing back the dirty buffers under
1698  * the page lock, whoever dirtied the buffers may decide to clean them
1699  * again at any time.  We handle that by only looking at the buffer
1700  * state inside lock_buffer().
1701  *
1702  * If block_write_full_page() is called for regular writeback
1703  * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
1704  * locked buffer.   This only can happen if someone has written the buffer
1705  * directly, with submit_bh().  At the address_space level PageWriteback
1706  * prevents this contention from occurring.
1707  */
__block_write_full_page(struct inode * inode,struct page * page,get_block_t * get_block,struct writeback_control * wbc)1708 static int __block_write_full_page(struct inode *inode, struct page *page,
1709 			get_block_t *get_block, struct writeback_control *wbc)
1710 {
1711 	int err;
1712 	sector_t block;
1713 	sector_t last_block;
1714 	struct buffer_head *bh, *head;
1715 	const unsigned blocksize = 1 << inode->i_blkbits;
1716 	int nr_underway = 0;
1717 
1718 	BUG_ON(!PageLocked(page));
1719 
1720 	last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;
1721 
1722 	if (!page_has_buffers(page)) {
1723 		create_empty_buffers(page, blocksize,
1724 					(1 << BH_Dirty)|(1 << BH_Uptodate));
1725 	}
1726 
1727 	/*
1728 	 * Be very careful.  We have no exclusion from __set_page_dirty_buffers
1729 	 * here, and the (potentially unmapped) buffers may become dirty at
1730 	 * any time.  If a buffer becomes dirty here after we've inspected it
1731 	 * then we just miss that fact, and the page stays dirty.
1732 	 *
1733 	 * Buffers outside i_size may be dirtied by __set_page_dirty_buffers;
1734 	 * handle that here by just cleaning them.
1735 	 */
1736 
1737 	block = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
1738 	head = page_buffers(page);
1739 	bh = head;
1740 
1741 	/*
1742 	 * Get all the dirty buffers mapped to disk addresses and
1743 	 * handle any aliases from the underlying blockdev's mapping.
1744 	 */
1745 	do {
1746 		if (block > last_block) {
1747 			/*
1748 			 * mapped buffers outside i_size will occur, because
1749 			 * this page can be outside i_size when there is a
1750 			 * truncate in progress.
1751 			 */
1752 			/*
1753 			 * The buffer was zeroed by block_write_full_page()
1754 			 */
1755 			clear_buffer_dirty(bh);
1756 			set_buffer_uptodate(bh);
1757 		} else if ((!buffer_mapped(bh) || buffer_delay(bh)) &&
1758 			   buffer_dirty(bh)) {
1759 			WARN_ON(bh->b_size != blocksize);
1760 			err = get_block(inode, block, bh, 1);
1761 			if (err)
1762 				goto recover;
1763 			clear_buffer_delay(bh);
1764 			if (buffer_new(bh)) {
1765 				/* blockdev mappings never come here */
1766 				clear_buffer_new(bh);
1767 				unmap_underlying_metadata(bh->b_bdev,
1768 							bh->b_blocknr);
1769 			}
1770 		}
1771 		bh = bh->b_this_page;
1772 		block++;
1773 	} while (bh != head);
1774 
1775 	do {
1776 		if (!buffer_mapped(bh))
1777 			continue;
1778 		/*
1779 		 * If it's a fully non-blocking write attempt and we cannot
1780 		 * lock the buffer then redirty the page.  Note that this can
1781 		 * potentially cause a busy-wait loop from pdflush and kswapd
1782 		 * activity, but those code paths have their own higher-level
1783 		 * throttling.
1784 		 */
1785 		if (wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking) {
1786 			lock_buffer(bh);
1787 		} else if (!trylock_buffer(bh)) {
1788 			redirty_page_for_writepage(wbc, page);
1789 			continue;
1790 		}
1791 		if (test_clear_buffer_dirty(bh)) {
1792 			mark_buffer_async_write(bh);
1793 		} else {
1794 			unlock_buffer(bh);
1795 		}
1796 	} while ((bh = bh->b_this_page) != head);
1797 
1798 	/*
1799 	 * The page and its buffers are protected by PageWriteback(), so we can
1800 	 * drop the bh refcounts early.
1801 	 */
1802 	BUG_ON(PageWriteback(page));
1803 	set_page_writeback(page);
1804 
1805 	do {
1806 		struct buffer_head *next = bh->b_this_page;
1807 		if (buffer_async_write(bh)) {
1808 			submit_bh(WRITE, bh);
1809 			nr_underway++;
1810 		}
1811 		bh = next;
1812 	} while (bh != head);
1813 	unlock_page(page);
1814 
1815 	err = 0;
1816 done:
1817 	if (nr_underway == 0) {
1818 		/*
1819 		 * The page was marked dirty, but the buffers were
1820 		 * clean.  Someone wrote them back by hand with
1821 		 * ll_rw_block/submit_bh.  A rare case.
1822 		 */
1823 		end_page_writeback(page);
1824 
1825 		/*
1826 		 * The page and buffer_heads can be released at any time from
1827 		 * here on.
1828 		 */
1829 	}
1830 	return err;
1831 
1832 recover:
1833 	/*
1834 	 * ENOSPC, or some other error.  We may already have added some
1835 	 * blocks to the file, so we need to write these out to avoid
1836 	 * exposing stale data.
1837 	 * The page is currently locked and not marked for writeback
1838 	 */
1839 	bh = head;
1840 	/* Recovery: lock and submit the mapped buffers */
1841 	do {
1842 		if (buffer_mapped(bh) && buffer_dirty(bh) &&
1843 		    !buffer_delay(bh)) {
1844 			lock_buffer(bh);
1845 			mark_buffer_async_write(bh);
1846 		} else {
1847 			/*
1848 			 * The buffer may have been set dirty during
1849 			 * attachment to a dirty page.
1850 			 */
1851 			clear_buffer_dirty(bh);
1852 		}
1853 	} while ((bh = bh->b_this_page) != head);
1854 	SetPageError(page);
1855 	BUG_ON(PageWriteback(page));
1856 	mapping_set_error(page->mapping, err);
1857 	set_page_writeback(page);
1858 	do {
1859 		struct buffer_head *next = bh->b_this_page;
1860 		if (buffer_async_write(bh)) {
1861 			clear_buffer_dirty(bh);
1862 			submit_bh(WRITE, bh);
1863 			nr_underway++;
1864 		}
1865 		bh = next;
1866 	} while (bh != head);
1867 	unlock_page(page);
1868 	goto done;
1869 }
1870 
1871 /*
1872  * If a page has any new buffers, zero them out here, and mark them uptodate
1873  * and dirty so they'll be written out (in order to prevent uninitialised
1874  * block data from leaking). And clear the new bit.
1875  */
page_zero_new_buffers(struct page * page,unsigned from,unsigned to)1876 void page_zero_new_buffers(struct page *page, unsigned from, unsigned to)
1877 {
1878 	unsigned int block_start, block_end;
1879 	struct buffer_head *head, *bh;
1880 
1881 	BUG_ON(!PageLocked(page));
1882 	if (!page_has_buffers(page))
1883 		return;
1884 
1885 	bh = head = page_buffers(page);
1886 	block_start = 0;
1887 	do {
1888 		block_end = block_start + bh->b_size;
1889 
1890 		if (buffer_new(bh)) {
1891 			if (block_end > from && block_start < to) {
1892 				if (!PageUptodate(page)) {
1893 					unsigned start, size;
1894 
1895 					start = max(from, block_start);
1896 					size = min(to, block_end) - start;
1897 
1898 					zero_user(page, start, size);
1899 					set_buffer_uptodate(bh);
1900 				}
1901 
1902 				clear_buffer_new(bh);
1903 				mark_buffer_dirty(bh);
1904 			}
1905 		}
1906 
1907 		block_start = block_end;
1908 		bh = bh->b_this_page;
1909 	} while (bh != head);
1910 }
1911 EXPORT_SYMBOL(page_zero_new_buffers);
1912 
__block_prepare_write(struct inode * inode,struct page * page,unsigned from,unsigned to,get_block_t * get_block)1913 static int __block_prepare_write(struct inode *inode, struct page *page,
1914 		unsigned from, unsigned to, get_block_t *get_block)
1915 {
1916 	unsigned block_start, block_end;
1917 	sector_t block;
1918 	int err = 0;
1919 	unsigned blocksize, bbits;
1920 	struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;
1921 
1922 	BUG_ON(!PageLocked(page));
1923 	BUG_ON(from > PAGE_CACHE_SIZE);
1924 	BUG_ON(to > PAGE_CACHE_SIZE);
1925 	BUG_ON(from > to);
1926 
1927 	blocksize = 1 << inode->i_blkbits;
1928 	if (!page_has_buffers(page))
1929 		create_empty_buffers(page, blocksize, 0);
1930 	head = page_buffers(page);
1931 
1932 	bbits = inode->i_blkbits;
1933 	block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
1934 
1935 	for(bh = head, block_start = 0; bh != head || !block_start;
1936 	    block++, block_start=block_end, bh = bh->b_this_page) {
1937 		block_end = block_start + blocksize;
1938 		if (block_end <= from || block_start >= to) {
1939 			if (PageUptodate(page)) {
1940 				if (!buffer_uptodate(bh))
1941 					set_buffer_uptodate(bh);
1942 			}
1943 			continue;
1944 		}
1945 		if (buffer_new(bh))
1946 			clear_buffer_new(bh);
1947 		if (!buffer_mapped(bh)) {
1948 			WARN_ON(bh->b_size != blocksize);
1949 			err = get_block(inode, block, bh, 1);
1950 			if (err)
1951 				break;
1952 			if (buffer_new(bh)) {
1953 				unmap_underlying_metadata(bh->b_bdev,
1954 							bh->b_blocknr);
1955 				if (PageUptodate(page)) {
1956 					clear_buffer_new(bh);
1957 					set_buffer_uptodate(bh);
1958 					mark_buffer_dirty(bh);
1959 					continue;
1960 				}
1961 				if (block_end > to || block_start < from)
1962 					zero_user_segments(page,
1963 						to, block_end,
1964 						block_start, from);
1965 				continue;
1966 			}
1967 		}
1968 		if (PageUptodate(page)) {
1969 			if (!buffer_uptodate(bh))
1970 				set_buffer_uptodate(bh);
1971 			continue;
1972 		}
1973 		if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
1974 		    !buffer_unwritten(bh) &&
1975 		     (block_start < from || block_end > to)) {
1976 			ll_rw_block(READ, 1, &bh);
1977 			*wait_bh++=bh;
1978 		}
1979 	}
1980 	/*
1981 	 * If we issued read requests - let them complete.
1982 	 */
1983 	while(wait_bh > wait) {
1984 		wait_on_buffer(*--wait_bh);
1985 		if (!buffer_uptodate(*wait_bh))
1986 			err = -EIO;
1987 	}
1988 	if (unlikely(err))
1989 		page_zero_new_buffers(page, from, to);
1990 	return err;
1991 }
1992 
__block_commit_write(struct inode * inode,struct page * page,unsigned from,unsigned to)1993 static int __block_commit_write(struct inode *inode, struct page *page,
1994 		unsigned from, unsigned to)
1995 {
1996 	unsigned block_start, block_end;
1997 	int partial = 0;
1998 	unsigned blocksize;
1999 	struct buffer_head *bh, *head;
2000 
2001 	blocksize = 1 << inode->i_blkbits;
2002 
2003 	for(bh = head = page_buffers(page), block_start = 0;
2004 	    bh != head || !block_start;
2005 	    block_start=block_end, bh = bh->b_this_page) {
2006 		block_end = block_start + blocksize;
2007 		if (block_end <= from || block_start >= to) {
2008 			if (!buffer_uptodate(bh))
2009 				partial = 1;
2010 		} else {
2011 			set_buffer_uptodate(bh);
2012 			mark_buffer_dirty(bh);
2013 		}
2014 		clear_buffer_new(bh);
2015 	}
2016 
2017 	/*
2018 	 * If this is a partial write which happened to make all buffers
2019 	 * uptodate then we can optimize away a bogus readpage() for
2020 	 * the next read(). Here we 'discover' whether the page went
2021 	 * uptodate as a result of this (potentially partial) write.
2022 	 */
2023 	if (!partial)
2024 		SetPageUptodate(page);
2025 	return 0;
2026 }
2027 
2028 /*
2029  * block_write_begin takes care of the basic task of block allocation and
2030  * bringing partial write blocks uptodate first.
2031  *
2032  * If *pagep is not NULL, then block_write_begin uses the locked page
2033  * at *pagep rather than allocating its own. In this case, the page will
2034  * not be unlocked or deallocated on failure.
2035  */
block_write_begin(struct file * file,struct address_space * mapping,loff_t pos,unsigned len,unsigned flags,struct page ** pagep,void ** fsdata,get_block_t * get_block)2036 int block_write_begin(struct file *file, struct address_space *mapping,
2037 			loff_t pos, unsigned len, unsigned flags,
2038 			struct page **pagep, void **fsdata,
2039 			get_block_t *get_block)
2040 {
2041 	struct inode *inode = mapping->host;
2042 	int status = 0;
2043 	struct page *page;
2044 	pgoff_t index;
2045 	unsigned start, end;
2046 	int ownpage = 0;
2047 
2048 	index = pos >> PAGE_CACHE_SHIFT;
2049 	start = pos & (PAGE_CACHE_SIZE - 1);
2050 	end = start + len;
2051 
2052 	page = *pagep;
2053 	if (page == NULL) {
2054 		ownpage = 1;
2055 		page = grab_cache_page_write_begin(mapping, index, flags);
2056 		if (!page) {
2057 			status = -ENOMEM;
2058 			goto out;
2059 		}
2060 		*pagep = page;
2061 	} else
2062 		BUG_ON(!PageLocked(page));
2063 
2064 	status = __block_prepare_write(inode, page, start, end, get_block);
2065 	if (unlikely(status)) {
2066 		ClearPageUptodate(page);
2067 
2068 		if (ownpage) {
2069 			unlock_page(page);
2070 			page_cache_release(page);
2071 			*pagep = NULL;
2072 
2073 			/*
2074 			 * prepare_write() may have instantiated a few blocks
2075 			 * outside i_size.  Trim these off again. Don't need
2076 			 * i_size_read because we hold i_mutex.
2077 			 */
2078 			if (pos + len > inode->i_size)
2079 				vmtruncate(inode, inode->i_size);
2080 		}
2081 	}
2082 
2083 out:
2084 	return status;
2085 }
2086 EXPORT_SYMBOL(block_write_begin);
2087 
block_write_end(struct file * file,struct address_space * mapping,loff_t pos,unsigned len,unsigned copied,struct page * page,void * fsdata)2088 int block_write_end(struct file *file, struct address_space *mapping,
2089 			loff_t pos, unsigned len, unsigned copied,
2090 			struct page *page, void *fsdata)
2091 {
2092 	struct inode *inode = mapping->host;
2093 	unsigned start;
2094 
2095 	start = pos & (PAGE_CACHE_SIZE - 1);
2096 
2097 	if (unlikely(copied < len)) {
2098 		/*
2099 		 * The buffers that were written will now be uptodate, so we
2100 		 * don't have to worry about a readpage reading them and
2101 		 * overwriting a partial write. However if we have encountered
2102 		 * a short write and only partially written into a buffer, it
2103 		 * will not be marked uptodate, so a readpage might come in and
2104 		 * destroy our partial write.
2105 		 *
2106 		 * Do the simplest thing, and just treat any short write to a
2107 		 * non uptodate page as a zero-length write, and force the
2108 		 * caller to redo the whole thing.
2109 		 */
2110 		if (!PageUptodate(page))
2111 			copied = 0;
2112 
2113 		page_zero_new_buffers(page, start+copied, start+len);
2114 	}
2115 	flush_dcache_page(page);
2116 
2117 	/* This could be a short (even 0-length) commit */
2118 	__block_commit_write(inode, page, start, start+copied);
2119 
2120 	return copied;
2121 }
2122 EXPORT_SYMBOL(block_write_end);
2123 
generic_write_end(struct file * file,struct address_space * mapping,loff_t pos,unsigned len,unsigned copied,struct page * page,void * fsdata)2124 int generic_write_end(struct file *file, struct address_space *mapping,
2125 			loff_t pos, unsigned len, unsigned copied,
2126 			struct page *page, void *fsdata)
2127 {
2128 	struct inode *inode = mapping->host;
2129 	int i_size_changed = 0;
2130 
2131 	copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
2132 
2133 	/*
2134 	 * No need to use i_size_read() here, the i_size
2135 	 * cannot change under us because we hold i_mutex.
2136 	 *
2137 	 * But it's important to update i_size while still holding page lock:
2138 	 * page writeout could otherwise come in and zero beyond i_size.
2139 	 */
2140 	if (pos+copied > inode->i_size) {
2141 		i_size_write(inode, pos+copied);
2142 		i_size_changed = 1;
2143 	}
2144 
2145 	unlock_page(page);
2146 	page_cache_release(page);
2147 
2148 	/*
2149 	 * Don't mark the inode dirty under page lock. First, it unnecessarily
2150 	 * makes the holding time of page lock longer. Second, it forces lock
2151 	 * ordering of page lock and transaction start for journaling
2152 	 * filesystems.
2153 	 */
2154 	if (i_size_changed)
2155 		mark_inode_dirty(inode);
2156 
2157 	return copied;
2158 }
2159 EXPORT_SYMBOL(generic_write_end);
2160 
2161 /*
2162  * block_is_partially_uptodate checks whether buffers within a page are
2163  * uptodate or not.
2164  *
2165  * Returns true if all buffers which correspond to a file portion
2166  * we want to read are uptodate.
2167  */
block_is_partially_uptodate(struct page * page,read_descriptor_t * desc,unsigned long from)2168 int block_is_partially_uptodate(struct page *page, read_descriptor_t *desc,
2169 					unsigned long from)
2170 {
2171 	struct inode *inode = page->mapping->host;
2172 	unsigned block_start, block_end, blocksize;
2173 	unsigned to;
2174 	struct buffer_head *bh, *head;
2175 	int ret = 1;
2176 
2177 	if (!page_has_buffers(page))
2178 		return 0;
2179 
2180 	blocksize = 1 << inode->i_blkbits;
2181 	to = min_t(unsigned, PAGE_CACHE_SIZE - from, desc->count);
2182 	to = from + to;
2183 	if (from < blocksize && to > PAGE_CACHE_SIZE - blocksize)
2184 		return 0;
2185 
2186 	head = page_buffers(page);
2187 	bh = head;
2188 	block_start = 0;
2189 	do {
2190 		block_end = block_start + blocksize;
2191 		if (block_end > from && block_start < to) {
2192 			if (!buffer_uptodate(bh)) {
2193 				ret = 0;
2194 				break;
2195 			}
2196 			if (block_end >= to)
2197 				break;
2198 		}
2199 		block_start = block_end;
2200 		bh = bh->b_this_page;
2201 	} while (bh != head);
2202 
2203 	return ret;
2204 }
2205 EXPORT_SYMBOL(block_is_partially_uptodate);
2206 
2207 /*
2208  * Generic "read page" function for block devices that have the normal
2209  * get_block functionality. This is most of the block device filesystems.
2210  * Reads the page asynchronously --- the unlock_buffer() and
2211  * set/clear_buffer_uptodate() functions propagate buffer state into the
2212  * page struct once IO has completed.
2213  */
block_read_full_page(struct page * page,get_block_t * get_block)2214 int block_read_full_page(struct page *page, get_block_t *get_block)
2215 {
2216 	struct inode *inode = page->mapping->host;
2217 	sector_t iblock, lblock;
2218 	struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
2219 	unsigned int blocksize;
2220 	int nr, i;
2221 	int fully_mapped = 1;
2222 
2223 	BUG_ON(!PageLocked(page));
2224 	blocksize = 1 << inode->i_blkbits;
2225 	if (!page_has_buffers(page))
2226 		create_empty_buffers(page, blocksize, 0);
2227 	head = page_buffers(page);
2228 
2229 	iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2230 	lblock = (i_size_read(inode)+blocksize-1) >> inode->i_blkbits;
2231 	bh = head;
2232 	nr = 0;
2233 	i = 0;
2234 
2235 	do {
2236 		if (buffer_uptodate(bh))
2237 			continue;
2238 
2239 		if (!buffer_mapped(bh)) {
2240 			int err = 0;
2241 
2242 			fully_mapped = 0;
2243 			if (iblock < lblock) {
2244 				WARN_ON(bh->b_size != blocksize);
2245 				err = get_block(inode, iblock, bh, 0);
2246 				if (err)
2247 					SetPageError(page);
2248 			}
2249 			if (!buffer_mapped(bh)) {
2250 				zero_user(page, i * blocksize, blocksize);
2251 				if (!err)
2252 					set_buffer_uptodate(bh);
2253 				continue;
2254 			}
2255 			/*
2256 			 * get_block() might have updated the buffer
2257 			 * synchronously
2258 			 */
2259 			if (buffer_uptodate(bh))
2260 				continue;
2261 		}
2262 		arr[nr++] = bh;
2263 	} while (i++, iblock++, (bh = bh->b_this_page) != head);
2264 
2265 	if (fully_mapped)
2266 		SetPageMappedToDisk(page);
2267 
2268 	if (!nr) {
2269 		/*
2270 		 * All buffers are uptodate - we can set the page uptodate
2271 		 * as well. But not if get_block() returned an error.
2272 		 */
2273 		if (!PageError(page))
2274 			SetPageUptodate(page);
2275 		unlock_page(page);
2276 		return 0;
2277 	}
2278 
2279 	/* Stage two: lock the buffers */
2280 	for (i = 0; i < nr; i++) {
2281 		bh = arr[i];
2282 		lock_buffer(bh);
2283 		mark_buffer_async_read(bh);
2284 	}
2285 
2286 	/*
2287 	 * Stage 3: start the IO.  Check for uptodateness
2288 	 * inside the buffer lock in case another process reading
2289 	 * the underlying blockdev brought it uptodate (the sct fix).
2290 	 */
2291 	for (i = 0; i < nr; i++) {
2292 		bh = arr[i];
2293 		if (buffer_uptodate(bh))
2294 			end_buffer_async_read(bh, 1);
2295 		else
2296 			submit_bh(READ, bh);
2297 	}
2298 	return 0;
2299 }
2300 
2301 /* utility function for filesystems that need to do work on expanding
2302  * truncates.  Uses filesystem pagecache writes to allow the filesystem to
2303  * deal with the hole.
2304  */
generic_cont_expand_simple(struct inode * inode,loff_t size)2305 int generic_cont_expand_simple(struct inode *inode, loff_t size)
2306 {
2307 	struct address_space *mapping = inode->i_mapping;
2308 	struct page *page;
2309 	void *fsdata;
2310 	unsigned long limit;
2311 	int err;
2312 
2313 	err = -EFBIG;
2314         limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
2315 	if (limit != RLIM_INFINITY && size > (loff_t)limit) {
2316 		send_sig(SIGXFSZ, current, 0);
2317 		goto out;
2318 	}
2319 	if (size > inode->i_sb->s_maxbytes)
2320 		goto out;
2321 
2322 	err = pagecache_write_begin(NULL, mapping, size, 0,
2323 				AOP_FLAG_UNINTERRUPTIBLE|AOP_FLAG_CONT_EXPAND,
2324 				&page, &fsdata);
2325 	if (err)
2326 		goto out;
2327 
2328 	err = pagecache_write_end(NULL, mapping, size, 0, 0, page, fsdata);
2329 	BUG_ON(err > 0);
2330 
2331 out:
2332 	return err;
2333 }
2334 
cont_expand_zero(struct file * file,struct address_space * mapping,loff_t pos,loff_t * bytes)2335 static int cont_expand_zero(struct file *file, struct address_space *mapping,
2336 			    loff_t pos, loff_t *bytes)
2337 {
2338 	struct inode *inode = mapping->host;
2339 	unsigned blocksize = 1 << inode->i_blkbits;
2340 	struct page *page;
2341 	void *fsdata;
2342 	pgoff_t index, curidx;
2343 	loff_t curpos;
2344 	unsigned zerofrom, offset, len;
2345 	int err = 0;
2346 
2347 	index = pos >> PAGE_CACHE_SHIFT;
2348 	offset = pos & ~PAGE_CACHE_MASK;
2349 
2350 	while (index > (curidx = (curpos = *bytes)>>PAGE_CACHE_SHIFT)) {
2351 		zerofrom = curpos & ~PAGE_CACHE_MASK;
2352 		if (zerofrom & (blocksize-1)) {
2353 			*bytes |= (blocksize-1);
2354 			(*bytes)++;
2355 		}
2356 		len = PAGE_CACHE_SIZE - zerofrom;
2357 
2358 		err = pagecache_write_begin(file, mapping, curpos, len,
2359 						AOP_FLAG_UNINTERRUPTIBLE,
2360 						&page, &fsdata);
2361 		if (err)
2362 			goto out;
2363 		zero_user(page, zerofrom, len);
2364 		err = pagecache_write_end(file, mapping, curpos, len, len,
2365 						page, fsdata);
2366 		if (err < 0)
2367 			goto out;
2368 		BUG_ON(err != len);
2369 		err = 0;
2370 
2371 		balance_dirty_pages_ratelimited(mapping);
2372 	}
2373 
2374 	/* page covers the boundary, find the boundary offset */
2375 	if (index == curidx) {
2376 		zerofrom = curpos & ~PAGE_CACHE_MASK;
2377 		/* if we will expand the thing last block will be filled */
2378 		if (offset <= zerofrom) {
2379 			goto out;
2380 		}
2381 		if (zerofrom & (blocksize-1)) {
2382 			*bytes |= (blocksize-1);
2383 			(*bytes)++;
2384 		}
2385 		len = offset - zerofrom;
2386 
2387 		err = pagecache_write_begin(file, mapping, curpos, len,
2388 						AOP_FLAG_UNINTERRUPTIBLE,
2389 						&page, &fsdata);
2390 		if (err)
2391 			goto out;
2392 		zero_user(page, zerofrom, len);
2393 		err = pagecache_write_end(file, mapping, curpos, len, len,
2394 						page, fsdata);
2395 		if (err < 0)
2396 			goto out;
2397 		BUG_ON(err != len);
2398 		err = 0;
2399 	}
2400 out:
2401 	return err;
2402 }
2403 
2404 /*
2405  * For moronic filesystems that do not allow holes in file.
2406  * We may have to extend the file.
2407  */
cont_write_begin(struct file * file,struct address_space * mapping,loff_t pos,unsigned len,unsigned flags,struct page ** pagep,void ** fsdata,get_block_t * get_block,loff_t * bytes)2408 int cont_write_begin(struct file *file, struct address_space *mapping,
2409 			loff_t pos, unsigned len, unsigned flags,
2410 			struct page **pagep, void **fsdata,
2411 			get_block_t *get_block, loff_t *bytes)
2412 {
2413 	struct inode *inode = mapping->host;
2414 	unsigned blocksize = 1 << inode->i_blkbits;
2415 	unsigned zerofrom;
2416 	int err;
2417 
2418 	err = cont_expand_zero(file, mapping, pos, bytes);
2419 	if (err)
2420 		goto out;
2421 
2422 	zerofrom = *bytes & ~PAGE_CACHE_MASK;
2423 	if (pos+len > *bytes && zerofrom & (blocksize-1)) {
2424 		*bytes |= (blocksize-1);
2425 		(*bytes)++;
2426 	}
2427 
2428 	*pagep = NULL;
2429 	err = block_write_begin(file, mapping, pos, len,
2430 				flags, pagep, fsdata, get_block);
2431 out:
2432 	return err;
2433 }
2434 
block_prepare_write(struct page * page,unsigned from,unsigned to,get_block_t * get_block)2435 int block_prepare_write(struct page *page, unsigned from, unsigned to,
2436 			get_block_t *get_block)
2437 {
2438 	struct inode *inode = page->mapping->host;
2439 	int err = __block_prepare_write(inode, page, from, to, get_block);
2440 	if (err)
2441 		ClearPageUptodate(page);
2442 	return err;
2443 }
2444 
block_commit_write(struct page * page,unsigned from,unsigned to)2445 int block_commit_write(struct page *page, unsigned from, unsigned to)
2446 {
2447 	struct inode *inode = page->mapping->host;
2448 	__block_commit_write(inode,page,from,to);
2449 	return 0;
2450 }
2451 
2452 /*
2453  * block_page_mkwrite() is not allowed to change the file size as it gets
2454  * called from a page fault handler when a page is first dirtied. Hence we must
2455  * be careful to check for EOF conditions here. We set the page up correctly
2456  * for a written page which means we get ENOSPC checking when writing into
2457  * holes and correct delalloc and unwritten extent mapping on filesystems that
2458  * support these features.
2459  *
2460  * We are not allowed to take the i_mutex here so we have to play games to
2461  * protect against truncate races as the page could now be beyond EOF.  Because
2462  * vmtruncate() writes the inode size before removing pages, once we have the
2463  * page lock we can determine safely if the page is beyond EOF. If it is not
2464  * beyond EOF, then the page is guaranteed safe against truncation until we
2465  * unlock the page.
2466  */
2467 int
block_page_mkwrite(struct vm_area_struct * vma,struct page * page,get_block_t get_block)2468 block_page_mkwrite(struct vm_area_struct *vma, struct page *page,
2469 		   get_block_t get_block)
2470 {
2471 	struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
2472 	unsigned long end;
2473 	loff_t size;
2474 	int ret = -EINVAL;
2475 
2476 	lock_page(page);
2477 	size = i_size_read(inode);
2478 	if ((page->mapping != inode->i_mapping) ||
2479 	    (page_offset(page) > size)) {
2480 		/* page got truncated out from underneath us */
2481 		goto out_unlock;
2482 	}
2483 
2484 	/* page is wholly or partially inside EOF */
2485 	if (((page->index + 1) << PAGE_CACHE_SHIFT) > size)
2486 		end = size & ~PAGE_CACHE_MASK;
2487 	else
2488 		end = PAGE_CACHE_SIZE;
2489 
2490 	ret = block_prepare_write(page, 0, end, get_block);
2491 	if (!ret)
2492 		ret = block_commit_write(page, 0, end);
2493 
2494 out_unlock:
2495 	unlock_page(page);
2496 	return ret;
2497 }
2498 
2499 /*
2500  * nobh_write_begin()'s prereads are special: the buffer_heads are freed
2501  * immediately, while under the page lock.  So it needs a special end_io
2502  * handler which does not touch the bh after unlocking it.
2503  */
end_buffer_read_nobh(struct buffer_head * bh,int uptodate)2504 static void end_buffer_read_nobh(struct buffer_head *bh, int uptodate)
2505 {
2506 	__end_buffer_read_notouch(bh, uptodate);
2507 }
2508 
2509 /*
2510  * Attach the singly-linked list of buffers created by nobh_write_begin, to
2511  * the page (converting it to circular linked list and taking care of page
2512  * dirty races).
2513  */
attach_nobh_buffers(struct page * page,struct buffer_head * head)2514 static void attach_nobh_buffers(struct page *page, struct buffer_head *head)
2515 {
2516 	struct buffer_head *bh;
2517 
2518 	BUG_ON(!PageLocked(page));
2519 
2520 	spin_lock(&page->mapping->private_lock);
2521 	bh = head;
2522 	do {
2523 		if (PageDirty(page))
2524 			set_buffer_dirty(bh);
2525 		if (!bh->b_this_page)
2526 			bh->b_this_page = head;
2527 		bh = bh->b_this_page;
2528 	} while (bh != head);
2529 	attach_page_buffers(page, head);
2530 	spin_unlock(&page->mapping->private_lock);
2531 }
2532 
2533 /*
2534  * On entry, the page is fully not uptodate.
2535  * On exit the page is fully uptodate in the areas outside (from,to)
2536  */
nobh_write_begin(struct file * file,struct address_space * mapping,loff_t pos,unsigned len,unsigned flags,struct page ** pagep,void ** fsdata,get_block_t * get_block)2537 int nobh_write_begin(struct file *file, struct address_space *mapping,
2538 			loff_t pos, unsigned len, unsigned flags,
2539 			struct page **pagep, void **fsdata,
2540 			get_block_t *get_block)
2541 {
2542 	struct inode *inode = mapping->host;
2543 	const unsigned blkbits = inode->i_blkbits;
2544 	const unsigned blocksize = 1 << blkbits;
2545 	struct buffer_head *head, *bh;
2546 	struct page *page;
2547 	pgoff_t index;
2548 	unsigned from, to;
2549 	unsigned block_in_page;
2550 	unsigned block_start, block_end;
2551 	sector_t block_in_file;
2552 	int nr_reads = 0;
2553 	int ret = 0;
2554 	int is_mapped_to_disk = 1;
2555 
2556 	index = pos >> PAGE_CACHE_SHIFT;
2557 	from = pos & (PAGE_CACHE_SIZE - 1);
2558 	to = from + len;
2559 
2560 	page = grab_cache_page_write_begin(mapping, index, flags);
2561 	if (!page)
2562 		return -ENOMEM;
2563 	*pagep = page;
2564 	*fsdata = NULL;
2565 
2566 	if (page_has_buffers(page)) {
2567 		unlock_page(page);
2568 		page_cache_release(page);
2569 		*pagep = NULL;
2570 		return block_write_begin(file, mapping, pos, len, flags, pagep,
2571 					fsdata, get_block);
2572 	}
2573 
2574 	if (PageMappedToDisk(page))
2575 		return 0;
2576 
2577 	/*
2578 	 * Allocate buffers so that we can keep track of state, and potentially
2579 	 * attach them to the page if an error occurs. In the common case of
2580 	 * no error, they will just be freed again without ever being attached
2581 	 * to the page (which is all OK, because we're under the page lock).
2582 	 *
2583 	 * Be careful: the buffer linked list is a NULL terminated one, rather
2584 	 * than the circular one we're used to.
2585 	 */
2586 	head = alloc_page_buffers(page, blocksize, 0);
2587 	if (!head) {
2588 		ret = -ENOMEM;
2589 		goto out_release;
2590 	}
2591 
2592 	block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
2593 
2594 	/*
2595 	 * We loop across all blocks in the page, whether or not they are
2596 	 * part of the affected region.  This is so we can discover if the
2597 	 * page is fully mapped-to-disk.
2598 	 */
2599 	for (block_start = 0, block_in_page = 0, bh = head;
2600 		  block_start < PAGE_CACHE_SIZE;
2601 		  block_in_page++, block_start += blocksize, bh = bh->b_this_page) {
2602 		int create;
2603 
2604 		block_end = block_start + blocksize;
2605 		bh->b_state = 0;
2606 		create = 1;
2607 		if (block_start >= to)
2608 			create = 0;
2609 		ret = get_block(inode, block_in_file + block_in_page,
2610 					bh, create);
2611 		if (ret)
2612 			goto failed;
2613 		if (!buffer_mapped(bh))
2614 			is_mapped_to_disk = 0;
2615 		if (buffer_new(bh))
2616 			unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
2617 		if (PageUptodate(page)) {
2618 			set_buffer_uptodate(bh);
2619 			continue;
2620 		}
2621 		if (buffer_new(bh) || !buffer_mapped(bh)) {
2622 			zero_user_segments(page, block_start, from,
2623 							to, block_end);
2624 			continue;
2625 		}
2626 		if (buffer_uptodate(bh))
2627 			continue;	/* reiserfs does this */
2628 		if (block_start < from || block_end > to) {
2629 			lock_buffer(bh);
2630 			bh->b_end_io = end_buffer_read_nobh;
2631 			submit_bh(READ, bh);
2632 			nr_reads++;
2633 		}
2634 	}
2635 
2636 	if (nr_reads) {
2637 		/*
2638 		 * The page is locked, so these buffers are protected from
2639 		 * any VM or truncate activity.  Hence we don't need to care
2640 		 * for the buffer_head refcounts.
2641 		 */
2642 		for (bh = head; bh; bh = bh->b_this_page) {
2643 			wait_on_buffer(bh);
2644 			if (!buffer_uptodate(bh))
2645 				ret = -EIO;
2646 		}
2647 		if (ret)
2648 			goto failed;
2649 	}
2650 
2651 	if (is_mapped_to_disk)
2652 		SetPageMappedToDisk(page);
2653 
2654 	*fsdata = head; /* to be released by nobh_write_end */
2655 
2656 	return 0;
2657 
2658 failed:
2659 	BUG_ON(!ret);
2660 	/*
2661 	 * Error recovery is a bit difficult. We need to zero out blocks that
2662 	 * were newly allocated, and dirty them to ensure they get written out.
2663 	 * Buffers need to be attached to the page at this point, otherwise
2664 	 * the handling of potential IO errors during writeout would be hard
2665 	 * (could try doing synchronous writeout, but what if that fails too?)
2666 	 */
2667 	attach_nobh_buffers(page, head);
2668 	page_zero_new_buffers(page, from, to);
2669 
2670 out_release:
2671 	unlock_page(page);
2672 	page_cache_release(page);
2673 	*pagep = NULL;
2674 
2675 	if (pos + len > inode->i_size)
2676 		vmtruncate(inode, inode->i_size);
2677 
2678 	return ret;
2679 }
2680 EXPORT_SYMBOL(nobh_write_begin);
2681 
nobh_write_end(struct file * file,struct address_space * mapping,loff_t pos,unsigned len,unsigned copied,struct page * page,void * fsdata)2682 int nobh_write_end(struct file *file, struct address_space *mapping,
2683 			loff_t pos, unsigned len, unsigned copied,
2684 			struct page *page, void *fsdata)
2685 {
2686 	struct inode *inode = page->mapping->host;
2687 	struct buffer_head *head = fsdata;
2688 	struct buffer_head *bh;
2689 	BUG_ON(fsdata != NULL && page_has_buffers(page));
2690 
2691 	if (unlikely(copied < len) && head)
2692 		attach_nobh_buffers(page, head);
2693 	if (page_has_buffers(page))
2694 		return generic_write_end(file, mapping, pos, len,
2695 					copied, page, fsdata);
2696 
2697 	SetPageUptodate(page);
2698 	set_page_dirty(page);
2699 	if (pos+copied > inode->i_size) {
2700 		i_size_write(inode, pos+copied);
2701 		mark_inode_dirty(inode);
2702 	}
2703 
2704 	unlock_page(page);
2705 	page_cache_release(page);
2706 
2707 	while (head) {
2708 		bh = head;
2709 		head = head->b_this_page;
2710 		free_buffer_head(bh);
2711 	}
2712 
2713 	return copied;
2714 }
2715 EXPORT_SYMBOL(nobh_write_end);
2716 
2717 /*
2718  * nobh_writepage() - based on block_full_write_page() except
2719  * that it tries to operate without attaching bufferheads to
2720  * the page.
2721  */
nobh_writepage(struct page * page,get_block_t * get_block,struct writeback_control * wbc)2722 int nobh_writepage(struct page *page, get_block_t *get_block,
2723 			struct writeback_control *wbc)
2724 {
2725 	struct inode * const inode = page->mapping->host;
2726 	loff_t i_size = i_size_read(inode);
2727 	const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2728 	unsigned offset;
2729 	int ret;
2730 
2731 	/* Is the page fully inside i_size? */
2732 	if (page->index < end_index)
2733 		goto out;
2734 
2735 	/* Is the page fully outside i_size? (truncate in progress) */
2736 	offset = i_size & (PAGE_CACHE_SIZE-1);
2737 	if (page->index >= end_index+1 || !offset) {
2738 		/*
2739 		 * The page may have dirty, unmapped buffers.  For example,
2740 		 * they may have been added in ext3_writepage().  Make them
2741 		 * freeable here, so the page does not leak.
2742 		 */
2743 #if 0
2744 		/* Not really sure about this  - do we need this ? */
2745 		if (page->mapping->a_ops->invalidatepage)
2746 			page->mapping->a_ops->invalidatepage(page, offset);
2747 #endif
2748 		unlock_page(page);
2749 		return 0; /* don't care */
2750 	}
2751 
2752 	/*
2753 	 * The page straddles i_size.  It must be zeroed out on each and every
2754 	 * writepage invocation because it may be mmapped.  "A file is mapped
2755 	 * in multiples of the page size.  For a file that is not a multiple of
2756 	 * the  page size, the remaining memory is zeroed when mapped, and
2757 	 * writes to that region are not written out to the file."
2758 	 */
2759 	zero_user_segment(page, offset, PAGE_CACHE_SIZE);
2760 out:
2761 	ret = mpage_writepage(page, get_block, wbc);
2762 	if (ret == -EAGAIN)
2763 		ret = __block_write_full_page(inode, page, get_block, wbc);
2764 	return ret;
2765 }
2766 EXPORT_SYMBOL(nobh_writepage);
2767 
nobh_truncate_page(struct address_space * mapping,loff_t from,get_block_t * get_block)2768 int nobh_truncate_page(struct address_space *mapping,
2769 			loff_t from, get_block_t *get_block)
2770 {
2771 	pgoff_t index = from >> PAGE_CACHE_SHIFT;
2772 	unsigned offset = from & (PAGE_CACHE_SIZE-1);
2773 	unsigned blocksize;
2774 	sector_t iblock;
2775 	unsigned length, pos;
2776 	struct inode *inode = mapping->host;
2777 	struct page *page;
2778 	struct buffer_head map_bh;
2779 	int err;
2780 
2781 	blocksize = 1 << inode->i_blkbits;
2782 	length = offset & (blocksize - 1);
2783 
2784 	/* Block boundary? Nothing to do */
2785 	if (!length)
2786 		return 0;
2787 
2788 	length = blocksize - length;
2789 	iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2790 
2791 	page = grab_cache_page(mapping, index);
2792 	err = -ENOMEM;
2793 	if (!page)
2794 		goto out;
2795 
2796 	if (page_has_buffers(page)) {
2797 has_buffers:
2798 		unlock_page(page);
2799 		page_cache_release(page);
2800 		return block_truncate_page(mapping, from, get_block);
2801 	}
2802 
2803 	/* Find the buffer that contains "offset" */
2804 	pos = blocksize;
2805 	while (offset >= pos) {
2806 		iblock++;
2807 		pos += blocksize;
2808 	}
2809 
2810 	err = get_block(inode, iblock, &map_bh, 0);
2811 	if (err)
2812 		goto unlock;
2813 	/* unmapped? It's a hole - nothing to do */
2814 	if (!buffer_mapped(&map_bh))
2815 		goto unlock;
2816 
2817 	/* Ok, it's mapped. Make sure it's up-to-date */
2818 	if (!PageUptodate(page)) {
2819 		err = mapping->a_ops->readpage(NULL, page);
2820 		if (err) {
2821 			page_cache_release(page);
2822 			goto out;
2823 		}
2824 		lock_page(page);
2825 		if (!PageUptodate(page)) {
2826 			err = -EIO;
2827 			goto unlock;
2828 		}
2829 		if (page_has_buffers(page))
2830 			goto has_buffers;
2831 	}
2832 	zero_user(page, offset, length);
2833 	set_page_dirty(page);
2834 	err = 0;
2835 
2836 unlock:
2837 	unlock_page(page);
2838 	page_cache_release(page);
2839 out:
2840 	return err;
2841 }
2842 EXPORT_SYMBOL(nobh_truncate_page);
2843 
block_truncate_page(struct address_space * mapping,loff_t from,get_block_t * get_block)2844 int block_truncate_page(struct address_space *mapping,
2845 			loff_t from, get_block_t *get_block)
2846 {
2847 	pgoff_t index = from >> PAGE_CACHE_SHIFT;
2848 	unsigned offset = from & (PAGE_CACHE_SIZE-1);
2849 	unsigned blocksize;
2850 	sector_t iblock;
2851 	unsigned length, pos;
2852 	struct inode *inode = mapping->host;
2853 	struct page *page;
2854 	struct buffer_head *bh;
2855 	int err;
2856 
2857 	blocksize = 1 << inode->i_blkbits;
2858 	length = offset & (blocksize - 1);
2859 
2860 	/* Block boundary? Nothing to do */
2861 	if (!length)
2862 		return 0;
2863 
2864 	length = blocksize - length;
2865 	iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2866 
2867 	page = grab_cache_page(mapping, index);
2868 	err = -ENOMEM;
2869 	if (!page)
2870 		goto out;
2871 
2872 	if (!page_has_buffers(page))
2873 		create_empty_buffers(page, blocksize, 0);
2874 
2875 	/* Find the buffer that contains "offset" */
2876 	bh = page_buffers(page);
2877 	pos = blocksize;
2878 	while (offset >= pos) {
2879 		bh = bh->b_this_page;
2880 		iblock++;
2881 		pos += blocksize;
2882 	}
2883 
2884 	err = 0;
2885 	if (!buffer_mapped(bh)) {
2886 		WARN_ON(bh->b_size != blocksize);
2887 		err = get_block(inode, iblock, bh, 0);
2888 		if (err)
2889 			goto unlock;
2890 		/* unmapped? It's a hole - nothing to do */
2891 		if (!buffer_mapped(bh))
2892 			goto unlock;
2893 	}
2894 
2895 	/* Ok, it's mapped. Make sure it's up-to-date */
2896 	if (PageUptodate(page))
2897 		set_buffer_uptodate(bh);
2898 
2899 	if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) {
2900 		err = -EIO;
2901 		ll_rw_block(READ, 1, &bh);
2902 		wait_on_buffer(bh);
2903 		/* Uhhuh. Read error. Complain and punt. */
2904 		if (!buffer_uptodate(bh))
2905 			goto unlock;
2906 	}
2907 
2908 	zero_user(page, offset, length);
2909 	mark_buffer_dirty(bh);
2910 	err = 0;
2911 
2912 unlock:
2913 	unlock_page(page);
2914 	page_cache_release(page);
2915 out:
2916 	return err;
2917 }
2918 
2919 /*
2920  * The generic ->writepage function for buffer-backed address_spaces
2921  */
block_write_full_page(struct page * page,get_block_t * get_block,struct writeback_control * wbc)2922 int block_write_full_page(struct page *page, get_block_t *get_block,
2923 			struct writeback_control *wbc)
2924 {
2925 	struct inode * const inode = page->mapping->host;
2926 	loff_t i_size = i_size_read(inode);
2927 	const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2928 	unsigned offset;
2929 
2930 	/* Is the page fully inside i_size? */
2931 	if (page->index < end_index)
2932 		return __block_write_full_page(inode, page, get_block, wbc);
2933 
2934 	/* Is the page fully outside i_size? (truncate in progress) */
2935 	offset = i_size & (PAGE_CACHE_SIZE-1);
2936 	if (page->index >= end_index+1 || !offset) {
2937 		/*
2938 		 * The page may have dirty, unmapped buffers.  For example,
2939 		 * they may have been added in ext3_writepage().  Make them
2940 		 * freeable here, so the page does not leak.
2941 		 */
2942 		do_invalidatepage(page, 0);
2943 		unlock_page(page);
2944 		return 0; /* don't care */
2945 	}
2946 
2947 	/*
2948 	 * The page straddles i_size.  It must be zeroed out on each and every
2949 	 * writepage invokation because it may be mmapped.  "A file is mapped
2950 	 * in multiples of the page size.  For a file that is not a multiple of
2951 	 * the  page size, the remaining memory is zeroed when mapped, and
2952 	 * writes to that region are not written out to the file."
2953 	 */
2954 	zero_user_segment(page, offset, PAGE_CACHE_SIZE);
2955 	return __block_write_full_page(inode, page, get_block, wbc);
2956 }
2957 
generic_block_bmap(struct address_space * mapping,sector_t block,get_block_t * get_block)2958 sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
2959 			    get_block_t *get_block)
2960 {
2961 	struct buffer_head tmp;
2962 	struct inode *inode = mapping->host;
2963 	tmp.b_state = 0;
2964 	tmp.b_blocknr = 0;
2965 	tmp.b_size = 1 << inode->i_blkbits;
2966 	get_block(inode, block, &tmp, 0);
2967 	return tmp.b_blocknr;
2968 }
2969 
end_bio_bh_io_sync(struct bio * bio,int err)2970 static void end_bio_bh_io_sync(struct bio *bio, int err)
2971 {
2972 	struct buffer_head *bh = bio->bi_private;
2973 
2974 	if (err == -EOPNOTSUPP) {
2975 		set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
2976 		set_bit(BH_Eopnotsupp, &bh->b_state);
2977 	}
2978 
2979 	if (unlikely (test_bit(BIO_QUIET,&bio->bi_flags)))
2980 		set_bit(BH_Quiet, &bh->b_state);
2981 
2982 	bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags));
2983 	bio_put(bio);
2984 }
2985 
submit_bh(int rw,struct buffer_head * bh)2986 int submit_bh(int rw, struct buffer_head * bh)
2987 {
2988 	struct bio *bio;
2989 	int ret = 0;
2990 
2991 	BUG_ON(!buffer_locked(bh));
2992 	BUG_ON(!buffer_mapped(bh));
2993 	BUG_ON(!bh->b_end_io);
2994 
2995 	/*
2996 	 * Mask in barrier bit for a write (could be either a WRITE or a
2997 	 * WRITE_SYNC
2998 	 */
2999 	if (buffer_ordered(bh) && (rw & WRITE))
3000 		rw |= WRITE_BARRIER;
3001 
3002 	/*
3003 	 * Only clear out a write error when rewriting
3004 	 */
3005 	if (test_set_buffer_req(bh) && (rw & WRITE))
3006 		clear_buffer_write_io_error(bh);
3007 
3008 	/*
3009 	 * from here on down, it's all bio -- do the initial mapping,
3010 	 * submit_bio -> generic_make_request may further map this bio around
3011 	 */
3012 	bio = bio_alloc(GFP_NOIO, 1);
3013 
3014 	bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
3015 	bio->bi_bdev = bh->b_bdev;
3016 	bio->bi_io_vec[0].bv_page = bh->b_page;
3017 	bio->bi_io_vec[0].bv_len = bh->b_size;
3018 	bio->bi_io_vec[0].bv_offset = bh_offset(bh);
3019 
3020 	bio->bi_vcnt = 1;
3021 	bio->bi_idx = 0;
3022 	bio->bi_size = bh->b_size;
3023 
3024 	bio->bi_end_io = end_bio_bh_io_sync;
3025 	bio->bi_private = bh;
3026 
3027 	bio_get(bio);
3028 	submit_bio(rw, bio);
3029 
3030 	if (bio_flagged(bio, BIO_EOPNOTSUPP))
3031 		ret = -EOPNOTSUPP;
3032 
3033 	bio_put(bio);
3034 	return ret;
3035 }
3036 
3037 /**
3038  * ll_rw_block: low-level access to block devices (DEPRECATED)
3039  * @rw: whether to %READ or %WRITE or %SWRITE or maybe %READA (readahead)
3040  * @nr: number of &struct buffer_heads in the array
3041  * @bhs: array of pointers to &struct buffer_head
3042  *
3043  * ll_rw_block() takes an array of pointers to &struct buffer_heads, and
3044  * requests an I/O operation on them, either a %READ or a %WRITE.  The third
3045  * %SWRITE is like %WRITE only we make sure that the *current* data in buffers
3046  * are sent to disk. The fourth %READA option is described in the documentation
3047  * for generic_make_request() which ll_rw_block() calls.
3048  *
3049  * This function drops any buffer that it cannot get a lock on (with the
3050  * BH_Lock state bit) unless SWRITE is required, any buffer that appears to be
3051  * clean when doing a write request, and any buffer that appears to be
3052  * up-to-date when doing read request.  Further it marks as clean buffers that
3053  * are processed for writing (the buffer cache won't assume that they are
3054  * actually clean until the buffer gets unlocked).
3055  *
3056  * ll_rw_block sets b_end_io to simple completion handler that marks
3057  * the buffer up-to-date (if approriate), unlocks the buffer and wakes
3058  * any waiters.
3059  *
3060  * All of the buffers must be for the same device, and must also be a
3061  * multiple of the current approved size for the device.
3062  */
ll_rw_block(int rw,int nr,struct buffer_head * bhs[])3063 void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
3064 {
3065 	int i;
3066 
3067 	for (i = 0; i < nr; i++) {
3068 		struct buffer_head *bh = bhs[i];
3069 
3070 		if (rw == SWRITE || rw == SWRITE_SYNC)
3071 			lock_buffer(bh);
3072 		else if (!trylock_buffer(bh))
3073 			continue;
3074 
3075 		if (rw == WRITE || rw == SWRITE || rw == SWRITE_SYNC) {
3076 			if (test_clear_buffer_dirty(bh)) {
3077 				bh->b_end_io = end_buffer_write_sync;
3078 				get_bh(bh);
3079 				if (rw == SWRITE_SYNC)
3080 					submit_bh(WRITE_SYNC, bh);
3081 				else
3082 					submit_bh(WRITE, bh);
3083 				continue;
3084 			}
3085 		} else {
3086 			if (!buffer_uptodate(bh)) {
3087 				bh->b_end_io = end_buffer_read_sync;
3088 				get_bh(bh);
3089 				submit_bh(rw, bh);
3090 				continue;
3091 			}
3092 		}
3093 		unlock_buffer(bh);
3094 	}
3095 }
3096 
3097 /*
3098  * For a data-integrity writeout, we need to wait upon any in-progress I/O
3099  * and then start new I/O and then wait upon it.  The caller must have a ref on
3100  * the buffer_head.
3101  */
sync_dirty_buffer(struct buffer_head * bh)3102 int sync_dirty_buffer(struct buffer_head *bh)
3103 {
3104 	int ret = 0;
3105 
3106 	WARN_ON(atomic_read(&bh->b_count) < 1);
3107 	lock_buffer(bh);
3108 	if (test_clear_buffer_dirty(bh)) {
3109 		get_bh(bh);
3110 		bh->b_end_io = end_buffer_write_sync;
3111 		ret = submit_bh(WRITE, bh);
3112 		wait_on_buffer(bh);
3113 		if (buffer_eopnotsupp(bh)) {
3114 			clear_buffer_eopnotsupp(bh);
3115 			ret = -EOPNOTSUPP;
3116 		}
3117 		if (!ret && !buffer_uptodate(bh))
3118 			ret = -EIO;
3119 	} else {
3120 		unlock_buffer(bh);
3121 	}
3122 	return ret;
3123 }
3124 
3125 /*
3126  * try_to_free_buffers() checks if all the buffers on this particular page
3127  * are unused, and releases them if so.
3128  *
3129  * Exclusion against try_to_free_buffers may be obtained by either
3130  * locking the page or by holding its mapping's private_lock.
3131  *
3132  * If the page is dirty but all the buffers are clean then we need to
3133  * be sure to mark the page clean as well.  This is because the page
3134  * may be against a block device, and a later reattachment of buffers
3135  * to a dirty page will set *all* buffers dirty.  Which would corrupt
3136  * filesystem data on the same device.
3137  *
3138  * The same applies to regular filesystem pages: if all the buffers are
3139  * clean then we set the page clean and proceed.  To do that, we require
3140  * total exclusion from __set_page_dirty_buffers().  That is obtained with
3141  * private_lock.
3142  *
3143  * try_to_free_buffers() is non-blocking.
3144  */
buffer_busy(struct buffer_head * bh)3145 static inline int buffer_busy(struct buffer_head *bh)
3146 {
3147 	return atomic_read(&bh->b_count) |
3148 		(bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
3149 }
3150 
3151 static int
drop_buffers(struct page * page,struct buffer_head ** buffers_to_free)3152 drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
3153 {
3154 	struct buffer_head *head = page_buffers(page);
3155 	struct buffer_head *bh;
3156 
3157 	bh = head;
3158 	do {
3159 		if (buffer_write_io_error(bh) && page->mapping)
3160 			set_bit(AS_EIO, &page->mapping->flags);
3161 		if (buffer_busy(bh))
3162 			goto failed;
3163 		bh = bh->b_this_page;
3164 	} while (bh != head);
3165 
3166 	do {
3167 		struct buffer_head *next = bh->b_this_page;
3168 
3169 		if (bh->b_assoc_map)
3170 			__remove_assoc_queue(bh);
3171 		bh = next;
3172 	} while (bh != head);
3173 	*buffers_to_free = head;
3174 	__clear_page_buffers(page);
3175 	return 1;
3176 failed:
3177 	return 0;
3178 }
3179 
try_to_free_buffers(struct page * page)3180 int try_to_free_buffers(struct page *page)
3181 {
3182 	struct address_space * const mapping = page->mapping;
3183 	struct buffer_head *buffers_to_free = NULL;
3184 	int ret = 0;
3185 
3186 	BUG_ON(!PageLocked(page));
3187 	if (PageWriteback(page))
3188 		return 0;
3189 
3190 	if (mapping == NULL) {		/* can this still happen? */
3191 		ret = drop_buffers(page, &buffers_to_free);
3192 		goto out;
3193 	}
3194 
3195 	spin_lock(&mapping->private_lock);
3196 	ret = drop_buffers(page, &buffers_to_free);
3197 
3198 	/*
3199 	 * If the filesystem writes its buffers by hand (eg ext3)
3200 	 * then we can have clean buffers against a dirty page.  We
3201 	 * clean the page here; otherwise the VM will never notice
3202 	 * that the filesystem did any IO at all.
3203 	 *
3204 	 * Also, during truncate, discard_buffer will have marked all
3205 	 * the page's buffers clean.  We discover that here and clean
3206 	 * the page also.
3207 	 *
3208 	 * private_lock must be held over this entire operation in order
3209 	 * to synchronise against __set_page_dirty_buffers and prevent the
3210 	 * dirty bit from being lost.
3211 	 */
3212 	if (ret)
3213 		cancel_dirty_page(page, PAGE_CACHE_SIZE);
3214 	spin_unlock(&mapping->private_lock);
3215 out:
3216 	if (buffers_to_free) {
3217 		struct buffer_head *bh = buffers_to_free;
3218 
3219 		do {
3220 			struct buffer_head *next = bh->b_this_page;
3221 			free_buffer_head(bh);
3222 			bh = next;
3223 		} while (bh != buffers_to_free);
3224 	}
3225 	return ret;
3226 }
3227 EXPORT_SYMBOL(try_to_free_buffers);
3228 
block_sync_page(struct page * page)3229 void block_sync_page(struct page *page)
3230 {
3231 	struct address_space *mapping;
3232 
3233 	smp_mb();
3234 	mapping = page_mapping(page);
3235 	if (mapping)
3236 		blk_run_backing_dev(mapping->backing_dev_info, page);
3237 }
3238 
3239 /*
3240  * There are no bdflush tunables left.  But distributions are
3241  * still running obsolete flush daemons, so we terminate them here.
3242  *
3243  * Use of bdflush() is deprecated and will be removed in a future kernel.
3244  * The `pdflush' kernel threads fully replace bdflush daemons and this call.
3245  */
SYSCALL_DEFINE2(bdflush,int,func,long,data)3246 SYSCALL_DEFINE2(bdflush, int, func, long, data)
3247 {
3248 	static int msg_count;
3249 
3250 	if (!capable(CAP_SYS_ADMIN))
3251 		return -EPERM;
3252 
3253 	if (msg_count < 5) {
3254 		msg_count++;
3255 		printk(KERN_INFO
3256 			"warning: process `%s' used the obsolete bdflush"
3257 			" system call\n", current->comm);
3258 		printk(KERN_INFO "Fix your initscripts?\n");
3259 	}
3260 
3261 	if (func == 1)
3262 		do_exit(0);
3263 	return 0;
3264 }
3265 
3266 /*
3267  * Buffer-head allocation
3268  */
3269 static struct kmem_cache *bh_cachep;
3270 
3271 /*
3272  * Once the number of bh's in the machine exceeds this level, we start
3273  * stripping them in writeback.
3274  */
3275 static int max_buffer_heads;
3276 
3277 int buffer_heads_over_limit;
3278 
3279 struct bh_accounting {
3280 	int nr;			/* Number of live bh's */
3281 	int ratelimit;		/* Limit cacheline bouncing */
3282 };
3283 
3284 static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0};
3285 
recalc_bh_state(void)3286 static void recalc_bh_state(void)
3287 {
3288 	int i;
3289 	int tot = 0;
3290 
3291 	if (__get_cpu_var(bh_accounting).ratelimit++ < 4096)
3292 		return;
3293 	__get_cpu_var(bh_accounting).ratelimit = 0;
3294 	for_each_online_cpu(i)
3295 		tot += per_cpu(bh_accounting, i).nr;
3296 	buffer_heads_over_limit = (tot > max_buffer_heads);
3297 }
3298 
alloc_buffer_head(gfp_t gfp_flags)3299 struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
3300 {
3301 	struct buffer_head *ret = kmem_cache_alloc(bh_cachep, gfp_flags);
3302 	if (ret) {
3303 		INIT_LIST_HEAD(&ret->b_assoc_buffers);
3304 		get_cpu_var(bh_accounting).nr++;
3305 		recalc_bh_state();
3306 		put_cpu_var(bh_accounting);
3307 	}
3308 	return ret;
3309 }
3310 EXPORT_SYMBOL(alloc_buffer_head);
3311 
free_buffer_head(struct buffer_head * bh)3312 void free_buffer_head(struct buffer_head *bh)
3313 {
3314 	BUG_ON(!list_empty(&bh->b_assoc_buffers));
3315 	kmem_cache_free(bh_cachep, bh);
3316 	get_cpu_var(bh_accounting).nr--;
3317 	recalc_bh_state();
3318 	put_cpu_var(bh_accounting);
3319 }
3320 EXPORT_SYMBOL(free_buffer_head);
3321 
buffer_exit_cpu(int cpu)3322 static void buffer_exit_cpu(int cpu)
3323 {
3324 	int i;
3325 	struct bh_lru *b = &per_cpu(bh_lrus, cpu);
3326 
3327 	for (i = 0; i < BH_LRU_SIZE; i++) {
3328 		brelse(b->bhs[i]);
3329 		b->bhs[i] = NULL;
3330 	}
3331 	get_cpu_var(bh_accounting).nr += per_cpu(bh_accounting, cpu).nr;
3332 	per_cpu(bh_accounting, cpu).nr = 0;
3333 	put_cpu_var(bh_accounting);
3334 }
3335 
buffer_cpu_notify(struct notifier_block * self,unsigned long action,void * hcpu)3336 static int buffer_cpu_notify(struct notifier_block *self,
3337 			      unsigned long action, void *hcpu)
3338 {
3339 	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
3340 		buffer_exit_cpu((unsigned long)hcpu);
3341 	return NOTIFY_OK;
3342 }
3343 
3344 /**
3345  * bh_uptodate_or_lock - Test whether the buffer is uptodate
3346  * @bh: struct buffer_head
3347  *
3348  * Return true if the buffer is up-to-date and false,
3349  * with the buffer locked, if not.
3350  */
bh_uptodate_or_lock(struct buffer_head * bh)3351 int bh_uptodate_or_lock(struct buffer_head *bh)
3352 {
3353 	if (!buffer_uptodate(bh)) {
3354 		lock_buffer(bh);
3355 		if (!buffer_uptodate(bh))
3356 			return 0;
3357 		unlock_buffer(bh);
3358 	}
3359 	return 1;
3360 }
3361 EXPORT_SYMBOL(bh_uptodate_or_lock);
3362 
3363 /**
3364  * bh_submit_read - Submit a locked buffer for reading
3365  * @bh: struct buffer_head
3366  *
3367  * Returns zero on success and -EIO on error.
3368  */
bh_submit_read(struct buffer_head * bh)3369 int bh_submit_read(struct buffer_head *bh)
3370 {
3371 	BUG_ON(!buffer_locked(bh));
3372 
3373 	if (buffer_uptodate(bh)) {
3374 		unlock_buffer(bh);
3375 		return 0;
3376 	}
3377 
3378 	get_bh(bh);
3379 	bh->b_end_io = end_buffer_read_sync;
3380 	submit_bh(READ, bh);
3381 	wait_on_buffer(bh);
3382 	if (buffer_uptodate(bh))
3383 		return 0;
3384 	return -EIO;
3385 }
3386 EXPORT_SYMBOL(bh_submit_read);
3387 
3388 static void
init_buffer_head(void * data)3389 init_buffer_head(void *data)
3390 {
3391 	struct buffer_head *bh = data;
3392 
3393 	memset(bh, 0, sizeof(*bh));
3394 	INIT_LIST_HEAD(&bh->b_assoc_buffers);
3395 }
3396 
buffer_init(void)3397 void __init buffer_init(void)
3398 {
3399 	int nrpages;
3400 
3401 	bh_cachep = kmem_cache_create("buffer_head",
3402 			sizeof(struct buffer_head), 0,
3403 				(SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
3404 				SLAB_MEM_SPREAD),
3405 				init_buffer_head);
3406 
3407 	/*
3408 	 * Limit the bh occupancy to 10% of ZONE_NORMAL
3409 	 */
3410 	nrpages = (nr_free_buffer_pages() * 10) / 100;
3411 	max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
3412 	hotcpu_notifier(buffer_cpu_notify, 0);
3413 }
3414 
3415 EXPORT_SYMBOL(__bforget);
3416 EXPORT_SYMBOL(__brelse);
3417 EXPORT_SYMBOL(__wait_on_buffer);
3418 EXPORT_SYMBOL(block_commit_write);
3419 EXPORT_SYMBOL(block_prepare_write);
3420 EXPORT_SYMBOL(block_page_mkwrite);
3421 EXPORT_SYMBOL(block_read_full_page);
3422 EXPORT_SYMBOL(block_sync_page);
3423 EXPORT_SYMBOL(block_truncate_page);
3424 EXPORT_SYMBOL(block_write_full_page);
3425 EXPORT_SYMBOL(cont_write_begin);
3426 EXPORT_SYMBOL(end_buffer_read_sync);
3427 EXPORT_SYMBOL(end_buffer_write_sync);
3428 EXPORT_SYMBOL(file_fsync);
3429 EXPORT_SYMBOL(fsync_bdev);
3430 EXPORT_SYMBOL(generic_block_bmap);
3431 EXPORT_SYMBOL(generic_cont_expand_simple);
3432 EXPORT_SYMBOL(init_buffer);
3433 EXPORT_SYMBOL(invalidate_bdev);
3434 EXPORT_SYMBOL(ll_rw_block);
3435 EXPORT_SYMBOL(mark_buffer_dirty);
3436 EXPORT_SYMBOL(submit_bh);
3437 EXPORT_SYMBOL(sync_dirty_buffer);
3438 EXPORT_SYMBOL(unlock_buffer);
3439