/*
 * mdt.c - meta data file for NILFS
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Written by Ryusuke Konishi <ryusuke@osrg.net>
 */

#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/mm.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include "nilfs.h"
#include "btnode.h"
#include "segment.h"
#include "page.h"
#include "mdt.h"


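/*
 * Number of extra blocks submitted for readahead after a metadata
 * block has been read (see nilfs_mdt_read_block() below).
 */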
#define NILFS_MDT_MAX_RA_BLOCKS		(16 - 1)


static int
nilfs_mdt_insert_new_block(struct inode *inode, unsigned long block,
			   struct buffer_head *bh,
			   void (*init_block)(struct inode *,
					      struct buffer_head *, void *))
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	void *kaddr;
	int ret;

	/* The caller excludes read accesses using the page lock */

	/* set_buffer_new(bh); */
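	/*
	 * Clear the block address; the actual disk block number is
	 * assigned later, when the segment constructor writes the
	 * buffer out (log-structured allocation).
	 */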
	bh->b_blocknr = 0;

	ret = nilfs_bmap_insert(ii->i_bmap, block, (unsigned long)bh);
	if (unlikely(ret))
		return ret;

	set_buffer_mapped(bh);

	kaddr = kmap_atomic(bh->b_page);
	memset(kaddr + bh_offset(bh), 0, 1 << inode->i_blkbits);
	if (init_block)
		init_block(inode, bh, kaddr);
	flush_dcache_page(bh->b_page);
	kunmap_atomic(kaddr);

	set_buffer_uptodate(bh);
	mark_buffer_dirty(bh);
	nilfs_mdt_mark_dirty(inode);
	return 0;
}

static int nilfs_mdt_create_block(struct inode *inode, unsigned long block,
				  struct buffer_head **out_bh,
				  void (*init_block)(struct inode *,
						     struct buffer_head *,
						     void *))
{
	struct super_block *sb = inode->i_sb;
	struct nilfs_transaction_info ti;
	struct buffer_head *bh;
	int err;

	nilfs_transaction_begin(sb, &ti, 0);

	err = -ENOMEM;
	bh = nilfs_grab_buffer(inode, inode->i_mapping, block, 0);
	if (unlikely(!bh))
		goto failed_unlock;

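	/*
	 * An up-to-date buffer means the block already exists; report
	 * -EEXIST so that nilfs_mdt_get_block() falls back to the read
	 * path.
	 */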
	err = -EEXIST;
	if (buffer_uptodate(bh))
		goto failed_bh;

	wait_on_buffer(bh);
	if (buffer_uptodate(bh))
		goto failed_bh;

	bh->b_bdev = sb->s_bdev;
	err = nilfs_mdt_insert_new_block(inode, block, bh, init_block);
	if (likely(!err)) {
		get_bh(bh);
		*out_bh = bh;
	}

 failed_bh:
	unlock_page(bh->b_page);
	page_cache_release(bh->b_page);
	brelse(bh);

 failed_unlock:
	if (likely(!err))
		err = nilfs_transaction_commit(sb);
	else
		nilfs_transaction_abort(sb);

	return err;
}

static int
nilfs_mdt_submit_block(struct inode *inode, unsigned long blkoff,
		       int mode, struct buffer_head **out_bh)
{
	struct buffer_head *bh;
	__u64 blknum = 0;
	int ret = -ENOMEM;

	bh = nilfs_grab_buffer(inode, inode->i_mapping, blkoff, 0);
	if (unlikely(!bh))
		goto failed;

	ret = -EEXIST; /* internal code */
	if (buffer_uptodate(bh))
		goto out;

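	/*
	 * A readahead request must never block; skip the block if its
	 * buffer lock is contended.
	 */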
	if (mode == READA) {
		if (!trylock_buffer(bh)) {
			ret = -EBUSY;
			goto failed_bh;
		}
	} else /* mode == READ */
		lock_buffer(bh);

	if (buffer_uptodate(bh)) {
		unlock_buffer(bh);
		goto out;
	}

	ret = nilfs_bmap_lookup(NILFS_I(inode)->i_bmap, blkoff, &blknum);
	if (unlikely(ret)) {
		unlock_buffer(bh);
		goto failed_bh;
	}
	map_bh(bh, inode->i_sb, (sector_t)blknum);

	bh->b_end_io = end_buffer_read_sync;
	get_bh(bh);
	submit_bh(mode, bh);
	ret = 0;
 out:
	get_bh(bh);
	*out_bh = bh;

 failed_bh:
	unlock_page(bh->b_page);
	page_cache_release(bh->b_page);
	brelse(bh);
 failed:
	return ret;
}

static int nilfs_mdt_read_block(struct inode *inode, unsigned long block,
				int readahead, struct buffer_head **out_bh)
{
	struct buffer_head *first_bh, *bh;
	unsigned long blkoff;
	int i, nr_ra_blocks = NILFS_MDT_MAX_RA_BLOCKS;
	int err;

	err = nilfs_mdt_submit_block(inode, block, READ, &first_bh);
	if (err == -EEXIST) /* internal code */
		goto out;

	if (unlikely(err))
		goto failed;

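	/*
	 * Speculatively submit the following blocks; the loop stops
	 * early if a bmap lookup fails or if the first read has
	 * already completed.
	 */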
	if (readahead) {
		blkoff = block + 1;
		for (i = 0; i < nr_ra_blocks; i++, blkoff++) {
			err = nilfs_mdt_submit_block(inode, blkoff, READA, &bh);
			if (likely(!err || err == -EEXIST))
				brelse(bh);
			else if (err != -EBUSY)
				break;
				/* abort readahead if bmap lookup failed */
			if (!buffer_locked(first_bh))
				goto out_no_wait;
		}
	}

	wait_on_buffer(first_bh);

 out_no_wait:
	err = -EIO;
	if (!buffer_uptodate(first_bh))
		goto failed_bh;
 out:
	*out_bh = first_bh;
	return 0;

 failed_bh:
	brelse(first_bh);
 failed:
	return err;
}

/**
 * nilfs_mdt_get_block - read or create a buffer on the meta data file.
 * @inode: inode of the meta data file
 * @blkoff: block offset
 * @create: create flag
 * @init_block: initializer used for newly allocated block
 * @out_bh: output of a pointer to the buffer_head
 *
 * nilfs_mdt_get_block() looks up the specified buffer and tries to create
 * a new buffer if @create is nonzero.  On success, the returned buffer is
 * assured to be either an existing one or one newly formatted under the
 * protection of a buffer lock.  @out_bh is set only when zero is returned.
 *
 * Return Value: On success, it returns 0. On error, one of the following
 * negative error codes is returned.
 *
 * %-ENOMEM - Insufficient memory available.
 *
 * %-EIO - I/O error
 *
 * %-ENOENT - the specified block does not exist (hole block)
 *
 * %-EROFS - Read only filesystem (for create mode)
 */
int nilfs_mdt_get_block(struct inode *inode, unsigned long blkoff, int create,
			void (*init_block)(struct inode *,
					   struct buffer_head *, void *),
			struct buffer_head **out_bh)
{
	int ret;

	/* Should be rewritten by merging with nilfs_mdt_read_block() */
 retry:
	ret = nilfs_mdt_read_block(inode, blkoff, !create, out_bh);
	if (!create || ret != -ENOENT)
		return ret;

	ret = nilfs_mdt_create_block(inode, blkoff, out_bh, init_block);
	if (unlikely(ret == -EEXIST)) {
		/* create = 0; */ /* limit read-create loop retries */
		goto retry;
	}
	return ret;
}

/**
 * nilfs_mdt_delete_block - make a hole on the meta data file.
 * @inode: inode of the meta data file
 * @block: block offset
 *
 * Return Value: On success, zero is returned.
 * On error, one of the following negative error codes is returned.
 *
 * %-ENOMEM - Insufficient memory available.
 *
 * %-EIO - I/O error
 */
int nilfs_mdt_delete_block(struct inode *inode, unsigned long block)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	int err;

	err = nilfs_bmap_delete(ii->i_bmap, block);
	if (!err || err == -ENOENT) {
		nilfs_mdt_mark_dirty(inode);
		nilfs_mdt_forget_block(inode, block);
	}
	return err;
}

/**
 * nilfs_mdt_forget_block - discard dirty state and try to remove the page
 * @inode: inode of the meta data file
 * @block: block offset
 *
 * nilfs_mdt_forget_block() clears the dirty flag of the specified buffer,
 * and tries to release the page including the buffer from the page cache.
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error codes is returned.
 *
 * %-EBUSY - page has an active buffer.
 *
 * %-ENOENT - page cache has no page addressed by the offset.
 */
int nilfs_mdt_forget_block(struct inode *inode, unsigned long block)
{
	pgoff_t index = (pgoff_t)block >>
		(PAGE_CACHE_SHIFT - inode->i_blkbits);
	struct page *page;
	unsigned long first_block;
	int ret = 0;
	int still_dirty;

	page = find_lock_page(inode->i_mapping, index);
	if (!page)
		return -ENOENT;

	wait_on_page_writeback(page);

	first_block = (unsigned long)index <<
		(PAGE_CACHE_SHIFT - inode->i_blkbits);
	if (page_has_buffers(page)) {
		struct buffer_head *bh;

		bh = nilfs_page_get_nth_block(page, block - first_block);
		nilfs_forget_buffer(bh);
	}
	still_dirty = PageDirty(page);
	unlock_page(page);
	page_cache_release(page);

	if (still_dirty ||
	    invalidate_inode_pages2_range(inode->i_mapping, index, index) != 0)
		ret = -EBUSY;
	return ret;
}

/**
 * nilfs_mdt_mark_block_dirty - mark a block on the meta data file dirty.
 * @inode: inode of the meta data file
 * @block: block offset
 *
 * Return Value: On success, it returns 0. On error, one of the following
 * negative error codes is returned.
 *
 * %-ENOMEM - Insufficient memory available.
 *
 * %-EIO - I/O error
 *
 * %-ENOENT - the specified block does not exist (hole block)
 */
int nilfs_mdt_mark_block_dirty(struct inode *inode, unsigned long block)
{
	struct buffer_head *bh;
	int err;

	err = nilfs_mdt_read_block(inode, block, 0, &bh);
	if (unlikely(err))
		return err;
	mark_buffer_dirty(bh);
	nilfs_mdt_mark_dirty(inode);
	brelse(bh);
	return 0;
}

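/**
 * nilfs_mdt_fetch_dirty - check whether the metadata file is dirty
 * @inode: inode of the meta data file
 *
 * Folds a dirty bmap state into the NILFS_I_DIRTY flag of the inode,
 * and returns a nonzero value if the metadata file has dirty state
 * that must be written out.
 */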
int nilfs_mdt_fetch_dirty(struct inode *inode)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);

	if (nilfs_bmap_test_and_clear_dirty(ii->i_bmap)) {
		set_bit(NILFS_I_DIRTY, &ii->i_state);
		return 1;
	}
	return test_bit(NILFS_I_DIRTY, &ii->i_state);
}

static int
nilfs_mdt_write_page(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct super_block *sb;
	int err = 0;

	if (inode && (inode->i_sb->s_flags & MS_RDONLY)) {
		/*
		 * The filesystem was remounted read-only because of an
		 * error or metadata corruption, but dirty pages are
		 * still being flushed in the background.  Simply
		 * discard this dirty page.
		 */
		nilfs_clear_dirty_page(page, false);
		unlock_page(page);
		return -EROFS;
	}

	redirty_page_for_writepage(wbc, page);
	unlock_page(page);

	if (!inode)
		return 0;

	sb = inode->i_sb;

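	/*
	 * Metadata pages are not written back directly; a sync request
	 * triggers full segment construction, while writeback for
	 * reclaim only kicks the segment constructor for this inode.
	 */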
	if (wbc->sync_mode == WB_SYNC_ALL)
		err = nilfs_construct_segment(sb);
	else if (wbc->for_reclaim)
		nilfs_flush_segment(sb, inode->i_ino);

	return err;
}


static const struct address_space_operations def_mdt_aops = {
	.writepage		= nilfs_mdt_write_page,
};

static const struct inode_operations def_mdt_iops;
static const struct file_operations def_mdt_fops;


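/**
 * nilfs_mdt_init - attach meta data file private data to an inode
 * @inode: inode of the meta data file
 * @gfp_mask: gfp mask applied to the page cache of the inode
 * @objsz: size of the private object allocated for inode->i_private
 */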
int nilfs_mdt_init(struct inode *inode, gfp_t gfp_mask, size_t objsz)
{
	struct nilfs_mdt_info *mi;

	mi = kzalloc(max(sizeof(*mi), objsz), GFP_NOFS);
	if (!mi)
		return -ENOMEM;

	init_rwsem(&mi->mi_sem);
	inode->i_private = mi;

	inode->i_mode = S_IFREG;
	mapping_set_gfp_mask(inode->i_mapping, gfp_mask);
	inode->i_mapping->backing_dev_info = inode->i_sb->s_bdi;

	inode->i_op = &def_mdt_iops;
	inode->i_fop = &def_mdt_fops;
	inode->i_mapping->a_ops = &def_mdt_aops;

	return 0;
}

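/**
 * nilfs_mdt_set_entry_size - set the sizes of fixed-size records
 * @inode: inode of the meta data file
 * @entry_size: size of an entry in bytes
 * @header_size: size of the header area placed at the top of the file
 *
 * The offset of the first entry is expressed in entries; the header is
 * rounded up to a whole number of entry slots.
 */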
void nilfs_mdt_set_entry_size(struct inode *inode, unsigned entry_size,
			      unsigned header_size)
{
	struct nilfs_mdt_info *mi = NILFS_MDT(inode);

	mi->mi_entry_size = entry_size;
	mi->mi_entries_per_block = (1 << inode->i_blkbits) / entry_size;
	mi->mi_first_entry_offset = DIV_ROUND_UP(header_size, entry_size);
}

/**
 * nilfs_mdt_setup_shadow_map - setup shadow map and bind it to metadata file
 * @inode: inode of the metadata file
 * @shadow: shadow mapping
 */
int nilfs_mdt_setup_shadow_map(struct inode *inode,
			       struct nilfs_shadow_map *shadow)
{
	struct nilfs_mdt_info *mi = NILFS_MDT(inode);
	struct backing_dev_info *bdi = inode->i_sb->s_bdi;

	INIT_LIST_HEAD(&shadow->frozen_buffers);
	address_space_init_once(&shadow->frozen_data);
	nilfs_mapping_init(&shadow->frozen_data, inode, bdi);
	address_space_init_once(&shadow->frozen_btnodes);
	nilfs_mapping_init(&shadow->frozen_btnodes, inode, bdi);
	mi->mi_shadow = shadow;
	return 0;
}

/**
 * nilfs_mdt_save_to_shadow_map - copy bmap and dirty pages to shadow map
 * @inode: inode of the metadata file
 */
int nilfs_mdt_save_to_shadow_map(struct inode *inode)
{
	struct nilfs_mdt_info *mi = NILFS_MDT(inode);
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct nilfs_shadow_map *shadow = mi->mi_shadow;
	int ret;

	ret = nilfs_copy_dirty_pages(&shadow->frozen_data, inode->i_mapping);
	if (ret)
		goto out;

	ret = nilfs_copy_dirty_pages(&shadow->frozen_btnodes,
				     &ii->i_btnode_cache);
	if (ret)
		goto out;

	nilfs_bmap_save(ii->i_bmap, &shadow->bmap_store);
 out:
	return ret;
}

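/**
 * nilfs_mdt_freeze_buffer - save the current state of a buffer
 * @inode: inode of the meta data file
 * @bh: buffer to be frozen
 *
 * Copies the contents of @bh into the shadow page cache and registers
 * the copy on the frozen buffer list.  A buffer that has already been
 * frozen is left unchanged.
 */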
int nilfs_mdt_freeze_buffer(struct inode *inode, struct buffer_head *bh)
{
	struct nilfs_shadow_map *shadow = NILFS_MDT(inode)->mi_shadow;
	struct buffer_head *bh_frozen;
	struct page *page;
	int blkbits = inode->i_blkbits;

	page = grab_cache_page(&shadow->frozen_data, bh->b_page->index);
	if (!page)
		return -ENOMEM;

	if (!page_has_buffers(page))
		create_empty_buffers(page, 1 << blkbits, 0);

	bh_frozen = nilfs_page_get_nth_block(page, bh_offset(bh) >> blkbits);

	if (!buffer_uptodate(bh_frozen))
		nilfs_copy_buffer(bh_frozen, bh);
	if (list_empty(&bh_frozen->b_assoc_buffers)) {
		list_add_tail(&bh_frozen->b_assoc_buffers,
			      &shadow->frozen_buffers);
		set_buffer_nilfs_redirected(bh);
	} else {
		brelse(bh_frozen); /* already frozen */
	}

	unlock_page(page);
	page_cache_release(page);
	return 0;
}

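/**
 * nilfs_mdt_get_frozen_buffer - search for a frozen copy of a buffer
 * @inode: inode of the meta data file
 * @bh: buffer whose frozen counterpart is looked up
 *
 * Returns the frozen buffer with its reference count incremented, or
 * NULL if no frozen copy exists in the shadow page cache.
 */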
struct buffer_head *
nilfs_mdt_get_frozen_buffer(struct inode *inode, struct buffer_head *bh)
{
	struct nilfs_shadow_map *shadow = NILFS_MDT(inode)->mi_shadow;
	struct buffer_head *bh_frozen = NULL;
	struct page *page;
	int n;

	page = find_lock_page(&shadow->frozen_data, bh->b_page->index);
	if (page) {
		if (page_has_buffers(page)) {
			n = bh_offset(bh) >> inode->i_blkbits;
			bh_frozen = nilfs_page_get_nth_block(page, n);
		}
		unlock_page(page);
		page_cache_release(page);
	}
	return bh_frozen;
}

static void nilfs_release_frozen_buffers(struct nilfs_shadow_map *shadow)
{
	struct list_head *head = &shadow->frozen_buffers;
	struct buffer_head *bh;

	while (!list_empty(head)) {
		bh = list_first_entry(head, struct buffer_head,
				      b_assoc_buffers);
		list_del_init(&bh->b_assoc_buffers);
		brelse(bh); /* drop ref-count to make it releasable */
	}
}

/**
 * nilfs_mdt_restore_from_shadow_map - restore dirty pages and bmap state
 * @inode: inode of the metadata file
 */
void nilfs_mdt_restore_from_shadow_map(struct inode *inode)
{
	struct nilfs_mdt_info *mi = NILFS_MDT(inode);
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct nilfs_shadow_map *shadow = mi->mi_shadow;

	down_write(&mi->mi_sem);

	if (mi->mi_palloc_cache)
		nilfs_palloc_clear_cache(inode);

	nilfs_clear_dirty_pages(inode->i_mapping, true);
	nilfs_copy_back_pages(inode->i_mapping, &shadow->frozen_data);

	nilfs_clear_dirty_pages(&ii->i_btnode_cache, true);
	nilfs_copy_back_pages(&ii->i_btnode_cache, &shadow->frozen_btnodes);

	nilfs_bmap_restore(ii->i_bmap, &shadow->bmap_store);

	up_write(&mi->mi_sem);
}

/**
 * nilfs_mdt_clear_shadow_map - truncate pages in shadow map caches
 * @inode: inode of the metadata file
 */
void nilfs_mdt_clear_shadow_map(struct inode *inode)
{
	struct nilfs_mdt_info *mi = NILFS_MDT(inode);
	struct nilfs_shadow_map *shadow = mi->mi_shadow;

	down_write(&mi->mi_sem);
	nilfs_release_frozen_buffers(shadow);
	truncate_inode_pages(&shadow->frozen_data, 0);
	truncate_inode_pages(&shadow->frozen_btnodes, 0);
	up_write(&mi->mi_sem);
}