/*
 * dat.c - NILFS disk address translation.
 *
 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 *
 * Written by Koji Sato <koji@osrg.net>.
 */

#include <linux/types.h>
#include <linux/buffer_head.h>
#include <linux/string.h>
#include <linux/errno.h>
#include "nilfs.h"
#include "mdt.h"
#include "alloc.h"
#include "dat.h"


#define NILFS_CNO_MIN	((__u64)1)
#define NILFS_CNO_MAX	(~(__u64)0)

struct nilfs_dat_info {
	struct nilfs_mdt_info mi;
	struct nilfs_palloc_cache palloc_cache;
	struct nilfs_shadow_map shadow;
};

static inline struct nilfs_dat_info *NILFS_DAT_I(struct inode *dat)
{
	return (struct nilfs_dat_info *)NILFS_MDT(dat);
}

static int nilfs_dat_prepare_entry(struct inode *dat,
				   struct nilfs_palloc_req *req, int create)
{
	return nilfs_palloc_get_entry_block(dat, req->pr_entry_nr,
					    create, &req->pr_entry_bh);
}

static void nilfs_dat_commit_entry(struct inode *dat,
				   struct nilfs_palloc_req *req)
{
	mark_buffer_dirty(req->pr_entry_bh);
	nilfs_mdt_mark_dirty(dat);
	brelse(req->pr_entry_bh);
}

static void nilfs_dat_abort_entry(struct inode *dat,
				  struct nilfs_palloc_req *req)
{
	brelse(req->pr_entry_bh);
}

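/*
 * Lifetime of a DAT entry, as implemented by the helpers below: a newly
 * allocated entry is initialized to the range [NILFS_CNO_MIN, NILFS_CNO_MAX)
 * with de_blocknr == 0 (allocated but not yet bound to a disk block);
 * nilfs_dat_commit_start() records the current checkpoint number in
 * de_start and the assigned disk block in de_blocknr;
 * nilfs_dat_commit_end() closes the interval by writing de_end (equal to
 * de_start when the block is already dead); an entry whose de_blocknr is
 * still 0 at that point is handed back to the allocator with the empty
 * range [NILFS_CNO_MIN, NILFS_CNO_MIN).
 */
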
int nilfs_dat_prepare_alloc(struct inode *dat, struct nilfs_palloc_req *req)
{
	int ret;

	ret = nilfs_palloc_prepare_alloc_entry(dat, req);
	if (ret < 0)
		return ret;

	ret = nilfs_dat_prepare_entry(dat, req, 1);
	if (ret < 0)
		nilfs_palloc_abort_alloc_entry(dat, req);

	return ret;
}

void nilfs_dat_commit_alloc(struct inode *dat, struct nilfs_palloc_req *req)
{
	struct nilfs_dat_entry *entry;
	void *kaddr;

	kaddr = kmap_atomic(req->pr_entry_bh->b_page);
	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
					     req->pr_entry_bh, kaddr);
	entry->de_start = cpu_to_le64(NILFS_CNO_MIN);
	entry->de_end = cpu_to_le64(NILFS_CNO_MAX);
	entry->de_blocknr = cpu_to_le64(0);
	kunmap_atomic(kaddr);

	nilfs_palloc_commit_alloc_entry(dat, req);
	nilfs_dat_commit_entry(dat, req);
}

void nilfs_dat_abort_alloc(struct inode *dat, struct nilfs_palloc_req *req)
{
	nilfs_dat_abort_entry(dat, req);
	nilfs_palloc_abort_alloc_entry(dat, req);
}

static void nilfs_dat_commit_free(struct inode *dat,
				  struct nilfs_palloc_req *req)
{
	struct nilfs_dat_entry *entry;
	void *kaddr;

	kaddr = kmap_atomic(req->pr_entry_bh->b_page);
	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
					     req->pr_entry_bh, kaddr);
	entry->de_start = cpu_to_le64(NILFS_CNO_MIN);
	entry->de_end = cpu_to_le64(NILFS_CNO_MIN);
	entry->de_blocknr = cpu_to_le64(0);
	kunmap_atomic(kaddr);

	nilfs_dat_commit_entry(dat, req);
	nilfs_palloc_commit_free_entry(dat, req);
}

int nilfs_dat_prepare_start(struct inode *dat, struct nilfs_palloc_req *req)
{
	int ret;

	ret = nilfs_dat_prepare_entry(dat, req, 0);
	WARN_ON(ret == -ENOENT);
	return ret;
}

void nilfs_dat_commit_start(struct inode *dat, struct nilfs_palloc_req *req,
			    sector_t blocknr)
{
	struct nilfs_dat_entry *entry;
	void *kaddr;

	kaddr = kmap_atomic(req->pr_entry_bh->b_page);
	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
					     req->pr_entry_bh, kaddr);
	entry->de_start = cpu_to_le64(nilfs_mdt_cno(dat));
	entry->de_blocknr = cpu_to_le64(blocknr);
	kunmap_atomic(kaddr);

	nilfs_dat_commit_entry(dat, req);
}

int nilfs_dat_prepare_end(struct inode *dat, struct nilfs_palloc_req *req)
{
	struct nilfs_dat_entry *entry;
	__u64 start;
	sector_t blocknr;
	void *kaddr;
	int ret;

	ret = nilfs_dat_prepare_entry(dat, req, 0);
	if (ret < 0) {
		WARN_ON(ret == -ENOENT);
		return ret;
	}

	kaddr = kmap_atomic(req->pr_entry_bh->b_page);
	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
					     req->pr_entry_bh, kaddr);
	start = le64_to_cpu(entry->de_start);
	blocknr = le64_to_cpu(entry->de_blocknr);
	kunmap_atomic(kaddr);

	if (blocknr == 0) {
		ret = nilfs_palloc_prepare_free_entry(dat, req);
		if (ret < 0) {
			nilfs_dat_abort_entry(dat, req);
			return ret;
		}
	}

	return 0;
}

void nilfs_dat_commit_end(struct inode *dat, struct nilfs_palloc_req *req,
			  int dead)
{
	struct nilfs_dat_entry *entry;
	__u64 start, end;
	sector_t blocknr;
	void *kaddr;

	kaddr = kmap_atomic(req->pr_entry_bh->b_page);
	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
					     req->pr_entry_bh, kaddr);
	end = start = le64_to_cpu(entry->de_start);
	if (!dead) {
		end = nilfs_mdt_cno(dat);
		WARN_ON(start > end);
	}
	entry->de_end = cpu_to_le64(end);
	blocknr = le64_to_cpu(entry->de_blocknr);
	kunmap_atomic(kaddr);

	if (blocknr == 0)
		nilfs_dat_commit_free(dat, req);
	else
		nilfs_dat_commit_entry(dat, req);
}

void nilfs_dat_abort_end(struct inode *dat, struct nilfs_palloc_req *req)
{
	struct nilfs_dat_entry *entry;
	__u64 start;
	sector_t blocknr;
	void *kaddr;

	kaddr = kmap_atomic(req->pr_entry_bh->b_page);
	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
					     req->pr_entry_bh, kaddr);
	start = le64_to_cpu(entry->de_start);
	blocknr = le64_to_cpu(entry->de_blocknr);
	kunmap_atomic(kaddr);

	if (start == nilfs_mdt_cno(dat) && blocknr == 0)
		nilfs_palloc_abort_free_entry(dat, req);
	nilfs_dat_abort_entry(dat, req);
}

int nilfs_dat_prepare_update(struct inode *dat,
			     struct nilfs_palloc_req *oldreq,
			     struct nilfs_palloc_req *newreq)
{
	int ret;

	ret = nilfs_dat_prepare_end(dat, oldreq);
	if (!ret) {
		ret = nilfs_dat_prepare_alloc(dat, newreq);
		if (ret < 0)
			nilfs_dat_abort_end(dat, oldreq);
	}
	return ret;
}

void nilfs_dat_commit_update(struct inode *dat,
			     struct nilfs_palloc_req *oldreq,
			     struct nilfs_palloc_req *newreq, int dead)
{
	nilfs_dat_commit_end(dat, oldreq, dead);
	nilfs_dat_commit_alloc(dat, newreq);
}

void nilfs_dat_abort_update(struct inode *dat,
			    struct nilfs_palloc_req *oldreq,
			    struct nilfs_palloc_req *newreq)
{
	nilfs_dat_abort_end(dat, oldreq);
	nilfs_dat_abort_alloc(dat, newreq);
}
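
/*
 * Illustrative sketch only (hypothetical caller, not code from this file):
 * replacing a live virtual block number with a freshly allocated one goes
 * through the prepare/commit/abort pairs above, roughly as follows.
 *
 *	struct nilfs_palloc_req oldreq, newreq;
 *	int err;
 *
 *	oldreq.pr_entry_nr = vblocknr;	(the number being retired)
 *	newreq.pr_entry_nr = vblocknr;	(allocation hint; overwritten by a
 *					 successful prepare)
 *	err = nilfs_dat_prepare_update(dat, &oldreq, &newreq);
 *	if (err)
 *		return err;
 *	...
 *	nilfs_dat_commit_update(dat, &oldreq, &newreq, dead);
 *	(or nilfs_dat_abort_update(dat, &oldreq, &newreq) on failure)
 */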

/**
 * nilfs_dat_mark_dirty - mark the DAT entry block for a virtual block number dirty
 * @dat: DAT file inode
 * @vblocknr: virtual block number
 *
 * Description: nilfs_dat_mark_dirty() reads in the block containing the DAT
 * entry for @vblocknr and marks both that buffer and the DAT metadata file
 * dirty, so that the entry is written out by the next segment construction.
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 */
int nilfs_dat_mark_dirty(struct inode *dat, __u64 vblocknr)
{
	struct nilfs_palloc_req req;
	int ret;

	req.pr_entry_nr = vblocknr;
	ret = nilfs_dat_prepare_entry(dat, &req, 0);
	if (ret == 0)
		nilfs_dat_commit_entry(dat, &req);
	return ret;
}

/**
 * nilfs_dat_freev - free virtual block numbers
 * @dat: DAT file inode
 * @vblocknrs: array of virtual block numbers
 * @nitems: number of virtual block numbers
 *
 * Description: nilfs_dat_freev() frees the virtual block numbers specified by
 * @vblocknrs and @nitems.
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOENT - The virtual block number has not been allocated.
 */
int nilfs_dat_freev(struct inode *dat, __u64 *vblocknrs, size_t nitems)
{
	return nilfs_palloc_freev(dat, vblocknrs, nitems);
}

/**
 * nilfs_dat_move - change a block number
 * @dat: DAT file inode
 * @vblocknr: virtual block number
 * @blocknr: block number
 *
 * Description: nilfs_dat_move() changes the block number associated with
 * @vblocknr to @blocknr.
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 */
int nilfs_dat_move(struct inode *dat, __u64 vblocknr, sector_t blocknr)
{
	struct buffer_head *entry_bh;
	struct nilfs_dat_entry *entry;
	void *kaddr;
	int ret;

	ret = nilfs_palloc_get_entry_block(dat, vblocknr, 0, &entry_bh);
	if (ret < 0)
		return ret;

	/*
	 * The given disk block number (blocknr) is not yet written to
	 * the device at this point.
	 *
	 * To prevent nilfs_dat_translate() from returning the
	 * uncommitted block number, this makes a copy of the entry
	 * buffer and redirects nilfs_dat_translate() to the copy.
	 */
	if (!buffer_nilfs_redirected(entry_bh)) {
		ret = nilfs_mdt_freeze_buffer(dat, entry_bh);
		if (ret) {
			brelse(entry_bh);
			return ret;
		}
	}

	kaddr = kmap_atomic(entry_bh->b_page);
	entry = nilfs_palloc_block_get_entry(dat, vblocknr, entry_bh, kaddr);
	if (unlikely(entry->de_blocknr == cpu_to_le64(0))) {
		printk(KERN_CRIT "%s: vbn = %llu, [%llu, %llu)\n", __func__,
		       (unsigned long long)vblocknr,
		       (unsigned long long)le64_to_cpu(entry->de_start),
		       (unsigned long long)le64_to_cpu(entry->de_end));
		kunmap_atomic(kaddr);
		brelse(entry_bh);
		return -EINVAL;
	}
	WARN_ON(blocknr == 0);
	entry->de_blocknr = cpu_to_le64(blocknr);
	kunmap_atomic(kaddr);

	mark_buffer_dirty(entry_bh);
	nilfs_mdt_mark_dirty(dat);

	brelse(entry_bh);

	return 0;
}

/**
 * nilfs_dat_translate - translate a virtual block number to a block number
 * @dat: DAT file inode
 * @vblocknr: virtual block number
 * @blocknrp: pointer to a block number
 *
 * Description: nilfs_dat_translate() maps the virtual block number @vblocknr
 * to the corresponding block number.
 *
 * Return Value: On success, 0 is returned and the block number associated
 * with @vblocknr is stored in the place pointed to by @blocknrp. On error,
 * one of the following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOENT - A block number associated with @vblocknr does not exist.
 */
int nilfs_dat_translate(struct inode *dat, __u64 vblocknr, sector_t *blocknrp)
{
	struct buffer_head *entry_bh, *bh;
	struct nilfs_dat_entry *entry;
	sector_t blocknr;
	void *kaddr;
	int ret;

	ret = nilfs_palloc_get_entry_block(dat, vblocknr, 0, &entry_bh);
	if (ret < 0)
		return ret;

	if (!nilfs_doing_gc() && buffer_nilfs_redirected(entry_bh)) {
		bh = nilfs_mdt_get_frozen_buffer(dat, entry_bh);
		if (bh) {
			WARN_ON(!buffer_uptodate(bh));
			brelse(entry_bh);
			entry_bh = bh;
		}
	}

	kaddr = kmap_atomic(entry_bh->b_page);
	entry = nilfs_palloc_block_get_entry(dat, vblocknr, entry_bh, kaddr);
	blocknr = le64_to_cpu(entry->de_blocknr);
	if (blocknr == 0) {
		ret = -ENOENT;
		goto out;
	}
	*blocknrp = blocknr;

 out:
	kunmap_atomic(kaddr);
	brelse(entry_bh);
	return ret;
}
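
/*
 * Illustrative sketch only (hypothetical caller, not code from this file):
 *
 *	sector_t pbn;
 *	int err;
 *
 *	err = nilfs_dat_translate(dat, vblocknr, &pbn);
 *	if (err < 0)
 *		return err;	(-ENOENT: no block is bound to vblocknr)
 *	(read or map the block at pbn)
 */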

/**
 * nilfs_dat_get_vinfo - get information on a group of virtual block numbers
 * @dat: DAT file inode
 * @buf: array of nilfs_vinfo structures
 * @visz: size of one nilfs_vinfo record
 * @nvi: number of records in @buf
 *
 * Description: for each record in @buf, nilfs_dat_get_vinfo() looks up the
 * DAT entry of the virtual block number held in vi_vblocknr and fills in the
 * entry's start checkpoint, end checkpoint, and block number.
 *
 * Return Value: On success, @nvi is returned. On error, a negative error
 * code is returned.
 */
ssize_t nilfs_dat_get_vinfo(struct inode *dat, void *buf, unsigned visz,
			    size_t nvi)
{
	struct buffer_head *entry_bh;
	struct nilfs_dat_entry *entry;
	struct nilfs_vinfo *vinfo = buf;
	__u64 first, last;
	void *kaddr;
	unsigned long entries_per_block = NILFS_MDT(dat)->mi_entries_per_block;
	int i, j, n, ret;

	for (i = 0; i < nvi; i += n) {
		ret = nilfs_palloc_get_entry_block(dat, vinfo->vi_vblocknr,
						   0, &entry_bh);
		if (ret < 0)
			return ret;
		kaddr = kmap_atomic(entry_bh->b_page);
		/* first and last virtual block numbers in this block */
		first = vinfo->vi_vblocknr;
		do_div(first, entries_per_block);
		first *= entries_per_block;
		last = first + entries_per_block - 1;
		for (j = i, n = 0;
		     j < nvi && vinfo->vi_vblocknr >= first &&
			     vinfo->vi_vblocknr <= last;
		     j++, n++, vinfo = (void *)vinfo + visz) {
			entry = nilfs_palloc_block_get_entry(
				dat, vinfo->vi_vblocknr, entry_bh, kaddr);
			vinfo->vi_start = le64_to_cpu(entry->de_start);
			vinfo->vi_end = le64_to_cpu(entry->de_end);
			vinfo->vi_blocknr = le64_to_cpu(entry->de_blocknr);
		}
		kunmap_atomic(kaddr);
		brelse(entry_bh);
	}

	return nvi;
}

/**
 * nilfs_dat_read - read or get dat inode
 * @sb: super block instance
 * @entry_size: size of a dat entry
 * @raw_inode: on-disk dat inode
 * @inodep: buffer to store the inode
 *
 * Return Value: On success, 0 is returned and the DAT inode is stored in the
 * place pointed to by @inodep. On error, a negative error code is returned.
 */
int nilfs_dat_read(struct super_block *sb, size_t entry_size,
		   struct nilfs_inode *raw_inode, struct inode **inodep)
{
	static struct lock_class_key dat_lock_key;
	struct inode *dat;
	struct nilfs_dat_info *di;
	int err;

	dat = nilfs_iget_locked(sb, NULL, NILFS_DAT_INO);
	if (unlikely(!dat))
		return -ENOMEM;
	if (!(dat->i_state & I_NEW))
		goto out;

	err = nilfs_mdt_init(dat, NILFS_MDT_GFP, sizeof(*di));
	if (err)
		goto failed;

	err = nilfs_palloc_init_blockgroup(dat, entry_size);
	if (err)
		goto failed;

	di = NILFS_DAT_I(dat);
	lockdep_set_class(&di->mi.mi_sem, &dat_lock_key);
	nilfs_palloc_setup_cache(dat, &di->palloc_cache);
	nilfs_mdt_setup_shadow_map(dat, &di->shadow);

	err = nilfs_read_inode_common(dat, raw_inode);
	if (err)
		goto failed;

	unlock_new_inode(dat);
 out:
	*inodep = dat;
	return 0;
 failed:
	iget_failed(dat);
	return err;
}
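
/*
 * Illustrative sketch only (hypothetical mount-time caller; sbp, raw_dat,
 * and nilfs->ns_dat are assumed names, not definitions from this file):
 *
 *	err = nilfs_dat_read(sb, le16_to_cpu(sbp->s_dat_entry_size),
 *			     raw_dat, &nilfs->ns_dat);
 *	if (err)
 *		goto failed;
 */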