// SPDX-License-Identifier: GPL-2.0+
/*
 * NILFS disk address translation.
 *
 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
 *
 * Written by Koji Sato.
 */

#include <linux/types.h>
#include <linux/buffer_head.h>
#include <linux/string.h>
#include <linux/errno.h>
#include "nilfs.h"
#include "mdt.h"
#include "alloc.h"
#include "dat.h"


#define NILFS_CNO_MIN	((__u64)1)
#define NILFS_CNO_MAX	(~(__u64)0)

/**
 * struct nilfs_dat_info - on-memory private data of DAT file
 * @mi: on-memory private data of metadata file
 * @palloc_cache: persistent object allocator cache of DAT file
 * @shadow: shadow map of DAT file
 */
struct nilfs_dat_info {
	struct nilfs_mdt_info mi;
	struct nilfs_palloc_cache palloc_cache;
	struct nilfs_shadow_map shadow;
};

static inline struct nilfs_dat_info *NILFS_DAT_I(struct inode *dat)
{
	return (struct nilfs_dat_info *)NILFS_MDT(dat);
}

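/*
 * nilfs_dat_prepare_entry - get the buffer of the entry block containing
 * @req->pr_entry_nr and store it in @req->pr_entry_bh, creating the block
 * first if @create is nonzero.  A block that should exist but does not is
 * reported as -EINVAL so that the bmap layer treats it as metadata
 * corruption.
 */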
static int nilfs_dat_prepare_entry(struct inode *dat,
				   struct nilfs_palloc_req *req, int create)
{
	int ret;

	ret = nilfs_palloc_get_entry_block(dat, req->pr_entry_nr,
					   create, &req->pr_entry_bh);
	if (unlikely(ret == -ENOENT)) {
		nilfs_err(dat->i_sb,
			  "DAT doesn't have a block to manage vblocknr = %llu",
			  (unsigned long long)req->pr_entry_nr);
		/*
		 * Return internal code -EINVAL to notify bmap layer of
		 * metadata corruption.
		 */
		ret = -EINVAL;
	}
	return ret;
}

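/*
 * nilfs_dat_commit_entry - commit a change to a DAT entry: mark the entry
 * buffer and the DAT inode dirty, then release the buffer acquired by
 * nilfs_dat_prepare_entry().
 */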
static void nilfs_dat_commit_entry(struct inode *dat,
				   struct nilfs_palloc_req *req)
{
	mark_buffer_dirty(req->pr_entry_bh);
	nilfs_mdt_mark_dirty(dat);
	brelse(req->pr_entry_bh);
}

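/*
 * nilfs_dat_abort_entry - abandon a prepared entry operation, releasing
 * the entry buffer without marking anything dirty.
 */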
static void nilfs_dat_abort_entry(struct inode *dat,
				  struct nilfs_palloc_req *req)
{
	brelse(req->pr_entry_bh);
}

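/**
 * nilfs_dat_prepare_alloc - prepare to allocate a virtual block number
 * @dat: DAT file inode
 * @req: request structure
 *
 * Reserves a free entry in the persistent object allocator and gets the
 * block that will hold the new DAT entry.  The allocation must be
 * finished with nilfs_dat_commit_alloc() or rolled back with
 * nilfs_dat_abort_alloc().
 */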
int nilfs_dat_prepare_alloc(struct inode *dat, struct nilfs_palloc_req *req)
{
	int ret;

	ret = nilfs_palloc_prepare_alloc_entry(dat, req);
	if (ret < 0)
		return ret;

	ret = nilfs_dat_prepare_entry(dat, req, 1);
	if (ret < 0)
		nilfs_palloc_abort_alloc_entry(dat, req);

	return ret;
}

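/**
 * nilfs_dat_commit_alloc - finish allocation of a virtual block number
 * @dat: DAT file inode
 * @req: request structure
 *
 * Initializes the new entry with the full checkpoint range
 * [NILFS_CNO_MIN, NILFS_CNO_MAX) and no disk block, then commits both
 * the allocator state and the entry block.
 */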
void nilfs_dat_commit_alloc(struct inode *dat, struct nilfs_palloc_req *req)
{
	struct nilfs_dat_entry *entry;
	void *kaddr;

	kaddr = kmap_atomic(req->pr_entry_bh->b_page);
	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
					     req->pr_entry_bh, kaddr);
	entry->de_start = cpu_to_le64(NILFS_CNO_MIN);
	entry->de_end = cpu_to_le64(NILFS_CNO_MAX);
	entry->de_blocknr = cpu_to_le64(0);
	kunmap_atomic(kaddr);

	nilfs_palloc_commit_alloc_entry(dat, req);
	nilfs_dat_commit_entry(dat, req);
}

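/**
 * nilfs_dat_abort_alloc - abort allocation of a virtual block number
 * @dat: DAT file inode
 * @req: request structure
 *
 * Releases the entry buffer and returns the reserved allocator entry.
 */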
void nilfs_dat_abort_alloc(struct inode *dat, struct nilfs_palloc_req *req)
{
	nilfs_dat_abort_entry(dat, req);
	nilfs_palloc_abort_alloc_entry(dat, req);
}

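/*
 * nilfs_dat_commit_free - finish deallocating a DAT entry: collapse its
 * checkpoint range to empty, clear its block address, and return the
 * entry to the allocator.  Missing descriptor or bitmap buffers indicate
 * an inconsistent state (probably a doubly used vblocknr), in which case
 * a filesystem error is flagged instead.
 */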
static void nilfs_dat_commit_free(struct inode *dat,
				  struct nilfs_palloc_req *req)
{
	struct nilfs_dat_entry *entry;
	void *kaddr;

	kaddr = kmap_atomic(req->pr_entry_bh->b_page);
	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
					     req->pr_entry_bh, kaddr);
	entry->de_start = cpu_to_le64(NILFS_CNO_MIN);
	entry->de_end = cpu_to_le64(NILFS_CNO_MIN);
	entry->de_blocknr = cpu_to_le64(0);
	kunmap_atomic(kaddr);

	nilfs_dat_commit_entry(dat, req);

	if (unlikely(req->pr_desc_bh == NULL || req->pr_bitmap_bh == NULL)) {
		nilfs_error(dat->i_sb,
			    "state inconsistency probably due to duplicate use of vblocknr = %llu",
			    (unsigned long long)req->pr_entry_nr);
		return;
	}
	nilfs_palloc_commit_free_entry(dat, req);
}

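/**
 * nilfs_dat_prepare_start - prepare to assign a block to a vblocknr
 * @dat: DAT file inode
 * @req: request structure
 *
 * Gets the DAT entry block for @req->pr_entry_nr; the assignment itself
 * is done by nilfs_dat_commit_start().
 */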
int nilfs_dat_prepare_start(struct inode *dat, struct nilfs_palloc_req *req)
{
	return nilfs_dat_prepare_entry(dat, req, 0);
}

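/**
 * nilfs_dat_commit_start - assign a disk block to a virtual block number
 * @dat: DAT file inode
 * @req: request structure
 * @blocknr: disk block number to assign
 *
 * Records @blocknr in the entry and stamps the current checkpoint number
 * as the start of the entry's lifetime.
 */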
void nilfs_dat_commit_start(struct inode *dat, struct nilfs_palloc_req *req,
			    sector_t blocknr)
{
	struct nilfs_dat_entry *entry;
	void *kaddr;

	kaddr = kmap_atomic(req->pr_entry_bh->b_page);
	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
					     req->pr_entry_bh, kaddr);
	entry->de_start = cpu_to_le64(nilfs_mdt_cno(dat));
	entry->de_blocknr = cpu_to_le64(blocknr);
	kunmap_atomic(kaddr);

	nilfs_dat_commit_entry(dat, req);
}

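/**
 * nilfs_dat_prepare_end - prepare to end the lifetime of a vblocknr
 * @dat: DAT file inode
 * @req: request structure
 *
 * Gets the DAT entry block for @req->pr_entry_nr.  If the entry has no
 * disk block assigned (it was never written out), the entry is also
 * prepared for freeing, since ending it will release it entirely.
 */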
int nilfs_dat_prepare_end(struct inode *dat, struct nilfs_palloc_req *req)
{
	struct nilfs_dat_entry *entry;
	sector_t blocknr;
	void *kaddr;
	int ret;

	ret = nilfs_dat_prepare_entry(dat, req, 0);
	if (ret < 0)
		return ret;

	kaddr = kmap_atomic(req->pr_entry_bh->b_page);
	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
					     req->pr_entry_bh, kaddr);
	blocknr = le64_to_cpu(entry->de_blocknr);
	kunmap_atomic(kaddr);

	if (blocknr == 0) {
		ret = nilfs_palloc_prepare_free_entry(dat, req);
		if (ret < 0) {
			nilfs_dat_abort_entry(dat, req);
			return ret;
		}
	}

	return 0;
}

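/**
 * nilfs_dat_commit_end - end the lifetime of a virtual block number
 * @dat: DAT file inode
 * @req: request structure
 * @dead: nonzero if no checkpoint needs to keep referring to the block
 *
 * Sets the end of the entry's lifetime to the current checkpoint number,
 * or, if @dead is set, leaves it equal to the start checkpoint so the
 * lifetime is empty.  Entries without a disk block are freed outright.
 */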
void nilfs_dat_commit_end(struct inode *dat, struct nilfs_palloc_req *req,
			  int dead)
{
	struct nilfs_dat_entry *entry;
	__u64 start, end;
	sector_t blocknr;
	void *kaddr;

	kaddr = kmap_atomic(req->pr_entry_bh->b_page);
	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
					     req->pr_entry_bh, kaddr);
	end = start = le64_to_cpu(entry->de_start);
	if (!dead) {
		end = nilfs_mdt_cno(dat);
		WARN_ON(start > end);
	}
	entry->de_end = cpu_to_le64(end);
	blocknr = le64_to_cpu(entry->de_blocknr);
	kunmap_atomic(kaddr);

	if (blocknr == 0)
		nilfs_dat_commit_free(dat, req);
	else
		nilfs_dat_commit_entry(dat, req);
}

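/**
 * nilfs_dat_abort_end - abort ending the lifetime of a vblocknr
 * @dat: DAT file inode
 * @req: request structure
 *
 * Rolls back nilfs_dat_prepare_end(): the pending free is cancelled for
 * an entry that was born in the current checkpoint and has no disk
 * block, and the entry buffer is released.
 */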
void nilfs_dat_abort_end(struct inode *dat, struct nilfs_palloc_req *req)
{
	struct nilfs_dat_entry *entry;
	__u64 start;
	sector_t blocknr;
	void *kaddr;

	kaddr = kmap_atomic(req->pr_entry_bh->b_page);
	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
					     req->pr_entry_bh, kaddr);
	start = le64_to_cpu(entry->de_start);
	blocknr = le64_to_cpu(entry->de_blocknr);
	kunmap_atomic(kaddr);

	if (start == nilfs_mdt_cno(dat) && blocknr == 0)
		nilfs_palloc_abort_free_entry(dat, req);
	nilfs_dat_abort_entry(dat, req);
}

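/**
 * nilfs_dat_prepare_update - prepare to replace a virtual block number
 * @dat: DAT file inode
 * @oldreq: request structure for the entry being retired
 * @newreq: request structure for the entry being allocated
 *
 * Combines nilfs_dat_prepare_end() on @oldreq with
 * nilfs_dat_prepare_alloc() on @newreq, undoing the former if the
 * latter fails.
 */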
int nilfs_dat_prepare_update(struct inode *dat,
			     struct nilfs_palloc_req *oldreq,
			     struct nilfs_palloc_req *newreq)
{
	int ret;

	ret = nilfs_dat_prepare_end(dat, oldreq);
	if (!ret) {
		ret = nilfs_dat_prepare_alloc(dat, newreq);
		if (ret < 0)
			nilfs_dat_abort_end(dat, oldreq);
	}
	return ret;
}

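/**
 * nilfs_dat_commit_update - commit replacement of a virtual block number
 * @dat: DAT file inode
 * @oldreq: request structure for the retired entry
 * @newreq: request structure for the new entry
 * @dead: nonzero if the old block's lifetime ends immediately
 */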
void nilfs_dat_commit_update(struct inode *dat,
			     struct nilfs_palloc_req *oldreq,
			     struct nilfs_palloc_req *newreq, int dead)
{
	nilfs_dat_commit_end(dat, oldreq, dead);
	nilfs_dat_commit_alloc(dat, newreq);
}

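/**
 * nilfs_dat_abort_update - abort replacement of a virtual block number
 * @dat: DAT file inode
 * @oldreq: request structure for the retired entry
 * @newreq: request structure for the new entry
 */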
void nilfs_dat_abort_update(struct inode *dat,
			    struct nilfs_palloc_req *oldreq,
			    struct nilfs_palloc_req *newreq)
{
	nilfs_dat_abort_end(dat, oldreq);
	nilfs_dat_abort_alloc(dat, newreq);
}

/**
 * nilfs_dat_mark_dirty - mark the DAT entry block for a vblocknr dirty
 * @dat: DAT file inode
 * @vblocknr: virtual block number
 *
 * Description: nilfs_dat_mark_dirty() marks the buffer of the DAT entry
 * block containing @vblocknr dirty so that the entry is written back in
 * the next segment construction.
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-EINVAL - The DAT entry block for @vblocknr is missing (metadata
 * corruption).
 */
int nilfs_dat_mark_dirty(struct inode *dat, __u64 vblocknr)
{
	struct nilfs_palloc_req req;
	int ret;

	req.pr_entry_nr = vblocknr;
	ret = nilfs_dat_prepare_entry(dat, &req, 0);
	if (ret == 0)
		nilfs_dat_commit_entry(dat, &req);
	return ret;
}

/**
 * nilfs_dat_freev - free virtual block numbers
 * @dat: DAT file inode
 * @vblocknrs: array of virtual block numbers
 * @nitems: number of virtual block numbers
 *
 * Description: nilfs_dat_freev() frees the virtual block numbers specified
 * by @vblocknrs and @nitems.
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOENT - The virtual block numbers have not been allocated.
 */
int nilfs_dat_freev(struct inode *dat, __u64 *vblocknrs, size_t nitems)
{
	return nilfs_palloc_freev(dat, vblocknrs, nitems);
}

/**
 * nilfs_dat_move - change a block number
 * @dat: DAT file inode
 * @vblocknr: virtual block number
 * @blocknr: block number
 *
 * Description: nilfs_dat_move() changes the block number associated with
 * @vblocknr to @blocknr.
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-EINVAL - The DAT entry for @vblocknr is invalid (metadata corruption).
 */
int nilfs_dat_move(struct inode *dat, __u64 vblocknr, sector_t blocknr)
{
	struct buffer_head *entry_bh;
	struct nilfs_dat_entry *entry;
	void *kaddr;
	int ret;

	ret = nilfs_palloc_get_entry_block(dat, vblocknr, 0, &entry_bh);
	if (ret < 0)
		return ret;

	/*
	 * The given disk block number (blocknr) is not yet written to
	 * the device at this point.
	 *
	 * To prevent nilfs_dat_translate() from returning the
	 * uncommitted block number, this makes a copy of the entry
	 * buffer and redirects nilfs_dat_translate() to the copy.
	 */
	if (!buffer_nilfs_redirected(entry_bh)) {
		ret = nilfs_mdt_freeze_buffer(dat, entry_bh);
		if (ret) {
			brelse(entry_bh);
			return ret;
		}
	}

	kaddr = kmap_atomic(entry_bh->b_page);
	entry = nilfs_palloc_block_get_entry(dat, vblocknr, entry_bh, kaddr);
	if (unlikely(entry->de_blocknr == cpu_to_le64(0))) {
		nilfs_crit(dat->i_sb,
			   "%s: invalid vblocknr = %llu, [%llu, %llu)",
			   __func__, (unsigned long long)vblocknr,
			   (unsigned long long)le64_to_cpu(entry->de_start),
			   (unsigned long long)le64_to_cpu(entry->de_end));
		kunmap_atomic(kaddr);
		brelse(entry_bh);
		return -EINVAL;
	}
	WARN_ON(blocknr == 0);
	entry->de_blocknr = cpu_to_le64(blocknr);
	kunmap_atomic(kaddr);

	mark_buffer_dirty(entry_bh);
	nilfs_mdt_mark_dirty(dat);

	brelse(entry_bh);

	return 0;
}

/**
 * nilfs_dat_translate - translate a virtual block number to a block number
 * @dat: DAT file inode
 * @vblocknr: virtual block number
 * @blocknrp: pointer to a block number
 *
 * Description: nilfs_dat_translate() maps the virtual block number @vblocknr
 * to the corresponding block number.
 *
 * Return Value: On success, 0 is returned and the block number associated
 * with @vblocknr is stored in the place pointed by @blocknrp. On error, one
 * of the following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOENT - A block number associated with @vblocknr does not exist.
 */
int nilfs_dat_translate(struct inode *dat, __u64 vblocknr, sector_t *blocknrp)
{
	struct buffer_head *entry_bh, *bh;
	struct nilfs_dat_entry *entry;
	sector_t blocknr;
	void *kaddr;
	int ret;

	ret = nilfs_palloc_get_entry_block(dat, vblocknr, 0, &entry_bh);
	if (ret < 0)
		return ret;

	if (!nilfs_doing_gc() && buffer_nilfs_redirected(entry_bh)) {
		bh = nilfs_mdt_get_frozen_buffer(dat, entry_bh);
		if (bh) {
			WARN_ON(!buffer_uptodate(bh));
			brelse(entry_bh);
			entry_bh = bh;
		}
	}

	kaddr = kmap_atomic(entry_bh->b_page);
	entry = nilfs_palloc_block_get_entry(dat, vblocknr, entry_bh, kaddr);
	blocknr = le64_to_cpu(entry->de_blocknr);
	if (blocknr == 0) {
		ret = -ENOENT;
		goto out;
	}
	*blocknrp = blocknr;

 out:
	kunmap_atomic(kaddr);
	brelse(entry_bh);
	return ret;
}

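/**
 * nilfs_dat_get_vinfo - get information on a set of virtual block numbers
 * @dat: DAT file inode
 * @buf: array of nilfs_vinfo structures with vi_vblocknr filled in
 * @visz: size of one nilfs_vinfo structure
 * @nvi: number of elements in @buf
 *
 * For each requested virtual block number, fills in the start and end
 * checkpoint numbers and the disk block number from its DAT entry.
 * Consecutive requests that fall in the same entry block are served from
 * a single block read, so sorting @buf by vi_vblocknr keeps I/O down.
 *
 * Return Value: @nvi on success, or a negative error code on failure.
 */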
ssize_t nilfs_dat_get_vinfo(struct inode *dat, void *buf, unsigned int visz,
			    size_t nvi)
{
	struct buffer_head *entry_bh;
	struct nilfs_dat_entry *entry;
	struct nilfs_vinfo *vinfo = buf;
	__u64 first, last;
	void *kaddr;
	unsigned long entries_per_block = NILFS_MDT(dat)->mi_entries_per_block;
	int i, j, n, ret;

	for (i = 0; i < nvi; i += n) {
		ret = nilfs_palloc_get_entry_block(dat, vinfo->vi_vblocknr,
						   0, &entry_bh);
		if (ret < 0)
			return ret;
		kaddr = kmap_atomic(entry_bh->b_page);
		/* last virtual block number in this block */
		first = vinfo->vi_vblocknr;
		do_div(first, entries_per_block);
		first *= entries_per_block;
		last = first + entries_per_block - 1;
		for (j = i, n = 0;
		     j < nvi && vinfo->vi_vblocknr >= first &&
			     vinfo->vi_vblocknr <= last;
		     j++, n++, vinfo = (void *)vinfo + visz) {
			entry = nilfs_palloc_block_get_entry(
				dat, vinfo->vi_vblocknr, entry_bh, kaddr);
			vinfo->vi_start = le64_to_cpu(entry->de_start);
			vinfo->vi_end = le64_to_cpu(entry->de_end);
			vinfo->vi_blocknr = le64_to_cpu(entry->de_blocknr);
		}
		kunmap_atomic(kaddr);
		brelse(entry_bh);
	}

	return nvi;
}

/**
 * nilfs_dat_read - read or get the DAT inode
 * @sb: super block instance
 * @entry_size: size of a DAT entry
 * @raw_inode: on-disk DAT inode
 * @inodep: buffer to store the inode
 *
 * Return Value: On success, 0 is returned and the DAT inode is stored in
 * the place pointed by @inodep. On error, a negative error code is
 * returned.
 */
int nilfs_dat_read(struct super_block *sb, size_t entry_size,
		   struct nilfs_inode *raw_inode, struct inode **inodep)
{
	static struct lock_class_key dat_lock_key;
	struct inode *dat;
	struct nilfs_dat_info *di;
	int err;

	if (entry_size > sb->s_blocksize) {
		nilfs_err(sb, "too large DAT entry size: %zu bytes",
			  entry_size);
		return -EINVAL;
	} else if (entry_size < NILFS_MIN_DAT_ENTRY_SIZE) {
		nilfs_err(sb, "too small DAT entry size: %zu bytes",
			  entry_size);
		return -EINVAL;
	}

	dat = nilfs_iget_locked(sb, NULL, NILFS_DAT_INO);
	if (unlikely(!dat))
		return -ENOMEM;
	if (!(dat->i_state & I_NEW))
		goto out;

	err = nilfs_mdt_init(dat, NILFS_MDT_GFP, sizeof(*di));
	if (err)
		goto failed;

	err = nilfs_palloc_init_blockgroup(dat, entry_size);
	if (err)
		goto failed;

	di = NILFS_DAT_I(dat);
	lockdep_set_class(&di->mi.mi_sem, &dat_lock_key);
	nilfs_palloc_setup_cache(dat, &di->palloc_cache);
	err = nilfs_mdt_setup_shadow_map(dat, &di->shadow);
	if (err)
		goto failed;

	err = nilfs_read_inode_common(dat, raw_inode);
	if (err)
		goto failed;

	unlock_new_inode(dat);
 out:
	*inodep = dat;
	return 0;
 failed:
	iget_failed(dat);
	return err;
}