// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017-2018 HUAWEI, Inc.
 *             https://www.huawei.com/
 * Copyright (C) 2021, Alibaba Cloud
 */
#include "internal.h"
#include <linux/prefetch.h>
#include <linux/sched/mm.h>
#include <linux/dax.h>
#include <trace/events/erofs.h>

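/* Drop the kernel mapping of a metadata buffer without releasing its page */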
void erofs_unmap_metabuf(struct erofs_buf *buf)
{
	if (buf->kmap_type == EROFS_KMAP)
		kunmap(buf->page);
	else if (buf->kmap_type == EROFS_KMAP_ATOMIC)
		kunmap_atomic(buf->base);
	buf->base = NULL;
	buf->kmap_type = EROFS_NO_KMAP;
}

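/* Unmap and put the page cache page backing a metadata buffer */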
void erofs_put_metabuf(struct erofs_buf *buf)
{
	if (!buf->page)
		return;
	erofs_unmap_metabuf(buf);
	put_page(buf->page);
	buf->page = NULL;
}

/*
 * Derive the block size from inode->i_blkbits to stay compatible with
 * the anonymous inode used in fscache mode.
 */
void *erofs_bread(struct erofs_buf *buf, struct inode *inode,
		  erofs_blk_t blkaddr, enum erofs_kmap_type type)
{
	erofs_off_t offset = (erofs_off_t)blkaddr << inode->i_blkbits;
	struct address_space *const mapping = inode->i_mapping;
	pgoff_t index = offset >> PAGE_SHIFT;
	struct page *page = buf->page;
	struct folio *folio;
	unsigned int nofs_flag;

	if (!page || page->index != index) {
		erofs_put_metabuf(buf);

		nofs_flag = memalloc_nofs_save();
		folio = read_cache_folio(mapping, index, NULL, NULL);
		memalloc_nofs_restore(nofs_flag);
		if (IS_ERR(folio))
			return folio;

		/* should already be PageUptodate, no need to lock page */
		page = folio_file_page(folio, index);
		buf->page = page;
	}
	if (buf->kmap_type == EROFS_NO_KMAP) {
		if (type == EROFS_KMAP)
			buf->base = kmap(page);
		else if (type == EROFS_KMAP_ATOMIC)
			buf->base = kmap_atomic(page);
		buf->kmap_type = type;
	} else if (buf->kmap_type != type) {
		DBG_BUGON(1);
		return ERR_PTR(-EFAULT);
	}
	if (type == EROFS_NO_KMAP)
		return NULL;
	return buf->base + (offset & ~PAGE_MASK);
}

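/*
 * Read one metadata block into *buf and return a kmapped pointer to it.
 * In fscache mode, metadata lives in the pseudo inode s_fscache->inode;
 * otherwise it is read through the block device inode.
 *
 * A minimal usage sketch (the pattern erofs_map_blocks() below follows):
 *
 *	struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
 *	void *kaddr = erofs_read_metabuf(&buf, sb, blkaddr, EROFS_KMAP);
 *
 *	if (IS_ERR(kaddr))
 *		return PTR_ERR(kaddr);
 *	... parse on-disk metadata at kaddr ...
 *	erofs_put_metabuf(&buf);
 */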
void *erofs_read_metabuf(struct erofs_buf *buf, struct super_block *sb,
			 erofs_blk_t blkaddr, enum erofs_kmap_type type)
{
	if (erofs_is_fscache_mode(sb))
		return erofs_bread(buf, EROFS_SB(sb)->s_fscache->inode,
				   blkaddr, type);

	return erofs_bread(buf, sb->s_bdev->bd_inode, blkaddr, type);
}

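/*
 * Map a logical offset for flat-layout inodes: data blocks are laid out
 * contiguously starting at vi->raw_blkaddr, optionally followed by tail
 * data inlined in the inode metadata block (EROFS_INODE_FLAT_INLINE).
 */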
static int erofs_map_blocks_flatmode(struct inode *inode,
				     struct erofs_map_blocks *map,
				     int flags)
{
	erofs_blk_t nblocks, lastblk;
	u64 offset = map->m_la;
	struct erofs_inode *vi = EROFS_I(inode);
	struct super_block *sb = inode->i_sb;
	bool tailendpacking = (vi->datalayout == EROFS_INODE_FLAT_INLINE);

	nblocks = erofs_iblks(inode);
	lastblk = nblocks - tailendpacking;

	/* there is no hole in flatmode */
	map->m_flags = EROFS_MAP_MAPPED;
	if (offset < erofs_pos(sb, lastblk)) {
		map->m_pa = erofs_pos(sb, vi->raw_blkaddr) + map->m_la;
		map->m_plen = erofs_pos(sb, lastblk) - offset;
	} else if (tailendpacking) {
		map->m_pa = erofs_iloc(inode) + vi->inode_isize +
			vi->xattr_isize + erofs_blkoff(sb, offset);
		map->m_plen = inode->i_size - offset;

		/* inline data should be located in the same meta block */
		if (erofs_blkoff(sb, map->m_pa) + map->m_plen > sb->s_blocksize) {
			erofs_err(sb, "inline data cross block boundary @ nid %llu",
				  vi->nid);
			DBG_BUGON(1);
			return -EFSCORRUPTED;
		}
		map->m_flags |= EROFS_MAP_META;
	} else {
		erofs_err(sb, "internal error @ nid: %llu (size %llu), m_la 0x%llx",
			  vi->nid, inode->i_size, map->m_la);
		DBG_BUGON(1);
		return -EIO;
	}
	return 0;
}

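/*
 * Turn the logical extent starting at map->m_la into a physical one
 * (m_pa/m_plen/m_deviceid).  Flat and tail-packed inodes are handled by
 * erofs_map_blocks_flatmode(); chunk-based inodes are resolved through
 * the on-disk block map or chunk index array.
 */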
int erofs_map_blocks(struct inode *inode,
		     struct erofs_map_blocks *map, int flags)
{
	struct super_block *sb = inode->i_sb;
	struct erofs_inode *vi = EROFS_I(inode);
	struct erofs_inode_chunk_index *idx;
	struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
	u64 chunknr;
	unsigned int unit;
	erofs_off_t pos;
	void *kaddr;
	int err = 0;

	trace_erofs_map_blocks_enter(inode, map, flags);
	map->m_deviceid = 0;
	if (map->m_la >= inode->i_size) {
		/* leave out-of-bounds access unmapped */
		map->m_flags = 0;
		map->m_plen = 0;
		goto out;
	}

	if (vi->datalayout != EROFS_INODE_CHUNK_BASED) {
		err = erofs_map_blocks_flatmode(inode, map, flags);
		goto out;
	}

	if (vi->chunkformat & EROFS_CHUNK_FORMAT_INDEXES)
		unit = sizeof(*idx);			/* chunk index */
	else
		unit = EROFS_BLOCK_MAP_ENTRY_SIZE;	/* block map */

	chunknr = map->m_la >> vi->chunkbits;
	pos = ALIGN(erofs_iloc(inode) + vi->inode_isize +
		    vi->xattr_isize, unit) + unit * chunknr;

	kaddr = erofs_read_metabuf(&buf, sb, erofs_blknr(sb, pos), EROFS_KMAP);
	if (IS_ERR(kaddr)) {
		err = PTR_ERR(kaddr);
		goto out;
	}
	map->m_la = chunknr << vi->chunkbits;
	map->m_plen = min_t(erofs_off_t, 1UL << vi->chunkbits,
			round_up(inode->i_size - map->m_la, sb->s_blocksize));

	/* handle block map */
	if (!(vi->chunkformat & EROFS_CHUNK_FORMAT_INDEXES)) {
		__le32 *blkaddr = kaddr + erofs_blkoff(sb, pos);

		if (le32_to_cpu(*blkaddr) == EROFS_NULL_ADDR) {
			map->m_flags = 0;
		} else {
			map->m_pa = erofs_pos(sb, le32_to_cpu(*blkaddr));
			map->m_flags = EROFS_MAP_MAPPED;
		}
		goto out_unlock;
	}
	/* parse chunk indexes */
	idx = kaddr + erofs_blkoff(sb, pos);
	switch (le32_to_cpu(idx->blkaddr)) {
	case EROFS_NULL_ADDR:
		map->m_flags = 0;
		break;
	default:
		map->m_deviceid = le16_to_cpu(idx->device_id) &
			EROFS_SB(sb)->device_id_mask;
		map->m_pa = erofs_pos(sb, le32_to_cpu(idx->blkaddr));
		map->m_flags = EROFS_MAP_MAPPED;
		break;
	}
out_unlock:
	erofs_put_metabuf(&buf);
out:
	if (!err)
		map->m_llen = map->m_plen;
	trace_erofs_map_blocks_exit(inode, map, flags, 0);
	return err;
}

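/*
 * Resolve map->m_deviceid / map->m_pa to the backing device.  Device id 0
 * means the primary device; otherwise the extra-device IDR is consulted,
 * either directly by id or by scanning the mapped block ranges.
 */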
int erofs_map_dev(struct super_block *sb, struct erofs_map_dev *map)
{
	struct erofs_dev_context *devs = EROFS_SB(sb)->devs;
	struct erofs_device_info *dif;
	int id;

	/* primary device by default */
	map->m_bdev = sb->s_bdev;
	map->m_daxdev = EROFS_SB(sb)->dax_dev;
	map->m_dax_part_off = EROFS_SB(sb)->dax_part_off;
	map->m_fscache = EROFS_SB(sb)->s_fscache;

	if (map->m_deviceid) {
		down_read(&devs->rwsem);
		dif = idr_find(&devs->tree, map->m_deviceid - 1);
		if (!dif) {
			up_read(&devs->rwsem);
			return -ENODEV;
		}
		map->m_bdev = dif->bdev;
		map->m_daxdev = dif->dax_dev;
		map->m_dax_part_off = dif->dax_part_off;
		map->m_fscache = dif->fscache;
		up_read(&devs->rwsem);
	} else if (devs->extra_devices) {
		down_read(&devs->rwsem);
		idr_for_each_entry(&devs->tree, dif, id) {
			erofs_off_t startoff, length;

			if (!dif->mapped_blkaddr)
				continue;
			startoff = erofs_pos(sb, dif->mapped_blkaddr);
			length = erofs_pos(sb, dif->blocks);

			if (map->m_pa >= startoff &&
			    map->m_pa < startoff + length) {
				map->m_pa -= startoff;
				map->m_bdev = dif->bdev;
				map->m_daxdev = dif->dax_dev;
				map->m_dax_part_off = dif->dax_part_off;
				map->m_fscache = dif->fscache;
				break;
			}
		}
		up_read(&devs->rwsem);
	}
	return 0;
}

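/*
 * iomap_begin hook: translate [offset, offset + length) into an iomap,
 * reporting holes, inline (tail-packed) extents and plain mapped extents.
 * Inline extents keep the metadata buffer kmapped; iomap->private carries
 * its base address so that erofs_iomap_end() can release it.
 */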
static int erofs_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
		unsigned int flags, struct iomap *iomap, struct iomap *srcmap)
{
	int ret;
	struct super_block *sb = inode->i_sb;
	struct erofs_map_blocks map;
	struct erofs_map_dev mdev;

	map.m_la = offset;
	map.m_llen = length;

	ret = erofs_map_blocks(inode, &map, EROFS_GET_BLOCKS_RAW);
	if (ret < 0)
		return ret;

	mdev = (struct erofs_map_dev) {
		.m_deviceid = map.m_deviceid,
		.m_pa = map.m_pa,
	};
	ret = erofs_map_dev(sb, &mdev);
	if (ret)
		return ret;

	iomap->offset = map.m_la;
	if (flags & IOMAP_DAX)
		iomap->dax_dev = mdev.m_daxdev;
	else
		iomap->bdev = mdev.m_bdev;
	iomap->length = map.m_llen;
	iomap->flags = 0;
	iomap->private = NULL;

	if (!(map.m_flags & EROFS_MAP_MAPPED)) {
		iomap->type = IOMAP_HOLE;
		iomap->addr = IOMAP_NULL_ADDR;
		if (!iomap->length)
			iomap->length = length;
		return 0;
	}

	if (map.m_flags & EROFS_MAP_META) {
		void *ptr;
		struct erofs_buf buf = __EROFS_BUF_INITIALIZER;

		iomap->type = IOMAP_INLINE;
		ptr = erofs_read_metabuf(&buf, sb,
				erofs_blknr(sb, mdev.m_pa), EROFS_KMAP);
		if (IS_ERR(ptr))
			return PTR_ERR(ptr);
		iomap->inline_data = ptr + erofs_blkoff(sb, mdev.m_pa);
		iomap->private = buf.base;
	} else {
		iomap->type = IOMAP_MAPPED;
		iomap->addr = mdev.m_pa;
		if (flags & IOMAP_DAX)
			iomap->addr += mdev.m_dax_part_off;
	}
	return 0;
}

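/*
 * Undo erofs_iomap_begin(): for inline extents, rebuild the erofs_buf from
 * the kmapped pointer stashed in iomap->private and put it.
 */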
static int erofs_iomap_end(struct inode *inode, loff_t pos, loff_t length,
		ssize_t written, unsigned int flags, struct iomap *iomap)
{
	void *ptr = iomap->private;

	if (ptr) {
		struct erofs_buf buf = {
			.page = kmap_to_page(ptr),
			.base = ptr,
			.kmap_type = EROFS_KMAP,
		};

		DBG_BUGON(iomap->type != IOMAP_INLINE);
		erofs_put_metabuf(&buf);
	} else {
		DBG_BUGON(iomap->type == IOMAP_INLINE);
	}
	return written;
}

static const struct iomap_ops erofs_iomap_ops = {
	.iomap_begin = erofs_iomap_begin,
	.iomap_end = erofs_iomap_end,
};

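/*
 * FIEMAP: compressed inodes are reported through z_erofs_iomap_report_ops
 * when CONFIG_EROFS_FS_ZIP is enabled; everything else uses the plain
 * iomap ops above.
 */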
int erofs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		 u64 start, u64 len)
{
	if (erofs_inode_is_data_compressed(EROFS_I(inode)->datalayout)) {
#ifdef CONFIG_EROFS_FS_ZIP
		return iomap_fiemap(inode, fieinfo, start, len,
				    &z_erofs_iomap_report_ops);
#else
		return -EOPNOTSUPP;
#endif
	}
	return iomap_fiemap(inode, fieinfo, start, len, &erofs_iomap_ops);
}

/*
 * Since we don't have write or truncate flows, no inode locking needs to
 * be held at the moment.
 */
static int erofs_read_folio(struct file *file, struct folio *folio)
{
	return iomap_read_folio(folio, &erofs_iomap_ops);
}

static void erofs_readahead(struct readahead_control *rac)
{
	return iomap_readahead(rac, &erofs_iomap_ops);
}

static sector_t erofs_bmap(struct address_space *mapping, sector_t block)
{
	return iomap_bmap(mapping, block, &erofs_iomap_ops);
}

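/*
 * Read path: DAX inodes go through dax_iomap_rw(); O_DIRECT requests are
 * checked for logical-block alignment and issued via iomap_dio_rw();
 * everything else falls back to buffered filemap_read().
 */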
static ssize_t erofs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = file_inode(iocb->ki_filp);

	/* no need to take the (shared) inode lock since it's a read-only fs */
	if (!iov_iter_count(to))
		return 0;

#ifdef CONFIG_FS_DAX
	if (IS_DAX(inode))
		return dax_iomap_rw(iocb, to, &erofs_iomap_ops);
#endif
	if (iocb->ki_flags & IOCB_DIRECT) {
		struct block_device *bdev = inode->i_sb->s_bdev;
		unsigned int blksize_mask;

		if (bdev)
			blksize_mask = bdev_logical_block_size(bdev) - 1;
		else
			blksize_mask = (1 << inode->i_blkbits) - 1;

		if ((iocb->ki_pos | iov_iter_count(to) |
		     iov_iter_alignment(to)) & blksize_mask)
			return -EINVAL;

		return iomap_dio_rw(iocb, to, &erofs_iomap_ops,
				    NULL, 0, NULL, 0);
	}
	return filemap_read(iocb, to, 0);
}

/* for uncompressed (aligned) files and raw access for other files */
const struct address_space_operations erofs_raw_access_aops = {
	.read_folio = erofs_read_folio,
	.readahead = erofs_readahead,
	.bmap = erofs_bmap,
	.direct_IO = noop_direct_IO,
	.release_folio = iomap_release_folio,
	.invalidate_folio = iomap_invalidate_folio,
};

#ifdef CONFIG_FS_DAX
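/* DAX faults are served read-only through the same erofs_iomap_ops */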
static vm_fault_t erofs_dax_huge_fault(struct vm_fault *vmf,
		enum page_entry_size pe_size)
{
	return dax_iomap_fault(vmf, pe_size, NULL, NULL, &erofs_iomap_ops);
}

static vm_fault_t erofs_dax_fault(struct vm_fault *vmf)
{
	return erofs_dax_huge_fault(vmf, PE_SIZE_PTE);
}

static const struct vm_operations_struct erofs_dax_vm_ops = {
	.fault		= erofs_dax_fault,
	.huge_fault	= erofs_dax_huge_fault,
};

static int erofs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!IS_DAX(file_inode(file)))
		return generic_file_readonly_mmap(file, vma);

	if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
		return -EINVAL;

	vma->vm_ops = &erofs_dax_vm_ops;
	vm_flags_set(vma, VM_HUGEPAGE);
	return 0;
}
#else
#define erofs_file_mmap	generic_file_readonly_mmap
#endif

const struct file_operations erofs_file_fops = {
	.llseek		= generic_file_llseek,
	.read_iter	= erofs_file_read_iter,
	.mmap		= erofs_file_mmap,
	.splice_read	= generic_file_splice_read,
};