Lines Matching +full:no +full:-map (fs/erofs/data.c, Linux kernel)

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017-2018 HUAWEI, Inc.
 */
#include "internal.h"
#include <trace/events/erofs.h>
void erofs_unmap_metabuf(struct erofs_buf *buf)
{
        if (buf->kmap_type == EROFS_KMAP)
                kunmap_local(buf->base);
        buf->base = NULL;
        buf->kmap_type = EROFS_NO_KMAP;
}

void erofs_put_metabuf(struct erofs_buf *buf)
{
        if (!buf->page)
                return;
        erofs_unmap_metabuf(buf);
        put_page(buf->page);
        buf->page = NULL;
}
/*
 * Derive the block size from inode->i_blkbits, so that this also works
 * with the anonymous inode used in fscache mode.
 */
void *erofs_bread(struct erofs_buf *buf, erofs_blk_t blkaddr,
                  enum erofs_kmap_type type)
{
        struct inode *inode = buf->inode;
        erofs_off_t offset = (erofs_off_t)blkaddr << inode->i_blkbits;
        pgoff_t index = offset >> PAGE_SHIFT;
        struct page *page = buf->page;
        struct folio *folio;

        if (!page || page->index != index) {
                erofs_put_metabuf(buf);
                folio = read_cache_folio(inode->i_mapping, index, NULL, NULL);
                if (IS_ERR(folio))
                        return folio;
                /* should already be PageUptodate, no need to lock page */
                page = folio_file_page(folio, index);
                buf->page = page;
        }
        if (buf->kmap_type == EROFS_NO_KMAP) {
                if (type == EROFS_KMAP)
                        buf->base = kmap_local_page(page);
                buf->kmap_type = type;
        } else if (buf->kmap_type != type) {
                DBG_BUGON(1);
                return ERR_PTR(-EFAULT);
        }
        if (type == EROFS_NO_KMAP)
                return NULL;
        return buf->base + (offset & ~PAGE_MASK);
}

void erofs_init_metabuf(struct erofs_buf *buf, struct super_block *sb)
{
        if (erofs_is_fscache_mode(sb))
                buf->inode = EROFS_SB(sb)->s_fscache->inode;
        else
                buf->inode = sb->s_bdev->bd_inode;
}
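
Taken together, the metabuf helpers form a small init/map/release cycle. A minimal sketch of a caller, assuming a hypothetical helper name (the full file wraps the first two steps as erofs_read_metabuf()):

static void *read_one_meta_block(struct super_block *sb, erofs_blk_t blkaddr)
{
        struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
        void *kaddr;

        erofs_init_metabuf(&buf, sb);   /* pick bdev or fscache backing inode */
        kaddr = erofs_bread(&buf, blkaddr, EROFS_KMAP);
        if (IS_ERR(kaddr))
                return kaddr;
        /* ... parse on-disk metadata at kaddr while the mapping is live ... */
        erofs_put_metabuf(&buf);        /* kunmap_local() + put_page() */
        return NULL;
}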
static int erofs_map_blocks_flatmode(struct inode *inode,
                                     struct erofs_map_blocks *map)
{
        erofs_blk_t nblocks, lastblk;
        u64 offset = map->m_la;
        struct erofs_inode *vi = EROFS_I(inode);
        struct super_block *sb = inode->i_sb;
        bool tailendpacking = (vi->datalayout == EROFS_INODE_FLAT_INLINE);

        nblocks = erofs_iblks(inode);
        lastblk = nblocks - tailendpacking;

        /* there is no hole in flatmode */
        map->m_flags = EROFS_MAP_MAPPED;
        if (offset < erofs_pos(sb, lastblk)) {
                map->m_pa = erofs_pos(sb, vi->raw_blkaddr) + map->m_la;
                map->m_plen = erofs_pos(sb, lastblk) - offset;
        } else if (tailendpacking) {
                map->m_pa = erofs_iloc(inode) + vi->inode_isize +
                        vi->xattr_isize + erofs_blkoff(sb, offset);
                map->m_plen = inode->i_size - offset;

                /* inline data must stay within the same meta block */
                if (erofs_blkoff(sb, map->m_pa) + map->m_plen > sb->s_blocksize) {
                        erofs_err(sb, "inline data cross block boundary @ nid %llu",
                                  vi->nid);
                        DBG_BUGON(1);
                        return -EFSCORRUPTED;
                }
                map->m_flags |= EROFS_MAP_META;
        } else {
                erofs_err(sb, "internal error @ nid: %llu (size %llu), m_la 0x%llx",
                          vi->nid, inode->i_size, map->m_la);
                DBG_BUGON(1);
                return -EIO;
        }
        return 0;
}
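
The two branches are easiest to see with concrete numbers; a worked example under assumed values (4 KiB blocks, FLAT_INLINE layout):

/*
 * Assumed: blocksize 4096, i_size = 5000 => nblocks = 2,
 * tailendpacking = true => lastblk = 2 - 1 = 1.
 *
 *   m_la = 100:  100 < erofs_pos(sb, 1) = 4096, so the plain branch:
 *                m_pa   = erofs_pos(sb, raw_blkaddr) + 100
 *                m_plen = 4096 - 100 = 3996
 *
 *   m_la = 4500: tail-packing branch; the 904-byte tail lives right
 *                after the inode in its metadata block:
 *                m_pa   = iloc + inode_isize + xattr_isize
 *                         + erofs_blkoff(sb, 4500)    (= 4500 % 4096 = 404)
 *                m_plen = 5000 - 4500 = 500
 *                and m_pa's in-block offset + 500 must not cross the
 *                4096-byte meta block, else -EFSCORRUPTED.
 */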
int erofs_map_blocks(struct inode *inode, struct erofs_map_blocks *map)
{
        struct super_block *sb = inode->i_sb;
        struct erofs_inode *vi = EROFS_I(inode);
        struct erofs_inode_chunk_index *idx;
        struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
        u64 chunknr;
        unsigned int unit;
        erofs_off_t pos;
        void *kaddr;
        int err = 0;

        trace_erofs_map_blocks_enter(inode, map, 0);
        map->m_deviceid = 0;
        if (map->m_la >= inode->i_size) {
                /* leave out-of-bound access unmapped */
                map->m_flags = 0;
                map->m_plen = 0;
                goto out;
        }
        if (vi->datalayout != EROFS_INODE_CHUNK_BASED) {
                err = erofs_map_blocks_flatmode(inode, map);
                goto out;
        }
        if (vi->chunkformat & EROFS_CHUNK_FORMAT_INDEXES)
                unit = sizeof(*idx);                    /* chunk index */
        else
                unit = EROFS_BLOCK_MAP_ENTRY_SIZE;      /* block map */

        chunknr = map->m_la >> vi->chunkbits;
        pos = ALIGN(erofs_iloc(inode) + vi->inode_isize +
                    vi->xattr_isize, unit) + unit * chunknr;

        kaddr = erofs_read_metabuf(&buf, sb, erofs_blknr(sb, pos), EROFS_KMAP);
        if (IS_ERR(kaddr)) {
                err = PTR_ERR(kaddr);
                goto out;
        }
        map->m_la = chunknr << vi->chunkbits;
        map->m_plen = min_t(erofs_off_t, 1UL << vi->chunkbits,
                            round_up(inode->i_size - map->m_la, sb->s_blocksize));

        /* handle block map */
        if (!(vi->chunkformat & EROFS_CHUNK_FORMAT_INDEXES)) {
                __le32 *blkaddr = kaddr + erofs_blkoff(sb, pos);

                if (le32_to_cpu(*blkaddr) == EROFS_NULL_ADDR) {
                        map->m_flags = 0;
                } else {
                        map->m_pa = erofs_pos(sb, le32_to_cpu(*blkaddr));
                        map->m_flags = EROFS_MAP_MAPPED;
                }
                goto out_unlock;
        }
        /* parse chunk indexes */
        idx = kaddr + erofs_blkoff(sb, pos);
        switch (le32_to_cpu(idx->blkaddr)) {
        case EROFS_NULL_ADDR:
                map->m_flags = 0;
                break;
        default:
                map->m_deviceid = le16_to_cpu(idx->device_id) &
                        EROFS_SB(sb)->device_id_mask;
                map->m_pa = erofs_pos(sb, le32_to_cpu(idx->blkaddr));
                map->m_flags = EROFS_MAP_MAPPED;
                break;
        }
out_unlock:
        erofs_put_metabuf(&buf);
out:
        if (!err)
                map->m_llen = map->m_plen;
        trace_erofs_map_blocks_exit(inode, map, 0, err);
        return err;
}
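
The position arithmetic for chunk-format inodes, worked through once with assumed on-disk values:

/*
 * Assumed: 4 KiB blocks, chunkbits = 20 (1 MiB chunks), block-map
 * format (EROFS_BLOCK_MAP_ENTRY_SIZE = 4), iloc = 8192,
 * inode_isize = 32, xattr_isize = 0:
 *
 *   m_la = 0x340000 => chunknr = 0x340000 >> 20 = 3
 *   pos  = ALIGN(8192 + 32 + 0, 4) + 4 * 3 = 8224 + 12 = 8236
 *
 * so the LE32 block address of chunk 3 sits at byte 8236 of the
 * metadata inode; EROFS_NULL_ADDR there denotes a hole.
 */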
int erofs_map_dev(struct super_block *sb, struct erofs_map_dev *map)
{
        struct erofs_dev_context *devs = EROFS_SB(sb)->devs;
        struct erofs_device_info *dif;
        int id;

        map->m_bdev = sb->s_bdev;
        map->m_daxdev = EROFS_SB(sb)->dax_dev;
        map->m_dax_part_off = EROFS_SB(sb)->dax_part_off;
        map->m_fscache = EROFS_SB(sb)->s_fscache;

        if (map->m_deviceid) {
                down_read(&devs->rwsem);
                dif = idr_find(&devs->tree, map->m_deviceid - 1);
                if (!dif) {
                        up_read(&devs->rwsem);
                        return -ENODEV;
                }
                if (devs->flatdev) {
                        map->m_pa += erofs_pos(sb, dif->mapped_blkaddr);
                        up_read(&devs->rwsem);
                        return 0;
                }
                map->m_bdev = dif->bdev;
                map->m_daxdev = dif->dax_dev;
                map->m_dax_part_off = dif->dax_part_off;
                map->m_fscache = dif->fscache;
                up_read(&devs->rwsem);
        } else if (devs->extra_devices && !devs->flatdev) {
                down_read(&devs->rwsem);
                idr_for_each_entry(&devs->tree, dif, id) {
                        erofs_off_t startoff, length;

                        if (!dif->mapped_blkaddr)
                                continue;
                        startoff = erofs_pos(sb, dif->mapped_blkaddr);
                        length = erofs_pos(sb, dif->blocks);

                        if (map->m_pa >= startoff &&
                            map->m_pa < startoff + length) {
                                map->m_pa -= startoff;
                                map->m_bdev = dif->bdev;
                                map->m_daxdev = dif->dax_dev;
                                map->m_dax_part_off = dif->dax_part_off;
                                map->m_fscache = dif->fscache;
                                break;
                        }
                }
                up_read(&devs->rwsem);
        }
        return 0;
}
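
How a physical address is rebased onto an extra device, with assumed numbers:

/*
 * Assumed: 4 KiB blocks; an extra device mapped at blkaddr 256 with
 * 128 blocks:
 *   startoff = erofs_pos(sb, 256) = 1048576, length = 524288
 * A request with m_pa = 1050624 satisfies
 *   1048576 <= 1050624 < 1572864
 * so it is rebased to m_pa = 1050624 - 1048576 = 2048 on dif->bdev.
 */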
static int erofs_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
                unsigned int flags, struct iomap *iomap, struct iomap *srcmap)
{
        int ret;
        struct super_block *sb = inode->i_sb;
        struct erofs_map_blocks map;
        struct erofs_map_dev mdev;

        map.m_la = offset;
        map.m_llen = length;

        ret = erofs_map_blocks(inode, &map);
        if (ret < 0)
                return ret;

        mdev = (struct erofs_map_dev) {
                .m_deviceid = map.m_deviceid,
                .m_pa = map.m_pa,
        };
        ret = erofs_map_dev(sb, &mdev);
        if (ret)
                return ret;

        iomap->offset = map.m_la;
        if (flags & IOMAP_DAX)
                iomap->dax_dev = mdev.m_daxdev;
        else
                iomap->bdev = mdev.m_bdev;
        iomap->length = map.m_llen;
        iomap->flags = 0;
        iomap->private = NULL;

        if (!(map.m_flags & EROFS_MAP_MAPPED)) {
                iomap->type = IOMAP_HOLE;
                iomap->addr = IOMAP_NULL_ADDR;
                if (!iomap->length)
                        iomap->length = length;
                return 0;
        }

        if (map.m_flags & EROFS_MAP_META) {
                void *ptr;
                struct erofs_buf buf = __EROFS_BUF_INITIALIZER;

                iomap->type = IOMAP_INLINE;
                ptr = erofs_read_metabuf(&buf, sb,
                                erofs_blknr(sb, mdev.m_pa), EROFS_KMAP);
                if (IS_ERR(ptr))
                        return PTR_ERR(ptr);
                iomap->inline_data = ptr + erofs_blkoff(sb, mdev.m_pa);
                iomap->private = buf.base;
        } else {
                iomap->type = IOMAP_MAPPED;
                iomap->addr = mdev.m_pa;
                if (flags & IOMAP_DAX)
                        iomap->addr += mdev.m_dax_part_off;
        }
        return 0;
}
static int erofs_iomap_end(struct inode *inode, loff_t pos, loff_t length,
                ssize_t written, unsigned int flags, struct iomap *iomap)
{
        void *ptr = iomap->private;

        if (ptr) {
                /* rebuild the erofs_buf pinned for IOMAP_INLINE, then drop it */
                struct erofs_buf buf = {
                        .page = kmap_to_page(ptr),
                        .base = ptr,
                        .kmap_type = EROFS_KMAP,
                };

                DBG_BUGON(iomap->type != IOMAP_INLINE);
                erofs_put_metabuf(&buf);
        } else {
                DBG_BUGON(iomap->type == IOMAP_INLINE);
        }
        return 0;
}
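
The two callbacks above are published through a single ops table; a sketch matching how the full file wires them up (the definition itself is not among the matched lines):

static const struct iomap_ops erofs_iomap_ops = {
        .iomap_begin = erofs_iomap_begin,
        .iomap_end   = erofs_iomap_end,
};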
int erofs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
                 u64 start, u64 len)
{
        if (erofs_inode_is_data_compressed(EROFS_I(inode)->datalayout)) {
#ifdef CONFIG_EROFS_FS_ZIP
                return iomap_fiemap(inode, fieinfo, start, len,
                                    &z_erofs_iomap_report_ops);
#else
                return -EOPNOTSUPP;
#endif
        }
        return iomap_fiemap(inode, fieinfo, start, len, &erofs_iomap_ops);
}
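
A hypothetical userspace probe that exercises this path through the FIEMAP ioctl; with fm_extent_count == 0 the kernel only reports how many extents the file has:

#include <fcntl.h>
#include <linux/fiemap.h>
#include <linux/fs.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>

int main(int argc, char **argv)
{
        struct fiemap fm;
        int fd;

        if (argc < 2 || (fd = open(argv[1], O_RDONLY)) < 0)
                return 1;
        memset(&fm, 0, sizeof(fm));
        fm.fm_length = ~0ULL;           /* whole file, fm_extent_count = 0 */
        if (ioctl(fd, FS_IOC_FIEMAP, &fm) < 0)
                perror("FS_IOC_FIEMAP");
        else
                printf("extents: %u\n", fm.fm_mapped_extents);
        return 0;
}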
/*
 * Since there are no write or truncate flows, no inode locking
 * needs to be held at the moment.
 */
static ssize_t erofs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
        struct inode *inode = file_inode(iocb->ki_filp);

        /* no need taking (shared) inode lock since it's a ro filesystem */
        /* ... zero-length and DAX fast paths elided ... */
        if (iocb->ki_flags & IOCB_DIRECT) {
                struct block_device *bdev = inode->i_sb->s_bdev;
                unsigned int blksize_mask;

                if (bdev)
                        blksize_mask = bdev_logical_block_size(bdev) - 1;
                else
                        blksize_mask = i_blocksize(inode) - 1;

                /* O_DIRECT must be position-, length- and buffer-aligned */
                if ((iocb->ki_pos | iov_iter_count(to) |
                     iov_iter_alignment(to)) & blksize_mask)
                        return -EINVAL;

                return iomap_dio_rw(iocb, to, &erofs_iomap_ops,
                                    NULL, 0, NULL, 0);
        }
        return filemap_read(iocb, to, 0);
}
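
The combined OR-mask alignment check in one concrete case (assumed 512-byte logical block size):

/*
 * blksize_mask = 512 - 1 = 511 (0x1ff)
 *   ki_pos = 4096, count = 8192, iov_iter_alignment() = 0:
 *     (4096 | 8192 | 0) & 511 == 0   => direct I/O proceeds
 *   ki_pos = 4100:
 *     (4100 | 8192 | 0) & 511 == 4   => -EINVAL
 */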
static int erofs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
        if (!IS_DAX(file_inode(file)))
                return generic_file_readonly_mmap(file, vma);

        /* writable shared mappings make no sense on a read-only fs */
        if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
                return -EINVAL;

        vma->vm_ops = &erofs_dax_vm_ops;
        vm_flags_set(vma, VM_HUGEPAGE);
        return 0;
}
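
For completeness, a sketch of how the read and mmap handlers above are typically published; the exact field set below is an assumption based on mainline erofs of this era, not part of the matched lines:

const struct file_operations erofs_file_fops = {
        .llseek      = generic_file_llseek,
        .read_iter   = erofs_file_read_iter,
        .mmap        = erofs_file_mmap,
        .splice_read = generic_file_splice_read,
};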