/* SPDX-License-Identifier: GPL-2.0 */
#ifndef LINUX_IOMAP_H
#define LINUX_IOMAP_H 1

#include <linux/atomic.h>
#include <linux/bitmap.h>
#include <linux/blk_types.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/mm_types.h>
#include <linux/blkdev.h>
#include <linux/android_kabi.h>

struct address_space;
struct fiemap_extent_info;
struct inode;
struct iomap_iter;
struct iomap_dio;
struct iomap_writepage_ctx;
struct iov_iter;
struct kiocb;
struct page;
struct vm_area_struct;
struct vm_fault;

/*
 * Types of block ranges for iomap mappings:
 */
#define IOMAP_HOLE	0	/* no blocks allocated, need allocation */
#define IOMAP_DELALLOC	1	/* delayed allocation blocks */
#define IOMAP_MAPPED	2	/* blocks allocated at @addr */
#define IOMAP_UNWRITTEN	3	/* blocks allocated at @addr in unwritten state */
#define IOMAP_INLINE	4	/* data inline in the inode */

/*
 * Flags reported by the file system from iomap_begin:
 *
 * IOMAP_F_NEW indicates that the blocks have been newly allocated and need
 * zeroing for areas to which no data is copied.
 *
 * IOMAP_F_DIRTY indicates the inode has uncommitted metadata needed to access
 * written data and requires fdatasync to commit it to persistent storage.
 * This needs to take into account metadata changes that *may* be made at IO
 * completion, such as file size updates from direct IO.
 *
 * IOMAP_F_SHARED indicates that the blocks are shared, and will need to be
 * unshared as part of a write.
 *
 * IOMAP_F_MERGED indicates that the iomap contains the merge of multiple block
 * mappings.
 *
 * IOMAP_F_BUFFER_HEAD indicates that the file system requires the use of
 * buffer heads for this mapping.
 *
 * IOMAP_F_XATTR indicates that the iomap is for an extended attribute extent
 * rather than a file data extent.
 */
#define IOMAP_F_NEW		(1U << 0)
#define IOMAP_F_DIRTY		(1U << 1)
#define IOMAP_F_SHARED		(1U << 2)
#define IOMAP_F_MERGED		(1U << 3)
#ifdef CONFIG_BUFFER_HEAD
#define IOMAP_F_BUFFER_HEAD	(1U << 4)
#else
#define IOMAP_F_BUFFER_HEAD	0
#endif /* CONFIG_BUFFER_HEAD */
#define IOMAP_F_XATTR		(1U << 5)

/*
 * Flags set by the core iomap code during operations:
 *
 * IOMAP_F_SIZE_CHANGED indicates to the iomap_end method that the file size
 * has changed as the result of this write operation.
 *
 * IOMAP_F_STALE indicates that the iomap is not valid any longer and the file
 * range it covers needs to be remapped by the high level before the operation
 * can proceed.
 */
#define IOMAP_F_SIZE_CHANGED	(1U << 8)
#define IOMAP_F_STALE		(1U << 9)

/*
 * Flags from 0x1000 up are for file system specific usage:
 */
#define IOMAP_F_PRIVATE		(1U << 12)


/*
 * Magic value for addr:
 */
#define IOMAP_NULL_ADDR -1ULL	/* addr is not valid */

struct iomap_folio_ops;

struct iomap {
	u64			addr;	/* disk offset of mapping, bytes */
	loff_t			offset;	/* file offset of mapping, bytes */
	u64			length;	/* length of mapping, bytes */
	u16			type;	/* type of mapping */
	u16			flags;	/* flags for mapping */
	struct block_device	*bdev;	/* block device for I/O */
	struct dax_device	*dax_dev; /* dax_dev for dax operations */
	void			*inline_data;
	void			*private; /* filesystem private */
	const struct iomap_folio_ops *folio_ops;
	u64			validity_cookie; /* used with .iomap_valid() */

	ANDROID_KABI_RESERVE(1);
};

static inline sector_t iomap_sector(const struct iomap *iomap, loff_t pos)
{
	return (iomap->addr + pos - iomap->offset) >> SECTOR_SHIFT;
}
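
/*
 * For example, a mapping with addr = 1 MiB and offset = 64 KiB queried at
 * pos = 68 KiB resolves to byte 1 MiB + 4 KiB on disk, i.e. sector 2056 with
 * 512-byte sectors (illustrative numbers only).
 */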

/*
 * Returns the inline data pointer for logical offset @pos.
 */
static inline void *iomap_inline_data(const struct iomap *iomap, loff_t pos)
{
	return iomap->inline_data + pos - iomap->offset;
}

/*
 * Check if the mapping's length is within the valid range for inline data.
 * This is used to guard against accessing data beyond the page inline_data
 * points at.
 */
static inline bool iomap_inline_data_valid(const struct iomap *iomap)
{
	return iomap->length <= PAGE_SIZE - offset_in_page(iomap->inline_data);
}
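
/*
 * A typical caller validates the mapping before dereferencing the inline data
 * pointer, roughly (a sketch, not a prescribed calling convention):
 *
 *	if (WARN_ON_ONCE(!iomap_inline_data_valid(iomap)))
 *		return -EIO;
 *	addr = iomap_inline_data(iomap, pos);
 *	// copy at most iomap->length bytes starting at addr
 */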

/*
 * When a filesystem sets folio_ops in an iomap mapping it returns, get_folio
 * and put_folio will be called for each folio written to.  This only applies
 * to buffered writes as unbuffered writes will not typically have folios
 * associated with them.
 *
 * When get_folio succeeds, put_folio will always be called to do any
 * cleanup work necessary.  put_folio is responsible for unlocking and putting
 * @folio.
 */
struct iomap_folio_ops {
	struct folio *(*get_folio)(struct iomap_iter *iter, loff_t pos,
			unsigned len);
	void (*put_folio)(struct inode *inode, loff_t pos, unsigned copied,
			struct folio *folio);

	/*
	 * Check that the cached iomap still maps correctly to the filesystem's
	 * internal extent map.  FS internal extent maps can change while iomap
	 * is iterating a cached iomap, so this hook allows iomap to detect that
	 * the iomap needs to be refreshed during a long running write
	 * operation.
	 *
	 * The filesystem can store internal state (e.g. a sequence number) in
	 * iomap->validity_cookie when the iomap is first mapped to be able to
	 * detect changes between mapping time and whenever .iomap_valid() is
	 * called.
	 *
	 * This is called with the folio over the specified file position held
	 * locked by the iomap code.
	 */
	bool (*iomap_valid)(struct inode *inode, const struct iomap *iomap);
};
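
/*
 * Example: a filesystem can pair iomap->validity_cookie with a per-inode
 * sequence counter to implement .iomap_valid.  A minimal sketch; EXAMPLE_I()
 * and i_extseq are hypothetical stand-ins for whatever the filesystem uses to
 * track changes to its extent map:
 *
 *	static bool example_iomap_valid(struct inode *inode,
 *			const struct iomap *iomap)
 *	{
 *		return iomap->validity_cookie ==
 *			READ_ONCE(EXAMPLE_I(inode)->i_extseq);
 *	}
 *
 * The cookie is recorded in ->iomap_begin when the mapping is handed out, so
 * a mismatch here means the mapping went stale while the operation was
 * running and the core must re-map the range.
 */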

/*
 * Flags for iomap_begin / iomap_end.  No flag implies a read.
 */
#define IOMAP_WRITE		(1 << 0) /* writing, must allocate blocks */
#define IOMAP_ZERO		(1 << 1) /* zeroing operation, may skip holes */
#define IOMAP_REPORT		(1 << 2) /* report extent status, e.g. FIEMAP */
#define IOMAP_FAULT		(1 << 3) /* mapping for page fault */
#define IOMAP_DIRECT		(1 << 4) /* direct I/O */
#define IOMAP_NOWAIT		(1 << 5) /* do not block */
#define IOMAP_OVERWRITE_ONLY	(1 << 6) /* only pure overwrites allowed */
#define IOMAP_UNSHARE		(1 << 7) /* unshare_file_range */
#ifdef CONFIG_FS_DAX
#define IOMAP_DAX		(1 << 8) /* DAX mapping */
#else
#define IOMAP_DAX		0
#endif /* CONFIG_FS_DAX */

struct iomap_ops {
	/*
	 * Return the existing mapping at pos, or reserve space starting at
	 * pos for up to length, as long as we can do it as a single mapping.
	 * The actual length is returned in iomap->length.
	 */
	int (*iomap_begin)(struct inode *inode, loff_t pos, loff_t length,
			unsigned flags, struct iomap *iomap,
			struct iomap *srcmap);

	/*
	 * Commit and/or unreserve space previously allocated using
	 * iomap_begin.  Written indicates the length of the successful write
	 * operation which needs to be committed, while the rest needs to be
	 * unreserved.  Written might be zero if no data was written.
	 */
	int (*iomap_end)(struct inode *inode, loff_t pos, loff_t length,
			ssize_t written, unsigned flags, struct iomap *iomap);

	ANDROID_KABI_RESERVE(1);
	ANDROID_KABI_RESERVE(2);
};
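
/*
 * Example: the shape of a typical ->iomap_begin implementation.  A minimal
 * sketch only; example_lookup_extent() and the example_extent fields are
 * hypothetical stand-ins for the filesystem's own extent lookup:
 *
 *	static int example_iomap_begin(struct inode *inode, loff_t pos,
 *			loff_t length, unsigned flags, struct iomap *iomap,
 *			struct iomap *srcmap)
 *	{
 *		struct example_extent ext;
 *		int error;
 *
 *		error = example_lookup_extent(inode, pos, length, &ext,
 *				flags & IOMAP_WRITE);
 *		if (error)
 *			return error;
 *
 *		iomap->offset = ext.file_offset;	// bytes, <= pos
 *		iomap->length = ext.len;		// bytes, may exceed length
 *		iomap->bdev = inode->i_sb->s_bdev;
 *		iomap->flags = 0;
 *		if (ext.is_hole) {
 *			iomap->type = IOMAP_HOLE;
 *			iomap->addr = IOMAP_NULL_ADDR;
 *		} else {
 *			iomap->type = ext.unwritten ? IOMAP_UNWRITTEN : IOMAP_MAPPED;
 *			iomap->addr = ext.disk_offset;	// bytes
 *			if (ext.newly_allocated)
 *				iomap->flags |= IOMAP_F_NEW;
 *		}
 *		return 0;
 *	}
 *
 * ->iomap_end, when provided, is the place to trim or release any reservation
 * made here that the write did not consume.
 */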

/**
 * struct iomap_iter - Iterate through a range of a file
 * @inode: Set at the start of the iteration and should not change.
 * @pos: The current file position we are operating on.  It is updated by
 *	calls to iomap_iter().  Treat as read-only in the body.
 * @len: The remaining length of the file segment we're operating on.
 *	It is updated at the same time as @pos.
 * @processed: The number of bytes processed by the body in the most recent
 *	iteration, or a negative errno. 0 causes the iteration to stop.
 * @flags: Zero or more of the iomap_begin flags above.
 * @iomap: Map describing the I/O iteration
 * @srcmap: Source map for COW operations
 * @private: Filesystem-private data passed through to the iomap operations
 */
struct iomap_iter {
	struct inode *inode;
	loff_t pos;
	u64 len;
	s64 processed;
	unsigned flags;
	struct iomap iomap;
	struct iomap srcmap;
	void *private;
};

int iomap_iter(struct iomap_iter *iter, const struct iomap_ops *ops);
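
/*
 * iomap_iter() is used in a loop: each call looks up one mapping via
 * ->iomap_begin, the caller's body processes as much of it as it can, and the
 * amount processed is fed back through @processed.  Roughly (a sketch of the
 * calling convention, with example_process_mapping() standing in for the
 * per-mapping work):
 *
 *	struct iomap_iter iter = {
 *		.inode	= inode,
 *		.pos	= pos,
 *		.len	= len,
 *		.flags	= IOMAP_WRITE,
 *	};
 *	int ret;
 *
 *	while ((ret = iomap_iter(&iter, ops)) > 0)
 *		iter.processed = example_process_mapping(&iter);
 *	return ret;
 */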

/**
 * iomap_length - length of the current iomap iteration
 * @iter: iteration structure
 *
 * Returns the length that the operation applies to for the current iteration.
 */
static inline u64 iomap_length(const struct iomap_iter *iter)
{
	u64 end = iter->iomap.offset + iter->iomap.length;

	if (iter->srcmap.type != IOMAP_HOLE)
		end = min(end, iter->srcmap.offset + iter->srcmap.length);
	return min(iter->len, end - iter->pos);
}

/**
 * iomap_iter_srcmap - return the source map for the current iomap iteration
 * @i: iteration structure
 *
 * Write operations on file systems with reflink support might require a
 * source and a destination map.  This function returns the source map
 * for a given operation, which may or may not be identical to the destination
 * map in &i->iomap.
 */
static inline const struct iomap *iomap_iter_srcmap(const struct iomap_iter *i)
{
	if (i->srcmap.type != IOMAP_HOLE)
		return &i->srcmap;
	return &i->iomap;
}

/*
 * Return the file offset for the first unchanged block after a short write.
 *
 * If nothing was written, round @pos down to point at the first block in
 * the range, else round up to include the partially written block.
 */
static inline loff_t iomap_last_written_block(struct inode *inode, loff_t pos,
		ssize_t written)
{
	if (unlikely(!written))
		return round_down(pos, i_blocksize(inode));
	return round_up(pos + written, i_blocksize(inode));
}
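
/*
 * With a 4096-byte block size, for example, a write starting at pos 5000 that
 * copied 3000 bytes ends at byte 8000, so this returns round_up(8000, 4096) =
 * 8192; if nothing was copied it returns round_down(5000, 4096) = 4096.
 */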

/*
 * Check if the range needs to be unshared for a FALLOC_FL_UNSHARE_RANGE
 * operation.
 *
 * Don't bother with blocks that are not shared to start with; or mappings that
 * cannot be shared, such as inline data, delalloc reservations, holes or
 * unwritten extents.
 *
 * Note that we use srcmap directly instead of iomap_iter_srcmap as unsharing
 * requires providing a separate source map, and the presence of one is a good
 * indicator that unsharing is needed, unlike IOMAP_F_SHARED which can be set
 * for any data that goes into the COW fork for XFS.
 */
static inline bool iomap_want_unshare_iter(const struct iomap_iter *iter)
{
	return (iter->iomap.flags & IOMAP_F_SHARED) &&
		iter->srcmap.type == IOMAP_MAPPED;
}

ssize_t iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *from,
		const struct iomap_ops *ops, void *private);
int iomap_read_folio(struct folio *folio, const struct iomap_ops *ops);
void iomap_readahead(struct readahead_control *, const struct iomap_ops *ops);
bool iomap_is_partially_uptodate(struct folio *, size_t from, size_t count);
struct folio *iomap_get_folio(struct iomap_iter *iter, loff_t pos, size_t len);
bool iomap_release_folio(struct folio *folio, gfp_t gfp_flags);
void iomap_invalidate_folio(struct folio *folio, size_t offset, size_t len);
bool iomap_dirty_folio(struct address_space *mapping, struct folio *folio);
int iomap_file_unshare(struct inode *inode, loff_t pos, loff_t len,
		const struct iomap_ops *ops);
int iomap_zero_range(struct inode *inode, loff_t pos, loff_t len,
		bool *did_zero, const struct iomap_ops *ops);
int iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
		const struct iomap_ops *ops);
vm_fault_t iomap_page_mkwrite(struct vm_fault *vmf,
		const struct iomap_ops *ops);
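
/*
 * Example: the buffered I/O helpers above are normally wired straight into a
 * filesystem's address_space_operations.  A minimal sketch, assuming a
 * hypothetical example_read_iomap_ops that describes the filesystem's
 * mappings:
 *
 *	static int example_read_folio(struct file *file, struct folio *folio)
 *	{
 *		return iomap_read_folio(folio, &example_read_iomap_ops);
 *	}
 *
 *	static void example_readahead(struct readahead_control *rac)
 *	{
 *		iomap_readahead(rac, &example_read_iomap_ops);
 *	}
 *
 *	static const struct address_space_operations example_aops = {
 *		.read_folio		= example_read_folio,
 *		.readahead		= example_readahead,
 *		.dirty_folio		= iomap_dirty_folio,
 *		.release_folio		= iomap_release_folio,
 *		.invalidate_folio	= iomap_invalidate_folio,
 *		.is_partially_uptodate	= iomap_is_partially_uptodate,
 *	};
 */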

typedef void (*iomap_punch_t)(struct inode *inode, loff_t offset, loff_t length,
		struct iomap *iomap);
void iomap_write_delalloc_release(struct inode *inode, loff_t start_byte,
		loff_t end_byte, unsigned flags, struct iomap *iomap,
		iomap_punch_t punch);

int iomap_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		u64 start, u64 len, const struct iomap_ops *ops);
loff_t iomap_seek_hole(struct inode *inode, loff_t offset,
		const struct iomap_ops *ops);
loff_t iomap_seek_data(struct inode *inode, loff_t offset,
		const struct iomap_ops *ops);
sector_t iomap_bmap(struct address_space *mapping, sector_t bno,
		const struct iomap_ops *ops);
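
/*
 * Example: iomap_seek_hole() and iomap_seek_data() can back the SEEK_HOLE and
 * SEEK_DATA cases of a filesystem's ->llseek.  A sketch, assuming the caller
 * takes whatever locking the filesystem needs for a stable extent map and
 * reusing the hypothetical example_read_iomap_ops from above:
 *
 *	static loff_t example_llseek(struct file *file, loff_t offset, int whence)
 *	{
 *		struct inode *inode = file_inode(file);
 *
 *		switch (whence) {
 *		case SEEK_HOLE:
 *			offset = iomap_seek_hole(inode, offset,
 *					&example_read_iomap_ops);
 *			break;
 *		case SEEK_DATA:
 *			offset = iomap_seek_data(inode, offset,
 *					&example_read_iomap_ops);
 *			break;
 *		default:
 *			return generic_file_llseek(file, offset, whence);
 *		}
 *		if (offset < 0)
 *			return offset;
 *		return vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
 *	}
 */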

/*
 * Structure for writeback I/O completions.
 */
struct iomap_ioend {
	struct list_head	io_list;	/* next ioend in chain */
	u16			io_type;
	u16			io_flags;	/* IOMAP_F_* */
	struct inode		*io_inode;	/* file being written to */
	size_t			io_size;	/* size of data within eof */
	loff_t			io_offset;	/* offset in the file */
	sector_t		io_sector;	/* start sector of ioend */
	struct bio		io_bio;		/* MUST BE LAST! */
};

static inline struct iomap_ioend *iomap_ioend_from_bio(struct bio *bio)
{
	return container_of(bio, struct iomap_ioend, io_bio);
}

struct iomap_writeback_ops {
	/*
	 * Required, maps the blocks so that writeback can be performed on
	 * the range starting at offset.
	 *
	 * Can return arbitrarily large regions, but we need to call into it at
	 * least once per folio to allow the file systems to synchronize with
	 * the write path that could be invalidating mappings.
	 *
	 * An existing mapping from a previous call to this method can be reused
	 * by the file system if it is still valid.
	 */
	int (*map_blocks)(struct iomap_writepage_ctx *wpc, struct inode *inode,
			loff_t offset, unsigned len);

	/*
	 * Optional, allows the file systems to perform actions just before
	 * submitting the bio and/or override the bio end_io handler for complex
	 * operations like copy on write extent manipulation or unwritten extent
	 * conversions.
	 */
	int (*prepare_ioend)(struct iomap_ioend *ioend, int status);

	/*
	 * Optional, allows the file system to discard state on a page where
	 * we failed to submit any I/O.
	 */
	void (*discard_folio)(struct folio *folio, loff_t pos);
};

struct iomap_writepage_ctx {
	struct iomap		iomap;
	struct iomap_ioend	*ioend;
	const struct iomap_writeback_ops *ops;
	u32			nr_folios;	/* folios added to the ioend */
};

void iomap_finish_ioends(struct iomap_ioend *ioend, int error);
void iomap_ioend_try_merge(struct iomap_ioend *ioend,
		struct list_head *more_ioends);
void iomap_sort_ioends(struct list_head *ioend_list);
int iomap_writepages(struct address_space *mapping,
		struct writeback_control *wbc, struct iomap_writepage_ctx *wpc,
		const struct iomap_writeback_ops *ops);
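
/*
 * Example: a filesystem's ->writepages typically places the writepage context
 * on the stack and lets iomap_writepages() drive ->map_blocks.  A minimal
 * sketch with a hypothetical example_writeback_ops providing map_blocks:
 *
 *	static int example_writepages(struct address_space *mapping,
 *			struct writeback_control *wbc)
 *	{
 *		struct iomap_writepage_ctx wpc = { };
 *
 *		return iomap_writepages(mapping, wbc, &wpc,
 *				&example_writeback_ops);
 *	}
 *
 * Filesystems that need more per-writeback state embed struct
 * iomap_writepage_ctx in a larger structure and recover it in ->map_blocks
 * with container_of().
 */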

/*
 * Flags for direct I/O ->end_io:
 */
#define IOMAP_DIO_UNWRITTEN	(1 << 0)	/* covers unwritten extent(s) */
#define IOMAP_DIO_COW		(1 << 1)	/* covers COW extent(s) */

struct iomap_dio_ops {
	int (*end_io)(struct kiocb *iocb, ssize_t size, int error,
			unsigned flags);
	void (*submit_io)(const struct iomap_iter *iter, struct bio *bio,
			loff_t file_offset);

	/*
	 * Filesystems wishing to attach private information to a direct io bio
	 * must provide a ->submit_io method that attaches the additional
	 * information to the bio and changes the ->bi_end_io callback to a
	 * custom function.  This function should, at a minimum, perform any
	 * relevant post-processing of the bio and end with a call to
	 * iomap_dio_bio_end_io.
	 */
	struct bio_set *bio_set;
};

/*
 * Wait for the I/O to complete in iomap_dio_rw even if the kiocb is not
 * synchronous.
 */
#define IOMAP_DIO_FORCE_WAIT	(1 << 0)

/*
 * Do not allocate blocks or zero partial blocks, but instead fall back to
 * the caller by returning -EAGAIN.  Used to optimize direct I/O writes that
 * are not aligned to the file system block size.
 */
#define IOMAP_DIO_OVERWRITE_ONLY	(1 << 1)

/*
 * When a page fault occurs, return a partial synchronous result and allow
 * the caller to retry the rest of the operation after dealing with the page
 * fault.
 */
#define IOMAP_DIO_PARTIAL	(1 << 2)

ssize_t iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops, const struct iomap_dio_ops *dops,
		unsigned int dio_flags, void *private, size_t done_before);
struct iomap_dio *__iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops, const struct iomap_dio_ops *dops,
		unsigned int dio_flags, void *private, size_t done_before);
ssize_t iomap_dio_complete(struct iomap_dio *dio);
void iomap_dio_bio_end_io(struct bio *bio);
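
/*
 * Example: a direct read path built on iomap_dio_rw().  A minimal sketch,
 * again assuming the hypothetical example_read_iomap_ops; a write path would
 * additionally pass a struct iomap_dio_ops whose ->end_io converts unwritten
 * extents and updates the on-disk size as needed:
 *
 *	static ssize_t example_dio_read(struct kiocb *iocb, struct iov_iter *to)
 *	{
 *		return iomap_dio_rw(iocb, to, &example_read_iomap_ops, NULL,
 *				0, NULL, 0);
 *	}
 *
 * Passing IOMAP_DIO_PARTIAL in dio_flags together with the bytes already
 * completed in done_before lets the caller return a short result on a page
 * fault and resume after faulting in the remaining user pages.
 */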

#ifdef CONFIG_SWAP
struct file;
struct swap_info_struct;

int iomap_swapfile_activate(struct swap_info_struct *sis,
		struct file *swap_file, sector_t *pagespan,
		const struct iomap_ops *ops);
#else
# define iomap_swapfile_activate(sis, swapfile, pagespan, ops)	(-EIO)
#endif /* CONFIG_SWAP */
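
/*
 * Example: ->swap_activate in address_space_operations can hand the extent
 * walk to iomap_swapfile_activate().  A sketch, reusing the hypothetical
 * example_read_iomap_ops; the filesystem must also set sis->bdev before
 * returning:
 *
 *	static int example_swap_activate(struct swap_info_struct *sis,
 *			struct file *swap_file, sector_t *span)
 *	{
 *		sis->bdev = file_inode(swap_file)->i_sb->s_bdev;
 *		return iomap_swapfile_activate(sis, swap_file, span,
 *				&example_read_iomap_ops);
 *	}
 */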

#endif /* LINUX_IOMAP_H */