// SPDX-License-Identifier: GPL-2.0-only
/*
 * This file is part of UBIFS.
 *
 * Copyright (C) 2006-2008 Nokia Corporation.
 *
 * Authors: Artem Bityutskiy (Битюцкий Артём)
 *          Adrian Hunter
 */

/*
 * This file implements VFS file and inode operations for regular files, device
 * nodes and symlinks as well as address space operations.
 *
 * UBIFS uses 2 page flags: @PG_private and @PG_checked. @PG_private is set if
 * the page is dirty and is used for optimization purposes - dirty pages are
 * not budgeted so the flag shows that 'ubifs_write_end()' should not release
 * the budget for this page. The @PG_checked flag is set if full budgeting is
 * required for the page e.g., when it corresponds to a file hole or it is
 * beyond the file size. The budgeting is done in 'ubifs_write_begin()', because
 * it is OK to fail in this function, and the budget is released in
 * 'ubifs_write_end()'. So the @PG_private and @PG_checked flags carry
 * information about how the page was budgeted, to make it possible to release
 * the budget properly (a short sketch of this logic follows the includes
 * below).
 *
 * A thing to keep in mind: inode @i_mutex is locked in most VFS operations we
 * implement. However, this is not true for 'ubifs_writepage()', which may be
 * called with @i_mutex unlocked. For example, when the flusher thread is doing
 * background write-back, it calls 'ubifs_writepage()' with unlocked @i_mutex.
 * On "normal" work-paths the @i_mutex is locked in 'ubifs_writepage()', e.g.
 * in the "sys_write -> alloc_pages -> direct reclaim path". So, in
 * 'ubifs_writepage()' we are only guaranteed that the page is locked.
 *
 * Similarly, @i_mutex is not always locked in 'ubifs_read_folio()', e.g., the
 * read-ahead path does not lock it ("sys_read -> generic_file_aio_read ->
 * ondemand_readahead -> read_folio"). In case of readahead, the @I_SYNC flag
 * is not set either. However, UBIFS disables readahead.
 */

#include "ubifs.h"
#include <linux/mount.h>
#include <linux/slab.h>
#include <linux/migrate.h>

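/*
 * Illustrative sketch (not compiled): how the two page flags described in the
 * header comment map to budget release at write-out time. The helpers are
 * defined further down in this file, and the same logic appears in
 * 'do_writepage()' and 'cancel_budget()':
 *
 *	if (PageChecked(page))
 *		release_new_page_budget(c);	 (hole or beyond i_size)
 *	else
 *		release_existing_page_budget(c); (page exists on the media)
 */
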
static int read_block(struct inode *inode, void *addr, unsigned int block,
		      struct ubifs_data_node *dn)
{
	struct ubifs_info *c = inode->i_sb->s_fs_info;
	int err, len, out_len;
	union ubifs_key key;
	unsigned int dlen;

	data_key_init(c, &key, inode->i_ino, block);
	err = ubifs_tnc_lookup(c, &key, dn);
	if (err) {
		if (err == -ENOENT)
			/* Not found, so it must be a hole */
			memset(addr, 0, UBIFS_BLOCK_SIZE);
		return err;
	}

	ubifs_assert(c, le64_to_cpu(dn->ch.sqnum) >
		     ubifs_inode(inode)->creat_sqnum);
	len = le32_to_cpu(dn->size);
	if (len <= 0 || len > UBIFS_BLOCK_SIZE)
		goto dump;

	dlen = le32_to_cpu(dn->ch.len) - UBIFS_DATA_NODE_SZ;

	if (IS_ENCRYPTED(inode)) {
		err = ubifs_decrypt(inode, dn, &dlen, block);
		if (err)
			goto dump;
	}

	out_len = UBIFS_BLOCK_SIZE;
	err = ubifs_decompress(c, &dn->data, dlen, addr, &out_len,
			       le16_to_cpu(dn->compr_type));
	if (err || len != out_len)
		goto dump;

	/*
	 * Data length can be less than a full block, even for blocks that are
	 * not the last in the file (e.g., as a result of making a hole and
	 * appending data). Ensure that the remainder is zeroed out.
	 */
	if (len < UBIFS_BLOCK_SIZE)
		memset(addr + len, 0, UBIFS_BLOCK_SIZE - len);

	return 0;

dump:
	ubifs_err(c, "bad data node (block %u, inode %lu)",
		  block, inode->i_ino);
	ubifs_dump_node(c, dn, UBIFS_MAX_DATA_NODE_SZ);
	return -EINVAL;
}

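/*
 * Geometry reminder (illustrative, not compiled): UBIFS_BLOCK_SIZE is 4 KiB,
 * so on systems with 4 KiB pages UBIFS_BLOCKS_PER_PAGE is 1 and the first
 * block of a page is simply
 *
 *	block = page->index << UBIFS_BLOCKS_PER_PAGE_SHIFT;
 *
 * On larger-page systems (e.g. 64 KiB pages) one page covers several blocks,
 * which is why 'do_readpage()' and 'do_writepage()' below loop up to
 * UBIFS_BLOCKS_PER_PAGE times.
 */
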
static int do_readpage(struct page *page)
{
	void *addr;
	int err = 0, i;
	unsigned int block, beyond;
	struct ubifs_data_node *dn;
	struct inode *inode = page->mapping->host;
	struct ubifs_info *c = inode->i_sb->s_fs_info;
	loff_t i_size = i_size_read(inode);

	dbg_gen("ino %lu, pg %lu, i_size %lld, flags %#lx",
		inode->i_ino, page->index, i_size, page->flags);
	ubifs_assert(c, !PageChecked(page));
	ubifs_assert(c, !PagePrivate(page));

	addr = kmap(page);

	block = page->index << UBIFS_BLOCKS_PER_PAGE_SHIFT;
	beyond = (i_size + UBIFS_BLOCK_SIZE - 1) >> UBIFS_BLOCK_SHIFT;
	if (block >= beyond) {
		/* Reading beyond inode */
		SetPageChecked(page);
		memset(addr, 0, PAGE_SIZE);
		goto out;
	}

	dn = kmalloc(UBIFS_MAX_DATA_NODE_SZ, GFP_NOFS);
	if (!dn) {
		err = -ENOMEM;
		goto error;
	}

	i = 0;
	while (1) {
		int ret;

		if (block >= beyond) {
			/* Reading beyond inode */
			err = -ENOENT;
			memset(addr, 0, UBIFS_BLOCK_SIZE);
		} else {
			ret = read_block(inode, addr, block, dn);
			if (ret) {
				err = ret;
				if (err != -ENOENT)
					break;
			} else if (block + 1 == beyond) {
				int dlen = le32_to_cpu(dn->size);
				int ilen = i_size & (UBIFS_BLOCK_SIZE - 1);

				if (ilen && ilen < dlen)
					memset(addr + ilen, 0, dlen - ilen);
			}
		}
		if (++i >= UBIFS_BLOCKS_PER_PAGE)
			break;
		block += 1;
		addr += UBIFS_BLOCK_SIZE;
	}
	if (err) {
		if (err == -ENOENT) {
			/* Not found, so it must be a hole */
			SetPageChecked(page);
			dbg_gen("hole");
			goto out_free;
		}
		ubifs_err(c, "cannot read page %lu of inode %lu, error %d",
			  page->index, inode->i_ino, err);
		goto error;
	}

out_free:
	kfree(dn);
out:
	SetPageUptodate(page);
	ClearPageError(page);
	flush_dcache_page(page);
	kunmap(page);
	return 0;

error:
	kfree(dn);
	ClearPageUptodate(page);
	SetPageError(page);
	flush_dcache_page(page);
	kunmap(page);
	return err;
}

/**
 * release_new_page_budget - release budget of a new page.
 * @c: UBIFS file-system description object
 *
 * This is a helper function which releases budget corresponding to the budget
 * of one new page of data.
 */
static void release_new_page_budget(struct ubifs_info *c)
{
	struct ubifs_budget_req req = { .recalculate = 1, .new_page = 1 };

	ubifs_release_budget(c, &req);
}

/**
 * release_existing_page_budget - release budget of an existing page.
 * @c: UBIFS file-system description object
 *
 * This is a helper function which releases budget corresponding to the budget
 * of changing one page of data which already exists on the flash media.
 */
static void release_existing_page_budget(struct ubifs_info *c)
{
	struct ubifs_budget_req req = { .dd_growth = c->bi.page_budget };

	ubifs_release_budget(c, &req);
}

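/*
 * For reference, a hedged sketch (not compiled) of how the budgeting API is
 * used throughout this file: every successful 'ubifs_budget_space()' call is
 * eventually paired with a release, either explicitly or via the page flags
 * described at the top of this file:
 *
 *	struct ubifs_budget_req req = { .new_page = 1, .dirtied_ino = 1 };
 *
 *	err = ubifs_budget_space(c, &req);	(may force write-back)
 *	if (err)
 *		return err;			(nothing to undo)
 *	...
 *	ubifs_release_budget(c, &req);		(on error/cancel paths)
 */
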
static int write_begin_slow(struct address_space *mapping,
			    loff_t pos, unsigned len, struct page **pagep)
{
	struct inode *inode = mapping->host;
	struct ubifs_info *c = inode->i_sb->s_fs_info;
	pgoff_t index = pos >> PAGE_SHIFT;
	struct ubifs_budget_req req = { .new_page = 1 };
	int err, appending = !!(pos + len > inode->i_size);
	struct page *page;

	dbg_gen("ino %lu, pos %llu, len %u, i_size %lld",
		inode->i_ino, pos, len, inode->i_size);

	/*
	 * At the slow path we have to budget before locking the page, because
	 * budgeting may force write-back, which would wait on locked pages and
	 * deadlock if we had the page locked. At this point we do not know
	 * anything about the page, so assume that this is a new page which is
	 * written to a hole. This corresponds to the largest budget. Later the
	 * budget will be amended if this is not true.
	 */
	if (appending)
		/* We are appending data, budget for inode change */
		req.dirtied_ino = 1;

	err = ubifs_budget_space(c, &req);
	if (unlikely(err))
		return err;

	page = grab_cache_page_write_begin(mapping, index);
	if (unlikely(!page)) {
		ubifs_release_budget(c, &req);
		return -ENOMEM;
	}

	if (!PageUptodate(page)) {
		if (!(pos & ~PAGE_MASK) && len == PAGE_SIZE)
			SetPageChecked(page);
		else {
			err = do_readpage(page);
			if (err) {
				unlock_page(page);
				put_page(page);
				ubifs_release_budget(c, &req);
				return err;
			}
		}
	}

	if (PagePrivate(page))
		/*
		 * The page is dirty, which means it was budgeted twice:
		 *   o first time the budget was allocated by the task which
		 *     made the page dirty and set the @PG_private flag;
		 *   o and then we budgeted for it for the second time at the
		 *     very beginning of this function.
		 *
		 * So what we have to do is to release the page budget we
		 * allocated.
		 */
		release_new_page_budget(c);
	else if (!PageChecked(page))
		/*
		 * We are changing a page which already exists on the media.
		 * This means that changing the page does not make the amount
		 * of indexing information larger, and this part of the budget
		 * which we have already acquired may be released.
		 */
		ubifs_convert_page_budget(c);

	if (appending) {
		struct ubifs_inode *ui = ubifs_inode(inode);

		/*
		 * 'ubifs_write_end()' is optimized from the fast-path part of
		 * 'ubifs_write_begin()' and expects the @ui_mutex to be locked
		 * if data is appended.
		 */
		mutex_lock(&ui->ui_mutex);
		if (ui->dirty)
			/*
			 * The inode is dirty already, so we may free the
			 * budget we allocated.
			 */
			ubifs_release_dirty_inode_budget(c, ui);
	}

	*pagep = page;
	return 0;
}

/**
 * allocate_budget - allocate budget for 'ubifs_write_begin()'.
 * @c: UBIFS file-system description object
 * @page: page to allocate budget for
 * @ui: UBIFS inode object the page belongs to
 * @appending: non-zero if the page is appended
 *
 * This is a helper function for 'ubifs_write_begin()' which allocates budget
 * for the operation. The budget is allocated differently depending on whether
 * this is appending, whether the page is dirty or not, and so on. This
 * function leaves the @ui->ui_mutex locked in case of appending. Returns zero
 * in case of success and %-ENOSPC in case of failure.
 */
static int allocate_budget(struct ubifs_info *c, struct page *page,
			   struct ubifs_inode *ui, int appending)
{
	struct ubifs_budget_req req = { .fast = 1 };

	if (PagePrivate(page)) {
		if (!appending)
			/*
			 * The page is dirty and we are not appending, which
			 * means no budget is needed at all.
			 */
			return 0;

		mutex_lock(&ui->ui_mutex);
		if (ui->dirty)
			/*
			 * The page is dirty and we are appending, so the inode
			 * has to be marked as dirty. However, it is already
			 * dirty, so we do not need any budget. We may return,
			 * but @ui->ui_mutex has to be left locked because we
			 * should prevent write-back from flushing the inode
			 * and freeing the budget. The lock will be released in
			 * 'ubifs_write_end()'.
			 */
			return 0;

		/*
		 * The page is dirty, we are appending, the inode is clean, so
		 * we need to budget the inode change.
		 */
		req.dirtied_ino = 1;
	} else {
		if (PageChecked(page))
			/*
			 * The page corresponds to a hole and does not exist on
			 * the media. So changing it makes the amount of
			 * indexing information larger, and we have to budget
			 * for a new page.
			 */
			req.new_page = 1;
		else
			/*
			 * Not a hole, the change will not add any new indexing
			 * information, budget for page change.
			 */
			req.dirtied_page = 1;

		if (appending) {
			mutex_lock(&ui->ui_mutex);
			if (!ui->dirty)
				/*
				 * The inode is clean but we will have to mark
				 * it as dirty because we are appending. This
				 * needs a budget.
				 */
				req.dirtied_ino = 1;
		}
	}

	return ubifs_budget_space(c, &req);
}

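/*
 * Summary of the 'allocate_budget()' decisions above (derived from the code,
 * for orientation only):
 *
 *	page state       appending?   budget request
 *	---------------  ----------   -----------------------------------
 *	dirty            no           none
 *	dirty            yes          .dirtied_ino = 1 (only if inode clean)
 *	clean, hole      no           .new_page = 1
 *	clean, hole      yes          .new_page = 1 (+ .dirtied_ino = 1)
 *	clean, on flash  no           .dirtied_page = 1
 *	clean, on flash  yes          .dirtied_page = 1 (+ .dirtied_ino = 1)
 */
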
/*
 * This function is called when a page of data is going to be written. Since
 * the page of data will not necessarily go to the flash straight away, UBIFS
 * has to reserve space on the media for it, which is done by means of
 * budgeting.
 *
 * This is the hot-path of the file-system and we are trying to optimize it as
 * much as possible. For this reason it is split into 2 parts - slow and fast.
 *
 * There are many budgeting cases:
 *     o a new page is appended - we have to budget for a new page and for
 *       changing the inode; however, if the inode is already dirty, there is
 *       no need to budget for it;
 *     o an existing clean page is changed - we have to budget for it; if the
 *       page does not exist on the media (a hole), we have to budget for a
 *       new page; otherwise, we may budget for changing an existing page; the
 *       difference between these cases is that changing an existing page does
 *       not introduce anything new to the FS indexing information, so it does
 *       not grow, and a smaller budget is acquired in this case;
 *     o an existing dirty page is changed - no need to budget at all, because
 *       the page budget was acquired earlier, when the page was marked dirty.
 *
 * UBIFS budgeting sub-system may force write-back if it thinks there is no
 * space to reserve. This imposes some locking restrictions and makes it
 * impossible to take into account the above cases, and makes it impossible to
 * optimize budgeting.
 *
 * The solution for this is that the fast path of 'ubifs_write_begin()' assumes
 * there is plenty of flash space and the budget will be acquired quickly,
 * without forcing write-back. The slow path does not make this assumption.
 */
static int ubifs_write_begin(struct file *file, struct address_space *mapping,
			     loff_t pos, unsigned len,
			     struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	struct ubifs_info *c = inode->i_sb->s_fs_info;
	struct ubifs_inode *ui = ubifs_inode(inode);
	pgoff_t index = pos >> PAGE_SHIFT;
	int err, appending = !!(pos + len > inode->i_size);
	int skipped_read = 0;
	struct page *page;

	ubifs_assert(c, ubifs_inode(inode)->ui_size == inode->i_size);
	ubifs_assert(c, !c->ro_media && !c->ro_mount);

	if (unlikely(c->ro_error))
		return -EROFS;

	/* Try out the fast-path part first */
	page = grab_cache_page_write_begin(mapping, index);
	if (unlikely(!page))
		return -ENOMEM;

	if (!PageUptodate(page)) {
		/* The page is not loaded from the flash */
		if (!(pos & ~PAGE_MASK) && len == PAGE_SIZE) {
			/*
			 * We change the whole page so no need to load it. But
			 * we do not know whether this page exists on the media
			 * or not, so we assume the latter because it requires
			 * larger budget. The assumption is that it is better
			 * to budget a bit more than to read the page from the
			 * media. Thus, we are setting the @PG_checked flag
			 * here.
			 */
			SetPageChecked(page);
			skipped_read = 1;
		} else {
			err = do_readpage(page);
			if (err) {
				unlock_page(page);
				put_page(page);
				return err;
			}
		}
	}

	err = allocate_budget(c, page, ui, appending);
	if (unlikely(err)) {
		ubifs_assert(c, err == -ENOSPC);
		/*
		 * If we skipped reading the page because we were going to
		 * write all of it, then it is not up to date.
		 */
		if (skipped_read)
			ClearPageChecked(page);
		/*
		 * Budgeting failed which means it would have to force
		 * write-back but didn't, because we set the @fast flag in the
		 * request. Write-back cannot be done now, while we have the
		 * page locked, because it would deadlock. Unlock and free
		 * everything and fall-back to slow-path.
		 */
		if (appending) {
			ubifs_assert(c, mutex_is_locked(&ui->ui_mutex));
			mutex_unlock(&ui->ui_mutex);
		}
		unlock_page(page);
		put_page(page);

		return write_begin_slow(mapping, pos, len, pagep);
	}

	/*
	 * Whee, we acquired budgeting quickly - without involving
	 * garbage-collection, committing or forcing write-back. We return
	 * with @ui->ui_mutex locked if we are appending pages, and unlocked
	 * otherwise. This is an optimization (slightly hacky though).
	 */
	*pagep = page;
	return 0;
}

/**
 * cancel_budget - cancel budget.
 * @c: UBIFS file-system description object
 * @page: page to cancel budget for
 * @ui: UBIFS inode object the page belongs to
 * @appending: non-zero if the page is appended
 *
 * This is a helper function for a page write operation. It unlocks the
 * @ui->ui_mutex in case of appending.
 */
static void cancel_budget(struct ubifs_info *c, struct page *page,
			  struct ubifs_inode *ui, int appending)
{
	if (appending) {
		if (!ui->dirty)
			ubifs_release_dirty_inode_budget(c, ui);
		mutex_unlock(&ui->ui_mutex);
	}
	if (!PagePrivate(page)) {
		if (PageChecked(page))
			release_new_page_budget(c);
		else
			release_existing_page_budget(c);
	}
}

static int ubifs_write_end(struct file *file, struct address_space *mapping,
			   loff_t pos, unsigned len, unsigned copied,
			   struct page *page, void *fsdata)
{
	struct inode *inode = mapping->host;
	struct ubifs_inode *ui = ubifs_inode(inode);
	struct ubifs_info *c = inode->i_sb->s_fs_info;
	loff_t end_pos = pos + len;
	int appending = !!(end_pos > inode->i_size);

	dbg_gen("ino %lu, pos %llu, pg %lu, len %u, copied %d, i_size %lld",
		inode->i_ino, pos, page->index, len, copied, inode->i_size);

	if (unlikely(copied < len && len == PAGE_SIZE)) {
		/*
		 * VFS copied less data to the page than it intended and
		 * declared in its '->write_begin()' call via the @len
		 * argument. If the page was not up-to-date, and @len was
		 * @PAGE_SIZE, the 'ubifs_write_begin()' function did
		 * not load it from the media (for optimization reasons). This
		 * means that part of the page contains garbage. So read the
		 * page now.
		 */
		dbg_gen("copied %d instead of %d, read page and repeat",
			copied, len);
		cancel_budget(c, page, ui, appending);
		ClearPageChecked(page);

		/*
		 * Return 0 to force VFS to repeat the whole operation, or the
		 * error code if 'do_readpage()' fails.
		 */
		copied = do_readpage(page);
		goto out;
	}

	if (len == PAGE_SIZE)
		SetPageUptodate(page);

	if (!PagePrivate(page)) {
		attach_page_private(page, (void *)1);
		atomic_long_inc(&c->dirty_pg_cnt);
		__set_page_dirty_nobuffers(page);
	}

	if (appending) {
		i_size_write(inode, end_pos);
		ui->ui_size = end_pos;
		/*
		 * Note, we do not set @I_DIRTY_PAGES (which means that the
		 * inode has dirty pages), this has been done in
		 * '__set_page_dirty_nobuffers()'.
		 */
		__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
		ubifs_assert(c, mutex_is_locked(&ui->ui_mutex));
		mutex_unlock(&ui->ui_mutex);
	}

out:
	unlock_page(page);
	put_page(page);
	return copied;
}

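/*
 * Note on the short-copy path above: when '->write_end()' returns a value
 * smaller than what was copied (here 0, or a negative error), the generic
 * write loop in the VFS re-faults the user buffer and repeats the
 * '->write_begin()' / copy / '->write_end()' cycle for the same range. On the
 * second pass the page is up-to-date, so the partial copy is harmless.
 */
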
/**
 * populate_page - copy data nodes into a page for bulk-read.
 * @c: UBIFS file-system description object
 * @page: page
 * @bu: bulk-read information
 * @n: next zbranch slot
 *
 * This function returns %0 on success and a negative error code on failure.
 */
static int populate_page(struct ubifs_info *c, struct page *page,
			 struct bu_info *bu, int *n)
{
	int i = 0, nn = *n, offs = bu->zbranch[0].offs, hole = 0, read = 0;
	struct inode *inode = page->mapping->host;
	loff_t i_size = i_size_read(inode);
	unsigned int page_block;
	void *addr, *zaddr;
	pgoff_t end_index;

	dbg_gen("ino %lu, pg %lu, i_size %lld, flags %#lx",
		inode->i_ino, page->index, i_size, page->flags);

	addr = zaddr = kmap(page);

	end_index = (i_size - 1) >> PAGE_SHIFT;
	if (!i_size || page->index > end_index) {
		hole = 1;
		memset(addr, 0, PAGE_SIZE);
		goto out_hole;
	}

	page_block = page->index << UBIFS_BLOCKS_PER_PAGE_SHIFT;
	while (1) {
		int err, len, out_len, dlen;

		if (nn >= bu->cnt) {
			hole = 1;
			memset(addr, 0, UBIFS_BLOCK_SIZE);
		} else if (key_block(c, &bu->zbranch[nn].key) == page_block) {
			struct ubifs_data_node *dn;

			dn = bu->buf + (bu->zbranch[nn].offs - offs);

			ubifs_assert(c, le64_to_cpu(dn->ch.sqnum) >
				     ubifs_inode(inode)->creat_sqnum);

			len = le32_to_cpu(dn->size);
			if (len <= 0 || len > UBIFS_BLOCK_SIZE)
				goto out_err;

			dlen = le32_to_cpu(dn->ch.len) - UBIFS_DATA_NODE_SZ;
			out_len = UBIFS_BLOCK_SIZE;

			if (IS_ENCRYPTED(inode)) {
				err = ubifs_decrypt(inode, dn, &dlen, page_block);
				if (err)
					goto out_err;
			}

			err = ubifs_decompress(c, &dn->data, dlen, addr, &out_len,
					       le16_to_cpu(dn->compr_type));
			if (err || len != out_len)
				goto out_err;

			if (len < UBIFS_BLOCK_SIZE)
				memset(addr + len, 0, UBIFS_BLOCK_SIZE - len);

			nn += 1;
			read = (i << UBIFS_BLOCK_SHIFT) + len;
		} else if (key_block(c, &bu->zbranch[nn].key) < page_block) {
			nn += 1;
			continue;
		} else {
			hole = 1;
			memset(addr, 0, UBIFS_BLOCK_SIZE);
		}
		if (++i >= UBIFS_BLOCKS_PER_PAGE)
			break;
		addr += UBIFS_BLOCK_SIZE;
		page_block += 1;
	}

	if (end_index == page->index) {
		int len = i_size & (PAGE_SIZE - 1);

		if (len && len < read)
			memset(zaddr + len, 0, read - len);
	}

out_hole:
	if (hole) {
		SetPageChecked(page);
		dbg_gen("hole");
	}

	SetPageUptodate(page);
	ClearPageError(page);
	flush_dcache_page(page);
	kunmap(page);
	*n = nn;
	return 0;

out_err:
	ClearPageUptodate(page);
	SetPageError(page);
	flush_dcache_page(page);
	kunmap(page);
	ubifs_err(c, "bad data node (block %u, inode %lu)",
		  page_block, inode->i_ino);
	return -EINVAL;
}

/**
 * ubifs_do_bulk_read - do bulk-read.
 * @c: UBIFS file-system description object
 * @bu: bulk-read information
 * @page1: first page to read
 *
 * This function returns %1 if the bulk-read is done, otherwise %0 is returned.
 */
static int ubifs_do_bulk_read(struct ubifs_info *c, struct bu_info *bu,
			      struct page *page1)
{
	pgoff_t offset = page1->index, end_index;
	struct address_space *mapping = page1->mapping;
	struct inode *inode = mapping->host;
	struct ubifs_inode *ui = ubifs_inode(inode);
	int err, page_idx, page_cnt, ret = 0, n = 0;
	int allocate = bu->buf ? 0 : 1;
	loff_t isize;
	gfp_t ra_gfp_mask = readahead_gfp_mask(mapping) & ~__GFP_FS;

	err = ubifs_tnc_get_bu_keys(c, bu);
	if (err)
		goto out_warn;

	if (bu->eof) {
		/* Turn off bulk-read at the end of the file */
		ui->read_in_a_row = 1;
		ui->bulk_read = 0;
	}

	page_cnt = bu->blk_cnt >> UBIFS_BLOCKS_PER_PAGE_SHIFT;
	if (!page_cnt) {
		/*
		 * This happens when there are multiple blocks per page and the
		 * blocks for the first page we are looking for are not
		 * together. If all the pages were like this, bulk-read would
		 * reduce performance, so we turn it off for a while.
		 */
		goto out_bu_off;
	}

	if (bu->cnt) {
		if (allocate) {
			/*
			 * Allocate bulk-read buffer depending on how many data
			 * nodes we are going to read.
			 */
			bu->buf_len = bu->zbranch[bu->cnt - 1].offs +
				      bu->zbranch[bu->cnt - 1].len -
				      bu->zbranch[0].offs;
			ubifs_assert(c, bu->buf_len > 0);
			ubifs_assert(c, bu->buf_len <= c->leb_size);
			bu->buf = kmalloc(bu->buf_len, GFP_NOFS | __GFP_NOWARN);
			if (!bu->buf)
				goto out_bu_off;
		}

		err = ubifs_tnc_bulk_read(c, bu);
		if (err)
			goto out_warn;
	}

	err = populate_page(c, page1, bu, &n);
	if (err)
		goto out_warn;

	unlock_page(page1);
	ret = 1;

	isize = i_size_read(inode);
	if (isize == 0)
		goto out_free;
	end_index = ((isize - 1) >> PAGE_SHIFT);

	for (page_idx = 1; page_idx < page_cnt; page_idx++) {
		pgoff_t page_offset = offset + page_idx;
		struct page *page;

		if (page_offset > end_index)
			break;
		page = pagecache_get_page(mapping, page_offset,
				 FGP_LOCK|FGP_ACCESSED|FGP_CREAT|FGP_NOWAIT,
				 ra_gfp_mask);
		if (!page)
			break;
		if (!PageUptodate(page))
			err = populate_page(c, page, bu, &n);
		unlock_page(page);
		put_page(page);
		if (err)
			break;
	}

	ui->last_page_read = offset + page_idx - 1;

out_free:
	if (allocate)
		kfree(bu->buf);
	return ret;

out_warn:
	ubifs_warn(c, "ignoring error %d and skipping bulk-read", err);
	goto out_free;

out_bu_off:
	ui->read_in_a_row = ui->bulk_read = 0;
	goto out_free;
}

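/*
 * Bulk-read buffer layout (sketch, derived from the code above): @bu->buf
 * holds the raw flash range covering all matched data nodes, so the node for
 * zbranch slot @nn starts at
 *
 *	dn = bu->buf + (bu->zbranch[nn].offs - bu->zbranch[0].offs);
 *
 * which is exactly how 'populate_page()' locates each node.
 */
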
/**
 * ubifs_bulk_read - determine whether to bulk-read and, if so, do it.
 * @page: page from which to start bulk-read.
 *
 * Some flash media are capable of reading sequentially at faster rates. UBIFS
 * bulk-read facility is designed to take advantage of that, by reading in one
 * go consecutive data nodes that are also located consecutively in the same
 * LEB. This function returns %1 if a bulk-read is done and %0 otherwise.
 */
static int ubifs_bulk_read(struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct ubifs_info *c = inode->i_sb->s_fs_info;
	struct ubifs_inode *ui = ubifs_inode(inode);
	pgoff_t index = page->index, last_page_read = ui->last_page_read;
	struct bu_info *bu;
	int err = 0, allocated = 0;

	ui->last_page_read = index;
	if (!c->bulk_read)
		return 0;

	/*
	 * Bulk-read is protected by @ui->ui_mutex, but it is an optimization,
	 * so don't bother if we cannot lock the mutex.
	 */
	if (!mutex_trylock(&ui->ui_mutex))
		return 0;

	if (index != last_page_read + 1) {
		/* Turn off bulk-read if we stop reading sequentially */
		ui->read_in_a_row = 1;
		if (ui->bulk_read)
			ui->bulk_read = 0;
		goto out_unlock;
	}

	if (!ui->bulk_read) {
		ui->read_in_a_row += 1;
		if (ui->read_in_a_row < 3)
			goto out_unlock;
		/* Three reads in a row, so switch on bulk-read */
		ui->bulk_read = 1;
	}

	/*
	 * If possible, try to use pre-allocated bulk-read information, which
	 * is protected by @c->bu_mutex.
	 */
	if (mutex_trylock(&c->bu_mutex))
		bu = &c->bu;
	else {
		bu = kmalloc(sizeof(struct bu_info), GFP_NOFS | __GFP_NOWARN);
		if (!bu)
			goto out_unlock;

		bu->buf = NULL;
		allocated = 1;
	}

	bu->buf_len = c->max_bu_buf_len;
	data_key_init(c, &bu->key, inode->i_ino,
		      page->index << UBIFS_BLOCKS_PER_PAGE_SHIFT);
	err = ubifs_do_bulk_read(c, bu, page);

	if (!allocated)
		mutex_unlock(&c->bu_mutex);
	else
		kfree(bu);

out_unlock:
	mutex_unlock(&ui->ui_mutex);
	return err;
}

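/*
 * Example of the trigger heuristic above (illustrative): sequential
 * 'read_folio' calls bump @read_in_a_row; the third consecutive page turns
 * bulk-read on, and any seek turns it back off:
 *
 *	page N     -> read_in_a_row = 1	(sequence starts after a seek)
 *	page N + 1 -> read_in_a_row = 2
 *	page N + 2 -> read_in_a_row = 3 -> bulk_read = 1
 *	page M != next page             -> bulk_read = 0
 */
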
static int ubifs_read_folio(struct file *file, struct folio *folio)
{
	struct page *page = &folio->page;

	if (ubifs_bulk_read(page))
		return 0;
	do_readpage(page);
	folio_unlock(folio);
	return 0;
}

static int do_writepage(struct page *page, int len)
{
	int err = 0, i, blen;
	unsigned int block;
	void *addr;
	union ubifs_key key;
	struct inode *inode = page->mapping->host;
	struct ubifs_info *c = inode->i_sb->s_fs_info;

#ifdef UBIFS_DEBUG
	struct ubifs_inode *ui = ubifs_inode(inode);
	spin_lock(&ui->ui_lock);
	ubifs_assert(c, page->index <= ui->synced_i_size >> PAGE_SHIFT);
	spin_unlock(&ui->ui_lock);
#endif

	/* Update radix tree tags */
	set_page_writeback(page);

	addr = kmap(page);
	block = page->index << UBIFS_BLOCKS_PER_PAGE_SHIFT;
	i = 0;
	while (len) {
		blen = min_t(int, len, UBIFS_BLOCK_SIZE);
		data_key_init(c, &key, inode->i_ino, block);
		err = ubifs_jnl_write_data(c, inode, &key, addr, blen);
		if (err)
			break;
		if (++i >= UBIFS_BLOCKS_PER_PAGE)
			break;
		block += 1;
		addr += blen;
		len -= blen;
	}
	if (err) {
		SetPageError(page);
		ubifs_err(c, "cannot write page %lu of inode %lu, error %d",
			  page->index, inode->i_ino, err);
		ubifs_ro_mode(c, err);
	}

	ubifs_assert(c, PagePrivate(page));
	if (PageChecked(page))
		release_new_page_budget(c);
	else
		release_existing_page_budget(c);

	atomic_long_dec(&c->dirty_pg_cnt);
	detach_page_private(page);
	ClearPageChecked(page);

	kunmap(page);
	unlock_page(page);
	end_page_writeback(page);
	return err;
}

/*
 * When writing-back dirty inodes, VFS first writes-back pages belonging to the
 * inode, then the inode itself. For UBIFS this may cause a problem. Consider a
 * situation where we have an inode with size 0, then a megabyte of data is
 * appended to the inode, then write-back starts and flushes some amount of the
 * dirty pages, the journal becomes full, commit happens and finishes, and then
 * an unclean reboot happens. When the file system is mounted next time, the
 * inode size would still be 0, but there would be many pages which are beyond
 * the inode size, they would be indexed and consume flash space. Because the
 * journal has been committed, the replay would not be able to detect this
 * situation and correct the inode size. This means UBIFS would have to scan
 * the whole index and correct all inode sizes, which is long and unacceptable.
 *
 * To prevent situations like this, UBIFS writes pages back only if they are
 * within the last synchronized inode size, i.e. the size which has been
 * written to the flash media last time. Otherwise, UBIFS forces inode
 * write-back, thus making sure the on-flash inode contains current inode size,
 * and then keeps writing pages back.
 *
 * Some locking issues need explanation. 'ubifs_writepage()' is first called
 * with the page locked, and it locks @ui_mutex. However, write-back does take
 * inode @i_mutex, which means other VFS operations may be run on this inode at
 * the same time. And the problematic one is truncation to smaller size, from
 * where we have to call 'truncate_setsize()', which first changes
 * @inode->i_size, then drops the truncated pages. And while dropping the
 * pages, it takes the page lock. This means that 'do_truncation()' cannot call
 * 'truncate_setsize()' with @ui_mutex locked, because it would deadlock with
 * 'ubifs_writepage()'. This means that @inode->i_size is changed while
 * @ui_mutex is unlocked.
 *
 * XXX(truncate): with the new truncate sequence this is not true anymore,
 * and the calls to truncate_setsize can be moved around freely. They should
 * be moved to the very end of the truncate sequence.
 *
 * But in 'ubifs_writepage()' we have to guarantee that we do not write beyond
 * inode size. How do we do this if @inode->i_size may become smaller while we
 * are in the middle of 'ubifs_writepage()'? The UBIFS solution is the
 * @ui->ui_isize "shadow" field which UBIFS uses instead of @inode->i_size
 * internally and updates it under @ui_mutex.
 *
 * Q: why do we not worry that if we race with truncation, we may end up with a
 * situation when the inode is truncated while we are in the middle of
 * 'do_writepage()', so we do write beyond inode size?
 * A: If we are in the middle of 'do_writepage()', truncation would block on
 * the page lock and it would not write the truncated inode node to the
 * journal before we have finished.
 */
static int ubifs_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct ubifs_info *c = inode->i_sb->s_fs_info;
	struct ubifs_inode *ui = ubifs_inode(inode);
	loff_t i_size = i_size_read(inode), synced_i_size;
	pgoff_t end_index = i_size >> PAGE_SHIFT;
	int err, len = i_size & (PAGE_SIZE - 1);
	void *kaddr;

	dbg_gen("ino %lu, pg %lu, pg flags %#lx",
		inode->i_ino, page->index, page->flags);
	ubifs_assert(c, PagePrivate(page));

	/* Is the page fully outside @i_size? (truncate in progress) */
	if (page->index > end_index || (page->index == end_index && !len)) {
		err = 0;
		goto out_unlock;
	}

	spin_lock(&ui->ui_lock);
	synced_i_size = ui->synced_i_size;
	spin_unlock(&ui->ui_lock);

	/* Is the page fully inside @i_size? */
	if (page->index < end_index) {
		if (page->index >= synced_i_size >> PAGE_SHIFT) {
			err = inode->i_sb->s_op->write_inode(inode, NULL);
			if (err)
				goto out_redirty;
			/*
			 * The inode has been written, but the write-buffer has
			 * not been synchronized, so in case of an unclean
			 * reboot we may end up with some pages beyond inode
			 * size, but they would be in the journal (because
			 * commit flushes write buffers) and recovery would deal
			 * with this.
			 */
		}
		return do_writepage(page, PAGE_SIZE);
	}

	/*
	 * The page straddles @i_size. It must be zeroed out on each and every
	 * writepage invocation because it may be mmapped. "A file is mapped
	 * in multiples of the page size. For a file that is not a multiple of
	 * the page size, the remaining memory is zeroed when mapped, and
	 * writes to that region are not written out to the file."
	 */
	kaddr = kmap_atomic(page);
	memset(kaddr + len, 0, PAGE_SIZE - len);
	flush_dcache_page(page);
	kunmap_atomic(kaddr);

	if (i_size > synced_i_size) {
		err = inode->i_sb->s_op->write_inode(inode, NULL);
		if (err)
			goto out_redirty;
	}

	return do_writepage(page, len);
out_redirty:
	/*
	 * redirty_page_for_writepage() won't call ubifs_dirty_inode() because
	 * it passes I_DIRTY_PAGES flag while calling __mark_inode_dirty(), so
	 * there is no need to do space budget for dirty inode.
	 */
	redirty_page_for_writepage(wbc, page);
out_unlock:
	unlock_page(page);
	return err;
}

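/*
 * Timeline sketch of the corruption scenario the @synced_i_size checks above
 * prevent (assuming an unclean reboot at the worst moment):
 *
 *	on-flash i_size = 0; 1 MiB is appended in the page cache
 *	write-back flushes data pages  -> data nodes reach the journal
 *	journal fills up, commit runs  -> data nodes reach the index
 *	unclean reboot before the inode is written -> on-flash i_size still 0
 *
 * Replay cannot fix this because the data nodes are already committed, which
 * is why a page beyond @synced_i_size forces 'write_inode()' first.
 */
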
/**
 * do_attr_changes - change inode attributes.
 * @inode: inode to change attributes for
 * @attr: describes attributes to change
 */
static void do_attr_changes(struct inode *inode, const struct iattr *attr)
{
	if (attr->ia_valid & ATTR_UID)
		inode->i_uid = attr->ia_uid;
	if (attr->ia_valid & ATTR_GID)
		inode->i_gid = attr->ia_gid;
	if (attr->ia_valid & ATTR_ATIME)
		inode->i_atime = attr->ia_atime;
	if (attr->ia_valid & ATTR_MTIME)
		inode->i_mtime = attr->ia_mtime;
	if (attr->ia_valid & ATTR_CTIME)
		inode_set_ctime_to_ts(inode, attr->ia_ctime);
	if (attr->ia_valid & ATTR_MODE) {
		umode_t mode = attr->ia_mode;

		if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID))
			mode &= ~S_ISGID;
		inode->i_mode = mode;
	}
}

/**
 * do_truncation - truncate an inode.
 * @c: UBIFS file-system description object
 * @inode: inode to truncate
 * @attr: inode attribute changes description
 *
 * This function implements VFS '->setattr()' call when the inode is truncated
 * to a smaller size. Returns zero in case of success and a negative error code
 * in case of failure.
 */
static int do_truncation(struct ubifs_info *c, struct inode *inode,
			 const struct iattr *attr)
{
	int err;
	struct ubifs_budget_req req;
	loff_t old_size = inode->i_size, new_size = attr->ia_size;
	int offset = new_size & (UBIFS_BLOCK_SIZE - 1), budgeted = 1;
	struct ubifs_inode *ui = ubifs_inode(inode);

	dbg_gen("ino %lu, size %lld -> %lld", inode->i_ino, old_size, new_size);
	memset(&req, 0, sizeof(struct ubifs_budget_req));

	/*
	 * If this is truncation to a smaller size, and we do not truncate on a
	 * block boundary, budget for changing one data block, because the last
	 * block will be re-written.
	 */
	if (new_size & (UBIFS_BLOCK_SIZE - 1))
		req.dirtied_page = 1;

	req.dirtied_ino = 1;
	/* A funny way to budget for the truncation node */
	req.dirtied_ino_d = UBIFS_TRUN_NODE_SZ;
	err = ubifs_budget_space(c, &req);
	if (err) {
		/*
		 * Treat truncations to zero as deletion and always allow them,
		 * just like we do for '->unlink()'.
		 */
		if (new_size || err != -ENOSPC)
			return err;
		budgeted = 0;
	}

	truncate_setsize(inode, new_size);

	if (offset) {
		pgoff_t index = new_size >> PAGE_SHIFT;
		struct page *page;

		page = find_lock_page(inode->i_mapping, index);
		if (page) {
			if (PageDirty(page)) {
				/*
				 * 'ubifs_jnl_truncate()' will try to truncate
				 * the last data node, but it contains
				 * out-of-date data because the page is dirty.
				 * Write the page now, so that
				 * 'ubifs_jnl_truncate()' will see an already
				 * truncated (and up to date) data node.
				 */
				ubifs_assert(c, PagePrivate(page));

				clear_page_dirty_for_io(page);
				if (UBIFS_BLOCKS_PER_PAGE_SHIFT)
					offset = new_size & (PAGE_SIZE - 1);
				err = do_writepage(page, offset);
				put_page(page);
				if (err)
					goto out_budg;
				/*
				 * We could now tell 'ubifs_jnl_truncate()' not
				 * to read the last block.
				 */
			} else {
				/*
				 * We could 'kmap()' the page and pass the data
				 * to 'ubifs_jnl_truncate()' to save it from
				 * having to read it.
				 */
				unlock_page(page);
				put_page(page);
			}
		}
	}

	mutex_lock(&ui->ui_mutex);
	ui->ui_size = inode->i_size;
	/* Truncation changes inode [mc]time */
	inode->i_mtime = inode_set_ctime_current(inode);
	/* Other attributes may be changed at the same time as well */
	do_attr_changes(inode, attr);
	err = ubifs_jnl_truncate(c, inode, old_size, new_size);
	mutex_unlock(&ui->ui_mutex);

out_budg:
	if (budgeted)
		ubifs_release_budget(c, &req);
	else {
		c->bi.nospace = c->bi.nospace_rp = 0;
		smp_wmb();
	}
	return err;
}

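/*
 * For orientation (sketch, derived from the code above): the request built at
 * the top of 'do_truncation()' for a truncation that is not block-aligned
 * ends up as
 *
 *	struct ubifs_budget_req req = {
 *		.dirtied_page  = 1,			(last block re-written)
 *		.dirtied_ino   = 1,
 *		.dirtied_ino_d = UBIFS_TRUN_NODE_SZ,	(the truncation node)
 *	};
 */
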
/**
 * do_setattr - change inode attributes.
 * @c: UBIFS file-system description object
 * @inode: inode to change attributes for
 * @attr: inode attribute changes description
 *
 * This function implements VFS '->setattr()' call for all cases except
 * truncations to smaller size. Returns zero in case of success and a negative
 * error code in case of failure.
 */
static int do_setattr(struct ubifs_info *c, struct inode *inode,
		      const struct iattr *attr)
{
	int err, release;
	loff_t new_size = attr->ia_size;
	struct ubifs_inode *ui = ubifs_inode(inode);
	struct ubifs_budget_req req = { .dirtied_ino = 1,
				.dirtied_ino_d = ALIGN(ui->data_len, 8) };

	err = ubifs_budget_space(c, &req);
	if (err)
		return err;

	if (attr->ia_valid & ATTR_SIZE) {
		dbg_gen("size %lld -> %lld", inode->i_size, new_size);
		truncate_setsize(inode, new_size);
	}

	mutex_lock(&ui->ui_mutex);
	if (attr->ia_valid & ATTR_SIZE) {
		/* Truncation changes inode [mc]time */
		inode->i_mtime = inode_set_ctime_current(inode);
		/* 'truncate_setsize()' changed @i_size, update @ui_size */
		ui->ui_size = inode->i_size;
	}

	do_attr_changes(inode, attr);

	release = ui->dirty;
	if (attr->ia_valid & ATTR_SIZE)
		/*
		 * Inode length changed, so we have to make sure
		 * @I_DIRTY_DATASYNC is set.
		 */
		__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
	else
		mark_inode_dirty_sync(inode);
	mutex_unlock(&ui->ui_mutex);

	if (release)
		ubifs_release_budget(c, &req);
	if (IS_SYNC(inode))
		err = inode->i_sb->s_op->write_inode(inode, NULL);
	return err;
}

int ubifs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
		  struct iattr *attr)
{
	int err;
	struct inode *inode = d_inode(dentry);
	struct ubifs_info *c = inode->i_sb->s_fs_info;

	dbg_gen("ino %lu, mode %#x, ia_valid %#x",
		inode->i_ino, inode->i_mode, attr->ia_valid);
	err = setattr_prepare(&nop_mnt_idmap, dentry, attr);
	if (err)
		return err;

	err = dbg_check_synced_i_size(c, inode);
	if (err)
		return err;

	err = fscrypt_prepare_setattr(dentry, attr);
	if (err)
		return err;

	if ((attr->ia_valid & ATTR_SIZE) && attr->ia_size < inode->i_size)
		/* Truncation to a smaller size */
		err = do_truncation(c, inode, attr);
	else
		err = do_setattr(c, inode, attr);

	return err;
}

static void ubifs_invalidate_folio(struct folio *folio, size_t offset,
				   size_t length)
{
	struct inode *inode = folio->mapping->host;
	struct ubifs_info *c = inode->i_sb->s_fs_info;

	ubifs_assert(c, folio_test_private(folio));
	if (offset || length < folio_size(folio))
		/* Partial folio remains dirty */
		return;

	if (folio_test_checked(folio))
		release_new_page_budget(c);
	else
		release_existing_page_budget(c);

	atomic_long_dec(&c->dirty_pg_cnt);
	folio_detach_private(folio);
	folio_clear_checked(folio);
}

int ubifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct inode *inode = file->f_mapping->host;
	struct ubifs_info *c = inode->i_sb->s_fs_info;
	int err;

	dbg_gen("syncing inode %lu", inode->i_ino);

	if (c->ro_mount)
		/*
		 * For some really strange reason VFS does not filter out
		 * 'fsync()' for R/O mounted file-systems as of 2.6.39.
		 */
		return 0;

	err = file_write_and_wait_range(file, start, end);
	if (err)
		return err;
	inode_lock(inode);

	/* Synchronize the inode unless this is a 'datasync()' call. */
	if (!datasync || (inode->i_state & I_DIRTY_DATASYNC)) {
		err = inode->i_sb->s_op->write_inode(inode, NULL);
		if (err)
			goto out;
	}

	/*
	 * Nodes related to this inode may still sit in a write-buffer. Flush
	 * them.
	 */
	err = ubifs_sync_wbufs_by_inode(c, inode);
out:
	inode_unlock(inode);
	return err;
}

/**
 * mctime_update_needed - check if mtime or ctime update is needed.
 * @inode: the inode to do the check for
 * @now: current time
 *
 * This helper function checks if the inode mtime/ctime should be updated or
 * not. If current values of the time-stamps are within the UBIFS inode time
 * granularity, they are not updated. This is an optimization.
 */
static inline int mctime_update_needed(const struct inode *inode,
				       const struct timespec64 *now)
{
	struct timespec64 ctime = inode_get_ctime(inode);

	if (!timespec64_equal(&inode->i_mtime, now) ||
	    !timespec64_equal(&ctime, now))
		return 1;
	return 0;
}

/**
 * ubifs_update_time - update time of inode.
 * @inode: inode to update
 * @flags: selects which time fields of @inode to update
 *
 * This function updates time of the inode.
 */
int ubifs_update_time(struct inode *inode, int flags)
{
	struct ubifs_inode *ui = ubifs_inode(inode);
	struct ubifs_info *c = inode->i_sb->s_fs_info;
	struct ubifs_budget_req req = { .dirtied_ino = 1,
			.dirtied_ino_d = ALIGN(ui->data_len, 8) };
	int err, release;

	if (!IS_ENABLED(CONFIG_UBIFS_ATIME_SUPPORT)) {
		generic_update_time(inode, flags);
		return 0;
	}

	err = ubifs_budget_space(c, &req);
	if (err)
		return err;

	mutex_lock(&ui->ui_mutex);
	inode_update_timestamps(inode, flags);
	release = ui->dirty;
	__mark_inode_dirty(inode, I_DIRTY_SYNC);
	mutex_unlock(&ui->ui_mutex);
	if (release)
		ubifs_release_budget(c, &req);
	return 0;
}

/**
 * update_mctime - update mtime and ctime of an inode.
 * @inode: inode to update
 *
 * This function updates mtime and ctime of the inode if they differ from the
 * current time. Returns zero in case of success and a negative error code in
 * case of failure.
 */
static int update_mctime(struct inode *inode)
{
	struct timespec64 now = current_time(inode);
	struct ubifs_inode *ui = ubifs_inode(inode);
	struct ubifs_info *c = inode->i_sb->s_fs_info;

	if (mctime_update_needed(inode, &now)) {
		int err, release;
		struct ubifs_budget_req req = { .dirtied_ino = 1,
				.dirtied_ino_d = ALIGN(ui->data_len, 8) };

		err = ubifs_budget_space(c, &req);
		if (err)
			return err;

		mutex_lock(&ui->ui_mutex);
		inode->i_mtime = inode_set_ctime_current(inode);
		release = ui->dirty;
		mark_inode_dirty_sync(inode);
		mutex_unlock(&ui->ui_mutex);
		if (release)
			ubifs_release_budget(c, &req);
	}

	return 0;
}

static ssize_t ubifs_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	int err = update_mctime(file_inode(iocb->ki_filp));

	if (err)
		return err;

	return generic_file_write_iter(iocb, from);
}

static bool ubifs_dirty_folio(struct address_space *mapping,
			      struct folio *folio)
{
	bool ret;
	struct ubifs_info *c = mapping->host->i_sb->s_fs_info;

	ret = filemap_dirty_folio(mapping, folio);
	/*
	 * An attempt to dirty a page without budgeting for it - should not
	 * happen.
	 */
	ubifs_assert(c, ret == false);
	return ret;
}

static bool ubifs_release_folio(struct folio *folio, gfp_t unused_gfp_flags)
{
	struct inode *inode = folio->mapping->host;
	struct ubifs_info *c = inode->i_sb->s_fs_info;

	if (folio_test_writeback(folio))
		return false;

	/*
	 * The folio is private but not dirty, weird? There is one known way
	 * this can happen: 'ubifs_writepage()' skipped the folio because its
	 * index is beyond @i_size (for example, the file was truncated by
	 * another process A), and then the folio was invalidated by an
	 * fadvise64() syscall before process A's truncation dropped it.
	 */
	ubifs_assert(c, folio_test_private(folio));
	if (folio_test_checked(folio))
		release_new_page_budget(c);
	else
		release_existing_page_budget(c);

	atomic_long_dec(&c->dirty_pg_cnt);
	folio_detach_private(folio);
	folio_clear_checked(folio);
	return true;
}

/*
 * mmap()d file has taken write protection fault and is being made writable.
 * UBIFS must ensure page is budgeted for.
 */
static vm_fault_t ubifs_vm_page_mkwrite(struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vmf->vma->vm_file);
	struct ubifs_info *c = inode->i_sb->s_fs_info;
	struct timespec64 now = current_time(inode);
	struct ubifs_budget_req req = { .new_page = 1 };
	int err, update_time;

	dbg_gen("ino %lu, pg %lu, i_size %lld", inode->i_ino, page->index,
		i_size_read(inode));
	ubifs_assert(c, !c->ro_media && !c->ro_mount);

	if (unlikely(c->ro_error))
		return VM_FAULT_SIGBUS; /* -EROFS */

	/*
	 * We have not locked @page so far so we may budget for changing the
	 * page. Note, we cannot do this after we locked the page, because
	 * budgeting may cause write-back which would cause deadlock.
	 *
	 * At the moment we do not know whether the page is dirty or not, so we
	 * assume that it is not and budget for a new page. We could look at
	 * the @PG_private flag and figure this out, but we may race with write
	 * back and the page state may change by the time we lock it, so this
	 * would need additional care. We do not bother with this at the
	 * moment, although it might be a good idea to do. Instead, we allocate
	 * budget for a new page and amend it later on if the page was in fact
	 * dirty.
	 *
	 * The budgeting-related logic of this function is similar to what we
	 * do in 'ubifs_write_begin()' and 'ubifs_write_end()'. Glance there
	 * for more comments.
	 */
	update_time = mctime_update_needed(inode, &now);
	if (update_time)
		/*
		 * We have to change inode time stamp which requires extra
		 * budgeting.
		 */
		req.dirtied_ino = 1;

	err = ubifs_budget_space(c, &req);
	if (unlikely(err)) {
		if (err == -ENOSPC)
			ubifs_warn(c, "out of space for mmapped file (inode number %lu)",
				   inode->i_ino);
		return VM_FAULT_SIGBUS;
	}

	lock_page(page);
	if (unlikely(page->mapping != inode->i_mapping ||
		     page_offset(page) > i_size_read(inode))) {
		/* Page got truncated out from underneath us */
		goto sigbus;
	}

	if (PagePrivate(page))
		release_new_page_budget(c);
	else {
		if (!PageChecked(page))
			ubifs_convert_page_budget(c);
		attach_page_private(page, (void *)1);
		atomic_long_inc(&c->dirty_pg_cnt);
		__set_page_dirty_nobuffers(page);
	}

	if (update_time) {
		int release;
		struct ubifs_inode *ui = ubifs_inode(inode);

		mutex_lock(&ui->ui_mutex);
		inode->i_mtime = inode_set_ctime_current(inode);
		release = ui->dirty;
		mark_inode_dirty_sync(inode);
		mutex_unlock(&ui->ui_mutex);
		if (release)
			ubifs_release_dirty_inode_budget(c, ui);
	}

	wait_for_stable_page(page);
	return VM_FAULT_LOCKED;

sigbus:
	unlock_page(page);
	ubifs_release_budget(c, &req);
	return VM_FAULT_SIGBUS;
}

static const struct vm_operations_struct ubifs_file_vm_ops = {
	.fault        = filemap_fault,
	.map_pages    = filemap_map_pages,
	.page_mkwrite = ubifs_vm_page_mkwrite,
};

static int ubifs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	int err;

	err = generic_file_mmap(file, vma);
	if (err)
		return err;
	vma->vm_ops = &ubifs_file_vm_ops;

	if (IS_ENABLED(CONFIG_UBIFS_ATIME_SUPPORT))
		file_accessed(file);

	return 0;
}

static const char *ubifs_get_link(struct dentry *dentry,
				  struct inode *inode,
				  struct delayed_call *done)
{
	struct ubifs_inode *ui = ubifs_inode(inode);

	if (!IS_ENCRYPTED(inode))
		return ui->data;

	if (!dentry)
		return ERR_PTR(-ECHILD);

	return fscrypt_get_symlink(inode, ui->data, ui->data_len, done);
}

static int ubifs_symlink_getattr(struct mnt_idmap *idmap,
				 const struct path *path, struct kstat *stat,
				 u32 request_mask, unsigned int query_flags)
{
	ubifs_getattr(idmap, path, stat, request_mask, query_flags);

	if (IS_ENCRYPTED(d_inode(path->dentry)))
		return fscrypt_symlink_getattr(path, stat);
	return 0;
}

const struct address_space_operations ubifs_file_address_operations = {
	.read_folio       = ubifs_read_folio,
	.writepage        = ubifs_writepage,
	.write_begin      = ubifs_write_begin,
	.write_end        = ubifs_write_end,
	.invalidate_folio = ubifs_invalidate_folio,
	.dirty_folio      = ubifs_dirty_folio,
	.migrate_folio    = filemap_migrate_folio,
	.release_folio    = ubifs_release_folio,
};

const struct inode_operations ubifs_file_inode_operations = {
	.setattr      = ubifs_setattr,
	.getattr      = ubifs_getattr,
	.listxattr    = ubifs_listxattr,
	.update_time  = ubifs_update_time,
	.fileattr_get = ubifs_fileattr_get,
	.fileattr_set = ubifs_fileattr_set,
};

const struct inode_operations ubifs_symlink_inode_operations = {
	.get_link    = ubifs_get_link,
	.setattr     = ubifs_setattr,
	.getattr     = ubifs_symlink_getattr,
	.listxattr   = ubifs_listxattr,
	.update_time = ubifs_update_time,
};

const struct file_operations ubifs_file_operations = {
	.llseek         = generic_file_llseek,
	.read_iter      = generic_file_read_iter,
	.write_iter     = ubifs_write_iter,
	.mmap           = ubifs_file_mmap,
	.fsync          = ubifs_fsync,
	.unlocked_ioctl = ubifs_ioctl,
	.splice_read    = filemap_splice_read,
	.splice_write   = iter_file_splice_write,
	.open           = fscrypt_file_open,
#ifdef CONFIG_COMPAT
	.compat_ioctl   = ubifs_compat_ioctl,
#endif
};