// SPDX-License-Identifier: GPL-2.0-or-later
/* AFS filesystem file handling
 *
 * Copyright (C) 2002, 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/gfp.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/netfs.h>
#include "internal.h"

static int afs_file_mmap(struct file *file, struct vm_area_struct *vma);
static int afs_symlink_read_folio(struct file *file, struct folio *folio);
static void afs_invalidate_folio(struct folio *folio, size_t offset,
				 size_t length);
static bool afs_release_folio(struct folio *folio, gfp_t gfp_flags);

static ssize_t afs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter);
static void afs_vm_open(struct vm_area_struct *area);
static void afs_vm_close(struct vm_area_struct *area);
static vm_fault_t afs_vm_map_pages(struct vm_fault *vmf, pgoff_t start_pgoff, pgoff_t end_pgoff);

const struct file_operations afs_file_operations = {
	.open = afs_open,
	.release = afs_release,
	.llseek = generic_file_llseek,
	.read_iter = afs_file_read_iter,
	.write_iter = afs_file_write,
	.mmap = afs_file_mmap,
	.splice_read = generic_file_splice_read,
	.splice_write = iter_file_splice_write,
	.fsync = afs_fsync,
	.lock = afs_lock,
	.flock = afs_flock,
};

const struct inode_operations afs_file_inode_operations = {
	.getattr = afs_getattr,
	.setattr = afs_setattr,
	.permission = afs_permission,
};

const struct address_space_operations afs_file_aops = {
	.read_folio = netfs_read_folio,
	.readahead = netfs_readahead,
	.dirty_folio = afs_dirty_folio,
	.launder_folio = afs_launder_folio,
	.release_folio = afs_release_folio,
	.invalidate_folio = afs_invalidate_folio,
	.write_begin = afs_write_begin,
	.write_end = afs_write_end,
	.writepage = afs_writepage,
	.writepages = afs_writepages,
};

const struct address_space_operations afs_symlink_aops = {
	.read_folio = afs_symlink_read_folio,
	.release_folio = afs_release_folio,
	.invalidate_folio = afs_invalidate_folio,
};

static const struct vm_operations_struct afs_vm_ops = {
	.open = afs_vm_open,
	.close = afs_vm_close,
	.fault = filemap_fault,
	.map_pages = afs_vm_map_pages,
	.page_mkwrite = afs_page_mkwrite,
};

/*
 * Discard a pin on a writeback key.
 */
void afs_put_wb_key(struct afs_wb_key *wbk)
{
	if (wbk && refcount_dec_and_test(&wbk->usage)) {
		key_put(wbk->key);
		kfree(wbk);
	}
}

/*
 * Cache key for writeback.
 */
int afs_cache_wb_key(struct afs_vnode *vnode, struct afs_file *af)
{
	struct afs_wb_key *wbk, *p;

	wbk = kzalloc(sizeof(struct afs_wb_key), GFP_KERNEL);
	if (!wbk)
		return -ENOMEM;
	refcount_set(&wbk->usage, 2);
	wbk->key = af->key;

	spin_lock(&vnode->wb_lock);
	list_for_each_entry(p, &vnode->wb_keys, vnode_link) {
		if (p->key == wbk->key)
			goto found;
	}

	key_get(wbk->key);
	list_add_tail(&wbk->vnode_link, &vnode->wb_keys);
	spin_unlock(&vnode->wb_lock);
	af->wb = wbk;
	return 0;

found:
	refcount_inc(&p->usage);
	spin_unlock(&vnode->wb_lock);
	af->wb = p;
	kfree(wbk);
	return 0;
}

/*
 * open an AFS file or directory and attach a key to it
 */
int afs_open(struct inode *inode, struct file *file)
{
	struct afs_vnode *vnode = AFS_FS_I(inode);
	struct afs_file *af;
	struct key *key;
	int ret;

	_enter("{%llx:%llu},", vnode->fid.vid, vnode->fid.vnode);

	key = afs_request_key(vnode->volume->cell);
	if (IS_ERR(key)) {
		ret = PTR_ERR(key);
		goto error;
	}

	af = kzalloc(sizeof(*af), GFP_KERNEL);
	if (!af) {
		ret = -ENOMEM;
		goto error_key;
	}
	af->key = key;

	ret = afs_validate(vnode, key);
	if (ret < 0)
		goto error_af;

	if (file->f_mode & FMODE_WRITE) {
		ret = afs_cache_wb_key(vnode, af);
		if (ret < 0)
			goto error_af;
	}

	if (file->f_flags & O_TRUNC)
		set_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags);

	fscache_use_cookie(afs_vnode_cache(vnode), file->f_mode & FMODE_WRITE);

	file->private_data = af;
	_leave(" = 0");
	return 0;

error_af:
	kfree(af);
error_key:
	key_put(key);
error:
	_leave(" = %d", ret);
	return ret;
}

/*
 * release an AFS file or directory and discard its key
 */
int afs_release(struct inode *inode, struct file *file)
{
	struct afs_vnode_cache_aux aux;
	struct afs_vnode *vnode = AFS_FS_I(inode);
	struct afs_file *af = file->private_data;
	loff_t i_size;
	int ret = 0;

	_enter("{%llx:%llu},", vnode->fid.vid, vnode->fid.vnode);

	if ((file->f_mode & FMODE_WRITE))
		ret = vfs_fsync(file, 0);

	file->private_data = NULL;
	if (af->wb)
		afs_put_wb_key(af->wb);

	if ((file->f_mode & FMODE_WRITE)) {
		i_size = i_size_read(&vnode->netfs.inode);
		afs_set_cache_aux(vnode, &aux);
		fscache_unuse_cookie(afs_vnode_cache(vnode), &aux, &i_size);
	} else {
		fscache_unuse_cookie(afs_vnode_cache(vnode), NULL, NULL);
	}

	key_put(af->key);
	kfree(af);
	afs_prune_wb_keys(vnode);
	_leave(" = %d", ret);
	return ret;
}

/*
 * Allocate a new read record.
 */
struct afs_read *afs_alloc_read(gfp_t gfp)
{
	struct afs_read *req;

	req = kzalloc(sizeof(struct afs_read), gfp);
	if (req)
		refcount_set(&req->usage, 1);

	return req;
}

/*
 * Dispose of a ref to a read record.
 */
void afs_put_read(struct afs_read *req)
{
	if (refcount_dec_and_test(&req->usage)) {
		if (req->cleanup)
			req->cleanup(req);
		key_put(req->key);
		kfree(req);
	}
}

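/*
 * Pass the result of a fetch to whoever is waiting on it: terminate the netfs
 * subrequest with the error or the amount of data transferred, or, failing
 * that, invoke the read record's done callback.
 */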
static void afs_fetch_data_notify(struct afs_operation *op)
{
	struct afs_read *req = op->fetch.req;
	struct netfs_io_subrequest *subreq = req->subreq;
	int error = op->error;

	if (error == -ECONNABORTED)
		error = afs_abort_to_error(op->ac.abort_code);
	req->error = error;

	if (subreq) {
		__set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
		netfs_subreq_terminated(subreq, error ?: req->actual_len, false);
		req->subreq = NULL;
	} else if (req->done) {
		req->done(req);
	}
}

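/*
 * Handle successful completion of a data fetch: commit the returned vnode
 * status, update the fetch statistics and notify the waiter.
 */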
static void afs_fetch_data_success(struct afs_operation *op)
{
	struct afs_vnode *vnode = op->file[0].vnode;

	_enter("op=%08x", op->debug_id);
	afs_vnode_commit_status(op, &op->file[0]);
	afs_stat_v(vnode, n_fetches);
	atomic_long_add(op->fetch.req->actual_len, &op->net->n_fetch_bytes);
	afs_fetch_data_notify(op);
}

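/*
 * Dispose of the operation's reference on the read record, recording the
 * operation's error in the record first.
 */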
static void afs_fetch_data_put(struct afs_operation *op)
{
	op->fetch.req->error = op->error;
	afs_put_read(op->fetch.req);
}

static const struct afs_operation_ops afs_fetch_data_operation = {
	.issue_afs_rpc = afs_fs_fetch_data,
	.issue_yfs_rpc = yfs_fs_fetch_data,
	.success = afs_fetch_data_success,
	.aborted = afs_check_for_remote_deletion,
	.failed = afs_fetch_data_notify,
	.put = afs_fetch_data_put,
};

/*
 * Fetch file data from the volume.
 */
int afs_fetch_data(struct afs_vnode *vnode, struct afs_read *req)
{
	struct afs_operation *op;

	_enter("%s{%llx:%llu.%u},%x,,,",
	       vnode->volume->name,
	       vnode->fid.vid,
	       vnode->fid.vnode,
	       vnode->fid.unique,
	       key_serial(req->key));

	op = afs_alloc_operation(req->key, vnode->volume);
	if (IS_ERR(op)) {
		if (req->subreq)
			netfs_subreq_terminated(req->subreq, PTR_ERR(op), false);
		return PTR_ERR(op);
	}

	afs_op_set_vnode(op, 0, vnode);

	op->fetch.req = afs_get_read(req);
	op->ops = &afs_fetch_data_operation;
	return afs_do_sync_operation(op);
}

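/*
 * Issue a read on behalf of the netfs helper library: wrap the subrequest in
 * an afs_read record aimed at the pagecache and fetch the data from the
 * server.
 */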
static void afs_issue_read(struct netfs_io_subrequest *subreq)
{
	struct afs_vnode *vnode = AFS_FS_I(subreq->rreq->inode);
	struct afs_read *fsreq;

	fsreq = afs_alloc_read(GFP_NOFS);
	if (!fsreq)
		return netfs_subreq_terminated(subreq, -ENOMEM, false);

	fsreq->subreq = subreq;
	fsreq->pos = subreq->start + subreq->transferred;
	fsreq->len = subreq->len - subreq->transferred;
	fsreq->key = key_get(subreq->rreq->netfs_priv);
	fsreq->vnode = vnode;
	fsreq->iter = &fsreq->def_iter;

	iov_iter_xarray(&fsreq->def_iter, ITER_DEST,
			&fsreq->vnode->netfs.inode.i_mapping->i_pages,
			fsreq->pos, fsreq->len);

	afs_fetch_data(fsreq->vnode, fsreq);
	afs_put_read(fsreq);
}

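/*
 * Read the content of a symlink directly from the server into a folio.
 */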
static int afs_symlink_read_folio(struct file *file, struct folio *folio)
{
	struct afs_vnode *vnode = AFS_FS_I(folio->mapping->host);
	struct afs_read *fsreq;
	int ret;

	fsreq = afs_alloc_read(GFP_NOFS);
	if (!fsreq)
		return -ENOMEM;

	fsreq->pos = folio_pos(folio);
	fsreq->len = folio_size(folio);
	fsreq->vnode = vnode;
	fsreq->iter = &fsreq->def_iter;
	iov_iter_xarray(&fsreq->def_iter, ITER_DEST, &folio->mapping->i_pages,
			fsreq->pos, fsreq->len);

	ret = afs_fetch_data(fsreq->vnode, fsreq);
	if (ret == 0)
		folio_mark_uptodate(folio);
	folio_unlock(folio);
	return ret;
}

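/*
 * Initialise a netfs read request, pinning the originating file's key for the
 * duration of the request.
 */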
static int afs_init_request(struct netfs_io_request *rreq, struct file *file)
{
	rreq->netfs_priv = key_get(afs_file_key(file));
	return 0;
}

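/*
 * Begin a read operation against the local cache, if caching is enabled.
 */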
static int afs_begin_cache_operation(struct netfs_io_request *rreq)
{
#ifdef CONFIG_AFS_FSCACHE
	struct afs_vnode *vnode = AFS_FS_I(rreq->inode);

	return fscache_begin_read_operation(&rreq->cache_resources,
					    afs_vnode_cache(vnode));
#else
	return -ENOBUFS;
#endif
}

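/*
 * Check, before a write is begun, that the vnode hasn't been deleted on the
 * server.
 */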
static int afs_check_write_begin(struct file *file, loff_t pos, unsigned len,
				 struct folio **foliop, void **_fsdata)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(file));

	return test_bit(AFS_VNODE_DELETED, &vnode->flags) ? -ESTALE : 0;
}

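/*
 * Clean up a netfs read request, discarding the reference on the key that was
 * taken when the request was initialised.
 */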
static void afs_free_request(struct netfs_io_request *rreq)
{
	key_put(rreq->netfs_priv);
}

const struct netfs_request_ops afs_req_ops = {
	.init_request = afs_init_request,
	.free_request = afs_free_request,
	.begin_cache_operation = afs_begin_cache_operation,
	.check_write_begin = afs_check_write_begin,
	.issue_read = afs_issue_read,
};

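/*
 * Write back an inode: all there is to do here is to discard any pin that
 * writeback may hold on the cache cookie.
 */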
int afs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	fscache_unpin_writeback(wbc, afs_vnode_cache(AFS_FS_I(inode)));
	return 0;
}

/*
 * Adjust the dirty region of the page on truncation or full invalidation,
 * getting rid of the markers altogether if the region is entirely invalidated.
 */
static void afs_invalidate_dirty(struct folio *folio, size_t offset,
				 size_t length)
{
	struct afs_vnode *vnode = AFS_FS_I(folio_inode(folio));
	unsigned long priv;
	unsigned int f, t, end = offset + length;

	priv = (unsigned long)folio_get_private(folio);

	/* we clean up only if the entire page is being invalidated */
	if (offset == 0 && length == folio_size(folio))
		goto full_invalidate;

	/* If the page was dirtied by page_mkwrite(), the PTE stays writable
	 * and we don't get another notification to tell us to expand it
	 * again.
	 */
	if (afs_is_folio_dirty_mmapped(priv))
		return;

	/* We may need to shorten the dirty region */
	f = afs_folio_dirty_from(folio, priv);
	t = afs_folio_dirty_to(folio, priv);

	if (t <= offset || f >= end)
		return; /* Doesn't overlap */

	if (f < offset && t > end)
		return; /* Splits the dirty region - just absorb it */

	if (f >= offset && t <= end)
		goto undirty;

	if (f < offset)
		t = offset;
	else
		f = end;
	if (f == t)
		goto undirty;

	priv = afs_folio_dirty(folio, f, t);
	folio_change_private(folio, (void *)priv);
	trace_afs_folio_dirty(vnode, tracepoint_string("trunc"), folio);
	return;

undirty:
	trace_afs_folio_dirty(vnode, tracepoint_string("undirty"), folio);
	folio_clear_dirty_for_io(folio);
full_invalidate:
	trace_afs_folio_dirty(vnode, tracepoint_string("inval"), folio);
	folio_detach_private(folio);
}

/*
 * invalidate part or all of a page
 * - release a page and clean up its private data if offset is 0 (indicating
 *   the entire page)
 */
static void afs_invalidate_folio(struct folio *folio, size_t offset,
				 size_t length)
{
	_enter("{%lu},%zu,%zu", folio->index, offset, length);

	BUG_ON(!folio_test_locked(folio));

	if (folio_get_private(folio))
		afs_invalidate_dirty(folio, offset, length);

	folio_wait_fscache(folio);
	_leave("");
}

/*
 * release a page and clean up its private state if it's not busy
 * - return true if the page can now be released, false if not
 */
static bool afs_release_folio(struct folio *folio, gfp_t gfp)
{
	struct afs_vnode *vnode = AFS_FS_I(folio_inode(folio));

	_enter("{{%llx:%llu}[%lu],%lx},%x",
	       vnode->fid.vid, vnode->fid.vnode, folio_index(folio), folio->flags,
	       gfp);

	/* deny if folio is being written to the cache and the caller hasn't
	 * elected to wait */
#ifdef CONFIG_AFS_FSCACHE
	if (folio_test_fscache(folio)) {
		if (current_is_kswapd() || !(gfp & __GFP_FS))
			return false;
		folio_wait_fscache(folio);
	}
	fscache_note_page_release(afs_vnode_cache(vnode));
#endif

	if (folio_test_private(folio)) {
		trace_afs_folio_dirty(vnode, tracepoint_string("rel"), folio);
		folio_detach_private(folio);
	}

	/* Indicate that the folio can be released */
	_leave(" = T");
	return true;
}

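/*
 * Note that a vnode has been memory-mapped: on the first mapping, add it to
 * the cell's list of mmapped vnodes so that callback breaks can be propagated
 * to it.
 */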
static void afs_add_open_mmap(struct afs_vnode *vnode)
{
	if (atomic_inc_return(&vnode->cb_nr_mmap) == 1) {
		down_write(&vnode->volume->cell->fs_open_mmaps_lock);

		if (list_empty(&vnode->cb_mmap_link))
			list_add_tail(&vnode->cb_mmap_link,
				      &vnode->volume->cell->fs_open_mmaps);

		up_write(&vnode->volume->cell->fs_open_mmaps_lock);
	}
}

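/*
 * Note the removal of a mapping on a vnode: when the last mapping goes away,
 * take the vnode off the cell's list and flush any outstanding callback work.
 */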
static void afs_drop_open_mmap(struct afs_vnode *vnode)
{
	if (!atomic_dec_and_test(&vnode->cb_nr_mmap))
		return;

	down_write(&vnode->volume->cell->fs_open_mmaps_lock);

	if (atomic_read(&vnode->cb_nr_mmap) == 0)
		list_del_init(&vnode->cb_mmap_link);

	up_write(&vnode->volume->cell->fs_open_mmaps_lock);
	flush_work(&vnode->cb_work);
}

/*
 * Handle setting up a memory mapping on an AFS file.
 */
static int afs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
	int ret;

	afs_add_open_mmap(vnode);

	ret = generic_file_mmap(file, vma);
	if (ret == 0)
		vma->vm_ops = &afs_vm_ops;
	else
		afs_drop_open_mmap(vnode);
	return ret;
}

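/*
 * Handle the duplication of a memory mapping on an AFS file.
 */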
static void afs_vm_open(struct vm_area_struct *vma)
{
	afs_add_open_mmap(AFS_FS_I(file_inode(vma->vm_file)));
}

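/*
 * Handle the removal of a memory mapping on an AFS file.
 */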
static void afs_vm_close(struct vm_area_struct *vma)
{
	afs_drop_open_mmap(AFS_FS_I(file_inode(vma->vm_file)));
}

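/*
 * Map pages around a fault, revalidating the vnode first and translating any
 * validation failure into an appropriate fault status.
 */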
static vm_fault_t afs_vm_map_pages(struct vm_fault *vmf, pgoff_t start_pgoff, pgoff_t end_pgoff)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(vmf->vma->vm_file));
	struct afs_file *af = vmf->vma->vm_file->private_data;

	switch (afs_validate(vnode, af->key)) {
	case 0:
		return filemap_map_pages(vmf, start_pgoff, end_pgoff);
	case -ENOMEM:
		return VM_FAULT_OOM;
	case -EINTR:
	case -ERESTARTSYS:
		return VM_FAULT_RETRY;
	case -ESTALE:
	default:
		return VM_FAULT_SIGBUS;
	}
}

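/*
 * Read from an AFS file, revalidating the vnode against the server before
 * handing off to the generic read path.
 */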
static ssize_t afs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(iocb->ki_filp));
	struct afs_file *af = iocb->ki_filp->private_data;
	int ret;

	ret = afs_validate(vnode, af->key);
	if (ret < 0)
		return ret;

	return generic_file_read_iter(iocb, iter);
}