Lines in fs/nfs/write.c (Linux NFS client write path) matching the search query +full:eye +full:- +full:src. Each entry below gives the line number in the file, the matched source line, and the enclosing function.
1 // SPDX-License-Identifier: GPL-2.0-only
23 #include <linux/backing-dev.h>
86 INIT_LIST_HEAD(&p->pages); in nfs_commitdata_alloc()
108 p->rw_mode = FMODE_WRITE; in nfs_writehdr_alloc()
125 ioc->complete = complete; in nfs_io_completion_init()
126 ioc->data = data; in nfs_io_completion_init()
127 kref_init(&ioc->refcount); in nfs_io_completion_init()
134 ioc->complete(ioc->data); in nfs_io_completion_release()
141 kref_get(&ioc->refcount); in nfs_io_completion_get()
147 kref_put(&ioc->refcount, nfs_io_completion_release); in nfs_io_completion_put()
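The four helpers above (lines 125-147) give nfs_io_completion kref semantics: the stored callback fires exactly once, when the final reference is dropped. A minimal userspace sketch of the same pattern, using C11 atomics in place of the kernel's kref; all names here are illustrative, not the kernel's:

#include <stdatomic.h>
#include <stdio.h>

struct io_completion {
        atomic_int refcount;
        void (*complete)(void *data);
        void *data;
};

static void ioc_init(struct io_completion *ioc,
                     void (*complete)(void *), void *data)
{
        ioc->complete = complete;
        ioc->data = data;
        atomic_init(&ioc->refcount, 1);         /* like kref_init() */
}

static void ioc_get(struct io_completion *ioc)
{
        atomic_fetch_add(&ioc->refcount, 1);    /* like kref_get() */
}

static void ioc_put(struct io_completion *ioc)
{
        /* like kref_put(): run the callback when the last ref drops */
        if (atomic_fetch_sub(&ioc->refcount, 1) == 1)
                ioc->complete(ioc->data);
}

static void say_done(void *data)
{
        printf("%s\n", (const char *)data);
}

int main(void)
{
        struct io_completion ioc;

        ioc_init(&ioc, say_done, "all writes complete");
        ioc_get(&ioc);          /* one in-flight write */
        ioc_put(&ioc);          /* that write finishes */
        ioc_put(&ioc);          /* initial ref; callback fires here */
        return 0;
}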
153 if (!test_and_set_bit(PG_INODE_REF, &req->wb_flags)) { in nfs_page_set_inode_ref()
154 kref_get(&req->wb_kref); in nfs_page_set_inode_ref()
155 atomic_long_inc(&NFS_I(inode)->nrequests); in nfs_page_set_inode_ref()
164 if (!test_bit(PG_REMOVE, &req->wb_flags)) in nfs_cancel_remove_inode()
169 if (test_and_clear_bit(PG_REMOVE, &req->wb_flags)) in nfs_cancel_remove_inode()
181 * nfs_folio_find_private_request - find head request associated with a folio
195 spin_lock(&mapping->private_lock); in nfs_folio_find_private_request()
198 WARN_ON_ONCE(req->wb_head != req); in nfs_folio_find_private_request()
199 kref_get(&req->wb_kref); in nfs_folio_find_private_request()
201 spin_unlock(&mapping->private_lock); in nfs_folio_find_private_request()
207 struct inode *inode = folio_file_mapping(folio)->host; in nfs_folio_find_swap_request()
212 mutex_lock(&nfsi->commit_mutex); in nfs_folio_find_swap_request()
217 WARN_ON_ONCE(req->wb_head != req); in nfs_folio_find_swap_request()
218 kref_get(&req->wb_kref); in nfs_folio_find_swap_request()
221 mutex_unlock(&nfsi->commit_mutex); in nfs_folio_find_swap_request()
226 * nfs_folio_find_head_request - find head request associated with a folio
243 struct inode *inode = folio_file_mapping(folio)->host; in nfs_folio_find_and_lock_request()
275 struct inode *inode = folio_file_mapping(folio)->host; in nfs_grow_file()
279 spin_lock(&inode->i_lock); in nfs_grow_file()
281 end_index = ((i_size - 1) >> folio_shift(folio)) << folio_order(folio); in nfs_grow_file()
289 NFS_I(inode)->cache_validity &= ~NFS_INO_INVALID_SIZE; in nfs_grow_file()
292 spin_unlock(&inode->i_lock); in nfs_grow_file()
299 struct inode *inode = mapping->host; in nfs_set_pageerror()
301 nfs_zap_mapping(mapping->host, mapping); in nfs_set_pageerror()
303 spin_lock(&inode->i_lock); in nfs_set_pageerror()
307 spin_unlock(&inode->i_lock); in nfs_set_pageerror()
316 if (mapping->host) in nfs_mapping_set_error()
317 errseq_set(&mapping->host->i_sb->s_wb_err, in nfs_mapping_set_error()
318 error == -ENOSPC ? -ENOSPC : -EIO); in nfs_mapping_set_error()
324 * @head - head request of page group
325 * @page_offset - offset into page
342 if (page_offset >= req->wb_pgbase && in nfs_page_group_search_locked()
343 page_offset < (req->wb_pgbase + req->wb_bytes)) in nfs_page_group_search_locked()
346 req = req->wb_this_page; in nfs_page_group_search_locked()
354 * @head - head request of page group
368 tmp = nfs_page_group_search_locked(req->wb_head, pos); in nfs_page_group_covers_page()
371 pos = tmp->wb_pgbase + tmp->wb_bytes; in nfs_page_group_covers_page()
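The two loops above (lines 342-371) decide whether a page group's subrequests cover the whole page: start at offset 0, find a request containing the current position, jump to that request's end, and repeat until the page length is reached or a hole is found. A hedged, self-contained sketch of the same coverage walk; the struct and names are illustrative, not the kernel's:

#include <stdbool.h>
#include <stdio.h>

struct range {          /* stand-in for struct nfs_page's wb_pgbase/wb_bytes */
        unsigned int base;
        unsigned int bytes;
};

/* like nfs_page_group_search_locked(): find a range containing pos */
static const struct range *search(const struct range *r, int n, unsigned int pos)
{
        for (int i = 0; i < n; i++)
                if (pos >= r[i].base && pos < r[i].base + r[i].bytes)
                        return &r[i];
        return NULL;
}

/* like nfs_page_group_covers_page(): do the ranges cover [0, len)? */
static bool covers(const struct range *r, int n, unsigned int len)
{
        unsigned int pos = 0;

        while (pos < len) {
                const struct range *tmp = search(r, n, pos);

                if (!tmp)
                        return false;           /* hole in the group */
                pos = tmp->base + tmp->bytes;   /* jump past this range */
        }
        return true;
}

int main(void)
{
        struct range group[] = { { 0, 2048 }, { 2048, 2048 } };

        printf("%s\n", covers(group, 2, 4096) ? "covered" : "hole");
        return 0;
}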
396 if (wbc->sync_mode == WB_SYNC_ALL) in wb_priority()
407 #define NFS_CONGESTION_ON_THRESH (nfs_congestion_kb >> (PAGE_SHIFT-10))
409 (NFS_CONGESTION_ON_THRESH - (NFS_CONGESTION_ON_THRESH >> 2))
413 struct nfs_server *nfss = NFS_SERVER(folio_file_mapping(folio)->host); in nfs_folio_set_writeback()
416 if (atomic_long_inc_return(&nfss->writeback) > NFS_CONGESTION_ON_THRESH) in nfs_folio_set_writeback()
417 nfss->write_congested = 1; in nfs_folio_set_writeback()
422 struct nfs_server *nfss = NFS_SERVER(folio_file_mapping(folio)->host); in nfs_folio_end_writeback()
425 if (atomic_long_dec_return(&nfss->writeback) < in nfs_folio_end_writeback()
427 nfss->write_congested = 0; in nfs_folio_end_writeback()
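The two thresholds above form a hysteresis band: write_congested is set once in-flight writeback exceeds NFS_CONGESTION_ON_THRESH pages, and cleared only after it falls below three quarters of that (OFF = ON - ON/4). A small sketch with assumed values, 4 KiB pages (PAGE_SHIFT == 12) and nfs_congestion_kb == 65536:

#include <stdio.h>

#define PAGE_SHIFT 12
static long nfs_congestion_kb = 65536;

#define NFS_CONGESTION_ON_THRESH  (nfs_congestion_kb >> (PAGE_SHIFT - 10))
#define NFS_CONGESTION_OFF_THRESH \
        (NFS_CONGESTION_ON_THRESH - (NFS_CONGESTION_ON_THRESH >> 2))

int main(void)
{
        /* 65536 KiB -> 16384 pages to turn congestion on; off below 12288 */
        printf("on >= %ld pages, off < %ld pages\n",
               NFS_CONGESTION_ON_THRESH, NFS_CONGESTION_OFF_THRESH);
        return 0;
}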
440 * nfs_destroy_unlinked_subrequests - destroy recently unlinked subrequests
442 * @destroy_list - request list (using wb_this_page) terminated by @old_head
443 * @old_head - the old head of the list
457 destroy_list = (subreq->wb_this_page == old_head) ? in nfs_destroy_unlinked_subrequests()
458 NULL : subreq->wb_this_page; in nfs_destroy_unlinked_subrequests()
460 /* Note: lock subreq in order to change subreq->wb_head */ in nfs_destroy_unlinked_subrequests()
462 WARN_ON_ONCE(old_head != subreq->wb_head); in nfs_destroy_unlinked_subrequests()
465 subreq->wb_this_page = subreq; in nfs_destroy_unlinked_subrequests()
466 subreq->wb_head = subreq; in nfs_destroy_unlinked_subrequests()
468 clear_bit(PG_REMOVE, &subreq->wb_flags); in nfs_destroy_unlinked_subrequests()
471 if (!kref_read(&subreq->wb_kref)) { in nfs_destroy_unlinked_subrequests()
473 if (test_and_clear_bit(PG_TEARDOWN, &subreq->wb_flags)) { in nfs_destroy_unlinked_subrequests()
484 if (test_and_clear_bit(PG_INODE_REF, &subreq->wb_flags)) { in nfs_destroy_unlinked_subrequests()
486 atomic_long_dec(&NFS_I(inode)->nrequests); in nfs_destroy_unlinked_subrequests()
496 * nfs_join_page_group - destroy subrequests of the head req
513 pgbase = head->wb_pgbase; in nfs_join_page_group()
514 bytes = head->wb_bytes; in nfs_join_page_group()
515 off = head->wb_offset; in nfs_join_page_group()
516 for (subreq = head->wb_this_page; subreq != head; in nfs_join_page_group()
517 subreq = subreq->wb_this_page) { in nfs_join_page_group()
519 if (pgbase > subreq->wb_pgbase) { in nfs_join_page_group()
520 off -= pgbase - subreq->wb_pgbase; in nfs_join_page_group()
521 bytes += pgbase - subreq->wb_pgbase; in nfs_join_page_group()
522 pgbase = subreq->wb_pgbase; in nfs_join_page_group()
524 bytes = max(subreq->wb_pgbase + subreq->wb_bytes in nfs_join_page_group()
525 - pgbase, bytes); in nfs_join_page_group()
529 head->wb_pgbase = pgbase; in nfs_join_page_group()
530 head->wb_bytes = bytes; in nfs_join_page_group()
531 head->wb_offset = off; in nfs_join_page_group()
538 subreq = subreq->wb_this_page; in nfs_join_page_group()
542 if (head->wb_this_page != head) { in nfs_join_page_group()
544 destroy_list = head->wb_this_page; in nfs_join_page_group()
545 head->wb_this_page = head; in nfs_join_page_group()
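The merge loop above (lines 513-531) folds every subrequest's byte range into the head request, leaving (pgbase, bytes, offset) as the union of all ranges. A hedged sketch of the same union computation over a plain array; the struct and names are illustrative:

#include <stdio.h>

struct subreq {                 /* stand-in for struct nfs_page */
        unsigned int pgbase;    /* start within the page */
        unsigned int bytes;     /* length of this request */
        long long offset;       /* start within the file */
};

/* like the merge loop in nfs_join_page_group(): grow head to cover all */
static void join(struct subreq *head, const struct subreq *sub, int n)
{
        for (int i = 0; i < n; i++) {
                if (head->pgbase > sub[i].pgbase) {
                        /* extend the head downwards */
                        head->offset -= head->pgbase - sub[i].pgbase;
                        head->bytes += head->pgbase - sub[i].pgbase;
                        head->pgbase = sub[i].pgbase;
                }
                if (sub[i].pgbase + sub[i].bytes - head->pgbase > head->bytes)
                        head->bytes = sub[i].pgbase + sub[i].bytes - head->pgbase;
        }
}

int main(void)
{
        struct subreq head = { 1024, 512, 101024 };
        struct subreq subs[] = { { 0, 512, 100000 }, { 2048, 1024, 102048 } };

        join(&head, subs, 2);
        /* expected union: pgbase=0 bytes=3072 offset=100000 */
        printf("pgbase=%u bytes=%u offset=%lld\n",
               head.pgbase, head.bytes, head.offset);
        return 0;
}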
552 * nfs_lock_and_join_requests - join all subreqs to the head req
561 * Returns a locked, referenced pointer to the head request - which after
568 struct inode *inode = folio_file_mapping(folio)->host; in nfs_lock_and_join_requests()
576 * reference to the whole page group - the group will not be destroyed in nfs_lock_and_join_requests()
623 WARN_ON_ONCE(test_bit(PG_CLEAN, &req->wb_flags)); in nfs_page_async_flush()
626 ret = pgio->pg_error; in nfs_page_async_flush()
632 ret = pgio->pg_error; in nfs_page_async_flush()
638 if (wbc->sync_mode == WB_SYNC_NONE) in nfs_page_async_flush()
642 pgio->pg_error = 0; in nfs_page_async_flush()
644 nfs_add_stats(folio_file_mapping(folio)->host, in nfs_page_async_flush()
667 struct inode *inode = folio_file_mapping(folio)->host; in nfs_writepage_locked()
670 if (wbc->sync_mode == WB_SYNC_NONE && in nfs_writepage_locked()
671 NFS_SERVER(inode)->write_congested) { in nfs_writepage_locked()
714 struct inode *inode = mapping->host; in nfs_writepages()
717 unsigned int mntflags = NFS_SERVER(inode)->flags; in nfs_writepages()
721 if (wbc->sync_mode == WB_SYNC_NONE && in nfs_writepages()
722 NFS_SERVER(inode)->write_congested) in nfs_writepages()
727 if (!(mntflags & NFS_MOUNT_WRITE_EAGER) || wbc->for_kupdate || in nfs_writepages()
728 wbc->for_background || wbc->for_sync || wbc->for_reclaim) { in nfs_writepages()
761 struct nfs_inode *nfsi = NFS_I(mapping->host); in nfs_inode_add_request()
763 WARN_ON_ONCE(req->wb_this_page != req); in nfs_inode_add_request()
769 * Swap-space should not get truncated. Hence no need to plug the race in nfs_inode_add_request()
772 spin_lock(&mapping->private_lock); in nfs_inode_add_request()
774 set_bit(PG_MAPPED, &req->wb_flags); in nfs_inode_add_request()
776 folio->private = req; in nfs_inode_add_request()
778 spin_unlock(&mapping->private_lock); in nfs_inode_add_request()
779 atomic_long_inc(&nfsi->nrequests); in nfs_inode_add_request()
780 /* this a head request for a page group - mark it as having an in nfs_inode_add_request()
784 WARN_ON(test_and_set_bit(PG_INODE_REF, &req->wb_flags)); in nfs_inode_add_request()
785 kref_get(&req->wb_kref); in nfs_inode_add_request()
796 struct folio *folio = nfs_page_to_folio(req->wb_head); in nfs_inode_remove_request()
799 spin_lock(&mapping->private_lock); in nfs_inode_remove_request()
801 folio->private = NULL; in nfs_inode_remove_request()
803 clear_bit(PG_MAPPED, &req->wb_head->wb_flags); in nfs_inode_remove_request()
805 spin_unlock(&mapping->private_lock); in nfs_inode_remove_request()
808 if (test_and_clear_bit(PG_INODE_REF, &req->wb_flags)) { in nfs_inode_remove_request()
809 atomic_long_dec(&nfsi->nrequests); in nfs_inode_remove_request()
835 struct inode *inode = &nfsi->vfs_inode; in nfs_page_search_commits_for_head_request_locked()
842 return freq->wb_head; in nfs_page_search_commits_for_head_request_locked()
845 list_for_each_entry_safe(freq, t, &cinfo.mds->list, wb_list) { in nfs_page_search_commits_for_head_request_locked()
847 return freq->wb_head; in nfs_page_search_commits_for_head_request_locked()
854 * nfs_request_add_commit_list_locked - add request to a commit list
863 * The caller must hold NFS_I(cinfo->inode)->commit_mutex, and the
870 set_bit(PG_CLEAN, &req->wb_flags); in nfs_request_add_commit_list_locked()
872 atomic_long_inc(&cinfo->mds->ncommit); in nfs_request_add_commit_list_locked()
877 * nfs_request_add_commit_list - add request to a commit list
885 * The caller must _not_ hold the cinfo->lock, but must be
891 mutex_lock(&NFS_I(cinfo->inode)->commit_mutex); in nfs_request_add_commit_list()
892 nfs_request_add_commit_list_locked(req, &cinfo->mds->list, cinfo); in nfs_request_add_commit_list()
893 mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex); in nfs_request_add_commit_list()
899 * nfs_request_remove_commit_list - Remove request from a commit list
907 * The caller _must_ hold the cinfo->lock and the nfs_page lock.
913 if (!test_and_clear_bit(PG_CLEAN, &(req)->wb_flags)) in nfs_request_remove_commit_list()
916 atomic_long_dec(&cinfo->mds->ncommit); in nfs_request_remove_commit_list()
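The _locked/unlocked pairing above is a common kernel convention: the bare helper assumes commit_mutex is already held, while the public wrapper takes and releases it. A hedged sketch of that convention with POSIX threads; names and the bare counter are illustrative stand-ins for the commit list:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t commit_mutex = PTHREAD_MUTEX_INITIALIZER;
static long ncommit;            /* like cinfo->mds->ncommit */

/* caller must hold commit_mutex, like nfs_request_add_commit_list_locked() */
static void add_commit_locked(void)
{
        ncommit++;              /* the kernel also does list_add_tail() here */
}

/* public wrapper takes the lock, like nfs_request_add_commit_list() */
static void add_commit(void)
{
        pthread_mutex_lock(&commit_mutex);
        add_commit_locked();
        pthread_mutex_unlock(&commit_mutex);
}

int main(void)
{
        add_commit();
        printf("ncommit = %ld\n", ncommit);
        return 0;
}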
923 cinfo->inode = inode; in nfs_init_cinfo_from_inode()
924 cinfo->mds = &NFS_I(inode)->commit_info; in nfs_init_cinfo_from_inode()
925 cinfo->ds = pnfs_get_ds_info(inode); in nfs_init_cinfo_from_inode()
926 cinfo->dreq = NULL; in nfs_init_cinfo_from_inode()
927 cinfo->completion_ops = &nfs_commit_completion_ops; in nfs_init_cinfo_from_inode()
958 node_stat_mod_folio(folio, NR_WRITEBACK, -nr); in nfs_folio_clear_commit()
959 wb_stat_mod(&inode_to_bdi(folio_file_mapping(folio)->host)->wb, in nfs_folio_clear_commit()
960 WB_WRITEBACK, -nr); in nfs_folio_clear_commit()
968 if (test_bit(PG_CLEAN, &req->wb_flags)) { in nfs_clear_request_commit()
970 struct inode *inode = d_inode(ctx->dentry); in nfs_clear_request_commit()
972 mutex_lock(&NFS_I(inode)->commit_mutex); in nfs_clear_request_commit()
976 mutex_unlock(&NFS_I(inode)->commit_mutex); in nfs_clear_request_commit()
983 if (hdr->verf.committed == NFS_DATA_SYNC) in nfs_write_need_commit()
984 return hdr->lseg == NULL; in nfs_write_need_commit()
985 return hdr->verf.committed != NFS_FILE_SYNC; in nfs_write_need_commit()
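nfs_write_need_commit() above encodes the three NFSv3+ write stability levels: UNSTABLE replies always need a follow-up COMMIT, FILE_SYNC replies never do, and DATA_SYNC replies need one only when there is no pNFS layout segment to commit through. A hedged decision-table sketch; the enum mirrors enum nfs3_stable_how:

#include <stdbool.h>
#include <stdio.h>

enum stable_how { NFS_UNSTABLE = 0, NFS_DATA_SYNC = 1, NFS_FILE_SYNC = 2 };

/* mirrors the logic of nfs_write_need_commit() above */
static bool write_need_commit(enum stable_how committed, bool have_layout_seg)
{
        if (committed == NFS_DATA_SYNC)
                return !have_layout_seg;        /* hdr->lseg == NULL */
        return committed != NFS_FILE_SYNC;
}

int main(void)
{
        printf("UNSTABLE:  %d\n", write_need_commit(NFS_UNSTABLE, false));  /* 1 */
        printf("DATA_SYNC: %d\n", write_need_commit(NFS_DATA_SYNC, true));  /* 0 */
        printf("FILE_SYNC: %d\n", write_need_commit(NFS_FILE_SYNC, false)); /* 0 */
        return 0;
}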
990 nfs_io_completion_get(hdr->io_completion); in nfs_async_write_init()
998 if (test_bit(NFS_IOHDR_REDO, &hdr->flags)) in nfs_write_completion()
1000 nfs_init_cinfo_from_inode(&cinfo, hdr->inode); in nfs_write_completion()
1001 while (!list_empty(&hdr->pages)) { in nfs_write_completion()
1002 struct nfs_page *req = nfs_list_entry(hdr->pages.next); in nfs_write_completion()
1004 bytes += req->wb_bytes; in nfs_write_completion()
1006 if (test_bit(NFS_IOHDR_ERROR, &hdr->flags) && in nfs_write_completion()
1007 (hdr->good_bytes < bytes)) { in nfs_write_completion()
1008 trace_nfs_comp_error(hdr->inode, req, hdr->error); in nfs_write_completion()
1010 hdr->error); in nfs_write_completion()
1015 req->wb_nio = 0; in nfs_write_completion()
1016 memcpy(&req->wb_verf, &hdr->verf.verifier, sizeof(req->wb_verf)); in nfs_write_completion()
1017 nfs_mark_request_commit(req, hdr->lseg, &cinfo, in nfs_write_completion()
1018 hdr->pgio_mirror_idx); in nfs_write_completion()
1028 nfs_io_completion_put(hdr->io_completion); in nfs_write_completion()
1029 hdr->release(hdr); in nfs_write_completion()
1035 return atomic_long_read(&cinfo->mds->ncommit); in nfs_reqs_to_commit()
1038 /* NFS_I(cinfo->inode)->commit_mutex held by caller */
1040 nfs_scan_commit_list(struct list_head *src, struct list_head *dst, in nfs_scan_commit_list()
1046 list_for_each_entry_safe(req, tmp, src, wb_list) { in nfs_scan_commit_list()
1047 kref_get(&req->wb_kref); in nfs_scan_commit_list()
1053 clear_bit(PG_COMMIT_TO_DS, &req->wb_flags); in nfs_scan_commit_list()
1056 if ((ret == max) && !cinfo->dreq) in nfs_scan_commit_list()
1065 * nfs_scan_commit - Scan an inode for commit requests
1079 if (!atomic_long_read(&cinfo->mds->ncommit)) in nfs_scan_commit()
1081 mutex_lock(&NFS_I(cinfo->inode)->commit_mutex); in nfs_scan_commit()
1082 if (atomic_long_read(&cinfo->mds->ncommit) > 0) { in nfs_scan_commit()
1085 ret = nfs_scan_commit_list(&cinfo->mds->list, dst, in nfs_scan_commit()
1087 ret += pnfs_scan_commit_lists(inode, cinfo, max - ret); in nfs_scan_commit()
1089 mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex); in nfs_scan_commit()
1115 rqend = req->wb_offset + req->wb_bytes; in nfs_try_to_update_request()
1118 * the offsets are non-contiguous. in nfs_try_to_update_request()
1122 if (offset > rqend || end < req->wb_offset) in nfs_try_to_update_request()
1126 if (offset < req->wb_offset) { in nfs_try_to_update_request()
1127 req->wb_offset = offset; in nfs_try_to_update_request()
1128 req->wb_pgbase = offset; in nfs_try_to_update_request()
1131 req->wb_bytes = end - req->wb_offset; in nfs_try_to_update_request()
1133 req->wb_bytes = rqend - req->wb_offset; in nfs_try_to_update_request()
1134 req->wb_nio = 0; in nfs_try_to_update_request()
1144 error = nfs_wb_folio(folio_file_mapping(folio)->host, folio); in nfs_try_to_update_request()
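nfs_try_to_update_request() above widens an existing dirty request to absorb a new write only when the two byte ranges touch or overlap; a disjoint range forces the folio to be flushed first. A hedged sketch of the range-merge rule (the kernel also adjusts wb_pgbase, omitted here; struct and names are illustrative):

#include <stdbool.h>
#include <stdio.h>

struct req {                            /* stand-in for struct nfs_page */
        unsigned long long offset;      /* file offset of the request */
        unsigned int bytes;
};

/* true (and req widened) only if [offset, offset+count) touches the request */
static bool try_update(struct req *r, unsigned long long offset, unsigned int count)
{
        unsigned long long end = offset + count;
        unsigned long long rqend = r->offset + r->bytes;

        if (offset > rqend || end < r->offset)
                return false;           /* non-contiguous: caller flushes */
        if (offset < r->offset)
                r->offset = offset;
        r->bytes = (end > rqend ? end : rqend) - r->offset;
        return true;
}

int main(void)
{
        struct req r = { 4096, 1024 };          /* covers [4096, 5120) */

        if (try_update(&r, 5120, 512))          /* touches at 5120 */
                printf("merged: [%llu, %llu)\n", r.offset, r.offset + r.bytes);
        if (!try_update(&r, 8192, 512))         /* gap: must flush first */
                printf("disjoint write: flush and retry\n");
        return 0;
}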
1201 * due to the lack of an ACCESS-type call in NFSv2. in nfs_flush_incompatible()
1209 l_ctx = req->wb_lock_context; in nfs_flush_incompatible()
1213 !(list_empty_careful(&flctx->flc_posix) && in nfs_flush_incompatible()
1214 list_empty_careful(&flctx->flc_flock))) { in nfs_flush_incompatible()
1215 do_flush |= l_ctx->lockowner != current->files; in nfs_flush_incompatible()
1220 status = nfs_wb_folio(folio_file_mapping(folio)->host, folio); in nfs_flush_incompatible()
1229 * Returns -EACCES if the key will expire within RPC_KEY_EXPIRE_FAIL.
1241 !rcu_access_pointer(ctx->ll_cred)) in nfs_key_timeout_notify()
1243 return -EACCES; in nfs_key_timeout_notify()
1252 struct rpc_auth *auth = NFS_SERVER(inode)->client->cl_auth; in nfs_ctx_key_to_expire()
1255 .cred = ctx->cred, in nfs_ctx_key_to_expire()
1260 cred = rcu_dereference(ctx->ll_cred); in nfs_ctx_key_to_expire()
1261 if (cred && !(cred->cr_ops->crkey_timeout && in nfs_ctx_key_to_expire()
1262 cred->cr_ops->crkey_timeout(cred))) in nfs_ctx_key_to_expire()
1266 new = auth->au_ops->lookup_cred(auth, &acred, 0); in nfs_ctx_key_to_expire()
1274 } else if (new->cr_ops->crkey_timeout && in nfs_ctx_key_to_expire()
1275 new->cr_ops->crkey_timeout(new)) in nfs_ctx_key_to_expire()
1279 old = rcu_dereference_protected(xchg(&ctx->ll_cred, in nfs_ctx_key_to_expire()
1294 struct inode *inode = folio_file_mapping(folio)->host; in nfs_folio_write_uptodate()
1299 if (nfsi->cache_validity & in nfs_folio_write_uptodate()
1303 if (test_bit(NFS_INO_INVALIDATING, &nfsi->flags) && pagelen != 0) in nfs_folio_write_uptodate()
1306 if (nfsi->cache_validity & NFS_INO_INVALID_DATA && pagelen != 0) in nfs_folio_write_uptodate()
1314 return fl->fl_start == 0 && fl->fl_end == OFFSET_MAX && in is_whole_file_wrlock()
1315 fl->fl_type == F_WRLCK; in is_whole_file_wrlock()
1334 if (file->f_flags & O_DSYNC) in nfs_can_extend_write()
1338 if (NFS_PROTO(inode)->have_delegation(inode, FMODE_WRITE)) in nfs_can_extend_write()
1340 if (!flctx || (list_empty_careful(&flctx->flc_flock) && in nfs_can_extend_write()
1341 list_empty_careful(&flctx->flc_posix))) in nfs_can_extend_write()
1346 spin_lock(&flctx->flc_lock); in nfs_can_extend_write()
1347 if (!list_empty(&flctx->flc_posix)) { in nfs_can_extend_write()
1348 fl = list_first_entry(&flctx->flc_posix, struct file_lock, in nfs_can_extend_write()
1352 } else if (!list_empty(&flctx->flc_flock)) { in nfs_can_extend_write()
1353 fl = list_first_entry(&flctx->flc_flock, struct file_lock, in nfs_can_extend_write()
1355 if (fl->fl_type == F_WRLCK) in nfs_can_extend_write()
1358 spin_unlock(&flctx->flc_lock); in nfs_can_extend_write()
1365 * XXX: Keep an eye on generic_file_read to make sure it doesn't do bad
1373 struct inode *inode = mapping->host; in nfs_update_folio()
1417 if (IS_SWAPFILE(hdr->inode)) in nfs_initiate_write()
1418 task_setup_data->flags |= RPC_TASK_SWAPPER; in nfs_initiate_write()
1419 task_setup_data->priority = priority; in nfs_initiate_write()
1420 rpc_ops->write_setup(hdr, msg, &task_setup_data->rpc_client); in nfs_initiate_write()
1433 req->wb_nio++; in nfs_redirty_request()
1435 atomic_long_inc(&nfsi->redirtied_pages); in nfs_redirty_request()
1445 req = nfs_list_entry(head->next); in nfs_async_write_error()
1456 nfs_async_write_error(&hdr->pages, 0); in nfs_async_write_reschedule_io()
1474 if (server->pnfs_curr_ld && !force_mds) in nfs_pageio_init_write()
1475 pg_ops = server->pnfs_curr_ld->pg_write_ops; in nfs_pageio_init_write()
1478 server->wsize, ioflags); in nfs_pageio_init_write()
1486 if (pgio->pg_ops && pgio->pg_ops->pg_cleanup) in nfs_pageio_reset_write_mds()
1487 pgio->pg_ops->pg_cleanup(pgio); in nfs_pageio_reset_write_mds()
1489 pgio->pg_ops = &nfs_pgio_rw_ops; in nfs_pageio_reset_write_mds()
1493 mirror = &pgio->pg_mirrors[0]; in nfs_pageio_reset_write_mds()
1494 mirror->pg_bsize = NFS_SERVER(pgio->pg_inode)->wsize; in nfs_pageio_reset_write_mds()
1503 NFS_PROTO(data->inode)->commit_rpc_prepare(task, data); in nfs_commit_prepare()
1509 struct nfs_pgio_args *argp = &hdr->args; in nfs_writeback_check_extend()
1510 struct nfs_pgio_res *resp = &hdr->res; in nfs_writeback_check_extend()
1511 u64 size = argp->offset + resp->count; in nfs_writeback_check_extend()
1513 if (!(fattr->valid & NFS_ATTR_FATTR_SIZE)) in nfs_writeback_check_extend()
1514 fattr->size = size; in nfs_writeback_check_extend()
1515 if (nfs_size_to_loff_t(fattr->size) < i_size_read(hdr->inode)) { in nfs_writeback_check_extend()
1516 fattr->valid &= ~NFS_ATTR_FATTR_SIZE; in nfs_writeback_check_extend()
1519 if (size != fattr->size) in nfs_writeback_check_extend()
1524 fattr->valid |= NFS_ATTR_FATTR_SIZE; in nfs_writeback_check_extend()
1529 struct nfs_fattr *fattr = &hdr->fattr; in nfs_writeback_update_inode()
1530 struct inode *inode = hdr->inode; in nfs_writeback_update_inode()
1532 spin_lock(&inode->i_lock); in nfs_writeback_update_inode()
1535 spin_unlock(&inode->i_lock); in nfs_writeback_update_inode()
1549 * ->write_done will attempt to use post-op attributes to detect in nfs_writeback_done()
1551 * of close-to-open would allow us to continue caching even if in nfs_writeback_done()
1555 status = NFS_PROTO(inode)->write_done(task, hdr); in nfs_writeback_done()
1559 nfs_add_stats(inode, NFSIOS_SERVERWRITTENBYTES, hdr->res.count); in nfs_writeback_done()
1562 if (task->tk_status >= 0) { in nfs_writeback_done()
1563 enum nfs3_stable_how committed = hdr->res.verf->committed; in nfs_writeback_done()
1571 set_bit(NFS_IOHDR_UNSTABLE_WRITES, &hdr->flags); in nfs_writeback_done()
1574 if (committed < hdr->args.stable) { in nfs_writeback_done()
1589 NFS_SERVER(inode)->nfs_client->cl_hostname, in nfs_writeback_done()
1590 committed, hdr->args.stable); in nfs_writeback_done()
1598 spin_lock(&inode->i_lock); in nfs_writeback_done()
1600 spin_unlock(&inode->i_lock); in nfs_writeback_done()
1611 struct nfs_pgio_args *argp = &hdr->args; in nfs_writeback_result()
1612 struct nfs_pgio_res *resp = &hdr->res; in nfs_writeback_result()
1614 if (resp->count < argp->count) { in nfs_writeback_result()
1618 nfs_inc_stats(hdr->inode, NFSIOS_SHORTWRITE); in nfs_writeback_result()
1621 if (resp->count == 0) { in nfs_writeback_result()
1625 argp->count); in nfs_writeback_result()
1628 nfs_set_pgio_error(hdr, -EIO, argp->offset); in nfs_writeback_result()
1629 task->tk_status = -EIO; in nfs_writeback_result()
1633 /* For non rpc-based layout drivers, retry-through-MDS */ in nfs_writeback_result()
1634 if (!task->tk_ops) { in nfs_writeback_result()
1635 hdr->pnfs_error = -EAGAIN; in nfs_writeback_result()
1640 if (resp->verf->committed != NFS_UNSTABLE) { in nfs_writeback_result()
1642 hdr->mds_offset += resp->count; in nfs_writeback_result()
1643 argp->offset += resp->count; in nfs_writeback_result()
1644 argp->pgbase += resp->count; in nfs_writeback_result()
1645 argp->count -= resp->count; in nfs_writeback_result()
1650 argp->stable = NFS_FILE_SYNC; in nfs_writeback_result()
1652 resp->count = 0; in nfs_writeback_result()
1653 resp->verf->committed = 0; in nfs_writeback_result()
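On a short write (lines 1614-1653), the arguments are advanced by the bytes the server acknowledged and only the remainder is resent; a server that short-wrote an unstable reply is asked for FILE_SYNC on the retry. A hedged sketch of that resend bookkeeping, with illustrative types and no RPC:

#include <stdio.h>

struct wr_args {                /* stand-in for struct nfs_pgio_args */
        unsigned long long offset;
        unsigned int pgbase;
        unsigned int count;
};

/* mirrors the resend arithmetic in nfs_writeback_result() above */
static void advance_short_write(struct wr_args *argp, unsigned int done)
{
        argp->offset += done;
        argp->pgbase += done;
        argp->count -= done;
}

int main(void)
{
        struct wr_args args = { 8192, 0, 4096 };
        unsigned int written = 1024;    /* server accepted only 1 KiB */

        advance_short_write(&args, written);
        printf("resend %u bytes at offset %llu (pgbase %u)\n",
               args.count, args.offset, args.pgbase);
        return 0;
}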
1660 return wait_var_event_killable(&cinfo->rpcs_out, in wait_on_commit()
1661 !atomic_read(&cinfo->rpcs_out)); in wait_on_commit()
1666 atomic_inc(&cinfo->rpcs_out); in nfs_commit_begin()
1671 if (atomic_dec_and_test(&cinfo->rpcs_out)) { in nfs_commit_end()
1672 wake_up_var(&cinfo->rpcs_out); in nfs_commit_end()
1680 put_nfs_open_context(data->context); in nfs_commitdata_release()
1693 .rpc_argp = &data->args, in nfs_initiate_commit()
1694 .rpc_resp = &data->res, in nfs_initiate_commit()
1695 .rpc_cred = data->cred, in nfs_initiate_commit()
1698 .task = &data->task, in nfs_initiate_commit()
1708 if (nfs_server_capable(data->inode, NFS_CAP_MOVEABLE)) in nfs_initiate_commit()
1712 nfs_ops->commit_setup(data, &msg, &task_setup_data.rpc_client); in nfs_initiate_commit()
1733 if (lwb < (req_offset(req) + req->wb_bytes)) in nfs_get_lwb()
1734 lwb = req_offset(req) + req->wb_bytes; in nfs_get_lwb()
1752 * NB: take care not to mess about with data->commit et al. */ in nfs_init_commit()
1755 list_splice_init(head, &data->pages); in nfs_init_commit()
1757 first = nfs_list_entry(data->pages.next); in nfs_init_commit()
1759 inode = d_inode(ctx->dentry); in nfs_init_commit()
1761 data->inode = inode; in nfs_init_commit()
1762 data->cred = ctx->cred; in nfs_init_commit()
1763 data->lseg = lseg; /* reference transferred */ in nfs_init_commit()
1766 data->lwb = nfs_get_lwb(&data->pages); in nfs_init_commit()
1767 data->mds_ops = &nfs_commit_ops; in nfs_init_commit()
1768 data->completion_ops = cinfo->completion_ops; in nfs_init_commit()
1769 data->dreq = cinfo->dreq; in nfs_init_commit()
1771 data->args.fh = NFS_FH(data->inode); in nfs_init_commit()
1773 data->args.offset = 0; in nfs_init_commit()
1774 data->args.count = 0; in nfs_init_commit()
1775 data->context = get_nfs_open_context(ctx); in nfs_init_commit()
1776 data->res.fattr = &data->fattr; in nfs_init_commit()
1777 data->res.verf = &data->verf; in nfs_init_commit()
1778 nfs_fattr_init(&data->fattr); in nfs_init_commit()
1779 nfs_commit_begin(cinfo->mds); in nfs_init_commit()
1791 req = nfs_list_entry(page_list->next); in nfs_retry_commit()
1824 nfs_retry_commit(head, NULL, cinfo, -1); in nfs_commit_list()
1825 return -ENOMEM; in nfs_commit_list()
1830 if (NFS_SERVER(inode)->nfs_client->cl_minorversion) in nfs_commit_list()
1833 data->mds_ops, how, in nfs_commit_list()
1844 /* Call the NFS version-specific code */ in nfs_commit_done()
1845 NFS_PROTO(data->inode)->commit_done(task, data); in nfs_commit_done()
1851 const struct nfs_writeverf *verf = data->res.verf; in nfs_commit_release_pages()
1853 int status = data->task.tk_status; in nfs_commit_release_pages()
1858 while (!list_empty(&data->pages)) { in nfs_commit_release_pages()
1859 req = nfs_list_entry(data->pages.next); in nfs_commit_release_pages()
1865 nfs_req_openctx(req)->dentry->d_sb->s_id, in nfs_commit_release_pages()
1866 (unsigned long long)NFS_FILEID(d_inode(nfs_req_openctx(req)->dentry)), in nfs_commit_release_pages()
1867 req->wb_bytes, in nfs_commit_release_pages()
1871 trace_nfs_commit_error(data->inode, req, in nfs_commit_release_pages()
1892 atomic_long_inc(&NFS_I(data->inode)->redirtied_pages); in nfs_commit_release_pages()
1898 nfss = NFS_SERVER(data->inode); in nfs_commit_release_pages()
1899 if (atomic_long_read(&nfss->writeback) < NFS_CONGESTION_OFF_THRESH) in nfs_commit_release_pages()
1900 nfss->write_congested = 0; in nfs_commit_release_pages()
1902 nfs_init_cinfo(&cinfo, data->inode, data->dreq); in nfs_commit_release_pages()
1910 data->completion_ops->completion(data); in nfs_commit_release()
1955 if (wbc && wbc->sync_mode == WB_SYNC_NONE) { in __nfs_commit_inode()
1956 if (nscan < wbc->nr_to_write) in __nfs_commit_inode()
1957 wbc->nr_to_write -= nscan; in __nfs_commit_inode()
1959 wbc->nr_to_write = 0; in __nfs_commit_inode()
1983 if (wbc->sync_mode == WB_SYNC_NONE) { in nfs_write_inode()
1985 if (!atomic_long_read(&nfsi->commit_info.ncommit)) in nfs_write_inode()
1988 /* Don't commit yet if this is a non-blocking flush and there in nfs_write_inode()
1991 if (mapping_tagged(inode->i_mapping, PAGECACHE_TAG_WRITEBACK)) in nfs_write_inode()
2002 } else if (atomic_long_read(&nfsi->commit_info.ncommit)) in nfs_write_inode()
2006 if (!atomic_read(&nfsi->commit_info.rpcs_out)) in nfs_write_inode()
2027 ret = pnfs_sync_inode(mapping->host, true); in nfs_filemap_write_and_wait_range()
2041 ret = filemap_write_and_wait(inode->i_mapping); in nfs_wb_all()
2082 * nfs_wb_folio - Write back all requests on one page
2092 loff_t range_end = range_start + (loff_t)folio_size(folio) - 1; in nfs_wb_folio()
2125 struct folio *src, enum migrate_mode mode) in nfs_migrate_folio()
2129 * an in-progress read or write request. Don't try to migrate it. in nfs_migrate_folio()
2135 if (folio_test_private(src)) in nfs_migrate_folio()
2136 return -EBUSY; in nfs_migrate_folio()
2138 if (folio_test_fscache(src)) { in nfs_migrate_folio()
2140 return -EBUSY; in nfs_migrate_folio()
2141 folio_wait_fscache(src); in nfs_migrate_folio()
2144 return migrate_folio(mapping, dst, src, mode); in nfs_migrate_folio()
2155 return -ENOMEM; in nfs_init_writepagecache()
2190 nfs_congestion_kb = (16*int_sqrt(totalram_pages())) << (PAGE_SHIFT-10); in nfs_init_writepagecache()
2202 return -ENOMEM; in nfs_init_writepagecache()
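The default congestion limit at line 2190 scales with the square root of total RAM: sixteen times int_sqrt(totalram_pages()), converted from pages to kibibytes (the kernel further clamps the result, which does not appear in this listing). A hedged arithmetic sketch assuming 4 KiB pages and 4 GiB of RAM; int_sqrt here is a naive stand-in for the kernel helper:

#include <stdio.h>

#define PAGE_SHIFT 12                           /* assume 4 KiB pages */

static unsigned long int_sqrt(unsigned long x)  /* stand-in for the kernel's */
{
        unsigned long r = 0;

        while ((r + 1) * (r + 1) <= x)
                r++;
        return r;
}

int main(void)
{
        unsigned long totalram_pages = (4UL << 30) >> PAGE_SHIFT; /* 4 GiB */
        unsigned long nfs_congestion_kb =
                (16 * int_sqrt(totalram_pages)) << (PAGE_SHIFT - 10);

        /* 1048576 pages -> sqrt 1024 -> 16384 pages -> 65536 KiB (64 MiB) */
        printf("nfs_congestion_kb = %lu KiB\n", nfs_congestion_kb);
        return 0;
}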