58 #include "trace.h"
106 * A waitqueue for all in-progress 4.0 CLOSE operations that are waiting for
139 rc = -ENOMEM; in nfsd4_create_laundry_wq()
150 return ses->se_flags & NFS4_SESSION_DEAD; in is_session_dead()
155 if (atomic_read(&ses->se_ref) > ref_held_by_me) in mark_session_dead_locked()
157 ses->se_flags |= NFS4_SESSION_DEAD; in mark_session_dead_locked()
163 return clp->cl_time == 0; in is_client_expired()
169 if (clp->cl_state != NFSD4_ACTIVE) in nfsd4_dec_courtesy_client_count()
170 atomic_add_unless(&nn->nfsd_courtesy_clients, -1, 0); in nfsd4_dec_courtesy_client_count()
175 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); in get_client_locked()
177 lockdep_assert_held(&nn->client_lock); in get_client_locked()
181 atomic_inc(&clp->cl_rpc_users); in get_client_locked()
183 clp->cl_state = NFSD4_ACTIVE; in get_client_locked()
191 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); in renew_client_locked()
197 clp->cl_clientid.cl_boot, in renew_client_locked()
198 clp->cl_clientid.cl_id); in renew_client_locked()
202 list_move_tail(&clp->cl_lru, &nn->client_lru); in renew_client_locked()
203 clp->cl_time = ktime_get_boottime_seconds(); in renew_client_locked()
205 clp->cl_state = NFSD4_ACTIVE; in renew_client_locked()
210 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); in put_client_renew_locked()
212 lockdep_assert_held(&nn->client_lock); in put_client_renew_locked()
214 if (!atomic_dec_and_test(&clp->cl_rpc_users)) in put_client_renew_locked()
224 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); in put_client_renew()
226 if (!atomic_dec_and_lock(&clp->cl_rpc_users, &nn->client_lock)) in put_client_renew()
232 spin_unlock(&nn->client_lock); in put_client_renew()
241 status = get_client_locked(ses->se_client); in nfsd4_get_session_locked()
244 atomic_inc(&ses->se_ref); in nfsd4_get_session_locked()
250 struct nfs4_client *clp = ses->se_client; in nfsd4_put_session_locked()
251 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); in nfsd4_put_session_locked()
253 lockdep_assert_held(&nn->client_lock); in nfsd4_put_session_locked()
255 if (atomic_dec_and_test(&ses->se_ref) && is_session_dead(ses)) in nfsd4_put_session_locked()
262 struct nfs4_client *clp = ses->se_client; in nfsd4_put_session()
263 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); in nfsd4_put_session()
265 spin_lock(&nn->client_lock); in nfsd4_put_session()
267 spin_unlock(&nn->client_lock); in nfsd4_put_session()
276 spin_lock(&nn->blocked_locks_lock); in find_blocked_lock()
277 list_for_each_entry(cur, &lo->lo_blocked, nbl_list) { in find_blocked_lock()
278 if (fh_match(fh, &cur->nbl_fh)) { in find_blocked_lock()
279 list_del_init(&cur->nbl_list); in find_blocked_lock()
280 WARN_ON(list_empty(&cur->nbl_lru)); in find_blocked_lock()
281 list_del_init(&cur->nbl_lru); in find_blocked_lock()
286 spin_unlock(&nn->blocked_locks_lock); in find_blocked_lock()
288 locks_delete_block(&found->nbl_lock); in find_blocked_lock()
302 INIT_LIST_HEAD(&nbl->nbl_list); in find_or_allocate_block()
303 INIT_LIST_HEAD(&nbl->nbl_lru); in find_or_allocate_block()
304 fh_copy_shallow(&nbl->nbl_fh, fh); in find_or_allocate_block()
305 locks_init_lock(&nbl->nbl_lock); in find_or_allocate_block()
306 kref_init(&nbl->nbl_kref); in find_or_allocate_block()
307 nfsd4_init_cb(&nbl->nbl_cb, lo->lo_owner.so_client, in find_or_allocate_block()
327 locks_delete_block(&nbl->nbl_lock); in free_blocked_lock()
328 locks_release_private(&nbl->nbl_lock); in free_blocked_lock()
329 kref_put(&nbl->nbl_kref, free_nbl); in free_blocked_lock()
335 struct nfs4_client *clp = lo->lo_owner.so_client; in remove_blocked_locks()
336 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); in remove_blocked_locks()
341 spin_lock(&nn->blocked_locks_lock); in remove_blocked_locks()
342 while (!list_empty(&lo->lo_blocked)) { in remove_blocked_locks()
343 nbl = list_first_entry(&lo->lo_blocked, in remove_blocked_locks()
346 list_del_init(&nbl->nbl_list); in remove_blocked_locks()
347 WARN_ON(list_empty(&nbl->nbl_lru)); in remove_blocked_locks()
348 list_move(&nbl->nbl_lru, &reaplist); in remove_blocked_locks()
350 spin_unlock(&nn->blocked_locks_lock); in remove_blocked_locks()
356 list_del_init(&nbl->nbl_lru); in remove_blocked_locks()
366 locks_delete_block(&nbl->nbl_lock); in nfsd4_cb_notify_lock_prepare()
379 switch (task->tk_status) { in nfsd4_cb_notify_lock_done()
380 case -NFS4ERR_DELAY: in nfsd4_cb_notify_lock_done()
409 * https://datatracker.ietf.org/doc/html/rfc7530#section-16.19.4 that
447 stp->st_access_bmap |= mask; in set_access()
457 stp->st_access_bmap &= ~mask; in clear_access()
466 return (bool)(stp->st_access_bmap & mask); in test_access()
476 stp->st_deny_bmap |= mask; in set_deny()
486 stp->st_deny_bmap &= ~mask; in clear_deny()
495 return (bool)(stp->st_deny_bmap & mask); in test_deny()
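The set_access/clear_access/test_access and *_deny helpers above keep one bit per share mode in st_access_bmap/st_deny_bmap. A minimal standalone sketch of that bit layout, assuming the on-the-wire NFSv4 share constants (READ = 1, WRITE = 2, BOTH = 3); the enum and variable names below are illustrative, not the kernel's:

    /* Hedged sketch: how an access bitmap encodes share modes. With
     * READ = 1, WRITE = 2, BOTH = 3, "1 << access" yields bits 0x2,
     * 0x4 and 0x8, mirroring the helpers quoted above.
     */
    #include <assert.h>

    enum { SHARE_ACCESS_READ = 1, SHARE_ACCESS_WRITE = 2, SHARE_ACCESS_BOTH = 3 };

    int main(void)
    {
        unsigned char access_bmap = 0;

        access_bmap |= 1 << SHARE_ACCESS_READ;              /* set_access(READ)   */
        assert(access_bmap & (1 << SHARE_ACCESS_READ));     /* test_access(READ)  */
        assert(!(access_bmap & (1 << SHARE_ACCESS_WRITE))); /* WRITE not granted  */
        access_bmap &= ~(1 << SHARE_ACCESS_READ);           /* clear_access(READ) */
        assert(access_bmap == 0);
        return 0;
    }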
530 atomic_inc(&sop->so_count); in nfs4_get_stateowner()
537 return (sop->so_owner.len == owner->len) && in same_owner_str()
538 0 == memcmp(sop->so_owner.data, owner->data, owner->len); in same_owner_str()
547 lockdep_assert_held(&clp->cl_lock); in find_openstateowner_str_locked()
549 list_for_each_entry(so, &clp->cl_ownerstr_hashtbl[hashval], in find_openstateowner_str_locked()
551 if (!so->so_is_open_owner) in find_openstateowner_str_locked()
553 if (same_owner_str(so, &open->op_owner)) in find_openstateowner_str_locked()
565 spin_lock(&clp->cl_lock); in find_openstateowner_str()
567 spin_unlock(&clp->cl_lock); in find_openstateowner_str()
577 while (nbytes--) { in opaque_hashval()
594 if (refcount_dec_and_test(&fi->fi_ref)) { in put_nfs4_file()
596 WARN_ON_ONCE(!list_empty(&fi->fi_clnt_odstate)); in put_nfs4_file()
597 WARN_ON_ONCE(!list_empty(&fi->fi_delegations)); in put_nfs4_file()
598 call_rcu(&fi->fi_rcu, nfsd4_free_file_rcu); in put_nfs4_file()
607 lockdep_assert_held(&f->fi_lock); in find_writeable_file_locked()
609 ret = nfsd_file_get(f->fi_fds[O_WRONLY]); in find_writeable_file_locked()
611 ret = nfsd_file_get(f->fi_fds[O_RDWR]); in find_writeable_file_locked()
620 spin_lock(&f->fi_lock); in find_writeable_file()
622 spin_unlock(&f->fi_lock); in find_writeable_file()
632 lockdep_assert_held(&f->fi_lock); in find_readable_file_locked()
634 ret = nfsd_file_get(f->fi_fds[O_RDONLY]); in find_readable_file_locked()
636 ret = nfsd_file_get(f->fi_fds[O_RDWR]); in find_readable_file_locked()
645 spin_lock(&f->fi_lock); in find_readable_file()
647 spin_unlock(&f->fi_lock); in find_readable_file()
657 spin_lock(&f->fi_lock); in find_rw_file()
658 ret = nfsd_file_get(f->fi_fds[O_RDWR]); in find_rw_file()
659 spin_unlock(&f->fi_lock); in find_rw_file()
671 spin_lock(&f->fi_lock); in find_any_file()
672 ret = nfsd_file_get(f->fi_fds[O_RDWR]); in find_any_file()
674 ret = nfsd_file_get(f->fi_fds[O_WRONLY]); in find_any_file()
676 ret = nfsd_file_get(f->fi_fds[O_RDONLY]); in find_any_file()
678 spin_unlock(&f->fi_lock); in find_any_file()
684 lockdep_assert_held(&f->fi_lock); in find_any_file_locked()
686 if (f->fi_fds[O_RDWR]) in find_any_file_locked()
687 return f->fi_fds[O_RDWR]; in find_any_file_locked()
688 if (f->fi_fds[O_WRONLY]) in find_any_file_locked()
689 return f->fi_fds[O_WRONLY]; in find_any_file_locked()
690 if (f->fi_fds[O_RDONLY]) in find_any_file_locked()
691 return f->fi_fds[O_RDONLY]; in find_any_file_locked()
705 #define OWNER_HASH_MASK (OWNER_HASH_SIZE - 1)
711 ret = opaque_hashval(ownername->data, ownername->len); in ownerstr_hashval()
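ownerstr_hashval() above folds the opaque owner name into a bucket with opaque_hashval() and masks it with OWNER_HASH_MASK, which only behaves like a modulo because OWNER_HASH_SIZE is a power of two. A small self-contained sketch of that pattern; the byte-accumulating hash and the table size here are stand-ins, since the listing does not show opaque_hashval()'s exact formula or the real table size:

    #include <stddef.h>
    #include <stdio.h>

    #define TABLE_SIZE 256                 /* assumed: must be a power of two */
    #define TABLE_MASK (TABLE_SIZE - 1)

    /* Illustrative stand-in for opaque_hashval(): accumulate bytes. */
    static unsigned int opaque_hash(const void *ptr, size_t nbytes)
    {
        const unsigned char *c = ptr;
        unsigned int x = 0;

        while (nbytes--)
            x = x * 37 + *c++;
        return x;
    }

    int main(void)
    {
        const char owner[] = "open owner 42";    /* made-up owner string */
        unsigned int bucket = opaque_hash(owner, sizeof(owner) - 1) & TABLE_MASK;

        printf("bucket %u of %u\n", bucket, TABLE_SIZE);
        return 0;
    }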
743 * false - access/deny mode conflict with normal client.
744 * true - no conflict or conflict with courtesy client(s) is resolved.
756 lockdep_assert_held(&fp->fi_lock); in nfs4_resolve_deny_conflicts_locked()
757 list_for_each_entry(st, &fp->fi_stateids, st_perfile) { in nfs4_resolve_deny_conflicts_locked()
759 if (st->st_openstp) in nfs4_resolve_deny_conflicts_locked()
764 bmap = share_access ? st->st_deny_bmap : st->st_access_bmap; in nfs4_resolve_deny_conflicts_locked()
767 clp = st->st_stid.sc_client; in nfs4_resolve_deny_conflicts_locked()
774 clp = stp->st_stid.sc_client; in nfs4_resolve_deny_conflicts_locked()
775 nn = net_generic(clp->net, nfsd_net_id); in nfs4_resolve_deny_conflicts_locked()
776 mod_delayed_work(laundry_wq, &nn->laundromat_work, 0); in nfs4_resolve_deny_conflicts_locked()
784 lockdep_assert_held(&fp->fi_lock); in __nfs4_file_get_access()
787 atomic_inc(&fp->fi_access[O_WRONLY]); in __nfs4_file_get_access()
789 atomic_inc(&fp->fi_access[O_RDONLY]); in __nfs4_file_get_access()
795 lockdep_assert_held(&fp->fi_lock); in nfs4_file_get_access()
802 if ((access & fp->fi_share_deny) != 0) in nfs4_file_get_access()
818 atomic_read(&fp->fi_access[O_RDONLY])) in nfs4_file_check_deny()
822 atomic_read(&fp->fi_access[O_WRONLY])) in nfs4_file_check_deny()
830 might_lock(&fp->fi_lock); in __nfs4_file_put_access()
832 if (atomic_dec_and_lock(&fp->fi_access[oflag], &fp->fi_lock)) { in __nfs4_file_put_access()
836 swap(f1, fp->fi_fds[oflag]); in __nfs4_file_put_access()
837 if (atomic_read(&fp->fi_access[1 - oflag]) == 0) in __nfs4_file_put_access()
838 swap(f2, fp->fi_fds[O_RDWR]); in __nfs4_file_put_access()
839 spin_unlock(&fp->fi_lock); in __nfs4_file_put_access()
861 * Note that we only allocate it for pNFS-enabled exports, otherwise
871 co->co_client = clp; in alloc_clnt_odstate()
872 refcount_set(&co->co_odcount, 1); in alloc_clnt_odstate()
880 struct nfs4_file *fp = co->co_file; in hash_clnt_odstate_locked()
882 lockdep_assert_held(&fp->fi_lock); in hash_clnt_odstate_locked()
883 list_add(&co->co_perfile, &fp->fi_clnt_odstate); in hash_clnt_odstate_locked()
890 refcount_inc(&co->co_odcount); in get_clnt_odstate()
901 fp = co->co_file; in put_clnt_odstate()
902 if (refcount_dec_and_lock(&co->co_odcount, &fp->fi_lock)) { in put_clnt_odstate()
903 list_del(&co->co_perfile); in put_clnt_odstate()
904 spin_unlock(&fp->fi_lock); in put_clnt_odstate()
906 nfsd4_return_all_file_layouts(co->co_client, fp); in put_clnt_odstate()
920 cl = new->co_client; in find_or_hash_clnt_odstate()
922 spin_lock(&fp->fi_lock); in find_or_hash_clnt_odstate()
923 list_for_each_entry(co, &fp->fi_clnt_odstate, co_perfile) { in find_or_hash_clnt_odstate()
924 if (co->co_client == cl) { in find_or_hash_clnt_odstate()
930 co->co_file = fp; in find_or_hash_clnt_odstate()
933 spin_unlock(&fp->fi_lock); in find_or_hash_clnt_odstate()
948 spin_lock(&cl->cl_lock); in nfs4_alloc_stid()
950 new_id = idr_alloc_cyclic(&cl->cl_stateids, stid, 1, 0, GFP_NOWAIT); in nfs4_alloc_stid()
951 spin_unlock(&cl->cl_lock); in nfs4_alloc_stid()
956 stid->sc_free = sc_free; in nfs4_alloc_stid()
957 stid->sc_client = cl; in nfs4_alloc_stid()
958 stid->sc_stateid.si_opaque.so_id = new_id; in nfs4_alloc_stid()
959 stid->sc_stateid.si_opaque.so_clid = cl->cl_clientid; in nfs4_alloc_stid()
961 refcount_set(&stid->sc_count, 1); in nfs4_alloc_stid()
962 spin_lock_init(&stid->sc_lock); in nfs4_alloc_stid()
963 INIT_LIST_HEAD(&stid->sc_cp_list); in nfs4_alloc_stid()
988 stid->cs_stid.si_opaque.so_clid.cl_boot = (u32)nn->boot_time; in nfs4_init_cp_state()
989 stid->cs_stid.si_opaque.so_clid.cl_id = nn->s2s_cp_cl_id; in nfs4_init_cp_state()
992 spin_lock(&nn->s2s_cp_lock); in nfs4_init_cp_state()
993 new_id = idr_alloc_cyclic(&nn->s2s_cp_stateids, stid, 0, 0, GFP_NOWAIT); in nfs4_init_cp_state()
994 stid->cs_stid.si_opaque.so_id = new_id; in nfs4_init_cp_state()
995 stid->cs_stid.si_generation = 1; in nfs4_init_cp_state()
996 spin_unlock(&nn->s2s_cp_lock); in nfs4_init_cp_state()
1000 stid->cs_type = cs_type; in nfs4_init_cp_state()
1006 return nfs4_init_cp_state(nn, &copy->cp_stateid, NFS4_COPY_STID); in nfs4_init_copy_state()
1017 cps->cpntf_time = ktime_get_boottime_seconds(); in nfs4_alloc_init_cpntf_state()
1018 refcount_set(&cps->cp_stateid.cs_count, 1); in nfs4_alloc_init_cpntf_state()
1019 if (!nfs4_init_cp_state(nn, &cps->cp_stateid, NFS4_COPYNOTIFY_STID)) in nfs4_alloc_init_cpntf_state()
1021 spin_lock(&nn->s2s_cp_lock); in nfs4_alloc_init_cpntf_state()
1022 list_add(&cps->cp_list, &p_stid->sc_cp_list); in nfs4_alloc_init_cpntf_state()
1023 spin_unlock(&nn->s2s_cp_lock); in nfs4_alloc_init_cpntf_state()
1034 if (copy->cp_stateid.cs_type != NFS4_COPY_STID) in nfs4_free_copy_state()
1036 nn = net_generic(copy->cp_clp->net, nfsd_net_id); in nfs4_free_copy_state()
1037 spin_lock(&nn->s2s_cp_lock); in nfs4_free_copy_state()
1038 idr_remove(&nn->s2s_cp_stateids, in nfs4_free_copy_state()
1039 copy->cp_stateid.cs_stid.si_opaque.so_id); in nfs4_free_copy_state()
1040 spin_unlock(&nn->s2s_cp_lock); in nfs4_free_copy_state()
1049 spin_lock(&nn->s2s_cp_lock); in nfs4_free_cpntf_statelist()
1050 while (!list_empty(&stid->sc_cp_list)) { in nfs4_free_cpntf_statelist()
1051 cps = list_first_entry(&stid->sc_cp_list, in nfs4_free_cpntf_statelist()
1055 spin_unlock(&nn->s2s_cp_lock); in nfs4_free_cpntf_statelist()
1072 * Considering nfsd_break_one_deleg is called with the flc->flc_lock held,
1079 WARN_ON_ONCE(!list_empty(&stid->sc_cp_list)); in nfs4_free_deleg()
1080 WARN_ON_ONCE(!list_empty(&dp->dl_perfile)); in nfs4_free_deleg()
1081 WARN_ON_ONCE(!list_empty(&dp->dl_perclnt)); in nfs4_free_deleg()
1082 WARN_ON_ONCE(!list_empty(&dp->dl_recall_lru)); in nfs4_free_deleg()
1100 * low 3 bytes as hash-table indices.
1119 if (bd->entries == 0) in delegation_blocked()
1121 if (ktime_get_seconds() - bd->swap_time > 30) { in delegation_blocked()
1123 if (ktime_get_seconds() - bd->swap_time > 30) { in delegation_blocked()
1124 bd->entries -= bd->old_entries; in delegation_blocked()
1125 bd->old_entries = bd->entries; in delegation_blocked()
1126 bd->new = 1-bd->new; in delegation_blocked()
1127 memset(bd->set[bd->new], 0, in delegation_blocked()
1128 sizeof(bd->set[0])); in delegation_blocked()
1129 bd->swap_time = ktime_get_seconds(); in delegation_blocked()
1133 hash = jhash(&fh->fh_raw, fh->fh_size, 0); in delegation_blocked()
1134 if (test_bit(hash&255, bd->set[0]) && in delegation_blocked()
1135 test_bit((hash>>8)&255, bd->set[0]) && in delegation_blocked()
1136 test_bit((hash>>16)&255, bd->set[0])) in delegation_blocked()
1139 if (test_bit(hash&255, bd->set[1]) && in delegation_blocked()
1140 test_bit((hash>>8)&255, bd->set[1]) && in delegation_blocked()
1141 test_bit((hash>>16)&255, bd->set[1])) in delegation_blocked()
1152 hash = jhash(&fh->fh_raw, fh->fh_size, 0); in block_delegations()
1155 __set_bit(hash&255, bd->set[bd->new]); in block_delegations()
1156 __set_bit((hash>>8)&255, bd->set[bd->new]); in block_delegations()
1157 __set_bit((hash>>16)&255, bd->set[bd->new]); in block_delegations()
1158 if (bd->entries == 0) in block_delegations()
1159 bd->swap_time = ktime_get_seconds(); in block_delegations()
1160 bd->entries += 1; in block_delegations()
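delegation_blocked() and block_delegations() above treat each filter as 256 bits and derive three bit positions from a single jhash() of the filehandle, using the low three bytes of the 32-bit hash. A tiny worked example of that index derivation (the hash value is invented for illustration):

    #include <stdio.h>

    int main(void)
    {
        unsigned int hash = 0x00a1b2c3;  /* pretend result of jhash(fh, fh_size, 0) */

        /* The three bit indices set/tested in each 256-bit filter: */
        printf("%u %u %u\n",
               hash & 255,               /* 0xc3 = 195 */
               (hash >> 8) & 255,        /* 0xb2 = 178 */
               (hash >> 16) & 255);      /* 0xa1 = 161 */
        return 0;
    }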
1175 if (delegation_blocked(&fp->fi_fhandle)) in alloc_init_deleg()
1186 dp->dl_stid.sc_stateid.si_generation = 1; in alloc_init_deleg()
1187 INIT_LIST_HEAD(&dp->dl_perfile); in alloc_init_deleg()
1188 INIT_LIST_HEAD(&dp->dl_perclnt); in alloc_init_deleg()
1189 INIT_LIST_HEAD(&dp->dl_recall_lru); in alloc_init_deleg()
1190 dp->dl_clnt_odstate = odstate; in alloc_init_deleg()
1192 dp->dl_type = dl_type; in alloc_init_deleg()
1193 dp->dl_retries = 1; in alloc_init_deleg()
1194 dp->dl_recalled = false; in alloc_init_deleg()
1195 nfsd4_init_cb(&dp->dl_recall, dp->dl_stid.sc_client, in alloc_init_deleg()
1198 dp->dl_stid.sc_file = fp; in alloc_init_deleg()
1208 struct nfs4_file *fp = s->sc_file; in nfs4_put_stid()
1209 struct nfs4_client *clp = s->sc_client; in nfs4_put_stid()
1211 might_lock(&clp->cl_lock); in nfs4_put_stid()
1213 if (!refcount_dec_and_lock(&s->sc_count, &clp->cl_lock)) { in nfs4_put_stid()
1217 idr_remove(&clp->cl_stateids, s->sc_stateid.si_opaque.so_id); in nfs4_put_stid()
1218 nfs4_free_cpntf_statelist(clp->net, s); in nfs4_put_stid()
1219 spin_unlock(&clp->cl_lock); in nfs4_put_stid()
1220 s->sc_free(s); in nfs4_put_stid()
1228 stateid_t *src = &stid->sc_stateid; in nfs4_inc_and_copy_stateid()
1230 spin_lock(&stid->sc_lock); in nfs4_inc_and_copy_stateid()
1231 if (unlikely(++src->si_generation == 0)) in nfs4_inc_and_copy_stateid()
1232 src->si_generation = 1; in nfs4_inc_and_copy_stateid()
1234 spin_unlock(&stid->sc_lock); in nfs4_inc_and_copy_stateid()
1241 spin_lock(&fp->fi_lock); in put_deleg_file()
1242 if (--fp->fi_delegees == 0) in put_deleg_file()
1243 swap(nf, fp->fi_deleg_file); in put_deleg_file()
1244 spin_unlock(&fp->fi_lock); in put_deleg_file()
1252 struct nfs4_file *fp = dp->dl_stid.sc_file; in nfs4_unlock_deleg_lease()
1253 struct nfsd_file *nf = fp->fi_deleg_file; in nfs4_unlock_deleg_lease()
1255 WARN_ON_ONCE(!fp->fi_delegees); in nfs4_unlock_deleg_lease()
1257 vfs_setlease(nf->nf_file, F_UNLCK, NULL, (void **)&dp); in nfs4_unlock_deleg_lease()
1263 put_clnt_odstate(dp->dl_clnt_odstate); in destroy_unhashed_deleg()
1265 nfs4_put_stid(&dp->dl_stid); in destroy_unhashed_deleg()
1270 s->sc_type = 0; in nfs4_unhash_stid()
1274 * nfs4_delegation_exists - Discover if this delegation already exists
1289 lockdep_assert_held(&fp->fi_lock); in nfs4_delegation_exists()
1291 list_for_each_entry(searchdp, &fp->fi_delegations, dl_perfile) { in nfs4_delegation_exists()
1292 searchclp = searchdp->dl_stid.sc_client; in nfs4_delegation_exists()
1301 * hash_delegation_locked - Add a delegation to the appropriate lists
1308 * On error: -EAGAIN if one was previously granted to this
1316 struct nfs4_client *clp = dp->dl_stid.sc_client; in hash_delegation_locked()
1319 lockdep_assert_held(&fp->fi_lock); in hash_delegation_locked()
1322 return -EAGAIN; in hash_delegation_locked()
1323 refcount_inc(&dp->dl_stid.sc_count); in hash_delegation_locked()
1324 dp->dl_stid.sc_type = NFS4_DELEG_STID; in hash_delegation_locked()
1325 list_add(&dp->dl_perfile, &fp->fi_delegations); in hash_delegation_locked()
1326 list_add(&dp->dl_perclnt, &clp->cl_delegations); in hash_delegation_locked()
1332 return !(list_empty(&dp->dl_perfile)); in delegation_hashed()
1338 struct nfs4_file *fp = dp->dl_stid.sc_file; in unhash_delegation_locked()
1345 dp->dl_stid.sc_type = NFS4_CLOSED_DELEG_STID; in unhash_delegation_locked()
1347 ++dp->dl_time; in unhash_delegation_locked()
1348 spin_lock(&fp->fi_lock); in unhash_delegation_locked()
1349 list_del_init(&dp->dl_perclnt); in unhash_delegation_locked()
1350 list_del_init(&dp->dl_recall_lru); in unhash_delegation_locked()
1351 list_del_init(&dp->dl_perfile); in unhash_delegation_locked()
1352 spin_unlock(&fp->fi_lock); in unhash_delegation_locked()
1369 struct nfs4_client *clp = dp->dl_stid.sc_client; in revoke_delegation()
1371 WARN_ON(!list_empty(&dp->dl_recall_lru)); in revoke_delegation()
1373 trace_nfsd_stid_revoke(&dp->dl_stid); in revoke_delegation()
1375 if (clp->cl_minorversion) { in revoke_delegation()
1376 spin_lock(&clp->cl_lock); in revoke_delegation()
1377 dp->dl_stid.sc_type = NFS4_REVOKED_DELEG_STID; in revoke_delegation()
1378 refcount_inc(&dp->dl_stid.sc_count); in revoke_delegation()
1379 list_add(&dp->dl_recall_lru, &clp->cl_revoked); in revoke_delegation()
1380 spin_unlock(&clp->cl_lock); in revoke_delegation()
1408 spin_lock(&fp->fi_lock); in recalculate_deny_mode()
1409 fp->fi_share_deny = 0; in recalculate_deny_mode()
1410 list_for_each_entry(stp, &fp->fi_stateids, st_perfile) in recalculate_deny_mode()
1411 fp->fi_share_deny |= bmap_to_share_mode(stp->st_deny_bmap); in recalculate_deny_mode()
1412 spin_unlock(&fp->fi_lock); in recalculate_deny_mode()
1428 /* Recalculate per-file deny mode if there was a change */ in reset_union_bmap_deny()
1430 recalculate_deny_mode(stp->st_stid.sc_file); in reset_union_bmap_deny()
1438 struct nfs4_file *fp = stp->st_stid.sc_file; in release_all_access()
1440 if (fp && stp->st_deny_bmap != 0) in release_all_access()
1445 nfs4_file_put_access(stp->st_stid.sc_file, i); in release_all_access()
1452 kfree(sop->so_owner.data); in nfs4_free_stateowner()
1453 sop->so_ops->so_free(sop); in nfs4_free_stateowner()
1458 struct nfs4_client *clp = sop->so_client; in nfs4_put_stateowner()
1460 might_lock(&clp->cl_lock); in nfs4_put_stateowner()
1462 if (!atomic_dec_and_lock(&sop->so_count, &clp->cl_lock)) in nfs4_put_stateowner()
1464 sop->so_ops->so_unhash(sop); in nfs4_put_stateowner()
1465 spin_unlock(&clp->cl_lock); in nfs4_put_stateowner()
1472 return list_empty(&stp->st_perfile); in nfs4_ol_stateid_unhashed()
1477 struct nfs4_file *fp = stp->st_stid.sc_file; in unhash_ol_stateid()
1479 lockdep_assert_held(&stp->st_stateowner->so_client->cl_lock); in unhash_ol_stateid()
1481 if (list_empty(&stp->st_perfile)) in unhash_ol_stateid()
1484 spin_lock(&fp->fi_lock); in unhash_ol_stateid()
1485 list_del_init(&stp->st_perfile); in unhash_ol_stateid()
1486 spin_unlock(&fp->fi_lock); in unhash_ol_stateid()
1487 list_del(&stp->st_perstateowner); in unhash_ol_stateid()
1495 put_clnt_odstate(stp->st_clnt_odstate); in nfs4_free_ol_stateid()
1497 if (stp->st_stateowner) in nfs4_free_ol_stateid()
1498 nfs4_put_stateowner(stp->st_stateowner); in nfs4_free_ol_stateid()
1499 WARN_ON(!list_empty(&stid->sc_cp_list)); in nfs4_free_ol_stateid()
1506 struct nfs4_lockowner *lo = lockowner(stp->st_stateowner); in nfs4_free_lock_stateid()
1509 nf = find_any_file(stp->st_stid.sc_file); in nfs4_free_lock_stateid()
1511 get_file(nf->nf_file); in nfs4_free_lock_stateid()
1512 filp_close(nf->nf_file, (fl_owner_t)lo); in nfs4_free_lock_stateid()
1526 struct nfs4_stid *s = &stp->st_stid; in put_ol_stateid_locked()
1527 struct nfs4_client *clp = s->sc_client; in put_ol_stateid_locked()
1529 lockdep_assert_held(&clp->cl_lock); in put_ol_stateid_locked()
1531 WARN_ON_ONCE(!list_empty(&stp->st_locks)); in put_ol_stateid_locked()
1533 if (!refcount_dec_and_test(&s->sc_count)) { in put_ol_stateid_locked()
1538 idr_remove(&clp->cl_stateids, s->sc_stateid.si_opaque.so_id); in put_ol_stateid_locked()
1539 list_add(&stp->st_locks, reaplist); in put_ol_stateid_locked()
1544 lockdep_assert_held(&stp->st_stid.sc_client->cl_lock); in unhash_lock_stateid()
1548 list_del_init(&stp->st_locks); in unhash_lock_stateid()
1549 nfs4_unhash_stid(&stp->st_stid); in unhash_lock_stateid()
1555 struct nfs4_client *clp = stp->st_stid.sc_client; in release_lock_stateid()
1558 spin_lock(&clp->cl_lock); in release_lock_stateid()
1560 spin_unlock(&clp->cl_lock); in release_lock_stateid()
1562 nfs4_put_stid(&stp->st_stid); in release_lock_stateid()
1567 struct nfs4_client *clp = lo->lo_owner.so_client; in unhash_lockowner_locked()
1569 lockdep_assert_held(&clp->cl_lock); in unhash_lockowner_locked()
1571 list_del_init(&lo->lo_owner.so_strhash); in unhash_lockowner_locked()
1589 list_del(&stp->st_locks); in free_ol_stateid_reaplist()
1590 fp = stp->st_stid.sc_file; in free_ol_stateid_reaplist()
1591 stp->st_stid.sc_free(&stp->st_stid); in free_ol_stateid_reaplist()
1602 lockdep_assert_held(&open_stp->st_stid.sc_client->cl_lock); in release_open_stateid_locks()
1604 while (!list_empty(&open_stp->st_locks)) { in release_open_stateid_locks()
1605 stp = list_entry(open_stp->st_locks.next, in release_open_stateid_locks()
1615 lockdep_assert_held(&stp->st_stid.sc_client->cl_lock); in unhash_open_stateid()
1627 spin_lock(&stp->st_stid.sc_client->cl_lock); in release_open_stateid()
1630 spin_unlock(&stp->st_stid.sc_client->cl_lock); in release_open_stateid()
1636 lockdep_assert_held(&oo->oo_owner.so_client->cl_lock); in nfs4_openowner_unhashed()
1638 return list_empty(&oo->oo_owner.so_strhash) && in nfs4_openowner_unhashed()
1639 list_empty(&oo->oo_perclient); in nfs4_openowner_unhashed()
1644 struct nfs4_client *clp = oo->oo_owner.so_client; in unhash_openowner_locked()
1646 lockdep_assert_held(&clp->cl_lock); in unhash_openowner_locked()
1648 list_del_init(&oo->oo_owner.so_strhash); in unhash_openowner_locked()
1649 list_del_init(&oo->oo_perclient); in unhash_openowner_locked()
1654 struct nfsd_net *nn = net_generic(oo->oo_owner.so_client->net, in release_last_closed_stateid()
1658 spin_lock(&nn->client_lock); in release_last_closed_stateid()
1659 s = oo->oo_last_closed_stid; in release_last_closed_stateid()
1661 list_del_init(&oo->oo_close_lru); in release_last_closed_stateid()
1662 oo->oo_last_closed_stid = NULL; in release_last_closed_stateid()
1664 spin_unlock(&nn->client_lock); in release_last_closed_stateid()
1666 nfs4_put_stid(&s->st_stid); in release_last_closed_stateid()
1672 struct nfs4_client *clp = oo->oo_owner.so_client; in release_openowner()
1677 spin_lock(&clp->cl_lock); in release_openowner()
1679 while (!list_empty(&oo->oo_owner.so_stateids)) { in release_openowner()
1680 stp = list_first_entry(&oo->oo_owner.so_stateids, in release_openowner()
1685 spin_unlock(&clp->cl_lock); in release_openowner()
1688 nfs4_put_stateowner(&oo->oo_owner); in release_openowner()
1696 return sid->sequence % SESSION_HASH_SIZE; in hash_sessionid()
1703 u32 *ptr = (u32 *)(&sessionid->data[0]); in dump_sessionid()
1714 * Bump the seqid on cstate->replay_owner, and clear replay_owner if it
1719 struct nfs4_stateowner *so = cstate->replay_owner; in nfsd4_bump_seqid()
1730 if (so->so_is_open_owner) in nfsd4_bump_seqid()
1732 so->so_seqid++; in nfsd4_bump_seqid()
1739 struct nfs4_client *clp = ses->se_client; in gen_sessionid()
1742 sid = (struct nfsd4_sessionid *)ses->se_sessionid.data; in gen_sessionid()
1743 sid->clientid = clp->cl_clientid; in gen_sessionid()
1744 sid->sequence = current_sessionid++; in gen_sessionid()
1745 sid->reserved = 0; in gen_sessionid()
1751 * the end of the initial SEQUENCE operation--the rest we regenerate
1757 * verifier), 12 for the compound header (with zero-length tag), and 44
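The comment fragment above describes the fixed headroom that is never part of the cached reply data: 12 bytes of compound header with a zero-length tag plus 44 bytes for the SEQUENCE result, on top of a minimal RPC reply header (24 bytes with an AUTH_NULL verifier; that term comes from the part of the comment this listing truncates). A sketch of the resulting sum, under that assumption:

    /* Minimal reply framing excluded from the per-slot cache:
     * 24 (RPC reply header, AUTH_NULL verifier) + 12 (compound header,
     * zero-length tag) + 44 (SEQUENCE result) = 80 bytes.
     */
    #define MIN_HDR_SEQ_SZ (24 + 12 + 44)   /* = 80 */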
1767 for (i = 0; i < ses->se_fchannel.maxreqs; i++) { in free_session_slots()
1768 free_svc_cred(&ses->se_slots[i]->sl_cred); in free_session_slots()
1769 kfree(ses->se_slots[i]); in free_session_slots()
1781 if (ca->maxresp_cached < NFSD_MIN_HDR_SEQ_SZ) in slot_bytes()
1784 size = ca->maxresp_cached - NFSD_MIN_HDR_SEQ_SZ; in slot_bytes()
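A quick worked example of the subtraction in slot_bytes() above, using the 80-byte minimum from the preceding comment: a client advertising a ca_maxresponsesize_cached of 2128 bytes (a value picked only to make the arithmetic round) leaves 2128 - 80 = 2048 bytes of encoded reply data to cache per slot.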
1790 * re-negotiate active sessions and reduce their slot usage to make
1796 u32 num = ca->maxreqs; in nfsd4_get_drc_mem()
1802 total_avail = nfsd_drc_max_mem - nfsd_drc_mem_used; in nfsd4_get_drc_mem()
1818 * over-allocation--it is better than failure. in nfsd4_get_drc_mem()
1820 scale_factor = max_t(unsigned int, 8, nn->nfsd_serv->sv_nrthreads); in nfsd4_get_drc_mem()
1837 nfsd_drc_mem_used -= slotsize * ca->maxreqs; in nfsd4_put_drc_mem()
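nfsd4_get_drc_mem() above caps how much of the remaining reply-cache memory one session may claim: the scale factor is max(8, number of nfsd threads), and (per the surrounding code, not all of which is captured in this listing) the remaining memory is divided by that factor, with a one-slot floor. A hedged standalone sketch of the resulting cap; the byte figure is invented for illustration:

    #include <stdio.h>

    /* Mirrors max_t(unsigned int, 8, nn->nfsd_serv->sv_nrthreads) above. */
    static unsigned int scale_factor(unsigned int nrthreads)
    {
        return nrthreads > 8 ? nrthreads : 8;
    }

    int main(void)
    {
        unsigned long total_avail = 8UL << 20;   /* pretend 8 MB of DRC memory left */

        printf("4 threads:  per-session cap %lu bytes\n", total_avail / scale_factor(4));
        printf("32 threads: per-session cap %lu bytes\n", total_avail / scale_factor(32));
        return 0;
    }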
1844 int numslots = fattrs->maxreqs; in alloc_session()
1857 new->se_slots[i] = kzalloc(slotsize, GFP_KERNEL); in alloc_session()
1858 if (!new->se_slots[i]) in alloc_session()
1862 memcpy(&new->se_fchannel, fattrs, sizeof(struct nfsd4_channel_attrs)); in alloc_session()
1863 memcpy(&new->se_bchannel, battrs, sizeof(struct nfsd4_channel_attrs)); in alloc_session()
1867 while (i--) in alloc_session()
1868 kfree(new->se_slots[i]); in alloc_session()
1875 svc_xprt_put(c->cn_xprt); in free_conn()
1882 struct nfs4_client *clp = c->cn_session->se_client; in nfsd4_conn_lost()
1886 spin_lock(&clp->cl_lock); in nfsd4_conn_lost()
1887 if (!list_empty(&c->cn_persession)) { in nfsd4_conn_lost()
1888 list_del(&c->cn_persession); in nfsd4_conn_lost()
1892 spin_unlock(&clp->cl_lock); in nfsd4_conn_lost()
1902 svc_xprt_get(rqstp->rq_xprt); in alloc_conn()
1903 conn->cn_xprt = rqstp->rq_xprt; in alloc_conn()
1904 conn->cn_flags = flags; in alloc_conn()
1905 INIT_LIST_HEAD(&conn->cn_xpt_user.list); in alloc_conn()
1911 conn->cn_session = ses; in __nfsd4_hash_conn()
1912 list_add(&conn->cn_persession, &ses->se_conns); in __nfsd4_hash_conn()
1917 struct nfs4_client *clp = ses->se_client; in nfsd4_hash_conn()
1919 spin_lock(&clp->cl_lock); in nfsd4_hash_conn()
1921 spin_unlock(&clp->cl_lock); in nfsd4_hash_conn()
1926 conn->cn_xpt_user.callback = nfsd4_conn_lost; in nfsd4_register_conn()
1927 return register_xpt_user(conn->cn_xprt, &conn->cn_xpt_user); in nfsd4_register_conn()
1938 nfsd4_conn_lost(&conn->cn_xpt_user); in nfsd4_init_conn()
1940 nfsd4_probe_callback_sync(ses->se_client); in nfsd4_init_conn()
1947 if (cses->flags & SESSION4_BACK_CHAN) in alloc_conn_from_crses()
1955 struct nfs4_client *clp = s->se_client; in nfsd4_del_conns()
1958 spin_lock(&clp->cl_lock); in nfsd4_del_conns()
1959 while (!list_empty(&s->se_conns)) { in nfsd4_del_conns()
1960 c = list_first_entry(&s->se_conns, struct nfsd4_conn, cn_persession); in nfsd4_del_conns()
1961 list_del_init(&c->cn_persession); in nfsd4_del_conns()
1962 spin_unlock(&clp->cl_lock); in nfsd4_del_conns()
1964 unregister_xpt_user(c->cn_xprt, &c->cn_xpt_user); in nfsd4_del_conns()
1967 spin_lock(&clp->cl_lock); in nfsd4_del_conns()
1969 spin_unlock(&clp->cl_lock); in nfsd4_del_conns()
1981 nfsd4_put_drc_mem(&ses->se_fchannel); in free_session()
1990 new->se_client = clp; in init_session()
1993 INIT_LIST_HEAD(&new->se_conns); in init_session()
1995 new->se_cb_seq_nr = 1; in init_session()
1996 new->se_flags = cses->flags; in init_session()
1997 new->se_cb_prog = cses->callback_prog; in init_session()
1998 new->se_cb_sec = cses->cb_sec; in init_session()
1999 atomic_set(&new->se_ref, 0); in init_session()
2000 idx = hash_sessionid(&new->se_sessionid); in init_session()
2001 list_add(&new->se_hash, &nn->sessionid_hashtbl[idx]); in init_session()
2002 spin_lock(&clp->cl_lock); in init_session()
2003 list_add(&new->se_perclnt, &clp->cl_sessions); in init_session()
2004 spin_unlock(&clp->cl_lock); in init_session()
2015 rpc_copy_addr((struct sockaddr *)&clp->cl_cb_conn.cb_addr, sa); in init_session()
2016 clp->cl_cb_conn.cb_addrlen = svc_addr_len(sa); in init_session()
2028 lockdep_assert_held(&nn->client_lock); in __find_in_sessionid_hashtbl()
2033 list_for_each_entry(elem, &nn->sessionid_hashtbl[idx], se_hash) { in __find_in_sessionid_hashtbl()
2034 if (!memcmp(elem->se_sessionid.data, sessionid->data, in __find_in_sessionid_hashtbl()
2066 struct nfs4_client *clp = ses->se_client; in unhash_session()
2067 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); in unhash_session()
2069 lockdep_assert_held(&nn->client_lock); in unhash_session()
2071 list_del(&ses->se_hash); in unhash_session()
2072 spin_lock(&ses->se_client->cl_lock); in unhash_session()
2073 list_del(&ses->se_perclnt); in unhash_session()
2074 spin_unlock(&ses->se_client->cl_lock); in unhash_session()
2086 if (clid->cl_boot == (u32)nn->boot_time) in STALE_CLIENTID()
2103 if (atomic_read(&nn->nfs4_client_count) >= nn->nfs4_max_clients) { in alloc_client()
2104 mod_delayed_work(laundry_wq, &nn->laundromat_work, 0); in alloc_client()
2110 xdr_netobj_dup(&clp->cl_name, &name, GFP_KERNEL); in alloc_client()
2111 if (clp->cl_name.data == NULL) in alloc_client()
2113 clp->cl_ownerstr_hashtbl = kmalloc_array(OWNER_HASH_SIZE, in alloc_client()
2116 if (!clp->cl_ownerstr_hashtbl) in alloc_client()
2119 INIT_LIST_HEAD(&clp->cl_ownerstr_hashtbl[i]); in alloc_client()
2120 INIT_LIST_HEAD(&clp->cl_sessions); in alloc_client()
2121 idr_init(&clp->cl_stateids); in alloc_client()
2122 atomic_set(&clp->cl_rpc_users, 0); in alloc_client()
2123 clp->cl_cb_state = NFSD4_CB_UNKNOWN; in alloc_client()
2124 clp->cl_state = NFSD4_ACTIVE; in alloc_client()
2125 atomic_inc(&nn->nfs4_client_count); in alloc_client()
2126 atomic_set(&clp->cl_delegs_in_recall, 0); in alloc_client()
2127 INIT_LIST_HEAD(&clp->cl_idhash); in alloc_client()
2128 INIT_LIST_HEAD(&clp->cl_openowners); in alloc_client()
2129 INIT_LIST_HEAD(&clp->cl_delegations); in alloc_client()
2130 INIT_LIST_HEAD(&clp->cl_lru); in alloc_client()
2131 INIT_LIST_HEAD(&clp->cl_revoked); in alloc_client()
2133 INIT_LIST_HEAD(&clp->cl_lo_states); in alloc_client()
2135 INIT_LIST_HEAD(&clp->async_copies); in alloc_client()
2136 spin_lock_init(&clp->async_lock); in alloc_client()
2137 spin_lock_init(&clp->cl_lock); in alloc_client()
2138 rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table"); in alloc_client()
2141 kfree(clp->cl_name.data); in alloc_client()
2152 free_svc_cred(&clp->cl_cred); in __free_client()
2153 kfree(clp->cl_ownerstr_hashtbl); in __free_client()
2154 kfree(clp->cl_name.data); in __free_client()
2155 kfree(clp->cl_nii_domain.data); in __free_client()
2156 kfree(clp->cl_nii_name.data); in __free_client()
2157 idr_destroy(&clp->cl_stateids); in __free_client()
2158 kfree(clp->cl_ra); in __free_client()
2164 kref_put(&clp->cl_nfsdfs.cl_ref, __free_client); in drop_client()
2170 while (!list_empty(&clp->cl_sessions)) { in free_client()
2172 ses = list_entry(clp->cl_sessions.next, struct nfsd4_session, in free_client()
2174 list_del(&ses->se_perclnt); in free_client()
2175 WARN_ON_ONCE(atomic_read(&ses->se_ref)); in free_client()
2178 rpc_destroy_wait_queue(&clp->cl_cb_waitq); in free_client()
2179 if (clp->cl_nfsd_dentry) { in free_client()
2180 nfsd_client_rmdir(clp->cl_nfsd_dentry); in free_client()
2181 clp->cl_nfsd_dentry = NULL; in free_client()
2191 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); in unhash_client_locked()
2194 lockdep_assert_held(&nn->client_lock); in unhash_client_locked()
2197 clp->cl_time = 0; in unhash_client_locked()
2199 if (!list_empty(&clp->cl_idhash)) { in unhash_client_locked()
2200 list_del_init(&clp->cl_idhash); in unhash_client_locked()
2201 if (test_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags)) in unhash_client_locked()
2202 rb_erase(&clp->cl_namenode, &nn->conf_name_tree); in unhash_client_locked()
2204 rb_erase(&clp->cl_namenode, &nn->unconf_name_tree); in unhash_client_locked()
2206 list_del_init(&clp->cl_lru); in unhash_client_locked()
2207 spin_lock(&clp->cl_lock); in unhash_client_locked()
2208 list_for_each_entry(ses, &clp->cl_sessions, se_perclnt) in unhash_client_locked()
2209 list_del_init(&ses->se_hash); in unhash_client_locked()
2210 spin_unlock(&clp->cl_lock); in unhash_client_locked()
2216 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); in unhash_client()
2218 spin_lock(&nn->client_lock); in unhash_client()
2220 spin_unlock(&nn->client_lock); in unhash_client()
2225 if (atomic_read(&clp->cl_rpc_users)) in mark_client_expired_locked()
2234 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); in __destroy_client()
2242 while (!list_empty(&clp->cl_delegations)) { in __destroy_client()
2243 dp = list_entry(clp->cl_delegations.next, struct nfs4_delegation, dl_perclnt); in __destroy_client()
2245 list_add(&dp->dl_recall_lru, &reaplist); in __destroy_client()
2250 list_del_init(&dp->dl_recall_lru); in __destroy_client()
2253 while (!list_empty(&clp->cl_revoked)) { in __destroy_client()
2254 dp = list_entry(clp->cl_revoked.next, struct nfs4_delegation, dl_recall_lru); in __destroy_client()
2255 list_del_init(&dp->dl_recall_lru); in __destroy_client()
2256 nfs4_put_stid(&dp->dl_stid); in __destroy_client()
2258 while (!list_empty(&clp->cl_openowners)) { in __destroy_client()
2259 oo = list_entry(clp->cl_openowners.next, struct nfs4_openowner, oo_perclient); in __destroy_client()
2260 nfs4_get_stateowner(&oo->oo_owner); in __destroy_client()
2266 list_for_each_entry_safe(so, tmp, &clp->cl_ownerstr_hashtbl[i], in __destroy_client()
2269 WARN_ON_ONCE(so->so_is_open_owner); in __destroy_client()
2276 if (clp->cl_cb_conn.cb_xprt) in __destroy_client()
2277 svc_xprt_put(clp->cl_cb_conn.cb_xprt); in __destroy_client()
2278 atomic_add_unless(&nn->nfs4_client_count, -1, 0); in __destroy_client()
2293 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); in inc_reclaim_complete()
2295 if (!nn->track_reclaim_completes) in inc_reclaim_complete()
2297 if (!nfsd4_find_reclaim_client(clp->cl_name, nn)) in inc_reclaim_complete()
2299 if (atomic_inc_return(&nn->nr_reclaim_complete) == in inc_reclaim_complete()
2300 nn->reclaim_str_hashtbl_size) { in inc_reclaim_complete()
2302 clp->net->ns.inum); in inc_reclaim_complete()
2316 memcpy(target->cl_verifier.data, source->data, in copy_verf()
2317 sizeof(target->cl_verifier.data)); in copy_verf()
2322 target->cl_clientid.cl_boot = source->cl_clientid.cl_boot; in copy_clid()
2323 target->cl_clientid.cl_id = source->cl_clientid.cl_id; in copy_clid()
2328 target->cr_principal = kstrdup(source->cr_principal, GFP_KERNEL); in copy_cred()
2329 target->cr_raw_principal = kstrdup(source->cr_raw_principal, in copy_cred()
2331 target->cr_targ_princ = kstrdup(source->cr_targ_princ, GFP_KERNEL); in copy_cred()
2332 if ((source->cr_principal && !target->cr_principal) || in copy_cred()
2333 (source->cr_raw_principal && !target->cr_raw_principal) || in copy_cred()
2334 (source->cr_targ_princ && !target->cr_targ_princ)) in copy_cred()
2335 return -ENOMEM; in copy_cred()
2337 target->cr_flavor = source->cr_flavor; in copy_cred()
2338 target->cr_uid = source->cr_uid; in copy_cred()
2339 target->cr_gid = source->cr_gid; in copy_cred()
2340 target->cr_group_info = source->cr_group_info; in copy_cred()
2341 get_group_info(target->cr_group_info); in copy_cred()
2342 target->cr_gss_mech = source->cr_gss_mech; in copy_cred()
2343 if (source->cr_gss_mech) in copy_cred()
2344 gss_mech_get(source->cr_gss_mech); in copy_cred()
2351 if (o1->len < o2->len) in compare_blob()
2352 return -1; in compare_blob()
2353 if (o1->len > o2->len) in compare_blob()
2355 return memcmp(o1->data, o2->data, o1->len); in compare_blob()
2361 return 0 == memcmp(v1->data, v2->data, sizeof(v1->data)); in same_verf()
2367 return (cl1->cl_boot == cl2->cl_boot) && (cl1->cl_id == cl2->cl_id); in same_clid()
2374 if (g1->ngroups != g2->ngroups) in groups_equal()
2376 for (i=0; i<g1->ngroups; i++) in groups_equal()
2377 if (!gid_eq(g1->gid[i], g2->gid[i])) in groups_equal()
2386 * approximation. We also don't want to allow non-gss use of a client
2394 return (cr->cr_flavor > RPC_AUTH_MAXFLAVOR); in is_gss_cred()
2402 || (!uid_eq(cr1->cr_uid, cr2->cr_uid)) in same_creds()
2403 || (!gid_eq(cr1->cr_gid, cr2->cr_gid)) in same_creds()
2404 || !groups_equal(cr1->cr_group_info, cr2->cr_group_info)) in same_creds()
2407 if (cr1->cr_principal == cr2->cr_principal) in same_creds()
2409 if (!cr1->cr_principal || !cr2->cr_principal) in same_creds()
2411 return 0 == strcmp(cr1->cr_principal, cr2->cr_principal); in same_creds()
2416 struct svc_cred *cr = &rqstp->rq_cred; in svc_rqst_integrity_protected()
2419 if (!cr->cr_gss_mech) in svc_rqst_integrity_protected()
2421 service = gss_pseudoflavor_to_service(cr->cr_gss_mech, cr->cr_flavor); in svc_rqst_integrity_protected()
2428 struct svc_cred *cr = &rqstp->rq_cred; in nfsd4_mach_creds_match()
2430 if (!cl->cl_mach_cred) in nfsd4_mach_creds_match()
2432 if (cl->cl_cred.cr_gss_mech != cr->cr_gss_mech) in nfsd4_mach_creds_match()
2436 if (cl->cl_cred.cr_raw_principal) in nfsd4_mach_creds_match()
2437 return 0 == strcmp(cl->cl_cred.cr_raw_principal, in nfsd4_mach_creds_match()
2438 cr->cr_raw_principal); in nfsd4_mach_creds_match()
2439 if (!cr->cr_principal) in nfsd4_mach_creds_match()
2441 return 0 == strcmp(cl->cl_cred.cr_principal, cr->cr_principal); in nfsd4_mach_creds_match()
2449 * This is opaque to client, so no need to byte-swap. Use in gen_confirm()
2453 verf[1] = (__force __be32)nn->clverifier_counter++; in gen_confirm()
2454 memcpy(clp->cl_confirm.data, verf, sizeof(clp->cl_confirm.data)); in gen_confirm()
2459 clp->cl_clientid.cl_boot = (u32)nn->boot_time; in gen_clid()
2460 clp->cl_clientid.cl_id = nn->clientid_counter++; in gen_clid()
2469 ret = idr_find(&cl->cl_stateids, t->si_opaque.so_id); in find_stateid_locked()
2470 if (!ret || !ret->sc_type) in find_stateid_locked()
2480 spin_lock(&cl->cl_lock); in find_stateid_by_type()
2483 if (typemask & s->sc_type) in find_stateid_by_type()
2484 refcount_inc(&s->sc_count); in find_stateid_by_type()
2488 spin_unlock(&cl->cl_lock); in find_stateid_by_type()
2525 struct inode *inode = file_inode(m->file); in client_info_show()
2531 return -ENXIO; in client_info_show()
2532 memcpy(&clid, &clp->cl_clientid, sizeof(clid)); in client_info_show()
2534 seq_printf(m, "address: \"%pISpc\"\n", (struct sockaddr *)&clp->cl_addr); in client_info_show()
2536 if (clp->cl_state == NFSD4_COURTESY) in client_info_show()
2538 else if (clp->cl_state == NFSD4_EXPIRABLE) in client_info_show()
2540 else if (test_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags)) in client_info_show()
2545 ktime_get_boottime_seconds() - clp->cl_time); in client_info_show()
2547 seq_quote_mem(m, clp->cl_name.data, clp->cl_name.len); in client_info_show()
2548 seq_printf(m, "\nminor version: %d\n", clp->cl_minorversion); in client_info_show()
2549 if (clp->cl_nii_domain.data) { in client_info_show()
2551 seq_quote_mem(m, clp->cl_nii_domain.data, in client_info_show()
2552 clp->cl_nii_domain.len); in client_info_show()
2554 seq_quote_mem(m, clp->cl_nii_name.data, clp->cl_nii_name.len); in client_info_show()
2556 clp->cl_nii_time.tv_sec, clp->cl_nii_time.tv_nsec); in client_info_show()
2558 seq_printf(m, "callback state: %s\n", cb_state2str(clp->cl_cb_state)); in client_info_show()
2559 seq_printf(m, "callback address: %pISpc\n", &clp->cl_cb_conn.cb_addr); in client_info_show()
2568 __acquires(&clp->cl_lock) in states_start()
2570 struct nfs4_client *clp = s->private; in states_start()
2574 spin_lock(&clp->cl_lock); in states_start()
2575 ret = idr_get_next_ul(&clp->cl_stateids, &id); in states_start()
2582 struct nfs4_client *clp = s->private; in states_next()
2588 ret = idr_get_next_ul(&clp->cl_stateids, &id); in states_next()
2594 __releases(&clp->cl_lock) in states_stop()
2596 struct nfs4_client *clp = s->private; in states_stop()
2598 spin_unlock(&clp->cl_lock); in states_stop()
2603 seq_printf(s, "filename: \"%pD2\"", f->nf_file); in nfs4_show_fname()
2608 struct inode *inode = file_inode(f->nf_file); in nfs4_show_superblock()
2611 MAJOR(inode->i_sb->s_dev), in nfs4_show_superblock()
2612 MINOR(inode->i_sb->s_dev), in nfs4_show_superblock()
2613 inode->i_ino); in nfs4_show_superblock()
2619 seq_quote_mem(s, oo->so_owner.data, oo->so_owner.len); in nfs4_show_owner()
2624 seq_printf(s, "0x%.8x", stid->si_generation); in nfs4_show_stateid()
2625 seq_printf(s, "%12phN", &stid->si_opaque); in nfs4_show_stateid()
2636 if (st->sc_type != NFS4_OPEN_STID && st->sc_type != NFS4_LOCK_STID) in nfs4_show_open()
2639 oo = ols->st_stateowner; in nfs4_show_open()
2640 nf = st->sc_file; in nfs4_show_open()
2642 spin_lock(&nf->fi_lock); in nfs4_show_open()
2647 seq_printf(s, "- "); in nfs4_show_open()
2648 nfs4_show_stateid(s, &st->sc_stateid); in nfs4_show_open()
2651 access = bmap_to_share_mode(ols->st_access_bmap); in nfs4_show_open()
2652 deny = bmap_to_share_mode(ols->st_deny_bmap); in nfs4_show_open()
2655 access & NFS4_SHARE_ACCESS_READ ? "r" : "-", in nfs4_show_open()
2656 access & NFS4_SHARE_ACCESS_WRITE ? "w" : "-"); in nfs4_show_open()
2658 deny & NFS4_SHARE_ACCESS_READ ? "r" : "-", in nfs4_show_open()
2659 deny & NFS4_SHARE_ACCESS_WRITE ? "w" : "-"); in nfs4_show_open()
2668 spin_unlock(&nf->fi_lock); in nfs4_show_open()
2680 oo = ols->st_stateowner; in nfs4_show_lock()
2681 nf = st->sc_file; in nfs4_show_lock()
2682 spin_lock(&nf->fi_lock); in nfs4_show_lock()
2687 seq_printf(s, "- "); in nfs4_show_lock()
2688 nfs4_show_stateid(s, &st->sc_stateid); in nfs4_show_lock()
2706 spin_unlock(&nf->fi_lock); in nfs4_show_lock()
2717 nf = st->sc_file; in nfs4_show_deleg()
2718 spin_lock(&nf->fi_lock); in nfs4_show_deleg()
2719 file = nf->fi_deleg_file; in nfs4_show_deleg()
2723 seq_printf(s, "- "); in nfs4_show_deleg()
2724 nfs4_show_stateid(s, &st->sc_stateid); in nfs4_show_deleg()
2729 ds->dl_type == NFS4_OPEN_DELEGATE_READ ? "r" : "w"); in nfs4_show_deleg()
2738 spin_unlock(&nf->fi_lock); in nfs4_show_deleg()
2748 file = ls->ls_file; in nfs4_show_layout()
2750 seq_printf(s, "- "); in nfs4_show_layout()
2751 nfs4_show_stateid(s, &st->sc_stateid); in nfs4_show_layout()
2768 switch (st->sc_type) { in states_show()
2798 return -ENXIO; in client_states_open()
2803 s = file->private_data; in client_states_open()
2804 s->private = clp; in client_states_open()
2810 struct seq_file *m = file->private_data; in client_opens_release()
2811 struct nfs4_client *clp = m->private; in client_opens_release()
2833 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); in force_expire_client()
2836 trace_nfsd_clid_admin_expired(&clp->cl_clientid); in force_expire_client()
2838 spin_lock(&nn->client_lock); in force_expire_client()
2839 clp->cl_time = 0; in force_expire_client()
2840 spin_unlock(&nn->client_lock); in force_expire_client()
2842 wait_event(expiry_wq, atomic_read(&clp->cl_rpc_users) == 0); in force_expire_client()
2843 spin_lock(&nn->client_lock); in force_expire_client()
2844 already_expired = list_empty(&clp->cl_lru); in force_expire_client()
2847 spin_unlock(&nn->client_lock); in force_expire_client()
2852 wait_event(expiry_wq, clp->cl_nfsd_dentry == NULL); in force_expire_client()
2865 return -EINVAL; in client_ctl_write()
2868 return -ENXIO; in client_ctl_write()
2891 switch (task->tk_status) { in nfsd4_cb_recall_any_done()
2892 case -NFS4ERR_DELAY: in nfsd4_cb_recall_any_done()
2903 struct nfs4_client *clp = cb->cb_clp; in nfsd4_cb_recall_any_release()
2905 clear_bit(NFSD4_CLIENT_CB_RECALL_ANY, &clp->cl_flags); in nfsd4_cb_recall_any_release()
2928 ret = copy_cred(&clp->cl_cred, &rqstp->rq_cred); in create_client()
2934 kref_init(&clp->cl_nfsdfs.cl_ref); in create_client()
2935 nfsd4_init_cb(&clp->cl_cb_null, clp, NULL, NFSPROC4_CLNT_CB_NULL); in create_client()
2936 clp->cl_time = ktime_get_boottime_seconds(); in create_client()
2937 clear_bit(0, &clp->cl_cb_slot_busy); in create_client()
2939 memcpy(&clp->cl_addr, sa, sizeof(struct sockaddr_storage)); in create_client()
2940 clp->cl_cb_session = NULL; in create_client()
2941 clp->net = net; in create_client()
2942 clp->cl_nfsd_dentry = nfsd_client_mkdir( in create_client()
2943 nn, &clp->cl_nfsdfs, in create_client()
2944 clp->cl_clientid.cl_id - nn->clientid_base, in create_client()
2946 clp->cl_nfsd_info_dentry = dentries[0]; in create_client()
2947 if (!clp->cl_nfsd_dentry) { in create_client()
2951 clp->cl_ra = kzalloc(sizeof(*clp->cl_ra), GFP_KERNEL); in create_client()
2952 if (!clp->cl_ra) { in create_client()
2956 clp->cl_ra_time = 0; in create_client()
2957 nfsd4_init_cb(&clp->cl_ra->ra_cb, clp, &nfsd4_cb_recall_any_ops, in create_client()
2965 struct rb_node **new = &(root->rb_node), *parent = NULL; in add_clp_to_name_tree()
2972 if (compare_blob(&clp->cl_name, &new_clp->cl_name) > 0) in add_clp_to_name_tree()
2973 new = &((*new)->rb_left); in add_clp_to_name_tree()
2975 new = &((*new)->rb_right); in add_clp_to_name_tree()
2978 rb_link_node(&new_clp->cl_namenode, parent, new); in add_clp_to_name_tree()
2979 rb_insert_color(&new_clp->cl_namenode, root); in add_clp_to_name_tree()
2986 struct rb_node *node = root->rb_node; in find_clp_in_name_tree()
2991 cmp = compare_blob(&clp->cl_name, name); in find_clp_in_name_tree()
2993 node = node->rb_left; in find_clp_in_name_tree()
2995 node = node->rb_right; in find_clp_in_name_tree()
3006 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); in add_to_unconfirmed()
3008 lockdep_assert_held(&nn->client_lock); in add_to_unconfirmed()
3010 clear_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags); in add_to_unconfirmed()
3011 add_clp_to_name_tree(clp, &nn->unconf_name_tree); in add_to_unconfirmed()
3012 idhashval = clientid_hashval(clp->cl_clientid.cl_id); in add_to_unconfirmed()
3013 list_add(&clp->cl_idhash, &nn->unconf_id_hashtbl[idhashval]); in add_to_unconfirmed()
3020 unsigned int idhashval = clientid_hashval(clp->cl_clientid.cl_id); in move_to_confirmed()
3021 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); in move_to_confirmed()
3023 lockdep_assert_held(&nn->client_lock); in move_to_confirmed()
3025 list_move(&clp->cl_idhash, &nn->conf_id_hashtbl[idhashval]); in move_to_confirmed()
3026 rb_erase(&clp->cl_namenode, &nn->unconf_name_tree); in move_to_confirmed()
3027 add_clp_to_name_tree(clp, &nn->conf_name_tree); in move_to_confirmed()
3028 set_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags); in move_to_confirmed()
3029 trace_nfsd_clid_confirmed(&clp->cl_clientid); in move_to_confirmed()
3037 unsigned int idhashval = clientid_hashval(clid->cl_id); in find_client_in_id_table()
3040 if (same_clid(&clp->cl_clientid, clid)) { in find_client_in_id_table()
3041 if ((bool)clp->cl_minorversion != sessions) in find_client_in_id_table()
3053 struct list_head *tbl = nn->conf_id_hashtbl; in find_confirmed_client()
3055 lockdep_assert_held(&nn->client_lock); in find_confirmed_client()
3062 struct list_head *tbl = nn->unconf_id_hashtbl; in find_unconfirmed_client()
3064 lockdep_assert_held(&nn->client_lock); in find_unconfirmed_client()
3070 return clp->cl_exchange_flags != 0; in clp_used_exchangeid()
3076 lockdep_assert_held(&nn->client_lock); in find_confirmed_client_by_name()
3077 return find_clp_in_name_tree(name, &nn->conf_name_tree); in find_confirmed_client_by_name()
3083 lockdep_assert_held(&nn->client_lock); in find_unconfirmed_client_by_name()
3084 return find_clp_in_name_tree(name, &nn->unconf_name_tree); in find_unconfirmed_client_by_name()
3090 struct nfs4_cb_conn *conn = &clp->cl_cb_conn; in gen_callback()
3096 if (se->se_callback_netid_len == 3 && in gen_callback()
3097 !memcmp(se->se_callback_netid_val, "tcp", 3)) in gen_callback()
3099 else if (se->se_callback_netid_len == 4 && in gen_callback()
3100 !memcmp(se->se_callback_netid_val, "tcp6", 4)) in gen_callback()
3105 conn->cb_addrlen = rpc_uaddr2sockaddr(clp->net, se->se_callback_addr_val, in gen_callback()
3106 se->se_callback_addr_len, in gen_callback()
3107 (struct sockaddr *)&conn->cb_addr, in gen_callback()
3108 sizeof(conn->cb_addr)); in gen_callback()
3110 if (!conn->cb_addrlen || conn->cb_addr.ss_family != expected_family) in gen_callback()
3113 if (conn->cb_addr.ss_family == AF_INET6) in gen_callback()
3114 ((struct sockaddr_in6 *)&conn->cb_addr)->sin6_scope_id = scopeid; in gen_callback()
3116 conn->cb_prog = se->se_callback_prog; in gen_callback()
3117 conn->cb_ident = se->se_callback_ident; in gen_callback()
3118 memcpy(&conn->cb_saddr, &rqstp->rq_daddr, rqstp->rq_daddrlen); in gen_callback()
3122 conn->cb_addr.ss_family = AF_UNSPEC; in gen_callback()
3123 conn->cb_addrlen = 0; in gen_callback()
3134 struct xdr_buf *buf = resp->xdr->buf; in nfsd4_store_cache_entry()
3135 struct nfsd4_slot *slot = resp->cstate.slot; in nfsd4_store_cache_entry()
3138 dprintk("--> %s slot %p\n", __func__, slot); in nfsd4_store_cache_entry()
3140 slot->sl_flags |= NFSD4_SLOT_INITIALIZED; in nfsd4_store_cache_entry()
3141 slot->sl_opcnt = resp->opcnt; in nfsd4_store_cache_entry()
3142 slot->sl_status = resp->cstate.status; in nfsd4_store_cache_entry()
3143 free_svc_cred(&slot->sl_cred); in nfsd4_store_cache_entry()
3144 copy_cred(&slot->sl_cred, &resp->rqstp->rq_cred); in nfsd4_store_cache_entry()
3147 slot->sl_flags &= ~NFSD4_SLOT_CACHED; in nfsd4_store_cache_entry()
3150 slot->sl_flags |= NFSD4_SLOT_CACHED; in nfsd4_store_cache_entry()
3152 base = resp->cstate.data_offset; in nfsd4_store_cache_entry()
3153 slot->sl_datalen = buf->len - base; in nfsd4_store_cache_entry()
3154 if (read_bytes_from_xdr_buf(buf, base, slot->sl_data, slot->sl_datalen)) in nfsd4_store_cache_entry()
3163 * operation which sets resp->p and increments resp->opcnt for
3172 struct nfsd4_slot *slot = resp->cstate.slot; in nfsd4_enc_sequence_replay()
3175 op = &args->ops[resp->opcnt - 1]; in nfsd4_enc_sequence_replay()
3178 if (slot->sl_flags & NFSD4_SLOT_CACHED) in nfsd4_enc_sequence_replay()
3179 return op->status; in nfsd4_enc_sequence_replay()
3180 if (args->opcnt == 1) { in nfsd4_enc_sequence_replay()
3182 * The original operation wasn't a solo sequence--we in nfsd4_enc_sequence_replay()
3183 * always cache those--so this retry must not match the in nfsd4_enc_sequence_replay()
3186 op->status = nfserr_seq_false_retry; in nfsd4_enc_sequence_replay()
3188 op = &args->ops[resp->opcnt++]; in nfsd4_enc_sequence_replay()
3189 op->status = nfserr_retry_uncached_rep; in nfsd4_enc_sequence_replay()
3192 return op->status; in nfsd4_enc_sequence_replay()
3203 struct nfsd4_slot *slot = resp->cstate.slot; in nfsd4_replay_cache_entry()
3204 struct xdr_stream *xdr = resp->xdr; in nfsd4_replay_cache_entry()
3208 dprintk("--> %s slot %p\n", __func__, slot); in nfsd4_replay_cache_entry()
3210 status = nfsd4_enc_sequence_replay(resp->rqstp->rq_argp, resp); in nfsd4_replay_cache_entry()
3214 p = xdr_reserve_space(xdr, slot->sl_datalen); in nfsd4_replay_cache_entry()
3219 xdr_encode_opaque_fixed(p, slot->sl_data, slot->sl_datalen); in nfsd4_replay_cache_entry()
3222 resp->opcnt = slot->sl_opcnt; in nfsd4_replay_cache_entry()
3223 return slot->sl_status; in nfsd4_replay_cache_entry()
3233 new->cl_exchange_flags |= EXCHGID4_FLAG_USE_PNFS_MDS; in nfsd4_set_ex_flags()
3235 new->cl_exchange_flags |= EXCHGID4_FLAG_USE_NON_PNFS; in nfsd4_set_ex_flags()
3239 new->cl_exchange_flags |= EXCHGID4_FLAG_SUPP_MOVED_REFER; in nfsd4_set_ex_flags()
3242 clid->flags = new->cl_exchange_flags; in nfsd4_set_ex_flags()
3249 list_for_each_entry(oo, &clp->cl_openowners, oo_perclient) { in client_has_openowners()
3250 if (!list_empty(&oo->oo_owner.so_stateids)) in client_has_openowners()
3260 || !list_empty(&clp->cl_lo_states) in client_has_state()
3262 || !list_empty(&clp->cl_delegations) in client_has_state()
3263 || !list_empty(&clp->cl_sessions) in client_has_state()
3264 || !list_empty(&clp->async_copies); in client_has_state()
3270 if (!exid->nii_domain.data) in copy_impl_id()
3272 xdr_netobj_dup(&clp->cl_nii_domain, &exid->nii_domain, GFP_KERNEL); in copy_impl_id()
3273 if (!clp->cl_nii_domain.data) in copy_impl_id()
3275 xdr_netobj_dup(&clp->cl_nii_name, &exid->nii_name, GFP_KERNEL); in copy_impl_id()
3276 if (!clp->cl_nii_name.data) in copy_impl_id()
3278 clp->cl_nii_time = exid->nii_time; in copy_impl_id()
3286 struct nfsd4_exchange_id *exid = &u->exchange_id; in nfsd4_exchange_id()
3291 nfs4_verifier verf = exid->verifier; in nfsd4_exchange_id()
3293 bool update = exid->flags & EXCHGID4_FLAG_UPD_CONFIRMED_REC_A; in nfsd4_exchange_id()
3299 __func__, rqstp, exid, exid->clname.len, exid->clname.data, in nfsd4_exchange_id()
3300 addr_str, exid->flags, exid->spa_how); in nfsd4_exchange_id()
3302 if (exid->flags & ~EXCHGID4_FLAG_MASK_A) in nfsd4_exchange_id()
3305 new = create_client(exid->clname, rqstp, &verf); in nfsd4_exchange_id()
3312 switch (exid->spa_how) { in nfsd4_exchange_id()
3314 exid->spo_must_enforce[0] = 0; in nfsd4_exchange_id()
3315 exid->spo_must_enforce[1] = ( in nfsd4_exchange_id()
3316 1 << (OP_BIND_CONN_TO_SESSION - 32) | in nfsd4_exchange_id()
3317 1 << (OP_EXCHANGE_ID - 32) | in nfsd4_exchange_id()
3318 1 << (OP_CREATE_SESSION - 32) | in nfsd4_exchange_id()
3319 1 << (OP_DESTROY_SESSION - 32) | in nfsd4_exchange_id()
3320 1 << (OP_DESTROY_CLIENTID - 32)); in nfsd4_exchange_id()
3322 exid->spo_must_allow[0] &= (1 << (OP_CLOSE) | in nfsd4_exchange_id()
3327 exid->spo_must_allow[1] &= ( in nfsd4_exchange_id()
3328 1 << (OP_TEST_STATEID - 32) | in nfsd4_exchange_id()
3329 1 << (OP_FREE_STATEID - 32)); in nfsd4_exchange_id()
3339 if (!new->cl_cred.cr_principal && in nfsd4_exchange_id()
3340 !new->cl_cred.cr_raw_principal) { in nfsd4_exchange_id()
3344 new->cl_mach_cred = true; in nfsd4_exchange_id()
3357 spin_lock(&nn->client_lock); in nfsd4_exchange_id()
3358 conf = find_confirmed_client_by_name(&exid->clname, nn); in nfsd4_exchange_id()
3360 bool creds_match = same_creds(&conf->cl_cred, &rqstp->rq_cred); in nfsd4_exchange_id()
3361 bool verfs_match = same_verf(&verf, &conf->cl_verifier); in nfsd4_exchange_id()
3381 exid->flags |= EXCHGID4_FLAG_CONFIRMED_R; in nfsd4_exchange_id()
3394 conf->cl_exchange_flags |= EXCHGID4_FLAG_CONFIRMED_R; in nfsd4_exchange_id()
3409 unconf = find_unconfirmed_client_by_name(&exid->clname, nn); in nfsd4_exchange_id()
3421 trace_nfsd_clid_replaced(&conf->cl_clientid); in nfsd4_exchange_id()
3423 new->cl_minorversion = cstate->minorversion; in nfsd4_exchange_id()
3424 new->cl_spo_must_allow.u.words[0] = exid->spo_must_allow[0]; in nfsd4_exchange_id()
3425 new->cl_spo_must_allow.u.words[1] = exid->spo_must_allow[1]; in nfsd4_exchange_id()
3430 exid->clientid.cl_boot = conf->cl_clientid.cl_boot; in nfsd4_exchange_id()
3431 exid->clientid.cl_id = conf->cl_clientid.cl_id; in nfsd4_exchange_id()
3433 exid->seqid = conf->cl_cs_slot.sl_seqid + 1; in nfsd4_exchange_id()
3437 conf->cl_cs_slot.sl_seqid, conf->cl_exchange_flags); in nfsd4_exchange_id()
3441 spin_unlock(&nn->client_lock); in nfsd4_exchange_id()
3446 trace_nfsd_clid_expire_unconf(&unconf->cl_clientid); in nfsd4_exchange_id()
3465 /* Note unsigned 32-bit arithmetic handles wraparound: */ in check_slot_seqid()
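The wraparound note above relies on C's unsigned 32-bit arithmetic being modulo 2^32: when the cached slot seqid is 0xffffffff, the next in-order request seqid is 0, and a "seqid == slot_seqid + 1" comparison still succeeds. A tiny self-contained check of that property (variable names are illustrative):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t slot_seqid = 0xffffffffu;   /* last cached sequence id */
        uint32_t seqid = 0;                  /* client's next request   */

        /* Unsigned 32-bit addition wraps, so this is the "seqid + 1" case. */
        assert(seqid == (uint32_t)(slot_seqid + 1));
        return 0;
    }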
3476 * Do this for solo or embedded create session operations.
3482 slot->sl_status = nfserr; in nfsd4_cache_create_session()
3483 memcpy(&slot->sl_cr_ses, cr_ses, sizeof(*cr_ses)); in nfsd4_cache_create_session()
3490 memcpy(cr_ses, &slot->sl_cr_ses, sizeof(*cr_ses)); in nfsd4_replay_create_session()
3491 return slot->sl_status; in nfsd4_replay_create_session()
3513 u32 maxrpc = nn->nfsd_serv->sv_max_mesg; in check_forechannel_attrs()
3515 if (ca->maxreq_sz < NFSD_MIN_REQ_HDR_SEQ_SZ) in check_forechannel_attrs()
3517 if (ca->maxresp_sz < NFSD_MIN_RESP_HDR_SEQ_SZ) in check_forechannel_attrs()
3519 ca->headerpadsz = 0; in check_forechannel_attrs()
3520 ca->maxreq_sz = min_t(u32, ca->maxreq_sz, maxrpc); in check_forechannel_attrs()
3521 ca->maxresp_sz = min_t(u32, ca->maxresp_sz, maxrpc); in check_forechannel_attrs()
3522 ca->maxops = min_t(u32, ca->maxops, NFSD_MAX_OPS_PER_COMPOUND); in check_forechannel_attrs()
3523 ca->maxresp_cached = min_t(u32, ca->maxresp_cached, in check_forechannel_attrs()
3525 ca->maxreqs = min_t(u32, ca->maxreqs, NFSD_MAX_SLOTS_PER_SESSION); in check_forechannel_attrs()
3536 ca->maxreqs = nfsd4_get_drc_mem(ca, nn); in check_forechannel_attrs()
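/*
 * Hedged sketch (userspace model, not the kernel routine) of the fore
 * channel negotiation shape above: the client's proposed limits are only
 * ever clamped downward to the server's own maxima, never raised. The
 * parameter names are illustrative stand-ins for the nfsd limits.
 */
#include <stdint.h>

struct model_channel_attrs {
	uint32_t maxreq_sz, maxresp_sz, maxops, maxreqs;
};

void model_clamp_fore_channel(struct model_channel_attrs *ca,
			      uint32_t server_max_msg,
			      uint32_t server_max_ops,
			      uint32_t server_max_slots)
{
	if (ca->maxreq_sz > server_max_msg)
		ca->maxreq_sz = server_max_msg;		/* never exceed the server's RPC size */
	if (ca->maxresp_sz > server_max_msg)
		ca->maxresp_sz = server_max_msg;
	if (ca->maxops > server_max_ops)
		ca->maxops = server_max_ops;		/* cap compound length */
	if (ca->maxreqs > server_max_slots)
		ca->maxreqs = server_max_slots;		/* cap slot table size */
}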
3542 * Server's NFSv4.1 backchannel support is AUTH_SYS-only for now.
3559 ca->headerpadsz = 0; in check_backchannel_attrs()
3561 if (ca->maxreq_sz < NFSD_CB_MAX_REQ_SZ) in check_backchannel_attrs()
3563 if (ca->maxresp_sz < NFSD_CB_MAX_RESP_SZ) in check_backchannel_attrs()
3565 ca->maxresp_cached = 0; in check_backchannel_attrs()
3566 if (ca->maxops < 2) in check_backchannel_attrs()
3574 switch (cbs->flavor) { in nfsd4_check_cb_sec()
3594 struct nfsd4_create_session *cr_ses = &u->create_session; in nfsd4_create_session()
3604 if (cr_ses->flags & ~SESSION4_FLAG_MASK_A) in nfsd4_create_session()
3606 status = nfsd4_check_cb_sec(&cr_ses->cb_sec); in nfsd4_create_session()
3609 status = check_forechannel_attrs(&cr_ses->fore_channel, nn); in nfsd4_create_session()
3612 status = check_backchannel_attrs(&cr_ses->back_channel); in nfsd4_create_session()
3616 new = alloc_session(&cr_ses->fore_channel, &cr_ses->back_channel); in nfsd4_create_session()
3623 spin_lock(&nn->client_lock); in nfsd4_create_session()
3624 unconf = find_unconfirmed_client(&cr_ses->clientid, true, nn); in nfsd4_create_session()
3625 conf = find_confirmed_client(&cr_ses->clientid, true, nn); in nfsd4_create_session()
3632 cs_slot = &conf->cl_cs_slot; in nfsd4_create_session()
3633 status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0); in nfsd4_create_session()
3641 if (!same_creds(&unconf->cl_cred, &rqstp->rq_cred) || in nfsd4_create_session()
3642 !rpc_cmp_addr(sa, (struct sockaddr *) &unconf->cl_addr)) { in nfsd4_create_session()
3649 cs_slot = &unconf->cl_cs_slot; in nfsd4_create_session()
3650 status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0); in nfsd4_create_session()
3656 old = find_confirmed_client_by_name(&unconf->cl_name, nn); in nfsd4_create_session()
3663 trace_nfsd_clid_replaced(&old->cl_clientid); in nfsd4_create_session()
3673 cr_ses->flags &= ~SESSION4_PERSIST; in nfsd4_create_session()
3675 cr_ses->flags &= ~SESSION4_RDMA; in nfsd4_create_session()
3680 memcpy(cr_ses->sessionid.data, new->se_sessionid.data, in nfsd4_create_session()
3682 cs_slot->sl_seqid++; in nfsd4_create_session()
3683 cr_ses->seqid = cs_slot->sl_seqid; in nfsd4_create_session()
3685 /* cache solo and embedded create sessions under the client_lock */ in nfsd4_create_session()
3687 spin_unlock(&nn->client_lock); in nfsd4_create_session()
3689 fsnotify_dentry(conf->cl_nfsd_info_dentry, FS_MODIFY); in nfsd4_create_session()
3697 spin_unlock(&nn->client_lock); in nfsd4_create_session()
3704 nfsd4_put_drc_mem(&cr_ses->fore_channel); in nfsd4_create_session()
3726 struct nfsd4_backchannel_ctl *bc = &u->backchannel_ctl; in nfsd4_backchannel_ctl()
3727 struct nfsd4_session *session = cstate->session; in nfsd4_backchannel_ctl()
3731 status = nfsd4_check_cb_sec(&bc->bc_cb_sec); in nfsd4_backchannel_ctl()
3734 spin_lock(&nn->client_lock); in nfsd4_backchannel_ctl()
3735 session->se_cb_prog = bc->bc_cb_program; in nfsd4_backchannel_ctl()
3736 session->se_cb_sec = bc->bc_cb_sec; in nfsd4_backchannel_ctl()
3737 spin_unlock(&nn->client_lock); in nfsd4_backchannel_ctl()
3739 nfsd4_probe_callback(session->se_client); in nfsd4_backchannel_ctl()
3748 list_for_each_entry(c, &s->se_conns, cn_persession) { in __nfsd4_find_conn()
3749 if (c->cn_xprt == xpt) { in __nfsd4_find_conn()
3759 struct nfs4_client *clp = session->se_client; in nfsd4_match_existing_connection()
3760 struct svc_xprt *xpt = rqst->rq_xprt; in nfsd4_match_existing_connection()
3765 spin_lock(&clp->cl_lock); in nfsd4_match_existing_connection()
3769 else if (req == c->cn_flags) in nfsd4_match_existing_connection()
3772 c->cn_flags != NFS4_CDFC4_BACK) in nfsd4_match_existing_connection()
3775 c->cn_flags != NFS4_CDFC4_FORE) in nfsd4_match_existing_connection()
3779 spin_unlock(&clp->cl_lock); in nfsd4_match_existing_connection()
3789 struct nfsd4_bind_conn_to_session *bcts = &u->bind_conn_to_session; in nfsd4_bind_conn_to_session()
3798 spin_lock(&nn->client_lock); in nfsd4_bind_conn_to_session()
3799 session = find_in_sessionid_hashtbl(&bcts->sessionid, net, &status); in nfsd4_bind_conn_to_session()
3800 spin_unlock(&nn->client_lock); in nfsd4_bind_conn_to_session()
3804 if (!nfsd4_mach_creds_match(session->se_client, rqstp)) in nfsd4_bind_conn_to_session()
3807 bcts->dir, &conn); in nfsd4_bind_conn_to_session()
3809 if (bcts->dir == NFS4_CDFC4_FORE_OR_BOTH || in nfsd4_bind_conn_to_session()
3810 bcts->dir == NFS4_CDFC4_BACK) in nfsd4_bind_conn_to_session()
3811 conn->cn_flags |= NFS4_CDFC4_BACK; in nfsd4_bind_conn_to_session()
3812 nfsd4_probe_callback(session->se_client); in nfsd4_bind_conn_to_session()
3817 status = nfsd4_map_bcts_dir(&bcts->dir); in nfsd4_bind_conn_to_session()
3820 conn = alloc_conn(rqstp, bcts->dir); in nfsd4_bind_conn_to_session()
3834 if (!cstate->session) in nfsd4_compound_in_session()
3836 return !memcmp(sid, &cstate->session->se_sessionid, sizeof(*sid)); in nfsd4_compound_in_session()
3843 struct nfs4_sessionid *sessionid = &u->destroy_session.sessionid; in nfsd4_destroy_session()
3857 spin_lock(&nn->client_lock); in nfsd4_destroy_session()
3862 if (!nfsd4_mach_creds_match(ses->se_client, r)) in nfsd4_destroy_session()
3868 spin_unlock(&nn->client_lock); in nfsd4_destroy_session()
3870 nfsd4_probe_callback_sync(ses->se_client); in nfsd4_destroy_session()
3872 spin_lock(&nn->client_lock); in nfsd4_destroy_session()
3877 spin_unlock(&nn->client_lock); in nfsd4_destroy_session()
3884 struct nfs4_client *clp = ses->se_client; in nfsd4_sequence_check_conn()
3889 spin_lock(&clp->cl_lock); in nfsd4_sequence_check_conn()
3890 c = __nfsd4_find_conn(new->cn_xprt, ses); in nfsd4_sequence_check_conn()
3894 if (clp->cl_mach_cred) in nfsd4_sequence_check_conn()
3897 spin_unlock(&clp->cl_lock); in nfsd4_sequence_check_conn()
3901 nfsd4_conn_lost(&new->cn_xpt_user); in nfsd4_sequence_check_conn()
3904 spin_unlock(&clp->cl_lock); in nfsd4_sequence_check_conn()
3911 struct nfsd4_compoundargs *args = rqstp->rq_argp; in nfsd4_session_too_many_ops()
3913 return args->opcnt > session->se_fchannel.maxops; in nfsd4_session_too_many_ops()
3919 struct xdr_buf *xb = &rqstp->rq_arg; in nfsd4_request_too_big()
3921 return xb->len > session->se_fchannel.maxreq_sz; in nfsd4_request_too_big()
3927 struct nfsd4_compoundargs *argp = rqstp->rq_argp; in replay_matches_cache()
3929 if ((bool)(slot->sl_flags & NFSD4_SLOT_CACHETHIS) != in replay_matches_cache()
3930 (bool)seq->cachethis) in replay_matches_cache()
3936 if (slot->sl_opcnt < argp->opcnt && !slot->sl_status) in replay_matches_cache()
3943 if (slot->sl_opcnt > argp->opcnt) in replay_matches_cache()
3946 if (!same_creds(&rqstp->rq_cred, &slot->sl_cred)) in replay_matches_cache()
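/*
 * Sketch (plain C, invented struct names) of the replay sanity checks that
 * replay_matches_cache() applies above: a retransmission only counts as a
 * replay of the cached request if the cache-this flag, the operation count,
 * and the caller's credentials still line up with what the slot cached.
 */
#include <stdbool.h>

struct model_slot_cache { bool cachethis; int opcnt; int status; int cred_id; };
struct model_request    { bool cachethis; int opcnt; int cred_id; };

bool model_replay_matches_cache(const struct model_request *req,
				const struct model_slot_cache *slot)
{
	if (slot->cachethis != req->cachethis)
		return false;		/* cache-this flag must agree */
	if (slot->opcnt < req->opcnt && slot->status == 0)
		return false;		/* a successful reply can't have fewer ops than the call */
	if (slot->opcnt > req->opcnt)
		return false;		/* cached more ops than this call sends: not a replay */
	return slot->cred_id == req->cred_id;	/* same principal, the one check the spec requires */
}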
3961 struct nfsd4_sequence *seq = &u->sequence; in nfsd4_sequence()
3962 struct nfsd4_compoundres *resp = rqstp->rq_resp; in nfsd4_sequence()
3963 struct xdr_stream *xdr = resp->xdr; in nfsd4_sequence()
3973 if (resp->opcnt != 1) in nfsd4_sequence()
3984 spin_lock(&nn->client_lock); in nfsd4_sequence()
3985 session = find_in_sessionid_hashtbl(&seq->sessionid, net, &status); in nfsd4_sequence()
3988 clp = session->se_client; in nfsd4_sequence()
3999 if (seq->slotid >= session->se_fchannel.maxreqs) in nfsd4_sequence()
4002 slot = session->se_slots[seq->slotid]; in nfsd4_sequence()
4003 dprintk("%s: slotid %d\n", __func__, seq->slotid); in nfsd4_sequence()
4008 seq->maxslots = session->se_fchannel.maxreqs; in nfsd4_sequence()
4010 status = check_slot_seqid(seq->seqid, slot->sl_seqid, in nfsd4_sequence()
4011 slot->sl_flags & NFSD4_SLOT_INUSE); in nfsd4_sequence()
4014 if (!(slot->sl_flags & NFSD4_SLOT_INITIALIZED)) in nfsd4_sequence()
4019 cstate->slot = slot; in nfsd4_sequence()
4020 cstate->session = session; in nfsd4_sequence()
4021 cstate->clp = clp; in nfsd4_sequence()
4022 /* Return the cached reply status and set cstate->status to nfserr_replay_cache */ in nfsd4_sequence()
4025 cstate->status = nfserr_replay_cache; in nfsd4_sequence()
4036 buflen = (seq->cachethis) ? in nfsd4_sequence()
4037 session->se_fchannel.maxresp_cached : in nfsd4_sequence()
4038 session->se_fchannel.maxresp_sz; in nfsd4_sequence()
4039 status = (seq->cachethis) ? nfserr_rep_too_big_to_cache : in nfsd4_sequence()
4041 if (xdr_restrict_buflen(xdr, buflen - rqstp->rq_auth_slack)) in nfsd4_sequence()
4047 slot->sl_seqid = seq->seqid; in nfsd4_sequence()
4048 slot->sl_flags |= NFSD4_SLOT_INUSE; in nfsd4_sequence()
4049 if (seq->cachethis) in nfsd4_sequence()
4050 slot->sl_flags |= NFSD4_SLOT_CACHETHIS; in nfsd4_sequence()
4052 slot->sl_flags &= ~NFSD4_SLOT_CACHETHIS; in nfsd4_sequence()
4054 cstate->slot = slot; in nfsd4_sequence()
4055 cstate->session = session; in nfsd4_sequence()
4056 cstate->clp = clp; in nfsd4_sequence()
4059 switch (clp->cl_cb_state) { in nfsd4_sequence()
4061 seq->status_flags = SEQ4_STATUS_CB_PATH_DOWN; in nfsd4_sequence()
4064 seq->status_flags = SEQ4_STATUS_BACKCHANNEL_FAULT; in nfsd4_sequence()
4067 seq->status_flags = 0; in nfsd4_sequence()
4069 if (!list_empty(&clp->cl_revoked)) in nfsd4_sequence()
4070 seq->status_flags |= SEQ4_STATUS_RECALLABLE_STATE_REVOKED; in nfsd4_sequence()
4074 spin_unlock(&nn->client_lock); in nfsd4_sequence()
4084 struct nfsd4_compound_state *cs = &resp->cstate; in nfsd4_sequence_done()
4087 if (cs->status != nfserr_replay_cache) { in nfsd4_sequence_done()
4089 cs->slot->sl_flags &= ~NFSD4_SLOT_INUSE; in nfsd4_sequence_done()
4092 nfsd4_put_session(cs->session); in nfsd4_sequence_done()
4093 } else if (cs->clp) in nfsd4_sequence_done()
4094 put_client_renew(cs->clp); in nfsd4_sequence_done()
4102 struct nfsd4_destroy_clientid *dc = &u->destroy_clientid; in nfsd4_destroy_clientid()
4108 spin_lock(&nn->client_lock); in nfsd4_destroy_clientid()
4109 unconf = find_unconfirmed_client(&dc->clientid, true, nn); in nfsd4_destroy_clientid()
4110 conf = find_confirmed_client(&dc->clientid, true, nn); in nfsd4_destroy_clientid()
4133 trace_nfsd_clid_destroyed(&clp->cl_clientid); in nfsd4_destroy_clientid()
4136 spin_unlock(&nn->client_lock); in nfsd4_destroy_clientid()
4146 struct nfsd4_reclaim_complete *rc = &u->reclaim_complete; in nfsd4_reclaim_complete()
4147 struct nfs4_client *clp = cstate->clp; in nfsd4_reclaim_complete()
4150 if (rc->rca_one_fs) { in nfsd4_reclaim_complete()
4151 if (!cstate->current_fh.fh_dentry) in nfsd4_reclaim_complete()
4161 if (test_and_set_bit(NFSD4_CLIENT_RECLAIM_COMPLETE, &clp->cl_flags)) in nfsd4_reclaim_complete()
4176 trace_nfsd_clid_reclaim_complete(&clp->cl_clientid); in nfsd4_reclaim_complete()
4187 struct nfsd4_setclientid *setclid = &u->setclientid; in nfsd4_setclientid()
4188 struct xdr_netobj clname = setclid->se_name; in nfsd4_setclientid()
4189 nfs4_verifier clverifier = setclid->se_verf; in nfsd4_setclientid()
4198 spin_lock(&nn->client_lock); in nfsd4_setclientid()
4204 if (!same_creds(&conf->cl_cred, &rqstp->rq_cred)) { in nfsd4_setclientid()
4213 if (same_verf(&conf->cl_verifier, &clverifier)) { in nfsd4_setclientid()
4221 new->cl_minorversion = 0; in nfsd4_setclientid()
4224 setclid->se_clientid.cl_boot = new->cl_clientid.cl_boot; in nfsd4_setclientid()
4225 setclid->se_clientid.cl_id = new->cl_clientid.cl_id; in nfsd4_setclientid()
4226 memcpy(setclid->se_confirm.data, new->cl_confirm.data, sizeof(setclid->se_confirm.data)); in nfsd4_setclientid()
4230 spin_unlock(&nn->client_lock); in nfsd4_setclientid()
4234 trace_nfsd_clid_expire_unconf(&unconf->cl_clientid); in nfsd4_setclientid()
4246 &u->setclientid_confirm; in nfsd4_setclientid_confirm()
4249 nfs4_verifier confirm = setclientid_confirm->sc_confirm; in nfsd4_setclientid_confirm()
4250 clientid_t * clid = &setclientid_confirm->sc_clientid; in nfsd4_setclientid_confirm()
4257 spin_lock(&nn->client_lock); in nfsd4_setclientid_confirm()
4268 if (unconf && !same_creds(&unconf->cl_cred, &rqstp->rq_cred)) { in nfsd4_setclientid_confirm()
4272 if (conf && !same_creds(&conf->cl_cred, &rqstp->rq_cred)) { in nfsd4_setclientid_confirm()
4276 if (!unconf || !same_verf(&confirm, &unconf->cl_confirm)) { in nfsd4_setclientid_confirm()
4277 if (conf && same_verf(&confirm, &conf->cl_confirm)) { in nfsd4_setclientid_confirm()
4287 nfsd4_change_callback(conf, &unconf->cl_cb_conn); in nfsd4_setclientid_confirm()
4289 old = find_confirmed_client_by_name(&unconf->cl_name, nn); in nfsd4_setclientid_confirm()
4293 && !same_creds(&unconf->cl_cred, in nfsd4_setclientid_confirm()
4294 &old->cl_cred)) { in nfsd4_setclientid_confirm()
4303 trace_nfsd_clid_replaced(&old->cl_clientid); in nfsd4_setclientid_confirm()
4309 spin_unlock(&nn->client_lock); in nfsd4_setclientid_confirm()
4311 fsnotify_dentry(conf->cl_nfsd_info_dentry, FS_MODIFY); in nfsd4_setclientid_confirm()
4313 spin_lock(&nn->client_lock); in nfsd4_setclientid_confirm()
4316 spin_unlock(&nn->client_lock); in nfsd4_setclientid_confirm()
4331 refcount_set(&fp->fi_ref, 1); in nfsd4_file_init()
4332 spin_lock_init(&fp->fi_lock); in nfsd4_file_init()
4333 INIT_LIST_HEAD(&fp->fi_stateids); in nfsd4_file_init()
4334 INIT_LIST_HEAD(&fp->fi_delegations); in nfsd4_file_init()
4335 INIT_LIST_HEAD(&fp->fi_clnt_odstate); in nfsd4_file_init()
4336 fh_copy_shallow(&fp->fi_fhandle, &fh->fh_handle); in nfsd4_file_init()
4337 fp->fi_deleg_file = NULL; in nfsd4_file_init()
4338 fp->fi_had_conflict = false; in nfsd4_file_init()
4339 fp->fi_share_deny = 0; in nfsd4_file_init()
4340 memset(fp->fi_fds, 0, sizeof(fp->fi_fds)); in nfsd4_file_init()
4341 memset(fp->fi_access, 0, sizeof(fp->fi_access)); in nfsd4_file_init()
4342 fp->fi_aliased = false; in nfsd4_file_init()
4343 fp->fi_inode = d_inode(fh->fh_dentry); in nfsd4_file_init()
4345 INIT_LIST_HEAD(&fp->fi_lo_states); in nfsd4_file_init()
4346 atomic_set(&fp->fi_lo_recalls, 0); in nfsd4_file_init()
4408 return -ENOMEM; in nfsd4_init_slabs()
4418 count = atomic_read(&nn->nfsd_courtesy_clients); in nfsd4_state_shrinker_count()
4422 queue_work(laundry_wq, &nn->nfsd_shrinker_work); in nfsd4_state_shrinker_count()
4438 nn->nfsd4_lease = 90; /* default lease time */ in nfsd4_init_leases_net()
4439 nn->nfsd4_grace = 90; in nfsd4_init_leases_net()
4440 nn->somebody_reclaimed = false; in nfsd4_init_leases_net()
4441 nn->track_reclaim_completes = false; in nfsd4_init_leases_net()
4442 nn->clverifier_counter = get_random_u32(); in nfsd4_init_leases_net()
4443 nn->clientid_base = get_random_u32(); in nfsd4_init_leases_net()
4444 nn->clientid_counter = nn->clientid_base + 1; in nfsd4_init_leases_net()
4445 nn->s2s_cp_cl_id = nn->clientid_counter++; in nfsd4_init_leases_net()
4447 atomic_set(&nn->nfs4_client_count, 0); in nfsd4_init_leases_net()
4451 nn->nfs4_max_clients = max_t(int, max_clients, NFS4_CLIENTS_PER_GB); in nfsd4_init_leases_net()
4453 atomic_set(&nn->nfsd_courtesy_clients, 0); in nfsd4_init_leases_net()
4458 rp->rp_status = nfserr_serverfault; in init_nfs4_replay()
4459 rp->rp_buflen = 0; in init_nfs4_replay()
4460 rp->rp_buf = rp->rp_ibuf; in init_nfs4_replay()
4461 mutex_init(&rp->rp_mutex); in init_nfs4_replay()
4468 mutex_lock(&so->so_replay.rp_mutex); in nfsd4_cstate_assign_replay()
4469 cstate->replay_owner = nfs4_get_stateowner(so); in nfsd4_cstate_assign_replay()
4475 struct nfs4_stateowner *so = cstate->replay_owner; in nfsd4_cstate_clear_replay()
4478 cstate->replay_owner = NULL; in nfsd4_cstate_clear_replay()
4479 mutex_unlock(&so->so_replay.rp_mutex); in nfsd4_cstate_clear_replay()
4492 xdr_netobj_dup(&sop->so_owner, owner, GFP_KERNEL); in alloc_stateowner()
4493 if (!sop->so_owner.data) { in alloc_stateowner()
4498 INIT_LIST_HEAD(&sop->so_stateids); in alloc_stateowner()
4499 sop->so_client = clp; in alloc_stateowner()
4500 init_nfs4_replay(&sop->so_replay); in alloc_stateowner()
4501 atomic_set(&sop->so_count, 1); in alloc_stateowner()
4507 lockdep_assert_held(&clp->cl_lock); in hash_openowner()
4509 list_add(&oo->oo_owner.so_strhash, in hash_openowner()
4510 &clp->cl_ownerstr_hashtbl[strhashval]); in hash_openowner()
4511 list_add(&oo->oo_perclient, &clp->cl_openowners); in hash_openowner()
4535 struct nfs4_openowner *oo = open->op_openowner; in nfsd4_find_existing_open()
4537 lockdep_assert_held(&fp->fi_lock); in nfsd4_find_existing_open()
4539 list_for_each_entry(local, &fp->fi_stateids, st_perfile) { in nfsd4_find_existing_open()
4541 if (local->st_stateowner->so_is_open_owner == 0) in nfsd4_find_existing_open()
4543 if (local->st_stateowner != &oo->oo_owner) in nfsd4_find_existing_open()
4545 if (local->st_stid.sc_type == NFS4_OPEN_STID) { in nfsd4_find_existing_open()
4547 refcount_inc(&ret->st_stid.sc_count); in nfsd4_find_existing_open()
4559 switch (s->sc_type) { in nfsd4_verify_open_stid()
4579 mutex_lock_nested(&stp->st_mutex, LOCK_STATEID_MUTEX); in nfsd4_lock_ol_stateid()
4580 ret = nfsd4_verify_open_stid(&stp->st_stid); in nfsd4_lock_ol_stateid()
4582 mutex_unlock(&stp->st_mutex); in nfsd4_lock_ol_stateid()
4591 spin_lock(&fp->fi_lock); in nfsd4_find_and_lock_existing_open()
4593 spin_unlock(&fp->fi_lock); in nfsd4_find_and_lock_existing_open()
4596 nfs4_put_stid(&stp->st_stid); in nfsd4_find_and_lock_existing_open()
4605 struct nfs4_client *clp = cstate->clp; in alloc_init_open_stateowner()
4608 oo = alloc_stateowner(openowner_slab, &open->op_owner, clp); in alloc_init_open_stateowner()
4611 oo->oo_owner.so_ops = &openowner_ops; in alloc_init_open_stateowner()
4612 oo->oo_owner.so_is_open_owner = 1; in alloc_init_open_stateowner()
4613 oo->oo_owner.so_seqid = open->op_seqid; in alloc_init_open_stateowner()
4614 oo->oo_flags = 0; in alloc_init_open_stateowner()
4616 oo->oo_flags |= NFS4_OO_CONFIRMED; in alloc_init_open_stateowner()
4617 oo->oo_time = 0; in alloc_init_open_stateowner()
4618 oo->oo_last_closed_stid = NULL; in alloc_init_open_stateowner()
4619 INIT_LIST_HEAD(&oo->oo_close_lru); in alloc_init_open_stateowner()
4620 spin_lock(&clp->cl_lock); in alloc_init_open_stateowner()
4626 nfs4_free_stateowner(&oo->oo_owner); in alloc_init_open_stateowner()
4628 spin_unlock(&clp->cl_lock); in alloc_init_open_stateowner()
4636 struct nfs4_openowner *oo = open->op_openowner; in init_open_stateid()
4640 stp = open->op_stp; in init_open_stateid()
4642 mutex_init(&stp->st_mutex); in init_open_stateid()
4643 mutex_lock_nested(&stp->st_mutex, OPEN_STATEID_MUTEX); in init_open_stateid()
4646 spin_lock(&oo->oo_owner.so_client->cl_lock); in init_open_stateid()
4647 spin_lock(&fp->fi_lock); in init_open_stateid()
4650 mutex_unlock(&stp->st_mutex); in init_open_stateid()
4659 open->op_stp = NULL; in init_open_stateid()
4660 refcount_inc(&stp->st_stid.sc_count); in init_open_stateid()
4661 stp->st_stid.sc_type = NFS4_OPEN_STID; in init_open_stateid()
4662 INIT_LIST_HEAD(&stp->st_locks); in init_open_stateid()
4663 stp->st_stateowner = nfs4_get_stateowner(&oo->oo_owner); in init_open_stateid()
4665 stp->st_stid.sc_file = fp; in init_open_stateid()
4666 stp->st_access_bmap = 0; in init_open_stateid()
4667 stp->st_deny_bmap = 0; in init_open_stateid()
4668 stp->st_openstp = NULL; in init_open_stateid()
4669 list_add(&stp->st_perstateowner, &oo->oo_owner.so_stateids); in init_open_stateid()
4670 list_add(&stp->st_perfile, &fp->fi_stateids); in init_open_stateid()
4673 spin_unlock(&fp->fi_lock); in init_open_stateid()
4674 spin_unlock(&oo->oo_owner.so_client->cl_lock); in init_open_stateid()
4678 nfs4_put_stid(&retstp->st_stid); in init_open_stateid()
4682 mutex_unlock(&stp->st_mutex); in init_open_stateid()
4697 struct nfs4_openowner *oo = openowner(s->st_stateowner); in move_to_close_lru()
4698 struct nfsd_net *nn = net_generic(s->st_stid.sc_client->net, in move_to_close_lru()
4712 wait_event(close_wq, refcount_read(&s->st_stid.sc_count) == 2); in move_to_close_lru()
4715 if (s->st_stid.sc_file) { in move_to_close_lru()
4716 put_nfs4_file(s->st_stid.sc_file); in move_to_close_lru()
4717 s->st_stid.sc_file = NULL; in move_to_close_lru()
4720 spin_lock(&nn->client_lock); in move_to_close_lru()
4721 last = oo->oo_last_closed_stid; in move_to_close_lru()
4722 oo->oo_last_closed_stid = s; in move_to_close_lru()
4723 list_move_tail(&oo->oo_close_lru, &nn->close_lru); in move_to_close_lru()
4724 oo->oo_time = ktime_get_boottime_seconds(); in move_to_close_lru()
4725 spin_unlock(&nn->client_lock); in move_to_close_lru()
4727 nfs4_put_stid(&last->st_stid); in move_to_close_lru()
4733 struct inode *inode = d_inode(fhp->fh_dentry); in nfsd4_file_hash_lookup()
4741 if (fh_match(&fi->fi_fhandle, &fhp->fh_handle)) { in nfsd4_file_hash_lookup()
4742 if (refcount_inc_not_zero(&fi->fi_ref)) { in nfsd4_file_hash_lookup()
4757 * inode->i_lock prevents racing insertions from adding an entry
4763 struct inode *inode = d_inode(fhp->fh_dentry); in nfsd4_file_hash_insert()
4771 spin_lock(&inode->i_lock); in nfsd4_file_hash_insert()
4776 if (fh_match(&fi->fi_fhandle, &fhp->fh_handle)) { in nfsd4_file_hash_insert()
4777 if (refcount_inc_not_zero(&fi->fi_ref)) in nfsd4_file_hash_insert()
4780 fi->fi_aliased = alias_found = true; in nfsd4_file_hash_insert()
4786 err = rhltable_insert(&nfs4_file_rhltable, &new->fi_rlist, in nfsd4_file_hash_insert()
4791 new->fi_aliased = alias_found; in nfsd4_file_hash_insert()
4795 spin_unlock(&inode->i_lock); in nfsd4_file_hash_insert()
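/*
 * Userspace sketch of the "search again under the lock, then publish" shape
 * used by nfsd4_file_hash_insert() above; a plain locked list stands in for
 * the rhltable, and the per-inode i_lock is modeled by one mutex. The names
 * are illustrative only.
 */
#include <pthread.h>
#include <stddef.h>

struct model_file { unsigned long fh; int refs; struct model_file *next; };

static pthread_mutex_t model_lock = PTHREAD_MUTEX_INITIALIZER;
static struct model_file *model_head;

struct model_file *model_file_insert(struct model_file *newf)
{
	struct model_file *fi;

	pthread_mutex_lock(&model_lock);
	for (fi = model_head; fi; fi = fi->next) {
		if (fi->fh == newf->fh && fi->refs > 0) {
			fi->refs++;			/* raced: reuse the existing entry */
			pthread_mutex_unlock(&model_lock);
			return fi;
		}
	}
	newf->refs = 1;
	newf->next = model_head;			/* no alias found: publish the new entry */
	model_head = newf;
	pthread_mutex_unlock(&model_lock);
	return newf;
}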
4802 rhltable_remove(&nfs4_file_rhltable, &fi->fi_rlist, in nfsd4_file_hash_remove()
4821 spin_lock(&fp->fi_lock); in nfs4_share_conflict()
4822 if (fp->fi_share_deny & deny_type) in nfs4_share_conflict()
4824 spin_unlock(&fp->fi_lock); in nfs4_share_conflict()
4833 return ctx && !list_empty_careful(&ctx->flc_lease); in nfsd4_deleg_present()
4837 * nfsd_wait_for_delegreturn - wait for delegations to be returned
4839 * @inode: in-core inode of the file being waited for
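/*
 * Standalone model (pthreads, not the kernel wait_var_event machinery, and
 * without the timeout the kernel version uses) of the wait/wake pairing the
 * comment above documents: DELEGRETURN processing wakes any conflicting
 * operation that is waiting for the delegation count on the inode to drop.
 */
#include <pthread.h>

static pthread_mutex_t deleg_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t deleg_returned = PTHREAD_COND_INITIALIZER;
static int delegs_outstanding;

void model_wait_for_delegreturn(void)
{
	pthread_mutex_lock(&deleg_lock);
	while (delegs_outstanding > 0)		/* block until every delegation is back */
		pthread_cond_wait(&deleg_returned, &deleg_lock);
	pthread_mutex_unlock(&deleg_lock);
}

void model_delegreturn(void)
{
	pthread_mutex_lock(&deleg_lock);
	if (delegs_outstanding > 0 && --delegs_outstanding == 0)
		pthread_cond_broadcast(&deleg_returned);	/* analogous to wake_up_var() */
	pthread_mutex_unlock(&deleg_lock);
}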
4861 struct nfsd_net *nn = net_generic(dp->dl_stid.sc_client->net, in nfsd4_cb_recall_prepare()
4864 block_delegations(&dp->dl_stid.sc_file->fi_fhandle); in nfsd4_cb_recall_prepare()
4868 * already holding inode->i_lock. in nfsd4_cb_recall_prepare()
4874 if (delegation_hashed(dp) && dp->dl_time == 0) { in nfsd4_cb_recall_prepare()
4875 dp->dl_time = ktime_get_boottime_seconds(); in nfsd4_cb_recall_prepare()
4876 list_add_tail(&dp->dl_recall_lru, &nn->del_recall_lru); in nfsd4_cb_recall_prepare()
4886 trace_nfsd_cb_recall_done(&dp->dl_stid.sc_stateid, task); in nfsd4_cb_recall_done()
4888 if (dp->dl_stid.sc_type == NFS4_CLOSED_DELEG_STID || in nfsd4_cb_recall_done()
4889 dp->dl_stid.sc_type == NFS4_REVOKED_DELEG_STID) in nfsd4_cb_recall_done()
4892 switch (task->tk_status) { in nfsd4_cb_recall_done()
4895 case -NFS4ERR_DELAY: in nfsd4_cb_recall_done()
4898 case -EBADHANDLE: in nfsd4_cb_recall_done()
4899 case -NFS4ERR_BAD_STATEID: in nfsd4_cb_recall_done()
4904 if (dp->dl_retries--) { in nfsd4_cb_recall_done()
4918 nfs4_put_stid(&dp->dl_stid); in nfsd4_cb_recall_release()
4937 refcount_inc(&dp->dl_stid.sc_count); in nfsd_break_one_deleg()
4938 queued = nfsd4_run_cb(&dp->dl_recall); in nfsd_break_one_deleg()
4941 refcount_dec(&dp->dl_stid.sc_count); in nfsd_break_one_deleg()
4948 struct nfs4_delegation *dp = (struct nfs4_delegation *)fl->fl_owner; in nfsd_break_deleg_cb()
4949 struct nfs4_file *fp = dp->dl_stid.sc_file; in nfsd_break_deleg_cb()
4950 struct nfs4_client *clp = dp->dl_stid.sc_client; in nfsd_break_deleg_cb()
4953 trace_nfsd_cb_recall(&dp->dl_stid); in nfsd_break_deleg_cb()
4955 dp->dl_recalled = true; in nfsd_break_deleg_cb()
4956 atomic_inc(&clp->cl_delegs_in_recall); in nfsd_break_deleg_cb()
4958 nn = net_generic(clp->net, nfsd_net_id); in nfsd_break_deleg_cb()
4959 mod_delayed_work(laundry_wq, &nn->laundromat_work, 0); in nfsd_break_deleg_cb()
4967 fl->fl_break_time = 0; in nfsd_break_deleg_cb()
4969 fp->fi_had_conflict = true; in nfsd_break_deleg_cb()
4975 * nfsd_breaker_owns_lease - Check if lease conflict was resolved
4984 struct nfs4_delegation *dl = fl->fl_owner; in nfsd_breaker_owns_lease()
4992 if (rqst->rq_prog != NFS_PROGRAM || rqst->rq_vers < 4) in nfsd_breaker_owns_lease()
4994 clp = *(rqst->rq_lease_breaker); in nfsd_breaker_owns_lease()
4995 return dl->dl_stid.sc_client == clp; in nfsd_breaker_owns_lease()
5002 struct nfs4_delegation *dp = (struct nfs4_delegation *)onlist->fl_owner; in nfsd_change_deleg_cb()
5003 struct nfs4_client *clp = dp->dl_stid.sc_client; in nfsd_change_deleg_cb()
5006 if (dp->dl_recalled) in nfsd_change_deleg_cb()
5007 atomic_dec(&clp->cl_delegs_in_recall); in nfsd_change_deleg_cb()
5010 return -EAGAIN; in nfsd_change_deleg_cb()
5023 if (seqid == so->so_seqid - 1) in nfsd4_check_seqid()
5025 if (seqid == so->so_seqid) in nfsd4_check_seqid()
5035 spin_lock(&nn->client_lock); in lookup_clientid()
5038 atomic_inc(&found->cl_rpc_users); in lookup_clientid()
5039 spin_unlock(&nn->client_lock); in lookup_clientid()
5047 if (cstate->clp) { in set_client()
5048 if (!same_clid(&cstate->clp->cl_clientid, clid)) in set_client()
5056 * set cstate->clp), so session = false: in set_client()
5058 cstate->clp = lookup_clientid(clid, false, nn); in set_client()
5059 if (!cstate->clp) in set_client()
5068 clientid_t *clientid = &open->op_clientid; in nfsd4_process_open1()
5078 open->op_file = nfsd4_alloc_file(); in nfsd4_process_open1()
5079 if (open->op_file == NULL) in nfsd4_process_open1()
5085 clp = cstate->clp; in nfsd4_process_open1()
5087 strhashval = ownerstr_hashval(&open->op_owner); in nfsd4_process_open1()
5089 open->op_openowner = oo; in nfsd4_process_open1()
5093 if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) { in nfsd4_process_open1()
5096 open->op_openowner = NULL; in nfsd4_process_open1()
5099 status = nfsd4_check_seqid(cstate, &oo->oo_owner, open->op_seqid); in nfsd4_process_open1()
5107 open->op_openowner = oo; in nfsd4_process_open1()
5109 open->op_stp = nfs4_alloc_open_stateid(clp); in nfsd4_process_open1()
5110 if (!open->op_stp) in nfsd4_process_open1()
5114 (cstate->current_fh.fh_export->ex_flags & NFSEXP_PNFS)) { in nfsd4_process_open1()
5115 open->op_odstate = alloc_clnt_odstate(clp); in nfsd4_process_open1()
5116 if (!open->op_odstate) in nfsd4_process_open1()
5126 if ((flags & WR_STATE) && (dp->dl_type == NFS4_OPEN_DELEGATE_READ)) in nfs4_check_delegmode()
5150 return open->op_claim_type == NFS4_OPEN_CLAIM_DELEGATE_CUR || in nfsd4_is_deleg_cur()
5151 open->op_claim_type == NFS4_OPEN_CLAIM_DELEG_CUR_FH; in nfsd4_is_deleg_cur()
5162 deleg = find_deleg_stateid(cl, &open->op_delegate_stateid); in nfs4_check_deleg()
5165 if (deleg->dl_stid.sc_type == NFS4_REVOKED_DELEG_STID) { in nfs4_check_deleg()
5166 nfs4_put_stid(&deleg->dl_stid); in nfs4_check_deleg()
5167 if (cl->cl_minorversion) in nfs4_check_deleg()
5171 flags = share_access_to_flags(open->op_share_access); in nfs4_check_deleg()
5174 nfs4_put_stid(&deleg->dl_stid); in nfs4_check_deleg()
5183 open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED; in nfs4_check_deleg()
5209 if (!open->op_truncate) in nfsd4_truncate()
5211 if (!(open->op_share_access & NFS4_SHARE_ACCESS_WRITE)) in nfsd4_truncate()
5222 int oflag = nfs4_access_to_omode(open->op_share_access); in nfs4_get_vfs_file()
5223 int access = nfs4_access_to_access(open->op_share_access); in nfs4_get_vfs_file()
5226 spin_lock(&fp->fi_lock); in nfs4_get_vfs_file()
5232 status = nfs4_file_check_deny(fp, open->op_share_deny); in nfs4_get_vfs_file()
5235 spin_unlock(&fp->fi_lock); in nfs4_get_vfs_file()
5239 stp, open->op_share_deny, false)) in nfs4_get_vfs_file()
5241 spin_unlock(&fp->fi_lock); in nfs4_get_vfs_file()
5246 status = nfs4_file_get_access(fp, open->op_share_access); in nfs4_get_vfs_file()
5249 spin_unlock(&fp->fi_lock); in nfs4_get_vfs_file()
5253 stp, open->op_share_access, true)) in nfs4_get_vfs_file()
5255 spin_unlock(&fp->fi_lock); in nfs4_get_vfs_file()
5260 old_access_bmap = stp->st_access_bmap; in nfs4_get_vfs_file()
5261 set_access(open->op_share_access, stp); in nfs4_get_vfs_file()
5264 old_deny_bmap = stp->st_deny_bmap; in nfs4_get_vfs_file()
5265 set_deny(open->op_share_deny, stp); in nfs4_get_vfs_file()
5266 fp->fi_share_deny |= (open->op_share_deny & NFS4_SHARE_DENY_BOTH); in nfs4_get_vfs_file()
5268 if (!fp->fi_fds[oflag]) { in nfs4_get_vfs_file()
5269 spin_unlock(&fp->fi_lock); in nfs4_get_vfs_file()
5272 open->op_filp, &nf); in nfs4_get_vfs_file()
5276 spin_lock(&fp->fi_lock); in nfs4_get_vfs_file()
5277 if (!fp->fi_fds[oflag]) { in nfs4_get_vfs_file()
5278 fp->fi_fds[oflag] = nf; in nfs4_get_vfs_file()
5282 spin_unlock(&fp->fi_lock); in nfs4_get_vfs_file()
5286 status = nfserrno(nfsd_open_break_lease(cur_fh->fh_dentry->d_inode, in nfs4_get_vfs_file()
5297 stp->st_access_bmap = old_access_bmap; in nfs4_get_vfs_file()
5298 nfs4_file_put_access(fp, open->op_share_access); in nfs4_get_vfs_file()
5309 unsigned char old_deny_bmap = stp->st_deny_bmap; in nfs4_upgrade_open()
5311 if (!test_access(open->op_share_access, stp)) in nfs4_upgrade_open()
5315 spin_lock(&fp->fi_lock); in nfs4_upgrade_open()
5316 status = nfs4_file_check_deny(fp, open->op_share_deny); in nfs4_upgrade_open()
5319 set_deny(open->op_share_deny, stp); in nfs4_upgrade_open()
5320 fp->fi_share_deny |= in nfs4_upgrade_open()
5321 (open->op_share_deny & NFS4_SHARE_DENY_BOTH); in nfs4_upgrade_open()
5325 stp, open->op_share_deny, false)) in nfs4_upgrade_open()
5329 spin_unlock(&fp->fi_lock); in nfs4_upgrade_open()
5343 if (clp->cl_cb_state == NFSD4_CB_UP) in nfsd4_cb_channel_good()
5350 return clp->cl_minorversion && clp->cl_cb_state == NFSD4_CB_UNKNOWN; in nfsd4_cb_channel_good()
5361 fl->fl_lmops = &nfsd_lease_mng_ops; in nfs4_alloc_init_lease()
5362 fl->fl_flags = FL_DELEG; in nfs4_alloc_init_lease()
5363 fl->fl_type = flag == NFS4_OPEN_DELEGATE_READ? F_RDLCK: F_WRLCK; in nfs4_alloc_init_lease()
5364 fl->fl_end = OFFSET_MAX; in nfs4_alloc_init_lease()
5365 fl->fl_owner = (fl_owner_t)dp; in nfs4_alloc_init_lease()
5366 fl->fl_pid = current->tgid; in nfs4_alloc_init_lease()
5367 fl->fl_file = dp->dl_stid.sc_file->fi_deleg_file->nf_file; in nfs4_alloc_init_lease()
5375 struct file *f = fp->fi_deleg_file->nf_file; in nfsd4_check_conflicting_opens()
5379 writes = atomic_read(&ino->i_writecount); in nfsd4_check_conflicting_opens()
5389 if (fp->fi_aliased) in nfsd4_check_conflicting_opens()
5390 return -EAGAIN; in nfsd4_check_conflicting_opens()
5398 if (fp->fi_fds[O_WRONLY]) in nfsd4_check_conflicting_opens()
5399 writes--; in nfsd4_check_conflicting_opens()
5400 if (fp->fi_fds[O_RDWR]) in nfsd4_check_conflicting_opens()
5401 writes--; in nfsd4_check_conflicting_opens()
5403 return -EAGAIN; /* There may be non-NFSv4 writers */ in nfsd4_check_conflicting_opens()
5405 * It's possible there are non-NFSv4 write opens in progress, in nfsd4_check_conflicting_opens()
5411 spin_lock(&fp->fi_lock); in nfsd4_check_conflicting_opens()
5412 list_for_each_entry(st, &fp->fi_stateids, st_perfile) { in nfsd4_check_conflicting_opens()
5413 if (st->st_openstp == NULL /* it's an open */ && in nfsd4_check_conflicting_opens()
5415 st->st_stid.sc_client != clp) { in nfsd4_check_conflicting_opens()
5416 spin_unlock(&fp->fi_lock); in nfsd4_check_conflicting_opens()
5417 return -EAGAIN; in nfsd4_check_conflicting_opens()
5420 spin_unlock(&fp->fi_lock); in nfsd4_check_conflicting_opens()
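/*
 * Illustrative arithmetic only (not the kernel helper): before handing out a
 * read delegation, nfsd4_check_conflicting_opens() above discounts its own
 * NFSv4 write opens from the inode's writer count; anything left over must
 * be a local or non-v4 writer, so the delegation is refused.
 */
int model_conflicting_writers(int i_writecount,
			      int nfsd_wronly_open, int nfsd_rdwr_open)
{
	int writes = i_writecount;

	if (nfsd_wronly_open)
		writes--;		/* our own O_WRONLY open */
	if (nfsd_rdwr_open)
		writes--;		/* our own O_RDWR open */
	return writes > 0;		/* someone else still holds the file for write */
}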
5443 err = nfsd_lookup_dentry(open->op_rqstp, parent, in nfsd4_verify_deleg_dentry()
5444 open->op_fname, open->op_fnamelen, in nfsd4_verify_deleg_dentry()
5448 return -EAGAIN; in nfsd4_verify_deleg_dentry()
5452 if (child != file_dentry(fp->fi_deleg_file->nf_file)) in nfsd4_verify_deleg_dentry()
5453 return -EAGAIN; in nfsd4_verify_deleg_dentry()
5467 struct inode *inode = file_inode(nf->nf_file); in nfsd4_verify_setuid_write()
5469 if ((open->op_share_access & NFS4_SHARE_ACCESS_WRITE) && in nfsd4_verify_setuid_write()
5470 (inode->i_mode & (S_ISUID|S_ISGID))) in nfsd4_verify_setuid_write()
5471 return -EAGAIN; in nfsd4_verify_setuid_write()
5480 struct nfs4_client *clp = stp->st_stid.sc_client; in nfs4_set_delegation()
5481 struct nfs4_file *fp = stp->st_stid.sc_file; in nfs4_set_delegation()
5482 struct nfs4_clnt_odstate *odstate = stp->st_clnt_odstate; in nfs4_set_delegation()
5493 if (fp->fi_had_conflict) in nfs4_set_delegation()
5494 return ERR_PTR(-EAGAIN); in nfs4_set_delegation()
5508 if ((open->op_share_access & NFS4_SHARE_ACCESS_BOTH) == NFS4_SHARE_ACCESS_BOTH) { in nfs4_set_delegation()
5517 if (!nf && (open->op_share_access & NFS4_SHARE_ACCESS_READ)) { in nfs4_set_delegation()
5523 return ERR_PTR(-EAGAIN); in nfs4_set_delegation()
5526 spin_lock(&fp->fi_lock); in nfs4_set_delegation()
5528 status = -EAGAIN; in nfs4_set_delegation()
5530 status = -EAGAIN; in nfs4_set_delegation()
5531 else if (!fp->fi_deleg_file) { in nfs4_set_delegation()
5532 fp->fi_deleg_file = nf; in nfs4_set_delegation()
5535 fp->fi_delegees = 1; in nfs4_set_delegation()
5538 fp->fi_delegees++; in nfs4_set_delegation()
5539 spin_unlock(&fp->fi_lock); in nfs4_set_delegation()
5546 status = -ENOMEM; in nfs4_set_delegation()
5555 status = vfs_setlease(fp->fi_deleg_file->nf_file, fl->fl_type, &fl, NULL); in nfs4_set_delegation()
5575 status = nfsd4_verify_setuid_write(open, fp->fi_deleg_file); in nfs4_set_delegation()
5579 status = -EAGAIN; in nfs4_set_delegation()
5580 if (fp->fi_had_conflict) in nfs4_set_delegation()
5584 spin_lock(&fp->fi_lock); in nfs4_set_delegation()
5586 spin_unlock(&fp->fi_lock); in nfs4_set_delegation()
5594 vfs_setlease(fp->fi_deleg_file->nf_file, F_UNLCK, NULL, (void **)&dp); in nfs4_set_delegation()
5596 put_clnt_odstate(dp->dl_clnt_odstate); in nfs4_set_delegation()
5597 nfs4_put_stid(&dp->dl_stid); in nfs4_set_delegation()
5605 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT; in nfsd4_open_deleg_none_ext()
5606 if (status == -EAGAIN) in nfsd4_open_deleg_none_ext()
5607 open->op_why_no_deleg = WND4_CONTENTION; in nfsd4_open_deleg_none_ext()
5609 open->op_why_no_deleg = WND4_RESOURCE; in nfsd4_open_deleg_none_ext()
5610 switch (open->op_deleg_want) { in nfsd4_open_deleg_none_ext()
5616 open->op_why_no_deleg = WND4_CANCELLED; in nfsd4_open_deleg_none_ext()
5653 struct nfs4_openowner *oo = openowner(stp->st_stateowner); in nfs4_open_delegation()
5654 struct nfs4_client *clp = stp->st_stid.sc_client; in nfs4_open_delegation()
5659 cb_up = nfsd4_cb_channel_good(oo->oo_owner.so_client); in nfs4_open_delegation()
5660 open->op_recall = 0; in nfs4_open_delegation()
5661 switch (open->op_claim_type) { in nfs4_open_delegation()
5664 open->op_recall = 1; in nfs4_open_delegation()
5675 if (locks_in_grace(clp->net)) in nfs4_open_delegation()
5677 if (!cb_up || !(oo->oo_flags & NFS4_OO_CONFIRMED)) in nfs4_open_delegation()
5679 if (open->op_share_access & NFS4_SHARE_ACCESS_WRITE && in nfs4_open_delegation()
5680 !clp->cl_minorversion) in nfs4_open_delegation()
5690 memcpy(&open->op_delegate_stateid, &dp->dl_stid.sc_stateid, sizeof(dp->dl_stid.sc_stateid)); in nfs4_open_delegation()
5692 if (open->op_share_access & NFS4_SHARE_ACCESS_WRITE) { in nfs4_open_delegation()
5693 open->op_delegate_type = NFS4_OPEN_DELEGATE_WRITE; in nfs4_open_delegation()
5694 trace_nfsd_deleg_write(&dp->dl_stid.sc_stateid); in nfs4_open_delegation()
5696 open->op_delegate_type = NFS4_OPEN_DELEGATE_READ; in nfs4_open_delegation()
5697 trace_nfsd_deleg_read(&dp->dl_stid.sc_stateid); in nfs4_open_delegation()
5699 nfs4_put_stid(&dp->dl_stid); in nfs4_open_delegation()
5702 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE; in nfs4_open_delegation()
5703 if (open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS && in nfs4_open_delegation()
5704 open->op_delegate_type != NFS4_OPEN_DELEGATE_NONE) { in nfs4_open_delegation()
5706 open->op_recall = 1; in nfs4_open_delegation()
5710 if (open->op_deleg_want) in nfs4_open_delegation()
5718 if (open->op_deleg_want == NFS4_SHARE_WANT_READ_DELEG && in nfsd4_deleg_xgrade_none_ext()
5719 dp->dl_type == NFS4_OPEN_DELEGATE_WRITE) { in nfsd4_deleg_xgrade_none_ext()
5720 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT; in nfsd4_deleg_xgrade_none_ext()
5721 open->op_why_no_deleg = WND4_NOT_SUPP_DOWNGRADE; in nfsd4_deleg_xgrade_none_ext()
5722 } else if (open->op_deleg_want == NFS4_SHARE_WANT_WRITE_DELEG && in nfsd4_deleg_xgrade_none_ext()
5723 dp->dl_type == NFS4_OPEN_DELEGATE_WRITE) { in nfsd4_deleg_xgrade_none_ext()
5724 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT; in nfsd4_deleg_xgrade_none_ext()
5725 open->op_why_no_deleg = WND4_NOT_SUPP_UPGRADE; in nfsd4_deleg_xgrade_none_ext()
5734 * nfsd4_process_open2 - finish open processing
5739 * If successful, (1) truncate the file if open->op_truncate was
5740 * set, (2) set open->op_stateid, (3) set open->op_delegation.
5748 struct nfsd4_compoundres *resp = rqstp->rq_resp; in nfsd4_process_open2()
5749 struct nfs4_client *cl = open->op_openowner->oo_owner.so_client; in nfsd4_process_open2()
5761 fp = nfsd4_file_hash_insert(open->op_file, current_fh); in nfsd4_process_open2()
5764 if (fp != open->op_file) { in nfsd4_process_open2()
5770 open->op_file = NULL; in nfsd4_process_open2()
5783 if (!open->op_stp) in nfsd4_process_open2()
5797 mutex_unlock(&stp->st_mutex); in nfsd4_process_open2()
5803 stp->st_stid.sc_type = NFS4_CLOSED_STID; in nfsd4_process_open2()
5805 mutex_unlock(&stp->st_mutex); in nfsd4_process_open2()
5809 stp->st_clnt_odstate = find_or_hash_clnt_odstate(fp, in nfsd4_process_open2()
5810 open->op_odstate); in nfsd4_process_open2()
5811 if (stp->st_clnt_odstate == open->op_odstate) in nfsd4_process_open2()
5812 open->op_odstate = NULL; in nfsd4_process_open2()
5815 nfs4_inc_and_copy_stateid(&open->op_stateid, &stp->st_stid); in nfsd4_process_open2()
5816 mutex_unlock(&stp->st_mutex); in nfsd4_process_open2()
5818 if (nfsd4_has_session(&resp->cstate)) { in nfsd4_process_open2()
5819 if (open->op_deleg_want & NFS4_SHARE_WANT_NO_DELEG) { in nfsd4_process_open2()
5820 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT; in nfsd4_process_open2()
5821 open->op_why_no_deleg = WND4_NOT_WANTED; in nfsd4_process_open2()
5830 nfs4_open_delegation(open, stp, &resp->cstate.current_fh); in nfsd4_process_open2()
5833 trace_nfsd_open(&stp->st_stid.sc_stateid); in nfsd4_process_open2()
5836 if (open->op_delegate_type == NFS4_OPEN_DELEGATE_NONE && dp && in nfsd4_process_open2()
5837 open->op_deleg_want) in nfsd4_process_open2()
5842 if (status == 0 && open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS) in nfsd4_process_open2()
5843 open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED; in nfsd4_process_open2()
5847 open->op_rflags = NFS4_OPEN_RESULT_LOCKTYPE_POSIX; in nfsd4_process_open2()
5848 if (nfsd4_has_session(&resp->cstate)) in nfsd4_process_open2()
5849 open->op_rflags |= NFS4_OPEN_RESULT_MAY_NOTIFY_LOCK; in nfsd4_process_open2()
5850 else if (!(open->op_openowner->oo_flags & NFS4_OO_CONFIRMED)) in nfsd4_process_open2()
5851 open->op_rflags |= NFS4_OPEN_RESULT_CONFIRM; in nfsd4_process_open2()
5854 nfs4_put_stid(&dp->dl_stid); in nfsd4_process_open2()
5856 nfs4_put_stid(&stp->st_stid); in nfsd4_process_open2()
5864 if (open->op_openowner) { in nfsd4_cleanup_open_state()
5865 struct nfs4_stateowner *so = &open->op_openowner->oo_owner; in nfsd4_cleanup_open_state()
5870 if (open->op_file) in nfsd4_cleanup_open_state()
5871 kmem_cache_free(file_slab, open->op_file); in nfsd4_cleanup_open_state()
5872 if (open->op_stp) in nfsd4_cleanup_open_state()
5873 nfs4_put_stid(&open->op_stp->st_stid); in nfsd4_cleanup_open_state()
5874 if (open->op_odstate) in nfsd4_cleanup_open_state()
5875 kmem_cache_free(odstate_slab, open->op_odstate); in nfsd4_cleanup_open_state()
5882 clientid_t *clid = &u->renew; in nfsd4_renew()
5891 clp = cstate->clp; in nfsd4_renew()
5892 if (!list_empty(&clp->cl_delegations) in nfsd4_renew()
5893 && clp->cl_cb_state != NFSD4_CB_UP) in nfsd4_renew()
5902 if (nn->grace_ended) in nfsd4_end_grace()
5906 nn->grace_ended = true; in nfsd4_end_grace()
5923 locks_end_grace(&nn->nfsd4_manager); in nfsd4_end_grace()
5937 time64_t double_grace_period_end = nn->boot_time + in clients_still_reclaiming()
5938 2 * nn->nfsd4_lease; in clients_still_reclaiming()
5940 if (nn->track_reclaim_completes && in clients_still_reclaiming()
5941 atomic_read(&nn->nr_reclaim_complete) == in clients_still_reclaiming()
5942 nn->reclaim_str_hashtbl_size) in clients_still_reclaiming()
5944 if (!nn->somebody_reclaimed) in clients_still_reclaiming()
5946 nn->somebody_reclaimed = false; in clients_still_reclaiming()
5965 if (last_refresh < lt->cutoff) in state_expired()
5967 time_remaining = last_refresh - lt->cutoff; in state_expired()
5968 lt->new_timeo = min(lt->new_timeo, time_remaining); in state_expired()
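/*
 * Small model of the laundromat timing decision in state_expired() above
 * (plain C, hypothetical names): anything last refreshed before
 * "now - lease" is expired; otherwise the time it still has left bounds how
 * soon the laundromat needs to run again.
 */
#include <stdbool.h>
#include <stdint.h>

struct model_laundry_time { int64_t cutoff; int64_t new_timeo; };

bool model_state_expired(struct model_laundry_time *lt, int64_t last_refresh)
{
	int64_t remaining;

	if (last_refresh < lt->cutoff)
		return true;				/* older than one lease: reap it */
	remaining = last_refresh - lt->cutoff;
	if (remaining < lt->new_timeo)
		lt->new_timeo = remaining;		/* rerun before this state ages out */
	return false;
}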
5975 spin_lock_init(&nn->nfsd_ssc_lock); in nfsd4_ssc_init_umount_work()
5976 INIT_LIST_HEAD(&nn->nfsd_ssc_mount_list); in nfsd4_ssc_init_umount_work()
5977 init_waitqueue_head(&nn->nfsd_ssc_waitq); in nfsd4_ssc_init_umount_work()
5990 spin_lock(&nn->nfsd_ssc_lock); in nfsd4_ssc_shutdown_umount()
5991 list_for_each_entry_safe(ni, tmp, &nn->nfsd_ssc_mount_list, nsui_list) { in nfsd4_ssc_shutdown_umount()
5992 list_del(&ni->nsui_list); in nfsd4_ssc_shutdown_umount()
5993 spin_unlock(&nn->nfsd_ssc_lock); in nfsd4_ssc_shutdown_umount()
5994 mntput(ni->nsui_vfsmount); in nfsd4_ssc_shutdown_umount()
5996 spin_lock(&nn->nfsd_ssc_lock); in nfsd4_ssc_shutdown_umount()
5998 spin_unlock(&nn->nfsd_ssc_lock); in nfsd4_ssc_shutdown_umount()
6007 spin_lock(&nn->nfsd_ssc_lock); in nfsd4_ssc_expire_umount()
6008 list_for_each_entry_safe(ni, tmp, &nn->nfsd_ssc_mount_list, nsui_list) { in nfsd4_ssc_expire_umount()
6009 if (time_after(jiffies, ni->nsui_expire)) { in nfsd4_ssc_expire_umount()
6010 if (refcount_read(&ni->nsui_refcnt) > 1) in nfsd4_ssc_expire_umount()
6014 ni->nsui_busy = true; in nfsd4_ssc_expire_umount()
6015 spin_unlock(&nn->nfsd_ssc_lock); in nfsd4_ssc_expire_umount()
6016 mntput(ni->nsui_vfsmount); in nfsd4_ssc_expire_umount()
6017 spin_lock(&nn->nfsd_ssc_lock); in nfsd4_ssc_expire_umount()
6020 list_del(&ni->nsui_list); in nfsd4_ssc_expire_umount()
6030 wake_up_all(&nn->nfsd_ssc_waitq); in nfsd4_ssc_expire_umount()
6031 spin_unlock(&nn->nfsd_ssc_lock); in nfsd4_ssc_expire_umount()
6043 list_for_each_entry(stp, &lo->lo_owner.so_stateids, st_perstateowner) { in nfs4_lockowner_has_blockers()
6044 nf = stp->st_stid.sc_file; in nfs4_lockowner_has_blockers()
6045 ctx = locks_inode_context(nf->fi_inode); in nfs4_lockowner_has_blockers()
6061 if (atomic_read(&clp->cl_delegs_in_recall)) in nfs4_anylock_blockers()
6063 spin_lock(&clp->cl_lock); in nfs4_anylock_blockers()
6065 list_for_each_entry(so, &clp->cl_ownerstr_hashtbl[i], in nfs4_anylock_blockers()
6067 if (so->so_is_open_owner) in nfs4_anylock_blockers()
6071 spin_unlock(&clp->cl_lock); in nfs4_anylock_blockers()
6076 spin_unlock(&clp->cl_lock); in nfs4_anylock_blockers()
6088 maxreap = (atomic_read(&nn->nfs4_client_count) >= nn->nfs4_max_clients) ? in nfs4_get_client_reaplist()
6091 spin_lock(&nn->client_lock); in nfs4_get_client_reaplist()
6092 list_for_each_safe(pos, next, &nn->client_lru) { in nfs4_get_client_reaplist()
6094 if (clp->cl_state == NFSD4_EXPIRABLE) in nfs4_get_client_reaplist()
6096 if (!state_expired(lt, clp->cl_time)) in nfs4_get_client_reaplist()
6098 if (!atomic_read(&clp->cl_rpc_users)) { in nfs4_get_client_reaplist()
6099 if (clp->cl_state == NFSD4_ACTIVE) in nfs4_get_client_reaplist()
6100 atomic_inc(&nn->nfsd_courtesy_clients); in nfs4_get_client_reaplist()
6101 clp->cl_state = NFSD4_COURTESY; in nfs4_get_client_reaplist()
6110 list_add(&clp->cl_lru, reaplist); in nfs4_get_client_reaplist()
6114 spin_unlock(&nn->client_lock); in nfs4_get_client_reaplist()
6128 spin_lock(&nn->client_lock); in nfs4_get_courtesy_client_reaplist()
6129 list_for_each_safe(pos, next, &nn->client_lru) { in nfs4_get_courtesy_client_reaplist()
6131 if (clp->cl_state == NFSD4_ACTIVE) in nfs4_get_courtesy_client_reaplist()
6136 list_add(&clp->cl_lru, reaplist); in nfs4_get_courtesy_client_reaplist()
6140 spin_unlock(&nn->client_lock); in nfs4_get_courtesy_client_reaplist()
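/*
 * Hedged sketch of the expiry decision visible in nfs4_get_client_reaplist()
 * above (enum and parameter names are illustrative; the per-run reap budget
 * is omitted): an expired client that is otherwise idle is demoted to a
 * courtesy client and kept, unless it has no state worth keeping or is
 * blocking other clients' lock requests.
 */
enum model_client_state { MODEL_ACTIVE, MODEL_COURTESY, MODEL_EXPIRABLE };

int model_reap_expired_client(enum model_client_state *state,
			      int rpc_users, int has_state, int has_blockers)
{
	if (rpc_users == 0)
		*state = MODEL_COURTESY;	/* idle: demote instead of destroying */
	if (!has_state)
		return 1;			/* nothing to preserve: reap now */
	if (has_blockers)
		return 1;			/* it is in the way of live clients: reap now */
	return 0;				/* keep it around as a courtesy client */
}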
6151 trace_nfsd_clid_purged(&clp->cl_clientid); in nfs4_process_client_reaplist()
6152 list_del_init(&clp->cl_lru); in nfs4_process_client_reaplist()
6166 .cutoff = ktime_get_boottime_seconds() - nn->nfsd4_lease, in nfs4_laundromat()
6167 .new_timeo = nn->nfsd4_lease in nfs4_laundromat()
6179 spin_lock(&nn->s2s_cp_lock); in nfs4_laundromat()
6180 idr_for_each_entry(&nn->s2s_cp_stateids, cps_t, i) { in nfs4_laundromat()
6182 if (cps->cp_stateid.cs_type == NFS4_COPYNOTIFY_STID && in nfs4_laundromat()
6183 state_expired(<, cps->cpntf_time)) in nfs4_laundromat()
6186 spin_unlock(&nn->s2s_cp_lock); in nfs4_laundromat()
6191 list_for_each_safe(pos, next, &nn->del_recall_lru) { in nfs4_laundromat()
6193 if (!state_expired(<, dp->dl_time)) in nfs4_laundromat()
6196 list_add(&dp->dl_recall_lru, &reaplist); in nfs4_laundromat()
6202 list_del_init(&dp->dl_recall_lru); in nfs4_laundromat()
6206 spin_lock(&nn->client_lock); in nfs4_laundromat()
6207 while (!list_empty(&nn->close_lru)) { in nfs4_laundromat()
6208 oo = list_first_entry(&nn->close_lru, struct nfs4_openowner, in nfs4_laundromat()
6210 if (!state_expired(<, oo->oo_time)) in nfs4_laundromat()
6212 list_del_init(&oo->oo_close_lru); in nfs4_laundromat()
6213 stp = oo->oo_last_closed_stid; in nfs4_laundromat()
6214 oo->oo_last_closed_stid = NULL; in nfs4_laundromat()
6215 spin_unlock(&nn->client_lock); in nfs4_laundromat()
6216 nfs4_put_stid(&stp->st_stid); in nfs4_laundromat()
6217 spin_lock(&nn->client_lock); in nfs4_laundromat()
6219 spin_unlock(&nn->client_lock); in nfs4_laundromat()
6224 * So, we clean out any un-revisited request after a lease period in nfs4_laundromat()
6233 spin_lock(&nn->blocked_locks_lock); in nfs4_laundromat()
6234 while (!list_empty(&nn->blocked_locks_lru)) { in nfs4_laundromat()
6235 nbl = list_first_entry(&nn->blocked_locks_lru, in nfs4_laundromat()
6237 if (!state_expired(<, nbl->nbl_time)) in nfs4_laundromat()
6239 list_move(&nbl->nbl_lru, &reaplist); in nfs4_laundromat()
6240 list_del_init(&nbl->nbl_list); in nfs4_laundromat()
6242 spin_unlock(&nn->blocked_locks_lock); in nfs4_laundromat()
6247 list_del_init(&nbl->nbl_lru); in nfs4_laundromat()
6251 /* service the server-to-server copy delayed unmount list */ in nfs4_laundromat()
6269 queue_delayed_work(laundry_wq, &nn->laundromat_work, t*HZ); in laundromat_main()
6289 spin_lock(&nn->client_lock); in deleg_reaper()
6290 list_for_each_safe(pos, next, &nn->client_lru) { in deleg_reaper()
6293 if (clp->cl_state != NFSD4_ACTIVE) in deleg_reaper()
6295 if (list_empty(&clp->cl_delegations)) in deleg_reaper()
6297 if (atomic_read(&clp->cl_delegs_in_recall)) in deleg_reaper()
6299 if (test_bit(NFSD4_CLIENT_CB_RECALL_ANY, &clp->cl_flags)) in deleg_reaper()
6301 if (ktime_get_boottime_seconds() - clp->cl_ra_time < 5) in deleg_reaper()
6303 if (clp->cl_cb_state != NFSD4_CB_UP) in deleg_reaper()
6305 list_add(&clp->cl_ra_cblist, &cblist); in deleg_reaper()
6308 kref_get(&clp->cl_nfsdfs.cl_ref); in deleg_reaper()
6309 set_bit(NFSD4_CLIENT_CB_RECALL_ANY, &clp->cl_flags); in deleg_reaper()
6310 clp->cl_ra_time = ktime_get_boottime_seconds(); in deleg_reaper()
6312 spin_unlock(&nn->client_lock); in deleg_reaper()
6317 list_del_init(&clp->cl_ra_cblist); in deleg_reaper()
6318 clp->cl_ra->ra_keep = 0; in deleg_reaper()
6319 clp->cl_ra->ra_bmval[0] = BIT(RCA4_TYPE_MASK_RDATA_DLG); in deleg_reaper()
6320 trace_nfsd_cb_recall_any(clp->cl_ra); in deleg_reaper()
6321 nfsd4_run_cb(&clp->cl_ra->ra_cb); in deleg_reaper()
6337 if (!fh_match(&fhp->fh_handle, &stp->sc_file->fi_fhandle)) in nfs4_check_fh()
6348 if (stp->st_openstp) in nfs4_check_openmode()
6349 stp = stp->st_openstp; in nfs4_check_openmode()
6382 if (has_session && in->si_generation == 0) in check_stateid_generation()
6385 if (in->si_generation == ref->si_generation) in check_stateid_generation()
6393 * non-buggy client. For example, if the client sends a lock in check_stateid_generation()
6408 spin_lock(&s->sc_lock); in nfsd4_stid_check_stateid_generation()
6411 ret = check_stateid_generation(in, &s->sc_stateid, has_session); in nfsd4_stid_check_stateid_generation()
6412 spin_unlock(&s->sc_lock); in nfsd4_stid_check_stateid_generation()
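/*
 * Userspace model of the generation check above. A generation of zero from a
 * sessions (v4.1+) client means "use the current one"; otherwise an exact
 * match is required, and a signed 32-bit difference distinguishes a stateid
 * from the future (a buggy client) from a merely stale one. Names invented.
 */
#include <stdbool.h>
#include <stdint.h>

enum model_stid_check { STID_OK, STID_BAD, STID_OLD };

enum model_stid_check model_check_generation(uint32_t in, uint32_t ref,
					     bool has_session)
{
	if (has_session && in == 0)
		return STID_OK;			/* "current stateid" special case */
	if (in == ref)
		return STID_OK;
	if ((int32_t)(in - ref) > 0)
		return STID_BAD;		/* newer than anything we issued */
	return STID_OLD;			/* stale: the client missed an update */
}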
6418 if (ols->st_stateowner->so_is_open_owner && in nfsd4_check_openowner_confirmed()
6419 !(openowner(ols->st_stateowner)->oo_flags & NFS4_OO_CONFIRMED)) in nfsd4_check_openowner_confirmed()
6432 spin_lock(&cl->cl_lock); in nfsd4_validate_stateid()
6439 switch (s->sc_type) { in nfsd4_validate_stateid()
6451 printk("unknown stateid type %x\n", s->sc_type); in nfsd4_validate_stateid()
6458 spin_unlock(&cl->cl_lock); in nfsd4_validate_stateid()
6483 status = set_client(&stateid->si_opaque.so_clid, cstate, nn); in nfsd4_lookup_stateid()
6485 if (cstate->session) in nfsd4_lookup_stateid()
6491 stid = find_stateid_by_type(cstate->clp, stateid, typemask); in nfsd4_lookup_stateid()
6494 if ((stid->sc_type == NFS4_REVOKED_DELEG_STID) && !return_revoked) { in nfsd4_lookup_stateid()
6496 if (cstate->minorversion) in nfsd4_lookup_stateid()
6512 switch (s->sc_type) { in nfs4_find_file()
6514 spin_lock(&s->sc_file->fi_lock); in nfs4_find_file()
6515 ret = nfsd_file_get(s->sc_file->fi_deleg_file); in nfs4_find_file()
6516 spin_unlock(&s->sc_file->fi_lock); in nfs4_find_file()
6521 ret = find_readable_file(s->sc_file); in nfs4_find_file()
6523 ret = find_writeable_file(s->sc_file); in nfs4_find_file()
6550 status = nfsd_permission(rqstp, fhp->fh_export, fhp->fh_dentry, in nfs4_check_file()
6568 WARN_ON_ONCE(cps->cp_stateid.cs_type != NFS4_COPYNOTIFY_STID); in _free_cpntf_state_locked()
6569 if (!refcount_dec_and_test(&cps->cp_stateid.cs_count)) in _free_cpntf_state_locked()
6571 list_del(&cps->cp_list); in _free_cpntf_state_locked()
6572 idr_remove(&nn->s2s_cp_stateids, in _free_cpntf_state_locked()
6573 cps->cp_stateid.cs_stid.si_opaque.so_id); in _free_cpntf_state_locked()
6588 if (st->si_opaque.so_clid.cl_id != nn->s2s_cp_cl_id) in manage_cpntf_state()
6590 spin_lock(&nn->s2s_cp_lock); in manage_cpntf_state()
6591 cps_t = idr_find(&nn->s2s_cp_stateids, st->si_opaque.so_id); in manage_cpntf_state()
6595 if (state->cp_stateid.cs_type != NFS4_COPYNOTIFY_STID) { in manage_cpntf_state()
6600 refcount_inc(&state->cp_stateid.cs_count); in manage_cpntf_state()
6605 spin_unlock(&nn->s2s_cp_lock); in manage_cpntf_state()
6624 cps->cpntf_time = ktime_get_boottime_seconds(); in find_cpntf_state()
6627 found = lookup_clientid(&cps->cp_p_clid, true, nn); in find_cpntf_state()
6631 *stid = find_stateid_by_type(found, &cps->cp_p_stateid, in find_cpntf_state()
6646 spin_lock(&nn->s2s_cp_lock); in nfs4_put_cpntf_state()
6648 spin_unlock(&nn->s2s_cp_lock); in nfs4_put_cpntf_state()
6652 * nfs4_preprocess_stateid_op - find and prep stateid for an operation
6700 switch (s->sc_type) { in nfs4_preprocess_stateid_op()
6736 struct nfsd4_test_stateid *test_stateid = &u->test_stateid; in nfsd4_test_stateid()
6738 struct nfs4_client *cl = cstate->clp; in nfsd4_test_stateid()
6740 list_for_each_entry(stateid, &test_stateid->ts_stateid_list, ts_id_list) in nfsd4_test_stateid()
6741 stateid->ts_id_status = in nfsd4_test_stateid()
6742 nfsd4_validate_stateid(cl, &stateid->ts_id_stateid); in nfsd4_test_stateid()
6757 ret = check_stateid_generation(stateid, &s->sc_stateid, 1); in nfsd4_free_lock_stateid()
6762 if (check_for_locks(stp->st_stid.sc_file, in nfsd4_free_lock_stateid()
6763 lockowner(stp->st_stateowner))) in nfsd4_free_lock_stateid()
6770 mutex_unlock(&stp->st_mutex); in nfsd4_free_lock_stateid()
6780 struct nfsd4_free_stateid *free_stateid = &u->free_stateid; in nfsd4_free_stateid()
6781 stateid_t *stateid = &free_stateid->fr_stateid; in nfsd4_free_stateid()
6784 struct nfs4_client *cl = cstate->clp; in nfsd4_free_stateid()
6787 spin_lock(&cl->cl_lock); in nfsd4_free_stateid()
6791 spin_lock(&s->sc_lock); in nfsd4_free_stateid()
6792 switch (s->sc_type) { in nfsd4_free_stateid()
6797 ret = check_stateid_generation(stateid, &s->sc_stateid, 1); in nfsd4_free_stateid()
6803 spin_unlock(&s->sc_lock); in nfsd4_free_stateid()
6804 refcount_inc(&s->sc_count); in nfsd4_free_stateid()
6805 spin_unlock(&cl->cl_lock); in nfsd4_free_stateid()
6809 spin_unlock(&s->sc_lock); in nfsd4_free_stateid()
6811 list_del_init(&dp->dl_recall_lru); in nfsd4_free_stateid()
6812 spin_unlock(&cl->cl_lock); in nfsd4_free_stateid()
6818 spin_unlock(&s->sc_lock); in nfsd4_free_stateid()
6820 spin_unlock(&cl->cl_lock); in nfsd4_free_stateid()
6834 struct svc_fh *current_fh = &cstate->current_fh; in nfs4_seqid_op_checks()
6835 struct nfs4_stateowner *sop = stp->st_stateowner; in nfs4_seqid_op_checks()
6844 status = check_stateid_generation(stateid, &stp->st_stid.sc_stateid, nfsd4_has_session(cstate)); in nfs4_seqid_op_checks()
6846 status = nfs4_check_fh(current_fh, &stp->st_stid); in nfs4_seqid_op_checks()
6848 mutex_unlock(&stp->st_mutex); in nfs4_seqid_op_checks()
6853 * nfs4_preprocess_seqid_op - find and prep an ol_stateid for a seqid-morphing op
6882 nfsd4_cstate_assign_replay(cstate, stp->st_stateowner); in nfs4_preprocess_seqid_op()
6888 nfs4_put_stid(&stp->st_stid); in nfs4_preprocess_seqid_op()
6903 oo = openowner(stp->st_stateowner); in nfs4_preprocess_confirmed_seqid_op()
6904 if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) { in nfs4_preprocess_confirmed_seqid_op()
6905 mutex_unlock(&stp->st_mutex); in nfs4_preprocess_confirmed_seqid_op()
6906 nfs4_put_stid(&stp->st_stid); in nfs4_preprocess_confirmed_seqid_op()
6917 struct nfsd4_open_confirm *oc = &u->open_confirm; in nfsd4_open_confirm()
6924 cstate->current_fh.fh_dentry); in nfsd4_open_confirm()
6926 status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0); in nfsd4_open_confirm()
6931 oc->oc_seqid, &oc->oc_req_stateid, in nfsd4_open_confirm()
6935 oo = openowner(stp->st_stateowner); in nfsd4_open_confirm()
6937 if (oo->oo_flags & NFS4_OO_CONFIRMED) { in nfsd4_open_confirm()
6938 mutex_unlock(&stp->st_mutex); in nfsd4_open_confirm()
6941 oo->oo_flags |= NFS4_OO_CONFIRMED; in nfsd4_open_confirm()
6942 nfs4_inc_and_copy_stateid(&oc->oc_resp_stateid, &stp->st_stid); in nfsd4_open_confirm()
6943 mutex_unlock(&stp->st_mutex); in nfsd4_open_confirm()
6944 trace_nfsd_open_confirm(oc->oc_seqid, &stp->st_stid.sc_stateid); in nfsd4_open_confirm()
6945 nfsd4_client_record_create(oo->oo_owner.so_client); in nfsd4_open_confirm()
6948 nfs4_put_stid(&stp->st_stid); in nfsd4_open_confirm()
6958 nfs4_file_put_access(stp->st_stid.sc_file, access); in nfs4_stateid_downgrade_bit()
6984 struct nfsd4_open_downgrade *od = &u->open_downgrade; in nfsd4_open_downgrade()
6990 cstate->current_fh.fh_dentry); in nfsd4_open_downgrade()
6993 if (od->od_deleg_want) in nfsd4_open_downgrade()
6995 od->od_deleg_want); in nfsd4_open_downgrade()
6997 status = nfs4_preprocess_confirmed_seqid_op(cstate, od->od_seqid, in nfsd4_open_downgrade()
6998 &od->od_stateid, &stp, nn); in nfsd4_open_downgrade()
7002 if (!test_access(od->od_share_access, stp)) { in nfsd4_open_downgrade()
7004 stp->st_access_bmap, od->od_share_access); in nfsd4_open_downgrade()
7007 if (!test_deny(od->od_share_deny, stp)) { in nfsd4_open_downgrade()
7009 stp->st_deny_bmap, od->od_share_deny); in nfsd4_open_downgrade()
7012 nfs4_stateid_downgrade(stp, od->od_share_access); in nfsd4_open_downgrade()
7013 reset_union_bmap_deny(od->od_share_deny, stp); in nfsd4_open_downgrade()
7014 nfs4_inc_and_copy_stateid(&od->od_stateid, &stp->st_stid); in nfsd4_open_downgrade()
7017 mutex_unlock(&stp->st_mutex); in nfsd4_open_downgrade()
7018 nfs4_put_stid(&stp->st_stid); in nfsd4_open_downgrade()
7026 struct nfs4_client *clp = s->st_stid.sc_client; in nfsd4_close_open_stateid()
7031 spin_lock(&clp->cl_lock); in nfsd4_close_open_stateid()
7034 if (clp->cl_minorversion) { in nfsd4_close_open_stateid()
7037 spin_unlock(&clp->cl_lock); in nfsd4_close_open_stateid()
7039 nfs4_free_cpntf_statelist(clp->net, &stp->st_stid); in nfsd4_close_open_stateid()
7042 spin_unlock(&clp->cl_lock); in nfsd4_close_open_stateid()
7045 move_to_close_lru(s, clp->net); in nfsd4_close_open_stateid()
7056 struct nfsd4_close *close = &u->close; in nfsd4_close()
7063 cstate->current_fh.fh_dentry); in nfsd4_close()
7065 status = nfs4_preprocess_seqid_op(cstate, close->cl_seqid, in nfsd4_close()
7066 &close->cl_stateid, in nfsd4_close()
7073 stp->st_stid.sc_type = NFS4_CLOSED_STID; in nfsd4_close()
7081 nfs4_inc_and_copy_stateid(&close->cl_stateid, &stp->st_stid); in nfsd4_close()
7084 mutex_unlock(&stp->st_mutex); in nfsd4_close()
7093 memcpy(&close->cl_stateid, &close_stateid, sizeof(close->cl_stateid)); in nfsd4_close()
7096 nfs4_put_stid(&stp->st_stid); in nfsd4_close()
7105 struct nfsd4_delegreturn *dr = &u->delegreturn; in nfsd4_delegreturn()
7107 stateid_t *stateid = &dr->dr_stateid; in nfsd4_delegreturn()
7112 if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0))) in nfsd4_delegreturn()
7119 status = nfsd4_stid_check_stateid_generation(stateid, &dp->dl_stid, nfsd4_has_session(cstate)); in nfsd4_delegreturn()
7124 wake_up_var(d_inode(cstate->current_fh.fh_dentry)); in nfsd4_delegreturn()
7127 nfs4_put_stid(&dp->dl_stid); in nfsd4_delegreturn()
7140 	return end > start ? end - 1 : NFS4_MAX_UINT64; in last_byte_offset()
7144 * TODO: Linux file offsets are _signed_ 64-bit quantities, which means that
7145 * we can't properly handle lock requests that go beyond the (2^63 - 1)-th
7146 * byte, because of sign extension problems. Since NFSv4 calls for 64-bit
7147  * locking, this prevents us from being completely protocol-compliant.  The
 * real solution to this problem is to start using unsigned file offsets in
 * the VFS, but that is a much deeper change.
7154 if (lock->fl_start < 0) in nfs4_transform_lock_offset()
7155 lock->fl_start = OFFSET_MAX; in nfs4_transform_lock_offset()
7156 if (lock->fl_end < 0) in nfs4_transform_lock_offset()
7157 lock->fl_end = OFFSET_MAX; in nfs4_transform_lock_offset()
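/*
 * Editor's sketch (not part of the listing): a small, self-contained
 * user-space illustration of the offset arithmetic discussed in the TODO
 * comment above.  It assumes last_byte_offset() computes end = start + len
 * before the visible return statement; the SKETCH_* and sketch_* names are
 * invented for the example and do not exist in nfsd.
 */
#include <stdint.h>
#include <stdio.h>

#define SKETCH_NFS4_MAX_UINT64	(~(uint64_t)0)	/* length meaning "to EOF" */
#define SKETCH_OFFSET_MAX	INT64_MAX	/* largest signed file offset */

/* Last byte covered by (start, len); a wrap past 2^64 means "whole remainder". */
static uint64_t sketch_last_byte_offset(uint64_t start, uint64_t len)
{
	uint64_t end = start + len;	/* may wrap around */

	return end > start ? end - 1 : SKETCH_NFS4_MAX_UINT64;
}

/* Clamp to a signed offset, as nfs4_transform_lock_offset() does for fl_end. */
static int64_t sketch_clamp(uint64_t last_byte)
{
	return last_byte > (uint64_t)INT64_MAX ? SKETCH_OFFSET_MAX
					       : (int64_t)last_byte;
}

int main(void)
{
	/* offset 100, length 50 -> POSIX range ends at byte 149 */
	printf("%lld\n", (long long)sketch_clamp(sketch_last_byte_offset(100, 50)));
	/* offset 100, length NFS4_MAX_UINT64 ("to EOF") -> clamped to OFFSET_MAX */
	printf("%lld\n", (long long)sketch_clamp(
			sketch_last_byte_offset(100, SKETCH_NFS4_MAX_UINT64)));
	return 0;
}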
7165 nfs4_get_stateowner(&lo->lo_owner); in nfsd4_lm_get_owner()
7175 nfs4_put_stateowner(&lo->lo_owner); in nfsd4_lm_put_owner()
7182 struct nfs4_lockowner *lo = (struct nfs4_lockowner *)cfl->fl_owner; in nfsd4_lm_lock_expirable()
7183 struct nfs4_client *clp = lo->lo_owner.so_client; in nfsd4_lm_lock_expirable()
7187 nn = net_generic(clp->net, nfsd_net_id); in nfsd4_lm_lock_expirable()
7188 mod_delayed_work(laundry_wq, &nn->laundromat_work, 0); in nfsd4_lm_lock_expirable()
7204 struct nfs4_lockowner *lo = (struct nfs4_lockowner *)fl->fl_owner; in nfsd4_lm_notify()
7205 struct net *net = lo->lo_owner.so_client->net; in nfsd4_lm_notify()
7212 spin_lock(&nn->blocked_locks_lock); in nfsd4_lm_notify()
7213 if (!list_empty(&nbl->nbl_list)) { in nfsd4_lm_notify()
7214 list_del_init(&nbl->nbl_list); in nfsd4_lm_notify()
7215 list_del_init(&nbl->nbl_lru); in nfsd4_lm_notify()
7218 spin_unlock(&nn->blocked_locks_lock); in nfsd4_lm_notify()
7222 nfsd4_run_cb(&nbl->nbl_cb); in nfsd4_lm_notify()
7240 if (fl->fl_lmops == &nfsd_posix_mng_ops) { in nfs4_set_lock_denied()
7241 lo = (struct nfs4_lockowner *) fl->fl_owner; in nfs4_set_lock_denied()
7242 xdr_netobj_dup(&deny->ld_owner, &lo->lo_owner.so_owner, in nfs4_set_lock_denied()
7244 if (!deny->ld_owner.data) in nfs4_set_lock_denied()
7247 deny->ld_clientid = lo->lo_owner.so_client->cl_clientid; in nfs4_set_lock_denied()
7250 deny->ld_owner.len = 0; in nfs4_set_lock_denied()
7251 deny->ld_owner.data = NULL; in nfs4_set_lock_denied()
7252 deny->ld_clientid.cl_boot = 0; in nfs4_set_lock_denied()
7253 deny->ld_clientid.cl_id = 0; in nfs4_set_lock_denied()
7255 deny->ld_start = fl->fl_start; in nfs4_set_lock_denied()
7256 deny->ld_length = NFS4_MAX_UINT64; in nfs4_set_lock_denied()
7257 if (fl->fl_end != NFS4_MAX_UINT64) in nfs4_set_lock_denied()
7258 deny->ld_length = fl->fl_end - fl->fl_start + 1; in nfs4_set_lock_denied()
7259 deny->ld_type = NFS4_READ_LT; in nfs4_set_lock_denied()
7260 if (fl->fl_type != F_RDLCK) in nfs4_set_lock_denied()
7261 deny->ld_type = NFS4_WRITE_LT; in nfs4_set_lock_denied()
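/*
 * Editor's note: nfs4_set_lock_denied() above performs the inverse of the
 * offset transform used when applying locks - the conflicting POSIX range
 * [fl_start, fl_end] is reported back to the client as (ld_start, ld_length),
 * with ld_length defaulting to NFS4_MAX_UINT64 ("to end of file") and only
 * computed as fl_end - fl_start + 1 when fl_end is not NFS4_MAX_UINT64.
 */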
7270 lockdep_assert_held(&clp->cl_lock); in find_lockowner_str_locked()
7272 list_for_each_entry(so, &clp->cl_ownerstr_hashtbl[strhashval], in find_lockowner_str_locked()
7274 if (so->so_is_open_owner) in find_lockowner_str_locked()
7287 spin_lock(&clp->cl_lock); in find_lockowner_str()
7289 spin_unlock(&clp->cl_lock); in find_lockowner_str()
7312  * Called in nfsd4_lock - therefore, OPEN and OPEN_CONFIRM (if needed) have occurred.
7324 lo = alloc_stateowner(lockowner_slab, &lock->lk_new_owner, clp); in alloc_init_lock_stateowner()
7327 INIT_LIST_HEAD(&lo->lo_blocked); in alloc_init_lock_stateowner()
7328 INIT_LIST_HEAD(&lo->lo_owner.so_stateids); in alloc_init_lock_stateowner()
7329 lo->lo_owner.so_is_open_owner = 0; in alloc_init_lock_stateowner()
7330 lo->lo_owner.so_seqid = lock->lk_new_lock_seqid; in alloc_init_lock_stateowner()
7331 lo->lo_owner.so_ops = &lockowner_ops; in alloc_init_lock_stateowner()
7332 spin_lock(&clp->cl_lock); in alloc_init_lock_stateowner()
7333 ret = find_lockowner_str_locked(clp, &lock->lk_new_owner); in alloc_init_lock_stateowner()
7335 list_add(&lo->lo_owner.so_strhash, in alloc_init_lock_stateowner()
7336 &clp->cl_ownerstr_hashtbl[strhashval]); in alloc_init_lock_stateowner()
7339 nfs4_free_stateowner(&lo->lo_owner); in alloc_init_lock_stateowner()
7341 spin_unlock(&clp->cl_lock); in alloc_init_lock_stateowner()
7351 lockdep_assert_held(&ost->st_stid.sc_client->cl_lock); in find_lock_stateid()
7353 /* If ost is not hashed, ost->st_locks will not be valid */ in find_lock_stateid()
7355 list_for_each_entry(lst, &ost->st_locks, st_locks) { in find_lock_stateid()
7356 if (lst->st_stateowner == &lo->lo_owner) { in find_lock_stateid()
7357 refcount_inc(&lst->st_stid.sc_count); in find_lock_stateid()
7369 struct nfs4_client *clp = lo->lo_owner.so_client; in init_lock_stateid()
7372 mutex_init(&stp->st_mutex); in init_lock_stateid()
7373 mutex_lock_nested(&stp->st_mutex, OPEN_STATEID_MUTEX); in init_lock_stateid()
7375 spin_lock(&clp->cl_lock); in init_lock_stateid()
7381 refcount_inc(&stp->st_stid.sc_count); in init_lock_stateid()
7382 stp->st_stid.sc_type = NFS4_LOCK_STID; in init_lock_stateid()
7383 stp->st_stateowner = nfs4_get_stateowner(&lo->lo_owner); in init_lock_stateid()
7385 stp->st_stid.sc_file = fp; in init_lock_stateid()
7386 stp->st_access_bmap = 0; in init_lock_stateid()
7387 stp->st_deny_bmap = open_stp->st_deny_bmap; in init_lock_stateid()
7388 stp->st_openstp = open_stp; in init_lock_stateid()
7389 spin_lock(&fp->fi_lock); in init_lock_stateid()
7390 list_add(&stp->st_locks, &open_stp->st_locks); in init_lock_stateid()
7391 list_add(&stp->st_perstateowner, &lo->lo_owner.so_stateids); in init_lock_stateid()
7392 list_add(&stp->st_perfile, &fp->fi_stateids); in init_lock_stateid()
7393 spin_unlock(&fp->fi_lock); in init_lock_stateid()
7394 spin_unlock(&clp->cl_lock); in init_lock_stateid()
7397 spin_unlock(&clp->cl_lock); in init_lock_stateid()
7399 nfs4_put_stid(&retstp->st_stid); in init_lock_stateid()
7403 mutex_unlock(&stp->st_mutex); in init_lock_stateid()
7406 spin_unlock(&clp->cl_lock); in init_lock_stateid()
7407 mutex_unlock(&stp->st_mutex); in init_lock_stateid()
7418 struct nfs4_openowner *oo = openowner(ost->st_stateowner); in find_or_create_lock_stateid()
7419 struct nfs4_client *clp = oo->oo_owner.so_client; in find_or_create_lock_stateid()
7422 spin_lock(&clp->cl_lock); in find_or_create_lock_stateid()
7424 spin_unlock(&clp->cl_lock); in find_or_create_lock_stateid()
7428 nfs4_put_stid(&lst->st_stid); in find_or_create_lock_stateid()
7452 struct nfs4_file *fp = lock_stp->st_stid.sc_file; in get_lock_access()
7454 lockdep_assert_held(&fp->fi_lock); in get_lock_access()
7469 struct nfs4_file *fi = ost->st_stid.sc_file; in lookup_or_create_lock_state()
7470 struct nfs4_openowner *oo = openowner(ost->st_stateowner); in lookup_or_create_lock_state()
7471 struct nfs4_client *cl = oo->oo_owner.so_client; in lookup_or_create_lock_state()
7472 struct inode *inode = d_inode(cstate->current_fh.fh_dentry); in lookup_or_create_lock_state()
7477 lo = find_lockowner_str(cl, &lock->lk_new_owner); in lookup_or_create_lock_state()
7479 strhashval = ownerstr_hashval(&lock->lk_new_owner); in lookup_or_create_lock_state()
7486 if (!cstate->minorversion && in lookup_or_create_lock_state()
7487 lock->lk_new_lock_seqid != lo->lo_owner.so_seqid) in lookup_or_create_lock_state()
7500 nfs4_put_stateowner(&lo->lo_owner); in lookup_or_create_lock_state()
7511 struct nfsd4_lock *lock = &u->lock; in nfsd4_lock()
7531 (long long) lock->lk_offset, in nfsd4_lock()
7532 (long long) lock->lk_length); in nfsd4_lock()
7534 if (check_lock_length(lock->lk_offset, lock->lk_length)) in nfsd4_lock()
7537 if ((status = fh_verify(rqstp, &cstate->current_fh, in nfsd4_lock()
7543 if (lock->lk_is_new) { in nfsd4_lock()
7546 memcpy(&lock->lk_new_clientid, in nfsd4_lock()
7547 &cstate->clp->cl_clientid, in nfsd4_lock()
7552 lock->lk_new_open_seqid, in nfsd4_lock()
7553 &lock->lk_new_open_stateid, in nfsd4_lock()
7557 mutex_unlock(&open_stp->st_mutex); in nfsd4_lock()
7558 open_sop = openowner(open_stp->st_stateowner); in nfsd4_lock()
7560 if (!same_clid(&open_sop->oo_owner.so_client->cl_clientid, in nfsd4_lock()
7561 &lock->lk_new_clientid)) in nfsd4_lock()
7567 lock->lk_old_lock_seqid, in nfsd4_lock()
7568 &lock->lk_old_lock_stateid, in nfsd4_lock()
7573 lock_sop = lockowner(lock_stp->st_stateowner); in nfsd4_lock()
7575 lkflg = setlkflg(lock->lk_type); in nfsd4_lock()
7581 if (locks_in_grace(net) && !lock->lk_reclaim) in nfsd4_lock()
7584 if (!locks_in_grace(net) && lock->lk_reclaim) in nfsd4_lock()
7587 if (lock->lk_reclaim) in nfsd4_lock()
7590 fp = lock_stp->st_stid.sc_file; in nfsd4_lock()
7591 switch (lock->lk_type) { in nfsd4_lock()
7597 spin_lock(&fp->fi_lock); in nfsd4_lock()
7601 spin_unlock(&fp->fi_lock); in nfsd4_lock()
7609 spin_lock(&fp->fi_lock); in nfsd4_lock()
7613 spin_unlock(&fp->fi_lock); in nfsd4_lock()
7627 * Most filesystems with their own ->lock operations will block in nfsd4_lock()
7633 if (nf->nf_file->f_op->lock) in nfsd4_lock()
7636 nbl = find_or_allocate_block(lock_sop, &fp->fi_fhandle, nn); in nfsd4_lock()
7643 file_lock = &nbl->nbl_lock; in nfsd4_lock()
7644 file_lock->fl_type = fl_type; in nfsd4_lock()
7645 file_lock->fl_owner = (fl_owner_t)lockowner(nfs4_get_stateowner(&lock_sop->lo_owner)); in nfsd4_lock()
7646 file_lock->fl_pid = current->tgid; in nfsd4_lock()
7647 file_lock->fl_file = nf->nf_file; in nfsd4_lock()
7648 file_lock->fl_flags = fl_flags; in nfsd4_lock()
7649 file_lock->fl_lmops = &nfsd_posix_mng_ops; in nfsd4_lock()
7650 file_lock->fl_start = lock->lk_offset; in nfsd4_lock()
7651 file_lock->fl_end = last_byte_offset(lock->lk_offset, lock->lk_length); in nfsd4_lock()
7662 nbl->nbl_time = ktime_get_boottime_seconds(); in nfsd4_lock()
7663 spin_lock(&nn->blocked_locks_lock); in nfsd4_lock()
7664 list_add_tail(&nbl->nbl_list, &lock_sop->lo_blocked); in nfsd4_lock()
7665 list_add_tail(&nbl->nbl_lru, &nn->blocked_locks_lru); in nfsd4_lock()
7666 kref_get(&nbl->nbl_kref); in nfsd4_lock()
7667 spin_unlock(&nn->blocked_locks_lock); in nfsd4_lock()
7670 err = vfs_lock_file(nf->nf_file, F_SETLK, file_lock, conflock); in nfsd4_lock()
7673 nfs4_inc_and_copy_stateid(&lock->lk_resp_stateid, &lock_stp->st_stid); in nfsd4_lock()
7675 if (lock->lk_reclaim) in nfsd4_lock()
7676 nn->somebody_reclaimed = true; in nfsd4_lock()
7679 kref_put(&nbl->nbl_kref, free_nbl); in nfsd4_lock()
7682 case -EAGAIN: /* conflock holds conflicting lock */ in nfsd4_lock()
7685 nfs4_set_lock_denied(conflock, &lock->lk_denied); in nfsd4_lock()
7687 case -EDEADLK: in nfsd4_lock()
7699 spin_lock(&nn->blocked_locks_lock); in nfsd4_lock()
7700 if (!list_empty(&nbl->nbl_list) && in nfsd4_lock()
7701 !list_empty(&nbl->nbl_lru)) { in nfsd4_lock()
7702 list_del_init(&nbl->nbl_list); in nfsd4_lock()
7703 list_del_init(&nbl->nbl_lru); in nfsd4_lock()
7704 kref_put(&nbl->nbl_kref, free_nbl); in nfsd4_lock()
7707 spin_unlock(&nn->blocked_locks_lock); in nfsd4_lock()
7715 if (cstate->replay_owner && in nfsd4_lock()
7716 cstate->replay_owner != &lock_sop->lo_owner && in nfsd4_lock()
7718 lock_sop->lo_owner.so_seqid++; in nfsd4_lock()
7721 * If this is a new, never-before-used stateid, and we are in nfsd4_lock()
7727 mutex_unlock(&lock_stp->st_mutex); in nfsd4_lock()
7729 nfs4_put_stid(&lock_stp->st_stid); in nfsd4_lock()
7732 nfs4_put_stid(&open_stp->st_stid); in nfsd4_lock()
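/*
 * Editor's summary of the nfsd4_lock() fragments above: for a new lock
 * (lk_is_new) the open stateid and its owner are validated first, otherwise
 * the existing lock stateid is looked up by seqid; the request is then
 * encoded into the struct file_lock embedded in an nfsd4_blocked_lock (nbl)
 * and applied with vfs_lock_file().  Blocking requests on filesystems that
 * lack their own ->lock method get the nbl queued on the lockowner's
 * lo_blocked list and the per-net blocked_locks_lru first, so a later
 * lm_notify callback can tell the client the lock has become available.
 * -EAGAIN means a conflict: the conflicting lock is encoded into lk_denied
 * via nfs4_set_lock_denied().  Error paths dequeue the nbl again and drop
 * its reference.
 */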
7753 inode = fhp->fh_dentry->d_inode; in nfsd_test_lock()
7758 lock->fl_file = nf->nf_file; in nfsd_test_lock()
7759 err = nfserrno(vfs_test_lock(nf->nf_file, lock)); in nfsd_test_lock()
7760 lock->fl_file = NULL; in nfsd_test_lock()
7774 struct nfsd4_lockt *lockt = &u->lockt; in nfsd4_lockt()
7783 if (check_lock_length(lockt->lt_offset, lockt->lt_length)) in nfsd4_lockt()
7787 status = set_client(&lockt->lt_clientid, cstate, nn); in nfsd4_lockt()
7792 if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0))) in nfsd4_lockt()
7802 switch (lockt->lt_type) { in nfsd4_lockt()
7805 file_lock->fl_type = F_RDLCK; in nfsd4_lockt()
7809 file_lock->fl_type = F_WRLCK; in nfsd4_lockt()
7817 lo = find_lockowner_str(cstate->clp, &lockt->lt_owner); in nfsd4_lockt()
7819 file_lock->fl_owner = (fl_owner_t)lo; in nfsd4_lockt()
7820 file_lock->fl_pid = current->tgid; in nfsd4_lockt()
7821 file_lock->fl_flags = FL_POSIX; in nfsd4_lockt()
7823 file_lock->fl_start = lockt->lt_offset; in nfsd4_lockt()
7824 file_lock->fl_end = last_byte_offset(lockt->lt_offset, lockt->lt_length); in nfsd4_lockt()
7828 status = nfsd_test_lock(rqstp, &cstate->current_fh, file_lock); in nfsd4_lockt()
7832 if (file_lock->fl_type != F_UNLCK) { in nfsd4_lockt()
7834 nfs4_set_lock_denied(file_lock, &lockt->lt_denied); in nfsd4_lockt()
7838 nfs4_put_stateowner(&lo->lo_owner); in nfsd4_lockt()
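/*
 * Editor's note: LOCKT carries an owner name but no stateid, so the
 * fragments above resolve the client with set_client(), look up a matching
 * lockowner (if any) to use as fl_owner, and probe for conflicts through
 * nfsd_test_lock()/vfs_test_lock().  A returned fl_type other than F_UNLCK
 * means a conflicting lock exists, and it is encoded into lt_denied with
 * nfs4_set_lock_denied().
 */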
7848 struct nfsd4_locku *locku = &u->locku; in nfsd4_locku()
7857 (long long) locku->lu_offset, in nfsd4_locku()
7858 (long long) locku->lu_length); in nfsd4_locku()
7860 if (check_lock_length(locku->lu_offset, locku->lu_length)) in nfsd4_locku()
7863 status = nfs4_preprocess_seqid_op(cstate, locku->lu_seqid, in nfsd4_locku()
7864 &locku->lu_stateid, NFS4_LOCK_STID, in nfsd4_locku()
7868 nf = find_any_file(stp->st_stid.sc_file); in nfsd4_locku()
7880 file_lock->fl_type = F_UNLCK; in nfsd4_locku()
7881 file_lock->fl_owner = (fl_owner_t)lockowner(nfs4_get_stateowner(stp->st_stateowner)); in nfsd4_locku()
7882 file_lock->fl_pid = current->tgid; in nfsd4_locku()
7883 file_lock->fl_file = nf->nf_file; in nfsd4_locku()
7884 file_lock->fl_flags = FL_POSIX; in nfsd4_locku()
7885 file_lock->fl_lmops = &nfsd_posix_mng_ops; in nfsd4_locku()
7886 file_lock->fl_start = locku->lu_offset; in nfsd4_locku()
7888 file_lock->fl_end = last_byte_offset(locku->lu_offset, in nfsd4_locku()
7889 locku->lu_length); in nfsd4_locku()
7892 err = vfs_lock_file(nf->nf_file, F_SETLK, file_lock, NULL); in nfsd4_locku()
7897 nfs4_inc_and_copy_stateid(&locku->lu_stateid, &stp->st_stid); in nfsd4_locku()
7901 mutex_unlock(&stp->st_mutex); in nfsd4_locku()
7902 nfs4_put_stid(&stp->st_stid); in nfsd4_locku()
7928 spin_lock(&fp->fi_lock); in check_for_locks()
7936 inode = file_inode(nf->nf_file); in check_for_locks()
7939 if (flctx && !list_empty_careful(&flctx->flc_posix)) { in check_for_locks()
7940 spin_lock(&flctx->flc_lock); in check_for_locks()
7941 list_for_each_entry(fl, &flctx->flc_posix, fl_list) { in check_for_locks()
7942 if (fl->fl_owner == (fl_owner_t)lowner) { in check_for_locks()
7947 spin_unlock(&flctx->flc_lock); in check_for_locks()
7950 spin_unlock(&fp->fi_lock); in check_for_locks()
7955 * nfsd4_release_lockowner - process NFSv4.0 RELEASE_LOCKOWNER operations
7960  * Check if there are any locks still held and, if not, free the lockowner
7974 struct nfsd4_release_lockowner *rlockowner = &u->release_lockowner; in nfsd4_release_lockowner()
7976 clientid_t *clid = &rlockowner->rl_clientid; in nfsd4_release_lockowner()
7984 clid->cl_boot, clid->cl_id); in nfsd4_release_lockowner()
7989 clp = cstate->clp; in nfsd4_release_lockowner()
7991 spin_lock(&clp->cl_lock); in nfsd4_release_lockowner()
7992 lo = find_lockowner_str_locked(clp, &rlockowner->rl_owner); in nfsd4_release_lockowner()
7994 spin_unlock(&clp->cl_lock); in nfsd4_release_lockowner()
7998 list_for_each_entry(stp, &lo->lo_owner.so_stateids, st_perstateowner) { in nfsd4_release_lockowner()
7999 if (check_for_locks(stp->st_stid.sc_file, lo)) { in nfsd4_release_lockowner()
8000 spin_unlock(&clp->cl_lock); in nfsd4_release_lockowner()
8001 nfs4_put_stateowner(&lo->lo_owner); in nfsd4_release_lockowner()
8006 while (!list_empty(&lo->lo_owner.so_stateids)) { in nfsd4_release_lockowner()
8007 stp = list_first_entry(&lo->lo_owner.so_stateids, in nfsd4_release_lockowner()
8013 spin_unlock(&clp->cl_lock); in nfsd4_release_lockowner()
8017 nfs4_put_stateowner(&lo->lo_owner); in nfsd4_release_lockowner()
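/*
 * Editor's summary of the nfsd4_release_lockowner() fragments above: the
 * lockowner is looked up by name under cl_lock; every lock stateid it owns
 * is checked with check_for_locks(), and if any file still holds a POSIX
 * lock belonging to this owner the operation returns without releasing
 * anything.  Otherwise each stateid is unhashed in turn and the owner's
 * reference is finally dropped.
 */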
8033 return (crp && crp->cr_clp); in nfs4_has_reclaimed_state()
8052 INIT_LIST_HEAD(&crp->cr_strhash); in nfs4_client_to_reclaim()
8053 list_add(&crp->cr_strhash, &nn->reclaim_str_hashtbl[strhashval]); in nfs4_client_to_reclaim()
8054 crp->cr_name.data = name.data; in nfs4_client_to_reclaim()
8055 crp->cr_name.len = name.len; in nfs4_client_to_reclaim()
8056 crp->cr_princhash.data = princhash.data; in nfs4_client_to_reclaim()
8057 crp->cr_princhash.len = princhash.len; in nfs4_client_to_reclaim()
8058 crp->cr_clp = NULL; in nfs4_client_to_reclaim()
8059 nn->reclaim_str_hashtbl_size++; in nfs4_client_to_reclaim()
8067 list_del(&crp->cr_strhash); in nfs4_remove_reclaim_record()
8068 kfree(crp->cr_name.data); in nfs4_remove_reclaim_record()
8069 kfree(crp->cr_princhash.data); in nfs4_remove_reclaim_record()
8071 nn->reclaim_str_hashtbl_size--; in nfs4_remove_reclaim_record()
8081 while (!list_empty(&nn->reclaim_str_hashtbl[i])) { in nfs4_release_reclaim()
8082 crp = list_entry(nn->reclaim_str_hashtbl[i].next, in nfs4_release_reclaim()
8087 WARN_ON_ONCE(nn->reclaim_str_hashtbl_size); in nfs4_release_reclaim()
8099 list_for_each_entry(crp, &nn->reclaim_str_hashtbl[strhashval], cr_strhash) { in nfsd4_find_reclaim_client()
8100 if (compare_blob(&crp->cr_name, &name) == 0) { in nfsd4_find_reclaim_client()
8110 if (test_bit(NFSD4_CLIENT_RECLAIM_COMPLETE, &clp->cl_flags)) in nfs4_check_open_reclaim()
8137 max_delegations = nr_free_buffer_pages() >> (20 - 2 - PAGE_SHIFT); in set_max_delegations()
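/*
 * Editor's note on set_max_delegations() above: nr_free_buffer_pages()
 * returns a page count, and shifting right by (20 - 2 - PAGE_SHIFT) converts
 * pages to mebibytes and then multiplies by four, so the cap works out to
 * roughly four delegations per MiB of free buffer memory (with 4 KiB pages
 * that is pages >> 6, i.e. one delegation per 256 KiB).
 */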
8145 nn->conf_id_hashtbl = kmalloc_array(CLIENT_HASH_SIZE, in nfs4_state_create_net()
8148 if (!nn->conf_id_hashtbl) in nfs4_state_create_net()
8150 nn->unconf_id_hashtbl = kmalloc_array(CLIENT_HASH_SIZE, in nfs4_state_create_net()
8153 if (!nn->unconf_id_hashtbl) in nfs4_state_create_net()
8155 nn->sessionid_hashtbl = kmalloc_array(SESSION_HASH_SIZE, in nfs4_state_create_net()
8158 if (!nn->sessionid_hashtbl) in nfs4_state_create_net()
8162 INIT_LIST_HEAD(&nn->conf_id_hashtbl[i]); in nfs4_state_create_net()
8163 INIT_LIST_HEAD(&nn->unconf_id_hashtbl[i]); in nfs4_state_create_net()
8166 INIT_LIST_HEAD(&nn->sessionid_hashtbl[i]); in nfs4_state_create_net()
8167 nn->conf_name_tree = RB_ROOT; in nfs4_state_create_net()
8168 nn->unconf_name_tree = RB_ROOT; in nfs4_state_create_net()
8169 nn->boot_time = ktime_get_real_seconds(); in nfs4_state_create_net()
8170 nn->grace_ended = false; in nfs4_state_create_net()
8171 nn->nfsd4_manager.block_opens = true; in nfs4_state_create_net()
8172 INIT_LIST_HEAD(&nn->nfsd4_manager.list); in nfs4_state_create_net()
8173 INIT_LIST_HEAD(&nn->client_lru); in nfs4_state_create_net()
8174 INIT_LIST_HEAD(&nn->close_lru); in nfs4_state_create_net()
8175 INIT_LIST_HEAD(&nn->del_recall_lru); in nfs4_state_create_net()
8176 spin_lock_init(&nn->client_lock); in nfs4_state_create_net()
8177 spin_lock_init(&nn->s2s_cp_lock); in nfs4_state_create_net()
8178 idr_init(&nn->s2s_cp_stateids); in nfs4_state_create_net()
8179 atomic_set(&nn->pending_async_copies, 0); in nfs4_state_create_net()
8181 spin_lock_init(&nn->blocked_locks_lock); in nfs4_state_create_net()
8182 INIT_LIST_HEAD(&nn->blocked_locks_lru); in nfs4_state_create_net()
8184 INIT_DELAYED_WORK(&nn->laundromat_work, laundromat_main); in nfs4_state_create_net()
8185 INIT_WORK(&nn->nfsd_shrinker_work, nfsd4_state_shrinker_worker); in nfs4_state_create_net()
8188 nn->nfsd_client_shrinker.scan_objects = nfsd4_state_shrinker_scan; in nfs4_state_create_net()
8189 nn->nfsd_client_shrinker.count_objects = nfsd4_state_shrinker_count; in nfs4_state_create_net()
8190 nn->nfsd_client_shrinker.seeks = DEFAULT_SEEKS; in nfs4_state_create_net()
8192 if (register_shrinker(&nn->nfsd_client_shrinker, "nfsd-client")) in nfs4_state_create_net()
8198 kfree(nn->sessionid_hashtbl); in nfs4_state_create_net()
8200 kfree(nn->unconf_id_hashtbl); in nfs4_state_create_net()
8202 kfree(nn->conf_id_hashtbl); in nfs4_state_create_net()
8204 return -ENOMEM; in nfs4_state_create_net()
8215 while (!list_empty(&nn->conf_id_hashtbl[i])) { in nfs4_state_destroy_net()
8216 clp = list_entry(nn->conf_id_hashtbl[i].next, struct nfs4_client, cl_idhash); in nfs4_state_destroy_net()
8221 WARN_ON(!list_empty(&nn->blocked_locks_lru)); in nfs4_state_destroy_net()
8224 while (!list_empty(&nn->unconf_id_hashtbl[i])) { in nfs4_state_destroy_net()
8225 clp = list_entry(nn->unconf_id_hashtbl[i].next, struct nfs4_client, cl_idhash); in nfs4_state_destroy_net()
8230 kfree(nn->sessionid_hashtbl); in nfs4_state_destroy_net()
8231 kfree(nn->unconf_id_hashtbl); in nfs4_state_destroy_net()
8232 kfree(nn->conf_id_hashtbl); in nfs4_state_destroy_net()
8245 locks_start_grace(net, &nn->nfsd4_manager); in nfs4_state_start_net()
8247 if (nn->track_reclaim_completes && nn->reclaim_str_hashtbl_size == 0) in nfs4_state_start_net()
8249 printk(KERN_INFO "NFSD: starting %lld-second grace period (net %x)\n", in nfs4_state_start_net()
8250 nn->nfsd4_grace, net->ns.inum); in nfs4_state_start_net()
8252 queue_delayed_work(laundry_wq, &nn->laundromat_work, nn->nfsd4_grace * HZ); in nfs4_state_start_net()
8257 net->ns.inum); in nfs4_state_start_net()
8258 queue_delayed_work(laundry_wq, &nn->laundromat_work, nn->nfsd4_lease * HZ); in nfs4_state_start_net()
8291 unregister_shrinker(&nn->nfsd_client_shrinker); in nfs4_state_shutdown_net()
8292 cancel_work_sync(&nn->nfsd_shrinker_work); in nfs4_state_shutdown_net()
8293 cancel_delayed_work_sync(&nn->laundromat_work); in nfs4_state_shutdown_net()
8294 locks_end_grace(&nn->nfsd4_manager); in nfs4_state_shutdown_net()
8298 list_for_each_safe(pos, next, &nn->del_recall_lru) { in nfs4_state_shutdown_net()
8301 list_add(&dp->dl_recall_lru, &reaplist); in nfs4_state_shutdown_net()
8306 list_del_init(&dp->dl_recall_lru); in nfs4_state_shutdown_net()
8329 memcpy(stateid, &cstate->current_stateid, sizeof(stateid_t)); in get_stateid()
8335 if (cstate->minorversion) { in put_stateid()
8336 memcpy(&cstate->current_stateid, stateid, sizeof(stateid_t)); in put_stateid()
8354 put_stateid(cstate, &u->open_downgrade.od_stateid); in nfsd4_set_opendowngradestateid()
8361 put_stateid(cstate, &u->open.op_stateid); in nfsd4_set_openstateid()
8368 put_stateid(cstate, &u->close.cl_stateid); in nfsd4_set_closestateid()
8375 put_stateid(cstate, &u->lock.lk_resp_stateid); in nfsd4_set_lockstateid()
8386 get_stateid(cstate, &u->open_downgrade.od_stateid); in nfsd4_get_opendowngradestateid()
8393 get_stateid(cstate, &u->delegreturn.dr_stateid); in nfsd4_get_delegreturnstateid()
8400 get_stateid(cstate, &u->free_stateid.fr_stateid); in nfsd4_get_freestateid()
8407 get_stateid(cstate, &u->setattr.sa_stateid); in nfsd4_get_setattrstateid()
8414 get_stateid(cstate, &u->close.cl_stateid); in nfsd4_get_closestateid()
8421 get_stateid(cstate, &u->locku.lu_stateid); in nfsd4_get_lockustateid()
8428 get_stateid(cstate, &u->read.rd_stateid); in nfsd4_get_readstateid()
8435 get_stateid(cstate, &u->write.wr_stateid); in nfsd4_get_writestateid()
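/*
 * Editor's note: the get_stateid()/put_stateid() helpers and the
 * nfsd4_set_*stateid()/nfsd4_get_*stateid() wrappers above implement the
 * NFSv4.1 "current stateid" facility - put_stateid() records the stateid
 * produced by a state-morphing operation in cstate->current_stateid (minor
 * version >= 1 only), and get_stateid() copies that saved value into the
 * arguments of a later operation in the same COMPOUND.  The checks deciding
 * when the substitution applies are outside the fragments shown here.
 */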
8439 * nfsd4_deleg_getattr_conflict - Recall if GETATTR causes conflict
8469 spin_lock(&ctx->flc_lock); in nfsd4_deleg_getattr_conflict()
8470 list_for_each_entry(fl, &ctx->flc_lease, fl_list) { in nfsd4_deleg_getattr_conflict()
8471 if (fl->fl_flags == FL_LAYOUT) in nfsd4_deleg_getattr_conflict()
8473 if (fl->fl_lmops != &nfsd_lease_mng_ops) { in nfsd4_deleg_getattr_conflict()
8475 * non-nfs lease, if it's a lease with F_RDLCK then in nfsd4_deleg_getattr_conflict()
8479 if (fl->fl_type == F_RDLCK) in nfsd4_deleg_getattr_conflict()
8483 if (fl->fl_type == F_WRLCK) { in nfsd4_deleg_getattr_conflict()
8484 dp = fl->fl_owner; in nfsd4_deleg_getattr_conflict()
8485 if (dp->dl_recall.cb_clp == *(rqstp->rq_lease_breaker)) { in nfsd4_deleg_getattr_conflict()
8486 spin_unlock(&ctx->flc_lock); in nfsd4_deleg_getattr_conflict()
8490 spin_unlock(&ctx->flc_lock); in nfsd4_deleg_getattr_conflict()
8500 spin_unlock(&ctx->flc_lock); in nfsd4_deleg_getattr_conflict()
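/*
 * Editor's summary of nfsd4_deleg_getattr_conflict() above: the inode's
 * lease list is walked under flc_lock.  FL_LAYOUT leases are skipped; a
 * non-nfsd read lease means no write delegation can exist, so the walk ends;
 * an nfsd-owned F_WRLCK lease is an outstanding write delegation, and unless
 * it belongs to the very client issuing the GETATTR (the rq_lease_breaker
 * check) it has to be recalled, per the kernel-doc above, before attributes
 * such as size can be reported - the recall itself happens in code beyond
 * these fragments.
 */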