Lines Matching +full:hall +full:- +full:switch +full:-
1 // SPDX-License-Identifier: GPL-2.0
4 * change in the future and be a per-client cache.
64 * XXX: these limits are per-container, so memory used will increase
71 unsigned long low_pages = totalram_pages() - totalhigh_pages(); in nfsd_cache_size_limit()
73 limit = (16 * int_sqrt(low_pages)) << (PAGE_SHIFT - 10); in nfsd_cache_size_limit()
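As a worked example of the sizing heuristic above, the sketch below recomputes the same limit in userspace. The 4 KiB page size, the naive integer square root, and the 4 GiB lowmem figure in main() are assumptions for illustration; low_pages stands in for totalram_pages() - totalhigh_pages(), and the in-tree function additionally clamps the result, which this listing does not show.

/* Userspace sketch of the DRC sizing heuristic, assuming 4 KiB pages. */
#include <stdio.h>

#define PAGE_SHIFT 12

/* naive integer square root, standing in for the kernel's int_sqrt() */
static unsigned long int_sqrt(unsigned long x)
{
	unsigned long r = 0;

	while ((r + 1) * (r + 1) <= x)
		r++;
	return r;
}

static unsigned int cache_size_limit(unsigned long low_pages)
{
	/* 16 * sqrt(lowmem pages), scaled from pages to kilobyte units */
	return (16 * int_sqrt(low_pages)) << (PAGE_SHIFT - 10);
}

int main(void)
{
	/* 4 GiB of low memory = 1048576 pages -> 16 * 1024 << 2 = 65536 */
	printf("limit: %u entries\n", cache_size_limit(1048576UL));
	return 0;
}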
90 return hash_32(be32_to_cpu(xid), nn->maskbits); in nfsd_cache_hash()
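The bucket index comes from hashing the byte-swapped XID down to nn->maskbits bits. A minimal sketch of that idea, using ntohl() in place of be32_to_cpu() and the classic 32-bit golden-ratio multiplier as a stand-in for the kernel's hash_32():

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>

/* multiplicative hash folded down to "bits" bits */
static uint32_t hash32_sketch(uint32_t val, unsigned int bits)
{
	return (val * 0x61C88647U) >> (32 - bits);
}

int main(void)
{
	uint32_t xid_be = htonl(0x12345678);	/* XID as it appears on the wire */
	unsigned int maskbits = 9;		/* 512 buckets */

	printf("bucket %u of %u\n",
	       hash32_sketch(ntohl(xid_be), maskbits), 1U << maskbits);
	return 0;
}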
101 rp->c_state = RC_UNUSED; in nfsd_reply_cache_alloc()
102 rp->c_type = RC_NOCACHE; in nfsd_reply_cache_alloc()
103 RB_CLEAR_NODE(&rp->c_node); in nfsd_reply_cache_alloc()
104 INIT_LIST_HEAD(&rp->c_lru); in nfsd_reply_cache_alloc()
106 memset(&rp->c_key, 0, sizeof(rp->c_key)); in nfsd_reply_cache_alloc()
107 rp->c_key.k_xid = rqstp->rq_xid; in nfsd_reply_cache_alloc()
108 rp->c_key.k_proc = rqstp->rq_proc; in nfsd_reply_cache_alloc()
109 rpc_copy_addr((struct sockaddr *)&rp->c_key.k_addr, svc_addr(rqstp)); in nfsd_reply_cache_alloc()
110 rpc_set_port((struct sockaddr *)&rp->c_key.k_addr, rpc_get_port(svc_addr(rqstp))); in nfsd_reply_cache_alloc()
111 rp->c_key.k_prot = rqstp->rq_prot; in nfsd_reply_cache_alloc()
112 rp->c_key.k_vers = rqstp->rq_vers; in nfsd_reply_cache_alloc()
113 rp->c_key.k_len = rqstp->rq_arg.len; in nfsd_reply_cache_alloc()
114 rp->c_key.k_csum = csum; in nfsd_reply_cache_alloc()
123 if (rp->c_type == RC_REPLBUFF && rp->c_replvec.iov_base) { in nfsd_reply_cache_free_locked()
124 nn->drc_mem_usage -= rp->c_replvec.iov_len; in nfsd_reply_cache_free_locked()
125 kfree(rp->c_replvec.iov_base); in nfsd_reply_cache_free_locked()
127 if (rp->c_state != RC_UNUSED) { in nfsd_reply_cache_free_locked()
128 rb_erase(&rp->c_node, &b->rb_head); in nfsd_reply_cache_free_locked()
129 list_del(&rp->c_lru); in nfsd_reply_cache_free_locked()
130 atomic_dec(&nn->num_drc_entries); in nfsd_reply_cache_free_locked()
131 nn->drc_mem_usage -= sizeof(*rp); in nfsd_reply_cache_free_locked()
140 spin_lock(&b->cache_lock); in nfsd_reply_cache_free()
142 spin_unlock(&b->cache_lock); in nfsd_reply_cache_free()
149 return drc_slab ? 0 : -ENOMEM; in nfsd_drc_slab_create()
163 nn->max_drc_entries = nfsd_cache_size_limit(); in nfsd_reply_cache_init()
164 atomic_set(&nn->num_drc_entries, 0); in nfsd_reply_cache_init()
165 hashsize = nfsd_hashsize(nn->max_drc_entries); in nfsd_reply_cache_init()
166 nn->maskbits = ilog2(hashsize); in nfsd_reply_cache_init()
168 nn->nfsd_reply_cache_shrinker.scan_objects = nfsd_reply_cache_scan; in nfsd_reply_cache_init()
169 nn->nfsd_reply_cache_shrinker.count_objects = nfsd_reply_cache_count; in nfsd_reply_cache_init()
170 nn->nfsd_reply_cache_shrinker.seeks = 1; in nfsd_reply_cache_init()
171 status = register_shrinker(&nn->nfsd_reply_cache_shrinker); in nfsd_reply_cache_init()
175 nn->drc_hashtbl = kvzalloc(array_size(hashsize, in nfsd_reply_cache_init()
176 sizeof(*nn->drc_hashtbl)), GFP_KERNEL); in nfsd_reply_cache_init()
177 if (!nn->drc_hashtbl) in nfsd_reply_cache_init()
181 INIT_LIST_HEAD(&nn->drc_hashtbl[i].lru_head); in nfsd_reply_cache_init()
182 spin_lock_init(&nn->drc_hashtbl[i].cache_lock); in nfsd_reply_cache_init()
184 nn->drc_hashsize = hashsize; in nfsd_reply_cache_init()
188 unregister_shrinker(&nn->nfsd_reply_cache_shrinker); in nfsd_reply_cache_init()
191 return -ENOMEM; in nfsd_reply_cache_init()
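nfsd_reply_cache_init() sizes the table from the entry limit, registers the shrinker, and allocates one bucket per hash chain, each with its own lock, LRU list head and rb-tree root. A reduced userspace sketch of that per-bucket layout (the pthread mutex and the empty rb_root placeholder are stand-ins; only the shape mirrors the code above):

#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>

struct list_head { struct list_head *prev, *next; };

static void INIT_LIST_HEAD(struct list_head *h)
{
	h->prev = h->next = h;
}

/* one hash chain of the duplicate reply cache */
struct drc_bucket {
	void *rb_root;			/* stand-in for struct rb_root */
	struct list_head lru_head;	/* oldest entries at the front */
	pthread_mutex_t cache_lock;	/* stand-in for spinlock_t */
};

static struct drc_bucket *alloc_drc_table(unsigned int hashsize)
{
	struct drc_bucket *tbl = calloc(hashsize, sizeof(*tbl));

	if (!tbl)
		return NULL;
	for (unsigned int i = 0; i < hashsize; i++) {
		INIT_LIST_HEAD(&tbl[i].lru_head);
		pthread_mutex_init(&tbl[i].cache_lock, NULL);
	}
	return tbl;
}

int main(void)
{
	unsigned int maskbits = 9;		/* as if ilog2(hashsize) == 9 */
	struct drc_bucket *tbl = alloc_drc_table(1U << maskbits);

	if (!tbl)
		return 1;
	printf("%u buckets initialized\n", 1U << maskbits);
	free(tbl);
	return 0;
}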
199 unregister_shrinker(&nn->nfsd_reply_cache_shrinker); in nfsd_reply_cache_shutdown()
201 for (i = 0; i < nn->drc_hashsize; i++) { in nfsd_reply_cache_shutdown()
202 struct list_head *head = &nn->drc_hashtbl[i].lru_head; in nfsd_reply_cache_shutdown()
205 nfsd_reply_cache_free_locked(&nn->drc_hashtbl[i], in nfsd_reply_cache_shutdown()
210 kvfree(nn->drc_hashtbl); in nfsd_reply_cache_shutdown()
211 nn->drc_hashtbl = NULL; in nfsd_reply_cache_shutdown()
212 nn->drc_hashsize = 0; in nfsd_reply_cache_shutdown()
223 rp->c_timestamp = jiffies; in lru_put_end()
224 list_move_tail(&rp->c_lru, &b->lru_head); in lru_put_end()
233 list_for_each_entry_safe(rp, tmp, &b->lru_head, c_lru) { in prune_bucket()
236 * in-progress, but do keep scanning the list. in prune_bucket()
238 if (rp->c_state == RC_INPROG) in prune_bucket()
240 if (atomic_read(&nn->num_drc_entries) <= nn->max_drc_entries && in prune_bucket()
241 time_before(jiffies, rp->c_timestamp + RC_EXPIRE)) in prune_bucket()
259 for (i = 0; i < nn->drc_hashsize; i++) { in prune_cache_entries()
260 struct nfsd_drc_bucket *b = &nn->drc_hashtbl[i]; in prune_cache_entries()
262 if (list_empty(&b->lru_head)) in prune_cache_entries()
264 spin_lock(&b->cache_lock); in prune_cache_entries()
266 spin_unlock(&b->cache_lock); in prune_cache_entries()
277 return atomic_read(&nn->num_drc_entries); in nfsd_reply_cache_count()
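prune_bucket() walks a bucket's LRU list from oldest to newest: entries whose call is still in progress are kept, and the walk stops once the cache is back under its entry limit and the entry at hand is younger than the expiry window; prune_cache_entries() simply applies this to every non-empty bucket under its lock. A reduced sketch of that policy (an array stands in for the list, a plain counter for jiffies, and the RC_EXPIRE value is made up for the example):

#include <stdbool.h>
#include <stdio.h>

enum rc_state { RC_UNUSED, RC_INPROG, RC_DONE };

#define RC_EXPIRE	120		/* illustrative expiry window */

struct entry {
	enum rc_state state;
	unsigned long timestamp;	/* "time" the entry was last touched */
	bool freed;
};

static unsigned int prune_bucket(struct entry *lru, unsigned int n,
				 unsigned int *num_entries,
				 unsigned int max_entries, unsigned long now)
{
	unsigned int freed = 0;

	for (unsigned int i = 0; i < n; i++) {	/* oldest first */
		if (lru[i].freed || lru[i].state == RC_INPROG)
			continue;		/* keep scanning the list */
		if (*num_entries <= max_entries &&
		    now < lru[i].timestamp + RC_EXPIRE)
			break;			/* fresh and under the limit */
		lru[i].freed = true;
		(*num_entries)--;
		freed++;
	}
	return freed;
}

int main(void)
{
	struct entry lru[3] = {
		{ RC_DONE,     0, false },	/* expired: freed */
		{ RC_INPROG,  10, false },	/* in progress: kept */
		{ RC_DONE,   500, false },	/* still fresh: walk stops here */
	};
	unsigned int num_entries = 3;

	printf("freed %u\n", prune_bucket(lru, 3, &num_entries, 1000, 600));
	return 0;
}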
297 struct xdr_buf *buf = &rqstp->rq_arg; in nfsd_cache_csum()
298 const unsigned char *p = buf->head[0].iov_base; in nfsd_cache_csum()
299 size_t csum_len = min_t(size_t, buf->head[0].iov_len + buf->page_len, in nfsd_cache_csum()
301 size_t len = min(buf->head[0].iov_len, csum_len); in nfsd_cache_csum()
305 csum_len -= len; in nfsd_cache_csum()
308 idx = buf->page_base / PAGE_SIZE; in nfsd_cache_csum()
309 base = buf->page_base & ~PAGE_MASK; in nfsd_cache_csum()
311 p = page_address(buf->pages[idx]) + base; in nfsd_cache_csum()
312 len = min_t(size_t, PAGE_SIZE - base, csum_len); in nfsd_cache_csum()
314 csum_len -= len; in nfsd_cache_csum()
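nfsd_cache_csum() checksums only a prefix of the request: first as much as fits in the head kvec, then however much of the page data is still needed to reach the prefix length. The sketch below mirrors that split in userspace; the 256-byte prefix, the single contiguous page buffer, and the trivial additive checksum are stand-ins for RC_CSUMLEN, the page array walk, and csum_partial().

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define CSUM_PREFIX	256	/* stand-in for RC_CSUMLEN */

static uint32_t csum_add(uint32_t csum, const unsigned char *p, size_t len)
{
	while (len--)
		csum += *p++;
	return csum;
}

static uint32_t request_csum(const unsigned char *head, size_t head_len,
			     const unsigned char *pages, size_t page_len)
{
	size_t csum_len = head_len + page_len;
	uint32_t csum = 0;
	size_t len;

	if (csum_len > CSUM_PREFIX)
		csum_len = CSUM_PREFIX;

	/* first the head kvec ... */
	len = head_len < csum_len ? head_len : csum_len;
	csum = csum_add(csum, head, len);
	csum_len -= len;

	/* ... then only as much page data as is still needed */
	if (csum_len)
		csum = csum_add(csum, pages, csum_len);
	return csum;
}

int main(void)
{
	unsigned char head[100], pages[400];

	memset(head, 0xab, sizeof(head));
	memset(pages, 0xcd, sizeof(pages));
	printf("csum over first %u bytes: %u\n", CSUM_PREFIX,
	       request_csum(head, sizeof(head), pages, sizeof(pages)));
	return 0;
}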
325 if (key->c_key.k_xid == rp->c_key.k_xid && in nfsd_cache_key_cmp()
326 key->c_key.k_csum != rp->c_key.k_csum) { in nfsd_cache_key_cmp()
327 ++nn->payload_misses; in nfsd_cache_key_cmp()
331 return memcmp(&key->c_key, &rp->c_key, sizeof(key->c_key)); in nfsd_cache_key_cmp()
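nfsd_cache_key_cmp() decides the match with a memcmp() over the whole key, but it also counts a payload miss whenever the XID matches while the checksum does not, i.e. the client reused an XID for a different request. A reduced sketch of that accounting (the two-field key is a stand-in for the full key built in nfsd_reply_cache_alloc()):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct drc_key {
	uint32_t xid;
	uint32_t csum;
	/* the real key also holds address, port, protocol, version, length */
};

static unsigned long payload_misses;

static int key_cmp(const struct drc_key *key, const struct drc_key *rp)
{
	if (key->xid == rp->xid && key->csum != rp->csum)
		payload_misses++;	/* same XID, different request body */
	return memcmp(key, rp, sizeof(*key));
}

int main(void)
{
	struct drc_key a = { .xid = 7, .csum = 1 };
	struct drc_key b = { .xid = 7, .csum = 2 };

	printf("match=%d payload_misses=%lu\n", key_cmp(&a, &b) == 0,
	       payload_misses);
	return 0;
}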
344 struct rb_node **p = &b->rb_head.rb_node, in nfsd_cache_insert()
356 p = &parent->rb_left; in nfsd_cache_insert()
358 p = &parent->rb_right; in nfsd_cache_insert()
364 rb_link_node(&key->c_node, parent, p); in nfsd_cache_insert()
365 rb_insert_color(&key->c_node, &b->rb_head); in nfsd_cache_insert()
368 if (entries > nn->longest_chain) { in nfsd_cache_insert()
369 nn->longest_chain = entries; in nfsd_cache_insert()
370 nn->longest_chain_cachesize = atomic_read(&nn->num_drc_entries); in nfsd_cache_insert()
371 } else if (entries == nn->longest_chain) { in nfsd_cache_insert()
373 nn->longest_chain_cachesize = min_t(unsigned int, in nfsd_cache_insert()
374 nn->longest_chain_cachesize, in nfsd_cache_insert()
375 atomic_read(&nn->num_drc_entries)); in nfsd_cache_insert()
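nfsd_cache_insert() walks down the bucket's tree comparing keys: a match returns the existing entry (a hit), otherwise the new node is linked at the leaf it fell off of, and the number of nodes visited feeds the longest-chain statistics above. The sketch below shows the same descend-and-link pattern, with a plain unbalanced binary search tree and an integer key standing in for the red-black tree and nfsd_cache_key_cmp():

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

struct node {
	int key;
	struct node *left, *right;
};

static struct node *tree_insert(struct node **root, struct node *new,
				unsigned int *entries)
{
	struct node **p = root, *parent = NULL;

	*entries = 0;
	while (*p) {
		++*entries;			/* chain length seen so far */
		parent = *p;
		if (new->key < parent->key)
			p = &parent->left;
		else if (new->key > parent->key)
			p = &parent->right;
		else
			return parent;		/* duplicate key: cache hit */
	}
	*p = new;				/* link the new leaf */
	return NULL;				/* miss: the new entry stays */
}

int main(void)
{
	struct node *root = NULL;
	int keys[] = { 5, 2, 8, 2 };

	for (size_t i = 0; i < sizeof(keys) / sizeof(keys[0]); i++) {
		struct node *n = calloc(1, sizeof(*n));
		unsigned int entries;

		if (!n)
			return 1;
		n->key = keys[i];
		if (tree_insert(&root, n, &entries)) {
			printf("hit %d after visiting %u nodes\n",
			       keys[i], entries);
			free(n);		/* the existing entry wins */
		}
	}
	return 0;
}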
383 * nfsd_cache_lookup - Find an entry in the duplicate reply cache
401 __be32 xid = rqstp->rq_xid; in nfsd_cache_lookup()
404 struct nfsd_drc_bucket *b = &nn->drc_hashtbl[hash]; in nfsd_cache_lookup()
405 int type = rqstp->rq_cachetype; in nfsd_cache_lookup()
408 rqstp->rq_cacherep = NULL; in nfsd_cache_lookup()
424 spin_lock(&b->cache_lock); in nfsd_cache_lookup()
433 rqstp->rq_cacherep = rp; in nfsd_cache_lookup()
434 rp->c_state = RC_INPROG; in nfsd_cache_lookup()
436 atomic_inc(&nn->num_drc_entries); in nfsd_cache_lookup()
437 nn->drc_mem_usage += sizeof(*rp); in nfsd_cache_lookup()
443 spin_unlock(&b->cache_lock); in nfsd_cache_lookup()
453 if (rp->c_state == RC_INPROG) in nfsd_cache_lookup()
456 /* From the hall of fame of impractical attacks: in nfsd_cache_lookup()
459 if (!test_bit(RQ_SECURE, &rqstp->rq_flags) && rp->c_secure) in nfsd_cache_lookup()
463 switch (rp->c_type) { in nfsd_cache_lookup()
467 svc_putu32(&rqstp->rq_res.head[0], rp->c_replstat); in nfsd_cache_lookup()
471 if (!nfsd_cache_append(rqstp, &rp->c_replvec)) in nfsd_cache_lookup()
476 WARN_ONCE(1, "nfsd: bad repcache type %d\n", rp->c_type); in nfsd_cache_lookup()
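Putting the lookup together: a miss inserts a fresh entry in RC_INPROG state and tells the caller to execute the call; a hit on an entry that is still in progress drops the retransmission; a completed entry is replayed from its cached status word or reply buffer. A reduced sketch of that decision flow (the result names and the default-to-drop choice are local stand-ins, not the kernel's constants):

#include <stdio.h>

enum rc_state { RC_UNUSED, RC_INPROG, RC_DONE };
enum rc_type  { RC_NOCACHE, RC_REPLSTAT, RC_REPLBUFF };
enum lookup_result { DO_IT, DROP_IT, REPLY_FROM_CACHE };

struct cache_entry {
	enum rc_state state;
	enum rc_type type;
};

static enum lookup_result cache_lookup(struct cache_entry *found,
				       struct cache_entry *new)
{
	if (!found) {			/* miss: claim the slot */
		new->state = RC_INPROG;
		return DO_IT;		/* caller executes the call */
	}
	if (found->state == RC_INPROG)	/* original call still running */
		return DROP_IT;		/* silently drop the retransmission */

	switch (found->type) {
	case RC_REPLSTAT:		/* replay the cached status word */
	case RC_REPLBUFF:		/* replay the saved reply buffer */
		return REPLY_FROM_CACHE;
	default:			/* nothing usable was cached */
		return DROP_IT;
	}
}

int main(void)
{
	struct cache_entry fresh = { RC_UNUSED, RC_NOCACHE };
	struct cache_entry done  = { RC_DONE,   RC_REPLBUFF };

	printf("miss -> %d, replay -> %d\n",
	       cache_lookup(NULL, &fresh), cache_lookup(&done, &fresh));
	return 0;
}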
485 * nfsd_cache_update - Update an entry in the duplicate reply cache.
491 * executed and the complete reply is in rqstp->rq_res.
494 * the toplevel loop requires max-sized buffers, which would be a waste
507 struct svc_cacherep *rp = rqstp->rq_cacherep; in nfsd_cache_update()
508 struct kvec *resv = &rqstp->rq_res.head[0], *cachv; in nfsd_cache_update()
517 hash = nfsd_cache_hash(rp->c_key.k_xid, nn); in nfsd_cache_update()
518 b = &nn->drc_hashtbl[hash]; in nfsd_cache_update()
520 len = resv->iov_len - ((char *)statp - (char *)resv->iov_base); in nfsd_cache_update()
529 switch (cachetype) { in nfsd_cache_update()
533 rp->c_replstat = *statp; in nfsd_cache_update()
536 cachv = &rp->c_replvec; in nfsd_cache_update()
538 cachv->iov_base = kmalloc(bufsize, GFP_KERNEL); in nfsd_cache_update()
539 if (!cachv->iov_base) { in nfsd_cache_update()
543 cachv->iov_len = bufsize; in nfsd_cache_update()
544 memcpy(cachv->iov_base, statp, bufsize); in nfsd_cache_update()
550 spin_lock(&b->cache_lock); in nfsd_cache_update()
551 nn->drc_mem_usage += bufsize; in nfsd_cache_update()
553 rp->c_secure = test_bit(RQ_SECURE, &rqstp->rq_flags); in nfsd_cache_update()
554 rp->c_type = cachetype; in nfsd_cache_update()
555 rp->c_state = RC_DONE; in nfsd_cache_update()
556 spin_unlock(&b->cache_lock); in nfsd_cache_update()
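On the update side, the cached payload depends on the cache type: RC_REPLSTAT keeps just the status word, RC_REPLBUFF copies the reply bytes that follow the status word into a private buffer, and only then is the entry marked done. A reduced sketch of that step (types, sizes, and the error handling are stand-ins; the in-tree code releases the entry when the copy cannot be allocated):

#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

enum rc_type { RC_NOCACHE, RC_REPLSTAT, RC_REPLBUFF };

struct cache_entry {
	enum rc_type type;
	uint32_t replstat;	/* RC_REPLSTAT: cached status word */
	void *replbuf;		/* RC_REPLBUFF: private copy of the reply */
	size_t replbuf_len;
	int done;
};

static int cache_update(struct cache_entry *rp, enum rc_type type,
			const uint32_t *statp, size_t reply_len)
{
	switch (type) {
	case RC_NOCACHE:
		return 0;			/* nothing worth keeping */
	case RC_REPLSTAT:
		rp->replstat = *statp;
		break;
	case RC_REPLBUFF:
		rp->replbuf = malloc(reply_len);
		if (!rp->replbuf)
			return -1;		/* give up on caching */
		memcpy(rp->replbuf, statp, reply_len);
		rp->replbuf_len = reply_len;
		break;
	}
	rp->type = type;
	rp->done = 1;				/* visible to later lookups */
	return 0;
}

int main(void)
{
	struct cache_entry rp = { 0 };
	uint32_t reply[4] = { 0, 1, 2, 3 };	/* status word plus payload */

	cache_update(&rp, RC_REPLBUFF, reply, sizeof(reply));
	free(rp.replbuf);
	return 0;
}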
568 struct kvec *vec = &rqstp->rq_res.head[0]; in nfsd_cache_append()
570 if (vec->iov_len + data->iov_len > PAGE_SIZE) { in nfsd_cache_append()
572 data->iov_len); in nfsd_cache_append()
575 memcpy((char *)vec->iov_base + vec->iov_len, data->iov_base, data->iov_len); in nfsd_cache_append()
576 vec->iov_len += data->iov_len; in nfsd_cache_append()
587 struct nfsd_net *nn = m->private; in nfsd_reply_cache_stats_show()
589 seq_printf(m, "max entries: %u\n", nn->max_drc_entries); in nfsd_reply_cache_stats_show()
591 atomic_read(&nn->num_drc_entries)); in nfsd_reply_cache_stats_show()
592 seq_printf(m, "hash buckets: %u\n", 1 << nn->maskbits); in nfsd_reply_cache_stats_show()
593 seq_printf(m, "mem usage: %u\n", nn->drc_mem_usage); in nfsd_reply_cache_stats_show()
597 seq_printf(m, "payload misses: %u\n", nn->payload_misses); in nfsd_reply_cache_stats_show()
598 seq_printf(m, "longest chain len: %u\n", nn->longest_chain); in nfsd_reply_cache_stats_show()
599 seq_printf(m, "cachesize at longest: %u\n", nn->longest_chain_cachesize); in nfsd_reply_cache_stats_show()
605 struct nfsd_net *nn = net_generic(file_inode(file)->i_sb->s_fs_info, in nfsd_reply_cache_stats_open()