Lines matching refs: rp (fs/nfsd/nfscache.c — the NFSD duplicate reply cache; rp is the per-request cache entry, a struct svc_cacherep)
96 struct svc_cacherep *rp; in nfsd_reply_cache_alloc() local
98 rp = kmem_cache_alloc(nn->drc_slab, GFP_KERNEL); in nfsd_reply_cache_alloc()
99 if (rp) { in nfsd_reply_cache_alloc()
100 rp->c_state = RC_UNUSED; in nfsd_reply_cache_alloc()
101 rp->c_type = RC_NOCACHE; in nfsd_reply_cache_alloc()
102 RB_CLEAR_NODE(&rp->c_node); in nfsd_reply_cache_alloc()
103 INIT_LIST_HEAD(&rp->c_lru); in nfsd_reply_cache_alloc()
105 memset(&rp->c_key, 0, sizeof(rp->c_key)); in nfsd_reply_cache_alloc()
106 rp->c_key.k_xid = rqstp->rq_xid; in nfsd_reply_cache_alloc()
107 rp->c_key.k_proc = rqstp->rq_proc; in nfsd_reply_cache_alloc()
108 rpc_copy_addr((struct sockaddr *)&rp->c_key.k_addr, svc_addr(rqstp)); in nfsd_reply_cache_alloc()
109 rpc_set_port((struct sockaddr *)&rp->c_key.k_addr, rpc_get_port(svc_addr(rqstp))); in nfsd_reply_cache_alloc()
110 rp->c_key.k_prot = rqstp->rq_prot; in nfsd_reply_cache_alloc()
111 rp->c_key.k_vers = rqstp->rq_vers; in nfsd_reply_cache_alloc()
112 rp->c_key.k_len = rqstp->rq_arg.len; in nfsd_reply_cache_alloc()
113 rp->c_key.k_csum = csum; in nfsd_reply_cache_alloc()
115 return rp; in nfsd_reply_cache_alloc()
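The allocation above fills in a composite key: every field that identifies a retransmitted RPC (XID, procedure, client address and port, transport protocol, version, argument length, payload checksum) goes into one flat struct that is zeroed before the fields are set, so the padding bytes stay stable for a later memcmp(). Below is a minimal user-space sketch of that layout; all names (drc_entry, drc_key, and friends) are illustrative stand-ins for the kernel's svc_cacherep, not its actual definitions.

    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>
    #include <time.h>

    enum drc_state { DRC_UNUSED, DRC_INPROG, DRC_DONE };        /* like RC_* */
    enum drc_type  { DRC_NOCACHE, DRC_REPLSTAT, DRC_REPLBUFF };

    struct drc_key {
        uint32_t xid, proc, vers, len, csum;  /* RPC identity + payload csum */
        uint32_t addr;                        /* client address, simplified */
        uint16_t port, prot;                  /* source port, transport */
    };

    struct drc_entry {
        enum drc_state state;
        enum drc_type type;
        struct drc_entry *lru_prev, *lru_next; /* bucket LRU ring */
        struct drc_entry *left, *right;        /* search-tree links */
        time_t timestamp;
        struct drc_key key;
        uint32_t replstat;                     /* one-word status reply */
        void *reply;                           /* cached reply buffer */
        size_t reply_len;
    };

    static struct drc_entry *drc_alloc(uint32_t xid, uint32_t proc,
                                       uint32_t addr, uint16_t port,
                                       uint16_t prot, uint32_t vers,
                                       uint32_t len, uint32_t csum)
    {
        struct drc_entry *e = calloc(1, sizeof(*e));

        if (!e)
            return NULL;
        e->state = DRC_UNUSED;               /* not yet on tree or LRU */
        e->type  = DRC_NOCACHE;
        e->lru_prev = e->lru_next = e;       /* self-linked: list ops stay safe */
        /* redundant after calloc(), kept to mirror the kernel's memset of c_key */
        memset(&e->key, 0, sizeof(e->key));
        e->key.xid  = xid;  e->key.proc = proc;
        e->key.addr = addr; e->key.port = port;
        e->key.prot = prot; e->key.vers = vers;
        e->key.len  = len;  e->key.csum = csum;
        return e;
    }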
119 nfsd_reply_cache_free_locked(struct nfsd_drc_bucket *b, struct svc_cacherep *rp, in nfsd_reply_cache_free_locked() argument
122 if (rp->c_type == RC_REPLBUFF && rp->c_replvec.iov_base) { in nfsd_reply_cache_free_locked()
123 nn->drc_mem_usage -= rp->c_replvec.iov_len; in nfsd_reply_cache_free_locked()
124 kfree(rp->c_replvec.iov_base); in nfsd_reply_cache_free_locked()
126 if (rp->c_state != RC_UNUSED) { in nfsd_reply_cache_free_locked()
127 rb_erase(&rp->c_node, &b->rb_head); in nfsd_reply_cache_free_locked()
128 list_del(&rp->c_lru); in nfsd_reply_cache_free_locked()
130 nn->drc_mem_usage -= sizeof(*rp); in nfsd_reply_cache_free_locked()
132 kmem_cache_free(nn->drc_slab, rp); in nfsd_reply_cache_free_locked()
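The free path above carries one subtlety: an entry that never left RC_UNUSED was never linked into the rb-tree or the LRU, so the unlink and the per-entry accounting are conditional, while an attached reply buffer is released whenever present. A sketch with the same shape, reusing drc_entry from the sketch above (tree removal is elided here):

    static void drc_free(struct drc_entry *e, size_t *mem_usage)
    {
        if (e->type == DRC_REPLBUFF && e->reply) {
            *mem_usage -= e->reply_len;       /* debit the stored reply */
            free(e->reply);
        }
        if (e->state != DRC_UNUSED) {         /* only linked entries unlink */
            e->lru_prev->lru_next = e->lru_next;
            e->lru_next->lru_prev = e->lru_prev;
            *mem_usage -= sizeof(*e);         /* counted only once inserted */
        }
        free(e);
    }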
136 nfsd_reply_cache_free(struct nfsd_drc_bucket *b, struct svc_cacherep *rp, in nfsd_reply_cache_free() argument
140 nfsd_reply_cache_free_locked(b, rp, nn); in nfsd_reply_cache_free()
194 struct svc_cacherep *rp; in nfsd_reply_cache_shutdown() local
202 rp = list_first_entry(head, struct svc_cacherep, c_lru); in nfsd_reply_cache_shutdown()
204 rp, nn); in nfsd_reply_cache_shutdown()
221 lru_put_end(struct nfsd_drc_bucket *b, struct svc_cacherep *rp) in lru_put_end() argument
223 rp->c_timestamp = jiffies; in lru_put_end()
224 list_move_tail(&rp->c_lru, &b->lru_head); in lru_put_end()
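lru_put_end() is the LRU "touch": stamp the entry with the current time and move it to the tail of the bucket's list, so the head always points at the oldest entry. The same operation on the illustrative ring above; because entries start self-linked, the unlink half is safe even before the first insertion.

    static void drc_lru_touch(struct drc_entry *e, struct drc_entry *head)
    {
        e->timestamp = time(NULL);            /* stands in for jiffies */
        e->lru_prev->lru_next = e->lru_next;  /* unlink ... */
        e->lru_next->lru_prev = e->lru_prev;
        e->lru_prev = head->lru_prev;         /* ... relink before the sentinel */
        e->lru_next = head;
        head->lru_prev->lru_next = e;
        head->lru_prev = e;                   /* i.e. list_move_tail() */
    }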
230 struct svc_cacherep *rp, *tmp; in prune_bucket() local
233 list_for_each_entry_safe(rp, tmp, &b->lru_head, c_lru) { in prune_bucket()
238 if (rp->c_state == RC_INPROG) in prune_bucket()
241 time_before(jiffies, rp->c_timestamp + RC_EXPIRE)) in prune_bucket()
243 nfsd_reply_cache_free_locked(b, rp, nn); in prune_bucket()
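prune_bucket() walks that LRU oldest-first, skips entries whose reply is still being computed (RC_INPROG), and stops at the first entry inside the expiry window, since everything behind it on the list is newer. A sketch of the same walk; DRC_EXPIRE_SECS is an assumed stand-in for RC_EXPIRE, and the kernel's companion entry-count cap (the condition continued onto line 241 above) is omitted.

    #define DRC_EXPIRE_SECS 120               /* assumed expiry window */

    static unsigned int drc_prune(struct drc_entry *head, size_t *mem_usage)
    {
        time_t now = time(NULL);
        unsigned int freed = 0;
        struct drc_entry *e = head->lru_next;

        while (e != head) {
            struct drc_entry *next = e->lru_next;

            if (e->state == DRC_INPROG) {     /* reply not stored yet: skip */
                e = next;
                continue;
            }
            if (now < e->timestamp + DRC_EXPIRE_SECS)
                break;                        /* the rest is newer: stop */
            drc_free(e, mem_usage);           /* unlinks, then frees */
            freed++;
            e = next;
        }
        return freed;
    }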
323 const struct svc_cacherep *rp, struct nfsd_net *nn) in nfsd_cache_key_cmp() argument
325 if (key->c_key.k_xid == rp->c_key.k_xid && in nfsd_cache_key_cmp()
326 key->c_key.k_csum != rp->c_key.k_csum) in nfsd_cache_key_cmp()
329 return memcmp(&key->c_key, &rp->c_key, sizeof(key->c_key)); in nfsd_cache_key_cmp()
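nfsd_cache_key_cmp() is a two-step comparison: a matching XID with a differing checksum is noted specially (the kernel counts it as a payload miss, i.e. the client reused a transaction id for a different request), while the actual ordering comes from one memcmp() over the whole zero-padded key. The same shape on the sketch key:

    static int drc_key_cmp(const struct drc_key *a, const struct drc_key *b,
                           unsigned long *payload_misses)
    {
        if (a->xid == b->xid && a->csum != b->csum)
            ++*payload_misses;                /* same XID, different payload */

        /* one flat compare works because both keys were zeroed before
         * their fields were filled in, so padding bytes match */
        return memcmp(a, b, sizeof(*a));
    }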
341 struct svc_cacherep *rp, *ret = key; in nfsd_cache_insert() local
350 rp = rb_entry(parent, struct svc_cacherep, c_node); in nfsd_cache_insert()
352 cmp = nfsd_cache_key_cmp(key, rp, nn); in nfsd_cache_insert()
358 ret = rp; in nfsd_cache_insert()
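nfsd_cache_insert() descends the bucket's red-black tree comparing keys; on equality it returns the entry already in the tree so the caller can detect the duplicate, otherwise it links the new node at the leaf it reached. A plain unbalanced BST keeps the sketch short, where the kernel rebalances after linking:

    static struct drc_entry *drc_insert(struct drc_entry **root,
                                        struct drc_entry *new,
                                        unsigned long *payload_misses)
    {
        struct drc_entry **p = root;

        while (*p) {
            int cmp = drc_key_cmp(&new->key, &(*p)->key, payload_misses);

            if (cmp < 0)
                p = &(*p)->left;
            else if (cmp > 0)
                p = &(*p)->right;
            else
                return *p;    /* already cached: hand back the old entry */
        }
        *p = new;             /* left/right are NULL from calloc() */
        return new;
    }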
391 struct svc_cacherep *rp, *found; in nfsd_cache_lookup() local
411 rp = nfsd_reply_cache_alloc(rqstp, csum, nn); in nfsd_cache_lookup()
412 if (!rp) { in nfsd_cache_lookup()
418 found = nfsd_cache_insert(b, rp, nn); in nfsd_cache_lookup()
419 if (found != rp) { in nfsd_cache_lookup()
420 nfsd_reply_cache_free_locked(NULL, rp, nn); in nfsd_cache_lookup()
421 rp = found; in nfsd_cache_lookup()
426 rqstp->rq_cacherep = rp; in nfsd_cache_lookup()
427 rp->c_state = RC_INPROG; in nfsd_cache_lookup()
430 nn->drc_mem_usage += sizeof(*rp); in nfsd_cache_lookup()
444 if (rp->c_state == RC_INPROG) in nfsd_cache_lookup()
450 if (!test_bit(RQ_SECURE, &rqstp->rq_flags) && rp->c_secure) in nfsd_cache_lookup()
454 switch (rp->c_type) { in nfsd_cache_lookup()
458 svc_putu32(&rqstp->rq_res.head[0], rp->c_replstat); in nfsd_cache_lookup()
462 if (!nfsd_cache_append(rqstp, &rp->c_replvec)) in nfsd_cache_lookup()
467 printk(KERN_WARNING "nfsd: bad repcache type %d\n", rp->c_type); in nfsd_cache_lookup()
468 nfsd_reply_cache_free_locked(b, rp, nn); in nfsd_cache_lookup()
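Putting the lookup path together: the entry is allocated before the bucket is searched, then inserted; losing the race (found != rp) means a duplicate request, and what happens next depends on the old entry's state. Still RC_INPROG means drop the request, since the first copy will produce the answer; otherwise the cached status or buffer is replayed. A sketch of that control flow, with return names that mirror the kernel's RC_DOIT/RC_DROPIT/RC_REPLY but are defined here only for the sketch:

    enum { DRC_DOIT, DRC_DROPIT, DRC_REPLY };

    static int drc_lookup(struct drc_entry **root, struct drc_entry *lru_head,
                          struct drc_entry *new, size_t *mem_usage,
                          unsigned long *payload_misses)
    {
        struct drc_entry *found = drc_insert(root, new, payload_misses);

        if (found != new) {
            free(new);                        /* never linked: plain free */
            if (found->state == DRC_INPROG)
                return DRC_DROPIT;            /* original still in flight */
            return DRC_REPLY;                 /* replay the stored reply */
        }
        new->state = DRC_INPROG;              /* miss: caller builds the reply */
        drc_lru_touch(new, lru_head);         /* queue it, like lru_put_end() */
        *mem_usage += sizeof(*new);           /* counted once inserted */
        return DRC_DOIT;
    }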
494 struct svc_cacherep *rp = rqstp->rq_cacherep; in nfsd_cache_update() local
501 if (!rp) in nfsd_cache_update()
504 hash = nfsd_cache_hash(rp->c_key.k_xid, nn); in nfsd_cache_update()
512 nfsd_reply_cache_free(b, rp, nn); in nfsd_cache_update()
520 rp->c_replstat = *statp; in nfsd_cache_update()
523 cachv = &rp->c_replvec; in nfsd_cache_update()
527 nfsd_reply_cache_free(b, rp, nn); in nfsd_cache_update()
534 nfsd_reply_cache_free(b, rp, nn); in nfsd_cache_update()
539 lru_put_end(b, rp); in nfsd_cache_update()
540 rp->c_secure = test_bit(RQ_SECURE, &rqstp->rq_flags); in nfsd_cache_update()
541 rp->c_type = cachetype; in nfsd_cache_update()
542 rp->c_state = RC_DONE; in nfsd_cache_update()
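nfsd_cache_update() runs once the reply exists: an uncacheable result just frees the entry, a one-word status is stored inline (RC_REPLSTAT), and a full reply is copied into its own buffer (RC_REPLBUFF), with a failed copy also dropping the entry. Only then does the entry move to the LRU tail and flip to RC_DONE so later duplicates can replay it. The same sequence on the sketch types (the kernel's extra zero-length check is omitted):

    static void drc_update(struct drc_entry *e, struct drc_entry *lru_head,
                           enum drc_type type, uint32_t status,
                           const void *reply, size_t len, size_t *mem_usage)
    {
        if (type == DRC_NOCACHE) {
            drc_free(e, mem_usage);           /* nothing worth replaying */
            return;
        }
        if (type == DRC_REPLSTAT) {
            e->replstat = status;             /* one word, stored inline */
        } else {                              /* DRC_REPLBUFF */
            e->reply = malloc(len);
            if (!e->reply) {                  /* no room to keep the reply */
                drc_free(e, mem_usage);
                return;
            }
            memcpy(e->reply, reply, len);
            e->reply_len = len;
            *mem_usage += len;                /* credit the stored buffer */
        }
        drc_lru_touch(e, lru_head);           /* like lru_put_end() */
        e->type = type;
        e->state = DRC_DONE;                  /* duplicates can now replay */
    }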