Excerpts from the Linux kernel's net/ipv4/inetpeer.c — lines matching "total" and "timeout".
/*
 *	INETPEER - A storage for permanent information about peers
 *
 *	We keep one entry for each peer IP address. Each node contains
 *	long-living information about the peer which doesn't depend on
 *	routes. A node may be removed once its reference count reaches zero
 *	and sufficient time has passed since its last use. Less-recently-used
 *	entries can also be removed if the pool is overloaded, i.e. if the
 *	total number of entries is greater than or equal to the threshold.
 */
void inet_peer_base_init(struct inet_peer_base *bp)
{
        bp->rb_root = RB_ROOT;
        seqlock_init(&bp->lock);
        bp->total = 0;
}
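The three assignments above imply the shape of the per-base bookkeeping. Here is a minimal sketch of what inet_peer_base evidently contains, with userspace stand-ins for the kernel types so the snippet compiles on its own — the real definitions live in the kernel headers, and the stand-ins are illustrative only:

/* Stand-ins for the kernel types, for illustration only */
struct rb_root { void *rb_node; };                    /* fake <linux/rbtree.h> */
typedef struct { unsigned int sequence; } seqlock_t;  /* fake <linux/seqlock.h> */

/* Hedged sketch: fields inferred from inet_peer_base_init() above */
struct inet_peer_base {
        struct rb_root rb_root;  /* RB tree of peers, keyed by address */
        seqlock_t      lock;     /* writers serialize; readers retry on change */
        int            total;    /* entry count, checked against the GC threshold */
};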
/* From lookup() - called with rcu_read_lock() or base->lock held: */

        pp = &base->rb_root.rb_node;
        /* ... descend the RB tree ... */
        cmp = inetpeer_addr_cmp(daddr, &p->daddr);
        /* on a hit, refresh the stamp the garbage collector uses for aging */
        if (READ_ONCE(p->dtime) != now)
                WRITE_ONCE(p->dtime, now);
        /* ... */
        } else if (unlikely(read_seqretry(&base->lock, seq))) {
                break;  /* a writer changed the tree under us: bail out */
        }
        if (cmp == -1)
                pp = &next->rb_left;
        else
                pp = &next->rb_right;
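The read_seqbegin()/read_seqretry() pair is what lets this walk run without taking any lock. Below is a compact userspace model of that read-side protocol using C11 atomics; the names carry a _sketch suffix to make clear this is an analogue, not the kernel API:

#include <stdatomic.h>
#include <stdbool.h>

static _Atomic unsigned int seqcount;   /* even = quiescent, odd = writer active */

static unsigned int read_seqbegin_sketch(void)
{
        unsigned int seq;

        /* wait until no writer is mid-update (count is even) */
        while ((seq = atomic_load_explicit(&seqcount, memory_order_acquire)) & 1)
                ;
        return seq;
}

static bool read_seqretry_sketch(unsigned int seq)
{
        /* true: a writer ran since read_seqbegin_sketch(); redo the work */
        return atomic_load_explicit(&seqcount, memory_order_acquire) != seq;
}

static void write_seqlock_sketch(void)   /* caller provides mutual exclusion */
{
        atomic_fetch_add_explicit(&seqcount, 1, memory_order_release); /* -> odd */
}

static void write_sequnlock_sketch(void)
{
        atomic_fetch_add_explicit(&seqcount, 1, memory_order_release); /* -> even */
}

Note that lookup() uses the retry signal as a bail-out rather than a retry loop: on contention it simply gives up, and inet_getpeer() repeats the lookup under the write lock.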
/* From inet_peer_gc() - entries aged out under the write lock: */

        if (base->total >= peer_threshold)
                ttl = 0;        /* pool is full: be aggressive */
        else
                ttl = peer_maxttl - (peer_maxttl - peer_minttl) / HZ *
                        base->total / peer_threshold * HZ;
        /* ... for each candidate collected during lookup() ... */
        delta = (__u32)jiffies - READ_ONCE(p->dtime);
        if (delta < ttl || !refcount_dec_if_one(&p->refcnt))
                /* ... fresh, or still referenced: the entry is kept ... */;
        /* ... expired entries are unlinked and freed after a grace period: */
        rb_erase(&p->rb_node, &base->rb_root);
        base->total--;
        call_rcu(&p->rcu, inetpeer_free_rcu);
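The ttl expression linearly interpolates between the two limits as the pool fills, and its odd-looking "/ HZ ... * HZ" grouping keeps the intermediate product from overflowing an int. A small standalone program to see the numbers — HZ and the sysctl values are illustrative (they roughly match long-standing defaults, but treat them as assumptions):

#include <stdio.h>

#define HZ 1000                         /* illustrative tick rate */

int main(void)
{
        int peer_threshold = 65536;     /* assumed inet_peer_threshold */
        int peer_minttl = 120 * HZ;     /* assumed minimum: 2 minutes */
        int peer_maxttl = 600 * HZ;     /* assumed maximum: 10 minutes */
        int totals[] = { 0, 16384, 32768, 65535 };

        for (int i = 0; i < 4; i++) {
                int total = totals[i];
                /* same expression as inet_peer_gc(); dividing by HZ before
                 * multiplying by total keeps the product within int range */
                int ttl = peer_maxttl - (peer_maxttl - peer_minttl) / HZ *
                        total / peer_threshold * HZ;

                printf("total=%5d -> ttl=%6d jiffies (%3d s)\n",
                       total, ttl, ttl / HZ);
        }
        return 0;
}

With these values the TTL falls linearly from 600 s for an empty pool toward 120 s as total approaches the threshold, and drops to zero outright once the threshold is reached.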
/* From inet_getpeer(): */

        /* first pass: lockless lookup under a seqlock read section */
        seq = read_seqbegin(&base->lock);
        /* ... on a miss, retry and insert under the write lock ... */
        write_seqlock_bh(&base->lock);
        /* ... initialize a freshly allocated entry: */
        p->daddr = *daddr;
        p->dtime = (__u32)jiffies;
        refcount_set(&p->refcnt, 1);
        atomic_set(&p->rid, 0);
        p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
        p->rate_tokens = 0;
        p->n_redirects = 0;
        /* back-date rate_last so the first token calculation starts at
         * its maximum burst; 60*HZ is arbitrary but high enough */
        p->rate_last = jiffies - 60*HZ;

        rb_link_node(&p->rb_node, parent, pp);
        rb_insert_color(&p->rb_node, &base->rb_root);
        base->total++;
        /* ... */
        write_sequnlock_bh(&base->lock);
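inet_getpeer() thus follows a classic two-phase shape: an optimistic lockless lookup, then a locked re-lookup plus insert when the fast path misses, so concurrent callers cannot create duplicates. Here is a userspace sketch of that control flow, using an insert-only list with C11 atomics in place of the RCU-protected RB tree; all names are invented for illustration:

#include <pthread.h>
#include <stdatomic.h>
#include <stdlib.h>

struct node {
        int key;
        _Atomic(struct node *) next;
};

static _Atomic(struct node *) head;
static pthread_mutex_t base_lock = PTHREAD_MUTEX_INITIALIZER;

static struct node *find(int key)
{
        for (struct node *n = atomic_load_explicit(&head, memory_order_acquire);
             n; n = atomic_load_explicit(&n->next, memory_order_acquire))
                if (n->key == key)
                        return n;
        return NULL;
}

static struct node *getpeer_sketch(int key)
{
        struct node *n = find(key);             /* fast path: no lock taken */

        if (n)
                return n;
        pthread_mutex_lock(&base_lock);         /* slow path */
        n = find(key);                          /* re-check: we may have raced */
        if (!n && (n = malloc(sizeof(*n))) != NULL) {
                n->key = key;
                atomic_init(&n->next, atomic_load(&head));
                /* release store publishes the fully initialized node */
                atomic_store_explicit(&head, n, memory_order_release);
        }
        pthread_mutex_unlock(&base_lock);
        return n;
}

int main(void)
{
        /* the second call must find the node inserted by the first */
        return getpeer_sketch(42) == getpeer_sketch(42) ? 0 : 1;
}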
/* From inet_putpeer() - free via RCU once the last reference drops: */
        if (refcount_dec_and_test(&p->refcnt))
                call_rcu(&p->rcu, inetpeer_free_rcu);
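The put side pairs with the refcount_set(&p->refcnt, 1) at insertion: whoever drops the count to zero hands the node to RCU, so lockless readers that still hold a pointer are not freed out from under them. A userspace model of just the release pattern, with free() standing in for the RCU grace period:

#include <stdatomic.h>
#include <stdlib.h>

struct obj { atomic_int refcnt; };

static void put_obj(struct obj *p)
{
        /* fetch_sub returns the old value; 1 means we held the last ref */
        if (atomic_fetch_sub_explicit(&p->refcnt, 1, memory_order_acq_rel) == 1)
                free(p);        /* the kernel defers this via call_rcu() */
}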
 * The rate limiting information for one "ip object" is shared - and these
 * ICMPs are twice limited: by source and by destination.
bool inet_peer_xrlim_allow(struct inet_peer *peer, int timeout)
{
        unsigned long now = jiffies, token;
        bool rc = false;

        token = peer->rate_tokens;
        token += now - peer->rate_last;         /* credit tokens for idle time */
        peer->rate_last = now;
        if (token > XRLIM_BURST_FACTOR * timeout)
                token = XRLIM_BURST_FACTOR * timeout;   /* cap the burst */
        if (token >= timeout) {
                token -= timeout;               /* spend one quantum: allowed */
                rc = true;
        }
        peer->rate_tokens = token;
        return rc;
}
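The body above is a plain token bucket: idle time earns tokens, the balance is capped at XRLIM_BURST_FACTOR quanta, and each permitted message spends one timeout's worth. A standalone model to watch it run — the burst factor of 6 matches the constant I believe the kernel uses, but treat it as an assumption, and "now" is a parameter so the trace is deterministic:

#include <stdbool.h>
#include <stdio.h>

#define BURST_FACTOR 6          /* assumed value of XRLIM_BURST_FACTOR */

struct bucket { unsigned long rate_tokens, rate_last; };

static bool xrlim_allow_model(struct bucket *b, unsigned long now,
                              unsigned long timeout)
{
        unsigned long token = b->rate_tokens + (now - b->rate_last);
        bool rc = false;

        b->rate_last = now;
        if (token > BURST_FACTOR * timeout)
                token = BURST_FACTOR * timeout;   /* cap accumulated credit */
        if (token >= timeout) {
                token -= timeout;                 /* spend one quantum */
                rc = true;
        }
        b->rate_tokens = token;
        return rc;
}

int main(void)
{
        struct bucket b = { 0, 0 };

        /* with timeout=100, a message is allowed once per 100 ticks on
         * average, with bursts of up to 6 after a long idle stretch */
        for (unsigned long now = 0; now <= 500; now += 50)
                printf("t=%3lu allow=%d\n", now,
                       xrlim_allow_model(&b, now, 100));
        return 0;
}

Back-dating rate_last by 60*HZ at insertion (see inet_getpeer() above) makes a brand-new peer start with a full bucket.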
/* From inetpeer_invalidate_tree(): */
        struct rb_node *p = rb_first(&base->rb_root);
        /* ... walk the tree, unlinking and releasing each peer ... */
        rb_erase(&peer->rb_node, &base->rb_root);
        /* ... */
        base->total = 0;