• Home
  • Raw
  • Download

Lines Matching +full:total +full:- +full:timeout

2  *		INETPEER - A storage for permanent information about peers
28 * We keep one entry for each peer IP address. The nodes contain long-living
33 * time has passed since its last use. The less-recently-used entry can
34 * also be removed if the pool is overloaded, i.e. if the total number of
35 * entries is greater than or equal to the threshold.
59 bp->rb_root = RB_ROOT; in inet_peer_base_init()
60 seqlock_init(&bp->lock); in inet_peer_base_init()
61 bp->total = 0; in inet_peer_base_init()
82 * myself. --SAW in inet_initpeers()
97 /* Called with rcu_read_lock() or base->lock held */
109 pp = &base->rb_root.rb_node; in lookup()
119 cmp = inetpeer_addr_cmp(daddr, &p->daddr); in lookup()
121 if (!refcount_inc_not_zero(&p->refcnt)) in lookup()
128 } else if (unlikely(read_seqretry(&base->lock, seq))) { in lookup()
131 if (cmp == -1) in lookup()
132 pp = &next->rb_left; in lookup()
134 pp = &next->rb_right; in lookup()
160 if (base->total >= peer_threshold) in inet_peer_gc()
163 ttl = peer_maxttl - (peer_maxttl - peer_minttl) / HZ * in inet_peer_gc()
164 base->total / peer_threshold * HZ; in inet_peer_gc()
171 delta = (__u32)jiffies - READ_ONCE(p->dtime); in inet_peer_gc()
173 if (delta < ttl || !refcount_dec_if_one(&p->refcnt)) in inet_peer_gc()
179 rb_erase(&p->rb_node, &base->rb_root); in inet_peer_gc()
180 base->total--; in inet_peer_gc()
181 call_rcu(&p->rcu, inetpeer_free_rcu); in inet_peer_gc()
199 seq = read_seqbegin(&base->lock); in inet_getpeer()
201 invalidated = read_seqretry(&base->lock, seq); in inet_getpeer()
215 write_seqlock_bh(&base->lock); in inet_getpeer()
222 p->daddr = *daddr; in inet_getpeer()
223 p->dtime = (__u32)jiffies; in inet_getpeer()
224 refcount_set(&p->refcnt, 2); in inet_getpeer()
225 atomic_set(&p->rid, 0); in inet_getpeer()
226 p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW; in inet_getpeer()
227 p->rate_tokens = 0; in inet_getpeer()
228 p->n_redirects = 0; in inet_getpeer()
232 p->rate_last = jiffies - 60*HZ; in inet_getpeer()
234 rb_link_node(&p->rb_node, parent, pp); in inet_getpeer()
235 rb_insert_color(&p->rb_node, &base->rb_root); in inet_getpeer()
236 base->total++; in inet_getpeer()
241 write_sequnlock_bh(&base->lock); in inet_getpeer()
252 WRITE_ONCE(p->dtime, (__u32)jiffies); in inet_putpeer()
254 if (refcount_dec_and_test(&p->refcnt)) in inet_putpeer()
255 call_rcu(&p->rcu, inetpeer_free_rcu); in inet_putpeer()
268 * for one "ip object" is shared - and these ICMPs are twice limited:
277 bool inet_peer_xrlim_allow(struct inet_peer *peer, int timeout) in inet_peer_xrlim_allow() argument
285 token = peer->rate_tokens; in inet_peer_xrlim_allow()
287 token += now - peer->rate_last; in inet_peer_xrlim_allow()
288 peer->rate_last = now; in inet_peer_xrlim_allow()
289 if (token > XRLIM_BURST_FACTOR * timeout) in inet_peer_xrlim_allow()
290 token = XRLIM_BURST_FACTOR * timeout; in inet_peer_xrlim_allow()
291 if (token >= timeout) { in inet_peer_xrlim_allow()
292 token -= timeout; in inet_peer_xrlim_allow()
295 peer->rate_tokens = token; in inet_peer_xrlim_allow()
302 struct rb_node *p = rb_first(&base->rb_root); in inetpeer_invalidate_tree()
308 rb_erase(&peer->rb_node, &base->rb_root); in inetpeer_invalidate_tree()
313 base->total = 0; in inetpeer_invalidate_tree()