/*
 *		INETPEER - A storage for permanent information about peers
 *
 *  This source is covered by the GNU GPL, the same as all kernel sources.
 *
 *  Authors:	Andrey V. Savochkin <saw@msu.ru>
 */

#include <linux/cache.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/random.h>
#include <linux/timer.h>
#include <linux/time.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/net.h>
#include <linux/workqueue.h>
#include <net/ip.h>
#include <net/inetpeer.h>
#include <net/secure_seq.h>

/*
 *  Theory of operations.
 *  We keep one entry for each peer IP address.  Each node contains long-lived
 *  information about the peer which doesn't depend on routes.
 *
 *  Nodes are removed only when their reference counter goes to 0.
 *  Once that happens, a node may be removed when a sufficient amount of
 *  time has passed since its last use.  A less recently used entry can
 *  also be removed if the pool is overloaded, i.e. if the total number of
 *  entries is greater than or equal to the threshold.
 *
 *  The node pool is organised as an RB tree.
 *  Such an implementation has been chosen not just for fun.  It's a way to
 *  prevent easy and efficient DoS attacks by creating hash collisions.  A huge
 *  number of long-living nodes in a single hash slot would significantly delay
 *  lookups performed with disabled BHs.
 *
 *  Serialisation issues.
 *  1.  Nodes may appear in the tree only with the pool lock held.
 *  2.  Nodes may disappear from the tree only with the pool lock held
 *      AND the reference count being 0.
 *  3.  Global variable peer_total is modified under the pool lock.
 *  4.  struct inet_peer fields modification:
 *		rb_node: pool lock
 *		refcnt: atomically against modifications on other CPUs;
 *		   usually under some other lock to prevent node disappearing
 *		daddr: unchangeable
 */
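
/*
 *  Typical caller pattern (an illustrative sketch only; "base" and "ip"
 *  stand for the caller's pool and the peer's IPv4 address):
 *
 *	struct inetpeer_addr a;
 *	struct inet_peer *peer;
 *
 *	inetpeer_set_addr_v4(&a, ip);
 *	peer = inet_getpeer(base, &a, 1);	(takes a reference on success)
 *	if (peer) {
 *		... read or update long-lived per-peer state ...
 *		inet_putpeer(peer);		(drops the reference)
 *	}
 */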

static struct kmem_cache *peer_cachep __ro_after_init;

void inet_peer_base_init(struct inet_peer_base *bp)
{
	bp->rb_root = RB_ROOT;
	seqlock_init(&bp->lock);
	bp->total = 0;
}
EXPORT_SYMBOL_GPL(inet_peer_base_init);

#define PEER_MAX_GC 32

/* Exported for sysctl_net_ipv4.  */
int inet_peer_threshold __read_mostly = 65536 + 128;	/* start to throw entries more
					 * aggressively at this stage */
int inet_peer_minttl __read_mostly = 120 * HZ;	/* TTL under high load: 120 sec */
int inet_peer_maxttl __read_mostly = 10 * 60 * HZ;	/* usual time to live: 10 min */

/* Called from ip_output.c:ip_init  */
void __init inet_initpeers(void)
{
	struct sysinfo si;

	/* Use the straight interface to information about memory. */
	si_meminfo(&si);
	/* The values below were suggested by Alexey Kuznetsov
	 * <kuznet@ms2.inr.ac.ru>.  I don't have any opinion about the values
	 * myself.  --SAW
	 */
	if (si.totalram <= (32768*1024)/PAGE_SIZE)
		inet_peer_threshold >>= 1; /* max pool size about 1MB on IA32 */
	if (si.totalram <= (16384*1024)/PAGE_SIZE)
		inet_peer_threshold >>= 1; /* about 512KB */
	if (si.totalram <= (8192*1024)/PAGE_SIZE)
		inet_peer_threshold >>= 2; /* about 128KB */
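	/* The checks above are cumulative: the default threshold of
	 * 65536 + 128 entries drops to roughly 32832 on a <=32MB machine,
	 * 16416 on <=16MB, and 4104 on <=8MB.
	 */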

	peer_cachep = kmem_cache_create("inet_peer_cache",
			sizeof(struct inet_peer),
			0, SLAB_HWCACHE_ALIGN | SLAB_PANIC,
			NULL);
}

/* Called with rcu_read_lock() or base->lock held */
static struct inet_peer *lookup(const struct inetpeer_addr *daddr,
				struct inet_peer_base *base,
				unsigned int seq,
				struct inet_peer *gc_stack[],
				unsigned int *gc_cnt,
				struct rb_node **parent_p,
				struct rb_node ***pp_p)
{
	struct rb_node **pp, *parent, *next;
	struct inet_peer *p;

	pp = &base->rb_root.rb_node;
	parent = NULL;
	while (1) {
		int cmp;

		next = rcu_dereference_raw(*pp);
		if (!next)
			break;
		parent = next;
		p = rb_entry(parent, struct inet_peer, rb_node);
		cmp = inetpeer_addr_cmp(daddr, &p->daddr);
		if (cmp == 0) {
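			/* Found a match; give up if the node is already
			 * being freed (its refcount has dropped to zero).
			 */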
			if (!refcount_inc_not_zero(&p->refcnt))
				break;
			return p;
		}
		if (gc_stack) {
			if (*gc_cnt < PEER_MAX_GC)
				gc_stack[(*gc_cnt)++] = p;
		} else if (unlikely(read_seqretry(&base->lock, seq))) {
			break;
		}
		if (cmp == -1)
			pp = &next->rb_left;
		else
			pp = &next->rb_right;
	}
	*parent_p = parent;
	*pp_p = pp;
	return NULL;
}

static void inetpeer_free_rcu(struct rcu_head *head)
{
	kmem_cache_free(peer_cachep, container_of(head, struct inet_peer, rcu));
}

/* perform garbage collect on all items stacked during a lookup */
static void inet_peer_gc(struct inet_peer_base *base,
			 struct inet_peer *gc_stack[],
			 unsigned int gc_cnt)
{
	int peer_threshold, peer_maxttl, peer_minttl;
	struct inet_peer *p;
	__u32 delta, ttl;
	int i;

	peer_threshold = READ_ONCE(inet_peer_threshold);
	peer_maxttl = READ_ONCE(inet_peer_maxttl);
	peer_minttl = READ_ONCE(inet_peer_minttl);

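	/* Scale the tolerated idle time linearly with pool occupancy:
	 * peer_maxttl for an empty pool, down to peer_minttl once the pool
	 * reaches the threshold (e.g. a half-full pool gets roughly the
	 * midpoint of the two TTLs).
	 */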
	if (base->total >= peer_threshold)
		ttl = 0; /* be aggressive */
	else
		ttl = peer_maxttl - (peer_maxttl - peer_minttl) / HZ *
			base->total / peer_threshold * HZ;
	for (i = 0; i < gc_cnt; i++) {
		p = gc_stack[i];

		/* The READ_ONCE() pairs with the WRITE_ONCE()
		 * in inet_putpeer()
		 */
		delta = (__u32)jiffies - READ_ONCE(p->dtime);

		if (delta < ttl || !refcount_dec_if_one(&p->refcnt))
			gc_stack[i] = NULL;
	}
	for (i = 0; i < gc_cnt; i++) {
		p = gc_stack[i];
		if (p) {
			rb_erase(&p->rb_node, &base->rb_root);
			base->total--;
			call_rcu(&p->rcu, inetpeer_free_rcu);
		}
	}
}

struct inet_peer *inet_getpeer(struct inet_peer_base *base,
			       const struct inetpeer_addr *daddr,
			       int create)
{
	struct inet_peer *p, *gc_stack[PEER_MAX_GC];
	struct rb_node **pp, *parent;
	unsigned int gc_cnt, seq;
	int invalidated;

	/* Attempt a lockless lookup first.
	 * Because of a concurrent writer, we might not find an existing entry.
	 */
	rcu_read_lock();
	seq = read_seqbegin(&base->lock);
	p = lookup(daddr, base, seq, NULL, &gc_cnt, &parent, &pp);
	invalidated = read_seqretry(&base->lock, seq);
	rcu_read_unlock();

	if (p)
		return p;

	/* If no writer made a change during our lookup, we can return early. */
	if (!create && !invalidated)
		return NULL;

	/* Retry an exact lookup, taking the lock first.
	 * At least the nodes should be hot in our cache.
	 */
	parent = NULL;
	write_seqlock_bh(&base->lock);

	gc_cnt = 0;
	p = lookup(daddr, base, seq, gc_stack, &gc_cnt, &parent, &pp);
	if (!p && create) {
		p = kmem_cache_alloc(peer_cachep, GFP_ATOMIC);
		if (p) {
			p->daddr = *daddr;
			p->dtime = (__u32)jiffies;
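			/* Two references: one held by the tree, one handed
			 * back to the caller.
			 */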
			refcount_set(&p->refcnt, 2);
			atomic_set(&p->rid, 0);
			p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
			p->rate_tokens = 0;
			p->n_redirects = 0;
			/* 60*HZ is arbitrary, but chosen high enough so that
			 * the first calculation of tokens is at its maximum.
			 */
			p->rate_last = jiffies - 60*HZ;

			rb_link_node(&p->rb_node, parent, pp);
			rb_insert_color(&p->rb_node, &base->rb_root);
			base->total++;
		}
	}
	if (gc_cnt)
		inet_peer_gc(base, gc_stack, gc_cnt);
	write_sequnlock_bh(&base->lock);

	return p;
}
EXPORT_SYMBOL_GPL(inet_getpeer);

void inet_putpeer(struct inet_peer *p)
{
	/* The WRITE_ONCE() pairs with itself (we run lockless)
	 * and the READ_ONCE() in inet_peer_gc()
	 */
	WRITE_ONCE(p->dtime, (__u32)jiffies);

	if (refcount_dec_and_test(&p->refcnt))
		call_rcu(&p->rcu, inetpeer_free_rcu);
}
EXPORT_SYMBOL_GPL(inet_putpeer);

/*
 *	Check transmit rate limitation for a given message.
 *	The rate information is held in the inet_peer entries now.
 *	This function is generic and could be used for other purposes
 *	too. It uses a token bucket filter as suggested by Alexey Kuznetsov.
 *
 *	Note that the same inet_peer fields are modified by functions in
 *	route.c too, but these work for packet destinations while xrlim_allow
 *	works for icmp destinations. This means the rate limiting information
 *	for one "ip object" is shared - and these ICMPs are limited twice:
 *	by source and by destination.
 *
 *	RFC 1812: 4.3.2.8 SHOULD be able to limit error message rate
 *			  SHOULD allow setting of rate limits
 *
 * 	Shared between ICMPv4 and ICMPv6.
 */
#define XRLIM_BURST_FACTOR 6
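/* Tokens accrue at one per jiffy elapsed since ->rate_last and are capped
 * at XRLIM_BURST_FACTOR * timeout; each allowed message consumes "timeout"
 * tokens.  With timeout = HZ, a long-idle peer may send a burst of up to
 * six messages and then roughly one per second.
 */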
bool inet_peer_xrlim_allow(struct inet_peer *peer, int timeout)
{
	unsigned long now, token;
	bool rc = false;

	if (!peer)
		return true;

	token = peer->rate_tokens;
	now = jiffies;
	token += now - peer->rate_last;
	peer->rate_last = now;
	if (token > XRLIM_BURST_FACTOR * timeout)
		token = XRLIM_BURST_FACTOR * timeout;
	if (token >= timeout) {
		token -= timeout;
		rc = true;
	}
	peer->rate_tokens = token;
	return rc;
}
EXPORT_SYMBOL(inet_peer_xrlim_allow);

void inetpeer_invalidate_tree(struct inet_peer_base *base)
{
	struct rb_node *p = rb_first(&base->rb_root);

	while (p) {
		struct inet_peer *peer = rb_entry(p, struct inet_peer, rb_node);

		p = rb_next(p);
		rb_erase(&peer->rb_node, &base->rb_root);
		inet_putpeer(peer);
		cond_resched();
	}

	base->total = 0;
}
EXPORT_SYMBOL(inetpeer_invalidate_tree);