1 /*
2  * IPVS         An implementation of the IP virtual server support for the
3  *              LINUX operating system.  IPVS is now implemented as a module
4  *              over the Netfilter framework. IPVS can be used to build a
5  *              high-performance and highly available server based on a
6  *              cluster of servers.
7  *
8  * Authors:     Wensong Zhang <wensong@linuxvirtualserver.org>
9  *              Peter Kese <peter.kese@ijs.si>
10  *              Julian Anastasov <ja@ssi.bg>
11  *
12  *              This program is free software; you can redistribute it and/or
13  *              modify it under the terms of the GNU General Public License
14  *              as published by the Free Software Foundation; either version
15  *              2 of the License, or (at your option) any later version.
16  *
17  * The IPVS code for kernel 2.2 was done by Wensong Zhang and Peter Kese,
18  * with changes/fixes from Julian Anastasov, Lars Marowsky-Bree, Horms
19  *              and others. Much of the code here is taken from the IP MASQ code of kernel 2.2.
20  *
21  * Changes:
22  *
23  */
24 
25 #include <linux/interrupt.h>
26 #include <linux/in.h>
27 #include <linux/net.h>
28 #include <linux/kernel.h>
29 #include <linux/module.h>
30 #include <linux/vmalloc.h>
31 #include <linux/proc_fs.h>		/* for proc_net_* */
32 #include <linux/seq_file.h>
33 #include <linux/jhash.h>
34 #include <linux/random.h>
35 
36 #include <net/net_namespace.h>
37 #include <net/ip_vs.h>
38 
39 
40 /*
41  *  Connection hash table: for input and output packets lookups of IPVS
42  */
43 static struct list_head *ip_vs_conn_tab;
44 
45 /*  SLAB cache for IPVS connections */
46 static struct kmem_cache *ip_vs_conn_cachep __read_mostly;
47 
48 /*  counter for current IPVS connections */
49 static atomic_t ip_vs_conn_count = ATOMIC_INIT(0);
50 
51 /*  counter for no client port connections */
52 static atomic_t ip_vs_conn_no_cport_cnt = ATOMIC_INIT(0);
53 
54 /* random value for IPVS connection hash */
55 static unsigned int ip_vs_conn_rnd;
56 
57 /*
58  *  Fine locking granularity for big connection hash table
59  */
60 #define CT_LOCKARRAY_BITS  4
61 #define CT_LOCKARRAY_SIZE  (1<<CT_LOCKARRAY_BITS)
62 #define CT_LOCKARRAY_MASK  (CT_LOCKARRAY_SIZE-1)
63 
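/*
 * Instead of one global lock, the connection table is protected by an
 * array of CT_LOCKARRAY_SIZE (16) cache-line-aligned rwlocks; a bucket's
 * lock is selected by masking its hash key with CT_LOCKARRAY_MASK, so
 * operations on buckets that map to different locks can run in parallel.
 */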
64 struct ip_vs_aligned_lock
65 {
66 	rwlock_t	l;
67 } __attribute__((__aligned__(SMP_CACHE_BYTES)));
68 
69 /* lock array for conn table */
70 static struct ip_vs_aligned_lock
71 __ip_vs_conntbl_lock_array[CT_LOCKARRAY_SIZE] __cacheline_aligned;
72 
73 static inline void ct_read_lock(unsigned key)
74 {
75 	read_lock(&__ip_vs_conntbl_lock_array[key&CT_LOCKARRAY_MASK].l);
76 }
77 
78 static inline void ct_read_unlock(unsigned key)
79 {
80 	read_unlock(&__ip_vs_conntbl_lock_array[key&CT_LOCKARRAY_MASK].l);
81 }
82 
83 static inline void ct_write_lock(unsigned key)
84 {
85 	write_lock(&__ip_vs_conntbl_lock_array[key&CT_LOCKARRAY_MASK].l);
86 }
87 
88 static inline void ct_write_unlock(unsigned key)
89 {
90 	write_unlock(&__ip_vs_conntbl_lock_array[key&CT_LOCKARRAY_MASK].l);
91 }
92 
93 static inline void ct_read_lock_bh(unsigned key)
94 {
95 	read_lock_bh(&__ip_vs_conntbl_lock_array[key&CT_LOCKARRAY_MASK].l);
96 }
97 
98 static inline void ct_read_unlock_bh(unsigned key)
99 {
100 	read_unlock_bh(&__ip_vs_conntbl_lock_array[key&CT_LOCKARRAY_MASK].l);
101 }
102 
103 static inline void ct_write_lock_bh(unsigned key)
104 {
105 	write_lock_bh(&__ip_vs_conntbl_lock_array[key&CT_LOCKARRAY_MASK].l);
106 }
107 
108 static inline void ct_write_unlock_bh(unsigned key)
109 {
110 	write_unlock_bh(&__ip_vs_conntbl_lock_array[key&CT_LOCKARRAY_MASK].l);
111 }
112 
113 
114 /*
115  *	Returns hash value for IPVS connection entry
116  */
117 static unsigned int ip_vs_conn_hashkey(int af, unsigned proto,
118 				       const union nf_inet_addr *addr,
119 				       __be16 port)
120 {
121 #ifdef CONFIG_IP_VS_IPV6
122 	if (af == AF_INET6)
123 		return jhash_3words(jhash(addr, 16, ip_vs_conn_rnd),
124 				    (__force u32)port, proto, ip_vs_conn_rnd)
125 			& IP_VS_CONN_TAB_MASK;
126 #endif
127 	return jhash_3words((__force u32)addr->ip, (__force u32)port, proto,
128 			    ip_vs_conn_rnd)
129 		& IP_VS_CONN_TAB_MASK;
130 }
131 
132 
133 /*
134  *	Hashes ip_vs_conn in ip_vs_conn_tab by proto,addr,port.
135  *	returns bool success.
136  */
137 static inline int ip_vs_conn_hash(struct ip_vs_conn *cp)
138 {
139 	unsigned hash;
140 	int ret;
141 
142 	/* Hash by protocol, client address and port */
143 	hash = ip_vs_conn_hashkey(cp->af, cp->protocol, &cp->caddr, cp->cport);
144 
145 	ct_write_lock(hash);
146 
147 	if (!(cp->flags & IP_VS_CONN_F_HASHED)) {
148 		list_add(&cp->c_list, &ip_vs_conn_tab[hash]);
149 		cp->flags |= IP_VS_CONN_F_HASHED;
150 		atomic_inc(&cp->refcnt);
151 		ret = 1;
152 	} else {
153 		IP_VS_ERR("ip_vs_conn_hash(): request for already hashed, "
154 			  "called from %p\n", __builtin_return_address(0));
155 		ret = 0;
156 	}
157 
158 	ct_write_unlock(hash);
159 
160 	return ret;
161 }
162 
163 
164 /*
165  *	UNhashes ip_vs_conn from ip_vs_conn_tab.
166  *	returns bool success.
167  */
168 static inline int ip_vs_conn_unhash(struct ip_vs_conn *cp)
169 {
170 	unsigned hash;
171 	int ret;
172 
173 	/* unhash it and decrease its reference counter */
174 	hash = ip_vs_conn_hashkey(cp->af, cp->protocol, &cp->caddr, cp->cport);
175 
176 	ct_write_lock(hash);
177 
178 	if (cp->flags & IP_VS_CONN_F_HASHED) {
179 		list_del(&cp->c_list);
180 		cp->flags &= ~IP_VS_CONN_F_HASHED;
181 		atomic_dec(&cp->refcnt);
182 		ret = 1;
183 	} else
184 		ret = 0;
185 
186 	ct_write_unlock(hash);
187 
188 	return ret;
189 }
190 
191 
192 /*
193  *  Gets ip_vs_conn associated with supplied parameters in the ip_vs_conn_tab.
194  *  Called for pkts coming from OUTside-to-INside.
195  *	s_addr, s_port: pkt source address (foreign host)
196  *	d_addr, d_port: pkt dest address (load balancer)
197  */
198 static inline struct ip_vs_conn *__ip_vs_conn_in_get
199 (int af, int protocol, const union nf_inet_addr *s_addr, __be16 s_port,
200  const union nf_inet_addr *d_addr, __be16 d_port)
201 {
202 	unsigned hash;
203 	struct ip_vs_conn *cp;
204 
205 	hash = ip_vs_conn_hashkey(af, protocol, s_addr, s_port);
206 
207 	ct_read_lock(hash);
208 
209 	list_for_each_entry(cp, &ip_vs_conn_tab[hash], c_list) {
210 		if (cp->af == af &&
211 		    ip_vs_addr_equal(af, s_addr, &cp->caddr) &&
212 		    ip_vs_addr_equal(af, d_addr, &cp->vaddr) &&
213 		    s_port == cp->cport && d_port == cp->vport &&
214 		    ((!s_port) ^ (!(cp->flags & IP_VS_CONN_F_NO_CPORT))) &&
215 		    protocol == cp->protocol) {
216 			/* HIT */
217 			atomic_inc(&cp->refcnt);
218 			ct_read_unlock(hash);
219 			return cp;
220 		}
221 	}
222 
223 	ct_read_unlock(hash);
224 
225 	return NULL;
226 }
227 
228 struct ip_vs_conn *ip_vs_conn_in_get
229 (int af, int protocol, const union nf_inet_addr *s_addr, __be16 s_port,
230  const union nf_inet_addr *d_addr, __be16 d_port)
231 {
232 	struct ip_vs_conn *cp;
233 
234 	cp = __ip_vs_conn_in_get(af, protocol, s_addr, s_port, d_addr, d_port);
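	/* Entries created without a known client port are hashed with
	 * cport 0, so if nothing matched and such entries exist, retry
	 * the lookup with a zero source port. */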
235 	if (!cp && atomic_read(&ip_vs_conn_no_cport_cnt))
236 		cp = __ip_vs_conn_in_get(af, protocol, s_addr, 0, d_addr,
237 					 d_port);
238 
239 	IP_VS_DBG_BUF(9, "lookup/in %s %s:%d->%s:%d %s\n",
240 		      ip_vs_proto_name(protocol),
241 		      IP_VS_DBG_ADDR(af, s_addr), ntohs(s_port),
242 		      IP_VS_DBG_ADDR(af, d_addr), ntohs(d_port),
243 		      cp ? "hit" : "not hit");
244 
245 	return cp;
246 }
247 
248 /* Get reference to connection template */
249 struct ip_vs_conn *ip_vs_ct_in_get
250 (int af, int protocol, const union nf_inet_addr *s_addr, __be16 s_port,
251  const union nf_inet_addr *d_addr, __be16 d_port)
252 {
253 	unsigned hash;
254 	struct ip_vs_conn *cp;
255 
256 	hash = ip_vs_conn_hashkey(af, protocol, s_addr, s_port);
257 
258 	ct_read_lock(hash);
259 
260 	list_for_each_entry(cp, &ip_vs_conn_tab[hash], c_list) {
261 		if (cp->af == af &&
262 		    ip_vs_addr_equal(af, s_addr, &cp->caddr) &&
263 		    ip_vs_addr_equal(af, d_addr, &cp->vaddr) &&
264 		    s_port == cp->cport && d_port == cp->vport &&
265 		    cp->flags & IP_VS_CONN_F_TEMPLATE &&
266 		    protocol == cp->protocol) {
267 			/* HIT */
268 			atomic_inc(&cp->refcnt);
269 			goto out;
270 		}
271 	}
272 	cp = NULL;
273 
274   out:
275 	ct_read_unlock(hash);
276 
277 	IP_VS_DBG_BUF(9, "template lookup/in %s %s:%d->%s:%d %s\n",
278 		      ip_vs_proto_name(protocol),
279 		      IP_VS_DBG_ADDR(af, s_addr), ntohs(s_port),
280 		      IP_VS_DBG_ADDR(af, d_addr), ntohs(d_port),
281 		      cp ? "hit" : "not hit");
282 
283 	return cp;
284 }
285 
286 /*
287  *  Gets ip_vs_conn associated with supplied parameters in the ip_vs_conn_tab.
288  *  Called for pkts coming from inside-to-OUTside.
289  *	s_addr, s_port: pkt source address (inside host)
290  *	d_addr, d_port: pkt dest address (foreign host)
291  */
292 struct ip_vs_conn *ip_vs_conn_out_get
293 (int af, int protocol, const union nf_inet_addr *s_addr, __be16 s_port,
294  const union nf_inet_addr *d_addr, __be16 d_port)
295 {
296 	unsigned hash;
297 	struct ip_vs_conn *cp, *ret=NULL;
298 
299 	/*
300 	 *	Check for "full" addressed entries
301 	 */
302 	hash = ip_vs_conn_hashkey(af, protocol, d_addr, d_port);
303 
304 	ct_read_lock(hash);
305 
306 	list_for_each_entry(cp, &ip_vs_conn_tab[hash], c_list) {
307 		if (cp->af == af &&
308 		    ip_vs_addr_equal(af, d_addr, &cp->caddr) &&
309 		    ip_vs_addr_equal(af, s_addr, &cp->daddr) &&
310 		    d_port == cp->cport && s_port == cp->dport &&
311 		    protocol == cp->protocol) {
312 			/* HIT */
313 			atomic_inc(&cp->refcnt);
314 			ret = cp;
315 			break;
316 		}
317 	}
318 
319 	ct_read_unlock(hash);
320 
321 	IP_VS_DBG_BUF(9, "lookup/out %s %s:%d->%s:%d %s\n",
322 		      ip_vs_proto_name(protocol),
323 		      IP_VS_DBG_ADDR(af, s_addr), ntohs(s_port),
324 		      IP_VS_DBG_ADDR(af, d_addr), ntohs(d_port),
325 		      ret ? "hit" : "not hit");
326 
327 	return ret;
328 }
329 
330 
331 /*
332  *      Put back the conn and restart its timer with its timeout
333  */
334 void ip_vs_conn_put(struct ip_vs_conn *cp)
335 {
336 	/* re-arm the timer so the entry expires after its timeout */
337 	mod_timer(&cp->timer, jiffies+cp->timeout);
338 
339 	__ip_vs_conn_put(cp);
340 }
341 
342 
343 /*
344  *	Fill a no_client_port connection with a client port number
345  */
346 void ip_vs_conn_fill_cport(struct ip_vs_conn *cp, __be16 cport)
347 {
348 	if (ip_vs_conn_unhash(cp)) {
349 		spin_lock(&cp->lock);
350 		if (cp->flags & IP_VS_CONN_F_NO_CPORT) {
351 			atomic_dec(&ip_vs_conn_no_cport_cnt);
352 			cp->flags &= ~IP_VS_CONN_F_NO_CPORT;
353 			cp->cport = cport;
354 		}
355 		spin_unlock(&cp->lock);
356 
357 		/* hash it back using the new cport */
358 		ip_vs_conn_hash(cp);
359 	}
360 }
361 
362 
363 /*
364  *	Bind a connection entry with the corresponding packet_xmit.
365  *	Called by ip_vs_conn_new.
366  */
367 static inline void ip_vs_bind_xmit(struct ip_vs_conn *cp)
368 {
369 	switch (IP_VS_FWD_METHOD(cp)) {
370 	case IP_VS_CONN_F_MASQ:
371 		cp->packet_xmit = ip_vs_nat_xmit;
372 		break;
373 
374 	case IP_VS_CONN_F_TUNNEL:
375 		cp->packet_xmit = ip_vs_tunnel_xmit;
376 		break;
377 
378 	case IP_VS_CONN_F_DROUTE:
379 		cp->packet_xmit = ip_vs_dr_xmit;
380 		break;
381 
382 	case IP_VS_CONN_F_LOCALNODE:
383 		cp->packet_xmit = ip_vs_null_xmit;
384 		break;
385 
386 	case IP_VS_CONN_F_BYPASS:
387 		cp->packet_xmit = ip_vs_bypass_xmit;
388 		break;
389 	}
390 }
391 
392 #ifdef CONFIG_IP_VS_IPV6
393 static inline void ip_vs_bind_xmit_v6(struct ip_vs_conn *cp)
394 {
395 	switch (IP_VS_FWD_METHOD(cp)) {
396 	case IP_VS_CONN_F_MASQ:
397 		cp->packet_xmit = ip_vs_nat_xmit_v6;
398 		break;
399 
400 	case IP_VS_CONN_F_TUNNEL:
401 		cp->packet_xmit = ip_vs_tunnel_xmit_v6;
402 		break;
403 
404 	case IP_VS_CONN_F_DROUTE:
405 		cp->packet_xmit = ip_vs_dr_xmit_v6;
406 		break;
407 
408 	case IP_VS_CONN_F_LOCALNODE:
409 		cp->packet_xmit = ip_vs_null_xmit;
410 		break;
411 
412 	case IP_VS_CONN_F_BYPASS:
413 		cp->packet_xmit = ip_vs_bypass_xmit_v6;
414 		break;
415 	}
416 }
417 #endif
418 
419 
420 static inline int ip_vs_dest_totalconns(struct ip_vs_dest *dest)
421 {
422 	return atomic_read(&dest->activeconns)
423 		+ atomic_read(&dest->inactconns);
424 }
425 
426 /*
427  *	Bind a connection entry with a virtual service destination
428  *	Called just after a new connection entry is created.
429  */
430 static inline void
431 ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
432 {
433 	/* if dest is NULL, then return directly */
434 	if (!dest)
435 		return;
436 
437 	/* Increase the refcnt counter of the dest */
438 	atomic_inc(&dest->refcnt);
439 
440 	/* Bind with the destination and its corresponding transmitter */
441 	if ((cp->flags & IP_VS_CONN_F_SYNC) &&
442 	    (!(cp->flags & IP_VS_CONN_F_TEMPLATE)))
443 		/* if the connection is not a template and was created
444 		 * by sync, preserve the activity flag.
445 		 */
446 		cp->flags |= atomic_read(&dest->conn_flags) &
447 			     (~IP_VS_CONN_F_INACTIVE);
448 	else
449 		cp->flags |= atomic_read(&dest->conn_flags);
450 	cp->dest = dest;
451 
452 	IP_VS_DBG_BUF(7, "Bind-dest %s c:%s:%d v:%s:%d "
453 		      "d:%s:%d fwd:%c s:%u conn->flags:%X conn->refcnt:%d "
454 		      "dest->refcnt:%d\n",
455 		      ip_vs_proto_name(cp->protocol),
456 		      IP_VS_DBG_ADDR(cp->af, &cp->caddr), ntohs(cp->cport),
457 		      IP_VS_DBG_ADDR(cp->af, &cp->vaddr), ntohs(cp->vport),
458 		      IP_VS_DBG_ADDR(cp->af, &cp->daddr), ntohs(cp->dport),
459 		      ip_vs_fwd_tag(cp), cp->state,
460 		      cp->flags, atomic_read(&cp->refcnt),
461 		      atomic_read(&dest->refcnt));
462 
463 	/* Update the connection counters */
464 	if (!(cp->flags & IP_VS_CONN_F_TEMPLATE)) {
465 		/* It is a normal connection, so increase the inactive
466 		   connection counter because it is in TCP SYNRECV
467 		   state (inactive) or an inactive state of another protocol */
468 		if ((cp->flags & IP_VS_CONN_F_SYNC) &&
469 		    (!(cp->flags & IP_VS_CONN_F_INACTIVE)))
470 			atomic_inc(&dest->activeconns);
471 		else
472 			atomic_inc(&dest->inactconns);
473 	} else {
474 		/* It is a persistent connection/template, so increase
475 		   the persistent connection counter */
476 		atomic_inc(&dest->persistconns);
477 	}
478 
479 	if (dest->u_threshold != 0 &&
480 	    ip_vs_dest_totalconns(dest) >= dest->u_threshold)
481 		dest->flags |= IP_VS_DEST_F_OVERLOAD;
482 }
483 
484 
485 /*
486  * Check if there is a destination for the connection, if so
487  * bind the connection to the destination.
488  */
489 struct ip_vs_dest *ip_vs_try_bind_dest(struct ip_vs_conn *cp)
490 {
491 	struct ip_vs_dest *dest;
492 
493 	if ((cp) && (!cp->dest)) {
494 		dest = ip_vs_find_dest(cp->af, &cp->daddr, cp->dport,
495 				       &cp->vaddr, cp->vport,
496 				       cp->protocol);
497 		ip_vs_bind_dest(cp, dest);
498 		return dest;
499 	} else
500 		return NULL;
501 }
502 
503 
504 /*
505  *	Unbind a connection entry with its VS destination
506  *	Called by the ip_vs_conn_expire function.
507  */
508 static inline void ip_vs_unbind_dest(struct ip_vs_conn *cp)
509 {
510 	struct ip_vs_dest *dest = cp->dest;
511 
512 	if (!dest)
513 		return;
514 
515 	IP_VS_DBG_BUF(7, "Unbind-dest %s c:%s:%d v:%s:%d "
516 		      "d:%s:%d fwd:%c s:%u conn->flags:%X conn->refcnt:%d "
517 		      "dest->refcnt:%d\n",
518 		      ip_vs_proto_name(cp->protocol),
519 		      IP_VS_DBG_ADDR(cp->af, &cp->caddr), ntohs(cp->cport),
520 		      IP_VS_DBG_ADDR(cp->af, &cp->vaddr), ntohs(cp->vport),
521 		      IP_VS_DBG_ADDR(cp->af, &cp->daddr), ntohs(cp->dport),
522 		      ip_vs_fwd_tag(cp), cp->state,
523 		      cp->flags, atomic_read(&cp->refcnt),
524 		      atomic_read(&dest->refcnt));
525 
526 	/* Update the connection counters */
527 	if (!(cp->flags & IP_VS_CONN_F_TEMPLATE)) {
528 		/* It is a normal connection, so decrease the inactconns
529 		   or activeconns counter */
530 		if (cp->flags & IP_VS_CONN_F_INACTIVE) {
531 			atomic_dec(&dest->inactconns);
532 		} else {
533 			atomic_dec(&dest->activeconns);
534 		}
535 	} else {
536 		/* It is a persistent connection/template, so decrease
537 		   the persistent connection counter */
538 		atomic_dec(&dest->persistconns);
539 	}
540 
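	/* Clear the overload flag once the connection count drops below the
	 * lower threshold, or below 3/4 of the upper threshold when no lower
	 * threshold is configured; with no thresholds at all, clear it
	 * unconditionally. */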
541 	if (dest->l_threshold != 0) {
542 		if (ip_vs_dest_totalconns(dest) < dest->l_threshold)
543 			dest->flags &= ~IP_VS_DEST_F_OVERLOAD;
544 	} else if (dest->u_threshold != 0) {
545 		if (ip_vs_dest_totalconns(dest) * 4 < dest->u_threshold * 3)
546 			dest->flags &= ~IP_VS_DEST_F_OVERLOAD;
547 	} else {
548 		if (dest->flags & IP_VS_DEST_F_OVERLOAD)
549 			dest->flags &= ~IP_VS_DEST_F_OVERLOAD;
550 	}
551 
552 	/*
553 	 * Simply decrease the refcnt of the dest, because the
554 	 * dest will be either in service's destination list
555 	 * or in the trash.
556 	 */
557 	atomic_dec(&dest->refcnt);
558 }
559 
560 
561 /*
562  *	Check whether the destination of a connection template is available.
563  *	If available, return 1, otherwise invalidate this connection
564  *	template and return 0.
565  */
566 int ip_vs_check_template(struct ip_vs_conn *ct)
567 {
568 	struct ip_vs_dest *dest = ct->dest;
569 
570 	/*
571 	 * Checking the dest server status.
572 	 */
573 	if ((dest == NULL) ||
574 	    !(dest->flags & IP_VS_DEST_F_AVAILABLE) ||
575 	    (sysctl_ip_vs_expire_quiescent_template &&
576 	     (atomic_read(&dest->weight) == 0))) {
577 		IP_VS_DBG_BUF(9, "check_template: dest not available for "
578 			      "protocol %s s:%s:%d v:%s:%d "
579 			      "-> d:%s:%d\n",
580 			      ip_vs_proto_name(ct->protocol),
581 			      IP_VS_DBG_ADDR(ct->af, &ct->caddr),
582 			      ntohs(ct->cport),
583 			      IP_VS_DBG_ADDR(ct->af, &ct->vaddr),
584 			      ntohs(ct->vport),
585 			      IP_VS_DBG_ADDR(ct->af, &ct->daddr),
586 			      ntohs(ct->dport));
587 
588 		/*
589 		 * Invalidate the connection template
590 		 */
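		/* Rehash the template with unusable port values so it can
		 * never be matched again; it will then simply expire. */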
591 		if (ct->vport != htons(0xffff)) {
592 			if (ip_vs_conn_unhash(ct)) {
593 				ct->dport = htons(0xffff);
594 				ct->vport = htons(0xffff);
595 				ct->cport = 0;
596 				ip_vs_conn_hash(ct);
597 			}
598 		}
599 
600 		/*
601 		 * Simply decrease the refcnt of the template,
602 		 * don't restart its timer.
603 		 */
604 		atomic_dec(&ct->refcnt);
605 		return 0;
606 	}
607 	return 1;
608 }
609 
610 static void ip_vs_conn_expire(unsigned long data)
611 {
612 	struct ip_vs_conn *cp = (struct ip_vs_conn *)data;
613 
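	/* If the entry cannot be released below, ip_vs_conn_put() at
	 * expire_later will re-arm the timer with this timeout, so the
	 * expiry is retried after 60 seconds. */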
614 	cp->timeout = 60*HZ;
615 
616 	/*
617 	 *	hey, I'm using it
618 	 */
619 	atomic_inc(&cp->refcnt);
620 
621 	/*
622 	 *	do I control anybody?
623 	 */
624 	if (atomic_read(&cp->n_control))
625 		goto expire_later;
626 
627 	/*
628 	 *	unhash it if it is hashed in the conn table
629 	 */
630 	if (!ip_vs_conn_unhash(cp))
631 		goto expire_later;
632 
633 	/*
634 	 *	refcnt==1 means I'm the only referrer
635 	 */
636 	if (likely(atomic_read(&cp->refcnt) == 1)) {
637 		/* delete the timer if it is activated by other users */
638 		if (timer_pending(&cp->timer))
639 			del_timer(&cp->timer);
640 
641 		/* does anybody control me? */
642 		if (cp->control)
643 			ip_vs_control_del(cp);
644 
645 		if (unlikely(cp->app != NULL))
646 			ip_vs_unbind_app(cp);
647 		ip_vs_unbind_dest(cp);
648 		if (cp->flags & IP_VS_CONN_F_NO_CPORT)
649 			atomic_dec(&ip_vs_conn_no_cport_cnt);
650 		atomic_dec(&ip_vs_conn_count);
651 
652 		kmem_cache_free(ip_vs_conn_cachep, cp);
653 		return;
654 	}
655 
656 	/* hash it back to the table */
657 	ip_vs_conn_hash(cp);
658 
659   expire_later:
660 	IP_VS_DBG(7, "delayed: conn->refcnt-1=%d conn->n_control=%d\n",
661 		  atomic_read(&cp->refcnt)-1,
662 		  atomic_read(&cp->n_control));
663 
664 	ip_vs_conn_put(cp);
665 }
666 
667 
668 void ip_vs_conn_expire_now(struct ip_vs_conn *cp)
669 {
670 	if (del_timer(&cp->timer))
671 		mod_timer(&cp->timer, jiffies);
672 }
673 
674 
675 /*
676  *	Create a new connection entry and hash it into the ip_vs_conn_tab
677  */
678 struct ip_vs_conn *
679 ip_vs_conn_new(int af, int proto, const union nf_inet_addr *caddr, __be16 cport,
680 	       const union nf_inet_addr *vaddr, __be16 vport,
681 	       const union nf_inet_addr *daddr, __be16 dport, unsigned flags,
682 	       struct ip_vs_dest *dest)
683 {
684 	struct ip_vs_conn *cp;
685 	struct ip_vs_protocol *pp = ip_vs_proto_get(proto);
686 
687 	cp = kmem_cache_zalloc(ip_vs_conn_cachep, GFP_ATOMIC);
688 	if (cp == NULL) {
689 		IP_VS_ERR_RL("ip_vs_conn_new: no memory available.\n");
690 		return NULL;
691 	}
692 
693 	INIT_LIST_HEAD(&cp->c_list);
694 	setup_timer(&cp->timer, ip_vs_conn_expire, (unsigned long)cp);
695 	cp->af		   = af;
696 	cp->protocol	   = proto;
697 	ip_vs_addr_copy(af, &cp->caddr, caddr);
698 	cp->cport	   = cport;
699 	ip_vs_addr_copy(af, &cp->vaddr, vaddr);
700 	cp->vport	   = vport;
701 	ip_vs_addr_copy(af, &cp->daddr, daddr);
702 	cp->dport          = dport;
703 	cp->flags	   = flags;
704 	spin_lock_init(&cp->lock);
705 
706 	/*
707 	 * Mark the entry as referenced by the current thread before hashing
708 	 * it in the table, so that another thread running
709 	 * ip_vs_random_dropentry cannot drop this entry.
710 	 */
711 	atomic_set(&cp->refcnt, 1);
712 
713 	atomic_set(&cp->n_control, 0);
714 	atomic_set(&cp->in_pkts, 0);
715 
716 	atomic_inc(&ip_vs_conn_count);
717 	if (flags & IP_VS_CONN_F_NO_CPORT)
718 		atomic_inc(&ip_vs_conn_no_cport_cnt);
719 
720 	/* Bind the connection with a destination server */
721 	ip_vs_bind_dest(cp, dest);
722 
723 	/* Set its state and timeout */
724 	cp->state = 0;
725 	cp->timeout = 3*HZ;
726 
727 	/* Bind its packet transmitter */
728 #ifdef CONFIG_IP_VS_IPV6
729 	if (af == AF_INET6)
730 		ip_vs_bind_xmit_v6(cp);
731 	else
732 #endif
733 		ip_vs_bind_xmit(cp);
734 
735 	if (unlikely(pp && atomic_read(&pp->appcnt)))
736 		ip_vs_bind_app(cp, pp);
737 
738 	/* Hash it in the ip_vs_conn_tab finally */
739 	ip_vs_conn_hash(cp);
740 
741 	return cp;
742 }
743 
744 
745 /*
746  *	/proc/net/ip_vs_conn entries
747  */
748 #ifdef CONFIG_PROC_FS
749 
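/*
 * The seq_file walkers below keep the hash bucket of the entry being
 * shown read-locked: ip_vs_conn_array() returns the pos'th entry with
 * its bucket locked and the bucket head stored in seq->private, the
 * ..._seq_next() handler drops that lock when moving to another bucket,
 * and ..._seq_stop() drops whichever lock is still held.
 */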
750 static void *ip_vs_conn_array(struct seq_file *seq, loff_t pos)
751 {
752 	int idx;
753 	struct ip_vs_conn *cp;
754 
755 	for(idx = 0; idx < IP_VS_CONN_TAB_SIZE; idx++) {
756 		ct_read_lock_bh(idx);
757 		list_for_each_entry(cp, &ip_vs_conn_tab[idx], c_list) {
758 			if (pos-- == 0) {
759 				seq->private = &ip_vs_conn_tab[idx];
760 				return cp;
761 			}
762 		}
763 		ct_read_unlock_bh(idx);
764 	}
765 
766 	return NULL;
767 }
768 
769 static void *ip_vs_conn_seq_start(struct seq_file *seq, loff_t *pos)
770 {
771 	seq->private = NULL;
772 	return *pos ? ip_vs_conn_array(seq, *pos - 1) :SEQ_START_TOKEN;
773 }
774 
775 static void *ip_vs_conn_seq_next(struct seq_file *seq, void *v, loff_t *pos)
776 {
777 	struct ip_vs_conn *cp = v;
778 	struct list_head *e, *l = seq->private;
779 	int idx;
780 
781 	++*pos;
782 	if (v == SEQ_START_TOKEN)
783 		return ip_vs_conn_array(seq, 0);
784 
785 	/* more on same hash chain? */
786 	if ((e = cp->c_list.next) != l)
787 		return list_entry(e, struct ip_vs_conn, c_list);
788 
789 	idx = l - ip_vs_conn_tab;
790 	ct_read_unlock_bh(idx);
791 
792 	while (++idx < IP_VS_CONN_TAB_SIZE) {
793 		ct_read_lock_bh(idx);
794 		list_for_each_entry(cp, &ip_vs_conn_tab[idx], c_list) {
795 			seq->private = &ip_vs_conn_tab[idx];
796 			return cp;
797 		}
798 		ct_read_unlock_bh(idx);
799 	}
800 	seq->private = NULL;
801 	return NULL;
802 }
803 
804 static void ip_vs_conn_seq_stop(struct seq_file *seq, void *v)
805 {
806 	struct list_head *l = seq->private;
807 
808 	if (l)
809 		ct_read_unlock_bh(l - ip_vs_conn_tab);
810 }
811 
812 static int ip_vs_conn_seq_show(struct seq_file *seq, void *v)
813 {
814 
815 	if (v == SEQ_START_TOKEN)
816 		seq_puts(seq,
817    "Pro FromIP   FPrt ToIP     TPrt DestIP   DPrt State       Expires\n");
818 	else {
819 		const struct ip_vs_conn *cp = v;
820 
821 #ifdef CONFIG_IP_VS_IPV6
822 		if (cp->af == AF_INET6)
823 			seq_printf(seq, "%-3s %pI6 %04X %pI6 %04X %pI6 %04X %-11s %7lu\n",
824 				ip_vs_proto_name(cp->protocol),
825 				&cp->caddr.in6, ntohs(cp->cport),
826 				&cp->vaddr.in6, ntohs(cp->vport),
827 				&cp->daddr.in6, ntohs(cp->dport),
828 				ip_vs_state_name(cp->protocol, cp->state),
829 				(cp->timer.expires-jiffies)/HZ);
830 		else
831 #endif
832 			seq_printf(seq,
833 				"%-3s %08X %04X %08X %04X"
834 				" %08X %04X %-11s %7lu\n",
835 				ip_vs_proto_name(cp->protocol),
836 				ntohl(cp->caddr.ip), ntohs(cp->cport),
837 				ntohl(cp->vaddr.ip), ntohs(cp->vport),
838 				ntohl(cp->daddr.ip), ntohs(cp->dport),
839 				ip_vs_state_name(cp->protocol, cp->state),
840 				(cp->timer.expires-jiffies)/HZ);
841 	}
842 	return 0;
843 }
844 
845 static const struct seq_operations ip_vs_conn_seq_ops = {
846 	.start = ip_vs_conn_seq_start,
847 	.next  = ip_vs_conn_seq_next,
848 	.stop  = ip_vs_conn_seq_stop,
849 	.show  = ip_vs_conn_seq_show,
850 };
851 
852 static int ip_vs_conn_open(struct inode *inode, struct file *file)
853 {
854 	return seq_open(file, &ip_vs_conn_seq_ops);
855 }
856 
857 static const struct file_operations ip_vs_conn_fops = {
858 	.owner	 = THIS_MODULE,
859 	.open    = ip_vs_conn_open,
860 	.read    = seq_read,
861 	.llseek  = seq_lseek,
862 	.release = seq_release,
863 };
864 
865 static const char *ip_vs_origin_name(unsigned flags)
866 {
867 	if (flags & IP_VS_CONN_F_SYNC)
868 		return "SYNC";
869 	else
870 		return "LOCAL";
871 }
872 
873 static int ip_vs_conn_sync_seq_show(struct seq_file *seq, void *v)
874 {
875 
876 	if (v == SEQ_START_TOKEN)
877 		seq_puts(seq,
878    "Pro FromIP   FPrt ToIP     TPrt DestIP   DPrt State       Origin Expires\n");
879 	else {
880 		const struct ip_vs_conn *cp = v;
881 
882 #ifdef CONFIG_IP_VS_IPV6
883 		if (cp->af == AF_INET6)
884 			seq_printf(seq, "%-3s %pI6 %04X %pI6 %04X %pI6 %04X %-11s %-6s %7lu\n",
885 				ip_vs_proto_name(cp->protocol),
886 				&cp->caddr.in6, ntohs(cp->cport),
887 				&cp->vaddr.in6, ntohs(cp->vport),
888 				&cp->daddr.in6, ntohs(cp->dport),
889 				ip_vs_state_name(cp->protocol, cp->state),
890 				ip_vs_origin_name(cp->flags),
891 				(cp->timer.expires-jiffies)/HZ);
892 		else
893 #endif
894 			seq_printf(seq,
895 				"%-3s %08X %04X %08X %04X "
896 				"%08X %04X %-11s %-6s %7lu\n",
897 				ip_vs_proto_name(cp->protocol),
898 				ntohl(cp->caddr.ip), ntohs(cp->cport),
899 				ntohl(cp->vaddr.ip), ntohs(cp->vport),
900 				ntohl(cp->daddr.ip), ntohs(cp->dport),
901 				ip_vs_state_name(cp->protocol, cp->state),
902 				ip_vs_origin_name(cp->flags),
903 				(cp->timer.expires-jiffies)/HZ);
904 	}
905 	return 0;
906 }
907 
908 static const struct seq_operations ip_vs_conn_sync_seq_ops = {
909 	.start = ip_vs_conn_seq_start,
910 	.next  = ip_vs_conn_seq_next,
911 	.stop  = ip_vs_conn_seq_stop,
912 	.show  = ip_vs_conn_sync_seq_show,
913 };
914 
915 static int ip_vs_conn_sync_open(struct inode *inode, struct file *file)
916 {
917 	return seq_open(file, &ip_vs_conn_sync_seq_ops);
918 }
919 
920 static const struct file_operations ip_vs_conn_sync_fops = {
921 	.owner	 = THIS_MODULE,
922 	.open    = ip_vs_conn_sync_open,
923 	.read    = seq_read,
924 	.llseek  = seq_lseek,
925 	.release = seq_release,
926 };
927 
928 #endif
929 
930 
931 /*
932  *      Randomly drop connection entries before running out of memory
933  */
934 static inline int todrop_entry(struct ip_vs_conn *cp)
935 {
936 	/*
937 	 * The drop rate array needs tuning for real environments.
938 	 * Called from timer bh only => no locking
939 	 */
940 	static const char todrop_rate[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
941 	static char todrop_counter[9] = {0};
942 	int i;
943 
944 	/* if the conn entry has been refreshed within the last 60 seconds,
945 	   don't drop it.  This will leave enough time for normal
946 	   connections to get through. */
947 	if (time_before(cp->timeout + jiffies, cp->timer.expires + 60*HZ))
948 		return 0;
949 
950 	/* Don't drop the entry if its number of incoming packets is not
951 	   located in [0, 8] */
952 	i = atomic_read(&cp->in_pkts);
953 	if (i > 8 || i < 0) return 0;
954 
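	/* Drop roughly one of every todrop_rate[i] candidates that have seen
	 * i incoming packets; entries with no packets (rate 0) are kept. */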
955 	if (!todrop_rate[i]) return 0;
956 	if (--todrop_counter[i] > 0) return 0;
957 
958 	todrop_counter[i] = todrop_rate[i];
959 	return 1;
960 }
961 
962 /* Called from keventd and must protect itself from softirqs */
963 void ip_vs_random_dropentry(void)
964 {
965 	int idx;
966 	struct ip_vs_conn *cp;
967 
968 	/*
969 	 * Randomly scan 1/32 of the whole table every second
970 	 */
971 	for (idx = 0; idx < (IP_VS_CONN_TAB_SIZE>>5); idx++) {
972 		unsigned hash = net_random() & IP_VS_CONN_TAB_MASK;
973 
974 		/*
975 		 *  Lock is actually needed in this loop.
976 		 */
977 		ct_write_lock_bh(hash);
978 
979 		list_for_each_entry(cp, &ip_vs_conn_tab[hash], c_list) {
980 			if (cp->flags & IP_VS_CONN_F_TEMPLATE)
981 				/* connection template */
982 				continue;
983 
984 			if (cp->protocol == IPPROTO_TCP) {
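				/* Half-open TCP connections (SYN_RECV/SYNACK)
				 * are always drop candidates, established ones
				 * only via the rate-limited todrop_entry();
				 * other TCP states are never dropped here. */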
985 				switch(cp->state) {
986 				case IP_VS_TCP_S_SYN_RECV:
987 				case IP_VS_TCP_S_SYNACK:
988 					break;
989 
990 				case IP_VS_TCP_S_ESTABLISHED:
991 					if (todrop_entry(cp))
992 						break;
993 					continue;
994 
995 				default:
996 					continue;
997 				}
998 			} else {
999 				if (!todrop_entry(cp))
1000 					continue;
1001 			}
1002 
1003 			IP_VS_DBG(4, "del connection\n");
1004 			ip_vs_conn_expire_now(cp);
1005 			if (cp->control) {
1006 				IP_VS_DBG(4, "del conn template\n");
1007 				ip_vs_conn_expire_now(cp->control);
1008 			}
1009 		}
1010 		ct_write_unlock_bh(hash);
1011 	}
1012 }
1013 
1014 
1015 /*
1016  *      Flush all the connection entries in the ip_vs_conn_tab
1017  */
1018 static void ip_vs_conn_flush(void)
1019 {
1020 	int idx;
1021 	struct ip_vs_conn *cp;
1022 
1023   flush_again:
1024 	for (idx=0; idx<IP_VS_CONN_TAB_SIZE; idx++) {
1025 		/*
1026 		 *  Lock is actually needed in this loop.
1027 		 */
1028 		ct_write_lock_bh(idx);
1029 
1030 		list_for_each_entry(cp, &ip_vs_conn_tab[idx], c_list) {
1031 
1032 			IP_VS_DBG(4, "del connection\n");
1033 			ip_vs_conn_expire_now(cp);
1034 			if (cp->control) {
1035 				IP_VS_DBG(4, "del conn template\n");
1036 				ip_vs_conn_expire_now(cp->control);
1037 			}
1038 		}
1039 		ct_write_unlock_bh(idx);
1040 	}
1041 
1042 	/* the counter may not be zero yet, because some conn entries may
1043 	   still be run by slow timer handlers or be unhashed but referenced */
1044 	if (atomic_read(&ip_vs_conn_count) != 0) {
1045 		schedule();
1046 		goto flush_again;
1047 	}
1048 }
1049 
1050 
1051 int __init ip_vs_conn_init(void)
1052 {
1053 	int idx;
1054 
1055 	/*
1056 	 * Allocate the connection hash table and initialize its list heads
1057 	 */
1058 	ip_vs_conn_tab = vmalloc(IP_VS_CONN_TAB_SIZE*sizeof(struct list_head));
1059 	if (!ip_vs_conn_tab)
1060 		return -ENOMEM;
1061 
1062 	/* Allocate ip_vs_conn slab cache */
1063 	ip_vs_conn_cachep = kmem_cache_create("ip_vs_conn",
1064 					      sizeof(struct ip_vs_conn), 0,
1065 					      SLAB_HWCACHE_ALIGN, NULL);
1066 	if (!ip_vs_conn_cachep) {
1067 		vfree(ip_vs_conn_tab);
1068 		return -ENOMEM;
1069 	}
1070 
1071 	IP_VS_INFO("Connection hash table configured "
1072 		   "(size=%d, memory=%ldKbytes)\n",
1073 		   IP_VS_CONN_TAB_SIZE,
1074 		   (long)(IP_VS_CONN_TAB_SIZE*sizeof(struct list_head))/1024);
1075 	IP_VS_DBG(0, "Each connection entry needs %Zd bytes at least\n",
1076 		  sizeof(struct ip_vs_conn));
1077 
1078 	for (idx = 0; idx < IP_VS_CONN_TAB_SIZE; idx++) {
1079 		INIT_LIST_HEAD(&ip_vs_conn_tab[idx]);
1080 	}
1081 
1082 	for (idx = 0; idx < CT_LOCKARRAY_SIZE; idx++)  {
1083 		rwlock_init(&__ip_vs_conntbl_lock_array[idx].l);
1084 	}
1085 
1086 	proc_net_fops_create(&init_net, "ip_vs_conn", 0, &ip_vs_conn_fops);
1087 	proc_net_fops_create(&init_net, "ip_vs_conn_sync", 0, &ip_vs_conn_sync_fops);
1088 
1089 	/* calculate the random value for connection hash */
1090 	get_random_bytes(&ip_vs_conn_rnd, sizeof(ip_vs_conn_rnd));
1091 
1092 	return 0;
1093 }
1094 
1095 
1096 void ip_vs_conn_cleanup(void)
1097 {
1098 	/* flush all the connection entries first */
1099 	ip_vs_conn_flush();
1100 
1101 	/* Release the empty cache */
1102 	kmem_cache_destroy(ip_vs_conn_cachep);
1103 	proc_net_remove(&init_net, "ip_vs_conn");
1104 	proc_net_remove(&init_net, "ip_vs_conn_sync");
1105 	vfree(ip_vs_conn_tab);
1106 }
1107