/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Generic TIME_WAIT sockets functions
 *
 *		From code originally in TCP
 */

#include <linux/kernel.h>
#include <net/inet_hashtables.h>
#include <net/inet_timewait_sock.h>
#include <net/ip.h>

/* Must be called with locally disabled BHs. */
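/*
 * Unhashes tw from the established hash and from its bind bucket, then
 * drops a reference via inet_twsk_put().  Bails out early if tw is
 * already unhashed, so racing with another killer is harmless.
 */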
static void __inet_twsk_kill(struct inet_timewait_sock *tw,
			     struct inet_hashinfo *hashinfo)
{
	struct inet_bind_hashbucket *bhead;
	struct inet_bind_bucket *tb;
	/* Unlink from established hashes. */
	spinlock_t *lock = inet_ehash_lockp(hashinfo, tw->tw_hash);

	spin_lock(lock);
	if (hlist_nulls_unhashed(&tw->tw_node)) {
		spin_unlock(lock);
		return;
	}
	hlist_nulls_del_rcu(&tw->tw_node);
	sk_nulls_node_init(&tw->tw_node);
	spin_unlock(lock);

	/* Disassociate with bind bucket. */
	bhead = &hashinfo->bhash[inet_bhashfn(twsk_net(tw), tw->tw_num,
			hashinfo->bhash_size)];
	spin_lock(&bhead->lock);
	tb = tw->tw_tb;
	__hlist_del(&tw->tw_bind_node);
	tw->tw_tb = NULL;
	inet_bind_bucket_destroy(hashinfo->bind_bucket_cachep, tb);
	spin_unlock(&bhead->lock);
#ifdef SOCK_REFCNT_DEBUG
	if (atomic_read(&tw->tw_refcnt) != 1) {
		printk(KERN_DEBUG "%s timewait_sock %p refcnt=%d\n",
		       tw->tw_prot->name, tw, atomic_read(&tw->tw_refcnt));
	}
#endif
	inet_twsk_put(tw);
}

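/*
 * Releases one reference to tw.  On the final put the protocol's twsk
 * destructor runs, the netns reference is released, the socket goes
 * back to its slab cache, and the owning module reference is dropped.
 */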
void inet_twsk_put(struct inet_timewait_sock *tw)
{
	if (atomic_dec_and_test(&tw->tw_refcnt)) {
		struct module *owner = tw->tw_prot->owner;
		twsk_destructor((struct sock *)tw);
#ifdef SOCK_REFCNT_DEBUG
		printk(KERN_DEBUG "%s timewait_sock %p released\n",
		       tw->tw_prot->name, tw);
#endif
		release_net(twsk_net(tw));
		kmem_cache_free(tw->tw_prot->twsk_prot->twsk_slab, tw);
		module_put(owner);
	}
}
EXPORT_SYMBOL_GPL(inet_twsk_put);

/*
 * Enter the time wait state. This is called with locally disabled BH.
 * Essentially we whip up a timewait bucket, copy the relevant info into it
 * from the SK, and mess with hash chains and list linkage.
 */
void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
			   struct inet_hashinfo *hashinfo)
{
	const struct inet_sock *inet = inet_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct inet_ehash_bucket *ehead = inet_ehash_bucket(hashinfo, sk->sk_hash);
	spinlock_t *lock = inet_ehash_lockp(hashinfo, sk->sk_hash);
	struct inet_bind_hashbucket *bhead;
	/* Step 1: Put TW into bind hash. Original socket stays there too.
	   Note that any socket with inet->num != 0 MUST be bound in the
	   binding cache, even if it is closed.
	 */
	bhead = &hashinfo->bhash[inet_bhashfn(twsk_net(tw), inet->num,
			hashinfo->bhash_size)];
	spin_lock(&bhead->lock);
	tw->tw_tb = icsk->icsk_bind_hash;
	WARN_ON(!icsk->icsk_bind_hash);
	inet_twsk_add_bind_node(tw, &tw->tw_tb->owners);
	spin_unlock(&bhead->lock);

	spin_lock(lock);

	/*
	 * Step 2: Hash TW into TIMEWAIT chain.
	 * Should be done before removing sk from established chain
	 * because readers are lockless and search established first.
	 */
	atomic_inc(&tw->tw_refcnt);
	inet_twsk_add_node_rcu(tw, &ehead->twchain);

	/* Step 3: Remove SK from established hash. */
	if (__sk_nulls_del_node_init_rcu(sk))
		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);

	spin_unlock(lock);
}

EXPORT_SYMBOL_GPL(__inet_twsk_hashdance);

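/*
 * Allocates a timewait sock from the protocol's twsk slab and copies
 * sk's identity (addresses, ports, family, hash, ...) into it.  Uses
 * GFP_ATOMIC since callers run in atomic context; may return NULL.
 * The caller receives the initial reference.
 */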
struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk, const int state)
{
	struct inet_timewait_sock *tw =
		kmem_cache_alloc(sk->sk_prot_creator->twsk_prot->twsk_slab,
				 GFP_ATOMIC);
	if (tw != NULL) {
		const struct inet_sock *inet = inet_sk(sk);

		/* Give us an identity. */
		tw->tw_daddr	    = inet->daddr;
		tw->tw_rcv_saddr    = inet->rcv_saddr;
		tw->tw_bound_dev_if = sk->sk_bound_dev_if;
		tw->tw_num	    = inet->num;
		tw->tw_state	    = TCP_TIME_WAIT;
		tw->tw_substate	    = state;
		tw->tw_sport	    = inet->sport;
		tw->tw_dport	    = inet->dport;
		tw->tw_family	    = sk->sk_family;
		tw->tw_reuse	    = sk->sk_reuse;
		tw->tw_hash	    = sk->sk_hash;
		tw->tw_ipv6only	    = 0;
		tw->tw_transparent  = inet->transparent;
		tw->tw_prot	    = sk->sk_prot_creator;
		twsk_net_set(tw, hold_net(sock_net(sk)));
		atomic_set(&tw->tw_refcnt, 1);
		inet_twsk_dead_node_init(tw);
		__module_get(tw->tw_prot->owner);
	}

	return tw;
}

EXPORT_SYMBOL_GPL(inet_twsk_alloc);

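/*
 * Reaps the timewait sockets parked in one death-row slot.  The
 * death_lock is dropped around each kill so other CPUs can make
 * progress; the chain is therefore rescanned from the head after
 * every reacquisition.
 */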
/* Returns non-zero if quota exceeded.  */
static int inet_twdr_do_twkill_work(struct inet_timewait_death_row *twdr,
				    const int slot)
{
	struct inet_timewait_sock *tw;
	struct hlist_node *node;
	unsigned int killed;
	int ret;

	/* NOTE: compare this to the previous version, where the lock
	 * was released after detaching the chain. That was racy,
	 * because tw buckets are scheduled in a non-serialized context
	 * in 2.3 (with netfilter), and with softnet this is common because
	 * soft irqs are not sequenced.
	 */
	killed = 0;
	ret = 0;
rescan:
	inet_twsk_for_each_inmate(tw, node, &twdr->cells[slot]) {
		__inet_twsk_del_dead_node(tw);
		spin_unlock(&twdr->death_lock);
		__inet_twsk_kill(tw, twdr->hashinfo);
#ifdef CONFIG_NET_NS
		NET_INC_STATS_BH(twsk_net(tw), LINUX_MIB_TIMEWAITED);
#endif
		inet_twsk_put(tw);
		killed++;
		spin_lock(&twdr->death_lock);
		if (killed > INET_TWDR_TWKILL_QUOTA) {
			ret = 1;
			break;
		}

		/* While we dropped twdr->death_lock, another cpu may have
		 * killed off the next TW bucket in the list, therefore
		 * do a fresh re-read of the hlist head node with the
		 * lock reacquired.  We still use the hlist traversal
		 * macro in order to get the prefetches.
		 */
		goto rescan;
	}

	twdr->tw_count -= killed;
#ifndef CONFIG_NET_NS
	NET_ADD_STATS_BH(&init_net, LINUX_MIB_TIMEWAITED, killed);
#endif
	return ret;
}

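/*
 * Slow timer handler.  Reaps the current death-row slot; if the kill
 * quota was exceeded, the remainder of the slot is handed off to the
 * twkill workqueue.  The timer is re-armed while timewait sockets
 * remain.
 */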
void inet_twdr_hangman(unsigned long data)
{
	struct inet_timewait_death_row *twdr;
	unsigned int need_timer;

	twdr = (struct inet_timewait_death_row *)data;
	spin_lock(&twdr->death_lock);

	if (twdr->tw_count == 0)
		goto out;

	need_timer = 0;
	if (inet_twdr_do_twkill_work(twdr, twdr->slot)) {
		twdr->thread_slots |= (1 << twdr->slot);
		schedule_work(&twdr->twkill_work);
		need_timer = 1;
	} else {
		/* We purged the entire slot, anything left?  */
		if (twdr->tw_count)
			need_timer = 1;
	}
	twdr->slot = ((twdr->slot + 1) & (INET_TWDR_TWKILL_SLOTS - 1));
	if (need_timer)
		mod_timer(&twdr->tw_timer, jiffies + twdr->period);
out:
	spin_unlock(&twdr->death_lock);
}

EXPORT_SYMBOL_GPL(inet_twdr_hangman);

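/*
 * Workqueue counterpart of inet_twdr_hangman(): finishes the slots the
 * timer handler could not drain within its quota, yielding the CPU
 * between quota-sized batches when rescheduling is needed.
 */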
void inet_twdr_twkill_work(struct work_struct *work)
{
	struct inet_timewait_death_row *twdr =
		container_of(work, struct inet_timewait_death_row, twkill_work);
	int i;

	BUILD_BUG_ON((INET_TWDR_TWKILL_SLOTS - 1) >
			(sizeof(twdr->thread_slots) * 8));

	while (twdr->thread_slots) {
		spin_lock_bh(&twdr->death_lock);
		for (i = 0; i < INET_TWDR_TWKILL_SLOTS; i++) {
			if (!(twdr->thread_slots & (1 << i)))
				continue;

			while (inet_twdr_do_twkill_work(twdr, i) != 0) {
				if (need_resched()) {
					spin_unlock_bh(&twdr->death_lock);
					schedule();
					spin_lock_bh(&twdr->death_lock);
				}
			}

			twdr->thread_slots &= ~(1 << i);
		}
		spin_unlock_bh(&twdr->death_lock);
	}
}

EXPORT_SYMBOL_GPL(inet_twdr_twkill_work);

/* These are always called from BH context.  See callers in
 * tcp_input.c to verify this.
 */

/* This is for handling early-kills of TIME_WAIT sockets. */
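/*
 * Unlinks tw from the death row (dropping the death-row reference and
 * stopping the slow timer if it was the last pending socket) and
 * unhashes it immediately rather than waiting for its timeout.
 */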
void inet_twsk_deschedule(struct inet_timewait_sock *tw,
			  struct inet_timewait_death_row *twdr)
{
	spin_lock(&twdr->death_lock);
	if (inet_twsk_del_dead_node(tw)) {
		inet_twsk_put(tw);
		if (--twdr->tw_count == 0)
			del_timer(&twdr->tw_timer);
	}
	spin_unlock(&twdr->death_lock);
	__inet_twsk_kill(tw, twdr->hashinfo);
}

EXPORT_SYMBOL(inet_twsk_deschedule);

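/*
 * Parks tw on the death row for roughly timeo jiffies.  Short timeouts
 * go onto the fine-grained recycle wheel driven by twcal_timer; longer
 * ones land in the coarse twkill slots driven by the slow tw_timer.
 */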
void inet_twsk_schedule(struct inet_timewait_sock *tw,
			struct inet_timewait_death_row *twdr,
			const int timeo, const int timewait_len)
{
	struct hlist_head *list;
	int slot;

	/* timeout := RTO * 3.5
	 *
	 * 3.5 = 1+2+0.5 to wait for two retransmits.
	 *
	 * RATIONALE: if FIN arrived and we entered TIME-WAIT state,
	 * our ACK acking that FIN can be lost. If N subsequent retransmitted
	 * FINs (or previous segments) are lost (probability of such event
	 * is p^(N+1), where p is probability to lose single packet and
	 * time to detect the loss is about RTO*(2^N - 1) with exponential
	 * backoff). Normal timewait length is calculated so that we
	 * wait at least for one retransmitted FIN (maximal RTO is 120sec).
	 * [ BTW Linux, following BSD, violates this requirement by waiting
	 *   only for 60sec; we should wait at least 240 secs.
	 *   Well, 240 consumes too much of resources 8)
	 * ]
	 * This interval is not reduced to catch old duplicates and
	 * responses to our wandering segments living for two MSLs.
	 * However, if we use PAWS to detect
	 * old duplicates, we can reduce the interval to bounds required
	 * by RTO, rather than MSL. So, if peer understands PAWS, we
	 * kill tw bucket after 3.5*RTO (it is important that this number
	 * is greater than TS tick!) and detect old duplicates with help
	 * of PAWS.
	 */
	slot = (timeo + (1 << INET_TWDR_RECYCLE_TICK) - 1) >> INET_TWDR_RECYCLE_TICK;

	spin_lock(&twdr->death_lock);

	/* Unlink it, if it was scheduled */
	if (inet_twsk_del_dead_node(tw))
		twdr->tw_count--;
	else
		atomic_inc(&tw->tw_refcnt);

	if (slot >= INET_TWDR_RECYCLE_SLOTS) {
		/* Schedule to slow timer */
		if (timeo >= timewait_len) {
			slot = INET_TWDR_TWKILL_SLOTS - 1;
		} else {
			slot = DIV_ROUND_UP(timeo, twdr->period);
			if (slot >= INET_TWDR_TWKILL_SLOTS)
				slot = INET_TWDR_TWKILL_SLOTS - 1;
		}
		tw->tw_ttd = jiffies + timeo;
		slot = (twdr->slot + slot) & (INET_TWDR_TWKILL_SLOTS - 1);
		list = &twdr->cells[slot];
	} else {
		tw->tw_ttd = jiffies + (slot << INET_TWDR_RECYCLE_TICK);

		if (twdr->twcal_hand < 0) {
			twdr->twcal_hand = 0;
			twdr->twcal_jiffie = jiffies;
			twdr->twcal_timer.expires = twdr->twcal_jiffie +
					      (slot << INET_TWDR_RECYCLE_TICK);
			add_timer(&twdr->twcal_timer);
		} else {
			if (time_after(twdr->twcal_timer.expires,
				       jiffies + (slot << INET_TWDR_RECYCLE_TICK)))
				mod_timer(&twdr->twcal_timer,
					  jiffies + (slot << INET_TWDR_RECYCLE_TICK));
			slot = (twdr->twcal_hand + slot) & (INET_TWDR_RECYCLE_SLOTS - 1);
		}
		list = &twdr->twcal_row[slot];
	}

	hlist_add_head(&tw->tw_death_node, list);

	if (twdr->tw_count++ == 0)
		mod_timer(&twdr->tw_timer, jiffies + twdr->period);
	spin_unlock(&twdr->death_lock);
}

EXPORT_SYMBOL_GPL(inet_twsk_schedule);

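/*
 * Recycle-wheel timer handler.  Sweeps the twcal_row slots whose time
 * has passed, kills every inmate found, and re-arms the timer for the
 * first future slot that still has entries.
 */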
void inet_twdr_twcal_tick(unsigned long data)
{
	struct inet_timewait_death_row *twdr;
	int n, slot;
	unsigned long j;
	unsigned long now = jiffies;
	int killed = 0;
	int adv = 0;

	twdr = (struct inet_timewait_death_row *)data;

	spin_lock(&twdr->death_lock);
	if (twdr->twcal_hand < 0)
		goto out;

	slot = twdr->twcal_hand;
	j = twdr->twcal_jiffie;

	for (n = 0; n < INET_TWDR_RECYCLE_SLOTS; n++) {
		if (time_before_eq(j, now)) {
			struct hlist_node *node, *safe;
			struct inet_timewait_sock *tw;

			inet_twsk_for_each_inmate_safe(tw, node, safe,
						       &twdr->twcal_row[slot]) {
				__inet_twsk_del_dead_node(tw);
				__inet_twsk_kill(tw, twdr->hashinfo);
#ifdef CONFIG_NET_NS
				NET_INC_STATS_BH(twsk_net(tw), LINUX_MIB_TIMEWAITKILLED);
#endif
				inet_twsk_put(tw);
				killed++;
			}
		} else {
			if (!adv) {
				adv = 1;
				twdr->twcal_jiffie = j;
				twdr->twcal_hand = slot;
			}

			if (!hlist_empty(&twdr->twcal_row[slot])) {
				mod_timer(&twdr->twcal_timer, j);
				goto out;
			}
		}
		j += 1 << INET_TWDR_RECYCLE_TICK;
		slot = (slot + 1) & (INET_TWDR_RECYCLE_SLOTS - 1);
	}
	twdr->twcal_hand = -1;

out:
	if ((twdr->tw_count -= killed) == 0)
		del_timer(&twdr->tw_timer);
#ifndef CONFIG_NET_NS
	NET_ADD_STATS_BH(&init_net, LINUX_MIB_TIMEWAITKILLED, killed);
#endif
	spin_unlock(&twdr->death_lock);
}

EXPORT_SYMBOL_GPL(inet_twdr_twcal_tick);

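/*
 * Forcibly kills every timewait socket in hashinfo belonging to the
 * given netns and address family, e.g. on network namespace teardown.
 * The bucket lock is dropped for each kill, so the chain is walked
 * again from the start afterwards.
 */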
void inet_twsk_purge(struct net *net, struct inet_hashinfo *hashinfo,
		     struct inet_timewait_death_row *twdr, int family)
{
	struct inet_timewait_sock *tw;
	struct sock *sk;
	struct hlist_nulls_node *node;
	int h;

	local_bh_disable();
	for (h = 0; h < (hashinfo->ehash_size); h++) {
		struct inet_ehash_bucket *head =
			inet_ehash_bucket(hashinfo, h);
		spinlock_t *lock = inet_ehash_lockp(hashinfo, h);
restart:
		spin_lock(lock);
		sk_nulls_for_each(sk, node, &head->twchain) {

			tw = inet_twsk(sk);
			if (!net_eq(twsk_net(tw), net) ||
			    tw->tw_family != family)
				continue;

			atomic_inc(&tw->tw_refcnt);
			spin_unlock(lock);
			inet_twsk_deschedule(tw, twdr);
			inet_twsk_put(tw);

			goto restart;
		}
		spin_unlock(lock);
	}
	local_bh_enable();
}
EXPORT_SYMBOL_GPL(inet_twsk_purge);