/* flow.c: Generic flow cache.
 *
 * Copyright (C) 2003 Alexey N. Kuznetsov (kuznet@ms2.inr.ac.ru)
 * Copyright (C) 2003 David S. Miller (davem@redhat.com)
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/jhash.h>
#include <linux/interrupt.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/completion.h>
#include <linux/percpu.h>
#include <linux/bitops.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/mutex.h>
#include <net/flow.h>
#include <linux/atomic.h>
#include <linux/security.h>

struct flow_cache_entry {
	union {
		struct hlist_node	hlist;
		struct list_head	gc_list;
	} u;
	struct net			*net;
	u16				family;
	u8				dir;
	u32				genid;
	struct flowi			key;
	struct flow_cache_object	*object;
};

struct flow_cache_percpu {
	struct hlist_head		*hash_table;
	int				hash_count;
	u32				hash_rnd;
	int				hash_rnd_recalc;
	struct tasklet_struct		flush_tasklet;
};

struct flow_flush_info {
	struct flow_cache		*cache;
	atomic_t			cpuleft;
	struct completion		completion;
};

struct flow_cache {
	u32				hash_shift;
	struct flow_cache_percpu __percpu *percpu;
	struct notifier_block		hotcpu_notifier;
	int				low_watermark;
	int				high_watermark;
	struct timer_list		rnd_timer;
};

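/* Each cached entry remembers the generation id that was current when it
 * was inserted.  Users of the cache (such as the xfrm policy layer) bump
 * flow_cache_genid to invalidate all existing entries at once; stale
 * entries are then dropped lazily on lookup or by the flush machinery.
 */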
atomic_t flow_cache_genid = ATOMIC_INIT(0);
EXPORT_SYMBOL(flow_cache_genid);
static struct flow_cache flow_cache_global;
static struct kmem_cache *flow_cachep __read_mostly;

static DEFINE_SPINLOCK(flow_cache_gc_lock);
static LIST_HEAD(flow_cache_gc_list);

#define flow_cache_hash_size(cache)	(1 << (cache)->hash_shift)
#define FLOW_HASH_RND_PERIOD		(10 * 60 * HZ)

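/* Timer callback: fires every FLOW_HASH_RND_PERIOD (ten minutes) and asks
 * each cpu to pick a fresh hash secret on its next lookup, presumably so
 * knowledge of the hash distribution does not stay useful for long.
 */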
static void flow_cache_new_hashrnd(unsigned long arg)
{
	struct flow_cache *fc = (void *) arg;
	int i;

	for_each_possible_cpu(i)
		per_cpu_ptr(fc->percpu, i)->hash_rnd_recalc = 1;

	fc->rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD;
	add_timer(&fc->rnd_timer);
}

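/* An entry is valid only if it was created under the current generation id
 * and, when it holds a resolved object, that object's ->check() hook still
 * accepts it.  Invalid entries are reclaimed rather than returned.
 */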
static int flow_entry_valid(struct flow_cache_entry *fle)
{
	if (atomic_read(&flow_cache_genid) != fle->genid)
		return 0;
	if (fle->object && !fle->object->ops->check(fle->object))
		return 0;
	return 1;
}

static void flow_entry_kill(struct flow_cache_entry *fle)
{
	if (fle->object)
		fle->object->ops->delete(fle->object);
	kmem_cache_free(flow_cachep, fle);
}

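/* Dead entries are not freed where they are found: they are moved onto
 * flow_cache_gc_list under flow_cache_gc_lock and released later by this
 * work item, which runs in process context.
 */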
static void flow_cache_gc_task(struct work_struct *work)
{
	struct list_head gc_list;
	struct flow_cache_entry *fce, *n;

	INIT_LIST_HEAD(&gc_list);
	spin_lock_bh(&flow_cache_gc_lock);
	list_splice_tail_init(&flow_cache_gc_list, &gc_list);
	spin_unlock_bh(&flow_cache_gc_lock);

	list_for_each_entry_safe(fce, n, &gc_list, u.gc_list)
		flow_entry_kill(fce);
}
static DECLARE_WORK(flow_cache_gc_work, flow_cache_gc_task);

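/* Hand a batch of unlinked entries over to the gc work item and account
 * for them in the per-cpu entry count.
 */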
static void flow_cache_queue_garbage(struct flow_cache_percpu *fcp,
				     int deleted, struct list_head *gc_list)
{
	if (deleted) {
		fcp->hash_count -= deleted;
		spin_lock_bh(&flow_cache_gc_lock);
		list_splice_tail(gc_list, &flow_cache_gc_list);
		spin_unlock_bh(&flow_cache_gc_lock);
		schedule_work(&flow_cache_gc_work);
	}
}

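/* Walk one cpu's hash table and keep at most @shrink_to valid entries per
 * bucket; everything else is unlinked and queued for garbage collection.
 * With shrink_to == 0 this empties the table completely, which is also
 * how re-keying the hash secret and cpu-down handling reuse it.
 */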
static void __flow_cache_shrink(struct flow_cache *fc,
				struct flow_cache_percpu *fcp,
				int shrink_to)
{
	struct flow_cache_entry *fle;
	struct hlist_node *tmp;
	LIST_HEAD(gc_list);
	int i, deleted = 0;

	for (i = 0; i < flow_cache_hash_size(fc); i++) {
		int saved = 0;

		hlist_for_each_entry_safe(fle, tmp,
					  &fcp->hash_table[i], u.hlist) {
			if (saved < shrink_to &&
			    flow_entry_valid(fle)) {
				saved++;
			} else {
				deleted++;
				hlist_del(&fle->u.hlist);
				list_add_tail(&fle->u.gc_list, &gc_list);
			}
		}
	}

	flow_cache_queue_garbage(fcp, deleted, &gc_list);
}

static void flow_cache_shrink(struct flow_cache *fc,
			      struct flow_cache_percpu *fcp)
{
	int shrink_to = fc->low_watermark / flow_cache_hash_size(fc);

	__flow_cache_shrink(fc, fcp, shrink_to);
}

static void flow_new_hash_rnd(struct flow_cache *fc,
			      struct flow_cache_percpu *fcp)
{
	get_random_bytes(&fcp->hash_rnd, sizeof(u32));
	fcp->hash_rnd_recalc = 0;
	__flow_cache_shrink(fc, fcp, 0);
}

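/* Hash the flow key with the per-cpu secret and fold the result into a
 * bucket index.  The key is hashed as an array of u32 words; with
 * hash_shift set to 10 at init time the mask below selects one of
 * 1024 buckets.
 */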
static u32 flow_hash_code(struct flow_cache *fc,
			  struct flow_cache_percpu *fcp,
			  const struct flowi *key,
			  size_t keysize)
{
	const u32 *k = (const u32 *) key;
	const u32 length = keysize * sizeof(flow_compare_t) / sizeof(u32);

	return jhash2(k, length, fcp->hash_rnd)
		& (flow_cache_hash_size(fc) - 1);
}

/* I hear what you're saying, use memcmp.  But memcmp cannot make
 * important assumptions that we can here, such as alignment.
 */
static int flow_key_compare(const struct flowi *key1, const struct flowi *key2,
			    size_t keysize)
{
	const flow_compare_t *k1, *k1_lim, *k2;

	k1 = (const flow_compare_t *) key1;
	k1_lim = k1 + keysize;

	k2 = (const flow_compare_t *) key2;

	do {
		if (*k1++ != *k2++)
			return 1;
	} while (k1 < k1_lim);

	return 0;
}

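/* Main lookup path, run with bottom halves disabled against the local
 * cpu's table.  On a hit under the current generation id the cached object
 * is returned via its ->get() hook; on a miss a new entry is allocated
 * (after shrinking the table if it grew past the high watermark) and the
 * caller's resolver builds the object.  Families without a known key size
 * bypass the cache entirely via the nocache path.
 */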
struct flow_cache_object *
flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
		  flow_resolve_t resolver, void *ctx)
{
	struct flow_cache *fc = &flow_cache_global;
	struct flow_cache_percpu *fcp;
	struct flow_cache_entry *fle, *tfle;
	struct flow_cache_object *flo;
	size_t keysize;
	unsigned int hash;

	local_bh_disable();
	fcp = this_cpu_ptr(fc->percpu);

	fle = NULL;
	flo = NULL;

	keysize = flow_key_size(family);
	if (!keysize)
		goto nocache;

	/* Packet really early in init?  Making flow_cache_init a
	 * pre-smp initcall would solve this.  --RR */
	if (!fcp->hash_table)
		goto nocache;

	if (fcp->hash_rnd_recalc)
		flow_new_hash_rnd(fc, fcp);

	hash = flow_hash_code(fc, fcp, key, keysize);
	hlist_for_each_entry(tfle, &fcp->hash_table[hash], u.hlist) {
		if (tfle->net == net &&
		    tfle->family == family &&
		    tfle->dir == dir &&
		    flow_key_compare(key, &tfle->key, keysize) == 0) {
			fle = tfle;
			break;
		}
	}

	if (unlikely(!fle)) {
		if (fcp->hash_count > fc->high_watermark)
			flow_cache_shrink(fc, fcp);

		fle = kmem_cache_alloc(flow_cachep, GFP_ATOMIC);
		if (fle) {
			fle->net = net;
			fle->family = family;
			fle->dir = dir;
			memcpy(&fle->key, key, keysize * sizeof(flow_compare_t));
			fle->object = NULL;
			hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
			fcp->hash_count++;
		}
	} else if (likely(fle->genid == atomic_read(&flow_cache_genid))) {
		flo = fle->object;
		if (!flo)
			goto ret_object;
		flo = flo->ops->get(flo);
		if (flo)
			goto ret_object;
	} else if (fle->object) {
		flo = fle->object;
		flo->ops->delete(flo);
		fle->object = NULL;
	}

nocache:
	flo = NULL;
	if (fle) {
		flo = fle->object;
		fle->object = NULL;
	}
	flo = resolver(net, key, family, dir, flo, ctx);
	if (fle) {
		fle->genid = atomic_read(&flow_cache_genid);
		if (!IS_ERR(flo))
			fle->object = flo;
		else
			fle->genid--;
	} else {
		if (!IS_ERR_OR_NULL(flo))
			flo->ops->delete(flo);
	}
ret_object:
	local_bh_enable();
	return flo;
}
EXPORT_SYMBOL(flow_cache_lookup);

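/* Per-cpu half of a flush: runs as a tasklet on each cpu that still has
 * entries, drops everything that is no longer valid, and signals the
 * waiter in flow_cache_flush() once the last cpu has finished.
 */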
static void flow_cache_flush_tasklet(unsigned long data)
{
	struct flow_flush_info *info = (void *)data;
	struct flow_cache *fc = info->cache;
	struct flow_cache_percpu *fcp;
	struct flow_cache_entry *fle;
	struct hlist_node *tmp;
	LIST_HEAD(gc_list);
	int i, deleted = 0;

	fcp = this_cpu_ptr(fc->percpu);
	for (i = 0; i < flow_cache_hash_size(fc); i++) {
		hlist_for_each_entry_safe(fle, tmp,
					  &fcp->hash_table[i], u.hlist) {
			if (flow_entry_valid(fle))
				continue;

			deleted++;
			hlist_del(&fle->u.hlist);
			list_add_tail(&fle->u.gc_list, &gc_list);
		}
	}

	flow_cache_queue_garbage(fcp, deleted, &gc_list);

	if (atomic_dec_and_test(&info->cpuleft))
		complete(&info->completion);
}

/*
 * Return whether a cpu needs flushing.  Conservatively, we assume
 * the presence of any entries means the core may require flushing,
 * since the flow_cache_ops.check() function may assume it's running
 * on the same core as the per-cpu cache component.
 */
static int flow_cache_percpu_empty(struct flow_cache *fc, int cpu)
{
	struct flow_cache_percpu *fcp;
	int i;

	fcp = per_cpu_ptr(fc->percpu, cpu);
	for (i = 0; i < flow_cache_hash_size(fc); i++)
		if (!hlist_empty(&fcp->hash_table[i]))
			return 0;
	return 1;
}

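/* Cross-cpu callback: schedule the local flush tasklet with the shared
 * flush info as its argument, so the actual table walk happens in softirq
 * context on the target cpu.
 */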
static void flow_cache_flush_per_cpu(void *data)
{
	struct flow_flush_info *info = data;
	struct tasklet_struct *tasklet;

	tasklet = &this_cpu_ptr(info->cache->percpu)->flush_tasklet;
	tasklet->data = (unsigned long)info;
	tasklet_schedule(tasklet);
}

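/* Synchronous flush of the whole cache.  A mutex serialises concurrent
 * flushers, get_online_cpus() keeps the cpu set stable, and only cpus
 * whose tables are non-empty are asked (via on_each_cpu_mask) to run the
 * flush tasklet; the local cpu runs it directly.  The caller then sleeps
 * on a completion until every targeted cpu has reported back, so this
 * must run in process context.
 */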
void flow_cache_flush(void)
{
	struct flow_flush_info info;
	static DEFINE_MUTEX(flow_flush_sem);
	cpumask_var_t mask;
	int i, self;

	/* Track which cpus need flushing to avoid disturbing all cores. */
	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return;
	cpumask_clear(mask);

	/* Don't want cpus going down or up during this. */
	get_online_cpus();
	mutex_lock(&flow_flush_sem);
	info.cache = &flow_cache_global;
	for_each_online_cpu(i)
		if (!flow_cache_percpu_empty(info.cache, i))
			cpumask_set_cpu(i, mask);
	atomic_set(&info.cpuleft, cpumask_weight(mask));
	if (atomic_read(&info.cpuleft) == 0)
		goto done;

	init_completion(&info.completion);

	local_bh_disable();
	self = cpumask_test_and_clear_cpu(smp_processor_id(), mask);
	on_each_cpu_mask(mask, flow_cache_flush_per_cpu, &info, 0);
	if (self)
		flow_cache_flush_tasklet((unsigned long)&info);
	local_bh_enable();

	wait_for_completion(&info.completion);

done:
	mutex_unlock(&flow_flush_sem);
	put_online_cpus();
	free_cpumask_var(mask);
}

static void flow_cache_flush_task(struct work_struct *work)
{
	flow_cache_flush();
}

static DECLARE_WORK(flow_cache_flush_work, flow_cache_flush_task);

void flow_cache_flush_deferred(void)
{
	schedule_work(&flow_cache_flush_work);
}

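/* Allocate and initialise one cpu's hash table on first use (boot or cpu
 * hotplug).  The table starts empty with a forced re-key so the cpu picks
 * its own hash secret on its first lookup.
 */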
static int __cpuinit flow_cache_cpu_prepare(struct flow_cache *fc, int cpu)
{
	struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, cpu);
	size_t sz = sizeof(struct hlist_head) * flow_cache_hash_size(fc);

	if (!fcp->hash_table) {
		fcp->hash_table = kzalloc_node(sz, GFP_KERNEL, cpu_to_node(cpu));
		if (!fcp->hash_table) {
			pr_err("NET: failed to allocate flow cache sz %zu\n", sz);
			return -ENOMEM;
		}
		fcp->hash_rnd_recalc = 1;
		fcp->hash_count = 0;
		tasklet_init(&fcp->flush_tasklet, flow_cache_flush_tasklet, 0);
	}
	return 0;
}

static int __cpuinit flow_cache_cpu(struct notifier_block *nfb,
			  unsigned long action,
			  void *hcpu)
{
	struct flow_cache *fc = container_of(nfb, struct flow_cache, hotcpu_notifier);
	int res, cpu = (unsigned long) hcpu;
	struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, cpu);

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		res = flow_cache_cpu_prepare(fc, cpu);
		if (res)
			return notifier_from_errno(res);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		__flow_cache_shrink(fc, fcp, 0);
		break;
	}
	return NOTIFY_OK;
}

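/* Bring up the global cache: 1 << 10 buckets per cpu, low/high watermarks
 * of 2 and 4 entries per bucket on average, per-cpu tables for every cpu
 * already online, a hotplug notifier for the rest, and the periodic
 * re-keying timer.
 */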
static int __init flow_cache_init(struct flow_cache *fc)
{
	int i;

	fc->hash_shift = 10;
	fc->low_watermark = 2 * flow_cache_hash_size(fc);
	fc->high_watermark = 4 * flow_cache_hash_size(fc);

	fc->percpu = alloc_percpu(struct flow_cache_percpu);
	if (!fc->percpu)
		return -ENOMEM;

	for_each_online_cpu(i) {
		if (flow_cache_cpu_prepare(fc, i))
			goto err;
	}
	fc->hotcpu_notifier = (struct notifier_block){
		.notifier_call = flow_cache_cpu,
	};
	register_hotcpu_notifier(&fc->hotcpu_notifier);

	setup_timer(&fc->rnd_timer, flow_cache_new_hashrnd,
		    (unsigned long) fc);
	fc->rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD;
	add_timer(&fc->rnd_timer);

	return 0;

err:
	for_each_possible_cpu(i) {
		struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, i);
		kfree(fcp->hash_table);
		fcp->hash_table = NULL;
	}

	free_percpu(fc->percpu);
	fc->percpu = NULL;

	return -ENOMEM;
}

static int __init flow_cache_init_global(void)
{
	flow_cachep = kmem_cache_create("flow_cache",
					sizeof(struct flow_cache_entry),
					0, SLAB_PANIC, NULL);

	return flow_cache_init(&flow_cache_global);
}

module_init(flow_cache_init_global);