/*
 * Quicklist support.
 *
 * Quicklists are lightweight lists of pages that have a defined state
 * on alloc and free. Pages must be in the quicklist-specific defined state
 * (zero by default) when the page is freed. It seems that the initial idea
 * for such lists first came from Dave Miller and then various other people
 * improved on it.
 *
 * Copyright (C) 2007 SGI,
 * 	Christoph Lameter <clameter@sgi.com>
 * 		Generalized, added support for multiple lists and
 * 		constructors / destructors.
 */
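/*
 * Illustrative sketch (not part of the original file): a typical user keeps
 * its page table pages on quicklist 0 so that freed pages come back in the
 * defined (zeroed) state.  The wrapper names below are hypothetical;
 * quicklist_alloc() and quicklist_free() are provided by <linux/quicklist.h>,
 * and the caller (or a dtor passed to quicklist_free()) is responsible for
 * returning the page to the defined state before freeing it:
 *
 *	static inline pgd_t *example_pgd_alloc(struct mm_struct *mm)
 *	{
 *		return quicklist_alloc(0, GFP_KERNEL, NULL);
 *	}
 *
 *	static inline void example_pgd_free(struct mm_struct *mm, pgd_t *pgd)
 *	{
 *		quicklist_free(0, NULL, pgd);
 *	}
 */
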
#include <linux/kernel.h>

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/quicklist.h>

DEFINE_PER_CPU(struct quicklist [CONFIG_NR_QUICK], quicklist);

#define FRACTION_OF_NODE_MEM	16

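/*
 * Upper bound on the number of pages a single per-cpu quicklist may hold:
 * a fraction (1/FRACTION_OF_NODE_MEM) of this node's free pages, divided
 * evenly among the CPUs on the node, but never less than min_pages.
 */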
static unsigned long max_pages(unsigned long min_pages)
{
	unsigned long node_free_pages, max;
	int node = numa_node_id();
	struct zone *zones = NODE_DATA(node)->node_zones;
	int num_cpus_on_node;

	node_free_pages =
#ifdef CONFIG_ZONE_DMA
		zone_page_state(&zones[ZONE_DMA], NR_FREE_PAGES) +
#endif
#ifdef CONFIG_ZONE_DMA32
		zone_page_state(&zones[ZONE_DMA32], NR_FREE_PAGES) +
#endif
		zone_page_state(&zones[ZONE_NORMAL], NR_FREE_PAGES);

	max = node_free_pages / FRACTION_OF_NODE_MEM;

	num_cpus_on_node = cpumask_weight(cpumask_of_node(node));
	max /= num_cpus_on_node;

	return max(max, min_pages);
}

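/*
 * Number of pages to release from this quicklist: everything above the
 * per-cpu maximum, but no more than max_free in a single pass.
 */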
static long min_pages_to_free(struct quicklist *q,
	unsigned long min_pages, long max_free)
{
	long pages_to_free;

	pages_to_free = q->nr_pages - max_pages(min_pages);

	return min(pages_to_free, max_free);
}

/*
 * Trim down the number of pages in the quicklist
 */
void quicklist_trim(int nr, void (*dtor)(void *),
	unsigned long min_pages, unsigned long max_free)
{
	long pages_to_free;
	struct quicklist *q;

	q = &get_cpu_var(quicklist)[nr];
	if (q->nr_pages > min_pages) {
		pages_to_free = min_pages_to_free(q, min_pages, max_free);

		while (pages_to_free > 0) {
			/*
			 * We pass a gfp_t of 0 to quicklist_alloc here
			 * because we will never call into the page allocator.
			 */
			void *p = quicklist_alloc(nr, 0, NULL);

			if (dtor)
				dtor(p);
			free_page((unsigned long)p);
			pages_to_free--;
		}
	}
	put_cpu_var(quicklist);
}
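
/*
 * Illustrative sketch (not part of the original file): architectures that
 * use quicklists typically trim them from a periodic hook, along the lines
 * of the example below (the hook name and the min_pages/max_free values
 * are examples only):
 *
 *	void check_pgt_cache(void)
 *	{
 *		quicklist_trim(0, NULL, 25, 16);
 *	}
 */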

/*
 * Total number of pages currently held on the quicklists of all online
 * CPUs (e.g. for the "Quicklists" line in /proc/meminfo).
 */
unsigned long quicklist_total_size(void)
{
	unsigned long count = 0;
	int cpu;
	struct quicklist *ql, *q;

	for_each_online_cpu(cpu) {
		ql = per_cpu(quicklist, cpu);
		for (q = ql; q < ql + CONFIG_NR_QUICK; q++)
			count += q->nr_pages;
	}
	return count;
}
