/*
 * mm/prio_tree.c - priority search tree for mapping->i_mmap
 *
 * Copyright (C) 2004, Rajesh Venkatasubramanian <vrajesh@umich.edu>
 *
 * This file is released under the GPL v2.
 *
 * Based on the radix priority search tree proposed by Edward M. McCreight
 * SIAM Journal of Computing, vol. 14, no.2, pages 257-276, May 1985
 *
 * 02Feb2004	Initial version
 */

#include <linux/mm.h>
#include <linux/prio_tree.h>

/*
 * See lib/prio_tree.c for details on the general radix priority search tree
 * code.
 */

/*
 * The following #defines are mirrored from lib/prio_tree.c. They're only used
 * for debugging, and should be removed (along with the debugging code using
 * them) when VMAs, too, are switched over to the regular prio_tree code.
 */

#define RADIX_INDEX(vma)  ((vma)->vm_pgoff)
#define VMA_SIZE(vma)	  (((vma)->vm_end - (vma)->vm_start) >> PAGE_SHIFT)
/* heap_index is the vma's last file page, not one past it: avoids overflow */
#define HEAP_INDEX(vma)   ((vma)->vm_pgoff + (VMA_SIZE(vma) - 1))
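
/*
 * Worked example (not in the original file): a vma covering three file pages
 * starting at vm_pgoff 4 has RADIX_INDEX 4 and HEAP_INDEX 4 + (3 - 1) = 6,
 * i.e. the indices of its first and last file page.
 */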

/*
 * Radix priority search tree for address_space->i_mmap
 *
 * For each vma that maps a unique set of file pages, i.e., a unique
 * [radix_index, heap_index] value, we have a corresponding priority search
 * tree node. If multiple vmas have identical [radix_index, heap_index]
 * values, then one of them is used as a tree node and the others are stored
 * in a vm_set list. The tree node points to the first vma (head) of the list
 * using vm_set.head.
 *
 * prio_tree_root
 *      |
 *      A       vm_set.head
 *     / \      /
 *    L   R -> H-I-J-K-M-N-O-P-Q-S
 *    ^   ^    <-- vm_set.list -->
 *  tree nodes
 *
 * We need some way to identify whether a vma is a tree node, the head of a
 * vm_set list, or just a member of a vm_set list. We cannot use vm_flags to
 * store such information, because, in the figure above, the vm_flags of R
 * and H may be protected by different mmap_sems. When R is removed under
 * R->mmap_sem, H replaces R as a tree node. Since we do not hold H->mmap_sem,
 * we cannot use H->vm_flags to mark that H is now a tree node. That is why a
 * trick involving shared.vm_set.parent is used to identify tree nodes and
 * list head nodes; the rules are listed below and restated in code right
 * after this comment.
 *
 * vma radix priority search tree node rules:
 *
 * vma->shared.vm_set.parent != NULL    ==> a tree node
 *      vma->shared.vm_set.head != NULL ==> list of others mapping same range
 *      vma->shared.vm_set.head == NULL ==> no others map the same range
 *
 * vma->shared.vm_set.parent == NULL
 *	vma->shared.vm_set.head != NULL ==> list head of vmas mapping same range
 *	vma->shared.vm_set.head == NULL ==> a list node
 */
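
/*
 * Illustrative only, not part of the original file: a minimal sketch that
 * restates the rules above as predicates on the shared.vm_set fields of a
 * vma that is already linked into an i_mmap tree.  The helper names are
 * hypothetical and nothing in this file uses them; they exist purely to make
 * the classification explicit.
 */
static inline int vma_is_tree_node(struct vm_area_struct *vma)
{
	return vma->shared.vm_set.parent != NULL;
}

static inline int vma_is_vm_set_head(struct vm_area_struct *vma)
{
	return !vma->shared.vm_set.parent && vma->shared.vm_set.head != NULL;
}

static inline int vma_is_vm_set_member(struct vm_area_struct *vma)
{
	return !vma->shared.vm_set.parent && !vma->shared.vm_set.head;
}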

/*
 * Add a new vma known to map the same set of pages as the old vma:
 * useful for fork's dup_mmap as well as vma_prio_tree_insert below.
 * Note that it just happens to work correctly on i_mmap_nonlinear too.
 * An illustrative calling sketch follows the function.
 */
void vma_prio_tree_add(struct vm_area_struct *vma, struct vm_area_struct *old)
{
	/* Leave these BUG_ONs till prio_tree patch stabilizes */
	BUG_ON(RADIX_INDEX(vma) != RADIX_INDEX(old));
	BUG_ON(HEAP_INDEX(vma) != HEAP_INDEX(old));

	vma->shared.vm_set.head = NULL;
	vma->shared.vm_set.parent = NULL;

	if (!old->shared.vm_set.parent)
		/* old is already on a vm_set list: add vma right after it */
		list_add(&vma->shared.vm_set.list,
				&old->shared.vm_set.list);
	else if (old->shared.vm_set.head)
		/* old is a tree node with a vm_set list: append to that list */
		list_add_tail(&vma->shared.vm_set.list,
				&old->shared.vm_set.head->shared.vm_set.list);
	else {
		/* old is a lone tree node: vma becomes its vm_set list head */
		INIT_LIST_HEAD(&vma->shared.vm_set.list);
		vma->shared.vm_set.head = old;
		old->shared.vm_set.head = vma;
	}
}
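
/*
 * Illustrative only, not part of the original file: a minimal sketch of the
 * vma_prio_tree_add() calling convention, e.g. from fork's dup_mmap.  The new
 * vma must already map exactly the same file pages as the old one (equal
 * RADIX_INDEX and HEAP_INDEX), the caller is assumed to hold the mapping's
 * i_mmap lock, and the function name below is hypothetical.
 */
static inline void example_share_identical_range(struct vm_area_struct *copy,
						 struct vm_area_struct *orig)
{
	vma_prio_tree_add(copy, orig);
}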

void vma_prio_tree_insert(struct vm_area_struct *vma,
			  struct prio_tree_root *root)
{
	struct prio_tree_node *ptr;
	struct vm_area_struct *old;

	vma->shared.vm_set.head = NULL;

	ptr = raw_prio_tree_insert(root, &vma->shared.prio_tree_node);
	if (ptr != (struct prio_tree_node *) &vma->shared.prio_tree_node) {
		/*
		 * An identically indexed vma is already in the tree:
		 * chain the new vma onto it instead.
		 */
		old = prio_tree_entry(ptr, struct vm_area_struct,
					shared.prio_tree_node);
		vma_prio_tree_add(vma, old);
	}
}

void vma_prio_tree_remove(struct vm_area_struct *vma,
			  struct prio_tree_root *root)
{
	struct vm_area_struct *node, *head, *new_head;

	if (!vma->shared.vm_set.head) {
		/*
		 * No vm_set list hangs off vma: it is either a plain list
		 * member or a tree node that nothing else shares.
		 */
		if (!vma->shared.vm_set.parent)
			list_del_init(&vma->shared.vm_set.list);
		else
			raw_prio_tree_remove(root, &vma->shared.prio_tree_node);
	} else {
		/* Leave this BUG_ON till prio_tree patch stabilizes */
		BUG_ON(vma->shared.vm_set.head->shared.vm_set.head != vma);
		if (vma->shared.vm_set.parent) {
			/*
			 * vma is a tree node with a vm_set list: promote the
			 * list head to take vma's place in the tree.
			 */
			head = vma->shared.vm_set.head;
			if (!list_empty(&head->shared.vm_set.list)) {
				new_head = list_entry(
					head->shared.vm_set.list.next,
					struct vm_area_struct,
					shared.vm_set.list);
				list_del_init(&head->shared.vm_set.list);
			} else
				new_head = NULL;

			raw_prio_tree_replace(root, &vma->shared.prio_tree_node,
					&head->shared.prio_tree_node);
			head->shared.vm_set.head = new_head;
			if (new_head)
				new_head->shared.vm_set.head = head;

		} else {
			/*
			 * vma is the vm_set list head and node is the tree
			 * node it backs: hand headship to the next list
			 * member, or clear it if vma was the only member.
			 */
			node = vma->shared.vm_set.head;
			if (!list_empty(&vma->shared.vm_set.list)) {
				new_head = list_entry(
					vma->shared.vm_set.list.next,
					struct vm_area_struct,
					shared.vm_set.list);
				list_del_init(&vma->shared.vm_set.list);
				node->shared.vm_set.head = new_head;
				new_head->shared.vm_set.head = node;
			} else
				node->shared.vm_set.head = NULL;
		}
	}
}

/*
 * Helper function to enumerate vmas that map a given file page or a set of
 * contiguous file pages. The function returns vmas that map at least one
 * page in the given range of contiguous file pages. An illustrative caller
 * is sketched after the function.
 */
struct vm_area_struct *vma_prio_tree_next(struct vm_area_struct *vma,
					struct prio_tree_iter *iter)
{
	struct prio_tree_node *ptr;
	struct vm_area_struct *next;

	if (!vma) {
		/*
		 * First call is with NULL vma
		 */
		ptr = prio_tree_next(iter);
		if (ptr) {
			next = prio_tree_entry(ptr, struct vm_area_struct,
						shared.prio_tree_node);
			prefetch(next->shared.vm_set.head);
			return next;
		} else
			return NULL;
	}

	if (vma->shared.vm_set.parent) {
		/* vma is a tree node: descend into its vm_set list, if any */
		if (vma->shared.vm_set.head) {
			next = vma->shared.vm_set.head;
			prefetch(next->shared.vm_set.list.next);
			return next;
		}
	} else {
		/* vma is on a vm_set list: return the next member... */
		next = list_entry(vma->shared.vm_set.list.next,
				struct vm_area_struct, shared.vm_set.list);
		if (!next->shared.vm_set.head) {
			prefetch(next->shared.vm_set.list.next);
			return next;
		}
		/* ...unless we wrapped back around to the list head */
	}

	/* done with this [radix_index, heap_index]: advance the tree iterator */
	ptr = prio_tree_next(iter);
	if (ptr) {
		next = prio_tree_entry(ptr, struct vm_area_struct,
					shared.prio_tree_node);
		prefetch(next->shared.vm_set.head);
		return next;
	} else
		return NULL;
}
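
/*
 * Illustrative only, not part of the original file: a minimal sketch of how a
 * caller typically walks every vma mapping a range of file pages, using the
 * vma_prio_tree_foreach() wrapper from include/linux/mm.h, which drives
 * vma_prio_tree_next() above.  The caller is assumed to hold the mapping's
 * i_mmap lock, and the function name below is hypothetical.
 */
static inline unsigned long example_count_mappers(struct prio_tree_root *root,
						  pgoff_t first, pgoff_t last)
{
	struct vm_area_struct *vma;
	struct prio_tree_iter iter;
	unsigned long count = 0;

	vma_prio_tree_foreach(vma, &iter, root, first, last) {
		/* vma maps at least one page in [first, last] */
		count++;
	}
	return count;
}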