// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * Copyright (c) 2012 David Airlie <airlied@linux.ie>
 * Copyright (c) 2013 David Herrmann <dh.herrmann@gmail.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>

#include <drm/drm_mm.h>
#include <drm/drm_vma_manager.h>

/**
 * DOC: vma offset manager
 *
 * The vma-manager is responsible for mapping arbitrary driver-dependent
 * memory regions into the linear user address-space. It provides offsets to
 * the caller, which can then be used on the address_space of the drm-device.
 * It takes care not to overlap regions, to size them appropriately, and not
 * to confuse mm-core with inconsistent fake vm_pgoff fields.
 * Drivers shouldn't use this for object placement in VMEM. This manager
 * should only be used to manage mappings into linear user-space VMs.
 *
 * We use drm_mm as the backend to manage object allocations. But it is
 * highly optimized for alloc/free calls, not lookups. Hence, we use an
 * rb-tree to speed up offset lookups.
 *
 * You must not use multiple offset managers on a single address_space.
 * Otherwise, mm-core will be unable to tear down memory mappings as the VM
 * will no longer be linear.
 *
 * This offset manager works on page-based addresses. That is, every argument
 * and return value (with the exception of drm_vma_node_offset_addr()) is
 * given in number of pages, not number of bytes. That means object sizes and
 * offsets must always be page-aligned (as usual).
 * If you want a valid byte-based user-space address for a given offset,
 * please see drm_vma_node_offset_addr().
 *
 * In addition to offset management, the vma offset manager also handles
 * access management. For every open-file context that is allowed to access a
 * given node, you must call drm_vma_node_allow(). Otherwise, an mmap() call
 * on this open-file with the offset of the node will fail with -EACCES. To
 * revoke access again, use drm_vma_node_revoke(). However, the caller is
 * responsible for destroying already existing mappings, if required.
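 *
 * A minimal end-to-end sketch (obj, vma_node, npages and file_priv are
 * hypothetical driver-side names):
 *
 * ::
 *
 *     ret = drm_vma_offset_add(mgr, &obj->vma_node, obj->npages);
 *     if (ret)
 *         return ret;
 *
 *     ret = drm_vma_node_allow(&obj->vma_node, file_priv);
 *     if (ret)
 *         return ret;
 *
 *     offset = drm_vma_node_offset_addr(&obj->vma_node);
 *
 * The byte-based offset can then be handed to user-space for a subsequent
 * mmap(2) call on the drm-device.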
 */

/**
 * drm_vma_offset_manager_init - Initialize new offset-manager
 * @mgr: Manager object
 * @page_offset: Offset of available memory area (page-based)
 * @size: Size of available address space range (page-based)
 *
 * Initialize a new offset-manager. The offset and area size available for the
 * manager are given as @page_offset and @size. Both are interpreted as
 * page-numbers, not bytes.
 *
 * Adding/removing nodes from the manager is locked internally and protected
 * against concurrent access. However, node allocation and destruction are
 * left to the caller. While calling into the vma-manager, a given node must
 * always be guaranteed to be referenced.
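 *
 * A minimal sketch, assuming the page-based DRM_FILE_PAGE_OFFSET_* defaults
 * from include/drm/drm_vma_manager.h:
 *
 * ::
 *
 *     drm_vma_offset_manager_init(&mgr, DRM_FILE_PAGE_OFFSET_START,
 *                                 DRM_FILE_PAGE_OFFSET_SIZE);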
 */
void drm_vma_offset_manager_init(struct drm_vma_offset_manager *mgr,
				 unsigned long page_offset, unsigned long size)
{
	rwlock_init(&mgr->vm_lock);
	drm_mm_init(&mgr->vm_addr_space_mm, page_offset, size);
}
EXPORT_SYMBOL(drm_vma_offset_manager_init);

/**
 * drm_vma_offset_manager_destroy() - Destroy offset manager
 * @mgr: Manager object
 *
 * Destroy an offset manager that was previously created via
 * drm_vma_offset_manager_init(). The caller must remove all allocated nodes
 * before destroying the manager. Otherwise, drm_mm will refuse to free the
 * requested resources.
 *
 * The manager must not be accessed after this function is called.
 */
void drm_vma_offset_manager_destroy(struct drm_vma_offset_manager *mgr)
{
	drm_mm_takedown(&mgr->vm_addr_space_mm);
}
EXPORT_SYMBOL(drm_vma_offset_manager_destroy);

/**
 * drm_vma_offset_lookup_locked() - Find node in offset space
 * @mgr: Manager object
 * @start: Start address for object (page-based)
 * @pages: Size of object (page-based)
 *
 * Find a node given a start address and object size. This returns the best
 * match for the requested range. That is, @start may point somewhere into a
 * valid region, and that region's node is returned, as long as the node
 * spans the whole requested area (with the size given in number of pages as
 * @pages).
 *
 * Note that the vma offset manager's lookup lock must be acquired with
 * drm_vma_offset_lock_lookup() before the lookup. This can then be used to
 * implement weakly referenced lookups via kref_get_unless_zero(), as in the
 * example below.
 *
 * Example (obj, vma_node and kref are placeholder names for the embedding
 * object):
 *
 * ::
 *
 *     drm_vma_offset_lock_lookup(mgr);
 *     node = drm_vma_offset_lookup_locked(mgr, start, pages);
 *     if (node)
 *         kref_get_unless_zero(&container_of(node, struct obj,
 *                                            vma_node)->kref);
 *     drm_vma_offset_unlock_lookup(mgr);
 *
 * RETURNS:
 * Returns NULL if no suitable node can be found. Otherwise, the best match
 * is returned. It's the caller's responsibility to make sure the node doesn't
 * get destroyed before the caller can access it.
 */
struct drm_vma_offset_node *drm_vma_offset_lookup_locked(struct drm_vma_offset_manager *mgr,
							 unsigned long start,
							 unsigned long pages)
{
	struct drm_mm_node *node, *best;
	struct rb_node *iter;
	unsigned long offset;

	iter = mgr->vm_addr_space_mm.interval_tree.rb_root.rb_node;
	best = NULL;

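	/* Descend the interval tree to the last node that starts at or before
	 * @start; only such a node can span the requested range. */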
	while (likely(iter)) {
		node = rb_entry(iter, struct drm_mm_node, rb);
		offset = node->start;
		if (start >= offset) {
			iter = iter->rb_right;
			best = node;
			if (start == offset)
				break;
		} else {
			iter = iter->rb_left;
		}
	}

	/* verify that the node spans the requested area */
	if (best) {
		offset = best->start + best->size;
		if (offset < start + pages)
			best = NULL;
	}

	if (!best)
		return NULL;

	return container_of(best, struct drm_vma_offset_node, vm_node);
}
EXPORT_SYMBOL(drm_vma_offset_lookup_locked);

/**
 * drm_vma_offset_add() - Add offset node to manager
 * @mgr: Manager object
 * @node: Node to be added
 * @pages: Allocation size visible to user-space (in number of pages)
 *
 * Add a node to the offset-manager. If the node was already added, this does
 * nothing and returns 0. @pages is the size of the object given in number of
 * pages.
 * After this call succeeds, you can access the offset of the node until it
 * is removed again.
 *
 * If this call fails, it is safe to retry the operation or to call
 * drm_vma_offset_remove() anyway; no cleanup is required in that case.
 *
 * @pages is not required to be the same size as the underlying memory object
 * that you want to map. It only limits the size that user-space can map into
 * its address space.
 *
 * RETURNS:
 * 0 on success, negative error code on failure.
 */
int drm_vma_offset_add(struct drm_vma_offset_manager *mgr,
		       struct drm_vma_offset_node *node, unsigned long pages)
{
	int ret = 0;

	write_lock(&mgr->vm_lock);

	if (!drm_mm_node_allocated(&node->vm_node))
		ret = drm_mm_insert_node(&mgr->vm_addr_space_mm,
					 &node->vm_node, pages);

	write_unlock(&mgr->vm_lock);

	return ret;
}
EXPORT_SYMBOL(drm_vma_offset_add);

/**
 * drm_vma_offset_remove() - Remove offset node from manager
 * @mgr: Manager object
 * @node: Node to be removed
 *
 * Remove a node from the offset manager. If the node wasn't added before, this
 * does nothing. After this call returns, the offset and size will be 0 until a
 * new offset is allocated via drm_vma_offset_add() again. Helper functions like
 * drm_vma_node_start() and drm_vma_node_offset_addr() will return 0 if no
 * offset is allocated.
 */
void drm_vma_offset_remove(struct drm_vma_offset_manager *mgr,
			   struct drm_vma_offset_node *node)
{
	write_lock(&mgr->vm_lock);

	if (drm_mm_node_allocated(&node->vm_node)) {
		drm_mm_remove_node(&node->vm_node);
		memset(&node->vm_node, 0, sizeof(node->vm_node));
	}

	write_unlock(&mgr->vm_lock);
}
EXPORT_SYMBOL(drm_vma_offset_remove);

/**
 * drm_vma_node_allow - Add open-file to list of allowed users
 * @node: Node to modify
 * @tag: Tag of file to add
 *
 * Add @tag to the list of allowed open-files for this node. If @tag is
 * already on this list, the ref-count is incremented.
 *
 * The list of allowed-users is preserved across drm_vma_offset_add() and
 * drm_vma_offset_remove() calls. You may even call it if the node is currently
 * not added to any offset-manager.
 *
 * You must remove all open-files the same number of times as you added them
 * before destroying the node. Otherwise, you will leak memory.
 *
 * This is locked against concurrent access internally.
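 *
 * A typical pairing (a sketch; obj and file_priv are hypothetical
 * driver-side names) grants access when an open-file gains a handle to the
 * object and revokes it again when the handle is released:
 *
 * ::
 *
 *     ret = drm_vma_node_allow(&obj->vma_node, file_priv);
 *     if (ret)
 *         return ret;
 *
 *     ...
 *
 *     drm_vma_node_revoke(&obj->vma_node, file_priv);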
 *
 * RETURNS:
 * 0 on success, negative error code on internal failure (out of memory)
 */
int drm_vma_node_allow(struct drm_vma_offset_node *node, struct drm_file *tag)
{
	struct rb_node **iter;
	struct rb_node *parent = NULL;
	struct drm_vma_offset_file *new, *entry;
	int ret = 0;

	/* Preallocate entry to avoid atomic allocations below. It is quite
	 * unlikely that an open-file is added twice to a single node so we
	 * don't optimize for this case. OOM is checked below only if the entry
	 * is actually used. */
	new = kmalloc(sizeof(*entry), GFP_KERNEL);

	write_lock(&node->vm_lock);

	iter = &node->vm_files.rb_node;

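	/* Walk the rb-tree of open-files and bump the ref-count if @tag is
	 * already present. */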
	while (likely(*iter)) {
		parent = *iter;
		entry = rb_entry(*iter, struct drm_vma_offset_file, vm_rb);

		if (tag == entry->vm_tag) {
			entry->vm_count++;
			goto unlock;
		} else if (tag > entry->vm_tag) {
			iter = &(*iter)->rb_right;
		} else {
			iter = &(*iter)->rb_left;
		}
	}

	if (!new) {
		ret = -ENOMEM;
		goto unlock;
	}

	new->vm_tag = tag;
	new->vm_count = 1;
	rb_link_node(&new->vm_rb, parent, iter);
	rb_insert_color(&new->vm_rb, &node->vm_files);
	new = NULL;

unlock:
	write_unlock(&node->vm_lock);
	kfree(new);
	return ret;
}
EXPORT_SYMBOL(drm_vma_node_allow);

/**
 * drm_vma_node_revoke - Remove open-file from list of allowed users
 * @node: Node to modify
 * @tag: Tag of file to remove
 *
 * Decrement the ref-count of @tag in the list of allowed open-files on @node.
 * If the ref-count drops to zero, remove @tag from the list. You must call
 * this once for every drm_vma_node_allow() on @tag.
 *
 * This is locked against concurrent access internally.
 *
 * If @tag is not on the list, nothing is done.
 */
void drm_vma_node_revoke(struct drm_vma_offset_node *node,
			 struct drm_file *tag)
{
	struct drm_vma_offset_file *entry;
	struct rb_node *iter;

	write_lock(&node->vm_lock);

	iter = node->vm_files.rb_node;
	while (likely(iter)) {
		entry = rb_entry(iter, struct drm_vma_offset_file, vm_rb);
		if (tag == entry->vm_tag) {
			if (!--entry->vm_count) {
				rb_erase(&entry->vm_rb, &node->vm_files);
				kfree(entry);
			}
			break;
		} else if (tag > entry->vm_tag) {
			iter = iter->rb_right;
		} else {
			iter = iter->rb_left;
		}
	}

	write_unlock(&node->vm_lock);
}
EXPORT_SYMBOL(drm_vma_node_revoke);

/**
 * drm_vma_node_is_allowed - Check whether an open-file is granted access
 * @node: Node to check
 * @tag: Tag of file to check
 *
 * Search @node's list of allowed open-files for @tag (see
 * drm_vma_node_allow()).
 *
 * This is locked against concurrent access internally.
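 *
 * A sketch of typical use in a driver's mmap path, where priv stands in for
 * the caller's &struct drm_file:
 *
 * ::
 *
 *     if (!drm_vma_node_is_allowed(node, priv))
 *         return -EACCES;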
 *
 * RETURNS:
 * true if @tag is on the list
 */
bool drm_vma_node_is_allowed(struct drm_vma_offset_node *node,
			     struct drm_file *tag)
{
	struct drm_vma_offset_file *entry;
	struct rb_node *iter;

	read_lock(&node->vm_lock);

	iter = node->vm_files.rb_node;
	while (likely(iter)) {
		entry = rb_entry(iter, struct drm_vma_offset_file, vm_rb);
		if (tag == entry->vm_tag)
			break;
		else if (tag > entry->vm_tag)
			iter = iter->rb_right;
		else
			iter = iter->rb_left;
	}

	read_unlock(&node->vm_lock);

	return iter;
}
EXPORT_SYMBOL(drm_vma_node_is_allowed);