1 /**
2  * \file drm_bufs.c
3  * Generic buffer template
4  *
5  * \author Rickard E. (Rik) Faith <faith@valinux.com>
6  * \author Gareth Hughes <gareth@valinux.com>
7  */
8 
9 /*
10  * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
11  *
12  * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
13  * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
14  * All Rights Reserved.
15  *
16  * Permission is hereby granted, free of charge, to any person obtaining a
17  * copy of this software and associated documentation files (the "Software"),
18  * to deal in the Software without restriction, including without limitation
19  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
20  * and/or sell copies of the Software, and to permit persons to whom the
21  * Software is furnished to do so, subject to the following conditions:
22  *
23  * The above copyright notice and this permission notice (including the next
24  * paragraph) shall be included in all copies or substantial portions of the
25  * Software.
26  *
27  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
28  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
29  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
30  * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
31  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
32  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
33  * OTHER DEALINGS IN THE SOFTWARE.
34  */
35 
36 #include <linux/vmalloc.h>
37 #include "drmP.h"
38 
39 unsigned long drm_get_resource_start(struct drm_device *dev, unsigned int resource)
40 {
41 	return pci_resource_start(dev->pdev, resource);
42 }
43 EXPORT_SYMBOL(drm_get_resource_start);
44 
45 unsigned long drm_get_resource_len(struct drm_device *dev, unsigned int resource)
46 {
47 	return pci_resource_len(dev->pdev, resource);
48 }
49 
50 EXPORT_SYMBOL(drm_get_resource_len);
51 
52 static struct drm_map_list *drm_find_matching_map(struct drm_device *dev,
53 					     drm_local_map_t *map)
54 {
55 	struct drm_map_list *entry;
56 	list_for_each_entry(entry, &dev->maplist, head) {
57 		if (entry->map && (entry->master == dev->primary->master) && (map->type == entry->map->type) &&
58 		    ((entry->map->offset == map->offset) ||
59 		     ((map->type == _DRM_SHM) && (map->flags&_DRM_CONTAINS_LOCK)))) {
60 			return entry;
61 		}
62 	}
63 
64 	return NULL;
65 }
66 
67 static int drm_map_handle(struct drm_device *dev, struct drm_hash_item *hash,
68 			  unsigned long user_token, int hashed_handle)
69 {
70 	int use_hashed_handle;
71 #if (BITS_PER_LONG == 64)
72 	use_hashed_handle = ((user_token & 0xFFFFFFFF00000000UL) || hashed_handle);
73 #elif (BITS_PER_LONG == 32)
74 	use_hashed_handle = hashed_handle;
75 #else
76 #error Unsupported long size. Neither 64 nor 32 bits.
77 #endif
78 
79 	if (!use_hashed_handle) {
80 		int ret;
81 		hash->key = user_token >> PAGE_SHIFT;
82 		ret = drm_ht_insert_item(&dev->map_hash, hash);
83 		if (ret != -EINVAL)
84 			return ret;
85 	}
86 	return drm_ht_just_insert_please(&dev->map_hash, hash,
87 					 user_token, 32 - PAGE_SHIFT - 3,
88 					 0, DRM_MAP_HASH_OFFSET >> PAGE_SHIFT);
89 }
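
/*
 * Illustrative note (not part of the driver logic itself): drm_map_handle()
 * above picks the 32-bit token that user space later passes as the mmap()
 * offset.  When the map offset fits in the low 32 bits, the token is simply
 * its page number; otherwise a free slot is borrowed from the handle hash,
 * seeded at DRM_MAP_HASH_OFFSET.  A rough example, assuming 4 KiB pages
 * (PAGE_SHIFT == 12):
 *
 *     user_token = 0xd0000000   ->  hash->key = 0xd0000000 >> 12 = 0xd0000
 *     user_token = 0x1d0000000  ->  does not fit in 32 bits, so a hashed
 *                                   handle is allocated instead
 */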
90 
91 /**
92  * Ioctl to specify a range of memory that is available for mapping by a non-root process.
93  *
94  * \param inode device inode.
95  * \param file_priv DRM file private.
96  * \param cmd command.
97  * \param arg pointer to a drm_map structure.
98  * \return zero on success or a negative value on error.
99  *
100  * Adjusts the memory offset to its absolute value according to the mapping
101  * type.  Adds the map to the map list drm_device::maplist. Adds MTRRs where
102  * applicable and if supported by the kernel.
103  */
104 static int drm_addmap_core(struct drm_device * dev, unsigned int offset,
105 			   unsigned int size, enum drm_map_type type,
106 			   enum drm_map_flags flags,
107 			   struct drm_map_list ** maplist)
108 {
109 	struct drm_map *map;
110 	struct drm_map_list *list;
111 	drm_dma_handle_t *dmah;
112 	unsigned long user_token;
113 	int ret;
114 
115 	map = drm_alloc(sizeof(*map), DRM_MEM_MAPS);
116 	if (!map)
117 		return -ENOMEM;
118 
119 	map->offset = offset;
120 	map->size = size;
121 	map->flags = flags;
122 	map->type = type;
123 
124 	/* Only allow shared memory to be removable since we only keep enough
125 	 * bookkeeping information about shared memory to allow for removal
126 	 * when processes fork.
127 	 */
128 	if ((map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM) {
129 		drm_free(map, sizeof(*map), DRM_MEM_MAPS);
130 		return -EINVAL;
131 	}
132 	DRM_DEBUG("offset = 0x%08lx, size = 0x%08lx, type = %d\n",
133 		  map->offset, map->size, map->type);
134 	if ((map->offset & (~PAGE_MASK)) || (map->size & (~PAGE_MASK))) {
135 		drm_free(map, sizeof(*map), DRM_MEM_MAPS);
136 		return -EINVAL;
137 	}
138 	map->mtrr = -1;
139 	map->handle = NULL;
140 
141 	switch (map->type) {
142 	case _DRM_REGISTERS:
143 	case _DRM_FRAME_BUFFER:
144 #if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__) && !defined(__powerpc64__) && !defined(__x86_64__)
145 		if (map->offset + (map->size-1) < map->offset ||
146 		    map->offset < virt_to_phys(high_memory)) {
147 			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
148 			return -EINVAL;
149 		}
150 #endif
151 #ifdef __alpha__
152 		map->offset += dev->hose->mem_space->start;
153 #endif
154 		/* Some drivers preinitialize some maps, without the X Server
155 		 * needing to be aware of it.  Therefore, we just return success
156 		 * when the server tries to create a duplicate map.
157 		 */
158 		list = drm_find_matching_map(dev, map);
159 		if (list != NULL) {
160 			if (list->map->size != map->size) {
161 				DRM_DEBUG("Matching maps of type %d with "
162 					  "mismatched sizes, (%ld vs %ld)\n",
163 					  map->type, map->size,
164 					  list->map->size);
165 				list->map->size = map->size;
166 			}
167 
168 			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
169 			*maplist = list;
170 			return 0;
171 		}
172 
173 		if (drm_core_has_MTRR(dev)) {
174 			if (map->type == _DRM_FRAME_BUFFER ||
175 			    (map->flags & _DRM_WRITE_COMBINING)) {
176 				map->mtrr = mtrr_add(map->offset, map->size,
177 						     MTRR_TYPE_WRCOMB, 1);
178 			}
179 		}
180 		if (map->type == _DRM_REGISTERS) {
181 			map->handle = ioremap(map->offset, map->size);
182 			if (!map->handle) {
183 				drm_free(map, sizeof(*map), DRM_MEM_MAPS);
184 				return -ENOMEM;
185 			}
186 		}
187 
188 		break;
189 	case _DRM_SHM:
190 		list = drm_find_matching_map(dev, map);
191 		if (list != NULL) {
192 			if(list->map->size != map->size) {
193 				DRM_DEBUG("Matching maps of type %d with "
194 					  "mismatched sizes, (%ld vs %ld)\n",
195 					  map->type, map->size, list->map->size);
196 				list->map->size = map->size;
197 			}
198 
199 			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
200 			*maplist = list;
201 			return 0;
202 		}
203 		map->handle = vmalloc_user(map->size);
204 		DRM_DEBUG("%lu %d %p\n",
205 			  map->size, drm_order(map->size), map->handle);
206 		if (!map->handle) {
207 			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
208 			return -ENOMEM;
209 		}
210 		map->offset = (unsigned long)map->handle;
211 		if (map->flags & _DRM_CONTAINS_LOCK) {
212 			/* Prevent a 2nd X Server from creating a 2nd lock */
213 			if (dev->primary->master->lock.hw_lock != NULL) {
214 				vfree(map->handle);
215 				drm_free(map, sizeof(*map), DRM_MEM_MAPS);
216 				return -EBUSY;
217 			}
218 			dev->sigdata.lock = dev->primary->master->lock.hw_lock = map->handle;	/* Pointer to lock */
219 		}
220 		break;
221 	case _DRM_AGP: {
222 		struct drm_agp_mem *entry;
223 		int valid = 0;
224 
225 		if (!drm_core_has_AGP(dev)) {
226 			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
227 			return -EINVAL;
228 		}
229 #ifdef __alpha__
230 		map->offset += dev->hose->mem_space->start;
231 #endif
232 		/* In some cases (i810 driver), user space may have already
233 		 * added the AGP base itself, because dev->agp->base previously
234 		 * only got set during AGP enable.  So, only add the base
235 		 * address if the map's offset isn't already within the
236 		 * aperture.
237 		 */
238 		if (map->offset < dev->agp->base ||
239 		    map->offset > dev->agp->base +
240 		    dev->agp->agp_info.aper_size * 1024 * 1024 - 1) {
241 			map->offset += dev->agp->base;
242 		}
243 		map->mtrr = dev->agp->agp_mtrr;	/* for getmap */
244 
245 		/* This assumes the DRM is in total control of AGP space.
246 		 * That is not always the case, as AGP can be under the control
247 		 * of user space (e.g. the i810 driver), in which case this loop
248 		 * is skipped; we only return -EPERM when dev->agp->memory is
249 		 * actually populated and the map falls outside every bound region.
250 		 */
251 		list_for_each_entry(entry, &dev->agp->memory, head) {
252 			if ((map->offset >= entry->bound) &&
253 			    (map->offset + map->size <= entry->bound + entry->pages * PAGE_SIZE)) {
254 				valid = 1;
255 				break;
256 			}
257 		}
258 		if (!list_empty(&dev->agp->memory) && !valid) {
259 			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
260 			return -EPERM;
261 		}
262 		DRM_DEBUG("AGP offset = 0x%08lx, size = 0x%08lx\n", map->offset, map->size);
263 
264 		break;
265 	}
266 	case _DRM_GEM:
267 		DRM_ERROR("tried to addmap GEM object\n");
268 		break;
269 	case _DRM_SCATTER_GATHER:
270 		if (!dev->sg) {
271 			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
272 			return -EINVAL;
273 		}
274 		map->offset += (unsigned long)dev->sg->virtual;
275 		break;
276 	case _DRM_CONSISTENT:
277 		/* dma_addr_t is 64-bit on i386 with CONFIG_HIGHMEM64G.
278 		 * As we're limiting the address to 2^32-1 (or less),
279 		 * casting it down to 32 bits is no problem, but we
280 		 * need to point to a 64-bit variable first. */
281 		dmah = drm_pci_alloc(dev, map->size, map->size, 0xffffffffUL);
282 		if (!dmah) {
283 			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
284 			return -ENOMEM;
285 		}
286 		map->handle = dmah->vaddr;
287 		map->offset = (unsigned long)dmah->busaddr;
288 		kfree(dmah);
289 		break;
290 	default:
291 		drm_free(map, sizeof(*map), DRM_MEM_MAPS);
292 		return -EINVAL;
293 	}
294 
295 	list = drm_alloc(sizeof(*list), DRM_MEM_MAPS);
296 	if (!list) {
297 		if (map->type == _DRM_REGISTERS)
298 			iounmap(map->handle);
299 		drm_free(map, sizeof(*map), DRM_MEM_MAPS);
300 		return -EINVAL;
301 	}
302 	memset(list, 0, sizeof(*list));
303 	list->map = map;
304 
305 	mutex_lock(&dev->struct_mutex);
306 	list_add(&list->head, &dev->maplist);
307 
308 	/* Assign a 32-bit handle */
309 	/* We do it here so that dev->struct_mutex protects the increment */
310 	user_token = (map->type == _DRM_SHM) ? (unsigned long)map->handle :
311 		map->offset;
312 	ret = drm_map_handle(dev, &list->hash, user_token, 0);
313 	if (ret) {
314 		if (map->type == _DRM_REGISTERS)
315 			iounmap(map->handle);
316 		drm_free(map, sizeof(*map), DRM_MEM_MAPS);
317 		drm_free(list, sizeof(*list), DRM_MEM_MAPS);
318 		mutex_unlock(&dev->struct_mutex);
319 		return ret;
320 	}
321 
322 	list->user_token = list->hash.key << PAGE_SHIFT;
323 	mutex_unlock(&dev->struct_mutex);
324 
325 	list->master = dev->primary->master;
326 	*maplist = list;
327 	return 0;
328 }
329 
330 int drm_addmap(struct drm_device * dev, unsigned int offset,
331 	       unsigned int size, enum drm_map_type type,
332 	       enum drm_map_flags flags, drm_local_map_t ** map_ptr)
333 {
334 	struct drm_map_list *list;
335 	int rc;
336 
337 	rc = drm_addmap_core(dev, offset, size, type, flags, &list);
338 	if (!rc)
339 		*map_ptr = list->map;
340 	return rc;
341 }
342 
343 EXPORT_SYMBOL(drm_addmap);
344 
345 int drm_addmap_ioctl(struct drm_device *dev, void *data,
346 		     struct drm_file *file_priv)
347 {
348 	struct drm_map *map = data;
349 	struct drm_map_list *maplist;
350 	int err;
351 
352 	if (!(capable(CAP_SYS_ADMIN) || map->type == _DRM_AGP || map->type == _DRM_SHM))
353 		return -EPERM;
354 
355 	err = drm_addmap_core(dev, map->offset, map->size, map->type,
356 			      map->flags, &maplist);
357 
358 	if (err)
359 		return err;
360 
361 	/* avoid a warning on 64-bit; this cast isn't very nice, but the API is already set, so it's too late to change */
362 	map->handle = (void *)(unsigned long)maplist->user_token;
363 	return 0;
364 }
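
/*
 * Usage sketch (illustrative only, user space): this ioctl is normally
 * reached through libdrm's drmAddMap() wrapper.  Assuming that classic API,
 * an X server creating a lock-bearing shared-memory map might do roughly:
 *
 *     drm_handle_t handle;
 *     if (drmAddMap(fd, 0, 0x1000, DRM_SHM, DRM_CONTAINS_LOCK, &handle) == 0)
 *             mmap(NULL, 0x1000, PROT_READ | PROT_WRITE, MAP_SHARED, fd, handle);
 *
 * The value returned in drm_map::handle is list->user_token, i.e. the token
 * computed by drm_map_handle() above.
 */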
365 
366 /**
367  * Remove a map from the list and deallocate its resources if the mapping
368  * isn't in use.
369  *
370  * \param inode device inode.
371  * \param file_priv DRM file private.
372  * \param cmd command.
373  * \param arg pointer to a struct drm_map structure.
374  * \return zero on success or a negative value on error.
375  *
376  * Searches for the map on drm_device::maplist, removes it from the list, checks
377  * whether it is still in use, and frees any associated resources (such as
378  * MTRRs) if it is not.
379  *
380  * \sa drm_addmap
381  */
382 int drm_rmmap_locked(struct drm_device *dev, drm_local_map_t *map)
383 {
384 	struct drm_map_list *r_list = NULL, *list_t;
385 	drm_dma_handle_t dmah;
386 	int found = 0;
387 	struct drm_master *master;
388 
389 	/* Find the list entry for the map and remove it */
390 	list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) {
391 		if (r_list->map == map) {
392 			master = r_list->master;
393 			list_del(&r_list->head);
394 			drm_ht_remove_key(&dev->map_hash,
395 					  r_list->user_token >> PAGE_SHIFT);
396 			drm_free(r_list, sizeof(*r_list), DRM_MEM_MAPS);
397 			found = 1;
398 			break;
399 		}
400 	}
401 
402 	if (!found)
403 		return -EINVAL;
404 
405 	switch (map->type) {
406 	case _DRM_REGISTERS:
407 		iounmap(map->handle);
408 		/* FALLTHROUGH */
409 	case _DRM_FRAME_BUFFER:
410 		if (drm_core_has_MTRR(dev) && map->mtrr >= 0) {
411 			int retcode;
412 			retcode = mtrr_del(map->mtrr, map->offset, map->size);
413 			DRM_DEBUG("mtrr_del=%d\n", retcode);
414 		}
415 		break;
416 	case _DRM_SHM:
417 		vfree(map->handle);
418 		if (master) {
419 			if (dev->sigdata.lock == master->lock.hw_lock)
420 				dev->sigdata.lock = NULL;
421 			master->lock.hw_lock = NULL;   /* SHM removed */
422 			master->lock.file_priv = NULL;
423 			wake_up_interruptible_all(&master->lock.lock_queue);
424 		}
425 		break;
426 	case _DRM_AGP:
427 	case _DRM_SCATTER_GATHER:
428 		break;
429 	case _DRM_CONSISTENT:
430 		dmah.vaddr = map->handle;
431 		dmah.busaddr = map->offset;
432 		dmah.size = map->size;
433 		__drm_pci_free(dev, &dmah);
434 		break;
435 	case _DRM_GEM:
436 		DRM_ERROR("tried to rmmap GEM object\n");
437 		break;
438 	}
439 	drm_free(map, sizeof(*map), DRM_MEM_MAPS);
440 
441 	return 0;
442 }
443 EXPORT_SYMBOL(drm_rmmap_locked);
444 
445 int drm_rmmap(struct drm_device *dev, drm_local_map_t *map)
446 {
447 	int ret;
448 
449 	mutex_lock(&dev->struct_mutex);
450 	ret = drm_rmmap_locked(dev, map);
451 	mutex_unlock(&dev->struct_mutex);
452 
453 	return ret;
454 }
455 EXPORT_SYMBOL(drm_rmmap);
456 
457 /* The rmmap ioctl appears to be unnecessary.  All mappings are torn down on
458  * the last close of the device, and this is necessary for cleanup when things
459  * exit uncleanly.  Therefore, having userland manually remove mappings seems
460  * like a pointless exercise since they're going away anyway.
461  *
462  * One use case might be after addmap is allowed for normal users for SHM and
463  * gets used by drivers that the server doesn't need to care about.  This seems
464  * unlikely.
465  */
466 int drm_rmmap_ioctl(struct drm_device *dev, void *data,
467 		    struct drm_file *file_priv)
468 {
469 	struct drm_map *request = data;
470 	drm_local_map_t *map = NULL;
471 	struct drm_map_list *r_list;
472 	int ret;
473 
474 	mutex_lock(&dev->struct_mutex);
475 	list_for_each_entry(r_list, &dev->maplist, head) {
476 		if (r_list->map &&
477 		    r_list->user_token == (unsigned long)request->handle &&
478 		    r_list->map->flags & _DRM_REMOVABLE) {
479 			map = r_list->map;
480 			break;
481 		}
482 	}
483 
484 	/* List has wrapped around to the head pointer, or it's empty and we didn't
485 	 * find anything.
486 	 */
487 	if (list_empty(&dev->maplist) || !map) {
488 		mutex_unlock(&dev->struct_mutex);
489 		return -EINVAL;
490 	}
491 
492 	/* Register and framebuffer maps are permanent */
493 	if ((map->type == _DRM_REGISTERS) || (map->type == _DRM_FRAME_BUFFER)) {
494 		mutex_unlock(&dev->struct_mutex);
495 		return 0;
496 	}
497 
498 	ret = drm_rmmap_locked(dev, map);
499 
500 	mutex_unlock(&dev->struct_mutex);
501 
502 	return ret;
503 }
504 
505 /**
506  * Cleanup after an error on one of the addbufs() functions.
507  *
508  * \param dev DRM device.
509  * \param entry buffer entry where the error occurred.
510  *
511  * Frees any pages and buffers associated with the given entry.
512  */
513 static void drm_cleanup_buf_error(struct drm_device * dev,
514 				  struct drm_buf_entry * entry)
515 {
516 	int i;
517 
518 	if (entry->seg_count) {
519 		for (i = 0; i < entry->seg_count; i++) {
520 			if (entry->seglist[i]) {
521 				drm_pci_free(dev, entry->seglist[i]);
522 			}
523 		}
524 		drm_free(entry->seglist,
525 			 entry->seg_count *
526 			 sizeof(*entry->seglist), DRM_MEM_SEGS);
527 
528 		entry->seg_count = 0;
529 	}
530 
531 	if (entry->buf_count) {
532 		for (i = 0; i < entry->buf_count; i++) {
533 			if (entry->buflist[i].dev_private) {
534 				drm_free(entry->buflist[i].dev_private,
535 					 entry->buflist[i].dev_priv_size,
536 					 DRM_MEM_BUFS);
537 			}
538 		}
539 		drm_free(entry->buflist,
540 			 entry->buf_count *
541 			 sizeof(*entry->buflist), DRM_MEM_BUFS);
542 
543 		entry->buf_count = 0;
544 	}
545 }
546 
547 #if __OS_HAS_AGP
548 /**
549  * Add AGP buffers for DMA transfers.
550  *
551  * \param dev struct drm_device to which the buffers are to be added.
552  * \param request pointer to a struct drm_buf_desc describing the request.
553  * \return zero on success or a negative number on failure.
554  *
555  * After some sanity checks, creates a drm_buf structure for each buffer and
556  * reallocates the buffer list of the same size order to accommodate the new
557  * buffers.
558  */
559 int drm_addbufs_agp(struct drm_device * dev, struct drm_buf_desc * request)
560 {
561 	struct drm_device_dma *dma = dev->dma;
562 	struct drm_buf_entry *entry;
563 	struct drm_agp_mem *agp_entry;
564 	struct drm_buf *buf;
565 	unsigned long offset;
566 	unsigned long agp_offset;
567 	int count;
568 	int order;
569 	int size;
570 	int alignment;
571 	int page_order;
572 	int total;
573 	int byte_count;
574 	int i, valid;
575 	struct drm_buf **temp_buflist;
576 
577 	if (!dma)
578 		return -EINVAL;
579 
580 	count = request->count;
581 	order = drm_order(request->size);
582 	size = 1 << order;
583 
584 	alignment = (request->flags & _DRM_PAGE_ALIGN)
585 	    ? PAGE_ALIGN(size) : size;
586 	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
587 	total = PAGE_SIZE << page_order;
588 
589 	byte_count = 0;
590 	agp_offset = dev->agp->base + request->agp_start;
591 
592 	DRM_DEBUG("count:      %d\n", count);
593 	DRM_DEBUG("order:      %d\n", order);
594 	DRM_DEBUG("size:       %d\n", size);
595 	DRM_DEBUG("agp_offset: %lx\n", agp_offset);
596 	DRM_DEBUG("alignment:  %d\n", alignment);
597 	DRM_DEBUG("page_order: %d\n", page_order);
598 	DRM_DEBUG("total:      %d\n", total);
599 
600 	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
601 		return -EINVAL;
602 	if (dev->queue_count)
603 		return -EBUSY;	/* Not while in use */
604 
605 	/* Make sure buffers are located in AGP memory that we own */
606 	valid = 0;
607 	list_for_each_entry(agp_entry, &dev->agp->memory, head) {
608 		if ((agp_offset >= agp_entry->bound) &&
609 		    (agp_offset + total * count <= agp_entry->bound + agp_entry->pages * PAGE_SIZE)) {
610 			valid = 1;
611 			break;
612 		}
613 	}
614 	if (!list_empty(&dev->agp->memory) && !valid) {
615 		DRM_DEBUG("zone invalid\n");
616 		return -EINVAL;
617 	}
618 	spin_lock(&dev->count_lock);
619 	if (dev->buf_use) {
620 		spin_unlock(&dev->count_lock);
621 		return -EBUSY;
622 	}
623 	atomic_inc(&dev->buf_alloc);
624 	spin_unlock(&dev->count_lock);
625 
626 	mutex_lock(&dev->struct_mutex);
627 	entry = &dma->bufs[order];
628 	if (entry->buf_count) {
629 		mutex_unlock(&dev->struct_mutex);
630 		atomic_dec(&dev->buf_alloc);
631 		return -ENOMEM;	/* May only call once for each order */
632 	}
633 
634 	if (count < 0 || count > 4096) {
635 		mutex_unlock(&dev->struct_mutex);
636 		atomic_dec(&dev->buf_alloc);
637 		return -EINVAL;
638 	}
639 
640 	entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
641 				   DRM_MEM_BUFS);
642 	if (!entry->buflist) {
643 		mutex_unlock(&dev->struct_mutex);
644 		atomic_dec(&dev->buf_alloc);
645 		return -ENOMEM;
646 	}
647 	memset(entry->buflist, 0, count * sizeof(*entry->buflist));
648 
649 	entry->buf_size = size;
650 	entry->page_order = page_order;
651 
652 	offset = 0;
653 
654 	while (entry->buf_count < count) {
655 		buf = &entry->buflist[entry->buf_count];
656 		buf->idx = dma->buf_count + entry->buf_count;
657 		buf->total = alignment;
658 		buf->order = order;
659 		buf->used = 0;
660 
661 		buf->offset = (dma->byte_count + offset);
662 		buf->bus_address = agp_offset + offset;
663 		buf->address = (void *)(agp_offset + offset);
664 		buf->next = NULL;
665 		buf->waiting = 0;
666 		buf->pending = 0;
667 		init_waitqueue_head(&buf->dma_wait);
668 		buf->file_priv = NULL;
669 
670 		buf->dev_priv_size = dev->driver->dev_priv_size;
671 		buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS);
672 		if (!buf->dev_private) {
673 			/* Set count correctly so we free the proper amount. */
674 			entry->buf_count = count;
675 			drm_cleanup_buf_error(dev, entry);
676 			mutex_unlock(&dev->struct_mutex);
677 			atomic_dec(&dev->buf_alloc);
678 			return -ENOMEM;
679 		}
680 		memset(buf->dev_private, 0, buf->dev_priv_size);
681 
682 		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
683 
684 		offset += alignment;
685 		entry->buf_count++;
686 		byte_count += PAGE_SIZE << page_order;
687 	}
688 
689 	DRM_DEBUG("byte_count: %d\n", byte_count);
690 
691 	temp_buflist = drm_realloc(dma->buflist,
692 				   dma->buf_count * sizeof(*dma->buflist),
693 				   (dma->buf_count + entry->buf_count)
694 				   * sizeof(*dma->buflist), DRM_MEM_BUFS);
695 	if (!temp_buflist) {
696 		/* Free the entry because it isn't valid */
697 		drm_cleanup_buf_error(dev, entry);
698 		mutex_unlock(&dev->struct_mutex);
699 		atomic_dec(&dev->buf_alloc);
700 		return -ENOMEM;
701 	}
702 	dma->buflist = temp_buflist;
703 
704 	for (i = 0; i < entry->buf_count; i++) {
705 		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
706 	}
707 
708 	dma->buf_count += entry->buf_count;
709 	dma->seg_count += entry->seg_count;
710 	dma->page_count += byte_count >> PAGE_SHIFT;
711 	dma->byte_count += byte_count;
712 
713 	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
714 	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
715 
716 	mutex_unlock(&dev->struct_mutex);
717 
718 	request->count = entry->buf_count;
719 	request->size = size;
720 
721 	dma->flags = _DRM_DMA_USE_AGP;
722 
723 	atomic_dec(&dev->buf_alloc);
724 	return 0;
725 }
726 EXPORT_SYMBOL(drm_addbufs_agp);
727 #endif				/* __OS_HAS_AGP */
728 
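/*
 * Note on the size arithmetic shared by the drm_addbufs_*() variants
 * (illustrative, assuming 4 KiB pages): request->size is rounded up to a
 * power of two via drm_order(); alignment is either that size or
 * PAGE_ALIGN(size) when _DRM_PAGE_ALIGN is set; page_order and total describe
 * the per-segment allocation granularity.  For example, request->size = 20480
 * gives order = 15, size = 32768, page_order = 3 and total = 32768.
 */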
729 int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request)
730 {
731 	struct drm_device_dma *dma = dev->dma;
732 	int count;
733 	int order;
734 	int size;
735 	int total;
736 	int page_order;
737 	struct drm_buf_entry *entry;
738 	drm_dma_handle_t *dmah;
739 	struct drm_buf *buf;
740 	int alignment;
741 	unsigned long offset;
742 	int i;
743 	int byte_count;
744 	int page_count;
745 	unsigned long *temp_pagelist;
746 	struct drm_buf **temp_buflist;
747 
748 	if (!drm_core_check_feature(dev, DRIVER_PCI_DMA))
749 		return -EINVAL;
750 
751 	if (!dma)
752 		return -EINVAL;
753 
754 	if (!capable(CAP_SYS_ADMIN))
755 		return -EPERM;
756 
757 	count = request->count;
758 	order = drm_order(request->size);
759 	size = 1 << order;
760 
761 	DRM_DEBUG("count=%d, size=%d (%d), order=%d, queue_count=%d\n",
762 		  request->count, request->size, size, order, dev->queue_count);
763 
764 	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
765 		return -EINVAL;
766 	if (dev->queue_count)
767 		return -EBUSY;	/* Not while in use */
768 
769 	alignment = (request->flags & _DRM_PAGE_ALIGN)
770 	    ? PAGE_ALIGN(size) : size;
771 	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
772 	total = PAGE_SIZE << page_order;
773 
774 	spin_lock(&dev->count_lock);
775 	if (dev->buf_use) {
776 		spin_unlock(&dev->count_lock);
777 		return -EBUSY;
778 	}
779 	atomic_inc(&dev->buf_alloc);
780 	spin_unlock(&dev->count_lock);
781 
782 	mutex_lock(&dev->struct_mutex);
783 	entry = &dma->bufs[order];
784 	if (entry->buf_count) {
785 		mutex_unlock(&dev->struct_mutex);
786 		atomic_dec(&dev->buf_alloc);
787 		return -ENOMEM;	/* May only call once for each order */
788 	}
789 
790 	if (count < 0 || count > 4096) {
791 		mutex_unlock(&dev->struct_mutex);
792 		atomic_dec(&dev->buf_alloc);
793 		return -EINVAL;
794 	}
795 
796 	entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
797 				   DRM_MEM_BUFS);
798 	if (!entry->buflist) {
799 		mutex_unlock(&dev->struct_mutex);
800 		atomic_dec(&dev->buf_alloc);
801 		return -ENOMEM;
802 	}
803 	memset(entry->buflist, 0, count * sizeof(*entry->buflist));
804 
805 	entry->seglist = drm_alloc(count * sizeof(*entry->seglist),
806 				   DRM_MEM_SEGS);
807 	if (!entry->seglist) {
808 		drm_free(entry->buflist,
809 			 count * sizeof(*entry->buflist), DRM_MEM_BUFS);
810 		mutex_unlock(&dev->struct_mutex);
811 		atomic_dec(&dev->buf_alloc);
812 		return -ENOMEM;
813 	}
814 	memset(entry->seglist, 0, count * sizeof(*entry->seglist));
815 
816 	/* Keep the original pagelist until we know all the allocations
817 	 * have succeeded
818 	 */
819 	temp_pagelist = drm_alloc((dma->page_count + (count << page_order))
820 				  * sizeof(*dma->pagelist), DRM_MEM_PAGES);
821 	if (!temp_pagelist) {
822 		drm_free(entry->buflist,
823 			 count * sizeof(*entry->buflist), DRM_MEM_BUFS);
824 		drm_free(entry->seglist,
825 			 count * sizeof(*entry->seglist), DRM_MEM_SEGS);
826 		mutex_unlock(&dev->struct_mutex);
827 		atomic_dec(&dev->buf_alloc);
828 		return -ENOMEM;
829 	}
830 	memcpy(temp_pagelist,
831 	       dma->pagelist, dma->page_count * sizeof(*dma->pagelist));
832 	DRM_DEBUG("pagelist: %d entries\n",
833 		  dma->page_count + (count << page_order));
834 
835 	entry->buf_size = size;
836 	entry->page_order = page_order;
837 	byte_count = 0;
838 	page_count = 0;
839 
840 	while (entry->buf_count < count) {
841 
842 		dmah = drm_pci_alloc(dev, PAGE_SIZE << page_order, 0x1000, 0xfffffffful);
843 
844 		if (!dmah) {
845 			/* Set count correctly so we free the proper amount. */
846 			entry->buf_count = count;
847 			entry->seg_count = count;
848 			drm_cleanup_buf_error(dev, entry);
849 			drm_free(temp_pagelist,
850 				 (dma->page_count + (count << page_order))
851 				 * sizeof(*dma->pagelist), DRM_MEM_PAGES);
852 			mutex_unlock(&dev->struct_mutex);
853 			atomic_dec(&dev->buf_alloc);
854 			return -ENOMEM;
855 		}
856 		entry->seglist[entry->seg_count++] = dmah;
857 		for (i = 0; i < (1 << page_order); i++) {
858 			DRM_DEBUG("page %d @ 0x%08lx\n",
859 				  dma->page_count + page_count,
860 				  (unsigned long)dmah->vaddr + PAGE_SIZE * i);
861 			temp_pagelist[dma->page_count + page_count++]
862 				= (unsigned long)dmah->vaddr + PAGE_SIZE * i;
863 		}
864 		for (offset = 0;
865 		     offset + size <= total && entry->buf_count < count;
866 		     offset += alignment, ++entry->buf_count) {
867 			buf = &entry->buflist[entry->buf_count];
868 			buf->idx = dma->buf_count + entry->buf_count;
869 			buf->total = alignment;
870 			buf->order = order;
871 			buf->used = 0;
872 			buf->offset = (dma->byte_count + byte_count + offset);
873 			buf->address = (void *)(dmah->vaddr + offset);
874 			buf->bus_address = dmah->busaddr + offset;
875 			buf->next = NULL;
876 			buf->waiting = 0;
877 			buf->pending = 0;
878 			init_waitqueue_head(&buf->dma_wait);
879 			buf->file_priv = NULL;
880 
881 			buf->dev_priv_size = dev->driver->dev_priv_size;
882 			buf->dev_private = drm_alloc(buf->dev_priv_size,
883 						     DRM_MEM_BUFS);
884 			if (!buf->dev_private) {
885 				/* Set count correctly so we free the proper amount. */
886 				entry->buf_count = count;
887 				entry->seg_count = count;
888 				drm_cleanup_buf_error(dev, entry);
889 				drm_free(temp_pagelist,
890 					 (dma->page_count +
891 					  (count << page_order))
892 					 * sizeof(*dma->pagelist),
893 					 DRM_MEM_PAGES);
894 				mutex_unlock(&dev->struct_mutex);
895 				atomic_dec(&dev->buf_alloc);
896 				return -ENOMEM;
897 			}
898 			memset(buf->dev_private, 0, buf->dev_priv_size);
899 
900 			DRM_DEBUG("buffer %d @ %p\n",
901 				  entry->buf_count, buf->address);
902 		}
903 		byte_count += PAGE_SIZE << page_order;
904 	}
905 
906 	temp_buflist = drm_realloc(dma->buflist,
907 				   dma->buf_count * sizeof(*dma->buflist),
908 				   (dma->buf_count + entry->buf_count)
909 				   * sizeof(*dma->buflist), DRM_MEM_BUFS);
910 	if (!temp_buflist) {
911 		/* Free the entry because it isn't valid */
912 		drm_cleanup_buf_error(dev, entry);
913 		drm_free(temp_pagelist,
914 			 (dma->page_count + (count << page_order))
915 			 * sizeof(*dma->pagelist), DRM_MEM_PAGES);
916 		mutex_unlock(&dev->struct_mutex);
917 		atomic_dec(&dev->buf_alloc);
918 		return -ENOMEM;
919 	}
920 	dma->buflist = temp_buflist;
921 
922 	for (i = 0; i < entry->buf_count; i++) {
923 		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
924 	}
925 
926 	/* No allocations failed, so now we can replace the original pagelist
927 	 * with the new one.
928 	 */
929 	if (dma->page_count) {
930 		drm_free(dma->pagelist,
931 			 dma->page_count * sizeof(*dma->pagelist),
932 			 DRM_MEM_PAGES);
933 	}
934 	dma->pagelist = temp_pagelist;
935 
936 	dma->buf_count += entry->buf_count;
937 	dma->seg_count += entry->seg_count;
938 	dma->page_count += entry->seg_count << page_order;
939 	dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);
940 
941 	mutex_unlock(&dev->struct_mutex);
942 
943 	request->count = entry->buf_count;
944 	request->size = size;
945 
946 	if (request->flags & _DRM_PCI_BUFFER_RO)
947 		dma->flags = _DRM_DMA_USE_PCI_RO;
948 
949 	atomic_dec(&dev->buf_alloc);
950 	return 0;
951 
952 }
953 EXPORT_SYMBOL(drm_addbufs_pci);
954 
955 static int drm_addbufs_sg(struct drm_device * dev, struct drm_buf_desc * request)
956 {
957 	struct drm_device_dma *dma = dev->dma;
958 	struct drm_buf_entry *entry;
959 	struct drm_buf *buf;
960 	unsigned long offset;
961 	unsigned long agp_offset;
962 	int count;
963 	int order;
964 	int size;
965 	int alignment;
966 	int page_order;
967 	int total;
968 	int byte_count;
969 	int i;
970 	struct drm_buf **temp_buflist;
971 
972 	if (!drm_core_check_feature(dev, DRIVER_SG))
973 		return -EINVAL;
974 
975 	if (!dma)
976 		return -EINVAL;
977 
978 	if (!capable(CAP_SYS_ADMIN))
979 		return -EPERM;
980 
981 	count = request->count;
982 	order = drm_order(request->size);
983 	size = 1 << order;
984 
985 	alignment = (request->flags & _DRM_PAGE_ALIGN)
986 	    ? PAGE_ALIGN(size) : size;
987 	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
988 	total = PAGE_SIZE << page_order;
989 
990 	byte_count = 0;
991 	agp_offset = request->agp_start;
992 
993 	DRM_DEBUG("count:      %d\n", count);
994 	DRM_DEBUG("order:      %d\n", order);
995 	DRM_DEBUG("size:       %d\n", size);
996 	DRM_DEBUG("agp_offset: %lu\n", agp_offset);
997 	DRM_DEBUG("alignment:  %d\n", alignment);
998 	DRM_DEBUG("page_order: %d\n", page_order);
999 	DRM_DEBUG("total:      %d\n", total);
1000 
1001 	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
1002 		return -EINVAL;
1003 	if (dev->queue_count)
1004 		return -EBUSY;	/* Not while in use */
1005 
1006 	spin_lock(&dev->count_lock);
1007 	if (dev->buf_use) {
1008 		spin_unlock(&dev->count_lock);
1009 		return -EBUSY;
1010 	}
1011 	atomic_inc(&dev->buf_alloc);
1012 	spin_unlock(&dev->count_lock);
1013 
1014 	mutex_lock(&dev->struct_mutex);
1015 	entry = &dma->bufs[order];
1016 	if (entry->buf_count) {
1017 		mutex_unlock(&dev->struct_mutex);
1018 		atomic_dec(&dev->buf_alloc);
1019 		return -ENOMEM;	/* May only call once for each order */
1020 	}
1021 
1022 	if (count < 0 || count > 4096) {
1023 		mutex_unlock(&dev->struct_mutex);
1024 		atomic_dec(&dev->buf_alloc);
1025 		return -EINVAL;
1026 	}
1027 
1028 	entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
1029 				   DRM_MEM_BUFS);
1030 	if (!entry->buflist) {
1031 		mutex_unlock(&dev->struct_mutex);
1032 		atomic_dec(&dev->buf_alloc);
1033 		return -ENOMEM;
1034 	}
1035 	memset(entry->buflist, 0, count * sizeof(*entry->buflist));
1036 
1037 	entry->buf_size = size;
1038 	entry->page_order = page_order;
1039 
1040 	offset = 0;
1041 
1042 	while (entry->buf_count < count) {
1043 		buf = &entry->buflist[entry->buf_count];
1044 		buf->idx = dma->buf_count + entry->buf_count;
1045 		buf->total = alignment;
1046 		buf->order = order;
1047 		buf->used = 0;
1048 
1049 		buf->offset = (dma->byte_count + offset);
1050 		buf->bus_address = agp_offset + offset;
1051 		buf->address = (void *)(agp_offset + offset
1052 					+ (unsigned long)dev->sg->virtual);
1053 		buf->next = NULL;
1054 		buf->waiting = 0;
1055 		buf->pending = 0;
1056 		init_waitqueue_head(&buf->dma_wait);
1057 		buf->file_priv = NULL;
1058 
1059 		buf->dev_priv_size = dev->driver->dev_priv_size;
1060 		buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS);
1061 		if (!buf->dev_private) {
1062 			/* Set count correctly so we free the proper amount. */
1063 			entry->buf_count = count;
1064 			drm_cleanup_buf_error(dev, entry);
1065 			mutex_unlock(&dev->struct_mutex);
1066 			atomic_dec(&dev->buf_alloc);
1067 			return -ENOMEM;
1068 		}
1069 
1070 		memset(buf->dev_private, 0, buf->dev_priv_size);
1071 
1072 		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
1073 
1074 		offset += alignment;
1075 		entry->buf_count++;
1076 		byte_count += PAGE_SIZE << page_order;
1077 	}
1078 
1079 	DRM_DEBUG("byte_count: %d\n", byte_count);
1080 
1081 	temp_buflist = drm_realloc(dma->buflist,
1082 				   dma->buf_count * sizeof(*dma->buflist),
1083 				   (dma->buf_count + entry->buf_count)
1084 				   * sizeof(*dma->buflist), DRM_MEM_BUFS);
1085 	if (!temp_buflist) {
1086 		/* Free the entry because it isn't valid */
1087 		drm_cleanup_buf_error(dev, entry);
1088 		mutex_unlock(&dev->struct_mutex);
1089 		atomic_dec(&dev->buf_alloc);
1090 		return -ENOMEM;
1091 	}
1092 	dma->buflist = temp_buflist;
1093 
1094 	for (i = 0; i < entry->buf_count; i++) {
1095 		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
1096 	}
1097 
1098 	dma->buf_count += entry->buf_count;
1099 	dma->seg_count += entry->seg_count;
1100 	dma->page_count += byte_count >> PAGE_SHIFT;
1101 	dma->byte_count += byte_count;
1102 
1103 	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
1104 	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
1105 
1106 	mutex_unlock(&dev->struct_mutex);
1107 
1108 	request->count = entry->buf_count;
1109 	request->size = size;
1110 
1111 	dma->flags = _DRM_DMA_USE_SG;
1112 
1113 	atomic_dec(&dev->buf_alloc);
1114 	return 0;
1115 }
1116 
1117 static int drm_addbufs_fb(struct drm_device * dev, struct drm_buf_desc * request)
1118 {
1119 	struct drm_device_dma *dma = dev->dma;
1120 	struct drm_buf_entry *entry;
1121 	struct drm_buf *buf;
1122 	unsigned long offset;
1123 	unsigned long agp_offset;
1124 	int count;
1125 	int order;
1126 	int size;
1127 	int alignment;
1128 	int page_order;
1129 	int total;
1130 	int byte_count;
1131 	int i;
1132 	struct drm_buf **temp_buflist;
1133 
1134 	if (!drm_core_check_feature(dev, DRIVER_FB_DMA))
1135 		return -EINVAL;
1136 
1137 	if (!dma)
1138 		return -EINVAL;
1139 
1140 	if (!capable(CAP_SYS_ADMIN))
1141 		return -EPERM;
1142 
1143 	count = request->count;
1144 	order = drm_order(request->size);
1145 	size = 1 << order;
1146 
1147 	alignment = (request->flags & _DRM_PAGE_ALIGN)
1148 	    ? PAGE_ALIGN(size) : size;
1149 	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
1150 	total = PAGE_SIZE << page_order;
1151 
1152 	byte_count = 0;
1153 	agp_offset = request->agp_start;
1154 
1155 	DRM_DEBUG("count:      %d\n", count);
1156 	DRM_DEBUG("order:      %d\n", order);
1157 	DRM_DEBUG("size:       %d\n", size);
1158 	DRM_DEBUG("agp_offset: %lu\n", agp_offset);
1159 	DRM_DEBUG("alignment:  %d\n", alignment);
1160 	DRM_DEBUG("page_order: %d\n", page_order);
1161 	DRM_DEBUG("total:      %d\n", total);
1162 
1163 	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
1164 		return -EINVAL;
1165 	if (dev->queue_count)
1166 		return -EBUSY;	/* Not while in use */
1167 
1168 	spin_lock(&dev->count_lock);
1169 	if (dev->buf_use) {
1170 		spin_unlock(&dev->count_lock);
1171 		return -EBUSY;
1172 	}
1173 	atomic_inc(&dev->buf_alloc);
1174 	spin_unlock(&dev->count_lock);
1175 
1176 	mutex_lock(&dev->struct_mutex);
1177 	entry = &dma->bufs[order];
1178 	if (entry->buf_count) {
1179 		mutex_unlock(&dev->struct_mutex);
1180 		atomic_dec(&dev->buf_alloc);
1181 		return -ENOMEM;	/* May only call once for each order */
1182 	}
1183 
1184 	if (count < 0 || count > 4096) {
1185 		mutex_unlock(&dev->struct_mutex);
1186 		atomic_dec(&dev->buf_alloc);
1187 		return -EINVAL;
1188 	}
1189 
1190 	entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
1191 				   DRM_MEM_BUFS);
1192 	if (!entry->buflist) {
1193 		mutex_unlock(&dev->struct_mutex);
1194 		atomic_dec(&dev->buf_alloc);
1195 		return -ENOMEM;
1196 	}
1197 	memset(entry->buflist, 0, count * sizeof(*entry->buflist));
1198 
1199 	entry->buf_size = size;
1200 	entry->page_order = page_order;
1201 
1202 	offset = 0;
1203 
1204 	while (entry->buf_count < count) {
1205 		buf = &entry->buflist[entry->buf_count];
1206 		buf->idx = dma->buf_count + entry->buf_count;
1207 		buf->total = alignment;
1208 		buf->order = order;
1209 		buf->used = 0;
1210 
1211 		buf->offset = (dma->byte_count + offset);
1212 		buf->bus_address = agp_offset + offset;
1213 		buf->address = (void *)(agp_offset + offset);
1214 		buf->next = NULL;
1215 		buf->waiting = 0;
1216 		buf->pending = 0;
1217 		init_waitqueue_head(&buf->dma_wait);
1218 		buf->file_priv = NULL;
1219 
1220 		buf->dev_priv_size = dev->driver->dev_priv_size;
1221 		buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS);
1222 		if (!buf->dev_private) {
1223 			/* Set count correctly so we free the proper amount. */
1224 			entry->buf_count = count;
1225 			drm_cleanup_buf_error(dev, entry);
1226 			mutex_unlock(&dev->struct_mutex);
1227 			atomic_dec(&dev->buf_alloc);
1228 			return -ENOMEM;
1229 		}
1230 		memset(buf->dev_private, 0, buf->dev_priv_size);
1231 
1232 		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
1233 
1234 		offset += alignment;
1235 		entry->buf_count++;
1236 		byte_count += PAGE_SIZE << page_order;
1237 	}
1238 
1239 	DRM_DEBUG("byte_count: %d\n", byte_count);
1240 
1241 	temp_buflist = drm_realloc(dma->buflist,
1242 				   dma->buf_count * sizeof(*dma->buflist),
1243 				   (dma->buf_count + entry->buf_count)
1244 				   * sizeof(*dma->buflist), DRM_MEM_BUFS);
1245 	if (!temp_buflist) {
1246 		/* Free the entry because it isn't valid */
1247 		drm_cleanup_buf_error(dev, entry);
1248 		mutex_unlock(&dev->struct_mutex);
1249 		atomic_dec(&dev->buf_alloc);
1250 		return -ENOMEM;
1251 	}
1252 	dma->buflist = temp_buflist;
1253 
1254 	for (i = 0; i < entry->buf_count; i++) {
1255 		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
1256 	}
1257 
1258 	dma->buf_count += entry->buf_count;
1259 	dma->seg_count += entry->seg_count;
1260 	dma->page_count += byte_count >> PAGE_SHIFT;
1261 	dma->byte_count += byte_count;
1262 
1263 	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
1264 	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
1265 
1266 	mutex_unlock(&dev->struct_mutex);
1267 
1268 	request->count = entry->buf_count;
1269 	request->size = size;
1270 
1271 	dma->flags = _DRM_DMA_USE_FB;
1272 
1273 	atomic_dec(&dev->buf_alloc);
1274 	return 0;
1275 }
1276 
1277 
1278 /**
1279  * Add buffers for DMA transfers (ioctl).
1280  *
1281  * \param inode device inode.
1282  * \param file_priv DRM file private.
1283  * \param cmd command.
1284  * \param arg pointer to a struct drm_buf_desc request.
1285  * \return zero on success or a negative number on failure.
1286  *
1287  * According to the memory type specified in drm_buf_desc::flags and the
1288  * build options, it dispatches the call either to addbufs_agp(),
1289  * addbufs_sg() or addbufs_pci() for AGP, scatter-gather or consistent
1290  * PCI memory respectively.
1291  */
1292 int drm_addbufs(struct drm_device *dev, void *data,
1293 		struct drm_file *file_priv)
1294 {
1295 	struct drm_buf_desc *request = data;
1296 	int ret;
1297 
1298 	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
1299 		return -EINVAL;
1300 
1301 #if __OS_HAS_AGP
1302 	if (request->flags & _DRM_AGP_BUFFER)
1303 		ret = drm_addbufs_agp(dev, request);
1304 	else
1305 #endif
1306 	if (request->flags & _DRM_SG_BUFFER)
1307 		ret = drm_addbufs_sg(dev, request);
1308 	else if (request->flags & _DRM_FB_BUFFER)
1309 		ret = drm_addbufs_fb(dev, request);
1310 	else
1311 		ret = drm_addbufs_pci(dev, request);
1312 
1313 	return ret;
1314 }
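
/*
 * Usage sketch (illustrative only): user space typically reaches this ioctl
 * through libdrm's drmAddBufs().  Assuming that wrapper, a DRI client asking
 * for 32 page-aligned 64 KiB AGP buffers at the start of its AGP region
 * might do roughly:
 *
 *     int granted = drmAddBufs(fd, 32, 65536,
 *                              DRM_AGP_BUFFER | DRM_PAGE_ALIGN, 0);
 *     // granted is the number of buffers actually created, or < 0 on error
 */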
1315 
1316 /**
1317  * Get information about the buffer mappings.
1318  *
1319  * This was originally meant for debugging purposes, or for use by a sophisticated
1320  * client library to determine how best to use the available buffers (e.g.,
1321  * large buffers can be used for image transfer).
1322  *
1323  * \param inode device inode.
1324  * \param file_priv DRM file private.
1325  * \param cmd command.
1326  * \param arg pointer to a drm_buf_info structure.
1327  * \return zero on success or a negative number on failure.
1328  *
1329  * Increments drm_device::buf_use while holding the drm_device::count_lock
1330  * lock, preventing allocation of more buffers after this call. Information
1331  * about each requested buffer is then copied into user space.
1332  */
1333 int drm_infobufs(struct drm_device *dev, void *data,
1334 		 struct drm_file *file_priv)
1335 {
1336 	struct drm_device_dma *dma = dev->dma;
1337 	struct drm_buf_info *request = data;
1338 	int i;
1339 	int count;
1340 
1341 	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
1342 		return -EINVAL;
1343 
1344 	if (!dma)
1345 		return -EINVAL;
1346 
1347 	spin_lock(&dev->count_lock);
1348 	if (atomic_read(&dev->buf_alloc)) {
1349 		spin_unlock(&dev->count_lock);
1350 		return -EBUSY;
1351 	}
1352 	++dev->buf_use;		/* Can't allocate more after this call */
1353 	spin_unlock(&dev->count_lock);
1354 
1355 	for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
1356 		if (dma->bufs[i].buf_count)
1357 			++count;
1358 	}
1359 
1360 	DRM_DEBUG("count = %d\n", count);
1361 
1362 	if (request->count >= count) {
1363 		for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
1364 			if (dma->bufs[i].buf_count) {
1365 				struct drm_buf_desc __user *to =
1366 				    &request->list[count];
1367 				struct drm_buf_entry *from = &dma->bufs[i];
1368 				struct drm_freelist *list = &dma->bufs[i].freelist;
1369 				if (copy_to_user(&to->count,
1370 						 &from->buf_count,
1371 						 sizeof(from->buf_count)) ||
1372 				    copy_to_user(&to->size,
1373 						 &from->buf_size,
1374 						 sizeof(from->buf_size)) ||
1375 				    copy_to_user(&to->low_mark,
1376 						 &list->low_mark,
1377 						 sizeof(list->low_mark)) ||
1378 				    copy_to_user(&to->high_mark,
1379 						 &list->high_mark,
1380 						 sizeof(list->high_mark)))
1381 					return -EFAULT;
1382 
1383 				DRM_DEBUG("%d %d %d %d %d\n",
1384 					  i,
1385 					  dma->bufs[i].buf_count,
1386 					  dma->bufs[i].buf_size,
1387 					  dma->bufs[i].freelist.low_mark,
1388 					  dma->bufs[i].freelist.high_mark);
1389 				++count;
1390 			}
1391 		}
1392 	}
1393 	request->count = count;
1394 
1395 	return 0;
1396 }
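
/*
 * Usage sketch (illustrative only): the matching libdrm wrapper is
 * drmGetBufInfo(), which issues this ioctl twice -- once with count = 0 to
 * learn how many size pools exist, then again with a large enough list:
 *
 *     drmBufInfoPtr info = drmGetBufInfo(fd);
 *     // info->count pools, each reporting count/size/low_mark/high_mark
 */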
1397 
1398 /**
1399  * Specifies a low and high water mark for buffer allocation.
1400  *
1401  * \param inode device inode.
1402  * \param file_priv DRM file private.
1403  * \param cmd command.
1404  * \param arg a pointer to a drm_buf_desc structure.
1405  * \return zero on success or a negative number on failure.
1406  *
1407  * Verifies that the size order is within the admissible range and updates the
1408  * low and high water marks of the respective drm_device_dma::bufs entry.
1409  *
1410  * \note This ioctl is deprecated and mostly never used.
1411  */
1412 int drm_markbufs(struct drm_device *dev, void *data,
1413 		 struct drm_file *file_priv)
1414 {
1415 	struct drm_device_dma *dma = dev->dma;
1416 	struct drm_buf_desc *request = data;
1417 	int order;
1418 	struct drm_buf_entry *entry;
1419 
1420 	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
1421 		return -EINVAL;
1422 
1423 	if (!dma)
1424 		return -EINVAL;
1425 
1426 	DRM_DEBUG("%d, %d, %d\n",
1427 		  request->size, request->low_mark, request->high_mark);
1428 	order = drm_order(request->size);
1429 	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
1430 		return -EINVAL;
1431 	entry = &dma->bufs[order];
1432 
1433 	if (request->low_mark < 0 || request->low_mark > entry->buf_count)
1434 		return -EINVAL;
1435 	if (request->high_mark < 0 || request->high_mark > entry->buf_count)
1436 		return -EINVAL;
1437 
1438 	entry->freelist.low_mark = request->low_mark;
1439 	entry->freelist.high_mark = request->high_mark;
1440 
1441 	return 0;
1442 }
1443 
1444 /**
1445  * Unreserve the buffers in list, previously reserved using drmDMA.
1446  *
1447  * \param inode device inode.
1448  * \param file_priv DRM file private.
1449  * \param cmd command.
1450  * \param arg pointer to a drm_buf_free structure.
1451  * \return zero on success or a negative number on failure.
1452  *
1453  * Calls free_buffer() for each used buffer.
1454  * This function is primarily used for debugging.
1455  */
1456 int drm_freebufs(struct drm_device *dev, void *data,
1457 		 struct drm_file *file_priv)
1458 {
1459 	struct drm_device_dma *dma = dev->dma;
1460 	struct drm_buf_free *request = data;
1461 	int i;
1462 	int idx;
1463 	struct drm_buf *buf;
1464 
1465 	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
1466 		return -EINVAL;
1467 
1468 	if (!dma)
1469 		return -EINVAL;
1470 
1471 	DRM_DEBUG("%d\n", request->count);
1472 	for (i = 0; i < request->count; i++) {
1473 		if (copy_from_user(&idx, &request->list[i], sizeof(idx)))
1474 			return -EFAULT;
1475 		if (idx < 0 || idx >= dma->buf_count) {
1476 			DRM_ERROR("Index %d (of %d max)\n",
1477 				  idx, dma->buf_count - 1);
1478 			return -EINVAL;
1479 		}
1480 		buf = dma->buflist[idx];
1481 		if (buf->file_priv != file_priv) {
1482 			DRM_ERROR("Process %d freeing buffer not owned\n",
1483 				  task_pid_nr(current));
1484 			return -EINVAL;
1485 		}
1486 		drm_free_buffer(dev, buf);
1487 	}
1488 
1489 	return 0;
1490 }
1491 
1492 /**
1493  * Maps all of the DMA buffers into client-virtual space (ioctl).
1494  *
1495  * \param inode device inode.
1496  * \param file_priv DRM file private.
1497  * \param cmd command.
1498  * \param arg pointer to a drm_buf_map structure.
1499  * \return zero on success or a negative number on failure.
1500  *
1501  * Maps the AGP, SG or PCI buffer region with do_mmap(), and copies information
1502  * about each buffer into user space. For PCI buffers, it calls do_mmap() with
1503  * offset equal to 0, which drm_mmap() interprets as PCI buffers and calls
1504  * drm_mmap_dma().
1505  */
1506 int drm_mapbufs(struct drm_device *dev, void *data,
1507 	        struct drm_file *file_priv)
1508 {
1509 	struct drm_device_dma *dma = dev->dma;
1510 	int retcode = 0;
1511 	const int zero = 0;
1512 	unsigned long virtual;
1513 	unsigned long address;
1514 	struct drm_buf_map *request = data;
1515 	int i;
1516 
1517 	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
1518 		return -EINVAL;
1519 
1520 	if (!dma)
1521 		return -EINVAL;
1522 
1523 	spin_lock(&dev->count_lock);
1524 	if (atomic_read(&dev->buf_alloc)) {
1525 		spin_unlock(&dev->count_lock);
1526 		return -EBUSY;
1527 	}
1528 	dev->buf_use++;		/* Can't allocate more after this call */
1529 	spin_unlock(&dev->count_lock);
1530 
1531 	if (request->count >= dma->buf_count) {
1532 		if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP))
1533 		    || (drm_core_check_feature(dev, DRIVER_SG)
1534 			&& (dma->flags & _DRM_DMA_USE_SG))
1535 		    || (drm_core_check_feature(dev, DRIVER_FB_DMA)
1536 			&& (dma->flags & _DRM_DMA_USE_FB))) {
1537 			struct drm_map *map = dev->agp_buffer_map;
1538 			unsigned long token = dev->agp_buffer_token;
1539 
1540 			if (!map) {
1541 				retcode = -EINVAL;
1542 				goto done;
1543 			}
1544 			down_write(&current->mm->mmap_sem);
1545 			virtual = do_mmap(file_priv->filp, 0, map->size,
1546 					  PROT_READ | PROT_WRITE,
1547 					  MAP_SHARED,
1548 					  token);
1549 			up_write(&current->mm->mmap_sem);
1550 		} else {
1551 			down_write(&current->mm->mmap_sem);
1552 			virtual = do_mmap(file_priv->filp, 0, dma->byte_count,
1553 					  PROT_READ | PROT_WRITE,
1554 					  MAP_SHARED, 0);
1555 			up_write(&current->mm->mmap_sem);
1556 		}
1557 		if (virtual > -1024UL) {
1558 			/* Real error */
1559 			retcode = (signed long)virtual;
1560 			goto done;
1561 		}
1562 		request->virtual = (void __user *)virtual;
1563 
1564 		for (i = 0; i < dma->buf_count; i++) {
1565 			if (copy_to_user(&request->list[i].idx,
1566 					 &dma->buflist[i]->idx,
1567 					 sizeof(request->list[0].idx))) {
1568 				retcode = -EFAULT;
1569 				goto done;
1570 			}
1571 			if (copy_to_user(&request->list[i].total,
1572 					 &dma->buflist[i]->total,
1573 					 sizeof(request->list[0].total))) {
1574 				retcode = -EFAULT;
1575 				goto done;
1576 			}
1577 			if (copy_to_user(&request->list[i].used,
1578 					 &zero, sizeof(zero))) {
1579 				retcode = -EFAULT;
1580 				goto done;
1581 			}
1582 			address = virtual + dma->buflist[i]->offset;	/* *** */
1583 			if (copy_to_user(&request->list[i].address,
1584 					 &address, sizeof(address))) {
1585 				retcode = -EFAULT;
1586 				goto done;
1587 			}
1588 		}
1589 	}
1590       done:
1591 	request->count = dma->buf_count;
1592 	DRM_DEBUG("%d buffers, retcode = %d\n", request->count, retcode);
1593 
1594 	return retcode;
1595 }
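
/*
 * Usage sketch (illustrative only): user space normally calls this through
 * libdrm's drmMapBufs()/drmUnmapBufs().  Assuming those wrappers, a client
 * wanting CPU-visible pointers to every DMA buffer would do roughly:
 *
 *     drmBufMapPtr bufs = drmMapBufs(fd);
 *     if (bufs) {
 *             void *ptr = bufs->list[0].address;  // client-virtual address
 *             ...
 *             drmUnmapBufs(bufs);
 *     }
 */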
1596 
1597 /**
1598  * Compute size order.  Returns the exponent of the smallest power of two which
1599  * is greater than or equal to the given number.
1600  *
1601  * \param size size.
1602  * \return order.
1603  *
1604  * \todo Can be made faster.
1605  */
1606 int drm_order(unsigned long size)
1607 {
1608 	int order;
1609 	unsigned long tmp;
1610 
1611 	for (order = 0, tmp = size >> 1; tmp; tmp >>= 1, order++) ;
1612 
1613 	if (size & (size - 1))
1614 		++order;
1615 
1616 	return order;
1617 }
1618 EXPORT_SYMBOL(drm_order);
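
/*
 * A few sample values, to make the rounding behaviour of drm_order() concrete
 * (illustrative only):
 *
 *     drm_order(1)     == 0
 *     drm_order(4096)  == 12     (one page, with 4 KiB pages)
 *     drm_order(4097)  == 13
 *     drm_order(65536) == 16
 *
 * In other words, 1 << drm_order(size) is the smallest power of two >= size.
 */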
1619