/**
 * \file drm_fops.c
 * File operations for DRM
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Daryll Strauss <daryll@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Created: Mon Jan  4 08:58:31 1999 by faith@valinux.com
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "drmP.h"
#include <linux/poll.h>
#include <linux/smp_lock.h>

static int drm_open_helper(struct inode *inode, struct file *filp,
			   struct drm_device * dev);

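/**
 * One-time per-device setup performed on the first open: calls the driver's
 * \c firstopen hook, resets the statistics counters and per-device
 * bookkeeping, and sets up legacy DMA buffer handling for drivers that use it.
 */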
static int drm_setup(struct drm_device * dev)
{
	int i;
	int ret;

	if (dev->driver->firstopen) {
		ret = dev->driver->firstopen(dev);
		if (ret != 0)
			return ret;
	}

	atomic_set(&dev->ioctl_count, 0);
	atomic_set(&dev->vma_count, 0);

	if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) &&
	    !drm_core_check_feature(dev, DRIVER_MODESET)) {
		dev->buf_use = 0;
		atomic_set(&dev->buf_alloc, 0);

		i = drm_dma_setup(dev);
		if (i < 0)
			return i;
	}

	for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
		atomic_set(&dev->counts[i], 0);

	dev->sigdata.lock = NULL;

	dev->queue_count = 0;
	dev->queue_reserved = 0;
	dev->queue_slots = 0;
	dev->queuelist = NULL;
	dev->context_flag = 0;
	dev->interrupt_flag = 0;
	dev->dma_flag = 0;
	dev->last_context = 0;
	dev->last_switch = 0;
	dev->last_checked = 0;
	init_waitqueue_head(&dev->context_wait);
	dev->if_version = 0;

	dev->ctx_start = 0;
	dev->lck_start = 0;

	dev->buf_async = NULL;
	init_waitqueue_head(&dev->buf_readers);
	init_waitqueue_head(&dev->buf_writers);

	DRM_DEBUG("\n");

	/*
	 * The kernel's context could be created here, but is now created
	 * in drm_dma_enqueue.  This is more resource-efficient for
	 * hardware that does not do DMA, but may mean that
	 * drm_select_queue fails between the time the interrupt is
	 * initialized and the time the queues are initialized.
	 */

	return 0;
}

/**
 * Open file.
 *
 * \param inode device inode
 * \param filp file pointer.
 * \return zero on success or a negative number on failure.
 *
 * Searches for the DRM device with the matching minor number, calls
 * drm_open_helper(), and increments the device open count. If the open count
 * was previously zero, i.e. this is the first time the device is opened,
 * drm_setup() is also called.
 */
int drm_open(struct inode *inode, struct file *filp)
{
	struct drm_device *dev = NULL;
	int minor_id = iminor(inode);
	struct drm_minor *minor;
	int retcode = 0;

	minor = idr_find(&drm_minors_idr, minor_id);
	if (!minor)
		return -ENODEV;

	if (!(dev = minor->dev))
		return -ENODEV;

	retcode = drm_open_helper(inode, filp, dev);
	if (!retcode) {
		atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
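		/*
		 * Only the first opener runs drm_setup(); count_lock is a
		 * spinlock, so it is released before drm_setup(), which may
		 * sleep, is called.
		 */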
		spin_lock(&dev->count_lock);
		if (!dev->open_count++) {
			spin_unlock(&dev->count_lock);
			retcode = drm_setup(dev);
			goto out;
		}
		spin_unlock(&dev->count_lock);
	}
out:
	mutex_lock(&dev->struct_mutex);
	if (minor->type == DRM_MINOR_LEGACY) {
		BUG_ON((dev->dev_mapping != NULL) &&
			(dev->dev_mapping != inode->i_mapping));
		if (dev->dev_mapping == NULL)
			dev->dev_mapping = inode->i_mapping;
	}
	mutex_unlock(&dev->struct_mutex);

	return retcode;
}
EXPORT_SYMBOL(drm_open);

/**
 * File \c open operation.
 *
 * \param inode device inode.
 * \param filp file pointer.
 *
 * Puts the dev->fops corresponding to the device minor number into
 * \p filp, calls the \c open method, and restores the file operations.
 */
int drm_stub_open(struct inode *inode, struct file *filp)
{
	struct drm_device *dev = NULL;
	struct drm_minor *minor;
	int minor_id = iminor(inode);
	int err = -ENODEV;
	const struct file_operations *old_fops;

	DRM_DEBUG("\n");

	/* BKL pushdown: note that nothing else serializes idr_find() */
	lock_kernel();
	minor = idr_find(&drm_minors_idr, minor_id);
	if (!minor)
		goto out;

	if (!(dev = minor->dev))
		goto out;

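	/*
	 * Swap in the fops of the driver that owns this minor so that all
	 * further operations on the file go straight to that driver; if the
	 * driver's open fails, put the new fops and restore the old ones.
	 */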
	old_fops = filp->f_op;
	filp->f_op = fops_get(&dev->driver->fops);
	if (filp->f_op == NULL) {
		filp->f_op = old_fops;
		goto out;
	}
	if (filp->f_op->open && (err = filp->f_op->open(inode, filp))) {
		fops_put(filp->f_op);
		filp->f_op = fops_get(old_fops);
	}
	fops_put(old_fops);

out:
	unlock_kernel();
	return err;
}

/**
 * Check whether DRI will run on this CPU.
 *
 * \return non-zero if the DRI will run on this CPU, or zero otherwise.
 */
static int drm_cpu_valid(void)
{
#if defined(__i386__)
	if (boot_cpu_data.x86 == 3)
		return 0;	/* No cmpxchg on a 386 */
#endif
#if defined(__sparc__) && !defined(__sparc_v9__)
	return 0;		/* No cmpxchg before v9 sparc. */
#endif
	return 1;
}

/**
 * Called whenever a process opens /dev/drm.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param dev device.
 * \return zero on success or a negative number on failure.
 *
 * Creates and initializes a drm_file structure for the file private data in
 * \p filp and adds it to the doubly-linked list in \p dev.
 */
static int drm_open_helper(struct inode *inode, struct file *filp,
			   struct drm_device * dev)
{
	int minor_id = iminor(inode);
	struct drm_file *priv;
	int ret;

	if (filp->f_flags & O_EXCL)
		return -EBUSY;	/* No exclusive opens */
	if (!drm_cpu_valid())
		return -EINVAL;

	DRM_DEBUG("pid = %d, minor = %d\n", task_pid_nr(current), minor_id);

	priv = drm_alloc(sizeof(*priv), DRM_MEM_FILES);
	if (!priv)
		return -ENOMEM;

	memset(priv, 0, sizeof(*priv));
	filp->private_data = priv;
	priv->filp = filp;
	priv->uid = current_euid();
	priv->pid = task_pid_nr(current);
	priv->minor = idr_find(&drm_minors_idr, minor_id);
	priv->ioctl_count = 0;
	/* for compatibility root is always authenticated */
	priv->authenticated = capable(CAP_SYS_ADMIN);
	priv->lock_count = 0;

	INIT_LIST_HEAD(&priv->lhead);
	INIT_LIST_HEAD(&priv->fbs);

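	/* Initialize per-file GEM state for drivers that support GEM objects. */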
	if (dev->driver->driver_features & DRIVER_GEM)
		drm_gem_open(dev, priv);

	if (dev->driver->open) {
		ret = dev->driver->open(dev, priv);
		if (ret < 0)
			goto out_free;
	}

	/* if there is no current master make this fd it */
	mutex_lock(&dev->struct_mutex);
	if (!priv->minor->master) {
		/* create a new master */
		priv->minor->master = drm_master_create(priv->minor);
		if (!priv->minor->master) {
			ret = -ENOMEM;
			goto out_free;
		}

		priv->is_master = 1;
		/* take another reference for the copy in the local file priv */
		priv->master = drm_master_get(priv->minor->master);

		priv->authenticated = 1;

		mutex_unlock(&dev->struct_mutex);
		if (dev->driver->master_create) {
			ret = dev->driver->master_create(dev, priv->master);
			if (ret) {
				mutex_lock(&dev->struct_mutex);
				/* drop both references if this fails */
				drm_master_put(&priv->minor->master);
				drm_master_put(&priv->master);
				mutex_unlock(&dev->struct_mutex);
				goto out_free;
			}
		}
	} else {
		/* get a reference to the master */
		priv->master = drm_master_get(priv->minor->master);
		mutex_unlock(&dev->struct_mutex);
	}

	mutex_lock(&dev->struct_mutex);
	list_add(&priv->lhead, &dev->filelist);
	mutex_unlock(&dev->struct_mutex);

#ifdef __alpha__
	/*
	 * Default the hose
	 */
	if (!dev->hose) {
		struct pci_dev *pci_dev;
		pci_dev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, NULL);
		if (pci_dev) {
			dev->hose = pci_dev->sysdata;
			pci_dev_put(pci_dev);
		}
		if (!dev->hose) {
			struct pci_bus *b = pci_bus_b(pci_root_buses.next);
			if (b)
				dev->hose = b->sysdata;
		}
	}
#endif

	return 0;

out_free:
	drm_free(priv, sizeof(*priv), DRM_MEM_FILES);
	filp->private_data = NULL;
	return ret;
}

/** Enable or disable SIGIO notification on this file via fasync_helper(). */
int drm_fasync(int fd, struct file *filp, int on)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	int retcode;

	DRM_DEBUG("fd = %d, device = 0x%lx\n", fd,
		  (long)old_encode_dev(priv->minor->device));
	retcode = fasync_helper(fd, filp, on, &dev->buf_async);
	if (retcode < 0)
		return retcode;
	return 0;
}
EXPORT_SYMBOL(drm_fasync);

/*
 * Reclaim locked buffers; note that this may be a bad idea if the current
 * context doesn't have the hw lock...
 */
static void drm_reclaim_locked_buffers(struct drm_device *dev, struct file *f)
{
	struct drm_file *file_priv = f->private_data;

	if (drm_i_have_hw_lock(dev, file_priv)) {
		dev->driver->reclaim_buffers_locked(dev, file_priv);
	} else {
		unsigned long _end = jiffies + 3 * DRM_HZ;
		int locked = 0;

		drm_idlelock_take(&file_priv->master->lock);

		/*
		 * Poll for up to three seconds for the idlelock to be
		 * granted on our behalf.
		 */
		do {
			spin_lock_bh(&file_priv->master->lock.spinlock);
			locked = file_priv->master->lock.idle_has_lock;
			spin_unlock_bh(&file_priv->master->lock.spinlock);
			if (locked)
				break;
			schedule();
		} while (!time_after_eq(jiffies, _end));

		if (!locked) {
			DRM_ERROR("reclaim_buffers_locked() deadlock. Please rework this\n"
				  "\tdriver to use reclaim_buffers_idlelocked() instead.\n"
				  "\tI will go on reclaiming the buffers anyway.\n");
		}

		dev->driver->reclaim_buffers_locked(dev, file_priv);
		drm_idlelock_release(&file_priv->master->lock);
	}
}

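/**
 * Reclaim any DMA buffers still owned by this file, using whichever reclaim
 * hook the driver provides, and free the hardware lock if this file holds it.
 */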
static void drm_master_release(struct drm_device *dev, struct file *filp)
{
	struct drm_file *file_priv = filp->private_data;

	if (dev->driver->reclaim_buffers_locked &&
	    file_priv->master->lock.hw_lock)
		drm_reclaim_locked_buffers(dev, filp);

	if (dev->driver->reclaim_buffers_idlelocked &&
	    file_priv->master->lock.hw_lock) {
		drm_idlelock_take(&file_priv->master->lock);
		dev->driver->reclaim_buffers_idlelocked(dev, file_priv);
		drm_idlelock_release(&file_priv->master->lock);
	}

	if (drm_i_have_hw_lock(dev, file_priv)) {
		DRM_DEBUG("File %p released, freeing lock for context %d\n",
			  filp, _DRM_LOCKING_CONTEXT(file_priv->master->lock.hw_lock->lock));
		drm_lock_free(&file_priv->master->lock,
			      _DRM_LOCKING_CONTEXT(file_priv->master->lock.hw_lock->lock));
	}

	if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) &&
	    !dev->driver->reclaim_buffers_locked) {
		dev->driver->reclaim_buffers(dev, file_priv);
	}
}

/**
 * Release file.
 *
 * \param inode device inode
 * \param file_priv DRM file private.
 * \return zero on success or a negative number on failure.
 *
 * If the hardware lock is held then free it, and take it again for the kernel
 * context since it's necessary to reclaim buffers. Unlink the file private
 * data from its list and free it. Decreases the open count and if it reaches
 * zero calls drm_lastclose().
 */
int drm_release(struct inode *inode, struct file *filp)
{
	struct drm_file *file_priv = filp->private_data;
	struct drm_device *dev = file_priv->minor->dev;
	int retcode = 0;

	lock_kernel();

	DRM_DEBUG("open_count = %d\n", dev->open_count);

	if (dev->driver->preclose)
		dev->driver->preclose(dev, file_priv);

	/* ========================================================
	 * Begin inline drm_release
	 */

	DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
		  task_pid_nr(current),
		  (long)old_encode_dev(file_priv->minor->device),
		  dev->open_count);

	/* if the master has gone away we can't do anything with the lock */
	if (file_priv->minor->master)
		drm_master_release(dev, filp);

	if (dev->driver->driver_features & DRIVER_GEM)
		drm_gem_release(dev, file_priv);

	if (dev->driver->driver_features & DRIVER_MODESET)
		drm_fb_release(file_priv);

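	/*
	 * Destroy any contexts that are still owned by this file, leaving
	 * the kernel context alone.
	 */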
	mutex_lock(&dev->ctxlist_mutex);
	if (!list_empty(&dev->ctxlist)) {
		struct drm_ctx_list *pos, *n;

		list_for_each_entry_safe(pos, n, &dev->ctxlist, head) {
			if (pos->tag == file_priv &&
			    pos->handle != DRM_KERNEL_CONTEXT) {
				if (dev->driver->context_dtor)
					dev->driver->context_dtor(dev,
								  pos->handle);

				drm_ctxbitmap_free(dev, pos->handle);

				list_del(&pos->head);
				drm_free(pos, sizeof(*pos), DRM_MEM_CTXLIST);
				--dev->ctx_count;
			}
		}
	}
	mutex_unlock(&dev->ctxlist_mutex);

	mutex_lock(&dev->struct_mutex);

	if (file_priv->is_master) {
		struct drm_master *master = file_priv->master;
		struct drm_file *temp;
		list_for_each_entry(temp, &dev->filelist, lhead) {
			if ((temp->master == file_priv->master) &&
			    (temp != file_priv))
				temp->authenticated = 0;
		}

		/*
		 * Since the master is disappearing, so is the
		 * possibility to lock.
		 */

		if (master->lock.hw_lock) {
			if (dev->sigdata.lock == master->lock.hw_lock)
				dev->sigdata.lock = NULL;
			master->lock.hw_lock = NULL;
			master->lock.file_priv = NULL;
			wake_up_interruptible_all(&master->lock.lock_queue);
		}

		if (file_priv->minor->master == file_priv->master) {
			/* drop the reference held by the minor */
			drm_master_put(&file_priv->minor->master);
		}
	}

	/* drop the reference held by the file priv */
	drm_master_put(&file_priv->master);
	file_priv->is_master = 0;
	list_del(&file_priv->lhead);
	mutex_unlock(&dev->struct_mutex);

	if (dev->driver->postclose)
		dev->driver->postclose(dev, file_priv);
	drm_free(file_priv, sizeof(*file_priv), DRM_MEM_FILES);

	/* ========================================================
	 * End inline drm_release
	 */

	atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
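	/*
	 * If this was the last open of the device, tear it down with
	 * drm_lastclose() -- but only once every outstanding ioctl has
	 * completed.
	 */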
	spin_lock(&dev->count_lock);
	if (!--dev->open_count) {
		if (atomic_read(&dev->ioctl_count)) {
			DRM_ERROR("Device busy: %d\n",
				  atomic_read(&dev->ioctl_count));
			spin_unlock(&dev->count_lock);
			unlock_kernel();
			return -EBUSY;
		}
		spin_unlock(&dev->count_lock);
		unlock_kernel();
		return drm_lastclose(dev);
	}
	spin_unlock(&dev->count_lock);

	unlock_kernel();

	return retcode;
}
EXPORT_SYMBOL(drm_release);

/** No-op. */
unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait)
{
	return 0;
}
EXPORT_SYMBOL(drm_poll);