// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2011 Google, Inc.
 * Copyright (C) 2012 Intel, Inc.
 * Copyright (C) 2013 Intel, Inc.
 * Copyright (C) 2014 Linaro Limited
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

/* This source file contains the implementation of the legacy version of
 * a goldfish pipe device driver. See goldfish_pipe_v2.c for the current
 * version.
 */

#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/miscdevice.h>
#include <linux/platform_device.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/bug.h>
#include <linux/goldfish.h>
#include "goldfish_pipe_qemu.h"
#include "goldfish_pipe.h"

#define MAX_PAGES_TO_GRAB 32

/* A value that will not be set by qemu emulator */
#define INITIAL_BATCH_RESULT (0xdeadbeaf)

struct goldfish_pipe_dev;

/* This data type models a given pipe instance */
struct goldfish_pipe {
	struct goldfish_pipe_dev *dev;

	/* The wake flags this pipe is waiting for.
	 * Note: not protected by any lock; atomic operations and barriers
	 * make it thread-safe.
	 */
	unsigned long flags;

	wait_queue_head_t wake_queue;

	/* protects access to the pipe */
	struct mutex lock;
};

struct access_params {
	unsigned long channel;
	u32 size;
	unsigned long address;
	u32 cmd;
	u32 result;
	/* reserved for future extension */
	u32 flags;
};

/* The driver state. Holds a reference to the i/o page used to
 * communicate with the emulator, and a wake queue for blocked tasks
 * waiting to be awoken.
 */
struct goldfish_pipe_dev {
	/* Needed for the 'remove' call */
	struct goldfish_pipe_dev_base super;

	/* ptr to platform device's device struct */
	struct device *pdev_dev;

	/* the base address for MMIO */
	char __iomem *base;

	struct access_params *aps;

	struct miscdevice miscdev;

	/* Global device spinlock */
	spinlock_t lock;
};

static int goldfish_pipe_device_deinit(void *raw_dev,
				       struct platform_device *pdev);

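/*
 * Send a command for this pipe's channel and read back the result from
 * PIPE_V1_REG_STATUS. The device spinlock serializes access to the shared
 * channel/command/status registers.
 */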
static u32 goldfish_cmd_status(struct goldfish_pipe *pipe, u32 cmd)
{
	unsigned long flags;
	u32 status;
	struct goldfish_pipe_dev *dev = pipe->dev;

	spin_lock_irqsave(&dev->lock, flags);
	gf_write_ptr(pipe, dev->base + PIPE_V1_REG_CHANNEL,
		     dev->base + PIPE_V1_REG_CHANNEL_HIGH);
	writel(cmd, dev->base + PIPE_V1_REG_COMMAND);
	status = readl(dev->base + PIPE_V1_REG_STATUS);
	spin_unlock_irqrestore(&dev->lock, flags);
	return status;
}

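/* Same as goldfish_cmd_status(), but without reading back the status. */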
static void goldfish_cmd(struct goldfish_pipe *pipe, u32 cmd)
{
	unsigned long flags;
	struct goldfish_pipe_dev *dev = pipe->dev;

	spin_lock_irqsave(&dev->lock, flags);
	gf_write_ptr(pipe, dev->base + PIPE_V1_REG_CHANNEL,
		     dev->base + PIPE_V1_REG_CHANNEL_HIGH);
	writel(cmd, dev->base + PIPE_V1_REG_COMMAND);
	spin_unlock_irqrestore(&dev->lock, flags);
}

/* This function converts an error code returned by the emulator through
 * the PIPE_V1_REG_STATUS i/o register into a valid negative errno value.
 */
static int goldfish_pipe_error_convert(int status)
{
	switch (status) {
	case PIPE_ERROR_AGAIN:
		return -EAGAIN;
	case PIPE_ERROR_NOMEM:
		return -ENOMEM;
	case PIPE_ERROR_IO:
		return -EIO;
	default:
		return -EINVAL;
	}
}

/*
 * Note: QEMU returns 0 for reads of unknown registers, so reading back
 * the address we just wrote tells us whether access_params is supported.
 */
static int valid_batchbuffer_addr(struct goldfish_pipe_dev *dev,
				  struct access_params *aps)
{
	u32 aph, apl;
	u64 paddr;

	aph = readl(dev->base + PIPE_V1_REG_PARAMS_ADDR_HIGH);
	apl = readl(dev->base + PIPE_V1_REG_PARAMS_ADDR_LOW);

	paddr = ((u64)aph << 32) | apl;
	return paddr == (__pa(aps));
}

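/*
 * Allocate the access_params block and hand its physical address to the
 * emulator. If the emulator does not echo the address back, batched
 * transfers are unavailable and dev->aps stays NULL.
 */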
static int setup_access_params_addr(struct platform_device *pdev,
				    struct goldfish_pipe_dev *dev)
{
	u64 paddr;
	struct access_params *aps;

	aps = devm_kzalloc(&pdev->dev, sizeof(struct access_params),
			   GFP_KERNEL);
	if (!aps)
		return -ENOMEM;

	paddr = __pa(aps);
	writel((u32)(paddr >> 32), dev->base + PIPE_V1_REG_PARAMS_ADDR_HIGH);
	writel((u32)paddr, dev->base + PIPE_V1_REG_PARAMS_ADDR_LOW);

	if (valid_batchbuffer_addr(dev, aps)) {
		dev->aps = aps;
		return 0;
	}

	devm_kfree(&pdev->dev, aps);
	return -EFAULT;
}

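/*
 * Issue a pipe command through the shared access_params block instead of
 * programming the registers one by one. Returns -EINVAL if batching is not
 * set up or the emulator left aps->result untouched (i.e. it did not process
 * the request); on success returns 0 and stores the transfer status in
 * *status.
 */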
static int access_with_param(struct goldfish_pipe_dev *dev, const int cmd,
			     unsigned long address, unsigned long avail,
			     struct goldfish_pipe *pipe, int *status)
{
	struct access_params *aps = dev->aps;

	if (!aps)
		return -EINVAL;

	aps->result = INITIAL_BATCH_RESULT;
	aps->channel = (unsigned long)pipe;
	aps->size = avail;
	aps->address = address;
	aps->cmd = cmd;
	writel(cmd, dev->base + PIPE_V1_REG_ACCESS_PARAMS);

	/*
	 * If the aps->result has not changed, that means
	 * that the batch command failed
	 */
	if (aps->result == INITIAL_BATCH_RESULT)
		return -EINVAL;

	*status = aps->result;
	return 0;
}

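/*
 * Transfer up to 'size' bytes between the pipe and the physically contiguous
 * buffer at 'xaddr'. Tries the batched access_params path first, then falls
 * back to programming the channel/size/address/command registers directly.
 * Returns the status value reported by the emulator.
 */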
static int transfer_pages(struct goldfish_pipe_dev *dev,
			  struct goldfish_pipe *pipe,
			  int cmd,
			  unsigned long xaddr,
			  unsigned long size)
{
	unsigned long irq_flags;
	int status = 0;

	spin_lock_irqsave(&dev->lock, irq_flags);
	if (access_with_param(dev, cmd, xaddr, size, pipe, &status)) {
		gf_write_ptr(pipe, dev->base + PIPE_V1_REG_CHANNEL,
			     dev->base + PIPE_V1_REG_CHANNEL_HIGH);

		writel(size, dev->base + PIPE_V1_REG_SIZE);

		gf_write_ptr((void *)xaddr,
			     dev->base + PIPE_V1_REG_ADDRESS,
			     dev->base + PIPE_V1_REG_ADDRESS_HIGH);

		writel(cmd, dev->base + PIPE_V1_REG_COMMAND);

		status = readl(dev->base + PIPE_V1_REG_STATUS);
	}
	spin_unlock_irqrestore(&dev->lock, irq_flags);

	return status;
}

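/* Physical address of 'addr', given the page that backs it. */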
static unsigned long translate_address(const struct page *page,
				       unsigned long addr)
{
	return page_to_phys(page) | (addr & ~PAGE_MASK);
}

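/*
 * Common implementation of read() and write(). Pins the user buffer with
 * get_user_pages_fast(), coalesces physically contiguous pages into a single
 * transfer, and blocks on the wake queue whenever the emulator answers
 * PIPE_ERROR_AGAIN in blocking mode.
 */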
static ssize_t goldfish_pipe_read_write(struct file *filp, char __user *buffer,
					size_t bufflen, int is_write)
{
	struct goldfish_pipe *pipe = filp->private_data;
	struct goldfish_pipe_dev *dev = pipe->dev;
	unsigned long address;
	unsigned long address_end;
	const int wake_bit = is_write ? BIT_WAKE_ON_WRITE : BIT_WAKE_ON_READ;
	const int pipe_cmd = is_write ? PIPE_CMD_WRITE : PIPE_CMD_READ;
	int count = 0;
	int ret = -EINVAL;

	/* If the emulator already closed the pipe, no need to go further */
	if (test_bit(BIT_CLOSED_ON_HOST, &pipe->flags))
		return -EIO;

	/* Null reads or writes succeed */
	if (unlikely(bufflen == 0))
		return 0;

	/* Check the buffer range for access */
	if (!access_ok(is_write ? VERIFY_WRITE : VERIFY_READ,
		       buffer, bufflen))
		return -EFAULT;

	address = (unsigned long)buffer;
	address_end = address + bufflen;

	/* Serialize access to the pipe */
	if (mutex_lock_interruptible(&pipe->lock))
		return -ERESTARTSYS;

	while (address < address_end) {
		struct page *pages[MAX_PAGES_TO_GRAB];
		unsigned long page_end = (address & PAGE_MASK) + PAGE_SIZE;
		unsigned long avail;
		unsigned long xaddr;
		unsigned long xaddr_prev;
		long first_page;
		long last_page;
		long requested_pages;
		int status;
		int n_pages;
		int page_i;
		int num_contiguous_pages;

		/*
		 * Attempt to grab multiple physically contiguous pages.
		 */
		first_page = address & PAGE_MASK;
		last_page = (address_end - 1) & PAGE_MASK;
		requested_pages =
			min(((last_page - first_page) >> PAGE_SHIFT) + 1,
			    (long)MAX_PAGES_TO_GRAB);

		ret = get_user_pages_fast(first_page, requested_pages,
					  !is_write, pages);
		if (ret < 0) {
			dev_err(dev->pdev_dev,
				"%s: get_user_pages_fast failed: %d\n",
				__func__, ret);
			break;
		} else if (!ret) {
			dev_err(dev->pdev_dev,
				"%s: error: no pages returned, requested %ld\n",
				__func__, requested_pages);
			break;
		}

		n_pages = ret;
		xaddr = translate_address(pages[0], address);
		xaddr_prev = xaddr;
		num_contiguous_pages = 1;
		for (page_i = 1; page_i < n_pages; page_i++) {
			unsigned long xaddr_i;

			xaddr_i = translate_address(pages[page_i], address);
			if (xaddr_i == xaddr_prev + PAGE_SIZE) {
				page_end += PAGE_SIZE;
				xaddr_prev = xaddr_i;
				num_contiguous_pages++;
			} else {
				dev_err(dev->pdev_dev,
					"%s: discontinuous page boundary: %d "
					"pages instead\n",
					__func__, page_i);
				break;
			}
		}
		avail = min(page_end, address_end) - address;

		status = transfer_pages(dev, pipe, pipe_cmd, xaddr, avail);

		for (page_i = 0; page_i < n_pages; page_i++) {
			if (status > 0 && !is_write &&
			    page_i < num_contiguous_pages)
				set_page_dirty(pages[page_i]);

			put_page(pages[page_i]);
		}

		if (status > 0) { /* Correct transfer */
			count += status;
			address += status;
			continue;
		} else if (status == 0) { /* EOF */
			ret = 0;
			break;
		} else if (status < 0 && count > 0) {
			/*
			 * An error occurred, but we already transferred
			 * something on one of the previous pages.
			 * Just return what we already copied and log the
			 * error.
			 *
			 * Note: This seems like an incorrect approach, but
			 * we cannot change it until we check whether any
			 * user space ABI relies on this behavior.
			 */
			if (status != PIPE_ERROR_AGAIN)
				dev_err_ratelimited(dev->pdev_dev,
					"backend returned error %d on %s\n",
					status, is_write ? "write" : "read");
			ret = 0;
			break;
		}

		/*
		 * If the error is not PIPE_ERROR_AGAIN, or if we are in
		 * non-blocking mode, just return the error code.
		 */
		if (status != PIPE_ERROR_AGAIN ||
		    (filp->f_flags & O_NONBLOCK) != 0) {
			ret = goldfish_pipe_error_convert(status);
			break;
		}

		/*
		 * The backend blocked the read/write, wait until the backend
		 * tells us it's ready to process more data.
		 */
		set_bit(wake_bit, &pipe->flags);

		/* Tell the emulator we're going to wait for a wake event */
		goldfish_cmd(pipe, pipe_cmd);

		/* Unlock the pipe, then wait for the wake signal */
		mutex_unlock(&pipe->lock);

		while (test_bit(wake_bit, &pipe->flags)) {
			if (wait_event_interruptible(pipe->wake_queue,
					!test_bit(wake_bit, &pipe->flags)))
				return -ERESTARTSYS;

			if (test_bit(BIT_CLOSED_ON_HOST, &pipe->flags))
				return -EIO;
		}

		/* Try to re-acquire the lock */
		if (mutex_lock_interruptible(&pipe->lock))
			return -ERESTARTSYS;
	}
	mutex_unlock(&pipe->lock);

	return (ret < 0) ? ret : count;
}

static ssize_t goldfish_pipe_read(struct file *filp, char __user *buffer,
				  size_t bufflen, loff_t *ppos)
{
	return goldfish_pipe_read_write(filp, buffer, bufflen,
					/* is_write */ 0);
}

static ssize_t goldfish_pipe_write(struct file *filp,
				   const char __user *buffer, size_t bufflen,
				   loff_t *ppos)
{
	return goldfish_pipe_read_write(filp, (char __user *)buffer,
					bufflen, /* is_write */ 1);
}

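/*
 * poll() support: query the pipe state with PIPE_CMD_POLL and translate the
 * result into poll flags.
 */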
static unsigned int goldfish_pipe_poll(struct file *filp, poll_table *wait)
{
	struct goldfish_pipe *pipe = filp->private_data;
	unsigned int mask = 0;
	int status;

	if (mutex_lock_interruptible(&pipe->lock))
		return -ERESTARTSYS;

	poll_wait(filp, &pipe->wake_queue, wait);

	status = goldfish_cmd_status(pipe, PIPE_CMD_POLL);

	mutex_unlock(&pipe->lock);

	if (status & PIPE_POLL_IN)
		mask |= POLLIN | POLLRDNORM;

	if (status & PIPE_POLL_OUT)
		mask |= POLLOUT | POLLWRNORM;

	if (status & PIPE_POLL_HUP)
		mask |= POLLHUP;

	if (test_bit(BIT_CLOSED_ON_HOST, &pipe->flags))
		mask |= POLLERR;

	return mask;
}

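/*
 * Interrupt handler. Returns IRQ_HANDLED if at least one pipe was woken up,
 * IRQ_NONE otherwise (the interrupt line is shared).
 */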
static irqreturn_t goldfish_pipe_interrupt(int irq, void *dev_id)
{
	struct goldfish_pipe_dev *dev = dev_id;
	unsigned long irq_flags;
	int count = 0;

	/*
	 * We're going to read from the emulator a list of (channel,flags)
	 * pairs corresponding to the wake events that occurred on each
	 * blocked pipe (i.e. channel).
	 */
	spin_lock_irqsave(&dev->lock, irq_flags);
	for (;;) {
		/* First read the channel, 0 means the end of the list */
		struct goldfish_pipe *pipe;
		unsigned long wakes;
		unsigned long channel = 0;

#ifdef CONFIG_64BIT
		channel =
			(u64)readl(dev->base + PIPE_V1_REG_CHANNEL_HIGH) << 32;
#endif
		channel |= readl(dev->base + PIPE_V1_REG_CHANNEL);
		if (!channel)
			break;

		/* Convert channel to struct pipe pointer + read wake flags */
		wakes = readl(dev->base + PIPE_V1_REG_WAKES);
		pipe  = (struct goldfish_pipe *)(ptrdiff_t)channel;

		/* Did the emulator just close a pipe? */
		if (wakes & PIPE_WAKE_CLOSED) {
			set_bit(BIT_CLOSED_ON_HOST, &pipe->flags);
			wakes |= PIPE_WAKE_READ | PIPE_WAKE_WRITE;
		}
		if (wakes & PIPE_WAKE_READ)
			clear_bit(BIT_WAKE_ON_READ, &pipe->flags);
		if (wakes & PIPE_WAKE_WRITE)
			clear_bit(BIT_WAKE_ON_WRITE, &pipe->flags);

		wake_up_interruptible(&pipe->wake_queue);
		count++;
	}
	spin_unlock_irqrestore(&dev->lock, irq_flags);

	return (count == 0) ? IRQ_NONE : IRQ_HANDLED;
}

/* A helper function to get the instance of goldfish_pipe_dev from file */
static struct goldfish_pipe_dev *to_goldfish_pipe_dev(struct file *file)
{
	struct miscdevice *miscdev = file->private_data;

	return container_of(miscdev, struct goldfish_pipe_dev, miscdev);
}

/**
 *	goldfish_pipe_open - open a channel to the AVD
 *	@inode: inode of device
 *	@file: file struct of opener
 *
 *	Create a new pipe link between the emulator and the user application.
 *	Each new request produces a new pipe.
 *
 *	Note: we use the pipe ID as a mux. All goldfish emulations are 32bit
 *	right now so this is fine. A move to 64bit will need to revisit this
 *	addressing scheme.
 */
static int goldfish_pipe_open(struct inode *inode, struct file *file)
{
	struct goldfish_pipe_dev *dev = to_goldfish_pipe_dev(file);
	struct goldfish_pipe *pipe;
	int status;

	/* Allocate new pipe kernel object */
	pipe = kzalloc(sizeof(*pipe), GFP_KERNEL);
	if (!pipe)
		return -ENOMEM;

	pipe->dev = dev;
	init_waitqueue_head(&pipe->wake_queue);
	mutex_init(&pipe->lock);

	/*
	 * Now, tell the emulator we're opening a new pipe. We use the
	 * pipe object's address as the channel identifier for simplicity.
	 */

	status = goldfish_cmd_status(pipe, PIPE_CMD_OPEN);
	if (status < 0) {
		kfree(pipe);
		return status;
	}

	/* All is done, save the pipe into the file's private data field */
	file->private_data = pipe;
	return 0;
}

static int goldfish_pipe_release(struct inode *inode, struct file *filp)
{
	struct goldfish_pipe *pipe = filp->private_data;

	pr_debug("%s: call. pipe=%p file=%p\n", __func__, pipe, filp);
	/* The guest is closing the channel, so tell the emulator right now */
	goldfish_cmd(pipe, PIPE_CMD_CLOSE);
	kfree(pipe);
	filp->private_data = NULL;
	return 0;
}

static const struct file_operations goldfish_pipe_fops = {
	.owner = THIS_MODULE,
	.read = goldfish_pipe_read,
	.write = goldfish_pipe_write,
	.poll = goldfish_pipe_poll,
	.open = goldfish_pipe_open,
	.release = goldfish_pipe_release,
};

static void init_miscdevice(struct miscdevice *miscdev)
{
	memset(miscdev, 0, sizeof(*miscdev));

	miscdev->minor = MISC_DYNAMIC_MINOR;
	miscdev->name = DEVICE_NAME;
	miscdev->fops = &goldfish_pipe_fops;
};

static int goldfish_pipe_device_deinit(void *raw_dev,
				       struct platform_device *pdev);

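/*
 * Initialize the v1 (legacy) pipe device: register the interrupt handler and
 * the misc device, then try to set up the optional access_params batch
 * buffer.
 */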
int goldfish_pipe_device_v1_init(struct platform_device *pdev,
				 void __iomem *base,
				 int irq)
{
	struct goldfish_pipe_dev *dev;
	int err;

	dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;

	dev->super.deinit = &goldfish_pipe_device_deinit;
	dev->pdev_dev = &pdev->dev;
	dev->base = base;
	spin_lock_init(&dev->lock);

	err = devm_request_irq(&pdev->dev, irq,
			       &goldfish_pipe_interrupt, IRQF_SHARED,
			       DEVICE_NAME, dev);
	if (err) {
		dev_err(&pdev->dev, "unable to allocate IRQ for v1\n");
		return err;
	}

	init_miscdevice(&dev->miscdev);
	err = misc_register(&dev->miscdev);
	if (err) {
		dev_err(&pdev->dev, "unable to register v1 device\n");
		return err;
	}

	setup_access_params_addr(pdev, dev);

	platform_set_drvdata(pdev, dev);
	return 0;
}

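/* Undo what goldfish_pipe_device_v1_init() set up; devm handles the rest. */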
static int goldfish_pipe_device_deinit(void *raw_dev,
				       struct platform_device *pdev)
{
	struct goldfish_pipe_dev *dev = raw_dev;

	misc_deregister(&dev->miscdev);
	return 0;
}