// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Simple synchronous userspace interface to SPI devices
 *
 * Copyright (C) 2006 SWAPP
 *	Andrea Paterniani <a.paterniani@swapp-eng.it>
 * Copyright (C) 2007 David Brownell (simplification, cleanup)
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/ioctl.h>
#include <linux/fs.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/compat.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/acpi.h>

#include <linux/spi/spi.h>
#include <linux/spi/spidev.h>

#include <linux/uaccess.h>

/*
 * This supports access to SPI devices using normal userspace I/O calls.
 * Note that while traditional UNIX/POSIX I/O semantics are half duplex,
 * and often mask message boundaries, full SPI support requires full duplex
 * transfers. There are several kinds of internal message boundaries to
 * handle chipselect management and other protocol options.
 *
 * SPI has a character major number assigned. We allocate minor numbers
 * dynamically using a bitmask. You must use hotplug tools, such as udev
 * (or mdev with busybox) to create and destroy the /dev/spidevB.C device
 * nodes, since there is no fixed association of minor numbers with any
 * particular SPI bus or device.
 */
#define SPIDEV_MAJOR	153	/* assigned */
#define N_SPI_MINORS	32	/* ... up to 256 */
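
/*
 * For illustration only (not part of the driver): a minimal userspace
 * sketch of a full-duplex transfer through this interface, assuming a
 * node named /dev/spidev0.0 exists. SPI_IOC_MESSAGE() and struct
 * spi_ioc_transfer come from <linux/spi/spidev.h>; error handling is
 * omitted for brevity.
 *
 *	#include <stdint.h>
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/spi/spidev.h>
 *
 *	uint8_t tx[4] = { 0x90, 0x00, 0x00, 0x00 }, rx[4];
 *	struct spi_ioc_transfer xfer = {
 *		.tx_buf = (unsigned long)tx,
 *		.rx_buf = (unsigned long)rx,
 *		.len = sizeof(tx),
 *	};
 *	int fd = open("/dev/spidev0.0", O_RDWR);
 *	ioctl(fd, SPI_IOC_MESSAGE(1), &xfer);
 */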

static DECLARE_BITMAP(minors, N_SPI_MINORS);

/* Bit masks for spi_device.mode management. Note that incorrect
 * values for some of these bits can cause *lots* of trouble for other
 * devices on a shared bus:
 *
 *  - CS_HIGH ... this device will be active when it shouldn't be
 *  - 3WIRE ... when active, it won't behave as it should
 *  - NO_CS ... there will be no explicit message boundaries; this
 *	is completely incompatible with the shared bus model
 *  - READY ... transfers may proceed when they shouldn't.
 *
 * REVISIT should changing those flags be privileged?
 */
#define SPI_MODE_MASK	(SPI_CPHA | SPI_CPOL | SPI_CS_HIGH \
				| SPI_LSB_FIRST | SPI_3WIRE | SPI_LOOP \
				| SPI_NO_CS | SPI_READY | SPI_TX_DUAL \
				| SPI_TX_QUAD | SPI_TX_OCTAL | SPI_RX_DUAL \
				| SPI_RX_QUAD | SPI_RX_OCTAL)
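
/*
 * A hedged userspace sketch (not part of the driver) of changing one
 * of these mode bits through the SPI_IOC_WR_MODE ioctl handled below;
 * "fd" is assumed to be an open spidev file descriptor:
 *
 *	uint8_t mode = SPI_MODE_3;
 *	ioctl(fd, SPI_IOC_WR_MODE, &mode);
 */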

struct spidev_data {
	dev_t devt;
	spinlock_t spi_lock;
	struct spi_device *spi;
	struct list_head device_entry;

	/* TX/RX buffers are NULL unless this device is open (users > 0) */
	struct mutex buf_lock;
	unsigned users;
	u8 *tx_buffer;
	u8 *rx_buffer;
	u32 speed_hz;
};

static LIST_HEAD(device_list);
static DEFINE_MUTEX(device_list_lock);

static unsigned bufsiz = 4096;
module_param(bufsiz, uint, S_IRUGO);
MODULE_PARM_DESC(bufsiz, "data bytes in biggest supported SPI message");
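
/*
 * e.g. "modprobe spidev bufsiz=65536" raises this limit at load time;
 * the parameter is read-only afterwards (S_IRUGO).
 */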

/*-------------------------------------------------------------------------*/

static ssize_t spidev_sync(struct spidev_data *spidev, struct spi_message *message)
{
	int status;
	struct spi_device *spi;

	spin_lock_irq(&spidev->spi_lock);
	spi = spidev->spi;
	spin_unlock_irq(&spidev->spi_lock);

	if (spi == NULL) {
		status = -ESHUTDOWN;
	} else {
		status = spi_sync(spi, message);
	}

	if (status == 0) {
		status = message->actual_length;
	}

	return status;
}

static inline ssize_t spidev_sync_write(struct spidev_data *spidev, size_t len)
{
	struct spi_transfer t = {
		.tx_buf = spidev->tx_buffer,
		.len = len,
		.speed_hz = spidev->speed_hz,
	};
	struct spi_message m;

	spi_message_init(&m);
	spi_message_add_tail(&t, &m);
	return spidev_sync(spidev, &m);
}

static inline ssize_t spidev_sync_read(struct spidev_data *spidev, size_t len)
{
	struct spi_transfer t = {
		.rx_buf = spidev->rx_buffer,
		.len = len,
		.speed_hz = spidev->speed_hz,
	};
	struct spi_message m;

	spi_message_init(&m);
	spi_message_add_tail(&t, &m);
	return spidev_sync(spidev, &m);
}

/*-------------------------------------------------------------------------*/

/* Read-only message with current device setup */
static ssize_t spidev_read(struct file *filp, char __user *buf, size_t count, loff_t *f_pos)
{
	struct spidev_data *spidev;
	ssize_t status;

	/* chipselect only toggles at start or end of operation */
	if (count > bufsiz) {
		return -EMSGSIZE;
	}

	spidev = filp->private_data;

	mutex_lock(&spidev->buf_lock);
	status = spidev_sync_read(spidev, count);
	if (status > 0) {
		unsigned long missing;

		missing = copy_to_user(buf, spidev->rx_buffer, status);
		if (missing == status) {
			status = -EFAULT;
		} else {
			status = status - missing;
		}
	}
	mutex_unlock(&spidev->buf_lock);

	return status;
}

/* Write-only message with current device setup */
static ssize_t spidev_write(struct file *filp, const char __user *buf, size_t count, loff_t *f_pos)
{
	struct spidev_data *spidev;
	ssize_t status;
	unsigned long missing;

	/* chipselect only toggles at start or end of operation */
	if (count > bufsiz) {
		return -EMSGSIZE;
	}

	spidev = filp->private_data;

	mutex_lock(&spidev->buf_lock);
	missing = copy_from_user(spidev->tx_buffer, buf, count);
	if (missing == 0) {
		status = spidev_sync_write(spidev, count);
	} else {
		status = -EFAULT;
	}
	mutex_unlock(&spidev->buf_lock);

	return status;
}

static int spidev_message(struct spidev_data *spidev, struct spi_ioc_transfer *u_xfers, unsigned n_xfers)
{
	struct spi_message msg;
	struct spi_transfer *k_xfers;
	struct spi_transfer *k_tmp;
	struct spi_ioc_transfer *u_tmp;
	unsigned n, total, tx_total, rx_total;
	u8 *tx_buf, *rx_buf;
	int status = -EFAULT;

	spi_message_init(&msg);
	k_xfers = kcalloc(n_xfers, sizeof(*k_tmp), GFP_KERNEL);
	if (k_xfers == NULL) {
		return -ENOMEM;
	}

	/* Construct spi_message, copying any tx data to bounce buffer.
	 * We walk the array of user-provided transfers, using each one
	 * to initialize a kernel version of the same transfer.
	 */
	tx_buf = spidev->tx_buffer;
	rx_buf = spidev->rx_buffer;
	total = 0;
	tx_total = 0;
	rx_total = 0;
	for (n = n_xfers, k_tmp = k_xfers, u_tmp = u_xfers; n; n--, k_tmp++, u_tmp++) {
		/* Ensure that subsequent allocations from rx_buf/tx_buf also
		 * meet the DMA alignment requirements.
		 */
		unsigned int len_aligned = ALIGN(u_tmp->len, ARCH_KMALLOC_MINALIGN);

		k_tmp->len = u_tmp->len;

		total += k_tmp->len;
		/* Since the function returns the total length of transfers
		 * on success, restrict the total to positive int values to
		 * avoid the return value looking like an error. Also check
		 * each transfer length to avoid arithmetic overflow.
		 */
		if (total > INT_MAX || k_tmp->len > INT_MAX) {
			status = -EMSGSIZE;
			goto done;
		}

		if (u_tmp->rx_buf) {
			/* this transfer needs space in RX bounce buffer */
			rx_total += len_aligned;
			if (rx_total > bufsiz) {
				status = -EMSGSIZE;
				goto done;
			}
			k_tmp->rx_buf = rx_buf;
			rx_buf += len_aligned;
		}
		if (u_tmp->tx_buf) {
			/* this transfer needs space in TX bounce buffer */
			tx_total += len_aligned;
			if (tx_total > bufsiz) {
				status = -EMSGSIZE;
				goto done;
			}
			k_tmp->tx_buf = tx_buf;
			if (copy_from_user(tx_buf, (const u8 __user *)(uintptr_t)u_tmp->tx_buf, u_tmp->len)) {
				goto done;
			}
			tx_buf += len_aligned;
		}

		k_tmp->cs_change = !!u_tmp->cs_change;
		k_tmp->tx_nbits = u_tmp->tx_nbits;
		k_tmp->rx_nbits = u_tmp->rx_nbits;
		k_tmp->bits_per_word = u_tmp->bits_per_word;
		k_tmp->delay.value = u_tmp->delay_usecs;
		k_tmp->delay.unit = SPI_DELAY_UNIT_USECS;
		k_tmp->speed_hz = u_tmp->speed_hz;
		k_tmp->word_delay.value = u_tmp->word_delay_usecs;
		k_tmp->word_delay.unit = SPI_DELAY_UNIT_USECS;
		if (!k_tmp->speed_hz) {
			k_tmp->speed_hz = spidev->speed_hz;
		}
#ifdef VERBOSE
		dev_dbg(&spidev->spi->dev, "  xfer len %u %s%s%s%dbits %u usec %u usec %uHz\n", k_tmp->len,
			k_tmp->rx_buf ? "rx " : "", k_tmp->tx_buf ? "tx " : "", k_tmp->cs_change ? "cs " : "",
			k_tmp->bits_per_word ?: spidev->spi->bits_per_word, k_tmp->delay.value, k_tmp->word_delay.value,
			k_tmp->speed_hz ?: spidev->spi->max_speed_hz);
#endif
		spi_message_add_tail(k_tmp, &msg);
	}

	status = spidev_sync(spidev, &msg);
	if (status < 0) {
		goto done;
	}

	/* copy any rx data out of bounce buffer */
	for (n = n_xfers, k_tmp = k_xfers, u_tmp = u_xfers; n; n--, k_tmp++, u_tmp++) {
		if (u_tmp->rx_buf) {
			if (copy_to_user((u8 __user *)(uintptr_t)u_tmp->rx_buf, k_tmp->rx_buf, u_tmp->len)) {
				status = -EFAULT;
				goto done;
			}
		}
	}
	status = total;

done:
	kfree(k_xfers);
	return status;
}
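
/*
 * For reference, a hedged userspace sketch of the segmented I/O this
 * function implements: two chained transfers submitted atomically via
 * SPI_IOC_MESSAGE(2). "fd", "cmd" and "resp" are illustrative names.
 *
 *	struct spi_ioc_transfer xfers[2] = {
 *		{ .tx_buf = (unsigned long)cmd,  .len = sizeof(cmd) },
 *		{ .rx_buf = (unsigned long)resp, .len = sizeof(resp) },
 *	};
 *	ioctl(fd, SPI_IOC_MESSAGE(2), xfers);
 *
 * Chipselect stays asserted across both transfers unless cs_change is
 * set on the first one.
 */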

static struct spi_ioc_transfer *spidev_get_ioc_message(unsigned int cmd, struct spi_ioc_transfer __user *u_ioc,
							unsigned *n_ioc)
{
	u32 tmp;

	/* Check type, command number and direction */
	if (_IOC_TYPE(cmd) != SPI_IOC_MAGIC || _IOC_NR(cmd) != _IOC_NR(SPI_IOC_MESSAGE(0)) || _IOC_DIR(cmd) != _IOC_WRITE) {
		return ERR_PTR(-ENOTTY);
	}

	tmp = _IOC_SIZE(cmd);
	if ((tmp % sizeof(struct spi_ioc_transfer)) != 0) {
		return ERR_PTR(-EINVAL);
	}
	*n_ioc = tmp / sizeof(struct spi_ioc_transfer);
	if (*n_ioc == 0) {
		return NULL;
	}

	/* copy into scratch area */
	return memdup_user(u_ioc, tmp);
}
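
/*
 * Note: SPI_IOC_MESSAGE(N) encodes N * sizeof(struct spi_ioc_transfer)
 * in the ioctl size field, which is why the transfer count above can be
 * recovered from _IOC_SIZE(cmd) alone.
 */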

static long spidev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	int retval = 0;
	struct spidev_data *spidev;
	struct spi_device *spi;
	u32 tmp;
	unsigned n_ioc;
	struct spi_ioc_transfer *ioc;

	/* Check type and command number */
	if (_IOC_TYPE(cmd) != SPI_IOC_MAGIC) {
		return -ENOTTY;
	}

	/* guard against device removal before, or while,
	 * we issue this ioctl.
	 */
	spidev = filp->private_data;
	spin_lock_irq(&spidev->spi_lock);
	spi = spi_dev_get(spidev->spi);
	spin_unlock_irq(&spidev->spi_lock);

	if (spi == NULL) {
		return -ESHUTDOWN;
	}

	/* use the buffer lock here for triple duty:
	 * - prevent I/O (from us) so calling spi_setup() is safe;
	 * - prevent concurrent SPI_IOC_WR_* from morphing
	 *   data fields while SPI_IOC_RD_* reads them;
	 * - SPI_IOC_MESSAGE needs the buffer locked "normally".
	 */
	mutex_lock(&spidev->buf_lock);

	switch (cmd) {
	/* read requests */
	case SPI_IOC_RD_MODE:
		retval = put_user(spi->mode & SPI_MODE_MASK, (__u8 __user *)arg);
		break;
	case SPI_IOC_RD_MODE32:
		retval = put_user(spi->mode & SPI_MODE_MASK, (__u32 __user *)arg);
		break;
	case SPI_IOC_RD_LSB_FIRST:
		retval = put_user((spi->mode & SPI_LSB_FIRST) ? 1 : 0, (__u8 __user *)arg);
		break;
	case SPI_IOC_RD_BITS_PER_WORD:
		retval = put_user(spi->bits_per_word, (__u8 __user *)arg);
		break;
	case SPI_IOC_RD_MAX_SPEED_HZ:
		retval = put_user(spidev->speed_hz, (__u32 __user *)arg);
		break;

	/* write requests */
	case SPI_IOC_WR_MODE:
	case SPI_IOC_WR_MODE32:
		if (cmd == SPI_IOC_WR_MODE) {
			retval = get_user(tmp, (u8 __user *)arg);
		} else {
			retval = get_user(tmp, (u32 __user *)arg);
		}
		if (retval == 0) {
			struct spi_controller *ctlr = spi->controller;
			u32 save = spi->mode;

			if (tmp & ~SPI_MODE_MASK) {
				retval = -EINVAL;
				break;
			}

			if (ctlr->use_gpio_descriptors && ctlr->cs_gpiods && ctlr->cs_gpiods[spi->chip_select]) {
				tmp |= SPI_CS_HIGH;
			}

			tmp |= spi->mode & ~SPI_MODE_MASK;
			spi->mode = (u16)tmp;
			retval = spi_setup(spi);
			if (retval < 0) {
				spi->mode = save;
			} else {
				dev_dbg(&spi->dev, "spi mode %x\n", tmp);
			}
		}
		break;
	case SPI_IOC_WR_LSB_FIRST:
		retval = get_user(tmp, (__u8 __user *)arg);
		if (retval == 0) {
			u32 save = spi->mode;

			if (tmp) {
				spi->mode |= SPI_LSB_FIRST;
			} else {
				spi->mode &= ~SPI_LSB_FIRST;
			}
			retval = spi_setup(spi);
			if (retval < 0) {
				spi->mode = save;
			} else {
				dev_dbg(&spi->dev, "%csb first\n", tmp ? 'l' : 'm');
			}
		}
		break;
	case SPI_IOC_WR_BITS_PER_WORD:
		retval = get_user(tmp, (__u8 __user *)arg);
		if (retval == 0) {
			u8 save = spi->bits_per_word;

			spi->bits_per_word = tmp;
			retval = spi_setup(spi);
			if (retval < 0) {
				spi->bits_per_word = save;
			} else {
				dev_dbg(&spi->dev, "%d bits per word\n", tmp);
			}
		}
		break;
	case SPI_IOC_WR_MAX_SPEED_HZ:
		retval = get_user(tmp, (__u32 __user *)arg);
		if (retval == 0) {
			u32 save = spi->max_speed_hz;

			spi->max_speed_hz = tmp;
			retval = spi_setup(spi);
			if (retval == 0) {
				spidev->speed_hz = tmp;
				dev_dbg(&spi->dev, "%d Hz (max)\n", spidev->speed_hz);
			}
			spi->max_speed_hz = save;
		}
		break;

	default:
		/* segmented and/or full-duplex I/O request */
		/* Check message and copy into scratch area */
		ioc = spidev_get_ioc_message(cmd, (struct spi_ioc_transfer __user *)arg, &n_ioc);
		if (IS_ERR(ioc)) {
			retval = PTR_ERR(ioc);
			break;
		}
		if (!ioc) {
			break; /* n_ioc is also 0 */
		}

		/* translate to spi_message, execute */
		retval = spidev_message(spidev, ioc, n_ioc);
		kfree(ioc);
		break;
	}

	mutex_unlock(&spidev->buf_lock);
	spi_dev_put(spi);
	return retval;
}

#ifdef CONFIG_COMPAT
static long spidev_compat_ioc_message(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct spi_ioc_transfer __user *u_ioc;
	int retval = 0;
	struct spidev_data *spidev;
	struct spi_device *spi;
	unsigned n_ioc, n;
	struct spi_ioc_transfer *ioc;

	u_ioc = (struct spi_ioc_transfer __user *)compat_ptr(arg);

	/* guard against device removal before, or while,
	 * we issue this ioctl.
	 */
	spidev = filp->private_data;
	spin_lock_irq(&spidev->spi_lock);
	spi = spi_dev_get(spidev->spi);
	spin_unlock_irq(&spidev->spi_lock);

	if (spi == NULL) {
		return -ESHUTDOWN;
	}

	/* SPI_IOC_MESSAGE needs the buffer locked "normally" */
	mutex_lock(&spidev->buf_lock);

	/* Check message and copy into scratch area */
	ioc = spidev_get_ioc_message(cmd, u_ioc, &n_ioc);
	if (IS_ERR(ioc)) {
		retval = PTR_ERR(ioc);
		goto done;
	}
	if (!ioc) {
		goto done; /* n_ioc is also 0 */
	}

	/* Convert buffer pointers */
	for (n = 0; n < n_ioc; n++) {
		ioc[n].rx_buf = (uintptr_t)compat_ptr(ioc[n].rx_buf);
		ioc[n].tx_buf = (uintptr_t)compat_ptr(ioc[n].tx_buf);
	}

	/* translate to spi_message, execute */
	retval = spidev_message(spidev, ioc, n_ioc);
	kfree(ioc);

done:
	mutex_unlock(&spidev->buf_lock);
	spi_dev_put(spi);
	return retval;
}

static long spidev_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	if (_IOC_TYPE(cmd) == SPI_IOC_MAGIC && _IOC_NR(cmd) == _IOC_NR(SPI_IOC_MESSAGE(0)) && _IOC_DIR(cmd) == _IOC_WRITE) {
		return spidev_compat_ioc_message(filp, cmd, arg);
	}

	return spidev_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
}
#else
#define spidev_compat_ioctl NULL
#endif /* CONFIG_COMPAT */

static int spidev_open(struct inode *inode, struct file *filp)
{
	struct spidev_data *spidev;
	int status = -ENXIO;

	mutex_lock(&device_list_lock);

	list_for_each_entry(spidev, &device_list, device_entry) {
		if (spidev->devt == inode->i_rdev) {
			status = 0;
			break;
		}
	}

	if (status) {
		pr_debug("spidev: nothing for minor %d\n", iminor(inode));
		goto err_find_dev;
	}

	if (!spidev->tx_buffer) {
		spidev->tx_buffer = kmalloc(bufsiz, GFP_KERNEL);
		if (!spidev->tx_buffer) {
			dev_dbg(&spidev->spi->dev, "open/ENOMEM\n");
			status = -ENOMEM;
			goto err_find_dev;
		}
	}

	if (!spidev->rx_buffer) {
		spidev->rx_buffer = kmalloc(bufsiz, GFP_KERNEL);
		if (!spidev->rx_buffer) {
			dev_dbg(&spidev->spi->dev, "open/ENOMEM\n");
			status = -ENOMEM;
			goto err_alloc_rx_buf;
		}
	}

	spidev->users++;
	filp->private_data = spidev;
	stream_open(inode, filp);

	mutex_unlock(&device_list_lock);
	return 0;

err_alloc_rx_buf:
	kfree(spidev->tx_buffer);
	spidev->tx_buffer = NULL;
err_find_dev:
	mutex_unlock(&device_list_lock);
	return status;
}

static int spidev_release(struct inode *inode, struct file *filp)
{
	struct spidev_data *spidev;
	int dofree;

	mutex_lock(&device_list_lock);
	spidev = filp->private_data;
	filp->private_data = NULL;

	spin_lock_irq(&spidev->spi_lock);
	/* ... after we unbound from the underlying device? */
	dofree = (spidev->spi == NULL);
	spin_unlock_irq(&spidev->spi_lock);

	/* last close? */
	spidev->users--;
	if (!spidev->users) {
		kfree(spidev->tx_buffer);
		spidev->tx_buffer = NULL;

		kfree(spidev->rx_buffer);
		spidev->rx_buffer = NULL;

		if (dofree) {
			kfree(spidev);
		} else {
			spidev->speed_hz = spidev->spi->max_speed_hz;
		}
	}
#ifdef CONFIG_SPI_SLAVE
	if (!dofree) {
		spi_slave_abort(spidev->spi);
	}
#endif
	mutex_unlock(&device_list_lock);

	return 0;
}

static const struct file_operations spidev_fops = {
	.owner = THIS_MODULE,
	/* REVISIT switch to aio primitives, so that userspace
	 * gets more complete API coverage. It'll simplify things
	 * too, except for the locking.
	 */
	.write = spidev_write,
	.read = spidev_read,
	.unlocked_ioctl = spidev_ioctl,
	.compat_ioctl = spidev_compat_ioctl,
	.open = spidev_open,
	.release = spidev_release,
	.llseek = no_llseek,
};

/*-------------------------------------------------------------------------*/

/* The main reason to have this class is to make mdev/udev create the
 * /dev/spidevB.C character device nodes exposing our userspace API.
 * It also simplifies memory management.
 */

static struct class *spidev_class;

#ifdef CONFIG_OF
static const struct of_device_id spidev_dt_ids[] = {
	{ .compatible = "rohm,dh2228fv" },
	{ .compatible = "lineartechnology,ltc2488" },
	{ .compatible = "ge,achc" },
	{ .compatible = "semtech,sx1301" },
	{ .compatible = "lwn,bk4" },
	{ .compatible = "dh,dhcom-board" },
	{ .compatible = "menlo,m53cpld" },
	{ .compatible = "rockchip,spidev" },
	{},
};
MODULE_DEVICE_TABLE(of, spidev_dt_ids);
#endif

#ifdef CONFIG_ACPI

/* Dummy SPI devices not to be used in production systems */
#define SPIDEV_ACPI_DUMMY	1

static const struct acpi_device_id spidev_acpi_ids[] = {
	/*
	 * The ACPI SPT000* devices are only meant for development and
	 * testing. Systems used in production should have a proper ACPI
	 * description of the connected peripheral and they should also use
	 * a proper driver instead of poking directly to the SPI bus.
	 */
	{ "SPT0001", SPIDEV_ACPI_DUMMY },
	{ "SPT0002", SPIDEV_ACPI_DUMMY },
	{ "SPT0003", SPIDEV_ACPI_DUMMY },
	{},
};
MODULE_DEVICE_TABLE(acpi, spidev_acpi_ids);

static void spidev_probe_acpi(struct spi_device *spi)
{
	const struct acpi_device_id *id;

	if (!has_acpi_companion(&spi->dev)) {
		return;
	}

	id = acpi_match_device(spidev_acpi_ids, &spi->dev);
	if (WARN_ON(!id)) {
		return;
	}

	if (id->driver_data == SPIDEV_ACPI_DUMMY) {
		dev_warn(&spi->dev, "do not use this driver in production systems!\n");
	}
}
#else
static inline void spidev_probe_acpi(struct spi_device *spi)
{
}
#endif

/*-------------------------------------------------------------------------*/

static int spidev_probe(struct spi_device *spi)
{
	struct spidev_data *spidev;
	int status;
	unsigned long minor;

	/*
	 * spidev should never be referenced in DT without a specific
	 * compatible string, it is a Linux implementation thing
	 * rather than a description of the hardware.
	 */
	WARN(spi->dev.of_node && of_device_is_compatible(spi->dev.of_node, "spidev"),
	     "%pOF: buggy DT: spidev listed directly in DT\n", spi->dev.of_node);

	spidev_probe_acpi(spi);

	/* Allocate driver data */
	spidev = kzalloc(sizeof(*spidev), GFP_KERNEL);
	if (!spidev) {
		return -ENOMEM;
	}

	/* Initialize the driver data */
	spidev->spi = spi;
	spin_lock_init(&spidev->spi_lock);
	mutex_init(&spidev->buf_lock);

	INIT_LIST_HEAD(&spidev->device_entry);

	/* If we can allocate a minor number, hook up this device.
	 * Reusing minors is fine so long as udev or mdev is working.
	 */
	mutex_lock(&device_list_lock);
	minor = find_first_zero_bit(minors, N_SPI_MINORS);
	if (minor < N_SPI_MINORS) {
		struct device *dev;

		spidev->devt = MKDEV(SPIDEV_MAJOR, minor);
		dev = device_create(spidev_class, &spi->dev, spidev->devt, spidev, "spidev%d.%d", spi->master->bus_num,
				    spi->chip_select);
		status = PTR_ERR_OR_ZERO(dev);
	} else {
		dev_dbg(&spi->dev, "no minor number available!\n");
		status = -ENODEV;
	}
	if (status == 0) {
		set_bit(minor, minors);
		list_add(&spidev->device_entry, &device_list);
	}
	mutex_unlock(&device_list_lock);

	spidev->speed_hz = spi->max_speed_hz;

	if (status == 0) {
		spi_set_drvdata(spi, spidev);
	} else {
		kfree(spidev);
	}

	return status;
}

static int spidev_remove(struct spi_device *spi)
{
	struct spidev_data *spidev = spi_get_drvdata(spi);

	/* prevent new opens */
	mutex_lock(&device_list_lock);
	/* make sure ops on existing fds can abort cleanly */
	spin_lock_irq(&spidev->spi_lock);
	spidev->spi = NULL;
	spin_unlock_irq(&spidev->spi_lock);

	list_del(&spidev->device_entry);
	device_destroy(spidev_class, spidev->devt);
	clear_bit(MINOR(spidev->devt), minors);
	if (spidev->users == 0) {
		kfree(spidev);
	}
	mutex_unlock(&device_list_lock);

	return 0;
}

static struct spi_driver spidev_spi_driver = {
	.driver = {
		.name = "spidev",
		.of_match_table = of_match_ptr(spidev_dt_ids),
		.acpi_match_table = ACPI_PTR(spidev_acpi_ids),
	},
	.probe = spidev_probe,
	.remove = spidev_remove,

	/* NOTE: suspend/resume methods are not necessary here.
	 * We don't do anything except pass the requests to/from
	 * the underlying controller. The refrigerator handles
	 * most issues; the controller driver handles the rest.
	 */
};

/*-------------------------------------------------------------------------*/

static int __init spidev_init(void)
{
	int status;

	/* Claim our 256 reserved device numbers. Then register a class
	 * that will key udev/mdev to add/remove /dev nodes. Last, register
	 * the driver which manages those device numbers.
	 */
	BUILD_BUG_ON(N_SPI_MINORS > 256);
	status = register_chrdev(SPIDEV_MAJOR, "spi", &spidev_fops);
	if (status < 0) {
		return status;
	}

	spidev_class = class_create(THIS_MODULE, "spidev");
	if (IS_ERR(spidev_class)) {
		unregister_chrdev(SPIDEV_MAJOR, spidev_spi_driver.driver.name);
		return PTR_ERR(spidev_class);
	}

	status = spi_register_driver(&spidev_spi_driver);
	if (status < 0) {
		class_destroy(spidev_class);
		unregister_chrdev(SPIDEV_MAJOR, spidev_spi_driver.driver.name);
	}
	return status;
}
module_init(spidev_init);

static void __exit spidev_exit(void)
{
	spi_unregister_driver(&spidev_spi_driver);
	class_destroy(spidev_class);
	unregister_chrdev(SPIDEV_MAJOR, spidev_spi_driver.driver.name);
}
module_exit(spidev_exit);

MODULE_AUTHOR("Andrea Paterniani, <a.paterniani@swapp-eng.it>");
MODULE_DESCRIPTION("User mode SPI device interface");
MODULE_LICENSE("GPL");
MODULE_ALIAS("spi:spidev");