// SPDX-License-Identifier: GPL-2.0-or-later
// SPI init/core code
//
// Copyright (C) 2005 David Brownell
// Copyright (C) 2008 Secret Lab Technologies Ltd.

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/cache.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/mutex.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/clk/clk-conf.h>
#include <linux/slab.h>
#include <linux/mod_devicetable.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
#include <linux/of_gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/property.h>
#include <linux/export.h>
#include <linux/sched/rt.h>
#include <uapi/linux/sched/types.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/ioport.h>
#include <linux/acpi.h>
#include <linux/highmem.h>
#include <linux/idr.h>
#include <linux/platform_data/x86/apple.h>

#define CREATE_TRACE_POINTS
#include <trace/events/spi.h>
EXPORT_TRACEPOINT_SYMBOL(spi_transfer_start);
EXPORT_TRACEPOINT_SYMBOL(spi_transfer_stop);

#include "internals.h"

static DEFINE_IDR(spi_master_idr);

static void spidev_release(struct device *dev)
{
	struct spi_device *spi = to_spi_device(dev);

	spi_controller_put(spi->controller);
	kfree(spi->driver_override);
	kfree(spi);
}

static ssize_t
modalias_show(struct device *dev, struct device_attribute *a, char *buf)
{
	const struct spi_device *spi = to_spi_device(dev);
	int len;

	len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
	if (len != -ENODEV)
		return len;

	return sprintf(buf, "%s%s\n", SPI_MODULE_PREFIX, spi->modalias);
}
static DEVICE_ATTR_RO(modalias);

static ssize_t driver_override_store(struct device *dev,
				     struct device_attribute *a,
				     const char *buf, size_t count)
{
	struct spi_device *spi = to_spi_device(dev);
	const char *end = memchr(buf, '\n', count);
	const size_t len = end ? end - buf : count;
	const char *driver_override, *old;

	/* We need to keep extra room for a newline when displaying value */
	if (len >= (PAGE_SIZE - 1))
		return -EINVAL;

	driver_override = kstrndup(buf, len, GFP_KERNEL);
	if (!driver_override)
		return -ENOMEM;

	device_lock(dev);
	old = spi->driver_override;
	if (len) {
		spi->driver_override = driver_override;
	} else {
		/* Empty string, disable driver override */
		spi->driver_override = NULL;
		kfree(driver_override);
	}
	device_unlock(dev);
	kfree(old);

	return count;
}

static ssize_t driver_override_show(struct device *dev,
				    struct device_attribute *a, char *buf)
{
	const struct spi_device *spi = to_spi_device(dev);
	ssize_t len;

	device_lock(dev);
	len = snprintf(buf, PAGE_SIZE, "%s\n", spi->driver_override ? : "");
	device_unlock(dev);
	return len;
}
static DEVICE_ATTR_RW(driver_override);
113
114 #define SPI_STATISTICS_ATTRS(field, file) \
115 static ssize_t spi_controller_##field##_show(struct device *dev, \
116 struct device_attribute *attr, \
117 char *buf) \
118 { \
119 struct spi_controller *ctlr = container_of(dev, \
120 struct spi_controller, dev); \
121 return spi_statistics_##field##_show(&ctlr->statistics, buf); \
122 } \
123 static struct device_attribute dev_attr_spi_controller_##field = { \
124 .attr = { .name = file, .mode = 0444 }, \
125 .show = spi_controller_##field##_show, \
126 }; \
127 static ssize_t spi_device_##field##_show(struct device *dev, \
128 struct device_attribute *attr, \
129 char *buf) \
130 { \
131 struct spi_device *spi = to_spi_device(dev); \
132 return spi_statistics_##field##_show(&spi->statistics, buf); \
133 } \
134 static struct device_attribute dev_attr_spi_device_##field = { \
135 .attr = { .name = file, .mode = 0444 }, \
136 .show = spi_device_##field##_show, \
137 }
138
139 #define SPI_STATISTICS_SHOW_NAME(name, file, field, format_string) \
140 static ssize_t spi_statistics_##name##_show(struct spi_statistics *stat, \
141 char *buf) \
142 { \
143 unsigned long flags; \
144 ssize_t len; \
145 spin_lock_irqsave(&stat->lock, flags); \
146 len = sprintf(buf, format_string, stat->field); \
147 spin_unlock_irqrestore(&stat->lock, flags); \
148 return len; \
149 } \
150 SPI_STATISTICS_ATTRS(name, file)
151
152 #define SPI_STATISTICS_SHOW(field, format_string) \
153 SPI_STATISTICS_SHOW_NAME(field, __stringify(field), \
154 field, format_string)
155
156 SPI_STATISTICS_SHOW(messages, "%lu");
157 SPI_STATISTICS_SHOW(transfers, "%lu");
158 SPI_STATISTICS_SHOW(errors, "%lu");
159 SPI_STATISTICS_SHOW(timedout, "%lu");
160
161 SPI_STATISTICS_SHOW(spi_sync, "%lu");
162 SPI_STATISTICS_SHOW(spi_sync_immediate, "%lu");
163 SPI_STATISTICS_SHOW(spi_async, "%lu");
164
165 SPI_STATISTICS_SHOW(bytes, "%llu");
166 SPI_STATISTICS_SHOW(bytes_rx, "%llu");
167 SPI_STATISTICS_SHOW(bytes_tx, "%llu");
168
169 #define SPI_STATISTICS_TRANSFER_BYTES_HISTO(index, number) \
170 SPI_STATISTICS_SHOW_NAME(transfer_bytes_histo##index, \
171 "transfer_bytes_histo_" number, \
172 transfer_bytes_histo[index], "%lu")
173 SPI_STATISTICS_TRANSFER_BYTES_HISTO(0, "0-1");
174 SPI_STATISTICS_TRANSFER_BYTES_HISTO(1, "2-3");
175 SPI_STATISTICS_TRANSFER_BYTES_HISTO(2, "4-7");
176 SPI_STATISTICS_TRANSFER_BYTES_HISTO(3, "8-15");
177 SPI_STATISTICS_TRANSFER_BYTES_HISTO(4, "16-31");
178 SPI_STATISTICS_TRANSFER_BYTES_HISTO(5, "32-63");
179 SPI_STATISTICS_TRANSFER_BYTES_HISTO(6, "64-127");
180 SPI_STATISTICS_TRANSFER_BYTES_HISTO(7, "128-255");
181 SPI_STATISTICS_TRANSFER_BYTES_HISTO(8, "256-511");
182 SPI_STATISTICS_TRANSFER_BYTES_HISTO(9, "512-1023");
183 SPI_STATISTICS_TRANSFER_BYTES_HISTO(10, "1024-2047");
184 SPI_STATISTICS_TRANSFER_BYTES_HISTO(11, "2048-4095");
185 SPI_STATISTICS_TRANSFER_BYTES_HISTO(12, "4096-8191");
186 SPI_STATISTICS_TRANSFER_BYTES_HISTO(13, "8192-16383");
187 SPI_STATISTICS_TRANSFER_BYTES_HISTO(14, "16384-32767");
188 SPI_STATISTICS_TRANSFER_BYTES_HISTO(15, "32768-65535");
189 SPI_STATISTICS_TRANSFER_BYTES_HISTO(16, "65536+");
190
191 SPI_STATISTICS_SHOW(transfers_split_maxsize, "%lu");

static struct attribute *spi_dev_attrs[] = {
	&dev_attr_modalias.attr,
	&dev_attr_driver_override.attr,
	NULL,
};

static const struct attribute_group spi_dev_group = {
	.attrs = spi_dev_attrs,
};

static struct attribute *spi_device_statistics_attrs[] = {
	&dev_attr_spi_device_messages.attr,
	&dev_attr_spi_device_transfers.attr,
	&dev_attr_spi_device_errors.attr,
	&dev_attr_spi_device_timedout.attr,
	&dev_attr_spi_device_spi_sync.attr,
	&dev_attr_spi_device_spi_sync_immediate.attr,
	&dev_attr_spi_device_spi_async.attr,
	&dev_attr_spi_device_bytes.attr,
	&dev_attr_spi_device_bytes_rx.attr,
	&dev_attr_spi_device_bytes_tx.attr,
	&dev_attr_spi_device_transfer_bytes_histo0.attr,
	&dev_attr_spi_device_transfer_bytes_histo1.attr,
	&dev_attr_spi_device_transfer_bytes_histo2.attr,
	&dev_attr_spi_device_transfer_bytes_histo3.attr,
	&dev_attr_spi_device_transfer_bytes_histo4.attr,
	&dev_attr_spi_device_transfer_bytes_histo5.attr,
	&dev_attr_spi_device_transfer_bytes_histo6.attr,
	&dev_attr_spi_device_transfer_bytes_histo7.attr,
	&dev_attr_spi_device_transfer_bytes_histo8.attr,
	&dev_attr_spi_device_transfer_bytes_histo9.attr,
	&dev_attr_spi_device_transfer_bytes_histo10.attr,
	&dev_attr_spi_device_transfer_bytes_histo11.attr,
	&dev_attr_spi_device_transfer_bytes_histo12.attr,
	&dev_attr_spi_device_transfer_bytes_histo13.attr,
	&dev_attr_spi_device_transfer_bytes_histo14.attr,
	&dev_attr_spi_device_transfer_bytes_histo15.attr,
	&dev_attr_spi_device_transfer_bytes_histo16.attr,
	&dev_attr_spi_device_transfers_split_maxsize.attr,
	NULL,
};

static const struct attribute_group spi_device_statistics_group = {
	.name = "statistics",
	.attrs = spi_device_statistics_attrs,
};

static const struct attribute_group *spi_dev_groups[] = {
	&spi_dev_group,
	&spi_device_statistics_group,
	NULL,
};

static struct attribute *spi_controller_statistics_attrs[] = {
	&dev_attr_spi_controller_messages.attr,
	&dev_attr_spi_controller_transfers.attr,
	&dev_attr_spi_controller_errors.attr,
	&dev_attr_spi_controller_timedout.attr,
	&dev_attr_spi_controller_spi_sync.attr,
	&dev_attr_spi_controller_spi_sync_immediate.attr,
	&dev_attr_spi_controller_spi_async.attr,
	&dev_attr_spi_controller_bytes.attr,
	&dev_attr_spi_controller_bytes_rx.attr,
	&dev_attr_spi_controller_bytes_tx.attr,
	&dev_attr_spi_controller_transfer_bytes_histo0.attr,
	&dev_attr_spi_controller_transfer_bytes_histo1.attr,
	&dev_attr_spi_controller_transfer_bytes_histo2.attr,
	&dev_attr_spi_controller_transfer_bytes_histo3.attr,
	&dev_attr_spi_controller_transfer_bytes_histo4.attr,
	&dev_attr_spi_controller_transfer_bytes_histo5.attr,
	&dev_attr_spi_controller_transfer_bytes_histo6.attr,
	&dev_attr_spi_controller_transfer_bytes_histo7.attr,
	&dev_attr_spi_controller_transfer_bytes_histo8.attr,
	&dev_attr_spi_controller_transfer_bytes_histo9.attr,
	&dev_attr_spi_controller_transfer_bytes_histo10.attr,
	&dev_attr_spi_controller_transfer_bytes_histo11.attr,
	&dev_attr_spi_controller_transfer_bytes_histo12.attr,
	&dev_attr_spi_controller_transfer_bytes_histo13.attr,
	&dev_attr_spi_controller_transfer_bytes_histo14.attr,
	&dev_attr_spi_controller_transfer_bytes_histo15.attr,
	&dev_attr_spi_controller_transfer_bytes_histo16.attr,
	&dev_attr_spi_controller_transfers_split_maxsize.attr,
	NULL,
};

static const struct attribute_group spi_controller_statistics_group = {
	.name = "statistics",
	.attrs = spi_controller_statistics_attrs,
};

static const struct attribute_group *spi_master_groups[] = {
	&spi_controller_statistics_group,
	NULL,
};

void spi_statistics_add_transfer_stats(struct spi_statistics *stats,
				       struct spi_transfer *xfer,
				       struct spi_controller *ctlr)
{
	unsigned long flags;
	/*
	 * Histogram bucket index: fls() yields the position of the top
	 * set bit, so e.g. a 10-byte transfer gives fls(10) = 4 and lands
	 * in bucket 3 ("8-15"); lengths of 64 KiB and up are clamped into
	 * the last bucket ("65536+").
	 */
	int l2len = min(fls(xfer->len), SPI_STATISTICS_HISTO_SIZE) - 1;

	/* A zero-length transfer (fls(0) == 0) goes into bucket 0 ("0-1") */
	if (l2len < 0)
		l2len = 0;

	spin_lock_irqsave(&stats->lock, flags);

	stats->transfers++;
	stats->transfer_bytes_histo[l2len]++;

	stats->bytes += xfer->len;
	if ((xfer->tx_buf) &&
	    (xfer->tx_buf != ctlr->dummy_tx))
		stats->bytes_tx += xfer->len;
	if ((xfer->rx_buf) &&
	    (xfer->rx_buf != ctlr->dummy_rx))
		stats->bytes_rx += xfer->len;

	spin_unlock_irqrestore(&stats->lock, flags);
}
EXPORT_SYMBOL_GPL(spi_statistics_add_transfer_stats);

/* modalias support makes "modprobe $MODALIAS" new-style hotplug work,
 * and the sysfs version makes coldplug work too.
 */

static const struct spi_device_id *spi_match_id(const struct spi_device_id *id,
						const struct spi_device *sdev)
{
	while (id->name[0]) {
		if (!strcmp(sdev->modalias, id->name))
			return id;
		id++;
	}
	return NULL;
}

const struct spi_device_id *spi_get_device_id(const struct spi_device *sdev)
{
	const struct spi_driver *sdrv = to_spi_driver(sdev->dev.driver);

	return spi_match_id(sdrv->id_table, sdev);
}
EXPORT_SYMBOL_GPL(spi_get_device_id);
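
/*
 * Example (illustrative sketch): a client driver's probe() can use
 * spi_get_device_id() to recover the matched table entry and the
 * per-chip data stashed in its driver_data. All "foo" names below are
 * hypothetical.
 *
 *	static const struct spi_device_id foo_spi_ids[] = {
 *		{ "foo-a", (kernel_ulong_t)&foo_a_chipinfo },
 *		{ "foo-b", (kernel_ulong_t)&foo_b_chipinfo },
 *		{ }
 *	};
 *
 *	static int foo_probe(struct spi_device *spi)
 *	{
 *		const struct spi_device_id *id = spi_get_device_id(spi);
 *		const struct foo_chipinfo *info =
 *			(const struct foo_chipinfo *)id->driver_data;
 *		...
 *	}
 */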

static int spi_match_device(struct device *dev, struct device_driver *drv)
{
	const struct spi_device	*spi = to_spi_device(dev);
	const struct spi_driver	*sdrv = to_spi_driver(drv);

	/* Check override first, and if set, only use the named driver */
	if (spi->driver_override)
		return strcmp(spi->driver_override, drv->name) == 0;

	/* Attempt an OF style match */
	if (of_driver_match_device(dev, drv))
		return 1;

	/* Then try ACPI */
	if (acpi_driver_match_device(dev, drv))
		return 1;

	if (sdrv->id_table)
		return !!spi_match_id(sdrv->id_table, spi);

	return strcmp(spi->modalias, drv->name) == 0;
}

static int spi_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	const struct spi_device	*spi = to_spi_device(dev);
	int rc;

	rc = acpi_device_uevent_modalias(dev, env);
	if (rc != -ENODEV)
		return rc;

	return add_uevent_var(env, "MODALIAS=%s%s", SPI_MODULE_PREFIX, spi->modalias);
}

struct bus_type spi_bus_type = {
	.name		= "spi",
	.dev_groups	= spi_dev_groups,
	.match		= spi_match_device,
	.uevent		= spi_uevent,
};
EXPORT_SYMBOL_GPL(spi_bus_type);


static int spi_drv_probe(struct device *dev)
{
	const struct spi_driver	*sdrv = to_spi_driver(dev->driver);
	struct spi_device	*spi = to_spi_device(dev);
	int ret;

	ret = of_clk_set_defaults(dev->of_node, false);
	if (ret)
		return ret;

	if (dev->of_node) {
		spi->irq = of_irq_get(dev->of_node, 0);
		if (spi->irq == -EPROBE_DEFER)
			return -EPROBE_DEFER;
		if (spi->irq < 0)
			spi->irq = 0;
	}

	ret = dev_pm_domain_attach(dev, true);
	if (ret)
		return ret;

	if (sdrv->probe) {
		ret = sdrv->probe(spi);
		if (ret)
			dev_pm_domain_detach(dev, true);
	}

	return ret;
}

static int spi_drv_remove(struct device *dev)
{
	const struct spi_driver	*sdrv = to_spi_driver(dev->driver);
	int ret = 0;

	if (sdrv->remove)
		ret = sdrv->remove(to_spi_device(dev));
	dev_pm_domain_detach(dev, true);

	return ret;
}

static void spi_drv_shutdown(struct device *dev)
{
	const struct spi_driver	*sdrv = to_spi_driver(dev->driver);

	sdrv->shutdown(to_spi_device(dev));
}

/**
 * __spi_register_driver - register a SPI driver
 * @owner: owner module of the driver to register
 * @sdrv: the driver to register
 * Context: can sleep
 *
 * Return: zero on success, else a negative error code.
 */
int __spi_register_driver(struct module *owner, struct spi_driver *sdrv)
{
	sdrv->driver.owner = owner;
	sdrv->driver.bus = &spi_bus_type;
	sdrv->driver.probe = spi_drv_probe;
	sdrv->driver.remove = spi_drv_remove;
	if (sdrv->shutdown)
		sdrv->driver.shutdown = spi_drv_shutdown;
	return driver_register(&sdrv->driver);
}
EXPORT_SYMBOL_GPL(__spi_register_driver);
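
/*
 * Example (illustrative sketch): minimal client driver registration.
 * The foo_* names are hypothetical. module_spi_driver() expands to
 * spi_register_driver()/spi_unregister_driver() in module init/exit,
 * which in turn reach __spi_register_driver() with THIS_MODULE as
 * @owner.
 *
 *	static struct spi_driver foo_driver = {
 *		.driver = {
 *			.name		= "foo",
 *			.of_match_table	= foo_of_match,
 *		},
 *		.probe	= foo_probe,
 *		.remove	= foo_remove,
 *	};
 *	module_spi_driver(foo_driver);
 */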

/*-------------------------------------------------------------------------*/

/* SPI devices should normally not be created by SPI device drivers; that
 * would make them board-specific. Similarly with SPI controller drivers.
 * Device registration normally goes into code like arch/.../mach.../board-YYY.c
 * with other readonly (flashable) information about mainboard devices.
 */

struct boardinfo {
	struct list_head	list;
	struct spi_board_info	board_info;
};

static LIST_HEAD(board_list);
static LIST_HEAD(spi_controller_list);

/*
 * Used to protect add/del operations on the board_info list and the
 * spi_controller list, and their matching process. Also used to
 * protect objects of type struct idr.
 */
static DEFINE_MUTEX(board_lock);

/*
 * Prevents addition of devices with same chip select and
 * addition of devices below an unregistering controller.
 */
static DEFINE_MUTEX(spi_add_lock);

/**
 * spi_alloc_device - Allocate a new SPI device
 * @ctlr: Controller to which device is connected
 * Context: can sleep
 *
 * Allows a driver to allocate and initialize a spi_device without
 * registering it immediately. This allows a driver to directly
 * fill the spi_device with device parameters before calling
 * spi_add_device() on it.
 *
 * Caller is responsible to call spi_add_device() on the returned
 * spi_device structure to add it to the SPI controller. If the caller
 * needs to discard the spi_device without adding it, then it should
 * call spi_dev_put() on it.
 *
 * Return: a pointer to the new device, or NULL.
 */
struct spi_device *spi_alloc_device(struct spi_controller *ctlr)
{
	struct spi_device	*spi;

	if (!spi_controller_get(ctlr))
		return NULL;

	spi = kzalloc(sizeof(*spi), GFP_KERNEL);
	if (!spi) {
		spi_controller_put(ctlr);
		return NULL;
	}

	spi->master = spi->controller = ctlr;
	spi->dev.parent = &ctlr->dev;
	spi->dev.bus = &spi_bus_type;
	spi->dev.release = spidev_release;
	spi->cs_gpio = -ENOENT;
	spi->mode = ctlr->buswidth_override_bits;

	spin_lock_init(&spi->statistics.lock);

	device_initialize(&spi->dev);
	return spi;
}
EXPORT_SYMBOL_GPL(spi_alloc_device);

static void spi_dev_set_name(struct spi_device *spi)
{
	struct acpi_device *adev = ACPI_COMPANION(&spi->dev);

	if (adev) {
		dev_set_name(&spi->dev, "spi-%s", acpi_dev_name(adev));
		return;
	}

	dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->controller->dev),
		     spi->chip_select);
}

static int spi_dev_check(struct device *dev, void *data)
{
	struct spi_device *spi = to_spi_device(dev);
	struct spi_device *new_spi = data;

	if (spi->controller == new_spi->controller &&
	    spi->chip_select == new_spi->chip_select)
		return -EBUSY;
	return 0;
}

static void spi_cleanup(struct spi_device *spi)
{
	if (spi->controller->cleanup)
		spi->controller->cleanup(spi);
}

/**
 * spi_add_device - Add spi_device allocated with spi_alloc_device
 * @spi: spi_device to register
 *
 * Companion function to spi_alloc_device. Devices allocated with
 * spi_alloc_device can be added onto the spi bus with this function.
 *
 * Return: 0 on success; negative errno on failure
 */
int spi_add_device(struct spi_device *spi)
{
	struct spi_controller *ctlr = spi->controller;
	struct device *dev = ctlr->dev.parent;
	int status;

	/* Chipselects are numbered 0..max; validate. */
	if (spi->chip_select >= ctlr->num_chipselect) {
		dev_err(dev, "cs%d >= max %d\n", spi->chip_select,
			ctlr->num_chipselect);
		return -EINVAL;
	}

	/* Set the bus ID string */
	spi_dev_set_name(spi);

	/* We need to make sure there's no other device with this
	 * chipselect **BEFORE** we call setup(), else we'll trash
	 * its configuration. Lock against concurrent add() calls.
	 */
	mutex_lock(&spi_add_lock);

	status = bus_for_each_dev(&spi_bus_type, NULL, spi, spi_dev_check);
	if (status) {
		dev_err(dev, "chipselect %d already in use\n",
			spi->chip_select);
		goto done;
	}

	/* Controller may unregister concurrently */
	if (IS_ENABLED(CONFIG_SPI_DYNAMIC) &&
	    !device_is_registered(&ctlr->dev)) {
		status = -ENODEV;
		goto done;
	}

	/* Descriptors take precedence */
	if (ctlr->cs_gpiods)
		spi->cs_gpiod = ctlr->cs_gpiods[spi->chip_select];
	else if (ctlr->cs_gpios)
		spi->cs_gpio = ctlr->cs_gpios[spi->chip_select];

	/* Drivers may modify this initial i/o setup, but will
	 * normally rely on the device being set up. Devices
	 * using SPI_CS_HIGH can't coexist well otherwise...
	 */
	status = spi_setup(spi);
	if (status < 0) {
		dev_err(dev, "can't setup %s, status %d\n",
			dev_name(&spi->dev), status);
		goto done;
	}

	/* Device may be bound to an active driver when this returns */
	status = device_add(&spi->dev);
	if (status < 0) {
		dev_err(dev, "can't add %s, status %d\n",
			dev_name(&spi->dev), status);
		spi_cleanup(spi);
	} else {
		dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev));
	}

done:
	mutex_unlock(&spi_add_lock);
	return status;
}
EXPORT_SYMBOL_GPL(spi_add_device);
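
/*
 * Example (illustrative sketch): the two-step allocate-then-add flow
 * described above. Error handling is abbreviated and the device
 * parameters are hypothetical. Note that a device which fails
 * spi_add_device() must be discarded with spi_dev_put(), not kfree().
 *
 *	struct spi_device *proxy = spi_alloc_device(ctlr);
 *
 *	if (!proxy)
 *		return -ENOMEM;
 *	proxy->chip_select = 0;
 *	proxy->max_speed_hz = 1000000;
 *	strlcpy(proxy->modalias, "foo", sizeof(proxy->modalias));
 *	if (spi_add_device(proxy)) {
 *		spi_dev_put(proxy);
 *		return -ENODEV;
 *	}
 */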

/**
 * spi_new_device - instantiate one new SPI device
 * @ctlr: Controller to which device is connected
 * @chip: Describes the SPI device
 * Context: can sleep
 *
 * On typical mainboards, this is purely internal; and it's not needed
 * after board init creates the hard-wired devices. Some development
 * platforms may not be able to use spi_register_board_info though, and
 * this is exported so that for example a USB or parport based adapter
 * driver could add devices (which it would learn about out-of-band).
 *
 * Return: the new device, or NULL.
 */
struct spi_device *spi_new_device(struct spi_controller *ctlr,
				  struct spi_board_info *chip)
{
	struct spi_device	*proxy;
	int			status;

	/* NOTE: caller did any chip->bus_num checks necessary.
	 *
	 * Also, unless we change the return value convention to use
	 * error-or-pointer (not NULL-or-pointer), troubleshootability
	 * suggests syslogged diagnostics are best here (ugh).
	 */

	proxy = spi_alloc_device(ctlr);
	if (!proxy)
		return NULL;

	WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias));

	proxy->chip_select = chip->chip_select;
	proxy->max_speed_hz = chip->max_speed_hz;
	proxy->mode = chip->mode;
	proxy->irq = chip->irq;
	strlcpy(proxy->modalias, chip->modalias, sizeof(proxy->modalias));
	proxy->dev.platform_data = (void *) chip->platform_data;
	proxy->controller_data = chip->controller_data;
	proxy->controller_state = NULL;

	if (chip->properties) {
		status = device_add_properties(&proxy->dev, chip->properties);
		if (status) {
			dev_err(&ctlr->dev,
				"failed to add properties to '%s': %d\n",
				chip->modalias, status);
			goto err_dev_put;
		}
	}

	status = spi_add_device(proxy);
	if (status < 0)
		goto err_remove_props;

	return proxy;

err_remove_props:
	if (chip->properties)
		device_remove_properties(&proxy->dev);
err_dev_put:
	spi_dev_put(proxy);
	return NULL;
}
EXPORT_SYMBOL_GPL(spi_new_device);
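
/*
 * Example (illustrative sketch): an adapter driver instantiating a
 * child it learned about out-of-band. The chip described is
 * hypothetical; on failure the diagnostics have already been
 * syslogged by spi_new_device().
 *
 *	struct spi_board_info chip = {
 *		.modalias	= "foo",
 *		.max_speed_hz	= 500000,
 *		.chip_select	= 1,
 *		.mode		= SPI_MODE_3,
 *	};
 *	struct spi_device *dev = spi_new_device(ctlr, &chip);
 *
 *	if (!dev)
 *		return -ENODEV;
 */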

/**
 * spi_unregister_device - unregister a single SPI device
 * @spi: spi_device to unregister
 *
 * Start making the passed SPI device vanish. Normally this would be handled
 * by spi_unregister_controller().
 */
void spi_unregister_device(struct spi_device *spi)
{
	if (!spi)
		return;

	if (spi->dev.of_node) {
		of_node_clear_flag(spi->dev.of_node, OF_POPULATED);
		of_node_put(spi->dev.of_node);
	}
	if (ACPI_COMPANION(&spi->dev))
		acpi_device_clear_enumerated(ACPI_COMPANION(&spi->dev));
	device_del(&spi->dev);
	spi_cleanup(spi);
	put_device(&spi->dev);
}
EXPORT_SYMBOL_GPL(spi_unregister_device);

static void spi_match_controller_to_boardinfo(struct spi_controller *ctlr,
					      struct spi_board_info *bi)
{
	struct spi_device *dev;

	if (ctlr->bus_num != bi->bus_num)
		return;

	dev = spi_new_device(ctlr, bi);
	if (!dev)
		dev_err(ctlr->dev.parent, "can't create new device for %s\n",
			bi->modalias);
}

/**
 * spi_register_board_info - register SPI devices for a given board
 * @info: array of chip descriptors
 * @n: how many descriptors are provided
 * Context: can sleep
 *
 * Board-specific early init code calls this (probably during arch_initcall)
 * with segments of the SPI device table. Any device nodes are created later,
 * after the relevant parent SPI controller (bus_num) is defined. We keep
 * this table of devices forever, so that reloading a controller driver will
 * not make Linux forget about these hard-wired devices.
 *
 * Other code can also call this, e.g. a particular add-on board might provide
 * SPI devices through its expansion connector, so code initializing that board
 * would naturally declare its SPI devices.
 *
 * The board info passed can safely be __initdata ... but be careful of
 * any embedded pointers (platform_data, etc), they're copied as-is.
 * Device properties are deep-copied though.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_register_board_info(struct spi_board_info const *info, unsigned n)
{
	struct boardinfo *bi;
	int i;

	if (!n)
		return 0;

	bi = kcalloc(n, sizeof(*bi), GFP_KERNEL);
	if (!bi)
		return -ENOMEM;

	for (i = 0; i < n; i++, bi++, info++) {
		struct spi_controller *ctlr;

		memcpy(&bi->board_info, info, sizeof(*info));
		if (info->properties) {
			bi->board_info.properties =
					property_entries_dup(info->properties);
			if (IS_ERR(bi->board_info.properties))
				return PTR_ERR(bi->board_info.properties);
		}

		mutex_lock(&board_lock);
		list_add_tail(&bi->list, &board_list);
		list_for_each_entry(ctlr, &spi_controller_list, list)
			spi_match_controller_to_boardinfo(ctlr,
							  &bi->board_info);
		mutex_unlock(&board_lock);
	}

	return 0;
}
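
/*
 * Example (illustrative sketch): board init code declaring one
 * hard-wired device. The table contents are hypothetical; the table
 * itself may be __initdata because it is copied, but anything it
 * points to (platform_data etc) must stay alive.
 *
 *	static struct spi_board_info board_spi_devices[] __initdata = {
 *		{
 *			.modalias	= "foo-flash",
 *			.max_speed_hz	= 25000000,
 *			.bus_num	= 0,
 *			.chip_select	= 0,
 *		},
 *	};
 *
 *	static int __init board_init(void)
 *	{
 *		return spi_register_board_info(board_spi_devices,
 *					ARRAY_SIZE(board_spi_devices));
 *	}
 *	arch_initcall(board_init);
 */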

/*-------------------------------------------------------------------------*/

static void spi_set_cs(struct spi_device *spi, bool enable, bool force)
{
	bool enable1 = enable;

	/*
	 * Avoid calling into the driver (or doing delays) if the chip select
	 * isn't actually changing from the last time this was called.
	 */
	if (!force && (spi->controller->last_cs_enable == enable) &&
	    (spi->controller->last_cs_mode_high == (spi->mode & SPI_CS_HIGH)))
		return;

	spi->controller->last_cs_enable = enable;
	spi->controller->last_cs_mode_high = spi->mode & SPI_CS_HIGH;

	if (!spi->controller->set_cs_timing) {
		if (enable1)
			spi_delay_exec(&spi->controller->cs_setup, NULL);
		else
			spi_delay_exec(&spi->controller->cs_hold, NULL);
	}

	if (spi->mode & SPI_CS_HIGH)
		enable = !enable;

	if (spi->cs_gpiod || gpio_is_valid(spi->cs_gpio)) {
		if (!(spi->mode & SPI_NO_CS)) {
			if (spi->cs_gpiod) {
				/*
				 * Historically ACPI has no means of expressing the GPIO
				 * polarity and thus the SPISerialBus() resource defines it
				 * on the per-chip basis. In order to avoid a chain of
				 * negations, the GPIO polarity is considered to be Active
				 * High. Even for the cases when _DSD() is involved (in the
				 * updated versions of ACPI) the GPIO CS polarity must be
				 * defined Active High to avoid ambiguity. That's why we use
				 * enable, which takes SPI_CS_HIGH into account.
				 */
				if (has_acpi_companion(&spi->dev))
					gpiod_set_value_cansleep(spi->cs_gpiod, !enable);
				else
					/* Polarity handled by GPIO library */
					gpiod_set_value_cansleep(spi->cs_gpiod, enable1);
			} else {
				/*
				 * Invert the enable line, as active low is
				 * default for SPI.
				 */
				gpio_set_value_cansleep(spi->cs_gpio, !enable);
			}
		}
		/* Some SPI masters need both GPIO CS & slave_select */
		if ((spi->controller->flags & SPI_MASTER_GPIO_SS) &&
		    spi->controller->set_cs)
			spi->controller->set_cs(spi, !enable);
	} else if (spi->controller->set_cs) {
		spi->controller->set_cs(spi, !enable);
	}

	if (!spi->controller->set_cs_timing) {
		if (!enable1)
			spi_delay_exec(&spi->controller->cs_inactive, NULL);
	}
}

#ifdef CONFIG_HAS_DMA
int spi_map_buf(struct spi_controller *ctlr, struct device *dev,
		struct sg_table *sgt, void *buf, size_t len,
		enum dma_data_direction dir)
{
	const bool vmalloced_buf = is_vmalloc_addr(buf);
	unsigned int max_seg_size = dma_get_max_seg_size(dev);
#ifdef CONFIG_HIGHMEM
	const bool kmap_buf = ((unsigned long)buf >= PKMAP_BASE &&
				(unsigned long)buf < (PKMAP_BASE +
					(LAST_PKMAP * PAGE_SIZE)));
#else
	const bool kmap_buf = false;
#endif
	int desc_len;
	int sgs;
	struct page *vm_page;
	struct scatterlist *sg;
	void *sg_buf;
	size_t min;
	int i, ret;

	if (vmalloced_buf || kmap_buf) {
		desc_len = min_t(unsigned long, max_seg_size, PAGE_SIZE);
		sgs = DIV_ROUND_UP(len + offset_in_page(buf), desc_len);
	} else if (virt_addr_valid(buf)) {
		desc_len = min_t(size_t, max_seg_size, ctlr->max_dma_len);
		sgs = DIV_ROUND_UP(len, desc_len);
	} else {
		return -EINVAL;
	}

	ret = sg_alloc_table(sgt, sgs, GFP_KERNEL);
	if (ret != 0)
		return ret;

	sg = &sgt->sgl[0];
	for (i = 0; i < sgs; i++) {

		if (vmalloced_buf || kmap_buf) {
			/*
			 * Next scatterlist entry size is the minimum between
			 * the desc_len and the remaining buffer length that
			 * fits in a page.
			 */
			min = min_t(size_t, desc_len,
				    min_t(size_t, len,
					  PAGE_SIZE - offset_in_page(buf)));
			if (vmalloced_buf)
				vm_page = vmalloc_to_page(buf);
			else
				vm_page = kmap_to_page(buf);
			if (!vm_page) {
				sg_free_table(sgt);
				return -ENOMEM;
			}
			sg_set_page(sg, vm_page,
				    min, offset_in_page(buf));
		} else {
			min = min_t(size_t, len, desc_len);
			sg_buf = buf;
			sg_set_buf(sg, sg_buf, min);
		}

		buf += min;
		len -= min;
		sg = sg_next(sg);
	}

	ret = dma_map_sg(dev, sgt->sgl, sgt->nents, dir);
	if (!ret)
		ret = -ENOMEM;
	if (ret < 0) {
		sg_free_table(sgt);
		return ret;
	}

	sgt->nents = ret;

	return 0;
}

void spi_unmap_buf(struct spi_controller *ctlr, struct device *dev,
		   struct sg_table *sgt, enum dma_data_direction dir)
{
	if (sgt->orig_nents) {
		dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir);
		sg_free_table(sgt);
		sgt->orig_nents = 0;
		sgt->nents = 0;
	}
}

static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
{
	struct device *tx_dev, *rx_dev;
	struct spi_transfer *xfer;
	int ret;

	if (!ctlr->can_dma)
		return 0;

	if (ctlr->dma_tx)
		tx_dev = ctlr->dma_tx->device->dev;
	else
		tx_dev = ctlr->dev.parent;

	if (ctlr->dma_rx)
		rx_dev = ctlr->dma_rx->device->dev;
	else
		rx_dev = ctlr->dev.parent;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		if (!ctlr->can_dma(ctlr, msg->spi, xfer))
			continue;

		if (xfer->tx_buf != NULL) {
			ret = spi_map_buf(ctlr, tx_dev, &xfer->tx_sg,
					  (void *)xfer->tx_buf, xfer->len,
					  DMA_TO_DEVICE);
			if (ret != 0)
				return ret;
		}

		if (xfer->rx_buf != NULL) {
			ret = spi_map_buf(ctlr, rx_dev, &xfer->rx_sg,
					  xfer->rx_buf, xfer->len,
					  DMA_FROM_DEVICE);
			if (ret != 0) {
				spi_unmap_buf(ctlr, tx_dev, &xfer->tx_sg,
					      DMA_TO_DEVICE);
				return ret;
			}
		}
	}

	ctlr->cur_msg_mapped = true;

	return 0;
}

static int __spi_unmap_msg(struct spi_controller *ctlr, struct spi_message *msg)
{
	struct spi_transfer *xfer;
	struct device *tx_dev, *rx_dev;

	if (!ctlr->cur_msg_mapped || !ctlr->can_dma)
		return 0;

	if (ctlr->dma_tx)
		tx_dev = ctlr->dma_tx->device->dev;
	else
		tx_dev = ctlr->dev.parent;

	if (ctlr->dma_rx)
		rx_dev = ctlr->dma_rx->device->dev;
	else
		rx_dev = ctlr->dev.parent;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		if (!ctlr->can_dma(ctlr, msg->spi, xfer))
			continue;

		spi_unmap_buf(ctlr, rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
		spi_unmap_buf(ctlr, tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
	}

	ctlr->cur_msg_mapped = false;

	return 0;
}
#else /* !CONFIG_HAS_DMA */
static inline int __spi_map_msg(struct spi_controller *ctlr,
				struct spi_message *msg)
{
	return 0;
}

static inline int __spi_unmap_msg(struct spi_controller *ctlr,
				  struct spi_message *msg)
{
	return 0;
}
#endif /* !CONFIG_HAS_DMA */

static inline int spi_unmap_msg(struct spi_controller *ctlr,
				struct spi_message *msg)
{
	struct spi_transfer *xfer;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		/*
		 * Restore the original value of tx_buf or rx_buf if they
		 * were overridden with the dummy buffers by spi_map_msg()
		 * (i.e. they were originally NULL).
		 */
		if (xfer->tx_buf == ctlr->dummy_tx)
			xfer->tx_buf = NULL;
		if (xfer->rx_buf == ctlr->dummy_rx)
			xfer->rx_buf = NULL;
	}

	return __spi_unmap_msg(ctlr, msg);
}

static int spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
{
	struct spi_transfer *xfer;
	void *tmp;
	unsigned int max_tx, max_rx;

	if ((ctlr->flags & (SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX))
		&& !(msg->spi->mode & SPI_3WIRE)) {
		max_tx = 0;
		max_rx = 0;

		list_for_each_entry(xfer, &msg->transfers, transfer_list) {
			if ((ctlr->flags & SPI_CONTROLLER_MUST_TX) &&
			    !xfer->tx_buf)
				max_tx = max(xfer->len, max_tx);
			if ((ctlr->flags & SPI_CONTROLLER_MUST_RX) &&
			    !xfer->rx_buf)
				max_rx = max(xfer->len, max_rx);
		}

		if (max_tx) {
			tmp = krealloc(ctlr->dummy_tx, max_tx,
				       GFP_KERNEL | GFP_DMA);
			if (!tmp)
				return -ENOMEM;
			ctlr->dummy_tx = tmp;
			memset(tmp, 0, max_tx);
		}

		if (max_rx) {
			tmp = krealloc(ctlr->dummy_rx, max_rx,
				       GFP_KERNEL | GFP_DMA);
			if (!tmp)
				return -ENOMEM;
			ctlr->dummy_rx = tmp;
		}

		if (max_tx || max_rx) {
			list_for_each_entry(xfer, &msg->transfers,
					    transfer_list) {
				if (!xfer->len)
					continue;
				if (!xfer->tx_buf)
					xfer->tx_buf = ctlr->dummy_tx;
				if (!xfer->rx_buf)
					xfer->rx_buf = ctlr->dummy_rx;
			}
		}
	}

	return __spi_map_msg(ctlr, msg);
}

static int spi_transfer_wait(struct spi_controller *ctlr,
			     struct spi_message *msg,
			     struct spi_transfer *xfer)
{
	struct spi_statistics *statm = &ctlr->statistics;
	struct spi_statistics *stats = &msg->spi->statistics;
	u32 speed_hz = xfer->speed_hz;
	unsigned long long ms;

	if (spi_controller_is_slave(ctlr)) {
		if (wait_for_completion_interruptible(&ctlr->xfer_completion)) {
			dev_dbg(&msg->spi->dev, "SPI transfer interrupted\n");
			return -EINTR;
		}
	} else {
		if (!speed_hz)
			speed_hz = 100000;

		/*
		 * Worst-case time estimate: 8 bits per byte at the
		 * (possibly assumed 100 kHz) transfer speed, then doubled
		 * plus 200 ms of slack for scheduling and controller
		 * latency.
		 */
		ms = 8LL * 1000LL * xfer->len;
		do_div(ms, speed_hz);
		ms += ms + 200; /* some tolerance */

		if (ms > UINT_MAX)
			ms = UINT_MAX;

		ms = wait_for_completion_timeout(&ctlr->xfer_completion,
						 msecs_to_jiffies(ms));

		if (ms == 0) {
			SPI_STATISTICS_INCREMENT_FIELD(statm, timedout);
			SPI_STATISTICS_INCREMENT_FIELD(stats, timedout);
			dev_err(&msg->spi->dev,
				"SPI transfer timed out\n");
			return -ETIMEDOUT;
		}
	}

	return 0;
}

static void _spi_transfer_delay_ns(u32 ns)
{
	if (!ns)
		return;
	if (ns <= 1000) {
		ndelay(ns);
	} else {
		u32 us = DIV_ROUND_UP(ns, 1000);

		if (us <= 10)
			udelay(us);
		else
			usleep_range(us, us + DIV_ROUND_UP(us, 10));
	}
}

int spi_delay_to_ns(struct spi_delay *_delay, struct spi_transfer *xfer)
{
	u32 delay = _delay->value;
	u32 unit = _delay->unit;
	u32 hz;

	if (!delay)
		return 0;

	switch (unit) {
	case SPI_DELAY_UNIT_USECS:
		delay *= 1000;
		break;
	case SPI_DELAY_UNIT_NSECS: /* nothing to do here */
		break;
	case SPI_DELAY_UNIT_SCK:
		/* clock cycles need to be obtained from spi_transfer */
		if (!xfer)
			return -EINVAL;
		/* if there is no effective speed known, then approximate
		 * by underestimating with half the requested Hz
		 */
		hz = xfer->effective_speed_hz ?: xfer->speed_hz / 2;
		if (!hz)
			return -EINVAL;
		delay *= DIV_ROUND_UP(1000000000, hz);
		break;
	default:
		return -EINVAL;
	}

	return delay;
}
EXPORT_SYMBOL_GPL(spi_delay_to_ns);

int spi_delay_exec(struct spi_delay *_delay, struct spi_transfer *xfer)
{
	int delay;

	might_sleep();

	if (!_delay)
		return -EINVAL;

	delay = spi_delay_to_ns(_delay, xfer);
	if (delay < 0)
		return delay;

	_spi_transfer_delay_ns(delay);

	return 0;
}
EXPORT_SYMBOL_GPL(spi_delay_exec);
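
/*
 * Example (illustrative sketch): a delay expressed in SCK cycles. At
 * an effective speed of 10 MHz one cycle rounds up to
 * DIV_ROUND_UP(1000000000, 10000000) = 100 ns, so a 4-cycle delay
 * converts to 400 ns:
 *
 *	struct spi_delay d = {
 *		.value	= 4,
 *		.unit	= SPI_DELAY_UNIT_SCK,
 *	};
 *	int ns = spi_delay_to_ns(&d, xfer);	(yields 400 at 10 MHz)
 *
 *	spi_delay_exec(&d, xfer);		(performs the ~400 ns delay)
 */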

static void _spi_transfer_cs_change_delay(struct spi_message *msg,
					  struct spi_transfer *xfer)
{
	u32 delay = xfer->cs_change_delay.value;
	u32 unit = xfer->cs_change_delay.unit;
	int ret;

	/* return early on "fast" mode - for everything but USECS */
	if (!delay) {
		if (unit == SPI_DELAY_UNIT_USECS)
			_spi_transfer_delay_ns(10000);
		return;
	}

	ret = spi_delay_exec(&xfer->cs_change_delay, xfer);
	if (ret) {
		dev_err_once(&msg->spi->dev,
			     "Use of unsupported delay unit %i, using default of 10us\n",
			     unit);
		_spi_transfer_delay_ns(10000);
	}
}

/*
 * spi_transfer_one_message - Default implementation of transfer_one_message()
 *
 * This is a standard implementation of transfer_one_message() for
 * drivers which implement a transfer_one() operation. It provides
 * standard handling of delays and chip select management.
 */
static int spi_transfer_one_message(struct spi_controller *ctlr,
				    struct spi_message *msg)
{
	struct spi_transfer *xfer;
	bool keep_cs = false;
	int ret = 0;
	struct spi_statistics *statm = &ctlr->statistics;
	struct spi_statistics *stats = &msg->spi->statistics;

	spi_set_cs(msg->spi, true, false);

	SPI_STATISTICS_INCREMENT_FIELD(statm, messages);
	SPI_STATISTICS_INCREMENT_FIELD(stats, messages);

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		trace_spi_transfer_start(msg, xfer);

		spi_statistics_add_transfer_stats(statm, xfer, ctlr);
		spi_statistics_add_transfer_stats(stats, xfer, ctlr);

		if (!ctlr->ptp_sts_supported) {
			xfer->ptp_sts_word_pre = 0;
			ptp_read_system_prets(xfer->ptp_sts);
		}

		if ((xfer->tx_buf || xfer->rx_buf) && xfer->len) {
			reinit_completion(&ctlr->xfer_completion);

fallback_pio:
			ret = ctlr->transfer_one(ctlr, msg->spi, xfer);
			if (ret < 0) {
				if (ctlr->cur_msg_mapped &&
				   (xfer->error & SPI_TRANS_FAIL_NO_START)) {
					__spi_unmap_msg(ctlr, msg);
					ctlr->fallback = true;
					xfer->error &= ~SPI_TRANS_FAIL_NO_START;
					goto fallback_pio;
				}

				SPI_STATISTICS_INCREMENT_FIELD(statm,
							       errors);
				SPI_STATISTICS_INCREMENT_FIELD(stats,
							       errors);
				dev_err(&msg->spi->dev,
					"SPI transfer failed: %d\n", ret);
				goto out;
			}

			if (ret > 0) {
				ret = spi_transfer_wait(ctlr, msg, xfer);
				if (ret < 0)
					msg->status = ret;
			}
		} else {
			if (xfer->len)
				dev_err(&msg->spi->dev,
					"Bufferless transfer has length %u\n",
					xfer->len);
		}

		if (!ctlr->ptp_sts_supported) {
			ptp_read_system_postts(xfer->ptp_sts);
			xfer->ptp_sts_word_post = xfer->len;
		}

		trace_spi_transfer_stop(msg, xfer);

		if (msg->status != -EINPROGRESS)
			goto out;

		spi_transfer_delay_exec(xfer);

		if (xfer->cs_change) {
			if (list_is_last(&xfer->transfer_list,
					 &msg->transfers)) {
				keep_cs = true;
			} else {
				spi_set_cs(msg->spi, false, false);
				_spi_transfer_cs_change_delay(msg, xfer);
				spi_set_cs(msg->spi, true, false);
			}
		}

		msg->actual_length += xfer->len;
	}

out:
	if (ret != 0 || !keep_cs)
		spi_set_cs(msg->spi, false, false);

	if (msg->status == -EINPROGRESS)
		msg->status = ret;

	if (msg->status && ctlr->handle_err)
		ctlr->handle_err(ctlr, msg);

	spi_finalize_current_message(ctlr);

	return ret;
}

/**
 * spi_finalize_current_transfer - report completion of a transfer
 * @ctlr: the controller reporting completion
 *
 * Called by SPI drivers using the core transfer_one_message()
 * implementation to notify it that the current interrupt driven
 * transfer has finished and the next one may be scheduled.
 */
void spi_finalize_current_transfer(struct spi_controller *ctlr)
{
	complete(&ctlr->xfer_completion);
}
EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);
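
/*
 * Example (illustrative sketch): a controller driver whose
 * transfer_one() returned a positive value (transfer still in flight)
 * completes it from its interrupt handler. The foo_* names are
 * hypothetical.
 *
 *	static irqreturn_t foo_irq(int irq, void *dev_id)
 *	{
 *		struct spi_controller *ctlr = dev_id;
 *
 *		... ack the hardware, drain FIFOs ...
 *
 *		spi_finalize_current_transfer(ctlr);
 *		return IRQ_HANDLED;
 *	}
 */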

static void spi_idle_runtime_pm(struct spi_controller *ctlr)
{
	if (ctlr->auto_runtime_pm) {
		pm_runtime_mark_last_busy(ctlr->dev.parent);
		pm_runtime_put_autosuspend(ctlr->dev.parent);
	}
}

/**
 * __spi_pump_messages - function which processes spi message queue
 * @ctlr: controller to process queue for
 * @in_kthread: true if we are in the context of the message pump thread
 *
 * This function checks if there is any SPI message in the queue that
 * needs processing and if so calls out to the driver to initialize hardware
 * and transfer each message.
 *
 * Note that it is called both from the kthread itself and also from
 * inside spi_sync(); the queue extraction handling at the top of the
 * function should deal with this safely.
 */
static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
{
	struct spi_transfer *xfer;
	struct spi_message *msg;
	bool was_busy = false;
	unsigned long flags;
	int ret;

	/* Lock queue */
	spin_lock_irqsave(&ctlr->queue_lock, flags);

	/* Make sure we are not already running a message */
	if (ctlr->cur_msg) {
		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
		return;
	}

	/* If another context is idling the device then defer */
	if (ctlr->idling) {
		kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
		return;
	}

	/* Check if the queue is idle */
	if (list_empty(&ctlr->queue) || !ctlr->running) {
		if (!ctlr->busy) {
			spin_unlock_irqrestore(&ctlr->queue_lock, flags);
			return;
		}

		/* Defer any non-atomic teardown to the thread */
		if (!in_kthread) {
			if (!ctlr->dummy_rx && !ctlr->dummy_tx &&
			    !ctlr->unprepare_transfer_hardware) {
				spi_idle_runtime_pm(ctlr);
				ctlr->busy = false;
				trace_spi_controller_idle(ctlr);
			} else {
				kthread_queue_work(ctlr->kworker,
						   &ctlr->pump_messages);
			}
			spin_unlock_irqrestore(&ctlr->queue_lock, flags);
			return;
		}

		ctlr->busy = false;
		ctlr->idling = true;
		spin_unlock_irqrestore(&ctlr->queue_lock, flags);

		kfree(ctlr->dummy_rx);
		ctlr->dummy_rx = NULL;
		kfree(ctlr->dummy_tx);
		ctlr->dummy_tx = NULL;
		if (ctlr->unprepare_transfer_hardware &&
		    ctlr->unprepare_transfer_hardware(ctlr))
			dev_err(&ctlr->dev,
				"failed to unprepare transfer hardware\n");
		spi_idle_runtime_pm(ctlr);
		trace_spi_controller_idle(ctlr);

		spin_lock_irqsave(&ctlr->queue_lock, flags);
		ctlr->idling = false;
		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
		return;
	}

	/* Extract head of queue */
	msg = list_first_entry(&ctlr->queue, struct spi_message, queue);
	ctlr->cur_msg = msg;

	list_del_init(&msg->queue);
	if (ctlr->busy)
		was_busy = true;
	else
		ctlr->busy = true;
	spin_unlock_irqrestore(&ctlr->queue_lock, flags);

	mutex_lock(&ctlr->io_mutex);

	if (!was_busy && ctlr->auto_runtime_pm) {
		ret = pm_runtime_get_sync(ctlr->dev.parent);
		if (ret < 0) {
			pm_runtime_put_noidle(ctlr->dev.parent);
			dev_err(&ctlr->dev, "Failed to power device: %d\n",
				ret);
			mutex_unlock(&ctlr->io_mutex);
			return;
		}
	}

	if (!was_busy)
		trace_spi_controller_busy(ctlr);

	if (!was_busy && ctlr->prepare_transfer_hardware) {
		ret = ctlr->prepare_transfer_hardware(ctlr);
		if (ret) {
			dev_err(&ctlr->dev,
				"failed to prepare transfer hardware: %d\n",
				ret);

			if (ctlr->auto_runtime_pm)
				pm_runtime_put(ctlr->dev.parent);

			msg->status = ret;
			spi_finalize_current_message(ctlr);

			mutex_unlock(&ctlr->io_mutex);
			return;
		}
	}

	trace_spi_message_start(msg);

	if (ctlr->prepare_message) {
		ret = ctlr->prepare_message(ctlr, msg);
		if (ret) {
			dev_err(&ctlr->dev, "failed to prepare message: %d\n",
				ret);
			msg->status = ret;
			spi_finalize_current_message(ctlr);
			goto out;
		}
		ctlr->cur_msg_prepared = true;
	}

	ret = spi_map_msg(ctlr, msg);
	if (ret) {
		msg->status = ret;
		spi_finalize_current_message(ctlr);
		goto out;
	}

	if (!ctlr->ptp_sts_supported && !ctlr->transfer_one) {
		list_for_each_entry(xfer, &msg->transfers, transfer_list) {
			xfer->ptp_sts_word_pre = 0;
			ptp_read_system_prets(xfer->ptp_sts);
		}
	}

	ret = ctlr->transfer_one_message(ctlr, msg);
	if (ret) {
		dev_err(&ctlr->dev,
			"failed to transfer one message from queue\n");
		goto out;
	}

out:
	mutex_unlock(&ctlr->io_mutex);

	/* Prod the scheduler in case transfer_one() was busy waiting */
	if (!ret)
		cond_resched();
}

/**
 * spi_pump_messages - kthread work function which processes spi message queue
 * @work: pointer to kthread work struct contained in the controller struct
 */
static void spi_pump_messages(struct kthread_work *work)
{
	struct spi_controller *ctlr =
		container_of(work, struct spi_controller, pump_messages);

	__spi_pump_messages(ctlr, true);
}

/**
 * spi_take_timestamp_pre - helper for drivers to collect the beginning of the
 *			    TX timestamp for the requested byte from the SPI
 *			    transfer. The frequency with which this function
 *			    must be called (once per word, once for the whole
 *			    transfer, once per batch of words etc) is arbitrary
 *			    as long as the @tx buffer offset is greater than or
 *			    equal to the requested byte at the time of the
 *			    call. The timestamp is only taken once, at the
 *			    first such call. It is assumed that the driver
 *			    advances its @tx buffer pointer monotonically.
 * @ctlr: Pointer to the spi_controller structure of the driver
 * @xfer: Pointer to the transfer being timestamped
 * @progress: How many words (not bytes) have been transferred so far
 * @irqs_off: If true, will disable IRQs and preemption for the duration of the
 *	      transfer, for less jitter in time measurement. Only compatible
 *	      with PIO drivers. If true, must follow up with
 *	      spi_take_timestamp_post or otherwise the system will crash.
 *	      WARNING: for fully predictable results, the CPU frequency must
 *	      also be under control (governor).
 */
void spi_take_timestamp_pre(struct spi_controller *ctlr,
			    struct spi_transfer *xfer,
			    size_t progress, bool irqs_off)
{
	if (!xfer->ptp_sts)
		return;

	if (xfer->timestamped)
		return;

	if (progress > xfer->ptp_sts_word_pre)
		return;

	/* Capture the resolution of the timestamp */
	xfer->ptp_sts_word_pre = progress;

	if (irqs_off) {
		local_irq_save(ctlr->irq_flags);
		preempt_disable();
	}

	ptp_read_system_prets(xfer->ptp_sts);
}
EXPORT_SYMBOL_GPL(spi_take_timestamp_pre);

/**
 * spi_take_timestamp_post - helper for drivers to collect the end of the
 *			     TX timestamp for the requested byte from the SPI
 *			     transfer. Can be called with an arbitrary
 *			     frequency: only the first call where @tx exceeds
 *			     or is equal to the requested word will be
 *			     timestamped.
 * @ctlr: Pointer to the spi_controller structure of the driver
 * @xfer: Pointer to the transfer being timestamped
 * @progress: How many words (not bytes) have been transferred so far
 * @irqs_off: If true, will re-enable IRQs and preemption for the local CPU.
 */
void spi_take_timestamp_post(struct spi_controller *ctlr,
			     struct spi_transfer *xfer,
			     size_t progress, bool irqs_off)
{
	if (!xfer->ptp_sts)
		return;

	if (xfer->timestamped)
		return;

	if (progress < xfer->ptp_sts_word_post)
		return;

	ptp_read_system_postts(xfer->ptp_sts);

	if (irqs_off) {
		local_irq_restore(ctlr->irq_flags);
		preempt_enable();
	}

	/* Capture the resolution of the timestamp */
	xfer->ptp_sts_word_post = progress;

	xfer->timestamped = true;
}
EXPORT_SYMBOL_GPL(spi_take_timestamp_post);
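
/*
 * Example (illustrative sketch): a PIO driver bracketing its TX loop
 * with the two helpers. "i" counts words already written and
 * foo_write_word() is hypothetical; with irqs_off == false no
 * IRQ/preemption state is touched.
 *
 *	for (i = 0; i < xfer->len / bytes_per_word; i++) {
 *		spi_take_timestamp_pre(ctlr, xfer, i, false);
 *		foo_write_word(hw, xfer->tx_buf, i);
 *		spi_take_timestamp_post(ctlr, xfer, i + 1, false);
 *	}
 */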

/**
 * spi_set_thread_rt - set the controller to pump at realtime priority
 * @ctlr: controller to boost priority of
 *
 * This can be called because the controller requested realtime priority
 * (by setting the ->rt value before calling spi_register_controller()) or
 * because a device on the bus said that its transfers needed realtime
 * priority.
 *
 * NOTE: at the moment if any device on a bus says it needs realtime then
 * the thread will be at realtime priority for all transfers on that
 * controller. If this eventually becomes a problem we may see if we can
 * find a way to boost the priority only temporarily during relevant
 * transfers.
 */
static void spi_set_thread_rt(struct spi_controller *ctlr)
{
	dev_info(&ctlr->dev,
		"will run message pump with realtime priority\n");
	sched_set_fifo(ctlr->kworker->task);
}

static int spi_init_queue(struct spi_controller *ctlr)
{
	ctlr->running = false;
	ctlr->busy = false;

	ctlr->kworker = kthread_create_worker(0, dev_name(&ctlr->dev));
	if (IS_ERR(ctlr->kworker)) {
		dev_err(&ctlr->dev, "failed to create message pump kworker\n");
		return PTR_ERR(ctlr->kworker);
	}

	kthread_init_work(&ctlr->pump_messages, spi_pump_messages);

	/*
	 * Controller config will indicate if this controller should run the
	 * message pump with high (realtime) priority to reduce the transfer
	 * latency on the bus by minimising the delay between a transfer
	 * request and the scheduling of the message pump thread. Without this
	 * setting the message pump thread will remain at default priority.
	 */
	if (ctlr->rt)
		spi_set_thread_rt(ctlr);

	return 0;
}

/**
 * spi_get_next_queued_message() - called by driver to check for queued
 * messages
 * @ctlr: the controller to check for queued messages
 *
 * If there are more messages in the queue, the next message is returned from
 * this call.
 *
 * Return: the next message in the queue, else NULL if the queue is empty.
 */
struct spi_message *spi_get_next_queued_message(struct spi_controller *ctlr)
{
	struct spi_message *next;
	unsigned long flags;

	/* get a pointer to the next message, if any */
	spin_lock_irqsave(&ctlr->queue_lock, flags);
	next = list_first_entry_or_null(&ctlr->queue, struct spi_message,
					queue);
	spin_unlock_irqrestore(&ctlr->queue_lock, flags);

	return next;
}
EXPORT_SYMBOL_GPL(spi_get_next_queued_message);
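
/*
 * Example (illustrative sketch): a driver that manages its own
 * hardware state might peek at the queue before powering down;
 * foo_power_down() is hypothetical.
 *
 *	if (!spi_get_next_queued_message(ctlr))
 *		foo_power_down(hw);
 */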

/**
 * spi_finalize_current_message() - the current message is complete
 * @ctlr: the controller to return the message to
 *
 * Called by the driver to notify the core that the message in the front of the
 * queue is complete and can be removed from the queue.
 */
void spi_finalize_current_message(struct spi_controller *ctlr)
{
	struct spi_transfer *xfer;
	struct spi_message *mesg;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&ctlr->queue_lock, flags);
	mesg = ctlr->cur_msg;
	spin_unlock_irqrestore(&ctlr->queue_lock, flags);

	if (!ctlr->ptp_sts_supported && !ctlr->transfer_one) {
		list_for_each_entry(xfer, &mesg->transfers, transfer_list) {
			ptp_read_system_postts(xfer->ptp_sts);
			xfer->ptp_sts_word_post = xfer->len;
		}
	}

	if (unlikely(ctlr->ptp_sts_supported))
		list_for_each_entry(xfer, &mesg->transfers, transfer_list)
			WARN_ON_ONCE(xfer->ptp_sts && !xfer->timestamped);

	spi_unmap_msg(ctlr, mesg);

	/* In the prepare_message callback the SPI bus has the opportunity to
	 * split a transfer into smaller chunks.
	 * Release the split transfers here since spi_map_msg() is done on
	 * the split transfers.
	 */
	spi_res_release(ctlr, mesg);

	if (ctlr->cur_msg_prepared && ctlr->unprepare_message) {
		ret = ctlr->unprepare_message(ctlr, mesg);
		if (ret) {
			dev_err(&ctlr->dev, "failed to unprepare message: %d\n",
				ret);
		}
	}

	spin_lock_irqsave(&ctlr->queue_lock, flags);
	ctlr->cur_msg = NULL;
	ctlr->cur_msg_prepared = false;
	ctlr->fallback = false;
	kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
	spin_unlock_irqrestore(&ctlr->queue_lock, flags);

	trace_spi_message_done(mesg);

	mesg->state = NULL;
	if (mesg->complete)
		mesg->complete(mesg->context);
}
EXPORT_SYMBOL_GPL(spi_finalize_current_message);
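
/*
 * Example (illustrative sketch): a driver providing its own
 * transfer_one_message() must finalize each message exactly once,
 * even on error, or the queue stalls. foo_do_transfers() is
 * hypothetical.
 *
 *	static int foo_transfer_one_message(struct spi_controller *ctlr,
 *					    struct spi_message *msg)
 *	{
 *		msg->status = foo_do_transfers(ctlr, msg);
 *		spi_finalize_current_message(ctlr);
 *		return msg->status;
 *	}
 */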
1779
spi_start_queue(struct spi_controller * ctlr)1780 static int spi_start_queue(struct spi_controller *ctlr)
1781 {
1782 unsigned long flags;
1783
1784 spin_lock_irqsave(&ctlr->queue_lock, flags);
1785
1786 if (ctlr->running || ctlr->busy) {
1787 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1788 return -EBUSY;
1789 }
1790
1791 ctlr->running = true;
1792 ctlr->cur_msg = NULL;
1793 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1794
1795 kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
1796
1797 return 0;
1798 }
1799
spi_stop_queue(struct spi_controller * ctlr)1800 static int spi_stop_queue(struct spi_controller *ctlr)
1801 {
1802 unsigned long flags;
1803 unsigned limit = 500;
1804 int ret = 0;
1805
1806 spin_lock_irqsave(&ctlr->queue_lock, flags);
1807
1808 /*
1809 * This is a bit lame, but is optimized for the common execution path.
1810 * A wait_queue on the ctlr->busy could be used, but then the common
1811 * execution path (pump_messages) would be required to call wake_up or
1812 * friends on every SPI message. Do this instead.
1813 */
1814 while ((!list_empty(&ctlr->queue) || ctlr->busy) && limit--) {
1815 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1816 usleep_range(10000, 11000);
1817 spin_lock_irqsave(&ctlr->queue_lock, flags);
1818 }
1819
1820 if (!list_empty(&ctlr->queue) || ctlr->busy)
1821 ret = -EBUSY;
1822 else
1823 ctlr->running = false;
1824
1825 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1826
1827 if (ret) {
1828 dev_warn(&ctlr->dev, "could not stop message queue\n");
1829 return ret;
1830 }
1831 return ret;
1832 }
1833
1834 static int spi_destroy_queue(struct spi_controller *ctlr)
1835 {
1836 int ret;
1837
1838 ret = spi_stop_queue(ctlr);
1839
1840 /*
1841 * kthread_flush_worker will block until all work is done.
1842 * If the reason that stop_queue timed out is that the work will never
1843 	 * finish, then it does no good to flush or stop the worker thread,
1844 	 * so just return anyway.
1845 */
1846 if (ret) {
1847 dev_err(&ctlr->dev, "problem destroying queue\n");
1848 return ret;
1849 }
1850
1851 kthread_destroy_worker(ctlr->kworker);
1852
1853 return 0;
1854 }
1855
1856 static int __spi_queued_transfer(struct spi_device *spi,
1857 struct spi_message *msg,
1858 bool need_pump)
1859 {
1860 struct spi_controller *ctlr = spi->controller;
1861 unsigned long flags;
1862
1863 spin_lock_irqsave(&ctlr->queue_lock, flags);
1864
1865 if (!ctlr->running) {
1866 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1867 return -ESHUTDOWN;
1868 }
1869 msg->actual_length = 0;
1870 msg->status = -EINPROGRESS;
1871
1872 list_add_tail(&msg->queue, &ctlr->queue);
1873 if (!ctlr->busy && need_pump)
1874 kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
1875
1876 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1877 return 0;
1878 }
1879
1880 /**
1881 * spi_queued_transfer - transfer function for queued transfers
1882 * @spi: spi device which is requesting transfer
1883  * @msg: spi message to be queued to the driver's message queue
1884 *
1885 * Return: zero on success, else a negative error code.
1886 */
1887 static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
1888 {
1889 return __spi_queued_transfer(spi, msg, true);
1890 }
1891
1892 static int spi_controller_initialize_queue(struct spi_controller *ctlr)
1893 {
1894 int ret;
1895
1896 ctlr->transfer = spi_queued_transfer;
1897 if (!ctlr->transfer_one_message)
1898 ctlr->transfer_one_message = spi_transfer_one_message;
1899
1900 /* Initialize and start queue */
1901 ret = spi_init_queue(ctlr);
1902 if (ret) {
1903 dev_err(&ctlr->dev, "problem initializing queue\n");
1904 goto err_init_queue;
1905 }
1906 ctlr->queued = true;
1907 ret = spi_start_queue(ctlr);
1908 if (ret) {
1909 dev_err(&ctlr->dev, "problem starting queue\n");
1910 goto err_start_queue;
1911 }
1912
1913 return 0;
1914
1915 err_start_queue:
1916 spi_destroy_queue(ctlr);
1917 err_init_queue:
1918 return ret;
1919 }
1920
1921 /**
1922  * spi_flush_queue - Send all pending messages in the queue from the caller's
1923 * context
1924 * @ctlr: controller to process queue for
1925 *
1926 * This should be used when one wants to ensure all pending messages have been
1927  * sent before doing something. It is used by the spi-mem code to make sure SPI
1928 * memory operations do not preempt regular SPI transfers that have been queued
1929 * before the spi-mem operation.
1930 */
1931 void spi_flush_queue(struct spi_controller *ctlr)
1932 {
1933 if (ctlr->transfer == spi_queued_transfer)
1934 __spi_pump_messages(ctlr, false);
1935 }
1936
1937 /*-------------------------------------------------------------------------*/
1938
1939 #if defined(CONFIG_OF)
1940 static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi,
1941 struct device_node *nc)
1942 {
1943 u32 value;
1944 int rc;
1945
1946 /* Mode (clock phase/polarity/etc.) */
1947 if (of_property_read_bool(nc, "spi-cpha"))
1948 spi->mode |= SPI_CPHA;
1949 if (of_property_read_bool(nc, "spi-cpol"))
1950 spi->mode |= SPI_CPOL;
1951 if (of_property_read_bool(nc, "spi-3wire"))
1952 spi->mode |= SPI_3WIRE;
1953 if (of_property_read_bool(nc, "spi-lsb-first"))
1954 spi->mode |= SPI_LSB_FIRST;
1955 if (of_property_read_bool(nc, "spi-cs-high"))
1956 spi->mode |= SPI_CS_HIGH;
1957
1958 /* Device DUAL/QUAD mode */
1959 if (!of_property_read_u32(nc, "spi-tx-bus-width", &value)) {
1960 switch (value) {
1961 case 1:
1962 break;
1963 case 2:
1964 spi->mode |= SPI_TX_DUAL;
1965 break;
1966 case 4:
1967 spi->mode |= SPI_TX_QUAD;
1968 break;
1969 case 8:
1970 spi->mode |= SPI_TX_OCTAL;
1971 break;
1972 default:
1973 dev_warn(&ctlr->dev,
1974 "spi-tx-bus-width %d not supported\n",
1975 value);
1976 break;
1977 }
1978 }
1979
1980 if (!of_property_read_u32(nc, "spi-rx-bus-width", &value)) {
1981 switch (value) {
1982 case 1:
1983 break;
1984 case 2:
1985 spi->mode |= SPI_RX_DUAL;
1986 break;
1987 case 4:
1988 spi->mode |= SPI_RX_QUAD;
1989 break;
1990 case 8:
1991 spi->mode |= SPI_RX_OCTAL;
1992 break;
1993 default:
1994 dev_warn(&ctlr->dev,
1995 "spi-rx-bus-width %d not supported\n",
1996 value);
1997 break;
1998 }
1999 }
2000
2001 if (spi_controller_is_slave(ctlr)) {
2002 if (!of_node_name_eq(nc, "slave")) {
2003 dev_err(&ctlr->dev, "%pOF is not called 'slave'\n",
2004 nc);
2005 return -EINVAL;
2006 }
2007 return 0;
2008 }
2009
2010 /* Device address */
2011 rc = of_property_read_u32(nc, "reg", &value);
2012 if (rc) {
2013 dev_err(&ctlr->dev, "%pOF has no valid 'reg' property (%d)\n",
2014 nc, rc);
2015 return rc;
2016 }
2017 spi->chip_select = value;
2018
2019 /* Device speed */
2020 if (!of_property_read_u32(nc, "spi-max-frequency", &value))
2021 spi->max_speed_hz = value;
2022
2023 return 0;
2024 }
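
/*
 * Editor's note: a hypothetical device-tree fragment matching the properties
 * parsed above ("flash@0" and its parent node label are made up; the
 * "jedec,spi-nor" compatible is a real binding):
 *
 *	&spi0 {
 *		flash@0 {
 *			compatible = "jedec,spi-nor";
 *			reg = <0>;			// chip select 0
 *			spi-max-frequency = <25000000>;
 *			spi-cpol;			// SPI mode 3
 *			spi-cpha;
 *			spi-tx-bus-width = <4>;		// SPI_TX_QUAD
 *			spi-rx-bus-width = <4>;		// SPI_RX_QUAD
 *		};
 *	};
 */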
2025
2026 static struct spi_device *
2027 of_register_spi_device(struct spi_controller *ctlr, struct device_node *nc)
2028 {
2029 struct spi_device *spi;
2030 int rc;
2031
2032 /* Alloc an spi_device */
2033 spi = spi_alloc_device(ctlr);
2034 if (!spi) {
2035 dev_err(&ctlr->dev, "spi_device alloc error for %pOF\n", nc);
2036 rc = -ENOMEM;
2037 goto err_out;
2038 }
2039
2040 /* Select device driver */
2041 rc = of_modalias_node(nc, spi->modalias,
2042 sizeof(spi->modalias));
2043 if (rc < 0) {
2044 dev_err(&ctlr->dev, "cannot find modalias for %pOF\n", nc);
2045 goto err_out;
2046 }
2047
2048 rc = of_spi_parse_dt(ctlr, spi, nc);
2049 if (rc)
2050 goto err_out;
2051
2052 /* Store a pointer to the node in the device structure */
2053 of_node_get(nc);
2054 spi->dev.of_node = nc;
2055 spi->dev.fwnode = of_fwnode_handle(nc);
2056
2057 /* Register the new device */
2058 rc = spi_add_device(spi);
2059 if (rc) {
2060 dev_err(&ctlr->dev, "spi_device register error %pOF\n", nc);
2061 goto err_of_node_put;
2062 }
2063
2064 return spi;
2065
2066 err_of_node_put:
2067 of_node_put(nc);
2068 err_out:
2069 spi_dev_put(spi);
2070 return ERR_PTR(rc);
2071 }
2072
2073 /**
2074 * of_register_spi_devices() - Register child devices onto the SPI bus
2075 * @ctlr: Pointer to spi_controller device
2076 *
2077  * Registers an spi_device for each child node of the controller node that
2078 * represents a valid SPI slave.
2079 */
2080 static void of_register_spi_devices(struct spi_controller *ctlr)
2081 {
2082 struct spi_device *spi;
2083 struct device_node *nc;
2084
2085 if (!ctlr->dev.of_node)
2086 return;
2087
2088 for_each_available_child_of_node(ctlr->dev.of_node, nc) {
2089 if (of_node_test_and_set_flag(nc, OF_POPULATED))
2090 continue;
2091 spi = of_register_spi_device(ctlr, nc);
2092 if (IS_ERR(spi)) {
2093 dev_warn(&ctlr->dev,
2094 "Failed to create SPI device for %pOF\n", nc);
2095 of_node_clear_flag(nc, OF_POPULATED);
2096 }
2097 }
2098 }
2099 #else
2100 static void of_register_spi_devices(struct spi_controller *ctlr) { }
2101 #endif
2102
2103 #ifdef CONFIG_ACPI
2104 struct acpi_spi_lookup {
2105 struct spi_controller *ctlr;
2106 u32 max_speed_hz;
2107 u32 mode;
2108 int irq;
2109 u8 bits_per_word;
2110 u8 chip_select;
2111 };
2112
2113 static void acpi_spi_parse_apple_properties(struct acpi_device *dev,
2114 struct acpi_spi_lookup *lookup)
2115 {
2116 const union acpi_object *obj;
2117
2118 if (!x86_apple_machine)
2119 return;
2120
2121 if (!acpi_dev_get_property(dev, "spiSclkPeriod", ACPI_TYPE_BUFFER, &obj)
2122 && obj->buffer.length >= 4)
2123 lookup->max_speed_hz = NSEC_PER_SEC / *(u32 *)obj->buffer.pointer;
2124
2125 if (!acpi_dev_get_property(dev, "spiWordSize", ACPI_TYPE_BUFFER, &obj)
2126 && obj->buffer.length == 8)
2127 lookup->bits_per_word = *(u64 *)obj->buffer.pointer;
2128
2129 if (!acpi_dev_get_property(dev, "spiBitOrder", ACPI_TYPE_BUFFER, &obj)
2130 && obj->buffer.length == 8 && !*(u64 *)obj->buffer.pointer)
2131 lookup->mode |= SPI_LSB_FIRST;
2132
2133 if (!acpi_dev_get_property(dev, "spiSPO", ACPI_TYPE_BUFFER, &obj)
2134 && obj->buffer.length == 8 && *(u64 *)obj->buffer.pointer)
2135 lookup->mode |= SPI_CPOL;
2136
2137 if (!acpi_dev_get_property(dev, "spiSPH", ACPI_TYPE_BUFFER, &obj)
2138 && obj->buffer.length == 8 && *(u64 *)obj->buffer.pointer)
2139 lookup->mode |= SPI_CPHA;
2140 }
2141
2142 static int acpi_spi_add_resource(struct acpi_resource *ares, void *data)
2143 {
2144 struct acpi_spi_lookup *lookup = data;
2145 struct spi_controller *ctlr = lookup->ctlr;
2146
2147 if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
2148 struct acpi_resource_spi_serialbus *sb;
2149 acpi_handle parent_handle;
2150 acpi_status status;
2151
2152 sb = &ares->data.spi_serial_bus;
2153 if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_SPI) {
2154
2155 status = acpi_get_handle(NULL,
2156 sb->resource_source.string_ptr,
2157 &parent_handle);
2158
2159 if (ACPI_FAILURE(status) ||
2160 ACPI_HANDLE(ctlr->dev.parent) != parent_handle)
2161 return -ENODEV;
2162
2163 /*
2164 * ACPI DeviceSelection numbering is handled by the
2165 * host controller driver in Windows and can vary
2166 * from driver to driver. In Linux we always expect
2167 * 0 .. max - 1 so we need to ask the driver to
2168 * translate between the two schemes.
2169 */
2170 if (ctlr->fw_translate_cs) {
2171 int cs = ctlr->fw_translate_cs(ctlr,
2172 sb->device_selection);
2173 if (cs < 0)
2174 return cs;
2175 lookup->chip_select = cs;
2176 } else {
2177 lookup->chip_select = sb->device_selection;
2178 }
2179
2180 lookup->max_speed_hz = sb->connection_speed;
2181 lookup->bits_per_word = sb->data_bit_length;
2182
2183 if (sb->clock_phase == ACPI_SPI_SECOND_PHASE)
2184 lookup->mode |= SPI_CPHA;
2185 if (sb->clock_polarity == ACPI_SPI_START_HIGH)
2186 lookup->mode |= SPI_CPOL;
2187 if (sb->device_polarity == ACPI_SPI_ACTIVE_HIGH)
2188 lookup->mode |= SPI_CS_HIGH;
2189 }
2190 } else if (lookup->irq < 0) {
2191 struct resource r;
2192
2193 if (acpi_dev_resource_interrupt(ares, 0, &r))
2194 lookup->irq = r.start;
2195 }
2196
2197 /* Always tell the ACPI core to skip this resource */
2198 return 1;
2199 }
2200
2201 static acpi_status acpi_register_spi_device(struct spi_controller *ctlr,
2202 struct acpi_device *adev)
2203 {
2204 acpi_handle parent_handle = NULL;
2205 struct list_head resource_list;
2206 struct acpi_spi_lookup lookup = {};
2207 struct spi_device *spi;
2208 int ret;
2209
2210 if (acpi_bus_get_status(adev) || !adev->status.present ||
2211 acpi_device_enumerated(adev))
2212 return AE_OK;
2213
2214 lookup.ctlr = ctlr;
2215 lookup.irq = -1;
2216
2217 INIT_LIST_HEAD(&resource_list);
2218 ret = acpi_dev_get_resources(adev, &resource_list,
2219 acpi_spi_add_resource, &lookup);
2220 acpi_dev_free_resource_list(&resource_list);
2221
2222 if (ret < 0)
2223 /* found SPI in _CRS but it points to another controller */
2224 return AE_OK;
2225
2226 if (!lookup.max_speed_hz &&
2227 !ACPI_FAILURE(acpi_get_parent(adev->handle, &parent_handle)) &&
2228 ACPI_HANDLE(ctlr->dev.parent) == parent_handle) {
2229 /* Apple does not use _CRS but nested devices for SPI slaves */
2230 acpi_spi_parse_apple_properties(adev, &lookup);
2231 }
2232
2233 if (!lookup.max_speed_hz)
2234 return AE_OK;
2235
2236 spi = spi_alloc_device(ctlr);
2237 if (!spi) {
2238 dev_err(&ctlr->dev, "failed to allocate SPI device for %s\n",
2239 dev_name(&adev->dev));
2240 return AE_NO_MEMORY;
2241 }
2242
2243
2244 ACPI_COMPANION_SET(&spi->dev, adev);
2245 spi->max_speed_hz = lookup.max_speed_hz;
2246 spi->mode |= lookup.mode;
2247 spi->irq = lookup.irq;
2248 spi->bits_per_word = lookup.bits_per_word;
2249 spi->chip_select = lookup.chip_select;
2250
2251 acpi_set_modalias(adev, acpi_device_hid(adev), spi->modalias,
2252 sizeof(spi->modalias));
2253
2254 if (spi->irq < 0)
2255 spi->irq = acpi_dev_gpio_irq_get(adev, 0);
2256
2257 acpi_device_set_enumerated(adev);
2258
2259 adev->power.flags.ignore_parent = true;
2260 if (spi_add_device(spi)) {
2261 adev->power.flags.ignore_parent = false;
2262 dev_err(&ctlr->dev, "failed to add SPI device %s from ACPI\n",
2263 dev_name(&adev->dev));
2264 spi_dev_put(spi);
2265 }
2266
2267 return AE_OK;
2268 }
2269
2270 static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level,
2271 void *data, void **return_value)
2272 {
2273 struct spi_controller *ctlr = data;
2274 struct acpi_device *adev;
2275
2276 if (acpi_bus_get_device(handle, &adev))
2277 return AE_OK;
2278
2279 return acpi_register_spi_device(ctlr, adev);
2280 }
2281
2282 #define SPI_ACPI_ENUMERATE_MAX_DEPTH 32
2283
2284 static void acpi_register_spi_devices(struct spi_controller *ctlr)
2285 {
2286 acpi_status status;
2287 acpi_handle handle;
2288
2289 handle = ACPI_HANDLE(ctlr->dev.parent);
2290 if (!handle)
2291 return;
2292
2293 status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
2294 SPI_ACPI_ENUMERATE_MAX_DEPTH,
2295 acpi_spi_add_device, NULL, ctlr, NULL);
2296 if (ACPI_FAILURE(status))
2297 dev_warn(&ctlr->dev, "failed to enumerate SPI slaves\n");
2298 }
2299 #else
2300 static inline void acpi_register_spi_devices(struct spi_controller *ctlr) {}
2301 #endif /* CONFIG_ACPI */
2302
2303 static void spi_controller_release(struct device *dev)
2304 {
2305 struct spi_controller *ctlr;
2306
2307 ctlr = container_of(dev, struct spi_controller, dev);
2308 kfree(ctlr);
2309 }
2310
2311 static struct class spi_master_class = {
2312 .name = "spi_master",
2313 .owner = THIS_MODULE,
2314 .dev_release = spi_controller_release,
2315 .dev_groups = spi_master_groups,
2316 };
2317
2318 #ifdef CONFIG_SPI_SLAVE
2319 /**
2320 * spi_slave_abort - abort the ongoing transfer request on an SPI slave
2321 * controller
2322 * @spi: device used for the current transfer
2323 */
2324 int spi_slave_abort(struct spi_device *spi)
2325 {
2326 struct spi_controller *ctlr = spi->controller;
2327
2328 if (spi_controller_is_slave(ctlr) && ctlr->slave_abort)
2329 return ctlr->slave_abort(ctlr);
2330
2331 return -ENOTSUPP;
2332 }
2333 EXPORT_SYMBOL_GPL(spi_slave_abort);
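
/*
 * Editor's note: a minimal sketch of a slave protocol driver aborting a
 * submitted-but-stalled transfer, e.g. when the remote master goes away.
 * The completion and timeout handling around it are hypothetical.
 *
 *	if (!wait_for_completion_timeout(&priv->done, msecs_to_jiffies(500)))
 *		spi_slave_abort(spi);	// cancel the pending slave transfer
 */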
2334
2335 static int match_true(struct device *dev, void *data)
2336 {
2337 return 1;
2338 }
2339
2340 static ssize_t slave_show(struct device *dev, struct device_attribute *attr,
2341 char *buf)
2342 {
2343 struct spi_controller *ctlr = container_of(dev, struct spi_controller,
2344 dev);
2345 struct device *child;
2346
2347 child = device_find_child(&ctlr->dev, NULL, match_true);
2348 return sprintf(buf, "%s\n",
2349 child ? to_spi_device(child)->modalias : NULL);
2350 }
2351
2352 static ssize_t slave_store(struct device *dev, struct device_attribute *attr,
2353 const char *buf, size_t count)
2354 {
2355 struct spi_controller *ctlr = container_of(dev, struct spi_controller,
2356 dev);
2357 struct spi_device *spi;
2358 struct device *child;
2359 char name[32];
2360 int rc;
2361
2362 rc = sscanf(buf, "%31s", name);
2363 if (rc != 1 || !name[0])
2364 return -EINVAL;
2365
2366 child = device_find_child(&ctlr->dev, NULL, match_true);
2367 if (child) {
2368 /* Remove registered slave */
2369 device_unregister(child);
2370 put_device(child);
2371 }
2372
2373 if (strcmp(name, "(null)")) {
2374 /* Register new slave */
2375 spi = spi_alloc_device(ctlr);
2376 if (!spi)
2377 return -ENOMEM;
2378
2379 strlcpy(spi->modalias, name, sizeof(spi->modalias));
2380
2381 rc = spi_add_device(spi);
2382 if (rc) {
2383 spi_dev_put(spi);
2384 return rc;
2385 }
2386 }
2387
2388 return count;
2389 }
2390
2391 static DEVICE_ATTR_RW(slave);
2392
2393 static struct attribute *spi_slave_attrs[] = {
2394 &dev_attr_slave.attr,
2395 NULL,
2396 };
2397
2398 static const struct attribute_group spi_slave_group = {
2399 .attrs = spi_slave_attrs,
2400 };
2401
2402 static const struct attribute_group *spi_slave_groups[] = {
2403 &spi_controller_statistics_group,
2404 &spi_slave_group,
2405 NULL,
2406 };
2407
2408 static struct class spi_slave_class = {
2409 .name = "spi_slave",
2410 .owner = THIS_MODULE,
2411 .dev_release = spi_controller_release,
2412 .dev_groups = spi_slave_groups,
2413 };
2414 #else
2415 extern struct class spi_slave_class; /* dummy */
2416 #endif
2417
2418 /**
2419 * __spi_alloc_controller - allocate an SPI master or slave controller
2420 * @dev: the controller, possibly using the platform_bus
2421 * @size: how much zeroed driver-private data to allocate; the pointer to this
2422 * memory is in the driver_data field of the returned device, accessible
2423 * with spi_controller_get_devdata(); the memory is cacheline aligned;
2424 * drivers granting DMA access to portions of their private data need to
2425 * round up @size using ALIGN(size, dma_get_cache_alignment()).
2426 * @slave: flag indicating whether to allocate an SPI master (false) or SPI
2427 * slave (true) controller
2428 * Context: can sleep
2429 *
2430 * This call is used only by SPI controller drivers, which are the
2431 * only ones directly touching chip registers. It's how they allocate
2432 * an spi_controller structure, prior to calling spi_register_controller().
2433 *
2434 * This must be called from context that can sleep.
2435 *
2436 * The caller is responsible for assigning the bus number and initializing the
2437 * controller's methods before calling spi_register_controller(); and (after
2438 * errors adding the device) calling spi_controller_put() to prevent a memory
2439 * leak.
2440 *
2441 * Return: the SPI controller structure on success, else NULL.
2442 */
2443 struct spi_controller *__spi_alloc_controller(struct device *dev,
2444 unsigned int size, bool slave)
2445 {
2446 struct spi_controller *ctlr;
2447 size_t ctlr_size = ALIGN(sizeof(*ctlr), dma_get_cache_alignment());
2448
2449 if (!dev)
2450 return NULL;
2451
2452 ctlr = kzalloc(size + ctlr_size, GFP_KERNEL);
2453 if (!ctlr)
2454 return NULL;
2455
2456 device_initialize(&ctlr->dev);
2457 ctlr->bus_num = -1;
2458 ctlr->num_chipselect = 1;
2459 ctlr->slave = slave;
2460 if (IS_ENABLED(CONFIG_SPI_SLAVE) && slave)
2461 ctlr->dev.class = &spi_slave_class;
2462 else
2463 ctlr->dev.class = &spi_master_class;
2464 ctlr->dev.parent = dev;
2465 pm_suspend_ignore_children(&ctlr->dev, true);
2466 spi_controller_set_devdata(ctlr, (void *)ctlr + ctlr_size);
2467
2468 return ctlr;
2469 }
2470 EXPORT_SYMBOL_GPL(__spi_alloc_controller);
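
/*
 * Editor's note: an illustrative (non-devm) allocation/registration pattern,
 * normally written via the spi_alloc_master() wrapper around the function
 * above; the "foo_*" names and chip-select count are hypothetical.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct spi_controller *ctlr;
 *		struct foo_priv *priv;
 *		int ret;
 *
 *		ctlr = spi_alloc_master(&pdev->dev, sizeof(*priv));
 *		if (!ctlr)
 *			return -ENOMEM;
 *		priv = spi_controller_get_devdata(ctlr);
 *
 *		ctlr->num_chipselect = 4;
 *		ctlr->transfer_one = foo_transfer_one;
 *
 *		ret = spi_register_controller(ctlr);
 *		if (ret)
 *			spi_controller_put(ctlr);	// drop the reference on error
 *		return ret;
 *	}
 */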
2471
2472 static void devm_spi_release_controller(struct device *dev, void *ctlr)
2473 {
2474 spi_controller_put(*(struct spi_controller **)ctlr);
2475 }
2476
2477 /**
2478 * __devm_spi_alloc_controller - resource-managed __spi_alloc_controller()
2479 * @dev: physical device of SPI controller
2480 * @size: how much zeroed driver-private data to allocate
2481 * @slave: whether to allocate an SPI master (false) or SPI slave (true)
2482 * Context: can sleep
2483 *
2484 * Allocate an SPI controller and automatically release a reference on it
2485 * when @dev is unbound from its driver. Drivers are thus relieved from
2486 * having to call spi_controller_put().
2487 *
2488 * The arguments to this function are identical to __spi_alloc_controller().
2489 *
2490 * Return: the SPI controller structure on success, else NULL.
2491 */
2492 struct spi_controller *__devm_spi_alloc_controller(struct device *dev,
2493 unsigned int size,
2494 bool slave)
2495 {
2496 struct spi_controller **ptr, *ctlr;
2497
2498 ptr = devres_alloc(devm_spi_release_controller, sizeof(*ptr),
2499 GFP_KERNEL);
2500 if (!ptr)
2501 return NULL;
2502
2503 ctlr = __spi_alloc_controller(dev, size, slave);
2504 if (ctlr) {
2505 ctlr->devm_allocated = true;
2506 *ptr = ctlr;
2507 devres_add(dev, ptr);
2508 } else {
2509 devres_free(ptr);
2510 }
2511
2512 return ctlr;
2513 }
2514 EXPORT_SYMBOL_GPL(__devm_spi_alloc_controller);
2515
2516 #ifdef CONFIG_OF
2517 static int of_spi_get_gpio_numbers(struct spi_controller *ctlr)
2518 {
2519 int nb, i, *cs;
2520 struct device_node *np = ctlr->dev.of_node;
2521
2522 if (!np)
2523 return 0;
2524
2525 nb = of_gpio_named_count(np, "cs-gpios");
2526 ctlr->num_chipselect = max_t(int, nb, ctlr->num_chipselect);
2527
2528 /* Return error only for an incorrectly formed cs-gpios property */
2529 if (nb == 0 || nb == -ENOENT)
2530 return 0;
2531 else if (nb < 0)
2532 return nb;
2533
2534 cs = devm_kcalloc(&ctlr->dev, ctlr->num_chipselect, sizeof(int),
2535 GFP_KERNEL);
2536 ctlr->cs_gpios = cs;
2537
2538 if (!ctlr->cs_gpios)
2539 return -ENOMEM;
2540
2541 for (i = 0; i < ctlr->num_chipselect; i++)
2542 cs[i] = -ENOENT;
2543
2544 for (i = 0; i < nb; i++)
2545 cs[i] = of_get_named_gpio(np, "cs-gpios", i);
2546
2547 return 0;
2548 }
2549 #else
2550 static int of_spi_get_gpio_numbers(struct spi_controller *ctlr)
2551 {
2552 return 0;
2553 }
2554 #endif
2555
2556 /**
2557 * spi_get_gpio_descs() - grab chip select GPIOs for the master
2558 * @ctlr: The SPI master to grab GPIO descriptors for
2559 */
2560 static int spi_get_gpio_descs(struct spi_controller *ctlr)
2561 {
2562 int nb, i;
2563 struct gpio_desc **cs;
2564 struct device *dev = &ctlr->dev;
2565 unsigned long native_cs_mask = 0;
2566 unsigned int num_cs_gpios = 0;
2567
2568 nb = gpiod_count(dev, "cs");
2569 ctlr->num_chipselect = max_t(int, nb, ctlr->num_chipselect);
2570
2571 /* No GPIOs at all is fine, else return the error */
2572 if (nb == 0 || nb == -ENOENT)
2573 return 0;
2574 else if (nb < 0)
2575 return nb;
2576
2577 cs = devm_kcalloc(dev, ctlr->num_chipselect, sizeof(*cs),
2578 GFP_KERNEL);
2579 if (!cs)
2580 return -ENOMEM;
2581 ctlr->cs_gpiods = cs;
2582
2583 for (i = 0; i < nb; i++) {
2584 /*
2585 * Most chipselects are active low, the inverted
2586 * semantics are handled by special quirks in gpiolib,
2587 		 * so initializing them to GPIOD_OUT_LOW here means
2588 * "unasserted", in most cases this will drive the physical
2589 * line high.
2590 */
2591 cs[i] = devm_gpiod_get_index_optional(dev, "cs", i,
2592 GPIOD_OUT_LOW);
2593 if (IS_ERR(cs[i]))
2594 return PTR_ERR(cs[i]);
2595
2596 if (cs[i]) {
2597 /*
2598 * If we find a CS GPIO, name it after the device and
2599 * chip select line.
2600 */
2601 char *gpioname;
2602
2603 gpioname = devm_kasprintf(dev, GFP_KERNEL, "%s CS%d",
2604 dev_name(dev), i);
2605 if (!gpioname)
2606 return -ENOMEM;
2607 gpiod_set_consumer_name(cs[i], gpioname);
2608 num_cs_gpios++;
2609 continue;
2610 }
2611
2612 if (ctlr->max_native_cs && i >= ctlr->max_native_cs) {
2613 dev_err(dev, "Invalid native chip select %d\n", i);
2614 return -EINVAL;
2615 }
2616 native_cs_mask |= BIT(i);
2617 }
2618
2619 ctlr->unused_native_cs = ffs(~native_cs_mask) - 1;
2620
2621 if ((ctlr->flags & SPI_MASTER_GPIO_SS) && num_cs_gpios &&
2622 ctlr->max_native_cs && ctlr->unused_native_cs >= ctlr->max_native_cs) {
2623 dev_err(dev, "No unused native chip select available\n");
2624 return -EINVAL;
2625 }
2626
2627 return 0;
2628 }
2629
2630 static int spi_controller_check_ops(struct spi_controller *ctlr)
2631 {
2632 /*
2633 	 * The controller may implement only the high-level SPI-memory-like
2634 	 * operations if it does not support regular SPI transfers, and this is
2635 	 * a valid use case.
2636 	 * If ->mem_ops is NULL, we request that at least one of the
2637 	 * ->transfer_xxx() methods be implemented.
2638 */
2639 if (ctlr->mem_ops) {
2640 if (!ctlr->mem_ops->exec_op)
2641 return -EINVAL;
2642 } else if (!ctlr->transfer && !ctlr->transfer_one &&
2643 !ctlr->transfer_one_message) {
2644 return -EINVAL;
2645 }
2646
2647 return 0;
2648 }
2649
2650 /**
2651 * spi_register_controller - register SPI master or slave controller
2652 * @ctlr: initialized master, originally from spi_alloc_master() or
2653 * spi_alloc_slave()
2654 * Context: can sleep
2655 *
2656 * SPI controllers connect to their drivers using some non-SPI bus,
2657 * such as the platform bus. The final stage of probe() in that code
2658 * includes calling spi_register_controller() to hook up to this SPI bus glue.
2659 *
2660 * SPI controllers use board specific (often SOC specific) bus numbers,
2661 * and board-specific addressing for SPI devices combines those numbers
2662 * with chip select numbers. Since SPI does not directly support dynamic
2663 * device identification, boards need configuration tables telling which
2664 * chip is at which address.
2665 *
2666 * This must be called from context that can sleep. It returns zero on
2667 * success, else a negative error code (dropping the controller's refcount).
2668 * After a successful return, the caller is responsible for calling
2669 * spi_unregister_controller().
2670 *
2671 * Return: zero on success, else a negative error code.
2672 */
2673 int spi_register_controller(struct spi_controller *ctlr)
2674 {
2675 struct device *dev = ctlr->dev.parent;
2676 struct boardinfo *bi;
2677 int status;
2678 int id, first_dynamic;
2679
2680 if (!dev)
2681 return -ENODEV;
2682
2683 /*
2684 * Make sure all necessary hooks are implemented before registering
2685 * the SPI controller.
2686 */
2687 status = spi_controller_check_ops(ctlr);
2688 if (status)
2689 return status;
2690
2691 if (ctlr->bus_num >= 0) {
2692 		/* devices with a fixed bus num must claim that number */
2693 mutex_lock(&board_lock);
2694 id = idr_alloc(&spi_master_idr, ctlr, ctlr->bus_num,
2695 ctlr->bus_num + 1, GFP_KERNEL);
2696 mutex_unlock(&board_lock);
2697 if (WARN(id < 0, "couldn't get idr"))
2698 return id == -ENOSPC ? -EBUSY : id;
2699 ctlr->bus_num = id;
2700 } else if (ctlr->dev.of_node) {
2701 /* allocate dynamic bus number using Linux idr */
2702 id = of_alias_get_id(ctlr->dev.of_node, "spi");
2703 if (id >= 0) {
2704 ctlr->bus_num = id;
2705 mutex_lock(&board_lock);
2706 id = idr_alloc(&spi_master_idr, ctlr, ctlr->bus_num,
2707 ctlr->bus_num + 1, GFP_KERNEL);
2708 mutex_unlock(&board_lock);
2709 if (WARN(id < 0, "couldn't get idr"))
2710 return id == -ENOSPC ? -EBUSY : id;
2711 }
2712 }
2713 if (ctlr->bus_num < 0) {
2714 first_dynamic = of_alias_get_highest_id("spi");
2715 if (first_dynamic < 0)
2716 first_dynamic = 0;
2717 else
2718 first_dynamic++;
2719
2720 mutex_lock(&board_lock);
2721 id = idr_alloc(&spi_master_idr, ctlr, first_dynamic,
2722 0, GFP_KERNEL);
2723 mutex_unlock(&board_lock);
2724 if (WARN(id < 0, "couldn't get idr"))
2725 return id;
2726 ctlr->bus_num = id;
2727 }
2728 INIT_LIST_HEAD(&ctlr->queue);
2729 spin_lock_init(&ctlr->queue_lock);
2730 spin_lock_init(&ctlr->bus_lock_spinlock);
2731 mutex_init(&ctlr->bus_lock_mutex);
2732 mutex_init(&ctlr->io_mutex);
2733 ctlr->bus_lock_flag = 0;
2734 init_completion(&ctlr->xfer_completion);
2735 if (!ctlr->max_dma_len)
2736 ctlr->max_dma_len = INT_MAX;
2737
2738 /* register the device, then userspace will see it.
2739 * registration fails if the bus ID is in use.
2740 */
2741 dev_set_name(&ctlr->dev, "spi%u", ctlr->bus_num);
2742
2743 if (!spi_controller_is_slave(ctlr)) {
2744 if (ctlr->use_gpio_descriptors) {
2745 status = spi_get_gpio_descs(ctlr);
2746 if (status)
2747 goto free_bus_id;
2748 /*
2749 * A controller using GPIO descriptors always
2750 * supports SPI_CS_HIGH if need be.
2751 */
2752 ctlr->mode_bits |= SPI_CS_HIGH;
2753 } else {
2754 /* Legacy code path for GPIOs from DT */
2755 status = of_spi_get_gpio_numbers(ctlr);
2756 if (status)
2757 goto free_bus_id;
2758 }
2759 }
2760
2761 /*
2762 * Even if it's just one always-selected device, there must
2763 * be at least one chipselect.
2764 */
2765 if (!ctlr->num_chipselect) {
2766 status = -EINVAL;
2767 goto free_bus_id;
2768 }
2769
2770 status = device_add(&ctlr->dev);
2771 if (status < 0)
2772 goto free_bus_id;
2773 dev_dbg(dev, "registered %s %s\n",
2774 spi_controller_is_slave(ctlr) ? "slave" : "master",
2775 dev_name(&ctlr->dev));
2776
2777 /*
2778 * If we're using a queued driver, start the queue. Note that we don't
2779 * need the queueing logic if the driver is only supporting high-level
2780 * memory operations.
2781 */
2782 if (ctlr->transfer) {
2783 dev_info(dev, "controller is unqueued, this is deprecated\n");
2784 } else if (ctlr->transfer_one || ctlr->transfer_one_message) {
2785 status = spi_controller_initialize_queue(ctlr);
2786 if (status) {
2787 device_del(&ctlr->dev);
2788 goto free_bus_id;
2789 }
2790 }
2791 /* add statistics */
2792 spin_lock_init(&ctlr->statistics.lock);
2793
2794 mutex_lock(&board_lock);
2795 list_add_tail(&ctlr->list, &spi_controller_list);
2796 list_for_each_entry(bi, &board_list, list)
2797 spi_match_controller_to_boardinfo(ctlr, &bi->board_info);
2798 mutex_unlock(&board_lock);
2799
2800 /* Register devices from the device tree and ACPI */
2801 of_register_spi_devices(ctlr);
2802 acpi_register_spi_devices(ctlr);
2803 return status;
2804
2805 free_bus_id:
2806 mutex_lock(&board_lock);
2807 idr_remove(&spi_master_idr, ctlr->bus_num);
2808 mutex_unlock(&board_lock);
2809 return status;
2810 }
2811 EXPORT_SYMBOL_GPL(spi_register_controller);
2812
2813 static void devm_spi_unregister(struct device *dev, void *res)
2814 {
2815 spi_unregister_controller(*(struct spi_controller **)res);
2816 }
2817
2818 /**
2819 * devm_spi_register_controller - register managed SPI master or slave
2820 * controller
2821 * @dev: device managing SPI controller
2822 * @ctlr: initialized controller, originally from spi_alloc_master() or
2823 * spi_alloc_slave()
2824 * Context: can sleep
2825 *
2826  * Register an SPI controller as with spi_register_controller(), except that
2827  * it is automatically unregistered and freed when @dev is unbound.
2828 *
2829 * Return: zero on success, else a negative error code.
2830 */
2831 int devm_spi_register_controller(struct device *dev,
2832 struct spi_controller *ctlr)
2833 {
2834 struct spi_controller **ptr;
2835 int ret;
2836
2837 ptr = devres_alloc(devm_spi_unregister, sizeof(*ptr), GFP_KERNEL);
2838 if (!ptr)
2839 return -ENOMEM;
2840
2841 ret = spi_register_controller(ctlr);
2842 if (!ret) {
2843 *ptr = ctlr;
2844 devres_add(dev, ptr);
2845 } else {
2846 devres_free(ptr);
2847 }
2848
2849 return ret;
2850 }
2851 EXPORT_SYMBOL_GPL(devm_spi_register_controller);
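
/*
 * Editor's note: the resource-managed pairing of the two devm helpers,
 * sketched with a hypothetical private struct; no explicit unregister or
 * spi_controller_put() is needed in the driver's remove path.
 *
 *	ctlr = devm_spi_alloc_master(&pdev->dev, sizeof(*priv));
 *	if (!ctlr)
 *		return -ENOMEM;
 *	...
 *	return devm_spi_register_controller(&pdev->dev, ctlr);
 */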
2852
2853 static int __unregister(struct device *dev, void *null)
2854 {
2855 spi_unregister_device(to_spi_device(dev));
2856 return 0;
2857 }
2858
2859 /**
2860 * spi_unregister_controller - unregister SPI master or slave controller
2861 * @ctlr: the controller being unregistered
2862 * Context: can sleep
2863 *
2864 * This call is used only by SPI controller drivers, which are the
2865 * only ones directly touching chip registers.
2866 *
2867 * This must be called from context that can sleep.
2868 *
2869 * Note that this function also drops a reference to the controller.
2870 */
2871 void spi_unregister_controller(struct spi_controller *ctlr)
2872 {
2873 struct spi_controller *found;
2874 int id = ctlr->bus_num;
2875
2876 /* Prevent addition of new devices, unregister existing ones */
2877 if (IS_ENABLED(CONFIG_SPI_DYNAMIC))
2878 mutex_lock(&spi_add_lock);
2879
2880 device_for_each_child(&ctlr->dev, NULL, __unregister);
2881
2882 /* First make sure that this controller was ever added */
2883 mutex_lock(&board_lock);
2884 found = idr_find(&spi_master_idr, id);
2885 mutex_unlock(&board_lock);
2886 if (ctlr->queued) {
2887 if (spi_destroy_queue(ctlr))
2888 dev_err(&ctlr->dev, "queue remove failed\n");
2889 }
2890 mutex_lock(&board_lock);
2891 list_del(&ctlr->list);
2892 mutex_unlock(&board_lock);
2893
2894 device_del(&ctlr->dev);
2895
2896 /* Release the last reference on the controller if its driver
2897 * has not yet been converted to devm_spi_alloc_master/slave().
2898 */
2899 if (!ctlr->devm_allocated)
2900 put_device(&ctlr->dev);
2901
2902 /* free bus id */
2903 mutex_lock(&board_lock);
2904 if (found == ctlr)
2905 idr_remove(&spi_master_idr, id);
2906 mutex_unlock(&board_lock);
2907
2908 if (IS_ENABLED(CONFIG_SPI_DYNAMIC))
2909 mutex_unlock(&spi_add_lock);
2910 }
2911 EXPORT_SYMBOL_GPL(spi_unregister_controller);
2912
2913 int spi_controller_suspend(struct spi_controller *ctlr)
2914 {
2915 int ret;
2916
2917 /* Basically no-ops for non-queued controllers */
2918 if (!ctlr->queued)
2919 return 0;
2920
2921 ret = spi_stop_queue(ctlr);
2922 if (ret)
2923 dev_err(&ctlr->dev, "queue stop failed\n");
2924
2925 return ret;
2926 }
2927 EXPORT_SYMBOL_GPL(spi_controller_suspend);
2928
2929 int spi_controller_resume(struct spi_controller *ctlr)
2930 {
2931 int ret;
2932
2933 if (!ctlr->queued)
2934 return 0;
2935
2936 ret = spi_start_queue(ctlr);
2937 if (ret)
2938 dev_err(&ctlr->dev, "queue restart failed\n");
2939
2940 return ret;
2941 }
2942 EXPORT_SYMBOL_GPL(spi_controller_resume);
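
/*
 * Editor's note: a sketch of a controller driver's system-PM callbacks
 * bracketing the two helpers above; the clock handling and "foo_*" names
 * are hypothetical.
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		struct spi_controller *ctlr = dev_get_drvdata(dev);
 *		struct foo_priv *priv = spi_controller_get_devdata(ctlr);
 *		int ret;
 *
 *		ret = spi_controller_suspend(ctlr);	// quiesce the queue first
 *		if (ret)
 *			return ret;
 *		clk_disable_unprepare(priv->clk);
 *		return 0;
 *	}
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		struct spi_controller *ctlr = dev_get_drvdata(dev);
 *		struct foo_priv *priv = spi_controller_get_devdata(ctlr);
 *
 *		clk_prepare_enable(priv->clk);
 *		return spi_controller_resume(ctlr);	// restart the queue
 *	}
 */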
2943
2944 static int __spi_controller_match(struct device *dev, const void *data)
2945 {
2946 struct spi_controller *ctlr;
2947 const u16 *bus_num = data;
2948
2949 ctlr = container_of(dev, struct spi_controller, dev);
2950 return ctlr->bus_num == *bus_num;
2951 }
2952
2953 /**
2954 * spi_busnum_to_master - look up master associated with bus_num
2955 * @bus_num: the master's bus number
2956 * Context: can sleep
2957 *
2958 * This call may be used with devices that are registered after
2959 * arch init time. It returns a refcounted pointer to the relevant
2960 * spi_controller (which the caller must release), or NULL if there is
2961 * no such master registered.
2962 *
2963 * Return: the SPI master structure on success, else NULL.
2964 */
2965 struct spi_controller *spi_busnum_to_master(u16 bus_num)
2966 {
2967 struct device *dev;
2968 struct spi_controller *ctlr = NULL;
2969
2970 dev = class_find_device(&spi_master_class, NULL, &bus_num,
2971 __spi_controller_match);
2972 if (dev)
2973 ctlr = container_of(dev, struct spi_controller, dev);
2974 /* reference got in class_find_device */
2975 return ctlr;
2976 }
2977 EXPORT_SYMBOL_GPL(spi_busnum_to_master);
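
/*
 * Editor's note: callers must drop the reference this lookup takes, e.g.:
 *
 *	struct spi_controller *ctlr = spi_busnum_to_master(0);
 *
 *	if (ctlr) {
 *		...
 *		put_device(&ctlr->dev);	// release class_find_device() reference
 *	}
 */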
2978
2979 /*-------------------------------------------------------------------------*/
2980
2981 /* Core methods for SPI resource management */
2982
2983 /**
2984 * spi_res_alloc - allocate a spi resource that is life-cycle managed
2985 * during the processing of a spi_message while using
2986 * spi_transfer_one
2987 * @spi: the spi device for which we allocate memory
2988 * @release: the release code to execute for this resource
2989 * @size: size to alloc and return
2990 * @gfp: GFP allocation flags
2991 *
2992 * Return: the pointer to the allocated data
2993 *
2994 * This may get enhanced in the future to allocate from a memory pool
2995 * of the @spi_device or @spi_controller to avoid repeated allocations.
2996 */
2997 void *spi_res_alloc(struct spi_device *spi,
2998 spi_res_release_t release,
2999 size_t size, gfp_t gfp)
3000 {
3001 struct spi_res *sres;
3002
3003 sres = kzalloc(sizeof(*sres) + size, gfp);
3004 if (!sres)
3005 return NULL;
3006
3007 INIT_LIST_HEAD(&sres->entry);
3008 sres->release = release;
3009
3010 return sres->data;
3011 }
3012 EXPORT_SYMBOL_GPL(spi_res_alloc);
3013
3014 /**
3015 * spi_res_free - free an spi resource
3016 * @res: pointer to the custom data of a resource
3017 *
3018 */
3019 void spi_res_free(void *res)
3020 {
3021 struct spi_res *sres = container_of(res, struct spi_res, data);
3022
3023 if (!res)
3024 return;
3025
3026 WARN_ON(!list_empty(&sres->entry));
3027 kfree(sres);
3028 }
3029 EXPORT_SYMBOL_GPL(spi_res_free);
3030
3031 /**
3032 * spi_res_add - add a spi_res to the spi_message
3033 * @message: the spi message
3034 * @res: the spi_resource
3035 */
3036 void spi_res_add(struct spi_message *message, void *res)
3037 {
3038 struct spi_res *sres = container_of(res, struct spi_res, data);
3039
3040 WARN_ON(!list_empty(&sres->entry));
3041 list_add_tail(&sres->entry, &message->resources);
3042 }
3043 EXPORT_SYMBOL_GPL(spi_res_add);
3044
3045 /**
3046 * spi_res_release - release all spi resources for this message
3047 * @ctlr: the @spi_controller
3048 * @message: the @spi_message
3049 */
3050 void spi_res_release(struct spi_controller *ctlr, struct spi_message *message)
3051 {
3052 struct spi_res *res, *tmp;
3053
3054 list_for_each_entry_safe_reverse(res, tmp, &message->resources, entry) {
3055 if (res->release)
3056 res->release(ctlr, message, res->data);
3057
3058 list_del(&res->entry);
3059
3060 kfree(res);
3061 }
3062 }
3063 EXPORT_SYMBOL_GPL(spi_res_release);
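
/*
 * Editor's note: a minimal sketch of the spi_res life cycle using the
 * helpers above; foo_release() and its payload are hypothetical.
 *
 *	static void foo_release(struct spi_controller *ctlr,
 *				struct spi_message *msg, void *res)
 *	{
 *		// undo whatever the resource represents
 *	}
 *
 *	u8 *buf = spi_res_alloc(spi, foo_release, len, GFP_KERNEL);
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	spi_res_add(msg, buf);	// freed via foo_release() in spi_res_release()
 */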
3064
3065 /*-------------------------------------------------------------------------*/
3066
3067 /* Core methods for spi_message alterations */
3068
3069 static void __spi_replace_transfers_release(struct spi_controller *ctlr,
3070 struct spi_message *msg,
3071 void *res)
3072 {
3073 struct spi_replaced_transfers *rxfer = res;
3074 size_t i;
3075
3076 /* call extra callback if requested */
3077 if (rxfer->release)
3078 rxfer->release(ctlr, msg, res);
3079
3080 /* insert replaced transfers back into the message */
3081 list_splice(&rxfer->replaced_transfers, rxfer->replaced_after);
3082
3083 /* remove the formerly inserted entries */
3084 for (i = 0; i < rxfer->inserted; i++)
3085 list_del(&rxfer->inserted_transfers[i].transfer_list);
3086 }
3087
3088 /**
3089 * spi_replace_transfers - replace transfers with several transfers
3090 * and register change with spi_message.resources
3091 * @msg: the spi_message we work upon
3092 * @xfer_first: the first spi_transfer we want to replace
3093 * @remove: number of transfers to remove
3094 * @insert: the number of transfers we want to insert instead
3095 * @release: extra release code necessary in some circumstances
3096 * @extradatasize: extra data to allocate (with alignment guarantees
3097 * of struct @spi_transfer)
3098 * @gfp: gfp flags
3099 *
3100  * Return: pointer to the @spi_replaced_transfers structure on success,
3101  * or PTR_ERR(...) in case of errors.
3102 */
3103 struct spi_replaced_transfers *spi_replace_transfers(
3104 struct spi_message *msg,
3105 struct spi_transfer *xfer_first,
3106 size_t remove,
3107 size_t insert,
3108 spi_replaced_release_t release,
3109 size_t extradatasize,
3110 gfp_t gfp)
3111 {
3112 struct spi_replaced_transfers *rxfer;
3113 struct spi_transfer *xfer;
3114 size_t i;
3115
3116 /* allocate the structure using spi_res */
3117 rxfer = spi_res_alloc(msg->spi, __spi_replace_transfers_release,
3118 struct_size(rxfer, inserted_transfers, insert)
3119 + extradatasize,
3120 gfp);
3121 if (!rxfer)
3122 return ERR_PTR(-ENOMEM);
3123
3124 /* the release code to invoke before running the generic release */
3125 rxfer->release = release;
3126
3127 /* assign extradata */
3128 if (extradatasize)
3129 rxfer->extradata =
3130 &rxfer->inserted_transfers[insert];
3131
3132 /* init the replaced_transfers list */
3133 INIT_LIST_HEAD(&rxfer->replaced_transfers);
3134
3135 /* assign the list_entry after which we should reinsert
3136 	 * the @replaced_transfers - it may be &msg->transfers!
3137 */
3138 rxfer->replaced_after = xfer_first->transfer_list.prev;
3139
3140 /* remove the requested number of transfers */
3141 for (i = 0; i < remove; i++) {
3142 		/* if the entry after replaced_after is &msg->transfers,
3143 * then we have been requested to remove more transfers
3144 * than are in the list
3145 */
3146 if (rxfer->replaced_after->next == &msg->transfers) {
3147 dev_err(&msg->spi->dev,
3148 "requested to remove more spi_transfers than are available\n");
3149 /* insert replaced transfers back into the message */
3150 list_splice(&rxfer->replaced_transfers,
3151 rxfer->replaced_after);
3152
3153 /* free the spi_replace_transfer structure */
3154 spi_res_free(rxfer);
3155
3156 /* and return with an error */
3157 return ERR_PTR(-EINVAL);
3158 }
3159
3160 /* remove the entry after replaced_after from list of
3161 * transfers and add it to list of replaced_transfers
3162 */
3163 list_move_tail(rxfer->replaced_after->next,
3164 &rxfer->replaced_transfers);
3165 }
3166
3167 	/* create copies of the given xfer with identical settings
3168 	 * based on the first transfer to be removed
3169 */
3170 for (i = 0; i < insert; i++) {
3171 /* we need to run in reverse order */
3172 xfer = &rxfer->inserted_transfers[insert - 1 - i];
3173
3174 /* copy all spi_transfer data */
3175 memcpy(xfer, xfer_first, sizeof(*xfer));
3176
3177 /* add to list */
3178 list_add(&xfer->transfer_list, rxfer->replaced_after);
3179
3180 /* clear cs_change and delay for all but the last */
3181 if (i) {
3182 xfer->cs_change = false;
3183 xfer->delay_usecs = 0;
3184 xfer->delay.value = 0;
3185 }
3186 }
3187
3188 /* set up inserted */
3189 rxfer->inserted = insert;
3190
3191 /* and register it with spi_res/spi_message */
3192 spi_res_add(msg, rxfer);
3193
3194 return rxfer;
3195 }
3196 EXPORT_SYMBOL_GPL(spi_replace_transfers);
3197
3198 static int __spi_split_transfer_maxsize(struct spi_controller *ctlr,
3199 struct spi_message *msg,
3200 struct spi_transfer **xferp,
3201 size_t maxsize,
3202 gfp_t gfp)
3203 {
3204 struct spi_transfer *xfer = *xferp, *xfers;
3205 struct spi_replaced_transfers *srt;
3206 size_t offset;
3207 size_t count, i;
3208
3209 /* calculate how many we have to replace */
3210 count = DIV_ROUND_UP(xfer->len, maxsize);
3211
3212 /* create replacement */
3213 srt = spi_replace_transfers(msg, xfer, 1, count, NULL, 0, gfp);
3214 if (IS_ERR(srt))
3215 return PTR_ERR(srt);
3216 xfers = srt->inserted_transfers;
3217
3218 /* now handle each of those newly inserted spi_transfers
3219 	 * note that the replacement spi_transfers are all preset
3220 * to the same values as *xferp, so tx_buf, rx_buf and len
3221 * are all identical (as well as most others)
3222 * so we just have to fix up len and the pointers.
3223 *
3224 	 * this also includes support for the deprecated
3225 * spi_message.is_dma_mapped interface
3226 */
3227
3228 /* the first transfer just needs the length modified, so we
3229 * run it outside the loop
3230 */
3231 xfers[0].len = min_t(size_t, maxsize, xfer[0].len);
3232
3233 /* all the others need rx_buf/tx_buf also set */
3234 for (i = 1, offset = maxsize; i < count; offset += maxsize, i++) {
3235 /* update rx_buf, tx_buf and dma */
3236 if (xfers[i].rx_buf)
3237 xfers[i].rx_buf += offset;
3238 if (xfers[i].rx_dma)
3239 xfers[i].rx_dma += offset;
3240 if (xfers[i].tx_buf)
3241 xfers[i].tx_buf += offset;
3242 if (xfers[i].tx_dma)
3243 xfers[i].tx_dma += offset;
3244
3245 /* update length */
3246 xfers[i].len = min(maxsize, xfers[i].len - offset);
3247 }
3248
3249 /* we set up xferp to the last entry we have inserted,
3250 * so that we skip those already split transfers
3251 */
3252 *xferp = &xfers[count - 1];
3253
3254 /* increment statistics counters */
3255 SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics,
3256 transfers_split_maxsize);
3257 SPI_STATISTICS_INCREMENT_FIELD(&msg->spi->statistics,
3258 transfers_split_maxsize);
3259
3260 return 0;
3261 }
3262
3263 /**
3264  * spi_split_transfers_maxsize - split spi transfers into multiple transfers
3265 * when an individual transfer exceeds a
3266 * certain size
3267 * @ctlr: the @spi_controller for this transfer
3268 * @msg: the @spi_message to transform
3269  * @maxsize: the maximum length a single transfer may have; longer ones are split
3270 * @gfp: GFP allocation flags
3271 *
3272 * Return: status of transformation
3273 */
3274 int spi_split_transfers_maxsize(struct spi_controller *ctlr,
3275 struct spi_message *msg,
3276 size_t maxsize,
3277 gfp_t gfp)
3278 {
3279 struct spi_transfer *xfer;
3280 int ret;
3281
3282 /* iterate over the transfer_list,
3283 * but note that xfer is advanced to the last transfer inserted
3284 	 * to avoid checking sizes again unnecessarily (also, xfer may
3285 	 * potentially belong to a different list by the time the
3286 	 * replacement has happened)
3287 */
3288 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
3289 if (xfer->len > maxsize) {
3290 ret = __spi_split_transfer_maxsize(ctlr, msg, &xfer,
3291 maxsize, gfp);
3292 if (ret)
3293 return ret;
3294 }
3295 }
3296
3297 return 0;
3298 }
3299 EXPORT_SYMBOL_GPL(spi_split_transfers_maxsize);
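
/*
 * Editor's note: a sketch of a controller driver capping transfers at its
 * FIFO size from a prepare_message hook (the 256-byte limit and "foo_*"
 * name are hypothetical):
 *
 *	static int foo_prepare_message(struct spi_controller *ctlr,
 *				       struct spi_message *msg)
 *	{
 *		return spi_split_transfers_maxsize(ctlr, msg, 256, GFP_KERNEL);
 *	}
 */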
3300
3301 /*-------------------------------------------------------------------------*/
3302
3303 /* Core methods for SPI controller protocol drivers. Some of the
3304 * other core methods are currently defined as inline functions.
3305 */
3306
3307 static int __spi_validate_bits_per_word(struct spi_controller *ctlr,
3308 u8 bits_per_word)
3309 {
3310 if (ctlr->bits_per_word_mask) {
3311 /* Only 32 bits fit in the mask */
3312 if (bits_per_word > 32)
3313 return -EINVAL;
3314 if (!(ctlr->bits_per_word_mask & SPI_BPW_MASK(bits_per_word)))
3315 return -EINVAL;
3316 }
3317
3318 return 0;
3319 }
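
/*
 * Editor's note: controller drivers describe the word sizes checked above
 * via bits_per_word_mask, e.g. (values hypothetical):
 *
 *	ctlr->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16);
 *	// or a contiguous range:
 *	ctlr->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
 */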
3320
3321 /**
3322 * spi_setup - setup SPI mode and clock rate
3323 * @spi: the device whose settings are being modified
3324 * Context: can sleep, and no requests are queued to the device
3325 *
3326 * SPI protocol drivers may need to update the transfer mode if the
3327 * device doesn't work with its default. They may likewise need
3328 * to update clock rates or word sizes from initial values. This function
3329 * changes those settings, and must be called from a context that can sleep.
3330 * Except for SPI_CS_HIGH, which takes effect immediately, the changes take
3331 * effect the next time the device is selected and data is transferred to
3332 * or from it. When this function returns, the spi device is deselected.
3333 *
3334 * Note that this call will fail if the protocol driver specifies an option
3335 * that the underlying controller or its driver does not support. For
3336 * example, not all hardware supports wire transfers using nine bit words,
3337 * LSB-first wire encoding, or active-high chipselects.
3338 *
3339 * Return: zero on success, else a negative error code.
3340 */
3341 int spi_setup(struct spi_device *spi)
3342 {
3343 unsigned bad_bits, ugly_bits;
3344 int status;
3345
3346 	/* check mode to prevent DUAL and QUAD from being set at the same time
3347 */
3348 if (((spi->mode & SPI_TX_DUAL) && (spi->mode & SPI_TX_QUAD)) ||
3349 ((spi->mode & SPI_RX_DUAL) && (spi->mode & SPI_RX_QUAD))) {
3350 dev_err(&spi->dev,
3351 "setup: can not select dual and quad at the same time\n");
3352 return -EINVAL;
3353 }
3354 	/* in SPI_3WIRE mode, DUAL and QUAD are forbidden
3355 */
3356 if ((spi->mode & SPI_3WIRE) && (spi->mode &
3357 (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL |
3358 SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL)))
3359 return -EINVAL;
3360 /* help drivers fail *cleanly* when they need options
3361 	 * that aren't supported with their current controller.
3362 * SPI_CS_WORD has a fallback software implementation,
3363 * so it is ignored here.
3364 */
3365 bad_bits = spi->mode & ~(spi->controller->mode_bits | SPI_CS_WORD);
3366 	/* nothing prevents us from working with an active-high CS if it
3367 	 * is driven by a GPIO.
3368 */
3369 if (gpio_is_valid(spi->cs_gpio))
3370 bad_bits &= ~SPI_CS_HIGH;
3371 ugly_bits = bad_bits &
3372 (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL |
3373 SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL);
3374 if (ugly_bits) {
3375 dev_warn(&spi->dev,
3376 "setup: ignoring unsupported mode bits %x\n",
3377 ugly_bits);
3378 spi->mode &= ~ugly_bits;
3379 bad_bits &= ~ugly_bits;
3380 }
3381 if (bad_bits) {
3382 dev_err(&spi->dev, "setup: unsupported mode bits %x\n",
3383 bad_bits);
3384 return -EINVAL;
3385 }
3386
3387 if (!spi->bits_per_word)
3388 spi->bits_per_word = 8;
3389
3390 status = __spi_validate_bits_per_word(spi->controller,
3391 spi->bits_per_word);
3392 if (status)
3393 return status;
3394
3395 if (!spi->max_speed_hz)
3396 spi->max_speed_hz = spi->controller->max_speed_hz;
3397
3398 mutex_lock(&spi->controller->io_mutex);
3399
3400 if (spi->controller->setup)
3401 status = spi->controller->setup(spi);
3402
3403 if (spi->controller->auto_runtime_pm && spi->controller->set_cs) {
3404 status = pm_runtime_get_sync(spi->controller->dev.parent);
3405 if (status < 0) {
3406 mutex_unlock(&spi->controller->io_mutex);
3407 pm_runtime_put_noidle(spi->controller->dev.parent);
3408 dev_err(&spi->controller->dev, "Failed to power device: %d\n",
3409 status);
3410 return status;
3411 }
3412
3413 /*
3414 * We do not want to return positive value from pm_runtime_get,
3415 * there are many instances of devices calling spi_setup() and
3416 * checking for a non-zero return value instead of a negative
3417 * return value.
3418 */
3419 status = 0;
3420
3421 spi_set_cs(spi, false, true);
3422 pm_runtime_mark_last_busy(spi->controller->dev.parent);
3423 pm_runtime_put_autosuspend(spi->controller->dev.parent);
3424 } else {
3425 spi_set_cs(spi, false, true);
3426 }
3427
3428 mutex_unlock(&spi->controller->io_mutex);
3429
3430 if (spi->rt && !spi->controller->rt) {
3431 spi->controller->rt = true;
3432 spi_set_thread_rt(spi->controller);
3433 }
3434
3435 dev_dbg(&spi->dev, "setup mode %d, %s%s%s%s%u bits/w, %u Hz max --> %d\n",
3436 (int) (spi->mode & (SPI_CPOL | SPI_CPHA)),
3437 (spi->mode & SPI_CS_HIGH) ? "cs_high, " : "",
3438 (spi->mode & SPI_LSB_FIRST) ? "lsb, " : "",
3439 (spi->mode & SPI_3WIRE) ? "3wire, " : "",
3440 (spi->mode & SPI_LOOP) ? "loopback, " : "",
3441 spi->bits_per_word, spi->max_speed_hz,
3442 status);
3443
3444 return status;
3445 }
3446 EXPORT_SYMBOL_GPL(spi_setup);
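
/*
 * Editor's note: a typical protocol-driver call sequence for the function
 * above, with hypothetical values:
 *
 *	spi->mode = SPI_MODE_3;
 *	spi->bits_per_word = 16;
 *	spi->max_speed_hz = 1000000;
 *	ret = spi_setup(spi);
 *	if (ret)
 *		return ret;	// e.g. controller lacks 16-bit words
 */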
3447
3448 /**
3449 * spi_set_cs_timing - configure CS setup, hold, and inactive delays
3450 * @spi: the device that requires specific CS timing configuration
3451 * @setup: CS setup time specified via @spi_delay
3452 * @hold: CS hold time specified via @spi_delay
3453 * @inactive: CS inactive delay between transfers specified via @spi_delay
3454 *
3455 * Return: zero on success, else a negative error code.
3456 */
3457 int spi_set_cs_timing(struct spi_device *spi, struct spi_delay *setup,
3458 struct spi_delay *hold, struct spi_delay *inactive)
3459 {
3460 size_t len;
3461
3462 if (spi->controller->set_cs_timing)
3463 return spi->controller->set_cs_timing(spi, setup, hold,
3464 inactive);
3465
3466 if ((setup && setup->unit == SPI_DELAY_UNIT_SCK) ||
3467 (hold && hold->unit == SPI_DELAY_UNIT_SCK) ||
3468 (inactive && inactive->unit == SPI_DELAY_UNIT_SCK)) {
3469 dev_err(&spi->dev,
3470 "Clock-cycle delays for CS not supported in SW mode\n");
3471 return -ENOTSUPP;
3472 }
3473
3474 len = sizeof(struct spi_delay);
3475
3476 /* copy delays to controller */
3477 if (setup)
3478 memcpy(&spi->controller->cs_setup, setup, len);
3479 else
3480 memset(&spi->controller->cs_setup, 0, len);
3481
3482 if (hold)
3483 memcpy(&spi->controller->cs_hold, hold, len);
3484 else
3485 memset(&spi->controller->cs_hold, 0, len);
3486
3487 if (inactive)
3488 memcpy(&spi->controller->cs_inactive, inactive, len);
3489 else
3490 memset(&spi->controller->cs_inactive, 0, len);
3491
3492 return 0;
3493 }
3494 EXPORT_SYMBOL_GPL(spi_set_cs_timing);
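
/*
 * Editor's note: a sketch of a caller requesting CS timing; the values are
 * hypothetical, and SPI_DELAY_UNIT_SCK only works when the controller
 * implements set_cs_timing, as noted above.
 *
 *	struct spi_delay setup = { .value = 10, .unit = SPI_DELAY_UNIT_USECS };
 *	struct spi_delay hold = { .value = 10, .unit = SPI_DELAY_UNIT_USECS };
 *
 *	ret = spi_set_cs_timing(spi, &setup, &hold, NULL);
 */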
3495
3496 static int _spi_xfer_word_delay_update(struct spi_transfer *xfer,
3497 struct spi_device *spi)
3498 {
3499 int delay1, delay2;
3500
3501 delay1 = spi_delay_to_ns(&xfer->word_delay, xfer);
3502 if (delay1 < 0)
3503 return delay1;
3504
3505 delay2 = spi_delay_to_ns(&spi->word_delay, xfer);
3506 if (delay2 < 0)
3507 return delay2;
3508
3509 if (delay1 < delay2)
3510 memcpy(&xfer->word_delay, &spi->word_delay,
3511 sizeof(xfer->word_delay));
3512
3513 return 0;
3514 }
3515
3516 static int __spi_validate(struct spi_device *spi, struct spi_message *message)
3517 {
3518 struct spi_controller *ctlr = spi->controller;
3519 struct spi_transfer *xfer;
3520 int w_size;
3521
3522 if (list_empty(&message->transfers))
3523 return -EINVAL;
3524
3525 /* If an SPI controller does not support toggling the CS line on each
3526 * transfer (indicated by the SPI_CS_WORD flag) or we are using a GPIO
3527 * for the CS line, we can emulate the CS-per-word hardware function by
3528 * splitting transfers into one-word transfers and ensuring that
3529 * cs_change is set for each transfer.
3530 */
3531 if ((spi->mode & SPI_CS_WORD) && (!(ctlr->mode_bits & SPI_CS_WORD) ||
3532 spi->cs_gpiod ||
3533 gpio_is_valid(spi->cs_gpio))) {
3534 size_t maxsize;
3535 int ret;
3536
3537 maxsize = (spi->bits_per_word + 7) / 8;
3538
3539 /* spi_split_transfers_maxsize() requires message->spi */
3540 message->spi = spi;
3541
3542 ret = spi_split_transfers_maxsize(ctlr, message, maxsize,
3543 GFP_KERNEL);
3544 if (ret)
3545 return ret;
3546
3547 list_for_each_entry(xfer, &message->transfers, transfer_list) {
3548 /* don't change cs_change on the last entry in the list */
3549 if (list_is_last(&xfer->transfer_list, &message->transfers))
3550 break;
3551 xfer->cs_change = 1;
3552 }
3553 }
3554
3555 /* Half-duplex links include original MicroWire, and ones with
3556 * only one data pin like SPI_3WIRE (switches direction) or where
3557 * either MOSI or MISO is missing. They can also be caused by
3558 * software limitations.
3559 */
3560 if ((ctlr->flags & SPI_CONTROLLER_HALF_DUPLEX) ||
3561 (spi->mode & SPI_3WIRE)) {
3562 unsigned flags = ctlr->flags;
3563
3564 list_for_each_entry(xfer, &message->transfers, transfer_list) {
3565 if (xfer->rx_buf && xfer->tx_buf)
3566 return -EINVAL;
3567 if ((flags & SPI_CONTROLLER_NO_TX) && xfer->tx_buf)
3568 return -EINVAL;
3569 if ((flags & SPI_CONTROLLER_NO_RX) && xfer->rx_buf)
3570 return -EINVAL;
3571 }
3572 }
3573
	/*
	 * Set transfer bits_per_word and max speed to the spi device
	 * defaults if they are not set for this transfer.
	 * Set transfer tx_nbits and rx_nbits to the single-wire default
	 * (SPI_NBITS_SINGLE) if they are not set for this transfer.
	 * Ensure the transfer word_delay is at least as long as that
	 * required by the device itself.
	 */
	message->frame_length = 0;
	list_for_each_entry(xfer, &message->transfers, transfer_list) {
		xfer->effective_speed_hz = 0;
		message->frame_length += xfer->len;
		if (!xfer->bits_per_word)
			xfer->bits_per_word = spi->bits_per_word;

		if (!xfer->speed_hz)
			xfer->speed_hz = spi->max_speed_hz;

		if (ctlr->max_speed_hz && xfer->speed_hz > ctlr->max_speed_hz)
			xfer->speed_hz = ctlr->max_speed_hz;

		if (__spi_validate_bits_per_word(ctlr, xfer->bits_per_word))
			return -EINVAL;

		/*
		 * The SPI transfer length should be a multiple of the SPI
		 * word size, where the word size is rounded up to a
		 * power-of-two number of bytes.
		 */
		if (xfer->bits_per_word <= 8)
			w_size = 1;
		else if (xfer->bits_per_word <= 16)
			w_size = 2;
		else
			w_size = 4;

		/* No partial transfers accepted */
		if (xfer->len % w_size)
			return -EINVAL;

		if (xfer->speed_hz && ctlr->min_speed_hz &&
		    xfer->speed_hz < ctlr->min_speed_hz)
			return -EINVAL;

		if (xfer->tx_buf && !xfer->tx_nbits)
			xfer->tx_nbits = SPI_NBITS_SINGLE;
		if (xfer->rx_buf && !xfer->rx_nbits)
			xfer->rx_nbits = SPI_NBITS_SINGLE;
		/* Check transfer tx/rx_nbits:
		 * 1. check the value matches one of single, dual and quad
		 * 2. check tx/rx_nbits match the mode in spi_device
		 */
		if (xfer->tx_buf) {
			if (xfer->tx_nbits != SPI_NBITS_SINGLE &&
			    xfer->tx_nbits != SPI_NBITS_DUAL &&
			    xfer->tx_nbits != SPI_NBITS_QUAD)
				return -EINVAL;
			if ((xfer->tx_nbits == SPI_NBITS_DUAL) &&
			    !(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD)))
				return -EINVAL;
			if ((xfer->tx_nbits == SPI_NBITS_QUAD) &&
			    !(spi->mode & SPI_TX_QUAD))
				return -EINVAL;
		}
		/* check transfer rx_nbits */
		if (xfer->rx_buf) {
			if (xfer->rx_nbits != SPI_NBITS_SINGLE &&
			    xfer->rx_nbits != SPI_NBITS_DUAL &&
			    xfer->rx_nbits != SPI_NBITS_QUAD)
				return -EINVAL;
			if ((xfer->rx_nbits == SPI_NBITS_DUAL) &&
			    !(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD)))
				return -EINVAL;
			if ((xfer->rx_nbits == SPI_NBITS_QUAD) &&
			    !(spi->mode & SPI_RX_QUAD))
				return -EINVAL;
		}

		if (_spi_xfer_word_delay_update(xfer, spi))
			return -EINVAL;
	}

	message->status = -EINPROGRESS;

	return 0;
}
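
/*
 * Illustrative sketch: a peripheral driver opting in to chip-select
 * toggling per word, relying on the emulation above when the controller
 * lacks native SPI_CS_WORD support. Probe context and a valid @spi are
 * assumed.
 *
 *	spi->mode |= SPI_CS_WORD;
 *	ret = spi_setup(spi);
 *	if (ret)
 *		return ret;
 */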

static int __spi_async(struct spi_device *spi, struct spi_message *message)
{
	struct spi_controller *ctlr = spi->controller;
	struct spi_transfer *xfer;

	/*
	 * Some controllers do not support doing regular SPI transfers. Return
	 * -ENOTSUPP when this is the case.
	 */
	if (!ctlr->transfer)
		return -ENOTSUPP;

	message->spi = spi;

	SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics, spi_async);
	SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_async);

	trace_spi_message_submit(message);

	if (!ctlr->ptp_sts_supported) {
		list_for_each_entry(xfer, &message->transfers, transfer_list) {
			xfer->ptp_sts_word_pre = 0;
			ptp_read_system_prets(xfer->ptp_sts);
		}
	}

	return ctlr->transfer(spi, message);
}

/**
 * spi_async - asynchronous SPI transfer
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers, including completion callback
 * Context: any (irqs may be blocked, etc)
 *
 * This call may be used in IRQ and other contexts which can't sleep,
 * as well as from task contexts which can sleep.
 *
 * The completion callback is invoked in a context which can't sleep.
 * Before that invocation, the value of message->status is undefined.
 * When the callback is issued, message->status holds either zero (to
 * indicate complete success) or a negative error code. After that
 * callback returns, the driver which issued the transfer request may
 * deallocate the associated memory; it's no longer in use by any SPI
 * core or controller driver code.
 *
 * Note that although all messages to a spi_device are handled in
 * FIFO order, messages may go to different devices in other orders.
 * Some device might be higher priority, or have various "hard" access
 * time requirements, for example.
 *
 * On detection of any fault during the transfer, processing of
 * the entire message is aborted, and the device is deselected.
 * Until returning from the associated message completion callback,
 * no other spi_message queued to that device will be processed.
 * (This rule applies equally to all the synchronous transfer calls,
 * which are wrappers around this core asynchronous primitive.)
 *
 * Return: zero on success, else a negative error code.
 */
int spi_async(struct spi_device *spi, struct spi_message *message)
{
	struct spi_controller *ctlr = spi->controller;
	int ret;
	unsigned long flags;

	ret = __spi_validate(spi, message);
	if (ret != 0)
		return ret;

	spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);

	if (ctlr->bus_lock_flag)
		ret = -EBUSY;
	else
		ret = __spi_async(spi, message);

	spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(spi_async);
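
/*
 * Illustrative sketch: submitting a message asynchronously and finishing
 * up in the completion callback. The message, transfer, and buffers must
 * stay valid until completion; struct my_dev and my_complete() below are
 * hypothetical names.
 *
 *	static void my_complete(void *context)
 *	{
 *		struct my_dev *priv = context;
 *
 *		... priv->msg.status now holds the result ...
 *	}
 *
 *	spi_message_init_with_transfers(&priv->msg, &priv->xfer, 1);
 *	priv->msg.complete = my_complete;
 *	priv->msg.context = priv;
 *	ret = spi_async(spi, &priv->msg);
 */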

/**
 * spi_async_locked - version of spi_async with exclusive bus usage
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers, including completion callback
 * Context: any (irqs may be blocked, etc)
 *
 * This call may be used in IRQ and other contexts which can't sleep,
 * as well as from task contexts which can sleep.
 *
 * The completion callback is invoked in a context which can't sleep.
 * Before that invocation, the value of message->status is undefined.
 * When the callback is issued, message->status holds either zero (to
 * indicate complete success) or a negative error code. After that
 * callback returns, the driver which issued the transfer request may
 * deallocate the associated memory; it's no longer in use by any SPI
 * core or controller driver code.
 *
 * Note that although all messages to a spi_device are handled in
 * FIFO order, messages may go to different devices in other orders.
 * Some device might be higher priority, or have various "hard" access
 * time requirements, for example.
 *
 * On detection of any fault during the transfer, processing of
 * the entire message is aborted, and the device is deselected.
 * Until returning from the associated message completion callback,
 * no other spi_message queued to that device will be processed.
 * (This rule applies equally to all the synchronous transfer calls,
 * which are wrappers around this core asynchronous primitive.)
 *
 * Return: zero on success, else a negative error code.
 */
int spi_async_locked(struct spi_device *spi, struct spi_message *message)
{
	struct spi_controller *ctlr = spi->controller;
	int ret;
	unsigned long flags;

	ret = __spi_validate(spi, message);
	if (ret != 0)
		return ret;

	spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);

	ret = __spi_async(spi, message);

	spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(spi_async_locked);

/*-------------------------------------------------------------------------*/

/* Utility methods for SPI protocol drivers, layered on
 * top of the core. Some other utility methods are defined as
 * inline functions.
 */

static void spi_complete(void *arg)
{
	complete(arg);
}
static int __spi_sync(struct spi_device *spi, struct spi_message *message)
{
	DECLARE_COMPLETION_ONSTACK(done);
	int status;
	struct spi_controller *ctlr = spi->controller;
	unsigned long flags;

	status = __spi_validate(spi, message);
	if (status != 0)
		return status;

	message->complete = spi_complete;
	message->context = &done;
	message->spi = spi;

	SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics, spi_sync);
	SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_sync);

	/*
	 * If we're not using the legacy transfer method, we will try to
	 * transfer in the calling context, so this needs special casing.
	 * This code would be less tricky if we could remove the support
	 * for driver-implemented message queues.
	 */
	if (ctlr->transfer == spi_queued_transfer) {
		spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);

		trace_spi_message_submit(message);

		status = __spi_queued_transfer(spi, message, false);

		spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
	} else {
		status = spi_async_locked(spi, message);
	}

	if (status == 0) {
		/* Push out the messages in the calling context if we can */
		if (ctlr->transfer == spi_queued_transfer) {
			SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics,
						       spi_sync_immediate);
			SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics,
						       spi_sync_immediate);
			__spi_pump_messages(ctlr, false);
		}

		wait_for_completion(&done);
		status = message->status;
	}
	message->context = NULL;
	return status;
}

/**
 * spi_sync - blocking/synchronous SPI data transfers
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep. The sleep
 * is non-interruptible, and has no timeout. Low-overhead controller
 * drivers may DMA directly into and out of the message buffers.
 *
 * Note that the SPI device's chip select is active during the message,
 * and then is normally disabled between messages. Drivers for some
 * frequently-used devices may want to minimize costs of selecting a chip,
 * by leaving it selected in anticipation that the next message will go
 * to the same chip. (That may increase power usage.)
 *
 * Also, the caller is guaranteeing that the memory associated with the
 * message will not be freed before this call returns.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_sync(struct spi_device *spi, struct spi_message *message)
{
	int ret;

	mutex_lock(&spi->controller->bus_lock_mutex);
	ret = __spi_sync(spi, message);
	mutex_unlock(&spi->controller->bus_lock_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(spi_sync);
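
/*
 * Illustrative sketch: a blocking full-duplex exchange from process
 * context. tx, rx, and len are hypothetical; for low-overhead controllers
 * that DMA directly, the buffers should be DMA-safe (e.g. kmalloc'd
 * rather than on the stack).
 *
 *	struct spi_transfer t = {
 *		.tx_buf = tx,
 *		.rx_buf = rx,
 *		.len = len,
 *	};
 *	struct spi_message m;
 *
 *	spi_message_init(&m);
 *	spi_message_add_tail(&t, &m);
 *	ret = spi_sync(spi, &m);
 */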

/**
 * spi_sync_locked - version of spi_sync with exclusive bus usage
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep. The sleep
 * is non-interruptible, and has no timeout. Low-overhead controller
 * drivers may DMA directly into and out of the message buffers.
 *
 * This call should be used by drivers that require exclusive access to the
 * SPI bus. It has to be preceded by a spi_bus_lock call. The SPI bus must
 * be released by a spi_bus_unlock call when the exclusive access is over.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_sync_locked(struct spi_device *spi, struct spi_message *message)
{
	return __spi_sync(spi, message);
}
EXPORT_SYMBOL_GPL(spi_sync_locked);

/**
 * spi_bus_lock - obtain a lock for exclusive SPI bus usage
 * @ctlr: SPI bus master that should be locked for exclusive bus access
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep. The sleep
 * is non-interruptible, and has no timeout.
 *
 * This call should be used by drivers that require exclusive access to the
 * SPI bus. The SPI bus must be released by a spi_bus_unlock call when the
 * exclusive access is over. Data transfer must be done by spi_sync_locked
 * and spi_async_locked calls when the SPI bus lock is held.
 *
 * Return: always zero.
 */
int spi_bus_lock(struct spi_controller *ctlr)
{
	unsigned long flags;

	mutex_lock(&ctlr->bus_lock_mutex);

	spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
	ctlr->bus_lock_flag = 1;
	spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);

	/* mutex remains locked until spi_bus_unlock is called */

	return 0;
}
EXPORT_SYMBOL_GPL(spi_bus_lock);

/**
 * spi_bus_unlock - release the lock for exclusive SPI bus usage
 * @ctlr: SPI bus master that was locked for exclusive bus access
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep. The sleep
 * is non-interruptible, and has no timeout.
 *
 * This call releases an SPI bus lock previously obtained by an spi_bus_lock
 * call.
 *
 * Return: always zero.
 */
int spi_bus_unlock(struct spi_controller *ctlr)
{
	ctlr->bus_lock_flag = 0;

	mutex_unlock(&ctlr->bus_lock_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(spi_bus_unlock);
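
/*
 * Illustrative sketch: an atomic two-message sequence with the bus held,
 * using the _locked transfer calls between spi_bus_lock() and
 * spi_bus_unlock(). msg1 and msg2 are hypothetical, fully-built
 * spi_messages; spi_async_locked() may be used here the same way.
 *
 *	spi_bus_lock(spi->controller);
 *	ret = spi_sync_locked(spi, &msg1);
 *	if (!ret)
 *		ret = spi_sync_locked(spi, &msg2);
 *	spi_bus_unlock(spi->controller);
 */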

/* portable code must never pass more than 32 bytes */
#define SPI_BUFSIZ	max(32, SMP_CACHE_BYTES)

static u8 *buf;

/**
 * spi_write_then_read - SPI synchronous write followed by read
 * @spi: device with which data will be exchanged
 * @txbuf: data to be written (need not be dma-safe)
 * @n_tx: size of txbuf, in bytes
 * @rxbuf: buffer into which data will be read (need not be dma-safe)
 * @n_rx: size of rxbuf, in bytes
 * Context: can sleep
 *
 * This performs a half duplex MicroWire style transaction with the
 * device, sending txbuf and then reading rxbuf. The return value
 * is zero for success, else a negative errno status code.
 * This call may only be used from a context that may sleep.
 *
 * Parameters to this routine are always copied using a small buffer.
 * Performance-sensitive or bulk transfer code should instead use
 * spi_{async,sync}() calls with dma-safe buffers.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_write_then_read(struct spi_device *spi,
			const void *txbuf, unsigned n_tx,
			void *rxbuf, unsigned n_rx)
{
	static DEFINE_MUTEX(lock);

	int status;
	struct spi_message message;
	struct spi_transfer x[2];
	u8 *local_buf;
	/*
	 * Use the pre-allocated DMA-safe buffer if we can. We can't avoid
	 * copying here (this is purely a convenience API), but we can keep
	 * heap costs out of the hot path unless someone else is using the
	 * pre-allocated buffer or the transfer is too large.
	 */
	if ((n_tx + n_rx) > SPI_BUFSIZ || !mutex_trylock(&lock)) {
		local_buf = kmalloc(max((unsigned)SPI_BUFSIZ, n_tx + n_rx),
				    GFP_KERNEL | GFP_DMA);
		if (!local_buf)
			return -ENOMEM;
	} else {
		local_buf = buf;
	}

	spi_message_init(&message);
	memset(x, 0, sizeof(x));
	if (n_tx) {
		x[0].len = n_tx;
		spi_message_add_tail(&x[0], &message);
	}
	if (n_rx) {
		x[1].len = n_rx;
		spi_message_add_tail(&x[1], &message);
	}

	memcpy(local_buf, txbuf, n_tx);
	x[0].tx_buf = local_buf;
	x[1].rx_buf = local_buf + n_tx;

	/* do the i/o */
	status = spi_sync(spi, &message);
	if (status == 0)
		memcpy(rxbuf, x[1].rx_buf, n_rx);

	if (x[0].tx_buf == buf)
		mutex_unlock(&lock);
	else
		kfree(local_buf);

	return status;
}
EXPORT_SYMBOL_GPL(spi_write_then_read);
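
/*
 * Illustrative sketch: reading one register of a hypothetical chip whose
 * protocol is "send a one-byte command, then read back a one-byte value".
 * MY_CMD_READ_STATUS is a made-up opcode, not from any real driver.
 *
 *	u8 cmd = MY_CMD_READ_STATUS;
 *	u8 val;
 *	int ret;
 *
 *	ret = spi_write_then_read(spi, &cmd, 1, &val, 1);
 *	if (ret)
 *		return ret;
 */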

/*-------------------------------------------------------------------------*/

#if IS_ENABLED(CONFIG_OF)
/* Must call put_device() when done with the returned spi_device */
struct spi_device *of_find_spi_device_by_node(struct device_node *node)
{
	struct device *dev = bus_find_device_by_of_node(&spi_bus_type, node);

	return dev ? to_spi_device(dev) : NULL;
}
EXPORT_SYMBOL_GPL(of_find_spi_device_by_node);
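
/*
 * Illustrative sketch: looking up the spi_device behind a device-tree
 * node and dropping the reference when done. "np" is assumed to be a
 * valid struct device_node pointer obtained elsewhere.
 *
 *	struct spi_device *spi = of_find_spi_device_by_node(np);
 *
 *	if (spi) {
 *		... use spi ...
 *		put_device(&spi->dev);
 *	}
 */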
#endif /* IS_ENABLED(CONFIG_OF) */

#if IS_ENABLED(CONFIG_OF_DYNAMIC)
/* The SPI controllers are not on the spi_bus, so we find them another way */
static struct spi_controller *of_find_spi_controller_by_node(struct device_node *node)
{
	struct device *dev;

	dev = class_find_device_by_of_node(&spi_master_class, node);
	if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE))
		dev = class_find_device_by_of_node(&spi_slave_class, node);
	if (!dev)
		return NULL;

	/* reference was taken in class_find_device() */
	return container_of(dev, struct spi_controller, dev);
}

static int of_spi_notify(struct notifier_block *nb, unsigned long action,
			 void *arg)
{
	struct of_reconfig_data *rd = arg;
	struct spi_controller *ctlr;
	struct spi_device *spi;

	switch (of_reconfig_get_state_change(action, arg)) {
	case OF_RECONFIG_CHANGE_ADD:
		ctlr = of_find_spi_controller_by_node(rd->dn->parent);
		if (ctlr == NULL)
			return NOTIFY_OK;	/* not for us */

		if (of_node_test_and_set_flag(rd->dn, OF_POPULATED)) {
			put_device(&ctlr->dev);
			return NOTIFY_OK;
		}

		spi = of_register_spi_device(ctlr, rd->dn);
		put_device(&ctlr->dev);

		if (IS_ERR(spi)) {
			pr_err("%s: failed to create for '%pOF'\n",
			       __func__, rd->dn);
			of_node_clear_flag(rd->dn, OF_POPULATED);
			return notifier_from_errno(PTR_ERR(spi));
		}
		break;

	case OF_RECONFIG_CHANGE_REMOVE:
		/* already depopulated? */
		if (!of_node_check_flag(rd->dn, OF_POPULATED))
			return NOTIFY_OK;

		/* find our device by node */
		spi = of_find_spi_device_by_node(rd->dn);
		if (spi == NULL)
			return NOTIFY_OK;	/* not meant for us */

		/* unregister takes one ref away */
		spi_unregister_device(spi);

		/* and put the reference taken by the find above */
		put_device(&spi->dev);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block spi_of_notifier = {
	.notifier_call = of_spi_notify,
};
#else /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
extern struct notifier_block spi_of_notifier;
#endif /* IS_ENABLED(CONFIG_OF_DYNAMIC) */

#if IS_ENABLED(CONFIG_ACPI)
static int spi_acpi_controller_match(struct device *dev, const void *data)
{
	return ACPI_COMPANION(dev->parent) == data;
}

static struct spi_controller *acpi_spi_find_controller_by_adev(struct acpi_device *adev)
{
	struct device *dev;

	dev = class_find_device(&spi_master_class, NULL, adev,
				spi_acpi_controller_match);
	if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE))
		dev = class_find_device(&spi_slave_class, NULL, adev,
					spi_acpi_controller_match);
	if (!dev)
		return NULL;

	return container_of(dev, struct spi_controller, dev);
}

static struct spi_device *acpi_spi_find_device_by_adev(struct acpi_device *adev)
{
	struct device *dev;

	dev = bus_find_device_by_acpi_dev(&spi_bus_type, adev);
	return to_spi_device(dev);
}

static int acpi_spi_notify(struct notifier_block *nb, unsigned long value,
			   void *arg)
{
	struct acpi_device *adev = arg;
	struct spi_controller *ctlr;
	struct spi_device *spi;

	switch (value) {
	case ACPI_RECONFIG_DEVICE_ADD:
		ctlr = acpi_spi_find_controller_by_adev(adev->parent);
		if (!ctlr)
			break;

		acpi_register_spi_device(ctlr, adev);
		put_device(&ctlr->dev);
		break;
	case ACPI_RECONFIG_DEVICE_REMOVE:
		if (!acpi_device_enumerated(adev))
			break;

		spi = acpi_spi_find_device_by_adev(adev);
		if (!spi)
			break;

		spi_unregister_device(spi);
		put_device(&spi->dev);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block spi_acpi_notifier = {
	.notifier_call = acpi_spi_notify,
};
#else
extern struct notifier_block spi_acpi_notifier;
#endif

static int __init spi_init(void)
{
	int status;

	buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL);
	if (!buf) {
		status = -ENOMEM;
		goto err0;
	}

	status = bus_register(&spi_bus_type);
	if (status < 0)
		goto err1;

	status = class_register(&spi_master_class);
	if (status < 0)
		goto err2;

	if (IS_ENABLED(CONFIG_SPI_SLAVE)) {
		status = class_register(&spi_slave_class);
		if (status < 0)
			goto err3;
	}

	if (IS_ENABLED(CONFIG_OF_DYNAMIC))
		WARN_ON(of_reconfig_notifier_register(&spi_of_notifier));
	if (IS_ENABLED(CONFIG_ACPI))
		WARN_ON(acpi_reconfig_notifier_register(&spi_acpi_notifier));

	return 0;

err3:
	class_unregister(&spi_master_class);
err2:
	bus_unregister(&spi_bus_type);
err1:
	kfree(buf);
	buf = NULL;
err0:
	return status;
}

/* board_info is normally registered in arch_initcall(),
 * but even essential drivers wait till later.
 *
 * REVISIT only boardinfo really needs static linking. The rest (device and
 * driver registration) _could_ be dynamically linked (modular) ... costs
 * include needing to have boardinfo data structures be much more public.
 */
postcore_initcall(spi_init);