// SPDX-License-Identifier: GPL-2.0-or-later
// SPI init/core code
//
// Copyright (C) 2005 David Brownell
// Copyright (C) 2008 Secret Lab Technologies Ltd.

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/cache.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/mutex.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/clk/clk-conf.h>
#include <linux/slab.h>
#include <linux/mod_devicetable.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
#include <linux/of_gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/property.h>
#include <linux/export.h>
#include <linux/sched/rt.h>
#include <uapi/linux/sched/types.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/ioport.h>
#include <linux/acpi.h>
#include <linux/highmem.h>
#include <linux/idr.h>
#include <linux/platform_data/x86/apple.h>

#define CREATE_TRACE_POINTS
#include <trace/events/spi.h>
EXPORT_TRACEPOINT_SYMBOL(spi_transfer_start);
EXPORT_TRACEPOINT_SYMBOL(spi_transfer_stop);

#include "internals.h"

static DEFINE_IDR(spi_master_idr);

static void spidev_release(struct device *dev)
{
	struct spi_device *spi = to_spi_device(dev);

	spi_controller_put(spi->controller);
	kfree(spi->driver_override);
	kfree(spi);
}

static ssize_t
modalias_show(struct device *dev, struct device_attribute *a, char *buf)
{
	const struct spi_device *spi = to_spi_device(dev);
	int len;

	len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
	if (len != -ENODEV)
		return len;

	return sprintf(buf, "%s%s\n", SPI_MODULE_PREFIX, spi->modalias);
}
static DEVICE_ATTR_RO(modalias);

static ssize_t driver_override_store(struct device *dev,
				     struct device_attribute *a,
				     const char *buf, size_t count)
{
	struct spi_device *spi = to_spi_device(dev);
	const char *end = memchr(buf, '\n', count);
	const size_t len = end ? end - buf : count;
	const char *driver_override, *old;

	/* We need to keep extra room for a newline when displaying value */
	if (len >= (PAGE_SIZE - 1))
		return -EINVAL;

	driver_override = kstrndup(buf, len, GFP_KERNEL);
	if (!driver_override)
		return -ENOMEM;

	device_lock(dev);
	old = spi->driver_override;
	if (len) {
		spi->driver_override = driver_override;
	} else {
		/* Empty string, disable driver override */
		spi->driver_override = NULL;
		kfree(driver_override);
	}
	device_unlock(dev);
	kfree(old);

	return count;
}

static ssize_t driver_override_show(struct device *dev,
				    struct device_attribute *a, char *buf)
{
	const struct spi_device *spi = to_spi_device(dev);
	ssize_t len;

	device_lock(dev);
	len = snprintf(buf, PAGE_SIZE, "%s\n", spi->driver_override ? : "");
	device_unlock(dev);
	return len;
}
static DEVICE_ATTR_RW(driver_override);

#define SPI_STATISTICS_ATTRS(field, file)				\
static ssize_t spi_controller_##field##_show(struct device *dev,	\
					     struct device_attribute *attr, \
					     char *buf)			\
{									\
	struct spi_controller *ctlr = container_of(dev,		\
					 struct spi_controller, dev);	\
	return spi_statistics_##field##_show(&ctlr->statistics, buf);	\
}									\
static struct device_attribute dev_attr_spi_controller_##field = {	\
	.attr = { .name = file, .mode = 0444 },				\
	.show = spi_controller_##field##_show,				\
};									\
static ssize_t spi_device_##field##_show(struct device *dev,		\
					 struct device_attribute *attr,	\
					 char *buf)			\
{									\
	struct spi_device *spi = to_spi_device(dev);			\
	return spi_statistics_##field##_show(&spi->statistics, buf);	\
}									\
static struct device_attribute dev_attr_spi_device_##field = {		\
	.attr = { .name = file, .mode = 0444 },				\
	.show = spi_device_##field##_show,				\
}

#define SPI_STATISTICS_SHOW_NAME(name, file, field, format_string)	\
static ssize_t spi_statistics_##name##_show(struct spi_statistics *stat, \
					    char *buf)			\
{									\
	unsigned long flags;						\
	ssize_t len;							\
	spin_lock_irqsave(&stat->lock, flags);				\
	len = sprintf(buf, format_string, stat->field);			\
	spin_unlock_irqrestore(&stat->lock, flags);			\
	return len;							\
}									\
SPI_STATISTICS_ATTRS(name, file)

#define SPI_STATISTICS_SHOW(field, format_string)			\
	SPI_STATISTICS_SHOW_NAME(field, __stringify(field),		\
				 field, format_string)

SPI_STATISTICS_SHOW(messages, "%lu");
SPI_STATISTICS_SHOW(transfers, "%lu");
SPI_STATISTICS_SHOW(errors, "%lu");
SPI_STATISTICS_SHOW(timedout, "%lu");

SPI_STATISTICS_SHOW(spi_sync, "%lu");
SPI_STATISTICS_SHOW(spi_sync_immediate, "%lu");
SPI_STATISTICS_SHOW(spi_async, "%lu");

SPI_STATISTICS_SHOW(bytes, "%llu");
SPI_STATISTICS_SHOW(bytes_rx, "%llu");
SPI_STATISTICS_SHOW(bytes_tx, "%llu");

#define SPI_STATISTICS_TRANSFER_BYTES_HISTO(index, number)		\
	SPI_STATISTICS_SHOW_NAME(transfer_bytes_histo##index,		\
				 "transfer_bytes_histo_" number,	\
				 transfer_bytes_histo[index], "%lu")
SPI_STATISTICS_TRANSFER_BYTES_HISTO(0, "0-1");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(1, "2-3");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(2, "4-7");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(3, "8-15");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(4, "16-31");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(5, "32-63");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(6, "64-127");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(7, "128-255");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(8, "256-511");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(9, "512-1023");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(10, "1024-2047");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(11, "2048-4095");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(12, "4096-8191");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(13, "8192-16383");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(14, "16384-32767");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(15, "32768-65535");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(16, "65536+");

SPI_STATISTICS_SHOW(transfers_split_maxsize, "%lu");

static struct attribute *spi_dev_attrs[] = {
	&dev_attr_modalias.attr,
	&dev_attr_driver_override.attr,
	NULL,
};

static const struct attribute_group spi_dev_group = {
	.attrs  = spi_dev_attrs,
};

static struct attribute *spi_device_statistics_attrs[] = {
	&dev_attr_spi_device_messages.attr,
	&dev_attr_spi_device_transfers.attr,
	&dev_attr_spi_device_errors.attr,
	&dev_attr_spi_device_timedout.attr,
	&dev_attr_spi_device_spi_sync.attr,
	&dev_attr_spi_device_spi_sync_immediate.attr,
	&dev_attr_spi_device_spi_async.attr,
	&dev_attr_spi_device_bytes.attr,
	&dev_attr_spi_device_bytes_rx.attr,
	&dev_attr_spi_device_bytes_tx.attr,
	&dev_attr_spi_device_transfer_bytes_histo0.attr,
	&dev_attr_spi_device_transfer_bytes_histo1.attr,
	&dev_attr_spi_device_transfer_bytes_histo2.attr,
	&dev_attr_spi_device_transfer_bytes_histo3.attr,
	&dev_attr_spi_device_transfer_bytes_histo4.attr,
	&dev_attr_spi_device_transfer_bytes_histo5.attr,
	&dev_attr_spi_device_transfer_bytes_histo6.attr,
	&dev_attr_spi_device_transfer_bytes_histo7.attr,
	&dev_attr_spi_device_transfer_bytes_histo8.attr,
	&dev_attr_spi_device_transfer_bytes_histo9.attr,
	&dev_attr_spi_device_transfer_bytes_histo10.attr,
	&dev_attr_spi_device_transfer_bytes_histo11.attr,
	&dev_attr_spi_device_transfer_bytes_histo12.attr,
	&dev_attr_spi_device_transfer_bytes_histo13.attr,
	&dev_attr_spi_device_transfer_bytes_histo14.attr,
	&dev_attr_spi_device_transfer_bytes_histo15.attr,
	&dev_attr_spi_device_transfer_bytes_histo16.attr,
	&dev_attr_spi_device_transfers_split_maxsize.attr,
	NULL,
};

static const struct attribute_group spi_device_statistics_group = {
	.name  = "statistics",
	.attrs  = spi_device_statistics_attrs,
};

static const struct attribute_group *spi_dev_groups[] = {
	&spi_dev_group,
	&spi_device_statistics_group,
	NULL,
};

static struct attribute *spi_controller_statistics_attrs[] = {
	&dev_attr_spi_controller_messages.attr,
	&dev_attr_spi_controller_transfers.attr,
	&dev_attr_spi_controller_errors.attr,
	&dev_attr_spi_controller_timedout.attr,
	&dev_attr_spi_controller_spi_sync.attr,
	&dev_attr_spi_controller_spi_sync_immediate.attr,
	&dev_attr_spi_controller_spi_async.attr,
	&dev_attr_spi_controller_bytes.attr,
	&dev_attr_spi_controller_bytes_rx.attr,
	&dev_attr_spi_controller_bytes_tx.attr,
	&dev_attr_spi_controller_transfer_bytes_histo0.attr,
	&dev_attr_spi_controller_transfer_bytes_histo1.attr,
	&dev_attr_spi_controller_transfer_bytes_histo2.attr,
	&dev_attr_spi_controller_transfer_bytes_histo3.attr,
	&dev_attr_spi_controller_transfer_bytes_histo4.attr,
	&dev_attr_spi_controller_transfer_bytes_histo5.attr,
	&dev_attr_spi_controller_transfer_bytes_histo6.attr,
	&dev_attr_spi_controller_transfer_bytes_histo7.attr,
	&dev_attr_spi_controller_transfer_bytes_histo8.attr,
	&dev_attr_spi_controller_transfer_bytes_histo9.attr,
	&dev_attr_spi_controller_transfer_bytes_histo10.attr,
	&dev_attr_spi_controller_transfer_bytes_histo11.attr,
	&dev_attr_spi_controller_transfer_bytes_histo12.attr,
	&dev_attr_spi_controller_transfer_bytes_histo13.attr,
	&dev_attr_spi_controller_transfer_bytes_histo14.attr,
	&dev_attr_spi_controller_transfer_bytes_histo15.attr,
	&dev_attr_spi_controller_transfer_bytes_histo16.attr,
	&dev_attr_spi_controller_transfers_split_maxsize.attr,
	NULL,
};

static const struct attribute_group spi_controller_statistics_group = {
	.name  = "statistics",
	.attrs  = spi_controller_statistics_attrs,
};

static const struct attribute_group *spi_master_groups[] = {
	&spi_controller_statistics_group,
	NULL,
};

void spi_statistics_add_transfer_stats(struct spi_statistics *stats,
				       struct spi_transfer *xfer,
				       struct spi_controller *ctlr)
{
	unsigned long flags;
	int l2len = min(fls(xfer->len), SPI_STATISTICS_HISTO_SIZE) - 1;

	if (l2len < 0)
		l2len = 0;

	spin_lock_irqsave(&stats->lock, flags);

	stats->transfers++;
	stats->transfer_bytes_histo[l2len]++;

	stats->bytes += xfer->len;
	if ((xfer->tx_buf) &&
	    (xfer->tx_buf != ctlr->dummy_tx))
		stats->bytes_tx += xfer->len;
	if ((xfer->rx_buf) &&
	    (xfer->rx_buf != ctlr->dummy_rx))
		stats->bytes_rx += xfer->len;

	spin_unlock_irqrestore(&stats->lock, flags);
}
EXPORT_SYMBOL_GPL(spi_statistics_add_transfer_stats);
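
/*
 * Worked example of the histogram bucketing above (illustrative): for a
 * transfer of xfer->len = 300 bytes, fls(300) = 9, so l2len becomes
 * min(9, SPI_STATISTICS_HISTO_SIZE) - 1 = 8 and the transfer is counted
 * in the "256-511" bucket (transfer_bytes_histo_256-511 in sysfs). A
 * zero-length transfer gives fls(0) - 1 = -1, which the l2len < 0 check
 * clamps into bucket 0.
 */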

/* modalias support makes "modprobe $MODALIAS" new-style hotplug work,
 * and the sysfs version makes coldplug work too.
 */

static const struct spi_device_id *spi_match_id(const struct spi_device_id *id,
						const struct spi_device *sdev)
{
	while (id->name[0]) {
		if (!strcmp(sdev->modalias, id->name))
			return id;
		id++;
	}
	return NULL;
}

const struct spi_device_id *spi_get_device_id(const struct spi_device *sdev)
{
	const struct spi_driver *sdrv = to_spi_driver(sdev->dev.driver);

	return spi_match_id(sdrv->id_table, sdev);
}
EXPORT_SYMBOL_GPL(spi_get_device_id);

static int spi_match_device(struct device *dev, struct device_driver *drv)
{
	const struct spi_device	*spi = to_spi_device(dev);
	const struct spi_driver	*sdrv = to_spi_driver(drv);

	/* Check override first, and if set, only use the named driver */
	if (spi->driver_override)
		return strcmp(spi->driver_override, drv->name) == 0;

	/* Attempt an OF style match */
	if (of_driver_match_device(dev, drv))
		return 1;

	/* Then try ACPI */
	if (acpi_driver_match_device(dev, drv))
		return 1;

	if (sdrv->id_table)
		return !!spi_match_id(sdrv->id_table, spi);

	return strcmp(spi->modalias, drv->name) == 0;
}

static int spi_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	const struct spi_device	*spi = to_spi_device(dev);
	int rc;

	rc = acpi_device_uevent_modalias(dev, env);
	if (rc != -ENODEV)
		return rc;

	return add_uevent_var(env, "MODALIAS=%s%s", SPI_MODULE_PREFIX, spi->modalias);
}

struct bus_type spi_bus_type = {
	.name		= "spi",
	.dev_groups	= spi_dev_groups,
	.match		= spi_match_device,
	.uevent		= spi_uevent,
};
EXPORT_SYMBOL_GPL(spi_bus_type);


static int spi_drv_probe(struct device *dev)
{
	const struct spi_driver	*sdrv = to_spi_driver(dev->driver);
	struct spi_device	*spi = to_spi_device(dev);
	int ret;

	ret = of_clk_set_defaults(dev->of_node, false);
	if (ret)
		return ret;

	if (dev->of_node) {
		spi->irq = of_irq_get(dev->of_node, 0);
		if (spi->irq == -EPROBE_DEFER)
			return -EPROBE_DEFER;
		if (spi->irq < 0)
			spi->irq = 0;
	}

	ret = dev_pm_domain_attach(dev, true);
	if (ret)
		return ret;

	if (sdrv->probe) {
		ret = sdrv->probe(spi);
		if (ret)
			dev_pm_domain_detach(dev, true);
	}

	return ret;
}

static int spi_drv_remove(struct device *dev)
{
	const struct spi_driver	*sdrv = to_spi_driver(dev->driver);
	int ret = 0;

	if (sdrv->remove)
		ret = sdrv->remove(to_spi_device(dev));
	dev_pm_domain_detach(dev, true);

	return ret;
}

static void spi_drv_shutdown(struct device *dev)
{
	const struct spi_driver	*sdrv = to_spi_driver(dev->driver);

	sdrv->shutdown(to_spi_device(dev));
}

/**
 * __spi_register_driver - register a SPI driver
 * @owner: owner module of the driver to register
 * @sdrv: the driver to register
 * Context: can sleep
 *
 * Return: zero on success, else a negative error code.
 */
int __spi_register_driver(struct module *owner, struct spi_driver *sdrv)
{
	sdrv->driver.owner = owner;
	sdrv->driver.bus = &spi_bus_type;
	sdrv->driver.probe = spi_drv_probe;
	sdrv->driver.remove = spi_drv_remove;
	if (sdrv->shutdown)
		sdrv->driver.shutdown = spi_drv_shutdown;
	return driver_register(&sdrv->driver);
}
EXPORT_SYMBOL_GPL(__spi_register_driver);
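
/*
 * Illustrative sketch (not part of this file): a minimal protocol driver
 * built on __spi_register_driver() via the module_spi_driver() helper.
 * The "foo" names and the one-byte ID read are hypothetical.
 */
#if 0	/* example only */
static int foo_probe(struct spi_device *spi)
{
	u8 id;

	/* The core has already run spi_setup() on this device */
	return spi_write_then_read(spi, NULL, 0, &id, 1);
}

static const struct spi_device_id foo_ids[] = {
	{ "foo" },
	{ }
};
MODULE_DEVICE_TABLE(spi, foo_ids);

static struct spi_driver foo_driver = {
	.driver = {
		.name	= "foo",
	},
	.probe		= foo_probe,
	.id_table	= foo_ids,
};
module_spi_driver(foo_driver);
#endif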
451
452 /*-------------------------------------------------------------------------*/
453
454 /* SPI devices should normally not be created by SPI device drivers; that
455 * would make them board-specific. Similarly with SPI controller drivers.
456 * Device registration normally goes into like arch/.../mach.../board-YYY.c
457 * with other readonly (flashable) information about mainboard devices.
458 */
459
460 struct boardinfo {
461 struct list_head list;
462 struct spi_board_info board_info;
463 };
464
465 static LIST_HEAD(board_list);
466 static LIST_HEAD(spi_controller_list);
467
468 /*
469 * Used to protect add/del operation for board_info list and
470 * spi_controller list, and their matching process
471 * also used to protect object of type struct idr
472 */
473 static DEFINE_MUTEX(board_lock);
474
475 /**
476 * spi_alloc_device - Allocate a new SPI device
477 * @ctlr: Controller to which device is connected
478 * Context: can sleep
479 *
480 * Allows a driver to allocate and initialize a spi_device without
481 * registering it immediately. This allows a driver to directly
482 * fill the spi_device with device parameters before calling
483 * spi_add_device() on it.
484 *
485 * Caller is responsible to call spi_add_device() on the returned
486 * spi_device structure to add it to the SPI controller. If the caller
487 * needs to discard the spi_device without adding it, then it should
488 * call spi_dev_put() on it.
489 *
490 * Return: a pointer to the new device, or NULL.
491 */
spi_alloc_device(struct spi_controller * ctlr)492 struct spi_device *spi_alloc_device(struct spi_controller *ctlr)
493 {
494 struct spi_device *spi;
495
496 if (!spi_controller_get(ctlr))
497 return NULL;
498
499 spi = kzalloc(sizeof(*spi), GFP_KERNEL);
500 if (!spi) {
501 spi_controller_put(ctlr);
502 return NULL;
503 }
504
505 spi->master = spi->controller = ctlr;
506 spi->dev.parent = &ctlr->dev;
507 spi->dev.bus = &spi_bus_type;
508 spi->dev.release = spidev_release;
509 spi->cs_gpio = -ENOENT;
510 spi->mode = ctlr->buswidth_override_bits;
511
512 spin_lock_init(&spi->statistics.lock);
513
514 device_initialize(&spi->dev);
515 return spi;
516 }
517 EXPORT_SYMBOL_GPL(spi_alloc_device);
518
spi_dev_set_name(struct spi_device * spi)519 static void spi_dev_set_name(struct spi_device *spi)
520 {
521 struct acpi_device *adev = ACPI_COMPANION(&spi->dev);
522
523 if (adev) {
524 dev_set_name(&spi->dev, "spi-%s", acpi_dev_name(adev));
525 return;
526 }
527
528 dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->controller->dev),
529 spi->chip_select);
530 }
531
spi_dev_check(struct device * dev,void * data)532 static int spi_dev_check(struct device *dev, void *data)
533 {
534 struct spi_device *spi = to_spi_device(dev);
535 struct spi_device *new_spi = data;
536
537 if (spi->controller == new_spi->controller &&
538 spi->chip_select == new_spi->chip_select)
539 return -EBUSY;
540 return 0;
541 }
542
spi_cleanup(struct spi_device * spi)543 static void spi_cleanup(struct spi_device *spi)
544 {
545 if (spi->controller->cleanup)
546 spi->controller->cleanup(spi);
547 }
548
549 /**
550 * spi_add_device - Add spi_device allocated with spi_alloc_device
551 * @spi: spi_device to register
552 *
553 * Companion function to spi_alloc_device. Devices allocated with
554 * spi_alloc_device can be added onto the spi bus with this function.
555 *
556 * Return: 0 on success; negative errno on failure
557 */
spi_add_device(struct spi_device * spi)558 int spi_add_device(struct spi_device *spi)
559 {
560 struct spi_controller *ctlr = spi->controller;
561 struct device *dev = ctlr->dev.parent;
562 int status;
563
564 /* Chipselects are numbered 0..max; validate. */
565 if (spi->chip_select >= ctlr->num_chipselect) {
566 dev_err(dev, "cs%d >= max %d\n", spi->chip_select,
567 ctlr->num_chipselect);
568 return -EINVAL;
569 }
570
571 /* Set the bus ID string */
572 spi_dev_set_name(spi);
573
574 /* We need to make sure there's no other device with this
575 * chipselect **BEFORE** we call setup(), else we'll trash
576 * its configuration. Lock against concurrent add() calls.
577 */
578 mutex_lock(&ctlr->add_lock);
579
580 status = bus_for_each_dev(&spi_bus_type, NULL, spi, spi_dev_check);
581 if (status) {
582 dev_err(dev, "chipselect %d already in use\n",
583 spi->chip_select);
584 goto done;
585 }
586
587 /* Controller may unregister concurrently */
588 if (IS_ENABLED(CONFIG_SPI_DYNAMIC) &&
589 !device_is_registered(&ctlr->dev)) {
590 status = -ENODEV;
591 goto done;
592 }
593
594 /* Descriptors take precedence */
595 if (ctlr->cs_gpiods)
596 spi->cs_gpiod = ctlr->cs_gpiods[spi->chip_select];
597 else if (ctlr->cs_gpios)
598 spi->cs_gpio = ctlr->cs_gpios[spi->chip_select];
599
600 /* Drivers may modify this initial i/o setup, but will
601 * normally rely on the device being setup. Devices
602 * using SPI_CS_HIGH can't coexist well otherwise...
603 */
604 status = spi_setup(spi);
605 if (status < 0) {
606 dev_err(dev, "can't setup %s, status %d\n",
607 dev_name(&spi->dev), status);
608 goto done;
609 }
610
611 /* Device may be bound to an active driver when this returns */
612 status = device_add(&spi->dev);
613 if (status < 0) {
614 dev_err(dev, "can't add %s, status %d\n",
615 dev_name(&spi->dev), status);
616 spi_cleanup(spi);
617 } else {
618 dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev));
619 }
620
621 done:
622 mutex_unlock(&ctlr->add_lock);
623 return status;
624 }
625 EXPORT_SYMBOL_GPL(spi_add_device);
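
/*
 * Illustrative sketch: the expected pairing of spi_alloc_device() and
 * spi_add_device(). Everything except the spi core calls is hypothetical.
 */
#if 0	/* example only */
static struct spi_device *example_attach_chip(struct spi_controller *ctlr)
{
	struct spi_device *spi;

	spi = spi_alloc_device(ctlr);	/* takes a reference on ctlr */
	if (!spi)
		return NULL;

	/* Fill in device parameters before registering */
	strlcpy(spi->modalias, "examplechip", sizeof(spi->modalias));
	spi->chip_select = 0;
	spi->max_speed_hz = 1000000;
	spi->mode = SPI_MODE_0;

	if (spi_add_device(spi)) {
		spi_dev_put(spi);	/* discard without registering */
		return NULL;
	}

	return spi;
}
#endif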

/**
 * spi_new_device - instantiate one new SPI device
 * @ctlr: Controller to which device is connected
 * @chip: Describes the SPI device
 * Context: can sleep
 *
 * On typical mainboards, this is purely internal; and it's not needed
 * after board init creates the hard-wired devices. Some development
 * platforms may not be able to use spi_register_board_info though, and
 * this is exported so that for example a USB or parport based adapter
 * driver could add devices (which it would learn about out-of-band).
 *
 * Return: the new device, or NULL.
 */
struct spi_device *spi_new_device(struct spi_controller *ctlr,
				  struct spi_board_info *chip)
{
	struct spi_device	*proxy;
	int			status;

	/* NOTE: caller did any chip->bus_num checks necessary.
	 *
	 * Also, unless we change the return value convention to use
	 * error-or-pointer (not NULL-or-pointer), troubleshootability
	 * suggests syslogged diagnostics are best here (ugh).
	 */

	proxy = spi_alloc_device(ctlr);
	if (!proxy)
		return NULL;

	WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias));

	proxy->chip_select = chip->chip_select;
	proxy->max_speed_hz = chip->max_speed_hz;
	proxy->mode = chip->mode;
	proxy->irq = chip->irq;
	strlcpy(proxy->modalias, chip->modalias, sizeof(proxy->modalias));
	proxy->dev.platform_data = (void *) chip->platform_data;
	proxy->controller_data = chip->controller_data;
	proxy->controller_state = NULL;

	if (chip->properties) {
		status = device_add_properties(&proxy->dev, chip->properties);
		if (status) {
			dev_err(&ctlr->dev,
				"failed to add properties to '%s': %d\n",
				chip->modalias, status);
			goto err_dev_put;
		}
	}

	status = spi_add_device(proxy);
	if (status < 0)
		goto err_remove_props;

	return proxy;

err_remove_props:
	if (chip->properties)
		device_remove_properties(&proxy->dev);
err_dev_put:
	spi_dev_put(proxy);
	return NULL;
}
EXPORT_SYMBOL_GPL(spi_new_device);
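
/*
 * Illustrative sketch: how an adapter driver (e.g. a USB-attached bridge)
 * might use spi_new_device() for a chip it learned about out-of-band.
 * The sensor described here is hypothetical.
 */
#if 0	/* example only */
static struct spi_device *example_add_sensor(struct spi_controller *ctlr)
{
	struct spi_board_info chip = {
		.modalias	= "examplesensor",
		.chip_select	= 1,
		.max_speed_hz	= 500000,
		.mode		= SPI_MODE_3,
	};

	/* Returns NULL on failure; the core logs the diagnostics */
	return spi_new_device(ctlr, &chip);
}
#endif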

/**
 * spi_unregister_device - unregister a single SPI device
 * @spi: spi_device to unregister
 *
 * Start making the passed SPI device vanish. Normally this would be handled
 * by spi_unregister_controller().
 */
void spi_unregister_device(struct spi_device *spi)
{
	if (!spi)
		return;

	if (spi->dev.of_node) {
		of_node_clear_flag(spi->dev.of_node, OF_POPULATED);
		of_node_put(spi->dev.of_node);
	}
	if (ACPI_COMPANION(&spi->dev))
		acpi_device_clear_enumerated(ACPI_COMPANION(&spi->dev));
	device_del(&spi->dev);
	spi_cleanup(spi);
	put_device(&spi->dev);
}
EXPORT_SYMBOL_GPL(spi_unregister_device);

static void spi_match_controller_to_boardinfo(struct spi_controller *ctlr,
					      struct spi_board_info *bi)
{
	struct spi_device *dev;

	if (ctlr->bus_num != bi->bus_num)
		return;

	dev = spi_new_device(ctlr, bi);
	if (!dev)
		dev_err(ctlr->dev.parent, "can't create new device for %s\n",
			bi->modalias);
}

/**
 * spi_register_board_info - register SPI devices for a given board
 * @info: array of chip descriptors
 * @n: how many descriptors are provided
 * Context: can sleep
 *
 * Board-specific early init code calls this (probably during arch_initcall)
 * with segments of the SPI device table. Any device nodes are created later,
 * after the relevant parent SPI controller (bus_num) is defined. We keep
 * this table of devices forever, so that reloading a controller driver will
 * not make Linux forget about these hard-wired devices.
 *
 * Other code can also call this, e.g. a particular add-on board might provide
 * SPI devices through its expansion connector, so code initializing that board
 * would naturally declare its SPI devices.
 *
 * The board info passed can safely be __initdata ... but be careful of
 * any embedded pointers (platform_data, etc), they're copied as-is.
 * Device properties are deep-copied though.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_register_board_info(struct spi_board_info const *info, unsigned n)
{
	struct boardinfo *bi;
	int i;

	if (!n)
		return 0;

	bi = kcalloc(n, sizeof(*bi), GFP_KERNEL);
	if (!bi)
		return -ENOMEM;

	for (i = 0; i < n; i++, bi++, info++) {
		struct spi_controller *ctlr;

		memcpy(&bi->board_info, info, sizeof(*info));
		if (info->properties) {
			bi->board_info.properties =
				property_entries_dup(info->properties);
			if (IS_ERR(bi->board_info.properties))
				return PTR_ERR(bi->board_info.properties);
		}

		mutex_lock(&board_lock);
		list_add_tail(&bi->list, &board_list);
		list_for_each_entry(ctlr, &spi_controller_list, list)
			spi_match_controller_to_boardinfo(ctlr,
							  &bi->board_info);
		mutex_unlock(&board_lock);
	}

	return 0;
}
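
/*
 * Illustrative sketch: a board file registering its hard-wired SPI devices
 * during early init, as described above. Board and chip names are
 * hypothetical.
 */
#if 0	/* example only */
static struct spi_board_info example_board_spi_devices[] __initdata = {
	{
		.modalias	= "exampleflash",
		.bus_num	= 0,
		.chip_select	= 0,
		.max_speed_hz	= 25000000,
		.mode		= SPI_MODE_0,
	},
};

static int __init example_board_init(void)
{
	return spi_register_board_info(example_board_spi_devices,
				       ARRAY_SIZE(example_board_spi_devices));
}
arch_initcall(example_board_init);
#endif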

/*-------------------------------------------------------------------------*/

static void spi_set_cs(struct spi_device *spi, bool enable, bool force)
{
	bool enable1 = enable;

	/*
	 * Avoid calling into the driver (or doing delays) if the chip select
	 * isn't actually changing from the last time this was called.
	 */
	if (!force && (spi->controller->last_cs_enable == enable) &&
	    (spi->controller->last_cs_mode_high == (spi->mode & SPI_CS_HIGH)))
		return;

	spi->controller->last_cs_enable = enable;
	spi->controller->last_cs_mode_high = spi->mode & SPI_CS_HIGH;

	if (!spi->controller->set_cs_timing) {
		if (enable1)
			spi_delay_exec(&spi->controller->cs_setup, NULL);
		else
			spi_delay_exec(&spi->controller->cs_hold, NULL);
	}

	if (spi->mode & SPI_CS_HIGH)
		enable = !enable;

	if (spi->cs_gpiod || gpio_is_valid(spi->cs_gpio)) {
		if (!(spi->mode & SPI_NO_CS)) {
			if (spi->cs_gpiod) {
				/*
				 * Historically ACPI has no means of expressing
				 * the GPIO polarity and thus the SPISerialBus()
				 * resource defines it on a per-chip basis. In
				 * order to avoid a chain of negations, the GPIO
				 * polarity is considered being Active High.
				 * Even for the cases when _DSD() is involved
				 * (in the updated versions of ACPI) the GPIO CS
				 * polarity must be defined Active High to avoid
				 * ambiguity. That's why we use enable, which
				 * takes SPI_CS_HIGH into account.
				 */
				if (has_acpi_companion(&spi->dev))
					gpiod_set_value_cansleep(spi->cs_gpiod, !enable);
				else
					/* Polarity handled by GPIO library */
					gpiod_set_value_cansleep(spi->cs_gpiod, enable1);
			} else {
				/*
				 * Invert the enable line, as active low is
				 * default for SPI.
				 */
				gpio_set_value_cansleep(spi->cs_gpio, !enable);
			}
		}
		/* Some SPI masters need both GPIO CS & slave_select */
		if ((spi->controller->flags & SPI_MASTER_GPIO_SS) &&
		    spi->controller->set_cs)
			spi->controller->set_cs(spi, !enable);
	} else if (spi->controller->set_cs) {
		spi->controller->set_cs(spi, !enable);
	}

	if (!spi->controller->set_cs_timing) {
		if (!enable1)
			spi_delay_exec(&spi->controller->cs_inactive, NULL);
	}
}

#ifdef CONFIG_HAS_DMA
int spi_map_buf(struct spi_controller *ctlr, struct device *dev,
		struct sg_table *sgt, void *buf, size_t len,
		enum dma_data_direction dir)
{
	const bool vmalloced_buf = is_vmalloc_addr(buf);
	unsigned int max_seg_size = dma_get_max_seg_size(dev);
#ifdef CONFIG_HIGHMEM
	const bool kmap_buf = ((unsigned long)buf >= PKMAP_BASE &&
			       (unsigned long)buf < (PKMAP_BASE +
						     (LAST_PKMAP * PAGE_SIZE)));
#else
	const bool kmap_buf = false;
#endif
	int desc_len;
	int sgs;
	struct page *vm_page;
	struct scatterlist *sg;
	void *sg_buf;
	size_t min;
	int i, ret;

	if (vmalloced_buf || kmap_buf) {
		desc_len = min_t(unsigned long, max_seg_size, PAGE_SIZE);
		sgs = DIV_ROUND_UP(len + offset_in_page(buf), desc_len);
	} else if (virt_addr_valid(buf)) {
		desc_len = min_t(size_t, max_seg_size, ctlr->max_dma_len);
		sgs = DIV_ROUND_UP(len, desc_len);
	} else {
		return -EINVAL;
	}

	ret = sg_alloc_table(sgt, sgs, GFP_KERNEL);
	if (ret != 0)
		return ret;

	sg = &sgt->sgl[0];
	for (i = 0; i < sgs; i++) {

		if (vmalloced_buf || kmap_buf) {
			/*
			 * Next scatterlist entry size is the minimum between
			 * the desc_len and the remaining buffer length that
			 * fits in a page.
			 */
			min = min_t(size_t, desc_len,
				    min_t(size_t, len,
					  PAGE_SIZE - offset_in_page(buf)));
			if (vmalloced_buf)
				vm_page = vmalloc_to_page(buf);
			else
				vm_page = kmap_to_page(buf);
			if (!vm_page) {
				sg_free_table(sgt);
				return -ENOMEM;
			}
			sg_set_page(sg, vm_page,
				    min, offset_in_page(buf));
		} else {
			min = min_t(size_t, len, desc_len);
			sg_buf = buf;
			sg_set_buf(sg, sg_buf, min);
		}

		buf += min;
		len -= min;
		sg = sg_next(sg);
	}

	ret = dma_map_sg(dev, sgt->sgl, sgt->nents, dir);
	if (!ret)
		ret = -ENOMEM;
	if (ret < 0) {
		sg_free_table(sgt);
		return ret;
	}

	sgt->nents = ret;

	return 0;
}

void spi_unmap_buf(struct spi_controller *ctlr, struct device *dev,
		   struct sg_table *sgt, enum dma_data_direction dir)
{
	if (sgt->orig_nents) {
		dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir);
		sg_free_table(sgt);
		sgt->orig_nents = 0;
		sgt->nents = 0;
	}
}

static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
{
	struct device *tx_dev, *rx_dev;
	struct spi_transfer *xfer;
	int ret;

	if (!ctlr->can_dma)
		return 0;

	if (ctlr->dma_tx)
		tx_dev = ctlr->dma_tx->device->dev;
	else
		tx_dev = ctlr->dev.parent;

	if (ctlr->dma_rx)
		rx_dev = ctlr->dma_rx->device->dev;
	else
		rx_dev = ctlr->dev.parent;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		if (!ctlr->can_dma(ctlr, msg->spi, xfer))
			continue;

		if (xfer->tx_buf != NULL) {
			ret = spi_map_buf(ctlr, tx_dev, &xfer->tx_sg,
					  (void *)xfer->tx_buf, xfer->len,
					  DMA_TO_DEVICE);
			if (ret != 0)
				return ret;
		}

		if (xfer->rx_buf != NULL) {
			ret = spi_map_buf(ctlr, rx_dev, &xfer->rx_sg,
					  xfer->rx_buf, xfer->len,
					  DMA_FROM_DEVICE);
			if (ret != 0) {
				spi_unmap_buf(ctlr, tx_dev, &xfer->tx_sg,
					      DMA_TO_DEVICE);
				return ret;
			}
		}
	}

	ctlr->cur_msg_mapped = true;

	return 0;
}

static int __spi_unmap_msg(struct spi_controller *ctlr, struct spi_message *msg)
{
	struct spi_transfer *xfer;
	struct device *tx_dev, *rx_dev;

	if (!ctlr->cur_msg_mapped || !ctlr->can_dma)
		return 0;

	if (ctlr->dma_tx)
		tx_dev = ctlr->dma_tx->device->dev;
	else
		tx_dev = ctlr->dev.parent;

	if (ctlr->dma_rx)
		rx_dev = ctlr->dma_rx->device->dev;
	else
		rx_dev = ctlr->dev.parent;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		if (!ctlr->can_dma(ctlr, msg->spi, xfer))
			continue;

		spi_unmap_buf(ctlr, rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
		spi_unmap_buf(ctlr, tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
	}

	ctlr->cur_msg_mapped = false;

	return 0;
}
#else /* !CONFIG_HAS_DMA */
static inline int __spi_map_msg(struct spi_controller *ctlr,
				struct spi_message *msg)
{
	return 0;
}

static inline int __spi_unmap_msg(struct spi_controller *ctlr,
				  struct spi_message *msg)
{
	return 0;
}
#endif /* !CONFIG_HAS_DMA */

static inline int spi_unmap_msg(struct spi_controller *ctlr,
				struct spi_message *msg)
{
	struct spi_transfer *xfer;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		/*
		 * Restore tx_buf and rx_buf to NULL if they had been
		 * replaced with the controller's dummy buffers.
		 */
		if (xfer->tx_buf == ctlr->dummy_tx)
			xfer->tx_buf = NULL;
		if (xfer->rx_buf == ctlr->dummy_rx)
			xfer->rx_buf = NULL;
	}

	return __spi_unmap_msg(ctlr, msg);
}

static int spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
{
	struct spi_transfer *xfer;
	void *tmp;
	unsigned int max_tx, max_rx;

	if ((ctlr->flags & (SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX))
		&& !(msg->spi->mode & SPI_3WIRE)) {
		max_tx = 0;
		max_rx = 0;

		list_for_each_entry(xfer, &msg->transfers, transfer_list) {
			if ((ctlr->flags & SPI_CONTROLLER_MUST_TX) &&
			    !xfer->tx_buf)
				max_tx = max(xfer->len, max_tx);
			if ((ctlr->flags & SPI_CONTROLLER_MUST_RX) &&
			    !xfer->rx_buf)
				max_rx = max(xfer->len, max_rx);
		}

		if (max_tx) {
			tmp = krealloc(ctlr->dummy_tx, max_tx,
				       GFP_KERNEL | GFP_DMA);
			if (!tmp)
				return -ENOMEM;
			ctlr->dummy_tx = tmp;
			memset(tmp, 0, max_tx);
		}

		if (max_rx) {
			tmp = krealloc(ctlr->dummy_rx, max_rx,
				       GFP_KERNEL | GFP_DMA);
			if (!tmp)
				return -ENOMEM;
			ctlr->dummy_rx = tmp;
		}

		if (max_tx || max_rx) {
			list_for_each_entry(xfer, &msg->transfers,
					    transfer_list) {
				if (!xfer->len)
					continue;
				if (!xfer->tx_buf)
					xfer->tx_buf = ctlr->dummy_tx;
				if (!xfer->rx_buf)
					xfer->rx_buf = ctlr->dummy_rx;
			}
		}
	}

	return __spi_map_msg(ctlr, msg);
}

static int spi_transfer_wait(struct spi_controller *ctlr,
			     struct spi_message *msg,
			     struct spi_transfer *xfer)
{
	struct spi_statistics *statm = &ctlr->statistics;
	struct spi_statistics *stats = &msg->spi->statistics;
	u32 speed_hz = xfer->speed_hz;
	unsigned long long ms;

	if (spi_controller_is_slave(ctlr)) {
		if (wait_for_completion_interruptible(&ctlr->xfer_completion)) {
			dev_dbg(&msg->spi->dev, "SPI transfer interrupted\n");
			return -EINTR;
		}
	} else {
		if (!speed_hz)
			speed_hz = 100000;

		ms = 8LL * 1000LL * xfer->len;
		do_div(ms, speed_hz);
		ms += ms + 200; /* some tolerance */

		if (ms > UINT_MAX)
			ms = UINT_MAX;

		ms = wait_for_completion_timeout(&ctlr->xfer_completion,
						 msecs_to_jiffies(ms));

		if (ms == 0) {
			SPI_STATISTICS_INCREMENT_FIELD(statm, timedout);
			SPI_STATISTICS_INCREMENT_FIELD(stats, timedout);
			dev_err(&msg->spi->dev,
				"SPI transfer timed out\n");
			return -ETIMEDOUT;
		}
	}

	return 0;
}
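
/*
 * Worked example of the timeout above (illustrative): a 512 byte transfer
 * at 1 MHz spends 8 * 1000 * 512 / 1000000 = 4 ms on the wire; the
 * "ms += ms + 200" line doubles that and adds 200 ms of slack, so the
 * completion is waited on for roughly 208 ms before -ETIMEDOUT is
 * returned.
 */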

static void _spi_transfer_delay_ns(u32 ns)
{
	if (!ns)
		return;
	if (ns <= 1000) {
		ndelay(ns);
	} else {
		u32 us = DIV_ROUND_UP(ns, 1000);

		if (us <= 10)
			udelay(us);
		else
			usleep_range(us, us + DIV_ROUND_UP(us, 10));
	}
}

int spi_delay_to_ns(struct spi_delay *_delay, struct spi_transfer *xfer)
{
	u32 delay = _delay->value;
	u32 unit = _delay->unit;
	u32 hz;

	if (!delay)
		return 0;

	switch (unit) {
	case SPI_DELAY_UNIT_USECS:
		delay *= 1000;
		break;
	case SPI_DELAY_UNIT_NSECS: /* nothing to do here */
		break;
	case SPI_DELAY_UNIT_SCK:
		/* clock cycles need to be obtained from spi_transfer */
		if (!xfer)
			return -EINVAL;
		/* if there is no effective speed known, then approximate
		 * by underestimating with half the requested hz
		 */
		hz = xfer->effective_speed_hz ?: xfer->speed_hz / 2;
		if (!hz)
			return -EINVAL;
		delay *= DIV_ROUND_UP(1000000000, hz);
		break;
	default:
		return -EINVAL;
	}

	return delay;
}
EXPORT_SYMBOL_GPL(spi_delay_to_ns);

int spi_delay_exec(struct spi_delay *_delay, struct spi_transfer *xfer)
{
	int delay;

	might_sleep();

	if (!_delay)
		return -EINVAL;

	delay = spi_delay_to_ns(_delay, xfer);
	if (delay < 0)
		return delay;

	_spi_transfer_delay_ns(delay);

	return 0;
}
EXPORT_SYMBOL_GPL(spi_delay_exec);
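
/*
 * Illustrative sketch: describing a delay in clock cycles and executing it
 * with the helpers above. The one-cycle value is an arbitrary example.
 */
#if 0	/* example only */
static int example_one_sck_delay(struct spi_transfer *xfer)
{
	struct spi_delay delay = {
		.value	= 1,			/* one period ... */
		.unit	= SPI_DELAY_UNIT_SCK,	/* ... of the SPI clock */
	};

	/* Converted using the transfer's effective (or requested) speed */
	return spi_delay_exec(&delay, xfer);
}
#endif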

static void _spi_transfer_cs_change_delay(struct spi_message *msg,
					  struct spi_transfer *xfer)
{
	u32 delay = xfer->cs_change_delay.value;
	u32 unit = xfer->cs_change_delay.unit;
	int ret;

	/* return early on "fast" mode - for everything but USECS */
	if (!delay) {
		if (unit == SPI_DELAY_UNIT_USECS)
			_spi_transfer_delay_ns(10000);
		return;
	}

	ret = spi_delay_exec(&xfer->cs_change_delay, xfer);
	if (ret) {
		dev_err_once(&msg->spi->dev,
			     "Use of unsupported delay unit %i, using default of 10us\n",
			     unit);
		_spi_transfer_delay_ns(10000);
	}
}

/*
 * spi_transfer_one_message - Default implementation of transfer_one_message()
 *
 * This is a standard implementation of transfer_one_message() for
 * drivers which implement a transfer_one() operation. It provides
 * standard handling of delays and chip select management.
 */
static int spi_transfer_one_message(struct spi_controller *ctlr,
				    struct spi_message *msg)
{
	struct spi_transfer *xfer;
	bool keep_cs = false;
	int ret = 0;
	struct spi_statistics *statm = &ctlr->statistics;
	struct spi_statistics *stats = &msg->spi->statistics;

	spi_set_cs(msg->spi, true, false);

	SPI_STATISTICS_INCREMENT_FIELD(statm, messages);
	SPI_STATISTICS_INCREMENT_FIELD(stats, messages);

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		trace_spi_transfer_start(msg, xfer);

		spi_statistics_add_transfer_stats(statm, xfer, ctlr);
		spi_statistics_add_transfer_stats(stats, xfer, ctlr);

		if (!ctlr->ptp_sts_supported) {
			xfer->ptp_sts_word_pre = 0;
			ptp_read_system_prets(xfer->ptp_sts);
		}

		if ((xfer->tx_buf || xfer->rx_buf) && xfer->len) {
			reinit_completion(&ctlr->xfer_completion);

fallback_pio:
			ret = ctlr->transfer_one(ctlr, msg->spi, xfer);
			if (ret < 0) {
				if (ctlr->cur_msg_mapped &&
				   (xfer->error & SPI_TRANS_FAIL_NO_START)) {
					__spi_unmap_msg(ctlr, msg);
					ctlr->fallback = true;
					xfer->error &= ~SPI_TRANS_FAIL_NO_START;
					goto fallback_pio;
				}

				SPI_STATISTICS_INCREMENT_FIELD(statm,
							       errors);
				SPI_STATISTICS_INCREMENT_FIELD(stats,
							       errors);
				dev_err(&msg->spi->dev,
					"SPI transfer failed: %d\n", ret);
				goto out;
			}

			if (ret > 0) {
				ret = spi_transfer_wait(ctlr, msg, xfer);
				if (ret < 0)
					msg->status = ret;
			}
		} else {
			if (xfer->len)
				dev_err(&msg->spi->dev,
					"Bufferless transfer has length %u\n",
					xfer->len);
		}

		if (!ctlr->ptp_sts_supported) {
			ptp_read_system_postts(xfer->ptp_sts);
			xfer->ptp_sts_word_post = xfer->len;
		}

		trace_spi_transfer_stop(msg, xfer);

		if (msg->status != -EINPROGRESS)
			goto out;

		spi_transfer_delay_exec(xfer);

		if (xfer->cs_change) {
			if (list_is_last(&xfer->transfer_list,
					 &msg->transfers)) {
				keep_cs = true;
			} else {
				spi_set_cs(msg->spi, false, false);
				_spi_transfer_cs_change_delay(msg, xfer);
				spi_set_cs(msg->spi, true, false);
			}
		}

		msg->actual_length += xfer->len;
	}

out:
	if (ret != 0 || !keep_cs)
		spi_set_cs(msg->spi, false, false);

	if (msg->status == -EINPROGRESS)
		msg->status = ret;

	if (msg->status && ctlr->handle_err)
		ctlr->handle_err(ctlr, msg);

	spi_finalize_current_message(ctlr);

	return ret;
}

/**
 * spi_finalize_current_transfer - report completion of a transfer
 * @ctlr: the controller reporting completion
 *
 * Called by SPI drivers using the core transfer_one_message()
 * implementation to notify it that the current interrupt driven
 * transfer has finished and the next one may be scheduled.
 */
void spi_finalize_current_transfer(struct spi_controller *ctlr)
{
	complete(&ctlr->xfer_completion);
}
EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);
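
/*
 * Illustrative sketch: a transfer_one() based controller driver completing
 * an interrupt driven transfer. The hardware handling is hypothetical.
 */
#if 0	/* example only */
static irqreturn_t example_spi_irq(int irq, void *dev_id)
{
	struct spi_controller *ctlr = dev_id;

	/* ... drain the RX FIFO and ack the interrupt here ... */

	/* Lets spi_transfer_one_message() schedule the next transfer */
	spi_finalize_current_transfer(ctlr);

	return IRQ_HANDLED;
}
#endif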

static void spi_idle_runtime_pm(struct spi_controller *ctlr)
{
	if (ctlr->auto_runtime_pm) {
		pm_runtime_mark_last_busy(ctlr->dev.parent);
		pm_runtime_put_autosuspend(ctlr->dev.parent);
	}
}

/**
 * __spi_pump_messages - function which processes spi message queue
 * @ctlr: controller to process queue for
 * @in_kthread: true if we are in the context of the message pump thread
 *
 * This function checks if there is any spi message in the queue that
 * needs processing and if so call out to the driver to initialize hardware
 * and transfer each message.
 *
 * Note that it is called both from the kthread itself and also from
 * inside spi_sync(); the queue extraction handling at the top of the
 * function should deal with this safely.
 */
static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
{
	struct spi_transfer *xfer;
	struct spi_message *msg;
	bool was_busy = false;
	unsigned long flags;
	int ret;

	/* Lock queue */
	spin_lock_irqsave(&ctlr->queue_lock, flags);

	/* Make sure we are not already running a message */
	if (ctlr->cur_msg) {
		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
		return;
	}

	/* If another context is idling the device then defer */
	if (ctlr->idling) {
		kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
		return;
	}

	/* Check if the queue is idle */
	if (list_empty(&ctlr->queue) || !ctlr->running) {
		if (!ctlr->busy) {
			spin_unlock_irqrestore(&ctlr->queue_lock, flags);
			return;
		}

		/* Defer any non-atomic teardown to the thread */
		if (!in_kthread) {
			if (!ctlr->dummy_rx && !ctlr->dummy_tx &&
			    !ctlr->unprepare_transfer_hardware) {
				spi_idle_runtime_pm(ctlr);
				ctlr->busy = false;
				trace_spi_controller_idle(ctlr);
			} else {
				kthread_queue_work(ctlr->kworker,
						   &ctlr->pump_messages);
			}
			spin_unlock_irqrestore(&ctlr->queue_lock, flags);
			return;
		}

		ctlr->busy = false;
		ctlr->idling = true;
		spin_unlock_irqrestore(&ctlr->queue_lock, flags);

		kfree(ctlr->dummy_rx);
		ctlr->dummy_rx = NULL;
		kfree(ctlr->dummy_tx);
		ctlr->dummy_tx = NULL;
		if (ctlr->unprepare_transfer_hardware &&
		    ctlr->unprepare_transfer_hardware(ctlr))
			dev_err(&ctlr->dev,
				"failed to unprepare transfer hardware\n");
		spi_idle_runtime_pm(ctlr);
		trace_spi_controller_idle(ctlr);

		spin_lock_irqsave(&ctlr->queue_lock, flags);
		ctlr->idling = false;
		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
		return;
	}

	/* Extract head of queue */
	msg = list_first_entry(&ctlr->queue, struct spi_message, queue);
	ctlr->cur_msg = msg;

	list_del_init(&msg->queue);
	if (ctlr->busy)
		was_busy = true;
	else
		ctlr->busy = true;
	spin_unlock_irqrestore(&ctlr->queue_lock, flags);

	mutex_lock(&ctlr->io_mutex);

	if (!was_busy && ctlr->auto_runtime_pm) {
		ret = pm_runtime_get_sync(ctlr->dev.parent);
		if (ret < 0) {
			pm_runtime_put_noidle(ctlr->dev.parent);
			dev_err(&ctlr->dev, "Failed to power device: %d\n",
				ret);
			mutex_unlock(&ctlr->io_mutex);
			return;
		}
	}

	if (!was_busy)
		trace_spi_controller_busy(ctlr);

	if (!was_busy && ctlr->prepare_transfer_hardware) {
		ret = ctlr->prepare_transfer_hardware(ctlr);
		if (ret) {
			dev_err(&ctlr->dev,
				"failed to prepare transfer hardware: %d\n",
				ret);

			if (ctlr->auto_runtime_pm)
				pm_runtime_put(ctlr->dev.parent);

			msg->status = ret;
			spi_finalize_current_message(ctlr);

			mutex_unlock(&ctlr->io_mutex);
			return;
		}
	}

	trace_spi_message_start(msg);

	if (ctlr->prepare_message) {
		ret = ctlr->prepare_message(ctlr, msg);
		if (ret) {
			dev_err(&ctlr->dev, "failed to prepare message: %d\n",
				ret);
			msg->status = ret;
			spi_finalize_current_message(ctlr);
			goto out;
		}
		ctlr->cur_msg_prepared = true;
	}

	ret = spi_map_msg(ctlr, msg);
	if (ret) {
		msg->status = ret;
		spi_finalize_current_message(ctlr);
		goto out;
	}

	if (!ctlr->ptp_sts_supported && !ctlr->transfer_one) {
		list_for_each_entry(xfer, &msg->transfers, transfer_list) {
			xfer->ptp_sts_word_pre = 0;
			ptp_read_system_prets(xfer->ptp_sts);
		}
	}

	ret = ctlr->transfer_one_message(ctlr, msg);
	if (ret) {
		dev_err(&ctlr->dev,
			"failed to transfer one message from queue\n");
		goto out;
	}

out:
	mutex_unlock(&ctlr->io_mutex);

	/* Prod the scheduler in case transfer_one() was busy waiting */
	if (!ret)
		cond_resched();
}

/**
 * spi_pump_messages - kthread work function which processes spi message queue
 * @work: pointer to kthread work struct contained in the controller struct
 */
static void spi_pump_messages(struct kthread_work *work)
{
	struct spi_controller *ctlr =
		container_of(work, struct spi_controller, pump_messages);

	__spi_pump_messages(ctlr, true);
}

/**
 * spi_take_timestamp_pre - helper for drivers to collect the beginning of the
 *			    TX timestamp for the requested byte from the SPI
 *			    transfer. The frequency with which this function
 *			    must be called (once per word, once for the whole
 *			    transfer, once per batch of words etc) is arbitrary
 *			    as long as the @tx buffer offset is greater than or
 *			    equal to the requested byte at the time of the
 *			    call. The timestamp is only taken once, at the
 *			    first such call. It is assumed that the driver
 *			    advances its @tx buffer pointer monotonically.
 * @ctlr: Pointer to the spi_controller structure of the driver
 * @xfer: Pointer to the transfer being timestamped
 * @progress: How many words (not bytes) have been transferred so far
 * @irqs_off: If true, will disable IRQs and preemption for the duration of the
 *	      transfer, for less jitter in time measurement. Only compatible
 *	      with PIO drivers. If true, must follow up with
 *	      spi_take_timestamp_post, otherwise the system will crash.
 *	      WARNING: for fully predictable results, the CPU frequency must
 *	      also be under control (governor).
 */
void spi_take_timestamp_pre(struct spi_controller *ctlr,
			    struct spi_transfer *xfer,
			    size_t progress, bool irqs_off)
{
	if (!xfer->ptp_sts)
		return;

	if (xfer->timestamped)
		return;

	if (progress > xfer->ptp_sts_word_pre)
		return;

	/* Capture the resolution of the timestamp */
	xfer->ptp_sts_word_pre = progress;

	if (irqs_off) {
		local_irq_save(ctlr->irq_flags);
		preempt_disable();
	}

	ptp_read_system_prets(xfer->ptp_sts);
}
EXPORT_SYMBOL_GPL(spi_take_timestamp_pre);

/**
 * spi_take_timestamp_post - helper for drivers to collect the end of the
 *			     TX timestamp for the requested byte from the SPI
 *			     transfer. Can be called with an arbitrary
 *			     frequency: only the first call where @tx exceeds
 *			     or is equal to the requested word will be
 *			     timestamped.
 * @ctlr: Pointer to the spi_controller structure of the driver
 * @xfer: Pointer to the transfer being timestamped
 * @progress: How many words (not bytes) have been transferred so far
 * @irqs_off: If true, will re-enable IRQs and preemption for the local CPU.
 */
void spi_take_timestamp_post(struct spi_controller *ctlr,
			     struct spi_transfer *xfer,
			     size_t progress, bool irqs_off)
{
	if (!xfer->ptp_sts)
		return;

	if (xfer->timestamped)
		return;

	if (progress < xfer->ptp_sts_word_post)
		return;

	ptp_read_system_postts(xfer->ptp_sts);

	if (irqs_off) {
		local_irq_restore(ctlr->irq_flags);
		preempt_enable();
	}

	/* Capture the resolution of the timestamp */
	xfer->ptp_sts_word_post = progress;

	xfer->timestamped = true;
}
EXPORT_SYMBOL_GPL(spi_take_timestamp_post);
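
/*
 * Illustrative sketch: a PIO driver bracketing each word with the two
 * timestamping helpers above; only the word the core requested actually
 * gets timestamped. The FIFO access is hypothetical.
 */
#if 0	/* example only */
static void example_pio_transfer(struct spi_controller *ctlr,
				 struct spi_transfer *xfer)
{
	size_t i;

	for (i = 0; i < xfer->len; i++) {
		spi_take_timestamp_pre(ctlr, xfer, i, false);
		/* ... push byte i to the TX FIFO, wait, pop RX ... */
		spi_take_timestamp_post(ctlr, xfer, i + 1, false);
	}
}
#endif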

/**
 * spi_set_thread_rt - set the controller to pump at realtime priority
 * @ctlr: controller to boost priority of
 *
 * This can be called because the controller requested realtime priority
 * (by setting the ->rt value before calling spi_register_controller()) or
 * because a device on the bus said that its transfers needed realtime
 * priority.
 *
 * NOTE: at the moment if any device on a bus says it needs realtime then
 * the thread will be at realtime priority for all transfers on that
 * controller. If this eventually becomes a problem we may see if we can
 * find a way to boost the priority only temporarily during relevant
 * transfers.
 */
static void spi_set_thread_rt(struct spi_controller *ctlr)
{
	dev_info(&ctlr->dev,
		 "will run message pump with realtime priority\n");
	sched_set_fifo(ctlr->kworker->task);
}

static int spi_init_queue(struct spi_controller *ctlr)
{
	ctlr->running = false;
	ctlr->busy = false;

	ctlr->kworker = kthread_create_worker(0, dev_name(&ctlr->dev));
	if (IS_ERR(ctlr->kworker)) {
		dev_err(&ctlr->dev, "failed to create message pump kworker\n");
		return PTR_ERR(ctlr->kworker);
	}

	kthread_init_work(&ctlr->pump_messages, spi_pump_messages);

	/*
	 * Controller config will indicate if this controller should run the
	 * message pump with high (realtime) priority to reduce the transfer
	 * latency on the bus by minimising the delay between a transfer
	 * request and the scheduling of the message pump thread. Without this
	 * setting the message pump thread will remain at default priority.
	 */
	if (ctlr->rt)
		spi_set_thread_rt(ctlr);

	return 0;
}

/**
 * spi_get_next_queued_message() - called by driver to check for queued
 * messages
 * @ctlr: the controller to check for queued messages
 *
 * If there are more messages in the queue, the next message is returned from
 * this call.
 *
 * Return: the next message in the queue, else NULL if the queue is empty.
 */
struct spi_message *spi_get_next_queued_message(struct spi_controller *ctlr)
{
	struct spi_message *next;
	unsigned long flags;

	/* get a pointer to the next message, if any */
	spin_lock_irqsave(&ctlr->queue_lock, flags);
	next = list_first_entry_or_null(&ctlr->queue, struct spi_message,
					queue);
	spin_unlock_irqrestore(&ctlr->queue_lock, flags);

	return next;
}
EXPORT_SYMBOL_GPL(spi_get_next_queued_message);
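
/*
 * Illustrative sketch: a driver that implements its own
 * transfer_one_message() peeking at the queue, e.g. to keep its FIFO
 * primed across messages. Hypothetical helper.
 */
#if 0	/* example only */
static bool example_more_work_pending(struct spi_controller *ctlr)
{
	/* NULL when the queue is empty; the message is not dequeued */
	return spi_get_next_queued_message(ctlr) != NULL;
}
#endif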
1712
1713 /**
1714 * spi_finalize_current_message() - the current message is complete
1715 * @ctlr: the controller to return the message to
1716 *
1717 * Called by the driver to notify the core that the message in the front of the
1718 * queue is complete and can be removed from the queue.
1719 */
spi_finalize_current_message(struct spi_controller * ctlr)1720 void spi_finalize_current_message(struct spi_controller *ctlr)
1721 {
1722 struct spi_transfer *xfer;
1723 struct spi_message *mesg;
1724 unsigned long flags;
1725 int ret;
1726
1727 spin_lock_irqsave(&ctlr->queue_lock, flags);
1728 mesg = ctlr->cur_msg;
1729 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1730
1731 if (!ctlr->ptp_sts_supported && !ctlr->transfer_one) {
1732 list_for_each_entry(xfer, &mesg->transfers, transfer_list) {
1733 ptp_read_system_postts(xfer->ptp_sts);
1734 xfer->ptp_sts_word_post = xfer->len;
1735 }
1736 }
1737
1738 if (unlikely(ctlr->ptp_sts_supported))
1739 list_for_each_entry(xfer, &mesg->transfers, transfer_list)
1740 WARN_ON_ONCE(xfer->ptp_sts && !xfer->timestamped);
1741
1742 spi_unmap_msg(ctlr, mesg);
1743
1744 /* In the prepare_messages callback the spi bus has the opportunity to
1745 * split a transfer to smaller chunks.
1746 * Release splited transfers here since spi_map_msg is done on the
1747 * splited transfers.
1748 */
1749 spi_res_release(ctlr, mesg);
1750
1751 if (ctlr->cur_msg_prepared && ctlr->unprepare_message) {
1752 ret = ctlr->unprepare_message(ctlr, mesg);
1753 if (ret) {
1754 dev_err(&ctlr->dev, "failed to unprepare message: %d\n",
1755 ret);
1756 }
1757 }
1758
1759 spin_lock_irqsave(&ctlr->queue_lock, flags);
1760 ctlr->cur_msg = NULL;
1761 ctlr->cur_msg_prepared = false;
1762 ctlr->fallback = false;
1763 kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
1764 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1765
1766 trace_spi_message_done(mesg);
1767
1768 mesg->state = NULL;
1769 if (mesg->complete)
1770 mesg->complete(mesg->context);
1771 }
1772 EXPORT_SYMBOL_GPL(spi_finalize_current_message);
1773
spi_start_queue(struct spi_controller * ctlr)1774 static int spi_start_queue(struct spi_controller *ctlr)
1775 {
1776 unsigned long flags;
1777
1778 spin_lock_irqsave(&ctlr->queue_lock, flags);
1779
1780 if (ctlr->running || ctlr->busy) {
1781 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1782 return -EBUSY;
1783 }
1784
1785 ctlr->running = true;
1786 ctlr->cur_msg = NULL;
1787 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1788
1789 kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
1790
1791 return 0;
1792 }
1793
spi_stop_queue(struct spi_controller * ctlr)1794 static int spi_stop_queue(struct spi_controller *ctlr)
1795 {
1796 unsigned long flags;
1797 unsigned limit = 500;
1798 int ret = 0;
1799
1800 spin_lock_irqsave(&ctlr->queue_lock, flags);
1801
1802 /*
1803 * This is a bit lame, but is optimized for the common execution path.
1804 * A wait_queue on the ctlr->busy could be used, but then the common
1805 * execution path (pump_messages) would be required to call wake_up or
1806 * friends on every SPI message. Do this instead.
1807 */
1808 while ((!list_empty(&ctlr->queue) || ctlr->busy) && limit--) {
1809 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1810 usleep_range(10000, 11000);
1811 spin_lock_irqsave(&ctlr->queue_lock, flags);
1812 }
1813
1814 if (!list_empty(&ctlr->queue) || ctlr->busy)
1815 ret = -EBUSY;
1816 else
1817 ctlr->running = false;
1818
1819 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1820
1821 if (ret) {
1822 dev_warn(&ctlr->dev, "could not stop message queue\n");
1823 return ret;
1824 }
1825 return ret;
1826 }
1827
1828 static int spi_destroy_queue(struct spi_controller *ctlr)
1829 {
1830 int ret;
1831
1832 ret = spi_stop_queue(ctlr);
1833
1834 /*
1835 * kthread_flush_worker will block until all work is done.
1836 * If the reason that stop_queue timed out is that the work will never
1837 * finish, then it does no good to call flush/stop thread, so
1838 * return anyway.
1839 */
1840 if (ret) {
1841 dev_err(&ctlr->dev, "problem destroying queue\n");
1842 return ret;
1843 }
1844
1845 kthread_destroy_worker(ctlr->kworker);
1846
1847 return 0;
1848 }
1849
1850 static int __spi_queued_transfer(struct spi_device *spi,
1851 struct spi_message *msg,
1852 bool need_pump)
1853 {
1854 struct spi_controller *ctlr = spi->controller;
1855 unsigned long flags;
1856
1857 spin_lock_irqsave(&ctlr->queue_lock, flags);
1858
1859 if (!ctlr->running) {
1860 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1861 return -ESHUTDOWN;
1862 }
1863 msg->actual_length = 0;
1864 msg->status = -EINPROGRESS;
1865
1866 list_add_tail(&msg->queue, &ctlr->queue);
1867 if (!ctlr->busy && need_pump)
1868 kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
1869
1870 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1871 return 0;
1872 }
1873
1874 /**
1875 * spi_queued_transfer - transfer function for queued transfers
1876 * @spi: spi device which is requesting transfer
1877  * @msg: spi message which is to be handled; it is queued to the driver queue
1878 *
1879 * Return: zero on success, else a negative error code.
1880 */
1881 static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
1882 {
1883 return __spi_queued_transfer(spi, msg, true);
1884 }
1885
1886 static int spi_controller_initialize_queue(struct spi_controller *ctlr)
1887 {
1888 int ret;
1889
1890 ctlr->transfer = spi_queued_transfer;
1891 if (!ctlr->transfer_one_message)
1892 ctlr->transfer_one_message = spi_transfer_one_message;
1893
1894 /* Initialize and start queue */
1895 ret = spi_init_queue(ctlr);
1896 if (ret) {
1897 dev_err(&ctlr->dev, "problem initializing queue\n");
1898 goto err_init_queue;
1899 }
1900 ctlr->queued = true;
1901 ret = spi_start_queue(ctlr);
1902 if (ret) {
1903 dev_err(&ctlr->dev, "problem starting queue\n");
1904 goto err_start_queue;
1905 }
1906
1907 return 0;
1908
1909 err_start_queue:
1910 spi_destroy_queue(ctlr);
1911 err_init_queue:
1912 return ret;
1913 }
1914
1915 /**
1916  * spi_flush_queue - Send all pending messages in the queue from the caller's
1917  * context
1918  * @ctlr: controller to process queue for
1919  *
1920  * This should be used when one wants to ensure all pending messages have been
1921  * sent before doing something. It is used by the spi-mem code to make sure SPI
1922  * memory operations do not preempt regular SPI transfers that have been queued
1923  * before the spi-mem operation.
1924 */
1925 void spi_flush_queue(struct spi_controller *ctlr)
1926 {
1927 if (ctlr->transfer == spi_queued_transfer)
1928 __spi_pump_messages(ctlr, false);
1929 }
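/*
 * Illustrative use (the real caller lives in drivers/spi/spi-mem.c): before
 * executing a memory operation, drain anything already queued so the memory
 * operation cannot overtake it (hedged sketch):
 *
 *	spi_flush_queue(ctlr);
 *	ret = ctlr->mem_ops->exec_op(mem, op);
 */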
1930
1931 /*-------------------------------------------------------------------------*/
1932
1933 #if defined(CONFIG_OF)
1934 static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi,
1935 struct device_node *nc)
1936 {
1937 u32 value;
1938 int rc;
1939
1940 /* Mode (clock phase/polarity/etc.) */
1941 if (of_property_read_bool(nc, "spi-cpha"))
1942 spi->mode |= SPI_CPHA;
1943 if (of_property_read_bool(nc, "spi-cpol"))
1944 spi->mode |= SPI_CPOL;
1945 if (of_property_read_bool(nc, "spi-3wire"))
1946 spi->mode |= SPI_3WIRE;
1947 if (of_property_read_bool(nc, "spi-lsb-first"))
1948 spi->mode |= SPI_LSB_FIRST;
1949 if (of_property_read_bool(nc, "spi-cs-high"))
1950 spi->mode |= SPI_CS_HIGH;
1951
1952 /* Device DUAL/QUAD mode */
1953 if (!of_property_read_u32(nc, "spi-tx-bus-width", &value)) {
1954 switch (value) {
1955 case 1:
1956 break;
1957 case 2:
1958 spi->mode |= SPI_TX_DUAL;
1959 break;
1960 case 4:
1961 spi->mode |= SPI_TX_QUAD;
1962 break;
1963 case 8:
1964 spi->mode |= SPI_TX_OCTAL;
1965 break;
1966 default:
1967 dev_warn(&ctlr->dev,
1968 "spi-tx-bus-width %d not supported\n",
1969 value);
1970 break;
1971 }
1972 }
1973
1974 if (!of_property_read_u32(nc, "spi-rx-bus-width", &value)) {
1975 switch (value) {
1976 case 1:
1977 break;
1978 case 2:
1979 spi->mode |= SPI_RX_DUAL;
1980 break;
1981 case 4:
1982 spi->mode |= SPI_RX_QUAD;
1983 break;
1984 case 8:
1985 spi->mode |= SPI_RX_OCTAL;
1986 break;
1987 default:
1988 dev_warn(&ctlr->dev,
1989 "spi-rx-bus-width %d not supported\n",
1990 value);
1991 break;
1992 }
1993 }
1994
1995 if (spi_controller_is_slave(ctlr)) {
1996 if (!of_node_name_eq(nc, "slave")) {
1997 dev_err(&ctlr->dev, "%pOF is not called 'slave'\n",
1998 nc);
1999 return -EINVAL;
2000 }
2001 return 0;
2002 }
2003
2004 /* Device address */
2005 rc = of_property_read_u32(nc, "reg", &value);
2006 if (rc) {
2007 dev_err(&ctlr->dev, "%pOF has no valid 'reg' property (%d)\n",
2008 nc, rc);
2009 return rc;
2010 }
2011 spi->chip_select = value;
2012
2013 /* Device speed */
2014 if (!of_property_read_u32(nc, "spi-max-frequency", &value))
2015 spi->max_speed_hz = value;
2016
2017 return 0;
2018 }
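/*
 * For reference, a device tree child node exercising the properties parsed
 * above might look like the following illustrative fragment (node name,
 * compatible string and values are examples only):
 *
 *	flash@0 {
 *		compatible = "jedec,spi-nor";
 *		reg = <0>;			// chip select 0
 *		spi-max-frequency = <20000000>;
 *		spi-cpol;
 *		spi-cpha;
 *		spi-rx-bus-width = <4>;		// SPI_RX_QUAD
 *	};
 */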
2019
2020 static struct spi_device *
2021 of_register_spi_device(struct spi_controller *ctlr, struct device_node *nc)
2022 {
2023 struct spi_device *spi;
2024 int rc;
2025
2026 /* Alloc an spi_device */
2027 spi = spi_alloc_device(ctlr);
2028 if (!spi) {
2029 dev_err(&ctlr->dev, "spi_device alloc error for %pOF\n", nc);
2030 rc = -ENOMEM;
2031 goto err_out;
2032 }
2033
2034 /* Select device driver */
2035 rc = of_modalias_node(nc, spi->modalias,
2036 sizeof(spi->modalias));
2037 if (rc < 0) {
2038 dev_err(&ctlr->dev, "cannot find modalias for %pOF\n", nc);
2039 goto err_out;
2040 }
2041
2042 rc = of_spi_parse_dt(ctlr, spi, nc);
2043 if (rc)
2044 goto err_out;
2045
2046 /* Store a pointer to the node in the device structure */
2047 of_node_get(nc);
2048 spi->dev.of_node = nc;
2049 spi->dev.fwnode = of_fwnode_handle(nc);
2050
2051 /* Register the new device */
2052 rc = spi_add_device(spi);
2053 if (rc) {
2054 dev_err(&ctlr->dev, "spi_device register error %pOF\n", nc);
2055 goto err_of_node_put;
2056 }
2057
2058 return spi;
2059
2060 err_of_node_put:
2061 of_node_put(nc);
2062 err_out:
2063 spi_dev_put(spi);
2064 return ERR_PTR(rc);
2065 }
2066
2067 /**
2068 * of_register_spi_devices() - Register child devices onto the SPI bus
2069 * @ctlr: Pointer to spi_controller device
2070 *
2071  * Registers an spi_device for each child node of the controller node that
2072  * represents a valid SPI slave.
2073 */
2074 static void of_register_spi_devices(struct spi_controller *ctlr)
2075 {
2076 struct spi_device *spi;
2077 struct device_node *nc;
2078
2079 if (!ctlr->dev.of_node)
2080 return;
2081
2082 for_each_available_child_of_node(ctlr->dev.of_node, nc) {
2083 if (of_node_test_and_set_flag(nc, OF_POPULATED))
2084 continue;
2085 spi = of_register_spi_device(ctlr, nc);
2086 if (IS_ERR(spi)) {
2087 dev_warn(&ctlr->dev,
2088 "Failed to create SPI device for %pOF\n", nc);
2089 of_node_clear_flag(nc, OF_POPULATED);
2090 }
2091 }
2092 }
2093 #else
2094 static void of_register_spi_devices(struct spi_controller *ctlr) { }
2095 #endif
2096
2097 #ifdef CONFIG_ACPI
2098 struct acpi_spi_lookup {
2099 struct spi_controller *ctlr;
2100 u32 max_speed_hz;
2101 u32 mode;
2102 int irq;
2103 u8 bits_per_word;
2104 u8 chip_select;
2105 };
2106
2107 static void acpi_spi_parse_apple_properties(struct acpi_device *dev,
2108 struct acpi_spi_lookup *lookup)
2109 {
2110 const union acpi_object *obj;
2111
2112 if (!x86_apple_machine)
2113 return;
2114
2115 if (!acpi_dev_get_property(dev, "spiSclkPeriod", ACPI_TYPE_BUFFER, &obj)
2116 && obj->buffer.length >= 4)
2117 lookup->max_speed_hz = NSEC_PER_SEC / *(u32 *)obj->buffer.pointer;
2118
2119 if (!acpi_dev_get_property(dev, "spiWordSize", ACPI_TYPE_BUFFER, &obj)
2120 && obj->buffer.length == 8)
2121 lookup->bits_per_word = *(u64 *)obj->buffer.pointer;
2122
2123 if (!acpi_dev_get_property(dev, "spiBitOrder", ACPI_TYPE_BUFFER, &obj)
2124 && obj->buffer.length == 8 && !*(u64 *)obj->buffer.pointer)
2125 lookup->mode |= SPI_LSB_FIRST;
2126
2127 if (!acpi_dev_get_property(dev, "spiSPO", ACPI_TYPE_BUFFER, &obj)
2128 && obj->buffer.length == 8 && *(u64 *)obj->buffer.pointer)
2129 lookup->mode |= SPI_CPOL;
2130
2131 if (!acpi_dev_get_property(dev, "spiSPH", ACPI_TYPE_BUFFER, &obj)
2132 && obj->buffer.length == 8 && *(u64 *)obj->buffer.pointer)
2133 lookup->mode |= SPI_CPHA;
2134 }
2135
2136 static int acpi_spi_add_resource(struct acpi_resource *ares, void *data)
2137 {
2138 struct acpi_spi_lookup *lookup = data;
2139 struct spi_controller *ctlr = lookup->ctlr;
2140
2141 if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
2142 struct acpi_resource_spi_serialbus *sb;
2143 acpi_handle parent_handle;
2144 acpi_status status;
2145
2146 sb = &ares->data.spi_serial_bus;
2147 if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_SPI) {
2148
2149 status = acpi_get_handle(NULL,
2150 sb->resource_source.string_ptr,
2151 &parent_handle);
2152
2153 if (ACPI_FAILURE(status) ||
2154 ACPI_HANDLE(ctlr->dev.parent) != parent_handle)
2155 return -ENODEV;
2156
2157 /*
2158 * ACPI DeviceSelection numbering is handled by the
2159 * host controller driver in Windows and can vary
2160 * from driver to driver. In Linux we always expect
2161 * 0 .. max - 1 so we need to ask the driver to
2162 * translate between the two schemes.
2163 */
2164 if (ctlr->fw_translate_cs) {
2165 int cs = ctlr->fw_translate_cs(ctlr,
2166 sb->device_selection);
2167 if (cs < 0)
2168 return cs;
2169 lookup->chip_select = cs;
2170 } else {
2171 lookup->chip_select = sb->device_selection;
2172 }
2173
2174 lookup->max_speed_hz = sb->connection_speed;
2175 lookup->bits_per_word = sb->data_bit_length;
2176
2177 if (sb->clock_phase == ACPI_SPI_SECOND_PHASE)
2178 lookup->mode |= SPI_CPHA;
2179 if (sb->clock_polarity == ACPI_SPI_START_HIGH)
2180 lookup->mode |= SPI_CPOL;
2181 if (sb->device_polarity == ACPI_SPI_ACTIVE_HIGH)
2182 lookup->mode |= SPI_CS_HIGH;
2183 }
2184 } else if (lookup->irq < 0) {
2185 struct resource r;
2186
2187 if (acpi_dev_resource_interrupt(ares, 0, &r))
2188 lookup->irq = r.start;
2189 }
2190
2191 /* Always tell the ACPI core to skip this resource */
2192 return 1;
2193 }
2194
2195 static acpi_status acpi_register_spi_device(struct spi_controller *ctlr,
2196 struct acpi_device *adev)
2197 {
2198 acpi_handle parent_handle = NULL;
2199 struct list_head resource_list;
2200 struct acpi_spi_lookup lookup = {};
2201 struct spi_device *spi;
2202 int ret;
2203
2204 if (acpi_bus_get_status(adev) || !adev->status.present ||
2205 acpi_device_enumerated(adev))
2206 return AE_OK;
2207
2208 lookup.ctlr = ctlr;
2209 lookup.irq = -1;
2210
2211 INIT_LIST_HEAD(&resource_list);
2212 ret = acpi_dev_get_resources(adev, &resource_list,
2213 acpi_spi_add_resource, &lookup);
2214 acpi_dev_free_resource_list(&resource_list);
2215
2216 if (ret < 0)
2217 /* found SPI in _CRS but it points to another controller */
2218 return AE_OK;
2219
2220 if (!lookup.max_speed_hz &&
2221 !ACPI_FAILURE(acpi_get_parent(adev->handle, &parent_handle)) &&
2222 ACPI_HANDLE(ctlr->dev.parent) == parent_handle) {
2223 /* Apple does not use _CRS but nested devices for SPI slaves */
2224 acpi_spi_parse_apple_properties(adev, &lookup);
2225 }
2226
2227 if (!lookup.max_speed_hz)
2228 return AE_OK;
2229
2230 spi = spi_alloc_device(ctlr);
2231 if (!spi) {
2232 dev_err(&ctlr->dev, "failed to allocate SPI device for %s\n",
2233 dev_name(&adev->dev));
2234 return AE_NO_MEMORY;
2235 }
2236
2237
2238 ACPI_COMPANION_SET(&spi->dev, adev);
2239 spi->max_speed_hz = lookup.max_speed_hz;
2240 spi->mode |= lookup.mode;
2241 spi->irq = lookup.irq;
2242 spi->bits_per_word = lookup.bits_per_word;
2243 spi->chip_select = lookup.chip_select;
2244
2245 acpi_set_modalias(adev, acpi_device_hid(adev), spi->modalias,
2246 sizeof(spi->modalias));
2247
2248 if (spi->irq < 0)
2249 spi->irq = acpi_dev_gpio_irq_get(adev, 0);
2250
2251 acpi_device_set_enumerated(adev);
2252
2253 adev->power.flags.ignore_parent = true;
2254 if (spi_add_device(spi)) {
2255 adev->power.flags.ignore_parent = false;
2256 dev_err(&ctlr->dev, "failed to add SPI device %s from ACPI\n",
2257 dev_name(&adev->dev));
2258 spi_dev_put(spi);
2259 }
2260
2261 return AE_OK;
2262 }
2263
2264 static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level,
2265 void *data, void **return_value)
2266 {
2267 struct spi_controller *ctlr = data;
2268 struct acpi_device *adev;
2269
2270 if (acpi_bus_get_device(handle, &adev))
2271 return AE_OK;
2272
2273 return acpi_register_spi_device(ctlr, adev);
2274 }
2275
2276 #define SPI_ACPI_ENUMERATE_MAX_DEPTH 32
2277
2278 static void acpi_register_spi_devices(struct spi_controller *ctlr)
2279 {
2280 acpi_status status;
2281 acpi_handle handle;
2282
2283 handle = ACPI_HANDLE(ctlr->dev.parent);
2284 if (!handle)
2285 return;
2286
2287 status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
2288 SPI_ACPI_ENUMERATE_MAX_DEPTH,
2289 acpi_spi_add_device, NULL, ctlr, NULL);
2290 if (ACPI_FAILURE(status))
2291 dev_warn(&ctlr->dev, "failed to enumerate SPI slaves\n");
2292 }
2293 #else
2294 static inline void acpi_register_spi_devices(struct spi_controller *ctlr) {}
2295 #endif /* CONFIG_ACPI */
2296
2297 static void spi_controller_release(struct device *dev)
2298 {
2299 struct spi_controller *ctlr;
2300
2301 ctlr = container_of(dev, struct spi_controller, dev);
2302 kfree(ctlr);
2303 }
2304
2305 static struct class spi_master_class = {
2306 .name = "spi_master",
2307 .owner = THIS_MODULE,
2308 .dev_release = spi_controller_release,
2309 .dev_groups = spi_master_groups,
2310 };
2311
2312 #ifdef CONFIG_SPI_SLAVE
2313 /**
2314 * spi_slave_abort - abort the ongoing transfer request on an SPI slave
2315 * controller
2316 * @spi: device used for the current transfer
2317 */
2318 int spi_slave_abort(struct spi_device *spi)
2319 {
2320 struct spi_controller *ctlr = spi->controller;
2321
2322 if (spi_controller_is_slave(ctlr) && ctlr->slave_abort)
2323 return ctlr->slave_abort(ctlr);
2324
2325 return -ENOTSUPP;
2326 }
2327 EXPORT_SYMBOL_GPL(spi_slave_abort);
2328
2329 static int match_true(struct device *dev, void *data)
2330 {
2331 return 1;
2332 }
2333
2334 static ssize_t slave_show(struct device *dev, struct device_attribute *attr,
2335 char *buf)
2336 {
2337 struct spi_controller *ctlr = container_of(dev, struct spi_controller,
2338 dev);
2339 struct device *child;
2340
2341 child = device_find_child(&ctlr->dev, NULL, match_true);
2342 return sprintf(buf, "%s\n",
2343 child ? to_spi_device(child)->modalias : NULL);
2344 }
2345
2346 static ssize_t slave_store(struct device *dev, struct device_attribute *attr,
2347 const char *buf, size_t count)
2348 {
2349 struct spi_controller *ctlr = container_of(dev, struct spi_controller,
2350 dev);
2351 struct spi_device *spi;
2352 struct device *child;
2353 char name[32];
2354 int rc;
2355
2356 rc = sscanf(buf, "%31s", name);
2357 if (rc != 1 || !name[0])
2358 return -EINVAL;
2359
2360 child = device_find_child(&ctlr->dev, NULL, match_true);
2361 if (child) {
2362 /* Remove registered slave */
2363 device_unregister(child);
2364 put_device(child);
2365 }
2366
2367 if (strcmp(name, "(null)")) {
2368 /* Register new slave */
2369 spi = spi_alloc_device(ctlr);
2370 if (!spi)
2371 return -ENOMEM;
2372
2373 strlcpy(spi->modalias, name, sizeof(spi->modalias));
2374
2375 rc = spi_add_device(spi);
2376 if (rc) {
2377 spi_dev_put(spi);
2378 return rc;
2379 }
2380 }
2381
2382 return count;
2383 }
2384
2385 static DEVICE_ATTR_RW(slave);
2386
2387 static struct attribute *spi_slave_attrs[] = {
2388 &dev_attr_slave.attr,
2389 NULL,
2390 };
2391
2392 static const struct attribute_group spi_slave_group = {
2393 .attrs = spi_slave_attrs,
2394 };
2395
2396 static const struct attribute_group *spi_slave_groups[] = {
2397 &spi_controller_statistics_group,
2398 &spi_slave_group,
2399 NULL,
2400 };
2401
2402 static struct class spi_slave_class = {
2403 .name = "spi_slave",
2404 .owner = THIS_MODULE,
2405 .dev_release = spi_controller_release,
2406 .dev_groups = spi_slave_groups,
2407 };
2408 #else
2409 extern struct class spi_slave_class; /* dummy */
2410 #endif
2411
2412 /**
2413 * __spi_alloc_controller - allocate an SPI master or slave controller
2414 * @dev: the controller, possibly using the platform_bus
2415 * @size: how much zeroed driver-private data to allocate; the pointer to this
2416 * memory is in the driver_data field of the returned device, accessible
2417 * with spi_controller_get_devdata(); the memory is cacheline aligned;
2418 * drivers granting DMA access to portions of their private data need to
2419 * round up @size using ALIGN(size, dma_get_cache_alignment()).
2420 * @slave: flag indicating whether to allocate an SPI master (false) or SPI
2421 * slave (true) controller
2422 * Context: can sleep
2423 *
2424 * This call is used only by SPI controller drivers, which are the
2425 * only ones directly touching chip registers. It's how they allocate
2426 * an spi_controller structure, prior to calling spi_register_controller().
2427 *
2428 * This must be called from context that can sleep.
2429 *
2430 * The caller is responsible for assigning the bus number and initializing the
2431 * controller's methods before calling spi_register_controller(); and (after
2432 * errors adding the device) calling spi_controller_put() to prevent a memory
2433 * leak.
2434 *
2435 * Return: the SPI controller structure on success, else NULL.
2436 */
2437 struct spi_controller *__spi_alloc_controller(struct device *dev,
2438 unsigned int size, bool slave)
2439 {
2440 struct spi_controller *ctlr;
2441 size_t ctlr_size = ALIGN(sizeof(*ctlr), dma_get_cache_alignment());
2442
2443 if (!dev)
2444 return NULL;
2445
2446 ctlr = kzalloc(size + ctlr_size, GFP_KERNEL);
2447 if (!ctlr)
2448 return NULL;
2449
2450 device_initialize(&ctlr->dev);
2451 ctlr->bus_num = -1;
2452 ctlr->num_chipselect = 1;
2453 ctlr->slave = slave;
2454 if (IS_ENABLED(CONFIG_SPI_SLAVE) && slave)
2455 ctlr->dev.class = &spi_slave_class;
2456 else
2457 ctlr->dev.class = &spi_master_class;
2458 ctlr->dev.parent = dev;
2459 pm_suspend_ignore_children(&ctlr->dev, true);
2460 spi_controller_set_devdata(ctlr, (void *)ctlr + ctlr_size);
2461
2462 return ctlr;
2463 }
2464 EXPORT_SYMBOL_GPL(__spi_alloc_controller);
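/*
 * Typical use via the spi_alloc_master() wrapper in a controller driver's
 * probe() (hedged sketch; struct foo_priv is an assumed driver-private
 * type):
 *
 *	ctlr = spi_alloc_master(&pdev->dev, sizeof(struct foo_priv));
 *	if (!ctlr)
 *		return -ENOMEM;
 *	priv = spi_controller_get_devdata(ctlr);
 *	ctlr->num_chipselect = 4;
 */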
2465
2466 static void devm_spi_release_controller(struct device *dev, void *ctlr)
2467 {
2468 spi_controller_put(*(struct spi_controller **)ctlr);
2469 }
2470
2471 /**
2472 * __devm_spi_alloc_controller - resource-managed __spi_alloc_controller()
2473 * @dev: physical device of SPI controller
2474 * @size: how much zeroed driver-private data to allocate
2475 * @slave: whether to allocate an SPI master (false) or SPI slave (true)
2476 * Context: can sleep
2477 *
2478 * Allocate an SPI controller and automatically release a reference on it
2479 * when @dev is unbound from its driver. Drivers are thus relieved from
2480 * having to call spi_controller_put().
2481 *
2482 * The arguments to this function are identical to __spi_alloc_controller().
2483 *
2484 * Return: the SPI controller structure on success, else NULL.
2485 */
2486 struct spi_controller *__devm_spi_alloc_controller(struct device *dev,
2487 unsigned int size,
2488 bool slave)
2489 {
2490 struct spi_controller **ptr, *ctlr;
2491
2492 ptr = devres_alloc(devm_spi_release_controller, sizeof(*ptr),
2493 GFP_KERNEL);
2494 if (!ptr)
2495 return NULL;
2496
2497 ctlr = __spi_alloc_controller(dev, size, slave);
2498 if (ctlr) {
2499 ctlr->devm_allocated = true;
2500 *ptr = ctlr;
2501 devres_add(dev, ptr);
2502 } else {
2503 devres_free(ptr);
2504 }
2505
2506 return ctlr;
2507 }
2508 EXPORT_SYMBOL_GPL(__devm_spi_alloc_controller);
2509
2510 #ifdef CONFIG_OF
2511 static int of_spi_get_gpio_numbers(struct spi_controller *ctlr)
2512 {
2513 int nb, i, *cs;
2514 struct device_node *np = ctlr->dev.of_node;
2515
2516 if (!np)
2517 return 0;
2518
2519 nb = of_gpio_named_count(np, "cs-gpios");
2520 ctlr->num_chipselect = max_t(int, nb, ctlr->num_chipselect);
2521
2522 /* Return error only for an incorrectly formed cs-gpios property */
2523 if (nb == 0 || nb == -ENOENT)
2524 return 0;
2525 else if (nb < 0)
2526 return nb;
2527
2528 cs = devm_kcalloc(&ctlr->dev, ctlr->num_chipselect, sizeof(int),
2529 GFP_KERNEL);
2530 ctlr->cs_gpios = cs;
2531
2532 if (!ctlr->cs_gpios)
2533 return -ENOMEM;
2534
2535 for (i = 0; i < ctlr->num_chipselect; i++)
2536 cs[i] = -ENOENT;
2537
2538 for (i = 0; i < nb; i++)
2539 cs[i] = of_get_named_gpio(np, "cs-gpios", i);
2540
2541 return 0;
2542 }
2543 #else
2544 static int of_spi_get_gpio_numbers(struct spi_controller *ctlr)
2545 {
2546 return 0;
2547 }
2548 #endif
2549
2550 /**
2551 * spi_get_gpio_descs() - grab chip select GPIOs for the master
2552 * @ctlr: The SPI master to grab GPIO descriptors for
2553 */
2554 static int spi_get_gpio_descs(struct spi_controller *ctlr)
2555 {
2556 int nb, i;
2557 struct gpio_desc **cs;
2558 struct device *dev = &ctlr->dev;
2559 unsigned long native_cs_mask = 0;
2560 unsigned int num_cs_gpios = 0;
2561
2562 nb = gpiod_count(dev, "cs");
2563 ctlr->num_chipselect = max_t(int, nb, ctlr->num_chipselect);
2564
2565 /* No GPIOs at all is fine, else return the error */
2566 if (nb == 0 || nb == -ENOENT)
2567 return 0;
2568 else if (nb < 0)
2569 return nb;
2570
2571 cs = devm_kcalloc(dev, ctlr->num_chipselect, sizeof(*cs),
2572 GFP_KERNEL);
2573 if (!cs)
2574 return -ENOMEM;
2575 ctlr->cs_gpiods = cs;
2576
2577 for (i = 0; i < nb; i++) {
2578 /*
2579 		 * Most chipselects are active low, the inverted
2580 		 * semantics are handled by special quirks in gpiolib,
2581 		 * so initializing them to GPIOD_OUT_LOW here means
2582 		 * "unasserted"; in most cases this will drive the physical
2583 		 * line high.
2584 */
2585 cs[i] = devm_gpiod_get_index_optional(dev, "cs", i,
2586 GPIOD_OUT_LOW);
2587 if (IS_ERR(cs[i]))
2588 return PTR_ERR(cs[i]);
2589
2590 if (cs[i]) {
2591 /*
2592 * If we find a CS GPIO, name it after the device and
2593 * chip select line.
2594 */
2595 char *gpioname;
2596
2597 gpioname = devm_kasprintf(dev, GFP_KERNEL, "%s CS%d",
2598 dev_name(dev), i);
2599 if (!gpioname)
2600 return -ENOMEM;
2601 gpiod_set_consumer_name(cs[i], gpioname);
2602 num_cs_gpios++;
2603 continue;
2604 }
2605
2606 if (ctlr->max_native_cs && i >= ctlr->max_native_cs) {
2607 dev_err(dev, "Invalid native chip select %d\n", i);
2608 return -EINVAL;
2609 }
2610 native_cs_mask |= BIT(i);
2611 }
2612
2613 ctlr->unused_native_cs = ffs(~native_cs_mask) - 1;
2614
2615 if ((ctlr->flags & SPI_MASTER_GPIO_SS) && num_cs_gpios &&
2616 ctlr->max_native_cs && ctlr->unused_native_cs >= ctlr->max_native_cs) {
2617 dev_err(dev, "No unused native chip select available\n");
2618 return -EINVAL;
2619 }
2620
2621 return 0;
2622 }
2623
2624 static int spi_controller_check_ops(struct spi_controller *ctlr)
2625 {
2626 /*
2627 	 * The controller may implement only the high-level SPI-memory like
2628 	 * operations if it does not support regular SPI transfers, and this is
2629 	 * a valid use case.
2630 	 * If ->mem_ops is NULL, we request that at least one of the
2631 	 * ->transfer_xxx() methods be implemented.
2632 */
2633 if (ctlr->mem_ops) {
2634 if (!ctlr->mem_ops->exec_op)
2635 return -EINVAL;
2636 } else if (!ctlr->transfer && !ctlr->transfer_one &&
2637 !ctlr->transfer_one_message) {
2638 return -EINVAL;
2639 }
2640
2641 return 0;
2642 }
2643
2644 /**
2645 * spi_register_controller - register SPI master or slave controller
2646 * @ctlr: initialized master, originally from spi_alloc_master() or
2647 * spi_alloc_slave()
2648 * Context: can sleep
2649 *
2650 * SPI controllers connect to their drivers using some non-SPI bus,
2651 * such as the platform bus. The final stage of probe() in that code
2652 * includes calling spi_register_controller() to hook up to this SPI bus glue.
2653 *
2654 * SPI controllers use board specific (often SOC specific) bus numbers,
2655 * and board-specific addressing for SPI devices combines those numbers
2656 * with chip select numbers. Since SPI does not directly support dynamic
2657 * device identification, boards need configuration tables telling which
2658 * chip is at which address.
2659 *
2660 * This must be called from context that can sleep. It returns zero on
2661 * success, else a negative error code (dropping the controller's refcount).
2662 * After a successful return, the caller is responsible for calling
2663 * spi_unregister_controller().
2664 *
2665 * Return: zero on success, else a negative error code.
2666 */
2667 int spi_register_controller(struct spi_controller *ctlr)
2668 {
2669 struct device *dev = ctlr->dev.parent;
2670 struct boardinfo *bi;
2671 int status;
2672 int id, first_dynamic;
2673
2674 if (!dev)
2675 return -ENODEV;
2676
2677 /*
2678 * Make sure all necessary hooks are implemented before registering
2679 * the SPI controller.
2680 */
2681 status = spi_controller_check_ops(ctlr);
2682 if (status)
2683 return status;
2684
2685 if (ctlr->bus_num >= 0) {
2686 		/* devices with a fixed bus number must check in with that number */
2687 mutex_lock(&board_lock);
2688 id = idr_alloc(&spi_master_idr, ctlr, ctlr->bus_num,
2689 ctlr->bus_num + 1, GFP_KERNEL);
2690 mutex_unlock(&board_lock);
2691 if (WARN(id < 0, "couldn't get idr"))
2692 return id == -ENOSPC ? -EBUSY : id;
2693 ctlr->bus_num = id;
2694 } else if (ctlr->dev.of_node) {
2695 /* allocate dynamic bus number using Linux idr */
2696 id = of_alias_get_id(ctlr->dev.of_node, "spi");
2697 if (id >= 0) {
2698 ctlr->bus_num = id;
2699 mutex_lock(&board_lock);
2700 id = idr_alloc(&spi_master_idr, ctlr, ctlr->bus_num,
2701 ctlr->bus_num + 1, GFP_KERNEL);
2702 mutex_unlock(&board_lock);
2703 if (WARN(id < 0, "couldn't get idr"))
2704 return id == -ENOSPC ? -EBUSY : id;
2705 }
2706 }
2707 if (ctlr->bus_num < 0) {
2708 first_dynamic = of_alias_get_highest_id("spi");
2709 if (first_dynamic < 0)
2710 first_dynamic = 0;
2711 else
2712 first_dynamic++;
2713
2714 mutex_lock(&board_lock);
2715 id = idr_alloc(&spi_master_idr, ctlr, first_dynamic,
2716 0, GFP_KERNEL);
2717 mutex_unlock(&board_lock);
2718 if (WARN(id < 0, "couldn't get idr"))
2719 return id;
2720 ctlr->bus_num = id;
2721 }
2722 INIT_LIST_HEAD(&ctlr->queue);
2723 spin_lock_init(&ctlr->queue_lock);
2724 spin_lock_init(&ctlr->bus_lock_spinlock);
2725 mutex_init(&ctlr->bus_lock_mutex);
2726 mutex_init(&ctlr->io_mutex);
2727 mutex_init(&ctlr->add_lock);
2728 ctlr->bus_lock_flag = 0;
2729 init_completion(&ctlr->xfer_completion);
2730 if (!ctlr->max_dma_len)
2731 ctlr->max_dma_len = INT_MAX;
2732
2733 	/* Register the device, then userspace will see it.
2734 	 * Registration fails if the bus ID is in use.
2735 	 */
2736 dev_set_name(&ctlr->dev, "spi%u", ctlr->bus_num);
2737
2738 if (!spi_controller_is_slave(ctlr)) {
2739 if (ctlr->use_gpio_descriptors) {
2740 status = spi_get_gpio_descs(ctlr);
2741 if (status)
2742 goto free_bus_id;
2743 /*
2744 * A controller using GPIO descriptors always
2745 * supports SPI_CS_HIGH if need be.
2746 */
2747 ctlr->mode_bits |= SPI_CS_HIGH;
2748 } else {
2749 /* Legacy code path for GPIOs from DT */
2750 status = of_spi_get_gpio_numbers(ctlr);
2751 if (status)
2752 goto free_bus_id;
2753 }
2754 }
2755
2756 /*
2757 * Even if it's just one always-selected device, there must
2758 * be at least one chipselect.
2759 */
2760 if (!ctlr->num_chipselect) {
2761 status = -EINVAL;
2762 goto free_bus_id;
2763 }
2764
2765 status = device_add(&ctlr->dev);
2766 if (status < 0)
2767 goto free_bus_id;
2768 dev_dbg(dev, "registered %s %s\n",
2769 spi_controller_is_slave(ctlr) ? "slave" : "master",
2770 dev_name(&ctlr->dev));
2771
2772 /*
2773 * If we're using a queued driver, start the queue. Note that we don't
2774 * need the queueing logic if the driver is only supporting high-level
2775 * memory operations.
2776 */
2777 if (ctlr->transfer) {
2778 dev_info(dev, "controller is unqueued, this is deprecated\n");
2779 } else if (ctlr->transfer_one || ctlr->transfer_one_message) {
2780 status = spi_controller_initialize_queue(ctlr);
2781 if (status) {
2782 device_del(&ctlr->dev);
2783 goto free_bus_id;
2784 }
2785 }
2786 /* add statistics */
2787 spin_lock_init(&ctlr->statistics.lock);
2788
2789 mutex_lock(&board_lock);
2790 list_add_tail(&ctlr->list, &spi_controller_list);
2791 list_for_each_entry(bi, &board_list, list)
2792 spi_match_controller_to_boardinfo(ctlr, &bi->board_info);
2793 mutex_unlock(&board_lock);
2794
2795 /* Register devices from the device tree and ACPI */
2796 of_register_spi_devices(ctlr);
2797 acpi_register_spi_devices(ctlr);
2798 return status;
2799
2800 free_bus_id:
2801 mutex_lock(&board_lock);
2802 idr_remove(&spi_master_idr, ctlr->bus_num);
2803 mutex_unlock(&board_lock);
2804 return status;
2805 }
2806 EXPORT_SYMBOL_GPL(spi_register_controller);
2807
2808 static void devm_spi_unregister(struct device *dev, void *res)
2809 {
2810 spi_unregister_controller(*(struct spi_controller **)res);
2811 }
2812
2813 /**
2814 * devm_spi_register_controller - register managed SPI master or slave
2815 * controller
2816 * @dev: device managing SPI controller
2817 * @ctlr: initialized controller, originally from spi_alloc_master() or
2818 * spi_alloc_slave()
2819 * Context: can sleep
2820 *
2821  * Register an SPI controller as with spi_register_controller(); the
2822  * controller will automatically be unregistered and freed.
2823 *
2824 * Return: zero on success, else a negative error code.
2825 */
2826 int devm_spi_register_controller(struct device *dev,
2827 struct spi_controller *ctlr)
2828 {
2829 struct spi_controller **ptr;
2830 int ret;
2831
2832 ptr = devres_alloc(devm_spi_unregister, sizeof(*ptr), GFP_KERNEL);
2833 if (!ptr)
2834 return -ENOMEM;
2835
2836 ret = spi_register_controller(ctlr);
2837 if (!ret) {
2838 *ptr = ctlr;
2839 devres_add(dev, ptr);
2840 } else {
2841 devres_free(ptr);
2842 }
2843
2844 return ret;
2845 }
2846 EXPORT_SYMBOL_GPL(devm_spi_register_controller);
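/*
 * Minimal sketch of the fully managed pattern, combining the devm allocator
 * above with devm registration; error paths trimmed, foo_* names assumed:
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct spi_controller *ctlr;
 *
 *		ctlr = devm_spi_alloc_master(&pdev->dev,
 *					     sizeof(struct foo_priv));
 *		if (!ctlr)
 *			return -ENOMEM;
 *		ctlr->transfer_one = foo_transfer_one;
 *		return devm_spi_register_controller(&pdev->dev, ctlr);
 *	}
 */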
2847
2848 static int __unregister(struct device *dev, void *null)
2849 {
2850 spi_unregister_device(to_spi_device(dev));
2851 return 0;
2852 }
2853
2854 /**
2855 * spi_unregister_controller - unregister SPI master or slave controller
2856 * @ctlr: the controller being unregistered
2857 * Context: can sleep
2858 *
2859 * This call is used only by SPI controller drivers, which are the
2860 * only ones directly touching chip registers.
2861 *
2862 * This must be called from context that can sleep.
2863 *
2864 * Note that this function also drops a reference to the controller.
2865 */
2866 void spi_unregister_controller(struct spi_controller *ctlr)
2867 {
2868 struct spi_controller *found;
2869 int id = ctlr->bus_num;
2870
2871 /* Prevent addition of new devices, unregister existing ones */
2872 if (IS_ENABLED(CONFIG_SPI_DYNAMIC))
2873 mutex_lock(&ctlr->add_lock);
2874
2875 device_for_each_child(&ctlr->dev, NULL, __unregister);
2876
2877 /* First make sure that this controller was ever added */
2878 mutex_lock(&board_lock);
2879 found = idr_find(&spi_master_idr, id);
2880 mutex_unlock(&board_lock);
2881 if (ctlr->queued) {
2882 if (spi_destroy_queue(ctlr))
2883 dev_err(&ctlr->dev, "queue remove failed\n");
2884 }
2885 mutex_lock(&board_lock);
2886 list_del(&ctlr->list);
2887 mutex_unlock(&board_lock);
2888
2889 device_del(&ctlr->dev);
2890
2891 /* Release the last reference on the controller if its driver
2892 * has not yet been converted to devm_spi_alloc_master/slave().
2893 */
2894 if (!ctlr->devm_allocated)
2895 put_device(&ctlr->dev);
2896
2897 /* free bus id */
2898 mutex_lock(&board_lock);
2899 if (found == ctlr)
2900 idr_remove(&spi_master_idr, id);
2901 mutex_unlock(&board_lock);
2902
2903 if (IS_ENABLED(CONFIG_SPI_DYNAMIC))
2904 mutex_unlock(&ctlr->add_lock);
2905 }
2906 EXPORT_SYMBOL_GPL(spi_unregister_controller);
2907
2908 int spi_controller_suspend(struct spi_controller *ctlr)
2909 {
2910 int ret;
2911
2912 /* Basically no-ops for non-queued controllers */
2913 if (!ctlr->queued)
2914 return 0;
2915
2916 ret = spi_stop_queue(ctlr);
2917 if (ret)
2918 dev_err(&ctlr->dev, "queue stop failed\n");
2919
2920 return ret;
2921 }
2922 EXPORT_SYMBOL_GPL(spi_controller_suspend);
2923
2924 int spi_controller_resume(struct spi_controller *ctlr)
2925 {
2926 int ret;
2927
2928 if (!ctlr->queued)
2929 return 0;
2930
2931 ret = spi_start_queue(ctlr);
2932 if (ret)
2933 dev_err(&ctlr->dev, "queue restart failed\n");
2934
2935 return ret;
2936 }
2937 EXPORT_SYMBOL_GPL(spi_controller_resume);
2938
2939 static int __spi_controller_match(struct device *dev, const void *data)
2940 {
2941 struct spi_controller *ctlr;
2942 const u16 *bus_num = data;
2943
2944 ctlr = container_of(dev, struct spi_controller, dev);
2945 return ctlr->bus_num == *bus_num;
2946 }
2947
2948 /**
2949 * spi_busnum_to_master - look up master associated with bus_num
2950 * @bus_num: the master's bus number
2951 * Context: can sleep
2952 *
2953 * This call may be used with devices that are registered after
2954 * arch init time. It returns a refcounted pointer to the relevant
2955 * spi_controller (which the caller must release), or NULL if there is
2956 * no such master registered.
2957 *
2958 * Return: the SPI master structure on success, else NULL.
2959 */
2960 struct spi_controller *spi_busnum_to_master(u16 bus_num)
2961 {
2962 struct device *dev;
2963 struct spi_controller *ctlr = NULL;
2964
2965 dev = class_find_device(&spi_master_class, NULL, &bus_num,
2966 __spi_controller_match);
2967 if (dev)
2968 ctlr = container_of(dev, struct spi_controller, dev);
2969 /* reference got in class_find_device */
2970 return ctlr;
2971 }
2972 EXPORT_SYMBOL_GPL(spi_busnum_to_master);
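/*
 * Callers must drop the reference this lookup takes, e.g. (illustrative):
 *
 *	ctlr = spi_busnum_to_master(0);
 *	if (ctlr) {
 *		...
 *		spi_controller_put(ctlr);
 *	}
 */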
2973
2974 /*-------------------------------------------------------------------------*/
2975
2976 /* Core methods for SPI resource management */
2977
2978 /**
2979 * spi_res_alloc - allocate a spi resource that is life-cycle managed
2980 * during the processing of a spi_message while using
2981 * spi_transfer_one
2982 * @spi: the spi device for which we allocate memory
2983 * @release: the release code to execute for this resource
2984 * @size: size to alloc and return
2985 * @gfp: GFP allocation flags
2986 *
2987 * Return: the pointer to the allocated data
2988 *
2989 * This may get enhanced in the future to allocate from a memory pool
2990 * of the @spi_device or @spi_controller to avoid repeated allocations.
2991 */
2992 void *spi_res_alloc(struct spi_device *spi,
2993 spi_res_release_t release,
2994 size_t size, gfp_t gfp)
2995 {
2996 struct spi_res *sres;
2997
2998 sres = kzalloc(sizeof(*sres) + size, gfp);
2999 if (!sres)
3000 return NULL;
3001
3002 INIT_LIST_HEAD(&sres->entry);
3003 sres->release = release;
3004
3005 return sres->data;
3006 }
3007 EXPORT_SYMBOL_GPL(spi_res_alloc);
3008
3009 /**
3010 * spi_res_free - free an spi resource
3011 * @res: pointer to the custom data of a resource
3012 *
3013 */
3014 void spi_res_free(void *res)
3015 {
3016 struct spi_res *sres = container_of(res, struct spi_res, data);
3017
3018 if (!res)
3019 return;
3020
3021 WARN_ON(!list_empty(&sres->entry));
3022 kfree(sres);
3023 }
3024 EXPORT_SYMBOL_GPL(spi_res_free);
3025
3026 /**
3027 * spi_res_add - add a spi_res to the spi_message
3028 * @message: the spi message
3029 * @res: the spi_resource
3030 */
3031 void spi_res_add(struct spi_message *message, void *res)
3032 {
3033 struct spi_res *sres = container_of(res, struct spi_res, data);
3034
3035 WARN_ON(!list_empty(&sres->entry));
3036 list_add_tail(&sres->entry, &message->resources);
3037 }
3038 EXPORT_SYMBOL_GPL(spi_res_add);
3039
3040 /**
3041 * spi_res_release - release all spi resources for this message
3042 * @ctlr: the @spi_controller
3043 * @message: the @spi_message
3044 */
3045 void spi_res_release(struct spi_controller *ctlr, struct spi_message *message)
3046 {
3047 struct spi_res *res, *tmp;
3048
3049 list_for_each_entry_safe_reverse(res, tmp, &message->resources, entry) {
3050 if (res->release)
3051 res->release(ctlr, message, res->data);
3052
3053 list_del(&res->entry);
3054
3055 kfree(res);
3056 }
3057 }
3058 EXPORT_SYMBOL_GPL(spi_res_release);
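/*
 * Putting the calls above together, a resource needing custom teardown when
 * the message finishes would be used roughly like this (hedged sketch;
 * struct foo_res and foo_res_release() are assumed):
 *
 *	struct foo_res *r;
 *
 *	r = spi_res_alloc(spi, foo_res_release, sizeof(*r), GFP_KERNEL);
 *	if (!r)
 *		return -ENOMEM;
 *	...
 *	spi_res_add(msg, r);	// freed via spi_res_release() when the
 *				// message is finalized
 */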
3059
3060 /*-------------------------------------------------------------------------*/
3061
3062 /* Core methods for spi_message alterations */
3063
3064 static void __spi_replace_transfers_release(struct spi_controller *ctlr,
3065 struct spi_message *msg,
3066 void *res)
3067 {
3068 struct spi_replaced_transfers *rxfer = res;
3069 size_t i;
3070
3071 /* call extra callback if requested */
3072 if (rxfer->release)
3073 rxfer->release(ctlr, msg, res);
3074
3075 /* insert replaced transfers back into the message */
3076 list_splice(&rxfer->replaced_transfers, rxfer->replaced_after);
3077
3078 /* remove the formerly inserted entries */
3079 for (i = 0; i < rxfer->inserted; i++)
3080 list_del(&rxfer->inserted_transfers[i].transfer_list);
3081 }
3082
3083 /**
3084 * spi_replace_transfers - replace transfers with several transfers
3085 * and register change with spi_message.resources
3086 * @msg: the spi_message we work upon
3087 * @xfer_first: the first spi_transfer we want to replace
3088 * @remove: number of transfers to remove
3089 * @insert: the number of transfers we want to insert instead
3090 * @release: extra release code necessary in some circumstances
3091 * @extradatasize: extra data to allocate (with alignment guarantees
3092 * of struct @spi_transfer)
3093 * @gfp: gfp flags
3094 *
3095  * Return: pointer to @spi_replaced_transfers,
3096  * or PTR_ERR(...) in case of errors.
3097 */
3098 struct spi_replaced_transfers *spi_replace_transfers(
3099 struct spi_message *msg,
3100 struct spi_transfer *xfer_first,
3101 size_t remove,
3102 size_t insert,
3103 spi_replaced_release_t release,
3104 size_t extradatasize,
3105 gfp_t gfp)
3106 {
3107 struct spi_replaced_transfers *rxfer;
3108 struct spi_transfer *xfer;
3109 size_t i;
3110
3111 /* allocate the structure using spi_res */
3112 rxfer = spi_res_alloc(msg->spi, __spi_replace_transfers_release,
3113 struct_size(rxfer, inserted_transfers, insert)
3114 + extradatasize,
3115 gfp);
3116 if (!rxfer)
3117 return ERR_PTR(-ENOMEM);
3118
3119 /* the release code to invoke before running the generic release */
3120 rxfer->release = release;
3121
3122 /* assign extradata */
3123 if (extradatasize)
3124 rxfer->extradata =
3125 &rxfer->inserted_transfers[insert];
3126
3127 /* init the replaced_transfers list */
3128 INIT_LIST_HEAD(&rxfer->replaced_transfers);
3129
3130 	/* assign the list_entry after which we should reinsert
3131 	 * the @replaced_transfers - it may be spi_message.transfers!
3132 	 */
3133 rxfer->replaced_after = xfer_first->transfer_list.prev;
3134
3135 /* remove the requested number of transfers */
3136 for (i = 0; i < remove; i++) {
3137 		/* if the entry after replaced_after is msg->transfers
3138 * then we have been requested to remove more transfers
3139 * than are in the list
3140 */
3141 if (rxfer->replaced_after->next == &msg->transfers) {
3142 dev_err(&msg->spi->dev,
3143 "requested to remove more spi_transfers than are available\n");
3144 /* insert replaced transfers back into the message */
3145 list_splice(&rxfer->replaced_transfers,
3146 rxfer->replaced_after);
3147
3148 /* free the spi_replace_transfer structure */
3149 spi_res_free(rxfer);
3150
3151 /* and return with an error */
3152 return ERR_PTR(-EINVAL);
3153 }
3154
3155 /* remove the entry after replaced_after from list of
3156 * transfers and add it to list of replaced_transfers
3157 */
3158 list_move_tail(rxfer->replaced_after->next,
3159 &rxfer->replaced_transfers);
3160 }
3161
3162 /* create copy of the given xfer with identical settings
3163 * based on the first transfer to get removed
3164 */
3165 for (i = 0; i < insert; i++) {
3166 /* we need to run in reverse order */
3167 xfer = &rxfer->inserted_transfers[insert - 1 - i];
3168
3169 /* copy all spi_transfer data */
3170 memcpy(xfer, xfer_first, sizeof(*xfer));
3171
3172 /* add to list */
3173 list_add(&xfer->transfer_list, rxfer->replaced_after);
3174
3175 /* clear cs_change and delay for all but the last */
3176 if (i) {
3177 xfer->cs_change = false;
3178 xfer->delay_usecs = 0;
3179 xfer->delay.value = 0;
3180 }
3181 }
3182
3183 /* set up inserted */
3184 rxfer->inserted = insert;
3185
3186 /* and register it with spi_res/spi_message */
3187 spi_res_add(msg, rxfer);
3188
3189 return rxfer;
3190 }
3191 EXPORT_SYMBOL_GPL(spi_replace_transfers);
3192
3193 static int __spi_split_transfer_maxsize(struct spi_controller *ctlr,
3194 struct spi_message *msg,
3195 struct spi_transfer **xferp,
3196 size_t maxsize,
3197 gfp_t gfp)
3198 {
3199 struct spi_transfer *xfer = *xferp, *xfers;
3200 struct spi_replaced_transfers *srt;
3201 size_t offset;
3202 size_t count, i;
3203
3204 /* calculate how many we have to replace */
3205 count = DIV_ROUND_UP(xfer->len, maxsize);
3206
3207 /* create replacement */
3208 srt = spi_replace_transfers(msg, xfer, 1, count, NULL, 0, gfp);
3209 if (IS_ERR(srt))
3210 return PTR_ERR(srt);
3211 xfers = srt->inserted_transfers;
3212
3213 	/* now handle each of those newly inserted spi_transfers
3214 	 * note that the replacement spi_transfers are all preset
3215 	 * to the same values as *xferp, so tx_buf, rx_buf and len
3216 	 * are all identical (as well as most others),
3217 	 * so we just have to fix up len and the pointers.
3218 	 *
3219 	 * this also includes support for the deprecated
3220 	 * spi_message.is_dma_mapped interface
3221 	 */
3222
3223 /* the first transfer just needs the length modified, so we
3224 * run it outside the loop
3225 */
3226 xfers[0].len = min_t(size_t, maxsize, xfer[0].len);
3227
3228 /* all the others need rx_buf/tx_buf also set */
3229 for (i = 1, offset = maxsize; i < count; offset += maxsize, i++) {
3230 /* update rx_buf, tx_buf and dma */
3231 if (xfers[i].rx_buf)
3232 xfers[i].rx_buf += offset;
3233 if (xfers[i].rx_dma)
3234 xfers[i].rx_dma += offset;
3235 if (xfers[i].tx_buf)
3236 xfers[i].tx_buf += offset;
3237 if (xfers[i].tx_dma)
3238 xfers[i].tx_dma += offset;
3239
3240 /* update length */
3241 xfers[i].len = min(maxsize, xfers[i].len - offset);
3242 }
3243
3244 	/* we point xferp at the last entry we have inserted,
3245 	 * so that we skip those already split transfers
3246 	 */
3247 *xferp = &xfers[count - 1];
3248
3249 /* increment statistics counters */
3250 SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics,
3251 transfers_split_maxsize);
3252 SPI_STATISTICS_INCREMENT_FIELD(&msg->spi->statistics,
3253 transfers_split_maxsize);
3254
3255 return 0;
3256 }
3257
3258 /**
3259  * spi_split_transfers_maxsize - split spi transfers into multiple transfers
3260 * when an individual transfer exceeds a
3261 * certain size
3262 * @ctlr: the @spi_controller for this transfer
3263 * @msg: the @spi_message to transform
3264  * @maxsize: the maximum length an individual transfer may have
3265 * @gfp: GFP allocation flags
3266 *
3267 * Return: status of transformation
3268 */
3269 int spi_split_transfers_maxsize(struct spi_controller *ctlr,
3270 struct spi_message *msg,
3271 size_t maxsize,
3272 gfp_t gfp)
3273 {
3274 struct spi_transfer *xfer;
3275 int ret;
3276
3277 	/* iterate over the transfer_list,
3278 	 * but note that xfer is advanced to the last transfer inserted
3279 	 * to avoid checking sizes again unnecessarily (also xfer may
3280 	 * potentially belong to a different list by the time the
3281 	 * replacement has happened)
3282 	 */
3283 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
3284 if (xfer->len > maxsize) {
3285 ret = __spi_split_transfer_maxsize(ctlr, msg, &xfer,
3286 maxsize, gfp);
3287 if (ret)
3288 return ret;
3289 }
3290 }
3291
3292 return 0;
3293 }
3294 EXPORT_SYMBOL_GPL(spi_split_transfers_maxsize);
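/*
 * A controller with a hardware transfer-length limit might call this from
 * its prepare_message() hook (hedged sketch; FOO_MAX_XFER_LEN is assumed):
 *
 *	static int foo_prepare_message(struct spi_controller *ctlr,
 *				       struct spi_message *msg)
 *	{
 *		return spi_split_transfers_maxsize(ctlr, msg,
 *						   FOO_MAX_XFER_LEN,
 *						   GFP_KERNEL);
 *	}
 */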
3295
3296 /*-------------------------------------------------------------------------*/
3297
3298 /* Core methods for SPI controller protocol drivers. Some of the
3299 * other core methods are currently defined as inline functions.
3300 */
3301
3302 static int __spi_validate_bits_per_word(struct spi_controller *ctlr,
3303 u8 bits_per_word)
3304 {
3305 if (ctlr->bits_per_word_mask) {
3306 /* Only 32 bits fit in the mask */
3307 if (bits_per_word > 32)
3308 return -EINVAL;
3309 if (!(ctlr->bits_per_word_mask & SPI_BPW_MASK(bits_per_word)))
3310 return -EINVAL;
3311 }
3312
3313 return 0;
3314 }
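/*
 * The mask checked above is declared by controller drivers at probe time,
 * for example (illustrative) for hardware limited to 8- and 16-bit words:
 *
 *	ctlr->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16);
 *
 * or, for a contiguous range of word sizes:
 *
 *	ctlr->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16);
 */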
3315
3316 /**
3317 * spi_setup - setup SPI mode and clock rate
3318 * @spi: the device whose settings are being modified
3319 * Context: can sleep, and no requests are queued to the device
3320 *
3321 * SPI protocol drivers may need to update the transfer mode if the
3322 * device doesn't work with its default. They may likewise need
3323 * to update clock rates or word sizes from initial values. This function
3324 * changes those settings, and must be called from a context that can sleep.
3325 * Except for SPI_CS_HIGH, which takes effect immediately, the changes take
3326 * effect the next time the device is selected and data is transferred to
3327 * or from it. When this function returns, the spi device is deselected.
3328 *
3329 * Note that this call will fail if the protocol driver specifies an option
3330 * that the underlying controller or its driver does not support. For
3331 * example, not all hardware supports wire transfers using nine bit words,
3332 * LSB-first wire encoding, or active-high chipselects.
3333 *
3334 * Return: zero on success, else a negative error code.
3335 */
3336 int spi_setup(struct spi_device *spi)
3337 {
3338 unsigned bad_bits, ugly_bits;
3339 int status;
3340
3341 	/* check mode to prevent DUAL and QUAD from being set at the same time
3342 	 */
3343 if (((spi->mode & SPI_TX_DUAL) && (spi->mode & SPI_TX_QUAD)) ||
3344 ((spi->mode & SPI_RX_DUAL) && (spi->mode & SPI_RX_QUAD))) {
3345 dev_err(&spi->dev,
3346 "setup: can not select dual and quad at the same time\n");
3347 return -EINVAL;
3348 }
3349 	/* in SPI_3WIRE mode, DUAL and QUAD are forbidden
3350 	 */
3351 if ((spi->mode & SPI_3WIRE) && (spi->mode &
3352 (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL |
3353 SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL)))
3354 return -EINVAL;
3355 	/* help drivers fail *cleanly* when they need options
3356 	 * that aren't supported with their current controller.
3357 	 * SPI_CS_WORD has a fallback software implementation,
3358 	 * so it is ignored here.
3359 	 */
3360 bad_bits = spi->mode & ~(spi->controller->mode_bits | SPI_CS_WORD);
3361 	/* nothing prevents us from working with an active-high CS if it
3362 	 * is driven by a GPIO.
3363 	 */
3364 if (gpio_is_valid(spi->cs_gpio))
3365 bad_bits &= ~SPI_CS_HIGH;
3366 ugly_bits = bad_bits &
3367 (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL |
3368 SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL);
3369 if (ugly_bits) {
3370 dev_warn(&spi->dev,
3371 "setup: ignoring unsupported mode bits %x\n",
3372 ugly_bits);
3373 spi->mode &= ~ugly_bits;
3374 bad_bits &= ~ugly_bits;
3375 }
3376 if (bad_bits) {
3377 dev_err(&spi->dev, "setup: unsupported mode bits %x\n",
3378 bad_bits);
3379 return -EINVAL;
3380 }
3381
3382 if (!spi->bits_per_word)
3383 spi->bits_per_word = 8;
3384
3385 status = __spi_validate_bits_per_word(spi->controller,
3386 spi->bits_per_word);
3387 if (status)
3388 return status;
3389
3390 if (!spi->max_speed_hz)
3391 spi->max_speed_hz = spi->controller->max_speed_hz;
3392
3393 mutex_lock(&spi->controller->io_mutex);
3394
3395 if (spi->controller->setup)
3396 status = spi->controller->setup(spi);
3397
3398 if (spi->controller->auto_runtime_pm && spi->controller->set_cs) {
3399 status = pm_runtime_get_sync(spi->controller->dev.parent);
3400 if (status < 0) {
3401 mutex_unlock(&spi->controller->io_mutex);
3402 pm_runtime_put_noidle(spi->controller->dev.parent);
3403 dev_err(&spi->controller->dev, "Failed to power device: %d\n",
3404 status);
3405 return status;
3406 }
3407
3408 /*
3409 * We do not want to return positive value from pm_runtime_get,
3410 * there are many instances of devices calling spi_setup() and
3411 * checking for a non-zero return value instead of a negative
3412 * return value.
3413 */
3414 status = 0;
3415
3416 spi_set_cs(spi, false, true);
3417 pm_runtime_mark_last_busy(spi->controller->dev.parent);
3418 pm_runtime_put_autosuspend(spi->controller->dev.parent);
3419 } else {
3420 spi_set_cs(spi, false, true);
3421 }
3422
3423 mutex_unlock(&spi->controller->io_mutex);
3424
3425 if (spi->rt && !spi->controller->rt) {
3426 spi->controller->rt = true;
3427 spi_set_thread_rt(spi->controller);
3428 }
3429
3430 dev_dbg(&spi->dev, "setup mode %d, %s%s%s%s%u bits/w, %u Hz max --> %d\n",
3431 (int) (spi->mode & (SPI_CPOL | SPI_CPHA)),
3432 (spi->mode & SPI_CS_HIGH) ? "cs_high, " : "",
3433 (spi->mode & SPI_LSB_FIRST) ? "lsb, " : "",
3434 (spi->mode & SPI_3WIRE) ? "3wire, " : "",
3435 (spi->mode & SPI_LOOP) ? "loopback, " : "",
3436 spi->bits_per_word, spi->max_speed_hz,
3437 status);
3438
3439 return status;
3440 }
3441 EXPORT_SYMBOL_GPL(spi_setup);
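/*
 * Canonical protocol-driver usage (illustrative): adjust the spi_device
 * fields, then re-run setup before the first transfer:
 *
 *	spi->mode = SPI_MODE_3;
 *	spi->bits_per_word = 16;
 *	spi->max_speed_hz = 1000000;
 *	status = spi_setup(spi);
 *	if (status)
 *		return status;
 */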
3442
3443 /**
3444 * spi_set_cs_timing - configure CS setup, hold, and inactive delays
3445 * @spi: the device that requires specific CS timing configuration
3446 * @setup: CS setup time specified via @spi_delay
3447 * @hold: CS hold time specified via @spi_delay
3448 * @inactive: CS inactive delay between transfers specified via @spi_delay
3449 *
3450 * Return: zero on success, else a negative error code.
3451 */
3452 int spi_set_cs_timing(struct spi_device *spi, struct spi_delay *setup,
3453 struct spi_delay *hold, struct spi_delay *inactive)
3454 {
3455 size_t len;
3456
3457 if (spi->controller->set_cs_timing)
3458 return spi->controller->set_cs_timing(spi, setup, hold,
3459 inactive);
3460
3461 if ((setup && setup->unit == SPI_DELAY_UNIT_SCK) ||
3462 (hold && hold->unit == SPI_DELAY_UNIT_SCK) ||
3463 (inactive && inactive->unit == SPI_DELAY_UNIT_SCK)) {
3464 dev_err(&spi->dev,
3465 "Clock-cycle delays for CS not supported in SW mode\n");
3466 return -ENOTSUPP;
3467 }
3468
3469 len = sizeof(struct spi_delay);
3470
3471 /* copy delays to controller */
3472 if (setup)
3473 memcpy(&spi->controller->cs_setup, setup, len);
3474 else
3475 memset(&spi->controller->cs_setup, 0, len);
3476
3477 if (hold)
3478 memcpy(&spi->controller->cs_hold, hold, len);
3479 else
3480 memset(&spi->controller->cs_hold, 0, len);
3481
3482 if (inactive)
3483 memcpy(&spi->controller->cs_inactive, inactive, len);
3484 else
3485 memset(&spi->controller->cs_inactive, 0, len);
3486
3487 return 0;
3488 }
3489 EXPORT_SYMBOL_GPL(spi_set_cs_timing);
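/*
 * Illustrative call requesting 5 us of CS setup and hold time; microsecond
 * units work with both a controller's set_cs_timing() hook and the software
 * fallback above:
 *
 *	struct spi_delay setup = { .value = 5, .unit = SPI_DELAY_UNIT_USECS };
 *	struct spi_delay hold = { .value = 5, .unit = SPI_DELAY_UNIT_USECS };
 *
 *	status = spi_set_cs_timing(spi, &setup, &hold, NULL);
 */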
3490
3491 static int _spi_xfer_word_delay_update(struct spi_transfer *xfer,
3492 struct spi_device *spi)
3493 {
3494 int delay1, delay2;
3495
3496 delay1 = spi_delay_to_ns(&xfer->word_delay, xfer);
3497 if (delay1 < 0)
3498 return delay1;
3499
3500 delay2 = spi_delay_to_ns(&spi->word_delay, xfer);
3501 if (delay2 < 0)
3502 return delay2;
3503
3504 if (delay1 < delay2)
3505 memcpy(&xfer->word_delay, &spi->word_delay,
3506 sizeof(xfer->word_delay));
3507
3508 return 0;
3509 }
3510
3511 static int __spi_validate(struct spi_device *spi, struct spi_message *message)
3512 {
3513 struct spi_controller *ctlr = spi->controller;
3514 struct spi_transfer *xfer;
3515 int w_size;
3516
3517 if (list_empty(&message->transfers))
3518 return -EINVAL;
3519
3520 /* If an SPI controller does not support toggling the CS line on each
3521 * transfer (indicated by the SPI_CS_WORD flag) or we are using a GPIO
3522 * for the CS line, we can emulate the CS-per-word hardware function by
3523 * splitting transfers into one-word transfers and ensuring that
3524 * cs_change is set for each transfer.
3525 */
3526 if ((spi->mode & SPI_CS_WORD) && (!(ctlr->mode_bits & SPI_CS_WORD) ||
3527 spi->cs_gpiod ||
3528 gpio_is_valid(spi->cs_gpio))) {
3529 size_t maxsize;
3530 int ret;
3531
3532 maxsize = (spi->bits_per_word + 7) / 8;
3533
3534 /* spi_split_transfers_maxsize() requires message->spi */
3535 message->spi = spi;
3536
3537 ret = spi_split_transfers_maxsize(ctlr, message, maxsize,
3538 GFP_KERNEL);
3539 if (ret)
3540 return ret;
3541
3542 list_for_each_entry(xfer, &message->transfers, transfer_list) {
3543 /* don't change cs_change on the last entry in the list */
3544 if (list_is_last(&xfer->transfer_list, &message->transfers))
3545 break;
3546 xfer->cs_change = 1;
3547 }
3548 }
3549
3550 /* Half-duplex links include original MicroWire, and ones with
3551 * only one data pin like SPI_3WIRE (switches direction) or where
3552 * either MOSI or MISO is missing. They can also be caused by
3553 * software limitations.
3554 */
3555 if ((ctlr->flags & SPI_CONTROLLER_HALF_DUPLEX) ||
3556 (spi->mode & SPI_3WIRE)) {
3557 unsigned flags = ctlr->flags;
3558
3559 list_for_each_entry(xfer, &message->transfers, transfer_list) {
3560 if (xfer->rx_buf && xfer->tx_buf)
3561 return -EINVAL;
3562 if ((flags & SPI_CONTROLLER_NO_TX) && xfer->tx_buf)
3563 return -EINVAL;
3564 if ((flags & SPI_CONTROLLER_NO_RX) && xfer->rx_buf)
3565 return -EINVAL;
3566 }
3567 }
3568
3569 	/*
3570 	 * Set transfer bits_per_word and max speed as spi device default if
3571 	 * it is not set for this transfer.
3572 	 * Set transfer tx_nbits and rx_nbits as single transfer default
3573 	 * (SPI_NBITS_SINGLE) if it is not set for this transfer.
3574 	 * Ensure transfer word_delay is at least as long as that required by
3575 	 * the device itself.
3576 	 */
3577 message->frame_length = 0;
3578 list_for_each_entry(xfer, &message->transfers, transfer_list) {
3579 xfer->effective_speed_hz = 0;
3580 message->frame_length += xfer->len;
3581 if (!xfer->bits_per_word)
3582 xfer->bits_per_word = spi->bits_per_word;
3583
3584 if (!xfer->speed_hz)
3585 xfer->speed_hz = spi->max_speed_hz;
3586
3587 if (ctlr->max_speed_hz && xfer->speed_hz > ctlr->max_speed_hz)
3588 xfer->speed_hz = ctlr->max_speed_hz;
3589
3590 if (__spi_validate_bits_per_word(ctlr, xfer->bits_per_word))
3591 return -EINVAL;
3592
3593 		/*
3594 		 * SPI transfer length must be a multiple of the SPI word size,
3595 		 * where the word size is rounded up to a power-of-two number
3596 		 * of bytes (e.g. a 12-bit word occupies two bytes). */
3597 if (xfer->bits_per_word <= 8)
3598 w_size = 1;
3599 else if (xfer->bits_per_word <= 16)
3600 w_size = 2;
3601 else
3602 w_size = 4;
3603
3604 /* No partial transfers accepted */
3605 if (xfer->len % w_size)
3606 return -EINVAL;
3607
3608 if (xfer->speed_hz && ctlr->min_speed_hz &&
3609 xfer->speed_hz < ctlr->min_speed_hz)
3610 return -EINVAL;
3611
3612 if (xfer->tx_buf && !xfer->tx_nbits)
3613 xfer->tx_nbits = SPI_NBITS_SINGLE;
3614 if (xfer->rx_buf && !xfer->rx_nbits)
3615 xfer->rx_nbits = SPI_NBITS_SINGLE;
3616 		/* Check transfer tx/rx_nbits:
3617 		 * 1. the value must match one of single, dual or quad
3618 		 * 2. tx/rx_nbits must be compatible with the mode flags in spi_device
3619 		 */
3620 if (xfer->tx_buf) {
3621 if (xfer->tx_nbits != SPI_NBITS_SINGLE &&
3622 xfer->tx_nbits != SPI_NBITS_DUAL &&
3623 xfer->tx_nbits != SPI_NBITS_QUAD)
3624 return -EINVAL;
3625 if ((xfer->tx_nbits == SPI_NBITS_DUAL) &&
3626 !(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD)))
3627 return -EINVAL;
3628 if ((xfer->tx_nbits == SPI_NBITS_QUAD) &&
3629 !(spi->mode & SPI_TX_QUAD))
3630 return -EINVAL;
3631 }
3632 /* check transfer rx_nbits */
3633 if (xfer->rx_buf) {
3634 if (xfer->rx_nbits != SPI_NBITS_SINGLE &&
3635 xfer->rx_nbits != SPI_NBITS_DUAL &&
3636 xfer->rx_nbits != SPI_NBITS_QUAD)
3637 return -EINVAL;
3638 if ((xfer->rx_nbits == SPI_NBITS_DUAL) &&
3639 !(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD)))
3640 return -EINVAL;
3641 if ((xfer->rx_nbits == SPI_NBITS_QUAD) &&
3642 !(spi->mode & SPI_RX_QUAD))
3643 return -EINVAL;
3644 }
3645
3646 if (_spi_xfer_word_delay_update(xfer, spi))
3647 return -EINVAL;
3648 }
3649
3650 message->status = -EINPROGRESS;
3651
3652 return 0;
3653 }
3654
3655 static int __spi_async(struct spi_device *spi, struct spi_message *message)
3656 {
3657 struct spi_controller *ctlr = spi->controller;
3658 struct spi_transfer *xfer;
3659
3660 /*
3661 * Some controllers do not support doing regular SPI transfers. Return
3662 * ENOTSUPP when this is the case.
3663 */
3664 if (!ctlr->transfer)
3665 return -ENOTSUPP;
3666
3667 message->spi = spi;
3668
3669 SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics, spi_async);
3670 SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_async);
3671
3672 trace_spi_message_submit(message);
3673
3674 if (!ctlr->ptp_sts_supported) {
3675 list_for_each_entry(xfer, &message->transfers, transfer_list) {
3676 xfer->ptp_sts_word_pre = 0;
3677 ptp_read_system_prets(xfer->ptp_sts);
3678 }
3679 }
3680
3681 return ctlr->transfer(spi, message);
3682 }
3683
3684 /**
3685 * spi_async - asynchronous SPI transfer
3686 * @spi: device with which data will be exchanged
3687 * @message: describes the data transfers, including completion callback
3688 * Context: any (irqs may be blocked, etc)
3689 *
3690  * This call may be used from interrupt and other contexts which can't
3691  * sleep, as well as from task contexts which can sleep.
3692 *
3693 * The completion callback is invoked in a context which can't sleep.
3694 * Before that invocation, the value of message->status is undefined.
3695 * When the callback is issued, message->status holds either zero (to
3696 * indicate complete success) or a negative error code. After that
3697 * callback returns, the driver which issued the transfer request may
3698 * deallocate the associated memory; it's no longer in use by any SPI
3699 * core or controller driver code.
3700 *
3701 * Note that although all messages to a spi_device are handled in
3702 * FIFO order, messages may go to different devices in other orders.
3703 * Some device might be higher priority, or have various "hard" access
3704 * time requirements, for example.
3705 *
3706 * On detection of any fault during the transfer, processing of
3707 * the entire message is aborted, and the device is deselected.
3708 * Until returning from the associated message completion callback,
3709 * no other spi_message queued to that device will be processed.
3710 * (This rule applies equally to all the synchronous transfer calls,
3711 * which are wrappers around this core asynchronous primitive.)
3712 *
3713 * Return: zero on success, else a negative error code.
3714 */
3715 int spi_async(struct spi_device *spi, struct spi_message *message)
3716 {
3717 struct spi_controller *ctlr = spi->controller;
3718 int ret;
3719 unsigned long flags;
3720
3721 ret = __spi_validate(spi, message);
3722 if (ret != 0)
3723 return ret;
3724
3725 spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
3726
3727 if (ctlr->bus_lock_flag)
3728 ret = -EBUSY;
3729 else
3730 ret = __spi_async(spi, message);
3731
3732 spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
3733
3734 return ret;
3735 }
3736 EXPORT_SYMBOL_GPL(spi_async);
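
/*
 * Illustrative sketch of an asynchronous submission; "struct my_dev",
 * my_complete() and the member names are hypothetical. The message,
 * transfer and buffers must stay valid until the callback has run, and
 * the callback itself runs in a context that can't sleep.
 *
 *	static void my_complete(void *context)
 *	{
 *		struct my_dev *md = context;
 *
 *		if (md->msg.status)
 *			dev_err(md->dev, "transfer failed: %d\n",
 *				md->msg.status);
 *	}
 *
 *	spi_message_init(&md->msg);
 *	md->xfer.tx_buf = md->tx_buf;
 *	md->xfer.len = sizeof(md->tx_buf);
 *	spi_message_add_tail(&md->xfer, &md->msg);
 *	md->msg.complete = my_complete;
 *	md->msg.context = md;
 *	ret = spi_async(spi, &md->msg);
 */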
3737
3738 /**
3739 * spi_async_locked - version of spi_async with exclusive bus usage
3740 * @spi: device with which data will be exchanged
3741 * @message: describes the data transfers, including completion callback
3742 * Context: any (irqs may be blocked, etc)
3743 *
3744  * This call may be used from interrupt and other contexts which can't
3745  * sleep, as well as from task contexts which can sleep.
3746 *
3747 * The completion callback is invoked in a context which can't sleep.
3748 * Before that invocation, the value of message->status is undefined.
3749 * When the callback is issued, message->status holds either zero (to
3750 * indicate complete success) or a negative error code. After that
3751 * callback returns, the driver which issued the transfer request may
3752 * deallocate the associated memory; it's no longer in use by any SPI
3753 * core or controller driver code.
3754 *
3755 * Note that although all messages to a spi_device are handled in
3756 * FIFO order, messages may go to different devices in other orders.
3757 * Some device might be higher priority, or have various "hard" access
3758 * time requirements, for example.
3759 *
3760 * On detection of any fault during the transfer, processing of
3761 * the entire message is aborted, and the device is deselected.
3762 * Until returning from the associated message completion callback,
3763 * no other spi_message queued to that device will be processed.
3764 * (This rule applies equally to all the synchronous transfer calls,
3765 * which are wrappers around this core asynchronous primitive.)
3766 *
3767 * Return: zero on success, else a negative error code.
3768 */
3769 int spi_async_locked(struct spi_device *spi, struct spi_message *message)
3770 {
3771 struct spi_controller *ctlr = spi->controller;
3772 int ret;
3773 unsigned long flags;
3774
3775 ret = __spi_validate(spi, message);
3776 if (ret != 0)
3777 return ret;
3778
3779 spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
3780
3781 ret = __spi_async(spi, message);
3782
3783 spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
3784
3785 return ret;
3786
3787 }
3788 EXPORT_SYMBOL_GPL(spi_async_locked);
3789
3790 /*-------------------------------------------------------------------------*/
3791
3792 /* Utility methods for SPI protocol drivers, layered on
3793 * top of the core. Some other utility methods are defined as
3794 * inline functions.
3795 */
3796
3797 static void spi_complete(void *arg)
3798 {
3799 complete(arg);
3800 }
3801
3802 static int __spi_sync(struct spi_device *spi, struct spi_message *message)
3803 {
3804 DECLARE_COMPLETION_ONSTACK(done);
3805 int status;
3806 struct spi_controller *ctlr = spi->controller;
3807 unsigned long flags;
3808
3809 status = __spi_validate(spi, message);
3810 if (status != 0)
3811 return status;
3812
3813 message->complete = spi_complete;
3814 message->context = &done;
3815 message->spi = spi;
3816
3817 SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics, spi_sync);
3818 SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_sync);
3819
3820 	/* If we're not using the legacy transfer method then we will
3821 	 * try to transfer in the calling context, so special-case that
3822 	 * path. This code would be less tricky if we could remove the
3823 	 * support for driver-implemented message queues.
3824 	 */
3825 if (ctlr->transfer == spi_queued_transfer) {
3826 spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
3827
3828 trace_spi_message_submit(message);
3829
3830 status = __spi_queued_transfer(spi, message, false);
3831
3832 spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
3833 } else {
3834 status = spi_async_locked(spi, message);
3835 }
3836
3837 if (status == 0) {
3838 /* Push out the messages in the calling context if we
3839 * can.
3840 */
3841 if (ctlr->transfer == spi_queued_transfer) {
3842 SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics,
3843 spi_sync_immediate);
3844 SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics,
3845 spi_sync_immediate);
3846 __spi_pump_messages(ctlr, false);
3847 }
3848
3849 wait_for_completion(&done);
3850 status = message->status;
3851 }
3852 message->context = NULL;
3853 return status;
3854 }
3855
3856 /**
3857 * spi_sync - blocking/synchronous SPI data transfers
3858 * @spi: device with which data will be exchanged
3859 * @message: describes the data transfers
3860 * Context: can sleep
3861 *
3862 * This call may only be used from a context that may sleep. The sleep
3863 * is non-interruptible, and has no timeout. Low-overhead controller
3864 * drivers may DMA directly into and out of the message buffers.
3865 *
3866 * Note that the SPI device's chip select is active during the message,
3867 * and then is normally disabled between messages. Drivers for some
3868 * frequently-used devices may want to minimize costs of selecting a chip,
3869 * by leaving it selected in anticipation that the next message will go
3870 * to the same chip. (That may increase power usage.)
3871 *
3872 * Also, the caller is guaranteeing that the memory associated with the
3873 * message will not be freed before this call returns.
3874 *
3875 * Return: zero on success, else a negative error code.
3876 */
3877 int spi_sync(struct spi_device *spi, struct spi_message *message)
3878 {
3879 int ret;
3880
3881 mutex_lock(&spi->controller->bus_lock_mutex);
3882 ret = __spi_sync(spi, message);
3883 mutex_unlock(&spi->controller->bus_lock_mutex);
3884
3885 return ret;
3886 }
3887 EXPORT_SYMBOL_GPL(spi_sync);
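
/*
 * Illustrative sketch of a synchronous full-duplex exchange from process
 * context; "tx", "rx" and "len" stand for caller-owned DMA-safe buffers
 * and are hypothetical names.
 *
 *	struct spi_transfer xfer = {
 *		.tx_buf = tx,
 *		.rx_buf = rx,
 *		.len = len,
 *	};
 *	struct spi_message msg;
 *	int ret;
 *
 *	spi_message_init_with_transfers(&msg, &xfer, 1);
 *	ret = spi_sync(spi, &msg);
 */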
3888
3889 /**
3890 * spi_sync_locked - version of spi_sync with exclusive bus usage
3891 * @spi: device with which data will be exchanged
3892 * @message: describes the data transfers
3893 * Context: can sleep
3894 *
3895 * This call may only be used from a context that may sleep. The sleep
3896 * is non-interruptible, and has no timeout. Low-overhead controller
3897 * drivers may DMA directly into and out of the message buffers.
3898 *
3899 * This call should be used by drivers that require exclusive access to the
3900 * SPI bus. It has to be preceded by a spi_bus_lock call. The SPI bus must
3901 * be released by a spi_bus_unlock call when the exclusive access is over.
3902 *
3903 * Return: zero on success, else a negative error code.
3904 */
3905 int spi_sync_locked(struct spi_device *spi, struct spi_message *message)
3906 {
3907 return __spi_sync(spi, message);
3908 }
3909 EXPORT_SYMBOL_GPL(spi_sync_locked);
3910
3911 /**
3912 * spi_bus_lock - obtain a lock for exclusive SPI bus usage
3913 * @ctlr: SPI bus master that should be locked for exclusive bus access
3914 * Context: can sleep
3915 *
3916 * This call may only be used from a context that may sleep. The sleep
3917 * is non-interruptible, and has no timeout.
3918 *
3919 * This call should be used by drivers that require exclusive access to the
3920 * SPI bus. The SPI bus must be released by a spi_bus_unlock call when the
3921 * exclusive access is over. Data transfer must be done by spi_sync_locked
3922 * and spi_async_locked calls when the SPI bus lock is held.
3923 *
3924 * Return: always zero.
3925 */
3926 int spi_bus_lock(struct spi_controller *ctlr)
3927 {
3928 unsigned long flags;
3929
3930 mutex_lock(&ctlr->bus_lock_mutex);
3931
3932 spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
3933 ctlr->bus_lock_flag = 1;
3934 spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
3935
3936 /* mutex remains locked until spi_bus_unlock is called */
3937
3938 return 0;
3939 }
3940 EXPORT_SYMBOL_GPL(spi_bus_lock);
3941
3942 /**
3943 * spi_bus_unlock - release the lock for exclusive SPI bus usage
3944 * @ctlr: SPI bus master that was locked for exclusive bus access
3945 * Context: can sleep
3946 *
3947 * This call may only be used from a context that may sleep. The sleep
3948 * is non-interruptible, and has no timeout.
3949 *
3950 * This call releases an SPI bus lock previously obtained by an spi_bus_lock
3951 * call.
3952 *
3953 * Return: always zero.
3954 */
3955 int spi_bus_unlock(struct spi_controller *ctlr)
3956 {
3957 ctlr->bus_lock_flag = 0;
3958
3959 mutex_unlock(&ctlr->bus_lock_mutex);
3960
3961 return 0;
3962 }
3963 EXPORT_SYMBOL_GPL(spi_bus_unlock);
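
/*
 * Illustrative sketch of the lock/unlock pairing, assuming two prepared
 * messages that must reach the bus back-to-back with no other client in
 * between ("msg1" and "msg2" are hypothetical). Only the *_locked entry
 * points may be used while the bus lock is held.
 *
 *	spi_bus_lock(spi->controller);
 *	ret = spi_sync_locked(spi, &msg1);
 *	if (!ret)
 *		ret = spi_sync_locked(spi, &msg2);
 *	spi_bus_unlock(spi->controller);
 */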
3964
3965 /* portable code must never pass more than 32 bytes */
3966 #define SPI_BUFSIZ max(32, SMP_CACHE_BYTES)
3967
3968 static u8 *buf;
3969
3970 /**
3971 * spi_write_then_read - SPI synchronous write followed by read
3972 * @spi: device with which data will be exchanged
3973 * @txbuf: data to be written (need not be dma-safe)
3974 * @n_tx: size of txbuf, in bytes
3975 * @rxbuf: buffer into which data will be read (need not be dma-safe)
3976 * @n_rx: size of rxbuf, in bytes
3977 * Context: can sleep
3978 *
3979 * This performs a half duplex MicroWire style transaction with the
3980 * device, sending txbuf and then reading rxbuf. The return value
3981 * is zero for success, else a negative errno status code.
3982 * This call may only be used from a context that may sleep.
3983 *
3984 * Parameters to this routine are always copied using a small buffer.
3985 * Performance-sensitive or bulk transfer code should instead use
3986 * spi_{async,sync}() calls with dma-safe buffers.
3987 *
3988 * Return: zero on success, else a negative error code.
3989 */
3990 int spi_write_then_read(struct spi_device *spi,
3991 const void *txbuf, unsigned n_tx,
3992 void *rxbuf, unsigned n_rx)
3993 {
3994 static DEFINE_MUTEX(lock);
3995
3996 int status;
3997 struct spi_message message;
3998 struct spi_transfer x[2];
3999 u8 *local_buf;
4000
4001 	/* Use the preallocated DMA-safe buffer if we can. Copying is
4002 	 * unavoidable here (this call is a pure convenience), but we can
4003 	 * keep heap costs out of the hot path unless someone else is
4004 	 * using the preallocated buffer or the transfer is too large.
4005 	 */
4006 if ((n_tx + n_rx) > SPI_BUFSIZ || !mutex_trylock(&lock)) {
4007 local_buf = kmalloc(max((unsigned)SPI_BUFSIZ, n_tx + n_rx),
4008 GFP_KERNEL | GFP_DMA);
4009 if (!local_buf)
4010 return -ENOMEM;
4011 } else {
4012 local_buf = buf;
4013 }
4014
4015 spi_message_init(&message);
4016 memset(x, 0, sizeof(x));
4017 if (n_tx) {
4018 x[0].len = n_tx;
4019 spi_message_add_tail(&x[0], &message);
4020 }
4021 if (n_rx) {
4022 x[1].len = n_rx;
4023 spi_message_add_tail(&x[1], &message);
4024 }
4025
4026 memcpy(local_buf, txbuf, n_tx);
4027 x[0].tx_buf = local_buf;
4028 x[1].rx_buf = local_buf + n_tx;
4029
4030 	/* do the I/O */
4031 status = spi_sync(spi, &message);
4032 if (status == 0)
4033 memcpy(rxbuf, x[1].rx_buf, n_rx);
4034
4035 if (x[0].tx_buf == buf)
4036 mutex_unlock(&lock);
4037 else
4038 kfree(local_buf);
4039
4040 return status;
4041 }
4042 EXPORT_SYMBOL_GPL(spi_write_then_read);
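
/*
 * Illustrative sketch: read two bytes back after writing a one-byte
 * command (the 0x0f opcode is hypothetical). For common single-register
 * accesses the spi_w8r8()/spi_w8r16() helpers in <linux/spi/spi.h> wrap
 * this call.
 *
 *	u8 cmd = 0x0f;
 *	u8 val[2];
 *	int ret;
 *
 *	ret = spi_write_then_read(spi, &cmd, 1, val, sizeof(val));
 */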
4043
4044 /*-------------------------------------------------------------------------*/
4045
4046 #if IS_ENABLED(CONFIG_OF)
4047 /* must call put_device() when done with the returned spi_device */
4048 struct spi_device *of_find_spi_device_by_node(struct device_node *node)
4049 {
4050 struct device *dev = bus_find_device_by_of_node(&spi_bus_type, node);
4051
4052 return dev ? to_spi_device(dev) : NULL;
4053 }
4054 EXPORT_SYMBOL_GPL(of_find_spi_device_by_node);
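
/*
 * Illustrative sketch, assuming the caller already holds a device_node
 * ("np" is hypothetical): the lookup returns a referenced device, so a
 * put_device() must balance it.
 *
 *	struct spi_device *spi = of_find_spi_device_by_node(np);
 *
 *	if (spi) {
 *		dev_info(&spi->dev, "found %s\n", spi->modalias);
 *		put_device(&spi->dev);
 *	}
 */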
4055 #endif /* IS_ENABLED(CONFIG_OF) */
4056
4057 #if IS_ENABLED(CONFIG_OF_DYNAMIC)
4058 /* SPI controllers are not on the spi_bus, so we must find them another way */
4059 static struct spi_controller *of_find_spi_controller_by_node(struct device_node *node)
4060 {
4061 struct device *dev;
4062
4063 dev = class_find_device_by_of_node(&spi_master_class, node);
4064 if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE))
4065 dev = class_find_device_by_of_node(&spi_slave_class, node);
4066 if (!dev)
4067 return NULL;
4068
4069 	/* reference taken in class_find_device() */
4070 return container_of(dev, struct spi_controller, dev);
4071 }
4072
4073 static int of_spi_notify(struct notifier_block *nb, unsigned long action,
4074 void *arg)
4075 {
4076 struct of_reconfig_data *rd = arg;
4077 struct spi_controller *ctlr;
4078 struct spi_device *spi;
4079
4080 switch (of_reconfig_get_state_change(action, arg)) {
4081 case OF_RECONFIG_CHANGE_ADD:
4082 ctlr = of_find_spi_controller_by_node(rd->dn->parent);
4083 if (ctlr == NULL)
4084 return NOTIFY_OK; /* not for us */
4085
4086 if (of_node_test_and_set_flag(rd->dn, OF_POPULATED)) {
4087 put_device(&ctlr->dev);
4088 return NOTIFY_OK;
4089 }
4090
4091 spi = of_register_spi_device(ctlr, rd->dn);
4092 put_device(&ctlr->dev);
4093
4094 if (IS_ERR(spi)) {
4095 pr_err("%s: failed to create for '%pOF'\n",
4096 __func__, rd->dn);
4097 of_node_clear_flag(rd->dn, OF_POPULATED);
4098 return notifier_from_errno(PTR_ERR(spi));
4099 }
4100 break;
4101
4102 case OF_RECONFIG_CHANGE_REMOVE:
4103 /* already depopulated? */
4104 if (!of_node_check_flag(rd->dn, OF_POPULATED))
4105 return NOTIFY_OK;
4106
4107 /* find our device by node */
4108 spi = of_find_spi_device_by_node(rd->dn);
4109 if (spi == NULL)
4110 return NOTIFY_OK; /* no? not meant for us */
4111
4112 /* unregister takes one ref away */
4113 spi_unregister_device(spi);
4114
4115 /* and put the reference of the find */
4116 put_device(&spi->dev);
4117 break;
4118 }
4119
4120 return NOTIFY_OK;
4121 }
4122
4123 static struct notifier_block spi_of_notifier = {
4124 .notifier_call = of_spi_notify,
4125 };
4126 #else /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
4127 extern struct notifier_block spi_of_notifier;
4128 #endif /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
4129
4130 #if IS_ENABLED(CONFIG_ACPI)
4131 static int spi_acpi_controller_match(struct device *dev, const void *data)
4132 {
4133 return ACPI_COMPANION(dev->parent) == data;
4134 }
4135
4136 static struct spi_controller *acpi_spi_find_controller_by_adev(struct acpi_device *adev)
4137 {
4138 struct device *dev;
4139
4140 dev = class_find_device(&spi_master_class, NULL, adev,
4141 spi_acpi_controller_match);
4142 if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE))
4143 dev = class_find_device(&spi_slave_class, NULL, adev,
4144 spi_acpi_controller_match);
4145 if (!dev)
4146 return NULL;
4147
4148 return container_of(dev, struct spi_controller, dev);
4149 }
4150
4151 static struct spi_device *acpi_spi_find_device_by_adev(struct acpi_device *adev)
4152 {
4153 struct device *dev;
4154
4155 dev = bus_find_device_by_acpi_dev(&spi_bus_type, adev);
4156 return to_spi_device(dev);
4157 }
4158
4159 static int acpi_spi_notify(struct notifier_block *nb, unsigned long value,
4160 void *arg)
4161 {
4162 struct acpi_device *adev = arg;
4163 struct spi_controller *ctlr;
4164 struct spi_device *spi;
4165
4166 switch (value) {
4167 case ACPI_RECONFIG_DEVICE_ADD:
4168 ctlr = acpi_spi_find_controller_by_adev(adev->parent);
4169 if (!ctlr)
4170 break;
4171
4172 acpi_register_spi_device(ctlr, adev);
4173 put_device(&ctlr->dev);
4174 break;
4175 case ACPI_RECONFIG_DEVICE_REMOVE:
4176 if (!acpi_device_enumerated(adev))
4177 break;
4178
4179 spi = acpi_spi_find_device_by_adev(adev);
4180 if (!spi)
4181 break;
4182
4183 spi_unregister_device(spi);
4184 put_device(&spi->dev);
4185 break;
4186 }
4187
4188 return NOTIFY_OK;
4189 }
4190
4191 static struct notifier_block spi_acpi_notifier = {
4192 .notifier_call = acpi_spi_notify,
4193 };
4194 #else
4195 extern struct notifier_block spi_acpi_notifier;
4196 #endif
4197
4198 static int __init spi_init(void)
4199 {
4200 int status;
4201
4202 buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL);
4203 if (!buf) {
4204 status = -ENOMEM;
4205 goto err0;
4206 }
4207
4208 status = bus_register(&spi_bus_type);
4209 if (status < 0)
4210 goto err1;
4211
4212 status = class_register(&spi_master_class);
4213 if (status < 0)
4214 goto err2;
4215
4216 if (IS_ENABLED(CONFIG_SPI_SLAVE)) {
4217 status = class_register(&spi_slave_class);
4218 if (status < 0)
4219 goto err3;
4220 }
4221
4222 if (IS_ENABLED(CONFIG_OF_DYNAMIC))
4223 WARN_ON(of_reconfig_notifier_register(&spi_of_notifier));
4224 if (IS_ENABLED(CONFIG_ACPI))
4225 WARN_ON(acpi_reconfig_notifier_register(&spi_acpi_notifier));
4226
4227 return 0;
4228
4229 err3:
4230 class_unregister(&spi_master_class);
4231 err2:
4232 bus_unregister(&spi_bus_type);
4233 err1:
4234 kfree(buf);
4235 buf = NULL;
4236 err0:
4237 return status;
4238 }
4239
4240 /* board_info is normally registered in arch_initcall(),
4241  * but even essential drivers wait till later.
4242  *
4243  * REVISIT: only boardinfo really needs static linking. The rest (device and
4244  * driver registration) _could_ be dynamically linked (modular) ... costs
4245  * include needing to make boardinfo data structures much more public.
4246  */
4247 postcore_initcall(spi_init);
4248