// SPDX-License-Identifier: GPL-2.0-or-later
// SPI init/core code
//
// Copyright (C) 2005 David Brownell
// Copyright (C) 2008 Secret Lab Technologies Ltd.

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/cache.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/mutex.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/clk/clk-conf.h>
#include <linux/slab.h>
#include <linux/mod_devicetable.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
#include <linux/of_gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/property.h>
#include <linux/export.h>
#include <linux/sched/rt.h>
#include <uapi/linux/sched/types.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/ioport.h>
#include <linux/acpi.h>
#include <linux/highmem.h>
#include <linux/idr.h>
#include <linux/platform_data/x86/apple.h>

#define CREATE_TRACE_POINTS
#include <trace/events/spi.h>
EXPORT_TRACEPOINT_SYMBOL(spi_transfer_start);
EXPORT_TRACEPOINT_SYMBOL(spi_transfer_stop);

#include "internals.h"

static DEFINE_IDR(spi_master_idr);

static void spidev_release(struct device *dev)
{
	struct spi_device *spi = to_spi_device(dev);

	spi_controller_put(spi->controller);
	kfree(spi->driver_override);
	kfree(spi);
}

static ssize_t
modalias_show(struct device *dev, struct device_attribute *a, char *buf)
{
	const struct spi_device *spi = to_spi_device(dev);
	int len;

	len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
	if (len != -ENODEV)
		return len;

	return sprintf(buf, "%s%s\n", SPI_MODULE_PREFIX, spi->modalias);
}
static DEVICE_ATTR_RO(modalias);

static ssize_t driver_override_store(struct device *dev,
				     struct device_attribute *a,
				     const char *buf, size_t count)
{
	struct spi_device *spi = to_spi_device(dev);
	const char *end = memchr(buf, '\n', count);
	const size_t len = end ? end - buf : count;
	const char *driver_override, *old;

	/* We need to keep extra room for a newline when displaying value */
	if (len >= (PAGE_SIZE - 1))
		return -EINVAL;

	driver_override = kstrndup(buf, len, GFP_KERNEL);
	if (!driver_override)
		return -ENOMEM;

	device_lock(dev);
	old = spi->driver_override;
	if (len) {
		spi->driver_override = driver_override;
	} else {
		/* Empty string, disable driver override */
		spi->driver_override = NULL;
		kfree(driver_override);
	}
	device_unlock(dev);
	kfree(old);

	return count;
}

static ssize_t driver_override_show(struct device *dev,
				    struct device_attribute *a, char *buf)
{
	const struct spi_device *spi = to_spi_device(dev);
	ssize_t len;

	device_lock(dev);
	len = snprintf(buf, PAGE_SIZE, "%s\n", spi->driver_override ? : "");
	device_unlock(dev);
	return len;
}
static DEVICE_ATTR_RW(driver_override);

#define SPI_STATISTICS_ATTRS(field, file)				\
static ssize_t spi_controller_##field##_show(struct device *dev,	\
					     struct device_attribute *attr, \
					     char *buf)			\
{									\
	struct spi_controller *ctlr = container_of(dev,			\
					 struct spi_controller, dev);	\
	return spi_statistics_##field##_show(&ctlr->statistics, buf);	\
}									\
static struct device_attribute dev_attr_spi_controller_##field = {	\
	.attr = { .name = file, .mode = 0444 },				\
	.show = spi_controller_##field##_show,				\
};									\
static ssize_t spi_device_##field##_show(struct device *dev,		\
					 struct device_attribute *attr,	\
					 char *buf)			\
{									\
	struct spi_device *spi = to_spi_device(dev);			\
	return spi_statistics_##field##_show(&spi->statistics, buf);	\
}									\
static struct device_attribute dev_attr_spi_device_##field = {		\
	.attr = { .name = file, .mode = 0444 },				\
	.show = spi_device_##field##_show,				\
}

#define SPI_STATISTICS_SHOW_NAME(name, file, field, format_string)	\
static ssize_t spi_statistics_##name##_show(struct spi_statistics *stat, \
					    char *buf)			\
{									\
	unsigned long flags;						\
	ssize_t len;							\
	spin_lock_irqsave(&stat->lock, flags);				\
	len = sprintf(buf, format_string, stat->field);			\
	spin_unlock_irqrestore(&stat->lock, flags);			\
	return len;							\
}									\
SPI_STATISTICS_ATTRS(name, file)

#define SPI_STATISTICS_SHOW(field, format_string)			\
	SPI_STATISTICS_SHOW_NAME(field, __stringify(field),		\
				 field, format_string)

SPI_STATISTICS_SHOW(messages, "%lu");
SPI_STATISTICS_SHOW(transfers, "%lu");
SPI_STATISTICS_SHOW(errors, "%lu");
SPI_STATISTICS_SHOW(timedout, "%lu");

SPI_STATISTICS_SHOW(spi_sync, "%lu");
SPI_STATISTICS_SHOW(spi_sync_immediate, "%lu");
SPI_STATISTICS_SHOW(spi_async, "%lu");

SPI_STATISTICS_SHOW(bytes, "%llu");
SPI_STATISTICS_SHOW(bytes_rx, "%llu");
SPI_STATISTICS_SHOW(bytes_tx, "%llu");

#define SPI_STATISTICS_TRANSFER_BYTES_HISTO(index, number)		\
	SPI_STATISTICS_SHOW_NAME(transfer_bytes_histo##index,		\
				 "transfer_bytes_histo_" number,	\
				 transfer_bytes_histo[index], "%lu")
SPI_STATISTICS_TRANSFER_BYTES_HISTO(0, "0-1");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(1, "2-3");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(2, "4-7");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(3, "8-15");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(4, "16-31");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(5, "32-63");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(6, "64-127");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(7, "128-255");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(8, "256-511");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(9, "512-1023");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(10, "1024-2047");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(11, "2048-4095");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(12, "4096-8191");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(13, "8192-16383");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(14, "16384-32767");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(15, "32768-65535");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(16, "65536+");

SPI_STATISTICS_SHOW(transfers_split_maxsize, "%lu");

static struct attribute *spi_dev_attrs[] = {
	&dev_attr_modalias.attr,
	&dev_attr_driver_override.attr,
	NULL,
};

static const struct attribute_group spi_dev_group = {
	.attrs  = spi_dev_attrs,
};

static struct attribute *spi_device_statistics_attrs[] = {
	&dev_attr_spi_device_messages.attr,
	&dev_attr_spi_device_transfers.attr,
	&dev_attr_spi_device_errors.attr,
	&dev_attr_spi_device_timedout.attr,
	&dev_attr_spi_device_spi_sync.attr,
	&dev_attr_spi_device_spi_sync_immediate.attr,
	&dev_attr_spi_device_spi_async.attr,
	&dev_attr_spi_device_bytes.attr,
	&dev_attr_spi_device_bytes_rx.attr,
	&dev_attr_spi_device_bytes_tx.attr,
	&dev_attr_spi_device_transfer_bytes_histo0.attr,
	&dev_attr_spi_device_transfer_bytes_histo1.attr,
	&dev_attr_spi_device_transfer_bytes_histo2.attr,
	&dev_attr_spi_device_transfer_bytes_histo3.attr,
	&dev_attr_spi_device_transfer_bytes_histo4.attr,
	&dev_attr_spi_device_transfer_bytes_histo5.attr,
	&dev_attr_spi_device_transfer_bytes_histo6.attr,
	&dev_attr_spi_device_transfer_bytes_histo7.attr,
	&dev_attr_spi_device_transfer_bytes_histo8.attr,
	&dev_attr_spi_device_transfer_bytes_histo9.attr,
	&dev_attr_spi_device_transfer_bytes_histo10.attr,
	&dev_attr_spi_device_transfer_bytes_histo11.attr,
	&dev_attr_spi_device_transfer_bytes_histo12.attr,
	&dev_attr_spi_device_transfer_bytes_histo13.attr,
	&dev_attr_spi_device_transfer_bytes_histo14.attr,
	&dev_attr_spi_device_transfer_bytes_histo15.attr,
	&dev_attr_spi_device_transfer_bytes_histo16.attr,
	&dev_attr_spi_device_transfers_split_maxsize.attr,
	NULL,
};

static const struct attribute_group spi_device_statistics_group = {
	.name  = "statistics",
	.attrs  = spi_device_statistics_attrs,
};

static const struct attribute_group *spi_dev_groups[] = {
	&spi_dev_group,
	&spi_device_statistics_group,
	NULL,
};

static struct attribute *spi_controller_statistics_attrs[] = {
	&dev_attr_spi_controller_messages.attr,
	&dev_attr_spi_controller_transfers.attr,
	&dev_attr_spi_controller_errors.attr,
	&dev_attr_spi_controller_timedout.attr,
	&dev_attr_spi_controller_spi_sync.attr,
	&dev_attr_spi_controller_spi_sync_immediate.attr,
	&dev_attr_spi_controller_spi_async.attr,
	&dev_attr_spi_controller_bytes.attr,
	&dev_attr_spi_controller_bytes_rx.attr,
	&dev_attr_spi_controller_bytes_tx.attr,
	&dev_attr_spi_controller_transfer_bytes_histo0.attr,
	&dev_attr_spi_controller_transfer_bytes_histo1.attr,
	&dev_attr_spi_controller_transfer_bytes_histo2.attr,
	&dev_attr_spi_controller_transfer_bytes_histo3.attr,
	&dev_attr_spi_controller_transfer_bytes_histo4.attr,
	&dev_attr_spi_controller_transfer_bytes_histo5.attr,
	&dev_attr_spi_controller_transfer_bytes_histo6.attr,
	&dev_attr_spi_controller_transfer_bytes_histo7.attr,
	&dev_attr_spi_controller_transfer_bytes_histo8.attr,
	&dev_attr_spi_controller_transfer_bytes_histo9.attr,
	&dev_attr_spi_controller_transfer_bytes_histo10.attr,
	&dev_attr_spi_controller_transfer_bytes_histo11.attr,
	&dev_attr_spi_controller_transfer_bytes_histo12.attr,
	&dev_attr_spi_controller_transfer_bytes_histo13.attr,
	&dev_attr_spi_controller_transfer_bytes_histo14.attr,
	&dev_attr_spi_controller_transfer_bytes_histo15.attr,
	&dev_attr_spi_controller_transfer_bytes_histo16.attr,
	&dev_attr_spi_controller_transfers_split_maxsize.attr,
	NULL,
};

static const struct attribute_group spi_controller_statistics_group = {
	.name  = "statistics",
	.attrs  = spi_controller_statistics_attrs,
};

static const struct attribute_group *spi_master_groups[] = {
	&spi_controller_statistics_group,
	NULL,
};

void spi_statistics_add_transfer_stats(struct spi_statistics *stats,
				       struct spi_transfer *xfer,
				       struct spi_controller *ctlr)
{
	unsigned long flags;
	int l2len = min(fls(xfer->len), SPI_STATISTICS_HISTO_SIZE) - 1;

	if (l2len < 0)
		l2len = 0;

	spin_lock_irqsave(&stats->lock, flags);

	stats->transfers++;
	stats->transfer_bytes_histo[l2len]++;

	stats->bytes += xfer->len;
	if ((xfer->tx_buf) &&
	    (xfer->tx_buf != ctlr->dummy_tx))
		stats->bytes_tx += xfer->len;
	if ((xfer->rx_buf) &&
	    (xfer->rx_buf != ctlr->dummy_rx))
		stats->bytes_rx += xfer->len;

	spin_unlock_irqrestore(&stats->lock, flags);
}
EXPORT_SYMBOL_GPL(spi_statistics_add_transfer_stats);

/* modalias support makes "modprobe $MODALIAS" new-style hotplug work,
 * and the sysfs version makes coldplug work too.
 */

static const struct spi_device_id *spi_match_id(const struct spi_device_id *id,
						const struct spi_device *sdev)
{
	while (id->name[0]) {
		if (!strcmp(sdev->modalias, id->name))
			return id;
		id++;
	}
	return NULL;
}

const struct spi_device_id *spi_get_device_id(const struct spi_device *sdev)
{
	const struct spi_driver *sdrv = to_spi_driver(sdev->dev.driver);

	return spi_match_id(sdrv->id_table, sdev);
}
EXPORT_SYMBOL_GPL(spi_get_device_id);
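
/*
 * Example (illustrative sketch, not part of this file): a client driver can
 * call spi_get_device_id() from its probe() to recover the spi_device_id it
 * matched on and use driver_data to distinguish chip variants.  The "foo"
 * names and foo_init_chip() below are hypothetical.
 *
 *	enum foo_type { FOO_TYPE_A, FOO_TYPE_B };
 *
 *	static const struct spi_device_id foo_ids[] = {
 *		{ "foo-a", FOO_TYPE_A },
 *		{ "foo-b", FOO_TYPE_B },
 *		{ }
 *	};
 *	MODULE_DEVICE_TABLE(spi, foo_ids);
 *
 *	static int foo_probe(struct spi_device *spi)
 *	{
 *		const struct spi_device_id *id = spi_get_device_id(spi);
 *
 *		// driver_data carries the per-chip variant
 *		return foo_init_chip(spi, (enum foo_type)id->driver_data);
 *	}
 */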

static int spi_match_device(struct device *dev, struct device_driver *drv)
{
	const struct spi_device *spi = to_spi_device(dev);
	const struct spi_driver *sdrv = to_spi_driver(drv);

	/* Check override first, and if set, only use the named driver */
	if (spi->driver_override)
		return strcmp(spi->driver_override, drv->name) == 0;

	/* Attempt an OF style match */
	if (of_driver_match_device(dev, drv))
		return 1;

	/* Then try ACPI */
	if (acpi_driver_match_device(dev, drv))
		return 1;

	if (sdrv->id_table)
		return !!spi_match_id(sdrv->id_table, spi);

	return strcmp(spi->modalias, drv->name) == 0;
}

static int spi_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	const struct spi_device *spi = to_spi_device(dev);
	int rc;

	rc = acpi_device_uevent_modalias(dev, env);
	if (rc != -ENODEV)
		return rc;

	return add_uevent_var(env, "MODALIAS=%s%s", SPI_MODULE_PREFIX, spi->modalias);
}

static int spi_probe(struct device *dev)
{
	const struct spi_driver *sdrv = to_spi_driver(dev->driver);
	struct spi_device *spi = to_spi_device(dev);
	int ret;

	ret = of_clk_set_defaults(dev->of_node, false);
	if (ret)
		return ret;

	if (dev->of_node) {
		spi->irq = of_irq_get(dev->of_node, 0);
		if (spi->irq == -EPROBE_DEFER)
			return -EPROBE_DEFER;
		if (spi->irq < 0)
			spi->irq = 0;
	}

	ret = dev_pm_domain_attach(dev, true);
	if (ret)
		return ret;

	if (sdrv->probe) {
		ret = sdrv->probe(spi);
		if (ret)
			dev_pm_domain_detach(dev, true);
	}

	return ret;
}

static void spi_remove(struct device *dev)
{
	const struct spi_driver *sdrv = to_spi_driver(dev->driver);

	if (sdrv->remove) {
		int ret;

		ret = sdrv->remove(to_spi_device(dev));
		if (ret)
			dev_warn(dev,
				 "Failed to unbind driver (%pe), ignoring\n",
				 ERR_PTR(ret));
	}

	dev_pm_domain_detach(dev, true);
}

static void spi_shutdown(struct device *dev)
{
	if (dev->driver) {
		const struct spi_driver *sdrv = to_spi_driver(dev->driver);

		if (sdrv->shutdown)
			sdrv->shutdown(to_spi_device(dev));
	}
}

struct bus_type spi_bus_type = {
	.name		= "spi",
	.dev_groups	= spi_dev_groups,
	.match		= spi_match_device,
	.uevent		= spi_uevent,
	.probe		= spi_probe,
	.remove		= spi_remove,
	.shutdown	= spi_shutdown,
};
EXPORT_SYMBOL_GPL(spi_bus_type);

/**
 * __spi_register_driver - register a SPI driver
 * @owner: owner module of the driver to register
 * @sdrv: the driver to register
 * Context: can sleep
 *
 * Return: zero on success, else a negative error code.
 */
int __spi_register_driver(struct module *owner, struct spi_driver *sdrv)
{
	sdrv->driver.owner = owner;
	sdrv->driver.bus = &spi_bus_type;

	/*
	 * For Really Good Reasons we use spi: modaliases not of:
	 * modaliases for DT so module autoloading won't work if we
	 * don't have a spi_device_id as well as a compatible string.
	 */
	if (sdrv->driver.of_match_table) {
		const struct of_device_id *of_id;

		for (of_id = sdrv->driver.of_match_table; of_id->compatible[0];
		     of_id++) {
			const char *of_name;

			/* Strip off any vendor prefix */
			of_name = strnchr(of_id->compatible,
					  sizeof(of_id->compatible), ',');
			if (of_name)
				of_name++;
			else
				of_name = of_id->compatible;

			if (sdrv->id_table) {
				const struct spi_device_id *spi_id;

				for (spi_id = sdrv->id_table; spi_id->name[0];
				     spi_id++)
					if (strcmp(spi_id->name, of_name) == 0)
						break;

				if (spi_id->name[0])
					continue;
			} else {
				if (strcmp(sdrv->driver.name, of_name) == 0)
					continue;
			}

			pr_warn("SPI driver %s has no spi_device_id for %s\n",
				sdrv->driver.name, of_id->compatible);
		}
	}

	return driver_register(&sdrv->driver);
}
EXPORT_SYMBOL_GPL(__spi_register_driver);
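
/*
 * Example (illustrative sketch, not part of this file): because of the spi:
 * modalias rule above, a DT-enabled driver should also provide a
 * spi_device_id whose name matches each compatible string with the vendor
 * prefix stripped, or module autoloading will not work.  The "foo" driver
 * and its callbacks below are hypothetical.
 *
 *	static const struct of_device_id foo_of_match[] = {
 *		{ .compatible = "acme,foo" },
 *		{ }
 *	};
 *	MODULE_DEVICE_TABLE(of, foo_of_match);
 *
 *	static const struct spi_device_id foo_spi_ids[] = {
 *		{ "foo" },	// "acme,foo" minus the vendor prefix
 *		{ }
 *	};
 *	MODULE_DEVICE_TABLE(spi, foo_spi_ids);
 *
 *	static struct spi_driver foo_driver = {
 *		.driver = {
 *			.name		= "foo",
 *			.of_match_table	= foo_of_match,
 *		},
 *		.id_table	= foo_spi_ids,
 *		.probe		= foo_probe,
 *		.remove		= foo_remove,
 *	};
 *	module_spi_driver(foo_driver);
 */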

/*-------------------------------------------------------------------------*/

/* SPI devices should normally not be created by SPI device drivers; that
 * would make them board-specific.  Similarly with SPI controller drivers.
 * Device registration normally lives in files like arch/.../mach.../board-YYY.c,
 * alongside other read-only (flashable) information about mainboard devices.
 */

struct boardinfo {
	struct list_head	list;
	struct spi_board_info	board_info;
};

static LIST_HEAD(board_list);
static LIST_HEAD(spi_controller_list);

/*
 * Used to protect add/del operations on the board_info list and the
 * spi_controller list, and their matching process; also used to protect
 * objects of type struct idr.
 */
static DEFINE_MUTEX(board_lock);

/**
 * spi_alloc_device - Allocate a new SPI device
 * @ctlr: Controller to which device is connected
 * Context: can sleep
 *
 * Allows a driver to allocate and initialize a spi_device without
 * registering it immediately.  This allows a driver to directly
 * fill the spi_device with device parameters before calling
 * spi_add_device() on it.
 *
 * Caller is responsible to call spi_add_device() on the returned
 * spi_device structure to add it to the SPI controller.  If the caller
 * needs to discard the spi_device without adding it, then it should
 * call spi_dev_put() on it.
 *
 * Return: a pointer to the new device, or NULL.
 */
struct spi_device *spi_alloc_device(struct spi_controller *ctlr)
{
	struct spi_device *spi;

	if (!spi_controller_get(ctlr))
		return NULL;

	spi = kzalloc(sizeof(*spi), GFP_KERNEL);
	if (!spi) {
		spi_controller_put(ctlr);
		return NULL;
	}

	spi->master = spi->controller = ctlr;
	spi->dev.parent = &ctlr->dev;
	spi->dev.bus = &spi_bus_type;
	spi->dev.release = spidev_release;
	spi->cs_gpio = -ENOENT;
	spi->mode = ctlr->buswidth_override_bits;

	spin_lock_init(&spi->statistics.lock);

	device_initialize(&spi->dev);
	return spi;
}
EXPORT_SYMBOL_GPL(spi_alloc_device);
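
/*
 * Example (illustrative sketch, not part of this file): allocate a device,
 * fill in its parameters, then either add it or discard it with
 * spi_dev_put().  The chip-select, speed, and modalias values are made up.
 *
 *	struct spi_device *spi = spi_alloc_device(ctlr);
 *
 *	if (!spi)
 *		return -ENOMEM;
 *
 *	spi->chip_select = 0;
 *	spi->max_speed_hz = 1000000;
 *	strlcpy(spi->modalias, "foo", sizeof(spi->modalias));
 *
 *	if (spi_add_device(spi)) {
 *		spi_dev_put(spi);	// discard without registering
 *		return -ENODEV;
 *	}
 */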

static void spi_dev_set_name(struct spi_device *spi)
{
	struct acpi_device *adev = ACPI_COMPANION(&spi->dev);

	if (adev) {
		dev_set_name(&spi->dev, "spi-%s", acpi_dev_name(adev));
		return;
	}

	dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->controller->dev),
		     spi->chip_select);
}

static int spi_dev_check(struct device *dev, void *data)
{
	struct spi_device *spi = to_spi_device(dev);
	struct spi_device *new_spi = data;

	if (spi->controller == new_spi->controller &&
	    spi->chip_select == new_spi->chip_select)
		return -EBUSY;
	return 0;
}

static void spi_cleanup(struct spi_device *spi)
{
	if (spi->controller->cleanup)
		spi->controller->cleanup(spi);
}

static int __spi_add_device(struct spi_device *spi)
{
	struct spi_controller *ctlr = spi->controller;
	struct device *dev = ctlr->dev.parent;
	int status;

	status = bus_for_each_dev(&spi_bus_type, NULL, spi, spi_dev_check);
	if (status) {
		dev_err(dev, "chipselect %d already in use\n",
			spi->chip_select);
		return status;
	}

	/* Controller may unregister concurrently */
	if (IS_ENABLED(CONFIG_SPI_DYNAMIC) &&
	    !device_is_registered(&ctlr->dev)) {
		return -ENODEV;
	}

	/* Descriptors take precedence */
	if (ctlr->cs_gpiods)
		spi->cs_gpiod = ctlr->cs_gpiods[spi->chip_select];
	else if (ctlr->cs_gpios)
		spi->cs_gpio = ctlr->cs_gpios[spi->chip_select];

	/* Drivers may modify this initial i/o setup, but will
	 * normally rely on the device being setup.  Devices
	 * using SPI_CS_HIGH can't coexist well otherwise...
	 */
	status = spi_setup(spi);
	if (status < 0) {
		dev_err(dev, "can't setup %s, status %d\n",
			dev_name(&spi->dev), status);
		return status;
	}

	/* Device may be bound to an active driver when this returns */
	status = device_add(&spi->dev);
	if (status < 0) {
		dev_err(dev, "can't add %s, status %d\n",
			dev_name(&spi->dev), status);
		spi_cleanup(spi);
	} else {
		dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev));
	}

	return status;
}

/**
 * spi_add_device - Add spi_device allocated with spi_alloc_device
 * @spi: spi_device to register
 *
 * Companion function to spi_alloc_device.  Devices allocated with
 * spi_alloc_device can be added onto the spi bus with this function.
 *
 * Return: 0 on success; negative errno on failure
 */
int spi_add_device(struct spi_device *spi)
{
	struct spi_controller *ctlr = spi->controller;
	struct device *dev = ctlr->dev.parent;
	int status;

	/* Chipselects are numbered 0..max; validate. */
	if (spi->chip_select >= ctlr->num_chipselect) {
		dev_err(dev, "cs%d >= max %d\n", spi->chip_select,
			ctlr->num_chipselect);
		return -EINVAL;
	}

	/* Set the bus ID string */
	spi_dev_set_name(spi);

	/* We need to make sure there's no other device with this
	 * chipselect **BEFORE** we call setup(), else we'll trash
	 * its configuration.  Lock against concurrent add() calls.
	 */
	mutex_lock(&ctlr->add_lock);
	status = __spi_add_device(spi);
	mutex_unlock(&ctlr->add_lock);
	return status;
}
EXPORT_SYMBOL_GPL(spi_add_device);

static int spi_add_device_locked(struct spi_device *spi)
{
	struct spi_controller *ctlr = spi->controller;
	struct device *dev = ctlr->dev.parent;

	/* Chipselects are numbered 0..max; validate. */
	if (spi->chip_select >= ctlr->num_chipselect) {
		dev_err(dev, "cs%d >= max %d\n", spi->chip_select,
			ctlr->num_chipselect);
		return -EINVAL;
	}

	/* Set the bus ID string */
	spi_dev_set_name(spi);

	WARN_ON(!mutex_is_locked(&ctlr->add_lock));
	return __spi_add_device(spi);
}

/**
 * spi_new_device - instantiate one new SPI device
 * @ctlr: Controller to which device is connected
 * @chip: Describes the SPI device
 * Context: can sleep
 *
 * On typical mainboards, this is purely internal; and it's not needed
 * after board init creates the hard-wired devices.  Some development
 * platforms may not be able to use spi_register_board_info though, and
 * this is exported so that for example a USB or parport based adapter
 * driver could add devices (which it would learn about out-of-band).
 *
 * Return: the new device, or NULL.
 */
struct spi_device *spi_new_device(struct spi_controller *ctlr,
				  struct spi_board_info *chip)
{
	struct spi_device *proxy;
	int status;

	/* NOTE: caller did any chip->bus_num checks necessary.
	 *
	 * Also, unless we change the return value convention to use
	 * error-or-pointer (not NULL-or-pointer), troubleshootability
	 * suggests syslogged diagnostics are best here (ugh).
	 */

	proxy = spi_alloc_device(ctlr);
	if (!proxy)
		return NULL;

	WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias));

	proxy->chip_select = chip->chip_select;
	proxy->max_speed_hz = chip->max_speed_hz;
	proxy->mode = chip->mode;
	proxy->irq = chip->irq;
	strlcpy(proxy->modalias, chip->modalias, sizeof(proxy->modalias));
	proxy->dev.platform_data = (void *) chip->platform_data;
	proxy->controller_data = chip->controller_data;
	proxy->controller_state = NULL;

	if (chip->swnode) {
		status = device_add_software_node(&proxy->dev, chip->swnode);
		if (status) {
			dev_err(&ctlr->dev, "failed to add software node to '%s': %d\n",
				chip->modalias, status);
			goto err_dev_put;
		}
	}

	status = spi_add_device(proxy);
	if (status < 0)
		goto err_dev_put;

	return proxy;

err_dev_put:
	device_remove_software_node(&proxy->dev);
	spi_dev_put(proxy);
	return NULL;
}
EXPORT_SYMBOL_GPL(spi_new_device);
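
/*
 * Example (illustrative sketch, not part of this file): an adapter driver
 * that learns about a chip out-of-band can instantiate it directly.  The
 * descriptor contents below are made up.
 *
 *	static struct spi_board_info foo_chip = {
 *		.modalias	= "foo",
 *		.max_speed_hz	= 2000000,
 *		.chip_select	= 1,
 *		.mode		= SPI_MODE_0,
 *	};
 *
 *	struct spi_device *spi = spi_new_device(ctlr, &foo_chip);
 *
 *	if (!spi)
 *		dev_err(&ctlr->dev, "cannot instantiate foo\n");
 */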

/**
 * spi_unregister_device - unregister a single SPI device
 * @spi: spi_device to unregister
 *
 * Start making the passed SPI device vanish.  Normally this would be handled
 * by spi_unregister_controller().
 */
void spi_unregister_device(struct spi_device *spi)
{
	if (!spi)
		return;

	if (spi->dev.of_node) {
		of_node_clear_flag(spi->dev.of_node, OF_POPULATED);
		of_node_put(spi->dev.of_node);
	}
	if (ACPI_COMPANION(&spi->dev))
		acpi_device_clear_enumerated(ACPI_COMPANION(&spi->dev));
	device_remove_software_node(&spi->dev);
	device_del(&spi->dev);
	spi_cleanup(spi);
	put_device(&spi->dev);
}
EXPORT_SYMBOL_GPL(spi_unregister_device);

static void spi_match_controller_to_boardinfo(struct spi_controller *ctlr,
					      struct spi_board_info *bi)
{
	struct spi_device *dev;

	if (ctlr->bus_num != bi->bus_num)
		return;

	dev = spi_new_device(ctlr, bi);
	if (!dev)
		dev_err(ctlr->dev.parent, "can't create new device for %s\n",
			bi->modalias);
}

/**
 * spi_register_board_info - register SPI devices for a given board
 * @info: array of chip descriptors
 * @n: how many descriptors are provided
 * Context: can sleep
 *
 * Board-specific early init code calls this (probably during arch_initcall)
 * with segments of the SPI device table.  Any device nodes are created later,
 * after the relevant parent SPI controller (bus_num) is defined.  We keep
 * this table of devices forever, so that reloading a controller driver will
 * not make Linux forget about these hard-wired devices.
 *
 * Other code can also call this, e.g. a particular add-on board might provide
 * SPI devices through its expansion connector, so code initializing that board
 * would naturally declare its SPI devices.
 *
 * The board info passed can safely be __initdata ... but be careful of
 * any embedded pointers (platform_data, etc), they're copied as-is.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_register_board_info(struct spi_board_info const *info, unsigned n)
{
	struct boardinfo *bi;
	int i;

	if (!n)
		return 0;

	bi = kcalloc(n, sizeof(*bi), GFP_KERNEL);
	if (!bi)
		return -ENOMEM;

	for (i = 0; i < n; i++, bi++, info++) {
		struct spi_controller *ctlr;

		memcpy(&bi->board_info, info, sizeof(*info));

		mutex_lock(&board_lock);
		list_add_tail(&bi->list, &board_list);
		list_for_each_entry(ctlr, &spi_controller_list, list)
			spi_match_controller_to_boardinfo(ctlr,
							  &bi->board_info);
		mutex_unlock(&board_lock);
	}

	return 0;
}
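
/*
 * Example (illustrative sketch, not part of this file): board init code
 * typically declares its hard-wired SPI devices in an __initdata table and
 * registers it early.  The device listed below is made up.
 *
 *	static struct spi_board_info board_spi_devices[] __initdata = {
 *		{
 *			.modalias	= "foo-flash",
 *			.max_speed_hz	= 25000000,
 *			.bus_num	= 0,
 *			.chip_select	= 0,
 *			.mode		= SPI_MODE_3,
 *		},
 *	};
 *
 *	static int __init board_init_spi(void)
 *	{
 *		return spi_register_board_info(board_spi_devices,
 *					ARRAY_SIZE(board_spi_devices));
 *	}
 *	arch_initcall(board_init_spi);
 */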

/*-------------------------------------------------------------------------*/

static void spi_set_cs(struct spi_device *spi, bool enable, bool force)
{
	bool activate = enable;

	/*
	 * Avoid calling into the driver (or doing delays) if the chip select
	 * isn't actually changing from the last time this was called.
	 */
	if (!force && (spi->controller->last_cs_enable == enable) &&
	    (spi->controller->last_cs_mode_high == (spi->mode & SPI_CS_HIGH)))
		return;

	trace_spi_set_cs(spi, activate);

	spi->controller->last_cs_enable = enable;
	spi->controller->last_cs_mode_high = spi->mode & SPI_CS_HIGH;

	if ((spi->cs_gpiod || gpio_is_valid(spi->cs_gpio) ||
	     !spi->controller->set_cs_timing) && !activate) {
		spi_delay_exec(&spi->cs_hold, NULL);
	}

	if (spi->mode & SPI_CS_HIGH)
		enable = !enable;

	if (spi->cs_gpiod || gpio_is_valid(spi->cs_gpio)) {
		if (!(spi->mode & SPI_NO_CS)) {
			if (spi->cs_gpiod) {
				/*
				 * Historically ACPI has no means of expressing the
				 * GPIO polarity, so the SPISerialBus() resource
				 * defines it on a per-chip basis.  In order to avoid
				 * a chain of negations, the GPIO polarity is
				 * considered being Active High.  Even for the cases
				 * when _DSD() is involved (in the updated versions of
				 * ACPI) the GPIO CS polarity must be defined Active
				 * High to avoid ambiguity.  That's why we use enable,
				 * which takes SPI_CS_HIGH into account.
				 */
				if (has_acpi_companion(&spi->dev))
					gpiod_set_value_cansleep(spi->cs_gpiod, !enable);
				else
					/* Polarity handled by GPIO library */
					gpiod_set_value_cansleep(spi->cs_gpiod, activate);
			} else {
				/*
				 * Invert the enable line, as active low is
				 * default for SPI.
				 */
				gpio_set_value_cansleep(spi->cs_gpio, !enable);
			}
		}
		/* Some SPI masters need both GPIO CS & slave_select */
		if ((spi->controller->flags & SPI_MASTER_GPIO_SS) &&
		    spi->controller->set_cs)
			spi->controller->set_cs(spi, !enable);
	} else if (spi->controller->set_cs) {
		spi->controller->set_cs(spi, !enable);
	}

	if (spi->cs_gpiod || gpio_is_valid(spi->cs_gpio) ||
	    !spi->controller->set_cs_timing) {
		if (activate)
			spi_delay_exec(&spi->cs_setup, NULL);
		else
			spi_delay_exec(&spi->cs_inactive, NULL);
	}
}

#ifdef CONFIG_HAS_DMA
int spi_map_buf(struct spi_controller *ctlr, struct device *dev,
		struct sg_table *sgt, void *buf, size_t len,
		enum dma_data_direction dir)
{
	const bool vmalloced_buf = is_vmalloc_addr(buf);
	unsigned int max_seg_size = dma_get_max_seg_size(dev);
#ifdef CONFIG_HIGHMEM
	const bool kmap_buf = ((unsigned long)buf >= PKMAP_BASE &&
				(unsigned long)buf < (PKMAP_BASE +
					(LAST_PKMAP * PAGE_SIZE)));
#else
	const bool kmap_buf = false;
#endif
	int desc_len;
	int sgs;
	struct page *vm_page;
	struct scatterlist *sg;
	void *sg_buf;
	size_t min;
	int i, ret;

	if (vmalloced_buf || kmap_buf) {
		desc_len = min_t(unsigned long, max_seg_size, PAGE_SIZE);
		sgs = DIV_ROUND_UP(len + offset_in_page(buf), desc_len);
	} else if (virt_addr_valid(buf)) {
		desc_len = min_t(size_t, max_seg_size, ctlr->max_dma_len);
		sgs = DIV_ROUND_UP(len, desc_len);
	} else {
		return -EINVAL;
	}

	ret = sg_alloc_table(sgt, sgs, GFP_KERNEL);
	if (ret != 0)
		return ret;

	sg = &sgt->sgl[0];
	for (i = 0; i < sgs; i++) {

		if (vmalloced_buf || kmap_buf) {
			/*
			 * Next scatterlist entry size is the minimum between
			 * the desc_len and the remaining buffer length that
			 * fits in a page.
			 */
			min = min_t(size_t, desc_len,
				    min_t(size_t, len,
					  PAGE_SIZE - offset_in_page(buf)));
			if (vmalloced_buf)
				vm_page = vmalloc_to_page(buf);
			else
				vm_page = kmap_to_page(buf);
			if (!vm_page) {
				sg_free_table(sgt);
				return -ENOMEM;
			}
			sg_set_page(sg, vm_page,
				    min, offset_in_page(buf));
		} else {
			min = min_t(size_t, len, desc_len);
			sg_buf = buf;
			sg_set_buf(sg, sg_buf, min);
		}

		buf += min;
		len -= min;
		sg = sg_next(sg);
	}

	ret = dma_map_sg(dev, sgt->sgl, sgt->nents, dir);
	if (!ret)
		ret = -ENOMEM;
	if (ret < 0) {
		sg_free_table(sgt);
		return ret;
	}

	sgt->nents = ret;

	return 0;
}

void spi_unmap_buf(struct spi_controller *ctlr, struct device *dev,
		   struct sg_table *sgt, enum dma_data_direction dir)
{
	if (sgt->orig_nents) {
		dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir);
		sg_free_table(sgt);
		sgt->orig_nents = 0;
		sgt->nents = 0;
	}
}

static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
{
	struct device *tx_dev, *rx_dev;
	struct spi_transfer *xfer;
	int ret;

	if (!ctlr->can_dma)
		return 0;

	if (ctlr->dma_tx)
		tx_dev = ctlr->dma_tx->device->dev;
	else if (ctlr->dma_map_dev)
		tx_dev = ctlr->dma_map_dev;
	else
		tx_dev = ctlr->dev.parent;

	if (ctlr->dma_rx)
		rx_dev = ctlr->dma_rx->device->dev;
	else if (ctlr->dma_map_dev)
		rx_dev = ctlr->dma_map_dev;
	else
		rx_dev = ctlr->dev.parent;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		if (!ctlr->can_dma(ctlr, msg->spi, xfer))
			continue;

		if (xfer->tx_buf != NULL) {
			ret = spi_map_buf(ctlr, tx_dev, &xfer->tx_sg,
					  (void *)xfer->tx_buf, xfer->len,
					  DMA_TO_DEVICE);
			if (ret != 0)
				return ret;
		}

		if (xfer->rx_buf != NULL) {
			ret = spi_map_buf(ctlr, rx_dev, &xfer->rx_sg,
					  xfer->rx_buf, xfer->len,
					  DMA_FROM_DEVICE);
			if (ret != 0) {
				spi_unmap_buf(ctlr, tx_dev, &xfer->tx_sg,
					      DMA_TO_DEVICE);
				return ret;
			}
		}
	}

	ctlr->cur_msg_mapped = true;

	return 0;
}

static int __spi_unmap_msg(struct spi_controller *ctlr, struct spi_message *msg)
{
	struct spi_transfer *xfer;
	struct device *tx_dev, *rx_dev;

	if (!ctlr->cur_msg_mapped || !ctlr->can_dma)
		return 0;

	if (ctlr->dma_tx)
		tx_dev = ctlr->dma_tx->device->dev;
	else if (ctlr->dma_map_dev)
		tx_dev = ctlr->dma_map_dev;
	else
		tx_dev = ctlr->dev.parent;

	if (ctlr->dma_rx)
		rx_dev = ctlr->dma_rx->device->dev;
	else if (ctlr->dma_map_dev)
		rx_dev = ctlr->dma_map_dev;
	else
		rx_dev = ctlr->dev.parent;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		if (!ctlr->can_dma(ctlr, msg->spi, xfer))
			continue;

		spi_unmap_buf(ctlr, rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
		spi_unmap_buf(ctlr, tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
	}

	ctlr->cur_msg_mapped = false;

	return 0;
}
#else /* !CONFIG_HAS_DMA */
static inline int __spi_map_msg(struct spi_controller *ctlr,
				struct spi_message *msg)
{
	return 0;
}

static inline int __spi_unmap_msg(struct spi_controller *ctlr,
				  struct spi_message *msg)
{
	return 0;
}
#endif /* !CONFIG_HAS_DMA */

static inline int spi_unmap_msg(struct spi_controller *ctlr,
				struct spi_message *msg)
{
	struct spi_transfer *xfer;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		/*
		 * Restore the original value of tx_buf or rx_buf if they are
		 * NULL.
		 */
		if (xfer->tx_buf == ctlr->dummy_tx)
			xfer->tx_buf = NULL;
		if (xfer->rx_buf == ctlr->dummy_rx)
			xfer->rx_buf = NULL;
	}

	return __spi_unmap_msg(ctlr, msg);
}

static int spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
{
	struct spi_transfer *xfer;
	void *tmp;
	unsigned int max_tx, max_rx;

	if ((ctlr->flags & (SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX))
		&& !(msg->spi->mode & SPI_3WIRE)) {
		max_tx = 0;
		max_rx = 0;

		list_for_each_entry(xfer, &msg->transfers, transfer_list) {
			if ((ctlr->flags & SPI_CONTROLLER_MUST_TX) &&
			    !xfer->tx_buf)
				max_tx = max(xfer->len, max_tx);
			if ((ctlr->flags & SPI_CONTROLLER_MUST_RX) &&
			    !xfer->rx_buf)
				max_rx = max(xfer->len, max_rx);
		}

		if (max_tx) {
			tmp = krealloc(ctlr->dummy_tx, max_tx,
				       GFP_KERNEL | GFP_DMA);
			if (!tmp)
				return -ENOMEM;
			ctlr->dummy_tx = tmp;
			memset(tmp, 0, max_tx);
		}

		if (max_rx) {
			tmp = krealloc(ctlr->dummy_rx, max_rx,
				       GFP_KERNEL | GFP_DMA);
			if (!tmp)
				return -ENOMEM;
			ctlr->dummy_rx = tmp;
		}

		if (max_tx || max_rx) {
			list_for_each_entry(xfer, &msg->transfers,
					    transfer_list) {
				if (!xfer->len)
					continue;
				if (!xfer->tx_buf)
					xfer->tx_buf = ctlr->dummy_tx;
				if (!xfer->rx_buf)
					xfer->rx_buf = ctlr->dummy_rx;
			}
		}
	}

	return __spi_map_msg(ctlr, msg);
}

static int spi_transfer_wait(struct spi_controller *ctlr,
			     struct spi_message *msg,
			     struct spi_transfer *xfer)
{
	struct spi_statistics *statm = &ctlr->statistics;
	struct spi_statistics *stats = &msg->spi->statistics;
	u32 speed_hz = xfer->speed_hz;
	unsigned long long ms;

	if (spi_controller_is_slave(ctlr)) {
		if (wait_for_completion_interruptible(&ctlr->xfer_completion)) {
			dev_dbg(&msg->spi->dev, "SPI transfer interrupted\n");
			return -EINTR;
		}
	} else {
		if (!speed_hz)
			speed_hz = 100000;

		/*
		 * For each byte we wait for 8 cycles of the SPI clock.
		 * Since speed is defined in Hz and we want milliseconds,
		 * use respective multiplier, but before the division,
		 * otherwise we may get 0 for short transfers.
		 */
		ms = 8LL * MSEC_PER_SEC * xfer->len;
		do_div(ms, speed_hz);

		/*
		 * Increase it twice and add 200 ms tolerance, use
		 * predefined maximum in case of overflow.
		 */
		ms += ms + 200;
		if (ms > UINT_MAX)
			ms = UINT_MAX;

		ms = wait_for_completion_timeout(&ctlr->xfer_completion,
						 msecs_to_jiffies(ms));

		if (ms == 0) {
			SPI_STATISTICS_INCREMENT_FIELD(statm, timedout);
			SPI_STATISTICS_INCREMENT_FIELD(stats, timedout);
			dev_err(&msg->spi->dev,
				"SPI transfer timed out\n");
			return -ETIMEDOUT;
		}
	}

	return 0;
}

static void _spi_transfer_delay_ns(u32 ns)
{
	if (!ns)
		return;
	if (ns <= NSEC_PER_USEC) {
		ndelay(ns);
	} else {
		u32 us = DIV_ROUND_UP(ns, NSEC_PER_USEC);

		if (us <= 10)
			udelay(us);
		else
			usleep_range(us, us + DIV_ROUND_UP(us, 10));
	}
}

int spi_delay_to_ns(struct spi_delay *_delay, struct spi_transfer *xfer)
{
	u32 delay = _delay->value;
	u32 unit = _delay->unit;
	u32 hz;

	if (!delay)
		return 0;

	switch (unit) {
	case SPI_DELAY_UNIT_USECS:
		delay *= NSEC_PER_USEC;
		break;
	case SPI_DELAY_UNIT_NSECS:
		/* Nothing to do here */
		break;
	case SPI_DELAY_UNIT_SCK:
		/* clock cycles need to be obtained from spi_transfer */
		if (!xfer)
			return -EINVAL;
		/*
		 * If there is unknown effective speed, approximate it
		 * by underestimating with half of the requested hz.
		 */
		hz = xfer->effective_speed_hz ?: xfer->speed_hz / 2;
		if (!hz)
			return -EINVAL;

		/* Convert delay to nanoseconds */
		delay *= DIV_ROUND_UP(NSEC_PER_SEC, hz);
		break;
	default:
		return -EINVAL;
	}

	return delay;
}
EXPORT_SYMBOL_GPL(spi_delay_to_ns);

int spi_delay_exec(struct spi_delay *_delay, struct spi_transfer *xfer)
{
	int delay;

	might_sleep();

	if (!_delay)
		return -EINVAL;

	delay = spi_delay_to_ns(_delay, xfer);
	if (delay < 0)
		return delay;

	_spi_transfer_delay_ns(delay);

	return 0;
}
EXPORT_SYMBOL_GPL(spi_delay_exec);
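
/*
 * Example (illustrative sketch, not part of this file): a delay of two SCK
 * cycles on a transfer whose effective speed is 1 MHz converts, per
 * spi_delay_to_ns() above, to 2 * DIV_ROUND_UP(1000000000, 1000000) =
 * 2000 ns.
 *
 *	struct spi_delay word_delay = {
 *		.value	= 2,
 *		.unit	= SPI_DELAY_UNIT_SCK,
 *	};
 *
 *	// with xfer->effective_speed_hz == 1000000 this delays ~2000 ns
 *	spi_delay_exec(&word_delay, xfer);
 */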

static void _spi_transfer_cs_change_delay(struct spi_message *msg,
					  struct spi_transfer *xfer)
{
	u32 default_delay_ns = 10 * NSEC_PER_USEC;
	u32 delay = xfer->cs_change_delay.value;
	u32 unit = xfer->cs_change_delay.unit;
	int ret;

	/* return early on "fast" mode - for everything but USECS */
	if (!delay) {
		if (unit == SPI_DELAY_UNIT_USECS)
			_spi_transfer_delay_ns(default_delay_ns);
		return;
	}

	ret = spi_delay_exec(&xfer->cs_change_delay, xfer);
	if (ret) {
		dev_err_once(&msg->spi->dev,
			     "Use of unsupported delay unit %i, using default of %luus\n",
			     unit, default_delay_ns / NSEC_PER_USEC);
		_spi_transfer_delay_ns(default_delay_ns);
	}
}

/*
 * spi_transfer_one_message - Default implementation of transfer_one_message()
 *
 * This is a standard implementation of transfer_one_message() for
 * drivers which implement a transfer_one() operation.  It provides
 * standard handling of delays and chip select management.
 */
static int spi_transfer_one_message(struct spi_controller *ctlr,
				    struct spi_message *msg)
{
	struct spi_transfer *xfer;
	bool keep_cs = false;
	int ret = 0;
	struct spi_statistics *statm = &ctlr->statistics;
	struct spi_statistics *stats = &msg->spi->statistics;

	spi_set_cs(msg->spi, true, false);

	SPI_STATISTICS_INCREMENT_FIELD(statm, messages);
	SPI_STATISTICS_INCREMENT_FIELD(stats, messages);

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		trace_spi_transfer_start(msg, xfer);

		spi_statistics_add_transfer_stats(statm, xfer, ctlr);
		spi_statistics_add_transfer_stats(stats, xfer, ctlr);

		if (!ctlr->ptp_sts_supported) {
			xfer->ptp_sts_word_pre = 0;
			ptp_read_system_prets(xfer->ptp_sts);
		}

		if ((xfer->tx_buf || xfer->rx_buf) && xfer->len) {
			reinit_completion(&ctlr->xfer_completion);

fallback_pio:
			ret = ctlr->transfer_one(ctlr, msg->spi, xfer);
			if (ret < 0) {
				if (ctlr->cur_msg_mapped &&
				    (xfer->error & SPI_TRANS_FAIL_NO_START)) {
					__spi_unmap_msg(ctlr, msg);
					ctlr->fallback = true;
					xfer->error &= ~SPI_TRANS_FAIL_NO_START;
					goto fallback_pio;
				}

				SPI_STATISTICS_INCREMENT_FIELD(statm,
							       errors);
				SPI_STATISTICS_INCREMENT_FIELD(stats,
							       errors);
				dev_err(&msg->spi->dev,
					"SPI transfer failed: %d\n", ret);
				goto out;
			}

			if (ret > 0) {
				ret = spi_transfer_wait(ctlr, msg, xfer);
				if (ret < 0)
					msg->status = ret;
			}
		} else {
			if (xfer->len)
				dev_err(&msg->spi->dev,
					"Bufferless transfer has length %u\n",
					xfer->len);
		}

		if (!ctlr->ptp_sts_supported) {
			ptp_read_system_postts(xfer->ptp_sts);
			xfer->ptp_sts_word_post = xfer->len;
		}

		trace_spi_transfer_stop(msg, xfer);

		if (msg->status != -EINPROGRESS)
			goto out;

		spi_transfer_delay_exec(xfer);

		if (xfer->cs_change) {
			if (list_is_last(&xfer->transfer_list,
					 &msg->transfers)) {
				keep_cs = true;
			} else {
				spi_set_cs(msg->spi, false, false);
				_spi_transfer_cs_change_delay(msg, xfer);
				spi_set_cs(msg->spi, true, false);
			}
		}

		msg->actual_length += xfer->len;
	}

out:
	if (ret != 0 || !keep_cs)
		spi_set_cs(msg->spi, false, false);

	if (msg->status == -EINPROGRESS)
		msg->status = ret;

	if (msg->status && ctlr->handle_err)
		ctlr->handle_err(ctlr, msg);

	spi_finalize_current_message(ctlr);

	return ret;
}

/**
 * spi_finalize_current_transfer - report completion of a transfer
 * @ctlr: the controller reporting completion
 *
 * Called by SPI drivers using the core transfer_one_message()
 * implementation to notify it that the current interrupt driven
 * transfer has finished and the next one may be scheduled.
 */
void spi_finalize_current_transfer(struct spi_controller *ctlr)
{
	complete(&ctlr->xfer_completion);
}
EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);
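
/*
 * Example (illustrative sketch, not part of this file): an interrupt driven
 * controller driver returns a positive value from transfer_one() so the
 * core waits in spi_transfer_wait(), then calls
 * spi_finalize_current_transfer() from its completion IRQ.  The foo_*
 * helpers are hypothetical.
 *
 *	static int foo_transfer_one(struct spi_controller *ctlr,
 *				    struct spi_device *spi,
 *				    struct spi_transfer *xfer)
 *	{
 *		foo_start_dma(ctlr, xfer);
 *		return 1;	// transfer still in flight; core will wait
 *	}
 *
 *	static irqreturn_t foo_irq(int irq, void *data)
 *	{
 *		struct spi_controller *ctlr = data;
 *
 *		spi_finalize_current_transfer(ctlr);
 *		return IRQ_HANDLED;
 *	}
 */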

static void spi_idle_runtime_pm(struct spi_controller *ctlr)
{
	if (ctlr->auto_runtime_pm) {
		pm_runtime_mark_last_busy(ctlr->dev.parent);
		pm_runtime_put_autosuspend(ctlr->dev.parent);
	}
}

/**
 * __spi_pump_messages - function which processes spi message queue
 * @ctlr: controller to process queue for
 * @in_kthread: true if we are in the context of the message pump thread
 *
 * This function checks if there is any spi message in the queue that
 * needs processing and if so call out to the driver to initialize hardware
 * and transfer each message.
 *
 * Note that it is called both from the kthread itself and also from
 * inside spi_sync(); the queue extraction handling at the top of the
 * function should deal with this safely.
 */
static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
{
	struct spi_transfer *xfer;
	struct spi_message *msg;
	bool was_busy = false;
	unsigned long flags;
	int ret;

	/* Lock queue */
	spin_lock_irqsave(&ctlr->queue_lock, flags);

	/* Make sure we are not already running a message */
	if (ctlr->cur_msg) {
		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
		return;
	}

	/* If another context is idling the device then defer */
	if (ctlr->idling) {
		kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
		return;
	}

	/* Check if the queue is idle */
	if (list_empty(&ctlr->queue) || !ctlr->running) {
		if (!ctlr->busy) {
			spin_unlock_irqrestore(&ctlr->queue_lock, flags);
			return;
		}

		/* Defer any non-atomic teardown to the thread */
		if (!in_kthread) {
			if (!ctlr->dummy_rx && !ctlr->dummy_tx &&
			    !ctlr->unprepare_transfer_hardware) {
				spi_idle_runtime_pm(ctlr);
				ctlr->busy = false;
				trace_spi_controller_idle(ctlr);
			} else {
				kthread_queue_work(ctlr->kworker,
						   &ctlr->pump_messages);
			}
			spin_unlock_irqrestore(&ctlr->queue_lock, flags);
			return;
		}

		ctlr->busy = false;
		ctlr->idling = true;
		spin_unlock_irqrestore(&ctlr->queue_lock, flags);

		kfree(ctlr->dummy_rx);
		ctlr->dummy_rx = NULL;
		kfree(ctlr->dummy_tx);
		ctlr->dummy_tx = NULL;
		if (ctlr->unprepare_transfer_hardware &&
		    ctlr->unprepare_transfer_hardware(ctlr))
			dev_err(&ctlr->dev,
				"failed to unprepare transfer hardware\n");
		spi_idle_runtime_pm(ctlr);
		trace_spi_controller_idle(ctlr);

		spin_lock_irqsave(&ctlr->queue_lock, flags);
		ctlr->idling = false;
		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
		return;
	}

	/* Extract head of queue */
	msg = list_first_entry(&ctlr->queue, struct spi_message, queue);
	ctlr->cur_msg = msg;

	list_del_init(&msg->queue);
	if (ctlr->busy)
		was_busy = true;
	else
		ctlr->busy = true;
	spin_unlock_irqrestore(&ctlr->queue_lock, flags);

	mutex_lock(&ctlr->io_mutex);

	if (!was_busy && ctlr->auto_runtime_pm) {
		ret = pm_runtime_get_sync(ctlr->dev.parent);
		if (ret < 0) {
			pm_runtime_put_noidle(ctlr->dev.parent);
			dev_err(&ctlr->dev, "Failed to power device: %d\n",
				ret);
			mutex_unlock(&ctlr->io_mutex);
			return;
		}
	}

	if (!was_busy)
		trace_spi_controller_busy(ctlr);

	if (!was_busy && ctlr->prepare_transfer_hardware) {
		ret = ctlr->prepare_transfer_hardware(ctlr);
		if (ret) {
			dev_err(&ctlr->dev,
				"failed to prepare transfer hardware: %d\n",
				ret);

			if (ctlr->auto_runtime_pm)
				pm_runtime_put(ctlr->dev.parent);

			msg->status = ret;
			spi_finalize_current_message(ctlr);

			mutex_unlock(&ctlr->io_mutex);
			return;
		}
	}

	trace_spi_message_start(msg);

	if (ctlr->prepare_message) {
		ret = ctlr->prepare_message(ctlr, msg);
		if (ret) {
			dev_err(&ctlr->dev, "failed to prepare message: %d\n",
				ret);
			msg->status = ret;
			spi_finalize_current_message(ctlr);
			goto out;
		}
		ctlr->cur_msg_prepared = true;
	}

	ret = spi_map_msg(ctlr, msg);
	if (ret) {
		msg->status = ret;
		spi_finalize_current_message(ctlr);
		goto out;
	}

	if (!ctlr->ptp_sts_supported && !ctlr->transfer_one) {
		list_for_each_entry(xfer, &msg->transfers, transfer_list) {
			xfer->ptp_sts_word_pre = 0;
			ptp_read_system_prets(xfer->ptp_sts);
		}
	}

	ret = ctlr->transfer_one_message(ctlr, msg);
	if (ret) {
		dev_err(&ctlr->dev,
			"failed to transfer one message from queue\n");
		goto out;
	}

out:
	mutex_unlock(&ctlr->io_mutex);

	/* Prod the scheduler in case transfer_one() was busy waiting */
	if (!ret)
		cond_resched();
}

/**
 * spi_pump_messages - kthread work function which processes spi message queue
 * @work: pointer to kthread work struct contained in the controller struct
 */
static void spi_pump_messages(struct kthread_work *work)
{
	struct spi_controller *ctlr =
		container_of(work, struct spi_controller, pump_messages);

	__spi_pump_messages(ctlr, true);
}

/**
 * spi_take_timestamp_pre - helper for drivers to collect the beginning of the
 *			    TX timestamp for the requested byte from the SPI
 *			    transfer. The frequency with which this function
 *			    must be called (once per word, once for the whole
 *			    transfer, once per batch of words etc) is arbitrary
 *			    as long as the @tx buffer offset is greater than or
 *			    equal to the requested byte at the time of the
 *			    call. The timestamp is only taken once, at the
 *			    first such call. It is assumed that the driver
 *			    advances its @tx buffer pointer monotonically.
 * @ctlr: Pointer to the spi_controller structure of the driver
 * @xfer: Pointer to the transfer being timestamped
 * @progress: How many words (not bytes) have been transferred so far
 * @irqs_off: If true, will disable IRQs and preemption for the duration of the
 *	      transfer, for less jitter in time measurement. Only compatible
 *	      with PIO drivers. If true, must follow up with
 *	      spi_take_timestamp_post or otherwise system will crash.
 *	      WARNING: for fully predictable results, the CPU frequency must
 *	      also be under control (governor).
 */
void spi_take_timestamp_pre(struct spi_controller *ctlr,
			    struct spi_transfer *xfer,
			    size_t progress, bool irqs_off)
{
	if (!xfer->ptp_sts)
		return;

	if (xfer->timestamped)
		return;

	if (progress > xfer->ptp_sts_word_pre)
		return;

	/* Capture the resolution of the timestamp */
	xfer->ptp_sts_word_pre = progress;

	if (irqs_off) {
		local_irq_save(ctlr->irq_flags);
		preempt_disable();
	}

	ptp_read_system_prets(xfer->ptp_sts);
}
EXPORT_SYMBOL_GPL(spi_take_timestamp_pre);

/**
 * spi_take_timestamp_post - helper for drivers to collect the end of the
 *			     TX timestamp for the requested byte from the SPI
 *			     transfer. Can be called with an arbitrary
 *			     frequency: only the first call where @tx exceeds
 *			     or is equal to the requested word will be
 *			     timestamped.
 * @ctlr: Pointer to the spi_controller structure of the driver
 * @xfer: Pointer to the transfer being timestamped
 * @progress: How many words (not bytes) have been transferred so far
 * @irqs_off: If true, will re-enable IRQs and preemption for the local CPU.
 */
void spi_take_timestamp_post(struct spi_controller *ctlr,
			     struct spi_transfer *xfer,
			     size_t progress, bool irqs_off)
{
	if (!xfer->ptp_sts)
		return;

	if (xfer->timestamped)
		return;

	if (progress < xfer->ptp_sts_word_post)
		return;

	ptp_read_system_postts(xfer->ptp_sts);

	if (irqs_off) {
		local_irq_restore(ctlr->irq_flags);
		preempt_enable();
	}

	/* Capture the resolution of the timestamp */
	xfer->ptp_sts_word_post = progress;

	xfer->timestamped = true;
}
EXPORT_SYMBOL_GPL(spi_take_timestamp_post);
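
/*
 * Example (illustrative sketch, not part of this file): a PIO driver can
 * bracket each word with the pre/post helpers; only the word the client
 * requested is actually timestamped.  foo_write_word() is hypothetical,
 * and the word-count arithmetic assumes bits_per_word is a multiple of 8.
 *
 *	unsigned int i, nwords = xfer->len / (xfer->bits_per_word / 8);
 *
 *	for (i = 0; i < nwords; i++) {
 *		spi_take_timestamp_pre(ctlr, xfer, i, false);
 *		foo_write_word(ctlr, xfer, i);
 *		spi_take_timestamp_post(ctlr, xfer, i + 1, false);
 *	}
 */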

/**
 * spi_set_thread_rt - set the controller to pump at realtime priority
 * @ctlr: controller to boost priority of
 *
 * This can be called because the controller requested realtime priority
 * (by setting the ->rt value before calling spi_register_controller()) or
 * because a device on the bus said that its transfers needed realtime
 * priority.
 *
 * NOTE: at the moment if any device on a bus says it needs realtime then
 * the thread will be at realtime priority for all transfers on that
 * controller.  If this eventually becomes a problem we may see if we can
 * find a way to boost the priority only temporarily during relevant
 * transfers.
 */
static void spi_set_thread_rt(struct spi_controller *ctlr)
{
	dev_info(&ctlr->dev,
		 "will run message pump with realtime priority\n");
	sched_set_fifo(ctlr->kworker->task);
}

static int spi_init_queue(struct spi_controller *ctlr)
{
	ctlr->running = false;
	ctlr->busy = false;

	ctlr->kworker = kthread_create_worker(0, dev_name(&ctlr->dev));
	if (IS_ERR(ctlr->kworker)) {
		dev_err(&ctlr->dev, "failed to create message pump kworker\n");
		return PTR_ERR(ctlr->kworker);
	}

	kthread_init_work(&ctlr->pump_messages, spi_pump_messages);

	/*
	 * Controller config will indicate if this controller should run the
	 * message pump with high (realtime) priority to reduce the transfer
	 * latency on the bus by minimising the delay between a transfer
	 * request and the scheduling of the message pump thread.  Without this
	 * setting the message pump thread will remain at default priority.
	 */
	if (ctlr->rt)
		spi_set_thread_rt(ctlr);

	return 0;
}

/**
 * spi_get_next_queued_message() - called by driver to check for queued
 * messages
 * @ctlr: the controller to check for queued messages
 *
 * If there are more messages in the queue, the next message is returned from
 * this call.
 *
 * Return: the next message in the queue, else NULL if the queue is empty.
 */
struct spi_message *spi_get_next_queued_message(struct spi_controller *ctlr)
{
	struct spi_message *next;
	unsigned long flags;

	/* get a pointer to the next message, if any */
	spin_lock_irqsave(&ctlr->queue_lock, flags);
	next = list_first_entry_or_null(&ctlr->queue, struct spi_message,
					queue);
	spin_unlock_irqrestore(&ctlr->queue_lock, flags);

	return next;
}
EXPORT_SYMBOL_GPL(spi_get_next_queued_message);
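
/*
 * Example (illustrative sketch, not part of this file): a driver that
 * implements its own transfer_one_message() can peek at the queue, e.g. to
 * decide whether to keep the hardware prepared between back-to-back
 * messages for the same device.  keep_hw_prepared is hypothetical.
 *
 *	struct spi_message *next = spi_get_next_queued_message(ctlr);
 *
 *	if (next && next->spi == msg->spi)
 *		keep_hw_prepared = true;
 */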
1802
1803 /**
1804 * spi_finalize_current_message() - the current message is complete
1805 * @ctlr: the controller to return the message to
1806 *
1807 * Called by the driver to notify the core that the message in the front of the
1808 * queue is complete and can be removed from the queue.
1809 */
1810 void spi_finalize_current_message(struct spi_controller *ctlr)
1811 {
1812 struct spi_transfer *xfer;
1813 struct spi_message *mesg;
1814 unsigned long flags;
1815 int ret;
1816
1817 spin_lock_irqsave(&ctlr->queue_lock, flags);
1818 mesg = ctlr->cur_msg;
1819 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1820
1821 if (!ctlr->ptp_sts_supported && !ctlr->transfer_one) {
1822 list_for_each_entry(xfer, &mesg->transfers, transfer_list) {
1823 ptp_read_system_postts(xfer->ptp_sts);
1824 xfer->ptp_sts_word_post = xfer->len;
1825 }
1826 }
1827
1828 if (unlikely(ctlr->ptp_sts_supported))
1829 list_for_each_entry(xfer, &mesg->transfers, transfer_list)
1830 WARN_ON_ONCE(xfer->ptp_sts && !xfer->timestamped);
1831
1832 spi_unmap_msg(ctlr, mesg);
1833
1834 	/* In the prepare_messages callback the SPI bus has the opportunity to
1835 	 * split a transfer into smaller chunks.
1836 	 * Release the split transfers here, since spi_map_msg() is done on the
1837 	 * split transfers.
1838 	 */
1839 spi_res_release(ctlr, mesg);
1840
1841 if (ctlr->cur_msg_prepared && ctlr->unprepare_message) {
1842 ret = ctlr->unprepare_message(ctlr, mesg);
1843 if (ret) {
1844 dev_err(&ctlr->dev, "failed to unprepare message: %d\n",
1845 ret);
1846 }
1847 }
1848
1849 spin_lock_irqsave(&ctlr->queue_lock, flags);
1850 ctlr->cur_msg = NULL;
1851 ctlr->cur_msg_prepared = false;
1852 ctlr->fallback = false;
1853 kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
1854 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1855
1856 trace_spi_message_done(mesg);
1857
1858 mesg->state = NULL;
1859 if (mesg->complete)
1860 mesg->complete(mesg->context);
1861 }
1862 EXPORT_SYMBOL_GPL(spi_finalize_current_message);
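
/*
 * Illustrative sketch: a driver implementing its own transfer_one_message()
 * typically ends its completion path like this, where "ret" is the
 * (hypothetical) outcome of the hardware transfer:
 *
 *	msg->status = ret;	// 0 on success, negative errno otherwise
 *	spi_finalize_current_message(ctlr);
 */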
1863
1864 static int spi_start_queue(struct spi_controller *ctlr)
1865 {
1866 unsigned long flags;
1867
1868 spin_lock_irqsave(&ctlr->queue_lock, flags);
1869
1870 if (ctlr->running || ctlr->busy) {
1871 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1872 return -EBUSY;
1873 }
1874
1875 ctlr->running = true;
1876 ctlr->cur_msg = NULL;
1877 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1878
1879 kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
1880
1881 return 0;
1882 }
1883
1884 static int spi_stop_queue(struct spi_controller *ctlr)
1885 {
1886 unsigned long flags;
1887 unsigned limit = 500;
1888 int ret = 0;
1889
1890 spin_lock_irqsave(&ctlr->queue_lock, flags);
1891
1892 /*
1893 * This is a bit lame, but is optimized for the common execution path.
1894 * A wait_queue on the ctlr->busy could be used, but then the common
1895 * execution path (pump_messages) would be required to call wake_up or
1896 * friends on every SPI message. Do this instead.
1897 */
1898 while ((!list_empty(&ctlr->queue) || ctlr->busy) && limit--) {
1899 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1900 usleep_range(10000, 11000);
1901 spin_lock_irqsave(&ctlr->queue_lock, flags);
1902 }
1903
1904 if (!list_empty(&ctlr->queue) || ctlr->busy)
1905 ret = -EBUSY;
1906 else
1907 ctlr->running = false;
1908
1909 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1910
1911 if (ret) {
1912 dev_warn(&ctlr->dev, "could not stop message queue\n");
1913 return ret;
1914 }
1915 return ret;
1916 }
1917
1918 static int spi_destroy_queue(struct spi_controller *ctlr)
1919 {
1920 int ret;
1921
1922 ret = spi_stop_queue(ctlr);
1923
1924 /*
1925 * kthread_flush_worker will block until all work is done.
1926 * If the reason that stop_queue timed out is that the work will never
1927 * finish, then it does no good to call flush/stop thread, so
1928 * return anyway.
1929 */
1930 if (ret) {
1931 dev_err(&ctlr->dev, "problem destroying queue\n");
1932 return ret;
1933 }
1934
1935 kthread_destroy_worker(ctlr->kworker);
1936
1937 return 0;
1938 }
1939
1940 static int __spi_queued_transfer(struct spi_device *spi,
1941 struct spi_message *msg,
1942 bool need_pump)
1943 {
1944 struct spi_controller *ctlr = spi->controller;
1945 unsigned long flags;
1946
1947 spin_lock_irqsave(&ctlr->queue_lock, flags);
1948
1949 if (!ctlr->running) {
1950 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1951 return -ESHUTDOWN;
1952 }
1953 msg->actual_length = 0;
1954 msg->status = -EINPROGRESS;
1955
1956 list_add_tail(&msg->queue, &ctlr->queue);
1957 if (!ctlr->busy && need_pump)
1958 kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
1959
1960 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1961 return 0;
1962 }
1963
1964 /**
1965 * spi_queued_transfer - transfer function for queued transfers
1966 * @spi: spi device which is requesting transfer
1967  * @msg: SPI message which is to be handled and queued to the driver queue
1968 *
1969 * Return: zero on success, else a negative error code.
1970 */
1971 static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
1972 {
1973 return __spi_queued_transfer(spi, msg, true);
1974 }
1975
1976 static int spi_controller_initialize_queue(struct spi_controller *ctlr)
1977 {
1978 int ret;
1979
1980 ctlr->transfer = spi_queued_transfer;
1981 if (!ctlr->transfer_one_message)
1982 ctlr->transfer_one_message = spi_transfer_one_message;
1983
1984 /* Initialize and start queue */
1985 ret = spi_init_queue(ctlr);
1986 if (ret) {
1987 dev_err(&ctlr->dev, "problem initializing queue\n");
1988 goto err_init_queue;
1989 }
1990 ctlr->queued = true;
1991 ret = spi_start_queue(ctlr);
1992 if (ret) {
1993 dev_err(&ctlr->dev, "problem starting queue\n");
1994 goto err_start_queue;
1995 }
1996
1997 return 0;
1998
1999 err_start_queue:
2000 spi_destroy_queue(ctlr);
2001 err_init_queue:
2002 return ret;
2003 }
2004
2005 /**
2006  * spi_flush_queue - Send all pending messages in the queue from the caller's
2007 * context
2008 * @ctlr: controller to process queue for
2009 *
2010 * This should be used when one wants to ensure all pending messages have been
2011  * sent before doing something. It is used by the spi-mem code to make sure SPI
2012 * memory operations do not preempt regular SPI transfers that have been queued
2013 * before the spi-mem operation.
2014 */
2015 void spi_flush_queue(struct spi_controller *ctlr)
2016 {
2017 if (ctlr->transfer == spi_queued_transfer)
2018 __spi_pump_messages(ctlr, false);
2019 }
2020
2021 /*-------------------------------------------------------------------------*/
2022
2023 #if defined(CONFIG_OF)
2024 static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi,
2025 struct device_node *nc)
2026 {
2027 u32 value;
2028 int rc;
2029
2030 /* Mode (clock phase/polarity/etc.) */
2031 if (of_property_read_bool(nc, "spi-cpha"))
2032 spi->mode |= SPI_CPHA;
2033 if (of_property_read_bool(nc, "spi-cpol"))
2034 spi->mode |= SPI_CPOL;
2035 if (of_property_read_bool(nc, "spi-3wire"))
2036 spi->mode |= SPI_3WIRE;
2037 if (of_property_read_bool(nc, "spi-lsb-first"))
2038 spi->mode |= SPI_LSB_FIRST;
2039 if (of_property_read_bool(nc, "spi-cs-high"))
2040 spi->mode |= SPI_CS_HIGH;
2041
2042 /* Device DUAL/QUAD mode */
2043 if (!of_property_read_u32(nc, "spi-tx-bus-width", &value)) {
2044 switch (value) {
2045 case 0:
2046 spi->mode |= SPI_NO_TX;
2047 break;
2048 case 1:
2049 break;
2050 case 2:
2051 spi->mode |= SPI_TX_DUAL;
2052 break;
2053 case 4:
2054 spi->mode |= SPI_TX_QUAD;
2055 break;
2056 case 8:
2057 spi->mode |= SPI_TX_OCTAL;
2058 break;
2059 default:
2060 dev_warn(&ctlr->dev,
2061 "spi-tx-bus-width %d not supported\n",
2062 value);
2063 break;
2064 }
2065 }
2066
2067 if (!of_property_read_u32(nc, "spi-rx-bus-width", &value)) {
2068 switch (value) {
2069 case 0:
2070 spi->mode |= SPI_NO_RX;
2071 break;
2072 case 1:
2073 break;
2074 case 2:
2075 spi->mode |= SPI_RX_DUAL;
2076 break;
2077 case 4:
2078 spi->mode |= SPI_RX_QUAD;
2079 break;
2080 case 8:
2081 spi->mode |= SPI_RX_OCTAL;
2082 break;
2083 default:
2084 dev_warn(&ctlr->dev,
2085 "spi-rx-bus-width %d not supported\n",
2086 value);
2087 break;
2088 }
2089 }
2090
2091 if (spi_controller_is_slave(ctlr)) {
2092 if (!of_node_name_eq(nc, "slave")) {
2093 dev_err(&ctlr->dev, "%pOF is not called 'slave'\n",
2094 nc);
2095 return -EINVAL;
2096 }
2097 return 0;
2098 }
2099
2100 /* Device address */
2101 rc = of_property_read_u32(nc, "reg", &value);
2102 if (rc) {
2103 dev_err(&ctlr->dev, "%pOF has no valid 'reg' property (%d)\n",
2104 nc, rc);
2105 return rc;
2106 }
2107 spi->chip_select = value;
2108
2109 /* Device speed */
2110 if (!of_property_read_u32(nc, "spi-max-frequency", &value))
2111 spi->max_speed_hz = value;
2112
2113 return 0;
2114 }
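
/*
 * For reference, an illustrative device tree fragment (not a binding
 * definition) exercising the properties parsed above:
 *
 *	&spi0 {
 *		flash@0 {
 *			compatible = "jedec,spi-nor";
 *			reg = <0>;			// chip select 0
 *			spi-max-frequency = <25000000>;
 *			spi-cpol;
 *			spi-cpha;
 *			spi-tx-bus-width = <4>;		// SPI_TX_QUAD
 *			spi-rx-bus-width = <4>;		// SPI_RX_QUAD
 *		};
 *	};
 */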
2115
2116 static struct spi_device *
2117 of_register_spi_device(struct spi_controller *ctlr, struct device_node *nc)
2118 {
2119 struct spi_device *spi;
2120 int rc;
2121
2122 /* Alloc an spi_device */
2123 spi = spi_alloc_device(ctlr);
2124 if (!spi) {
2125 dev_err(&ctlr->dev, "spi_device alloc error for %pOF\n", nc);
2126 rc = -ENOMEM;
2127 goto err_out;
2128 }
2129
2130 /* Select device driver */
2131 rc = of_modalias_node(nc, spi->modalias,
2132 sizeof(spi->modalias));
2133 if (rc < 0) {
2134 dev_err(&ctlr->dev, "cannot find modalias for %pOF\n", nc);
2135 goto err_out;
2136 }
2137
2138 rc = of_spi_parse_dt(ctlr, spi, nc);
2139 if (rc)
2140 goto err_out;
2141
2142 /* Store a pointer to the node in the device structure */
2143 of_node_get(nc);
2144 spi->dev.of_node = nc;
2145 spi->dev.fwnode = of_fwnode_handle(nc);
2146
2147 /* Register the new device */
2148 rc = spi_add_device(spi);
2149 if (rc) {
2150 dev_err(&ctlr->dev, "spi_device register error %pOF\n", nc);
2151 goto err_of_node_put;
2152 }
2153
2154 return spi;
2155
2156 err_of_node_put:
2157 of_node_put(nc);
2158 err_out:
2159 spi_dev_put(spi);
2160 return ERR_PTR(rc);
2161 }
2162
2163 /**
2164 * of_register_spi_devices() - Register child devices onto the SPI bus
2165 * @ctlr: Pointer to spi_controller device
2166 *
2167  * Registers an spi_device for each child node of the controller node which
2168 * represents a valid SPI slave.
2169 */
2170 static void of_register_spi_devices(struct spi_controller *ctlr)
2171 {
2172 struct spi_device *spi;
2173 struct device_node *nc;
2174
2175 if (!ctlr->dev.of_node)
2176 return;
2177
2178 for_each_available_child_of_node(ctlr->dev.of_node, nc) {
2179 if (of_node_test_and_set_flag(nc, OF_POPULATED))
2180 continue;
2181 spi = of_register_spi_device(ctlr, nc);
2182 if (IS_ERR(spi)) {
2183 dev_warn(&ctlr->dev,
2184 "Failed to create SPI device for %pOF\n", nc);
2185 of_node_clear_flag(nc, OF_POPULATED);
2186 }
2187 }
2188 }
2189 #else
2190 static void of_register_spi_devices(struct spi_controller *ctlr) { }
2191 #endif
2192
2193 /**
2194 * spi_new_ancillary_device() - Register ancillary SPI device
2195 * @spi: Pointer to the main SPI device registering the ancillary device
2196 * @chip_select: Chip Select of the ancillary device
2197 *
2198 * Register an ancillary SPI device; for example some chips have a chip-select
2199 * for normal device usage and another one for setup/firmware upload.
2200 *
2201  * This may only be called from the main SPI device's probe routine.
2202 *
2203  * Return: pointer to the new SPI device on success; ERR_PTR(-errno) on failure
2204 */
2205 struct spi_device *spi_new_ancillary_device(struct spi_device *spi,
2206 u8 chip_select)
2207 {
2208 struct spi_device *ancillary;
2209 int rc = 0;
2210
2211 /* Alloc an spi_device */
2212 ancillary = spi_alloc_device(spi->controller);
2213 if (!ancillary) {
2214 rc = -ENOMEM;
2215 goto err_out;
2216 }
2217
2218 strlcpy(ancillary->modalias, "dummy", sizeof(ancillary->modalias));
2219
2220 /* Use provided chip-select for ancillary device */
2221 ancillary->chip_select = chip_select;
2222
2223 /* Take over SPI mode/speed from SPI main device */
2224 ancillary->max_speed_hz = spi->max_speed_hz;
2225 ancillary->mode = spi->mode;
2226
2227 /* Register the new device */
2228 rc = spi_add_device_locked(ancillary);
2229 if (rc) {
2230 dev_err(&spi->dev, "failed to register ancillary device\n");
2231 goto err_out;
2232 }
2233
2234 return ancillary;
2235
2236 err_out:
2237 spi_dev_put(ancillary);
2238 return ERR_PTR(rc);
2239 }
2240 EXPORT_SYMBOL_GPL(spi_new_ancillary_device);
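
/*
 * Illustrative sketch: a probe routine for a chip with a second,
 * setup-only chip select might register the ancillary device like this
 * ("priv" is a hypothetical driver structure):
 *
 *	priv->cfg_spi = spi_new_ancillary_device(spi, 1);
 *	if (IS_ERR(priv->cfg_spi))
 *		return PTR_ERR(priv->cfg_spi);
 */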
2241
2242 #ifdef CONFIG_ACPI
2243 struct acpi_spi_lookup {
2244 struct spi_controller *ctlr;
2245 u32 max_speed_hz;
2246 u32 mode;
2247 int irq;
2248 u8 bits_per_word;
2249 u8 chip_select;
2250 };
2251
2252 static void acpi_spi_parse_apple_properties(struct acpi_device *dev,
2253 struct acpi_spi_lookup *lookup)
2254 {
2255 const union acpi_object *obj;
2256
2257 if (!x86_apple_machine)
2258 return;
2259
2260 if (!acpi_dev_get_property(dev, "spiSclkPeriod", ACPI_TYPE_BUFFER, &obj)
2261 && obj->buffer.length >= 4)
2262 lookup->max_speed_hz = NSEC_PER_SEC / *(u32 *)obj->buffer.pointer;
2263
2264 if (!acpi_dev_get_property(dev, "spiWordSize", ACPI_TYPE_BUFFER, &obj)
2265 && obj->buffer.length == 8)
2266 lookup->bits_per_word = *(u64 *)obj->buffer.pointer;
2267
2268 if (!acpi_dev_get_property(dev, "spiBitOrder", ACPI_TYPE_BUFFER, &obj)
2269 && obj->buffer.length == 8 && !*(u64 *)obj->buffer.pointer)
2270 lookup->mode |= SPI_LSB_FIRST;
2271
2272 if (!acpi_dev_get_property(dev, "spiSPO", ACPI_TYPE_BUFFER, &obj)
2273 && obj->buffer.length == 8 && *(u64 *)obj->buffer.pointer)
2274 lookup->mode |= SPI_CPOL;
2275
2276 if (!acpi_dev_get_property(dev, "spiSPH", ACPI_TYPE_BUFFER, &obj)
2277 && obj->buffer.length == 8 && *(u64 *)obj->buffer.pointer)
2278 lookup->mode |= SPI_CPHA;
2279 }
2280
2281 static int acpi_spi_add_resource(struct acpi_resource *ares, void *data)
2282 {
2283 struct acpi_spi_lookup *lookup = data;
2284 struct spi_controller *ctlr = lookup->ctlr;
2285
2286 if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
2287 struct acpi_resource_spi_serialbus *sb;
2288 acpi_handle parent_handle;
2289 acpi_status status;
2290
2291 sb = &ares->data.spi_serial_bus;
2292 if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_SPI) {
2293
2294 status = acpi_get_handle(NULL,
2295 sb->resource_source.string_ptr,
2296 &parent_handle);
2297
2298 if (ACPI_FAILURE(status) ||
2299 ACPI_HANDLE(ctlr->dev.parent) != parent_handle)
2300 return -ENODEV;
2301
2302 /*
2303 * ACPI DeviceSelection numbering is handled by the
2304 * host controller driver in Windows and can vary
2305 * from driver to driver. In Linux we always expect
2306 * 0 .. max - 1 so we need to ask the driver to
2307 * translate between the two schemes.
2308 */
2309 if (ctlr->fw_translate_cs) {
2310 int cs = ctlr->fw_translate_cs(ctlr,
2311 sb->device_selection);
2312 if (cs < 0)
2313 return cs;
2314 lookup->chip_select = cs;
2315 } else {
2316 lookup->chip_select = sb->device_selection;
2317 }
2318
2319 lookup->max_speed_hz = sb->connection_speed;
2320 lookup->bits_per_word = sb->data_bit_length;
2321
2322 if (sb->clock_phase == ACPI_SPI_SECOND_PHASE)
2323 lookup->mode |= SPI_CPHA;
2324 if (sb->clock_polarity == ACPI_SPI_START_HIGH)
2325 lookup->mode |= SPI_CPOL;
2326 if (sb->device_polarity == ACPI_SPI_ACTIVE_HIGH)
2327 lookup->mode |= SPI_CS_HIGH;
2328 }
2329 } else if (lookup->irq < 0) {
2330 struct resource r;
2331
2332 if (acpi_dev_resource_interrupt(ares, 0, &r))
2333 lookup->irq = r.start;
2334 }
2335
2336 /* Always tell the ACPI core to skip this resource */
2337 return 1;
2338 }
2339
2340 static acpi_status acpi_register_spi_device(struct spi_controller *ctlr,
2341 struct acpi_device *adev)
2342 {
2343 acpi_handle parent_handle = NULL;
2344 struct list_head resource_list;
2345 struct acpi_spi_lookup lookup = {};
2346 struct spi_device *spi;
2347 int ret;
2348
2349 if (acpi_bus_get_status(adev) || !adev->status.present ||
2350 acpi_device_enumerated(adev))
2351 return AE_OK;
2352
2353 lookup.ctlr = ctlr;
2354 lookup.irq = -1;
2355
2356 INIT_LIST_HEAD(&resource_list);
2357 ret = acpi_dev_get_resources(adev, &resource_list,
2358 acpi_spi_add_resource, &lookup);
2359 acpi_dev_free_resource_list(&resource_list);
2360
2361 if (ret < 0)
2362 /* found SPI in _CRS but it points to another controller */
2363 return AE_OK;
2364
2365 if (!lookup.max_speed_hz &&
2366 ACPI_SUCCESS(acpi_get_parent(adev->handle, &parent_handle)) &&
2367 ACPI_HANDLE(ctlr->dev.parent) == parent_handle) {
2368 /* Apple does not use _CRS but nested devices for SPI slaves */
2369 acpi_spi_parse_apple_properties(adev, &lookup);
2370 }
2371
2372 if (!lookup.max_speed_hz)
2373 return AE_OK;
2374
2375 spi = spi_alloc_device(ctlr);
2376 if (!spi) {
2377 dev_err(&ctlr->dev, "failed to allocate SPI device for %s\n",
2378 dev_name(&adev->dev));
2379 return AE_NO_MEMORY;
2380 }
2381
2383 ACPI_COMPANION_SET(&spi->dev, adev);
2384 spi->max_speed_hz = lookup.max_speed_hz;
2385 spi->mode |= lookup.mode;
2386 spi->irq = lookup.irq;
2387 spi->bits_per_word = lookup.bits_per_word;
2388 spi->chip_select = lookup.chip_select;
2389
2390 acpi_set_modalias(adev, acpi_device_hid(adev), spi->modalias,
2391 sizeof(spi->modalias));
2392
2393 if (spi->irq < 0)
2394 spi->irq = acpi_dev_gpio_irq_get(adev, 0);
2395
2396 acpi_device_set_enumerated(adev);
2397
2398 adev->power.flags.ignore_parent = true;
2399 if (spi_add_device(spi)) {
2400 adev->power.flags.ignore_parent = false;
2401 dev_err(&ctlr->dev, "failed to add SPI device %s from ACPI\n",
2402 dev_name(&adev->dev));
2403 spi_dev_put(spi);
2404 }
2405
2406 return AE_OK;
2407 }
2408
2409 static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level,
2410 void *data, void **return_value)
2411 {
2412 struct spi_controller *ctlr = data;
2413 struct acpi_device *adev;
2414
2415 if (acpi_bus_get_device(handle, &adev))
2416 return AE_OK;
2417
2418 return acpi_register_spi_device(ctlr, adev);
2419 }
2420
2421 #define SPI_ACPI_ENUMERATE_MAX_DEPTH 32
2422
2423 static void acpi_register_spi_devices(struct spi_controller *ctlr)
2424 {
2425 acpi_status status;
2426 acpi_handle handle;
2427
2428 handle = ACPI_HANDLE(ctlr->dev.parent);
2429 if (!handle)
2430 return;
2431
2432 status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
2433 SPI_ACPI_ENUMERATE_MAX_DEPTH,
2434 acpi_spi_add_device, NULL, ctlr, NULL);
2435 if (ACPI_FAILURE(status))
2436 dev_warn(&ctlr->dev, "failed to enumerate SPI slaves\n");
2437 }
2438 #else
2439 static inline void acpi_register_spi_devices(struct spi_controller *ctlr) {}
2440 #endif /* CONFIG_ACPI */
2441
2442 static void spi_controller_release(struct device *dev)
2443 {
2444 struct spi_controller *ctlr;
2445
2446 ctlr = container_of(dev, struct spi_controller, dev);
2447 kfree(ctlr);
2448 }
2449
2450 static struct class spi_master_class = {
2451 .name = "spi_master",
2452 .owner = THIS_MODULE,
2453 .dev_release = spi_controller_release,
2454 .dev_groups = spi_master_groups,
2455 };
2456
2457 #ifdef CONFIG_SPI_SLAVE
2458 /**
2459 * spi_slave_abort - abort the ongoing transfer request on an SPI slave
2460 * controller
2461 * @spi: device used for the current transfer
2462 */
2463 int spi_slave_abort(struct spi_device *spi)
2464 {
2465 struct spi_controller *ctlr = spi->controller;
2466
2467 if (spi_controller_is_slave(ctlr) && ctlr->slave_abort)
2468 return ctlr->slave_abort(ctlr);
2469
2470 return -ENOTSUPP;
2471 }
2472 EXPORT_SYMBOL_GPL(spi_slave_abort);
2473
2474 static int match_true(struct device *dev, void *data)
2475 {
2476 return 1;
2477 }
2478
2479 static ssize_t slave_show(struct device *dev, struct device_attribute *attr,
2480 char *buf)
2481 {
2482 struct spi_controller *ctlr = container_of(dev, struct spi_controller,
2483 dev);
2484 struct device *child;
2485
2486 child = device_find_child(&ctlr->dev, NULL, match_true);
2487 return sprintf(buf, "%s\n",
2488 child ? to_spi_device(child)->modalias : NULL);
2489 }
2490
2491 static ssize_t slave_store(struct device *dev, struct device_attribute *attr,
2492 const char *buf, size_t count)
2493 {
2494 struct spi_controller *ctlr = container_of(dev, struct spi_controller,
2495 dev);
2496 struct spi_device *spi;
2497 struct device *child;
2498 char name[32];
2499 int rc;
2500
2501 rc = sscanf(buf, "%31s", name);
2502 if (rc != 1 || !name[0])
2503 return -EINVAL;
2504
2505 child = device_find_child(&ctlr->dev, NULL, match_true);
2506 if (child) {
2507 /* Remove registered slave */
2508 device_unregister(child);
2509 put_device(child);
2510 }
2511
2512 if (strcmp(name, "(null)")) {
2513 /* Register new slave */
2514 spi = spi_alloc_device(ctlr);
2515 if (!spi)
2516 return -ENOMEM;
2517
2518 strlcpy(spi->modalias, name, sizeof(spi->modalias));
2519
2520 rc = spi_add_device(spi);
2521 if (rc) {
2522 spi_dev_put(spi);
2523 return rc;
2524 }
2525 }
2526
2527 return count;
2528 }
2529
2530 static DEVICE_ATTR_RW(slave);
2531
2532 static struct attribute *spi_slave_attrs[] = {
2533 &dev_attr_slave.attr,
2534 NULL,
2535 };
2536
2537 static const struct attribute_group spi_slave_group = {
2538 .attrs = spi_slave_attrs,
2539 };
2540
2541 static const struct attribute_group *spi_slave_groups[] = {
2542 &spi_controller_statistics_group,
2543 &spi_slave_group,
2544 NULL,
2545 };
2546
2547 static struct class spi_slave_class = {
2548 .name = "spi_slave",
2549 .owner = THIS_MODULE,
2550 .dev_release = spi_controller_release,
2551 .dev_groups = spi_slave_groups,
2552 };
2553 #else
2554 extern struct class spi_slave_class; /* dummy */
2555 #endif
2556
2557 /**
2558 * __spi_alloc_controller - allocate an SPI master or slave controller
2559 * @dev: the controller, possibly using the platform_bus
2560 * @size: how much zeroed driver-private data to allocate; the pointer to this
2561 * memory is in the driver_data field of the returned device, accessible
2562 * with spi_controller_get_devdata(); the memory is cacheline aligned;
2563 * drivers granting DMA access to portions of their private data need to
2564 * round up @size using ALIGN(size, dma_get_cache_alignment()).
2565 * @slave: flag indicating whether to allocate an SPI master (false) or SPI
2566 * slave (true) controller
2567 * Context: can sleep
2568 *
2569 * This call is used only by SPI controller drivers, which are the
2570 * only ones directly touching chip registers. It's how they allocate
2571 * an spi_controller structure, prior to calling spi_register_controller().
2572 *
2573 * This must be called from context that can sleep.
2574 *
2575 * The caller is responsible for assigning the bus number and initializing the
2576 * controller's methods before calling spi_register_controller(); and (after
2577 * errors adding the device) calling spi_controller_put() to prevent a memory
2578 * leak.
2579 *
2580 * Return: the SPI controller structure on success, else NULL.
2581 */
2582 struct spi_controller *__spi_alloc_controller(struct device *dev,
2583 unsigned int size, bool slave)
2584 {
2585 struct spi_controller *ctlr;
2586 size_t ctlr_size = ALIGN(sizeof(*ctlr), dma_get_cache_alignment());
2587
2588 if (!dev)
2589 return NULL;
2590
2591 ctlr = kzalloc(size + ctlr_size, GFP_KERNEL);
2592 if (!ctlr)
2593 return NULL;
2594
2595 device_initialize(&ctlr->dev);
2596 INIT_LIST_HEAD(&ctlr->queue);
2597 spin_lock_init(&ctlr->queue_lock);
2598 spin_lock_init(&ctlr->bus_lock_spinlock);
2599 mutex_init(&ctlr->bus_lock_mutex);
2600 mutex_init(&ctlr->io_mutex);
2601 mutex_init(&ctlr->add_lock);
2602 ctlr->bus_num = -1;
2603 ctlr->num_chipselect = 1;
2604 ctlr->slave = slave;
2605 if (IS_ENABLED(CONFIG_SPI_SLAVE) && slave)
2606 ctlr->dev.class = &spi_slave_class;
2607 else
2608 ctlr->dev.class = &spi_master_class;
2609 ctlr->dev.parent = dev;
2610 pm_suspend_ignore_children(&ctlr->dev, true);
2611 spi_controller_set_devdata(ctlr, (void *)ctlr + ctlr_size);
2612
2613 return ctlr;
2614 }
2615 EXPORT_SYMBOL_GPL(__spi_alloc_controller);
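
/*
 * Illustrative sketch of the usual allocation pattern, via the
 * spi_alloc_master() wrapper ("struct my_priv" and "pdev" are
 * hypothetical):
 *
 *	struct spi_controller *ctlr;
 *	struct my_priv *priv;
 *
 *	ctlr = spi_alloc_master(&pdev->dev, sizeof(*priv));
 *	if (!ctlr)
 *		return -ENOMEM;
 *	priv = spi_controller_get_devdata(ctlr);
 */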
2616
2617 static void devm_spi_release_controller(struct device *dev, void *ctlr)
2618 {
2619 spi_controller_put(*(struct spi_controller **)ctlr);
2620 }
2621
2622 /**
2623 * __devm_spi_alloc_controller - resource-managed __spi_alloc_controller()
2624 * @dev: physical device of SPI controller
2625 * @size: how much zeroed driver-private data to allocate
2626 * @slave: whether to allocate an SPI master (false) or SPI slave (true)
2627 * Context: can sleep
2628 *
2629 * Allocate an SPI controller and automatically release a reference on it
2630 * when @dev is unbound from its driver. Drivers are thus relieved from
2631 * having to call spi_controller_put().
2632 *
2633 * The arguments to this function are identical to __spi_alloc_controller().
2634 *
2635 * Return: the SPI controller structure on success, else NULL.
2636 */
2637 struct spi_controller *__devm_spi_alloc_controller(struct device *dev,
2638 unsigned int size,
2639 bool slave)
2640 {
2641 struct spi_controller **ptr, *ctlr;
2642
2643 ptr = devres_alloc(devm_spi_release_controller, sizeof(*ptr),
2644 GFP_KERNEL);
2645 if (!ptr)
2646 return NULL;
2647
2648 ctlr = __spi_alloc_controller(dev, size, slave);
2649 if (ctlr) {
2650 ctlr->devm_allocated = true;
2651 *ptr = ctlr;
2652 devres_add(dev, ptr);
2653 } else {
2654 devres_free(ptr);
2655 }
2656
2657 return ctlr;
2658 }
2659 EXPORT_SYMBOL_GPL(__devm_spi_alloc_controller);
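
/*
 * Illustrative sketch: the managed variant removes the need for
 * spi_controller_put() in the probe error paths:
 *
 *	ctlr = devm_spi_alloc_master(&pdev->dev, sizeof(*priv));
 *	if (!ctlr)
 *		return -ENOMEM;
 */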
2660
2661 #ifdef CONFIG_OF
2662 static int of_spi_get_gpio_numbers(struct spi_controller *ctlr)
2663 {
2664 int nb, i, *cs;
2665 struct device_node *np = ctlr->dev.of_node;
2666
2667 if (!np)
2668 return 0;
2669
2670 nb = of_gpio_named_count(np, "cs-gpios");
2671 ctlr->num_chipselect = max_t(int, nb, ctlr->num_chipselect);
2672
2673 /* Return error only for an incorrectly formed cs-gpios property */
2674 if (nb == 0 || nb == -ENOENT)
2675 return 0;
2676 else if (nb < 0)
2677 return nb;
2678
2679 cs = devm_kcalloc(&ctlr->dev, ctlr->num_chipselect, sizeof(int),
2680 GFP_KERNEL);
2681 ctlr->cs_gpios = cs;
2682
2683 if (!ctlr->cs_gpios)
2684 return -ENOMEM;
2685
2686 for (i = 0; i < ctlr->num_chipselect; i++)
2687 cs[i] = -ENOENT;
2688
2689 for (i = 0; i < nb; i++)
2690 cs[i] = of_get_named_gpio(np, "cs-gpios", i);
2691
2692 return 0;
2693 }
2694 #else
2695 static int of_spi_get_gpio_numbers(struct spi_controller *ctlr)
2696 {
2697 return 0;
2698 }
2699 #endif
2700
2701 /**
2702 * spi_get_gpio_descs() - grab chip select GPIOs for the master
2703 * @ctlr: The SPI master to grab GPIO descriptors for
2704 */
2705 static int spi_get_gpio_descs(struct spi_controller *ctlr)
2706 {
2707 int nb, i;
2708 struct gpio_desc **cs;
2709 struct device *dev = &ctlr->dev;
2710 unsigned long native_cs_mask = 0;
2711 unsigned int num_cs_gpios = 0;
2712
2713 nb = gpiod_count(dev, "cs");
2714 if (nb < 0) {
2715 /* No GPIOs at all is fine, else return the error */
2716 if (nb == -ENOENT)
2717 return 0;
2718 return nb;
2719 }
2720
2721 ctlr->num_chipselect = max_t(int, nb, ctlr->num_chipselect);
2722
2723 cs = devm_kcalloc(dev, ctlr->num_chipselect, sizeof(*cs),
2724 GFP_KERNEL);
2725 if (!cs)
2726 return -ENOMEM;
2727 ctlr->cs_gpiods = cs;
2728
2729 for (i = 0; i < nb; i++) {
2730 /*
2731 * Most chipselects are active low, the inverted
2732 * semantics are handled by special quirks in gpiolib,
2733 		 * so initializing them to GPIOD_OUT_LOW here means
2734 		 * "unasserted"; in most cases this will drive the physical
2735 		 * line high.
2736 */
2737 cs[i] = devm_gpiod_get_index_optional(dev, "cs", i,
2738 GPIOD_OUT_LOW);
2739 if (IS_ERR(cs[i]))
2740 return PTR_ERR(cs[i]);
2741
2742 if (cs[i]) {
2743 /*
2744 * If we find a CS GPIO, name it after the device and
2745 * chip select line.
2746 */
2747 char *gpioname;
2748
2749 gpioname = devm_kasprintf(dev, GFP_KERNEL, "%s CS%d",
2750 dev_name(dev), i);
2751 if (!gpioname)
2752 return -ENOMEM;
2753 gpiod_set_consumer_name(cs[i], gpioname);
2754 num_cs_gpios++;
2755 continue;
2756 }
2757
2758 if (ctlr->max_native_cs && i >= ctlr->max_native_cs) {
2759 dev_err(dev, "Invalid native chip select %d\n", i);
2760 return -EINVAL;
2761 }
2762 native_cs_mask |= BIT(i);
2763 }
2764
2765 ctlr->unused_native_cs = ffs(~native_cs_mask) - 1;
2766
2767 if ((ctlr->flags & SPI_MASTER_GPIO_SS) && num_cs_gpios &&
2768 ctlr->max_native_cs && ctlr->unused_native_cs >= ctlr->max_native_cs) {
2769 dev_err(dev, "No unused native chip select available\n");
2770 return -EINVAL;
2771 }
2772
2773 return 0;
2774 }
2775
2776 static int spi_controller_check_ops(struct spi_controller *ctlr)
2777 {
2778 /*
2779 	 * The controller may implement only the high-level SPI-memory like
2780 	 * operations if it does not support regular SPI transfers, and this is
2781 	 * a valid use case.
2782 	 * If ->mem_ops is NULL, we request that at least one of the
2783 	 * ->transfer_xxx() methods be implemented.
2784 */
2785 if (ctlr->mem_ops) {
2786 if (!ctlr->mem_ops->exec_op)
2787 return -EINVAL;
2788 } else if (!ctlr->transfer && !ctlr->transfer_one &&
2789 !ctlr->transfer_one_message) {
2790 return -EINVAL;
2791 }
2792
2793 return 0;
2794 }
2795
2796 /**
2797 * spi_register_controller - register SPI master or slave controller
2798 * @ctlr: initialized master, originally from spi_alloc_master() or
2799 * spi_alloc_slave()
2800 * Context: can sleep
2801 *
2802 * SPI controllers connect to their drivers using some non-SPI bus,
2803 * such as the platform bus. The final stage of probe() in that code
2804 * includes calling spi_register_controller() to hook up to this SPI bus glue.
2805 *
2806 * SPI controllers use board specific (often SOC specific) bus numbers,
2807 * and board-specific addressing for SPI devices combines those numbers
2808 * with chip select numbers. Since SPI does not directly support dynamic
2809 * device identification, boards need configuration tables telling which
2810 * chip is at which address.
2811 *
2812  * This must be called from context that can sleep. It returns zero on
2813  * success, else a negative error code; on failure the caller must drop its
2814  * reference with spi_controller_put(). After a successful return, the
2815  * caller is responsible for calling spi_unregister_controller().
2816 *
2817 * Return: zero on success, else a negative error code.
2818 */
2819 int spi_register_controller(struct spi_controller *ctlr)
2820 {
2821 struct device *dev = ctlr->dev.parent;
2822 struct boardinfo *bi;
2823 int status;
2824 int id, first_dynamic;
2825
2826 if (!dev)
2827 return -ENODEV;
2828
2829 /*
2830 * Make sure all necessary hooks are implemented before registering
2831 * the SPI controller.
2832 */
2833 status = spi_controller_check_ops(ctlr);
2834 if (status)
2835 return status;
2836
2837 if (ctlr->bus_num >= 0) {
2838 		/* devices with a fixed bus number must check in with that number */
2839 mutex_lock(&board_lock);
2840 id = idr_alloc(&spi_master_idr, ctlr, ctlr->bus_num,
2841 ctlr->bus_num + 1, GFP_KERNEL);
2842 mutex_unlock(&board_lock);
2843 if (WARN(id < 0, "couldn't get idr"))
2844 return id == -ENOSPC ? -EBUSY : id;
2845 ctlr->bus_num = id;
2846 } else if (ctlr->dev.of_node) {
2847 /* allocate dynamic bus number using Linux idr */
2848 id = of_alias_get_id(ctlr->dev.of_node, "spi");
2849 if (id >= 0) {
2850 ctlr->bus_num = id;
2851 mutex_lock(&board_lock);
2852 id = idr_alloc(&spi_master_idr, ctlr, ctlr->bus_num,
2853 ctlr->bus_num + 1, GFP_KERNEL);
2854 mutex_unlock(&board_lock);
2855 if (WARN(id < 0, "couldn't get idr"))
2856 return id == -ENOSPC ? -EBUSY : id;
2857 }
2858 }
2859 if (ctlr->bus_num < 0) {
2860 first_dynamic = of_alias_get_highest_id("spi");
2861 if (first_dynamic < 0)
2862 first_dynamic = 0;
2863 else
2864 first_dynamic++;
2865
2866 mutex_lock(&board_lock);
2867 id = idr_alloc(&spi_master_idr, ctlr, first_dynamic,
2868 0, GFP_KERNEL);
2869 mutex_unlock(&board_lock);
2870 if (WARN(id < 0, "couldn't get idr"))
2871 return id;
2872 ctlr->bus_num = id;
2873 }
2874 ctlr->bus_lock_flag = 0;
2875 init_completion(&ctlr->xfer_completion);
2876 if (!ctlr->max_dma_len)
2877 ctlr->max_dma_len = INT_MAX;
2878
2879 /* register the device, then userspace will see it.
2880 * registration fails if the bus ID is in use.
2881 */
2882 dev_set_name(&ctlr->dev, "spi%u", ctlr->bus_num);
2883
2884 if (!spi_controller_is_slave(ctlr)) {
2885 if (ctlr->use_gpio_descriptors) {
2886 status = spi_get_gpio_descs(ctlr);
2887 if (status)
2888 goto free_bus_id;
2889 /*
2890 * A controller using GPIO descriptors always
2891 * supports SPI_CS_HIGH if need be.
2892 */
2893 ctlr->mode_bits |= SPI_CS_HIGH;
2894 } else {
2895 /* Legacy code path for GPIOs from DT */
2896 status = of_spi_get_gpio_numbers(ctlr);
2897 if (status)
2898 goto free_bus_id;
2899 }
2900 }
2901
2902 /*
2903 * Even if it's just one always-selected device, there must
2904 * be at least one chipselect.
2905 */
2906 if (!ctlr->num_chipselect) {
2907 status = -EINVAL;
2908 goto free_bus_id;
2909 }
2910
2911 status = device_add(&ctlr->dev);
2912 if (status < 0)
2913 goto free_bus_id;
2914 dev_dbg(dev, "registered %s %s\n",
2915 spi_controller_is_slave(ctlr) ? "slave" : "master",
2916 dev_name(&ctlr->dev));
2917
2918 /*
2919 * If we're using a queued driver, start the queue. Note that we don't
2920 * need the queueing logic if the driver is only supporting high-level
2921 * memory operations.
2922 */
2923 if (ctlr->transfer) {
2924 dev_info(dev, "controller is unqueued, this is deprecated\n");
2925 } else if (ctlr->transfer_one || ctlr->transfer_one_message) {
2926 status = spi_controller_initialize_queue(ctlr);
2927 if (status) {
2928 device_del(&ctlr->dev);
2929 goto free_bus_id;
2930 }
2931 }
2932 /* add statistics */
2933 spin_lock_init(&ctlr->statistics.lock);
2934
2935 mutex_lock(&board_lock);
2936 list_add_tail(&ctlr->list, &spi_controller_list);
2937 list_for_each_entry(bi, &board_list, list)
2938 spi_match_controller_to_boardinfo(ctlr, &bi->board_info);
2939 mutex_unlock(&board_lock);
2940
2941 /* Register devices from the device tree and ACPI */
2942 of_register_spi_devices(ctlr);
2943 acpi_register_spi_devices(ctlr);
2944 return status;
2945
2946 free_bus_id:
2947 mutex_lock(&board_lock);
2948 idr_remove(&spi_master_idr, ctlr->bus_num);
2949 mutex_unlock(&board_lock);
2950 return status;
2951 }
2952 EXPORT_SYMBOL_GPL(spi_register_controller);
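
/*
 * Illustrative sketch of the tail of a typical controller probe, with
 * hypothetical callbacks; on failure the caller still holds the reference
 * from allocation and must drop it:
 *
 *	ctlr->num_chipselect = 4;
 *	ctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
 *	ctlr->transfer_one = my_transfer_one;	// hypothetical
 *	ctlr->set_cs = my_set_cs;		// hypothetical
 *
 *	ret = spi_register_controller(ctlr);
 *	if (ret)
 *		spi_controller_put(ctlr);
 *	return ret;
 */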
2953
2954 static void devm_spi_unregister(struct device *dev, void *res)
2955 {
2956 spi_unregister_controller(*(struct spi_controller **)res);
2957 }
2958
2959 /**
2960 * devm_spi_register_controller - register managed SPI master or slave
2961 * controller
2962 * @dev: device managing SPI controller
2963 * @ctlr: initialized controller, originally from spi_alloc_master() or
2964 * spi_alloc_slave()
2965 * Context: can sleep
2966 *
2967  * Register an SPI controller as with spi_register_controller(), which will
2968 * automatically be unregistered and freed.
2969 *
2970 * Return: zero on success, else a negative error code.
2971 */
2972 int devm_spi_register_controller(struct device *dev,
2973 struct spi_controller *ctlr)
2974 {
2975 struct spi_controller **ptr;
2976 int ret;
2977
2978 ptr = devres_alloc(devm_spi_unregister, sizeof(*ptr), GFP_KERNEL);
2979 if (!ptr)
2980 return -ENOMEM;
2981
2982 ret = spi_register_controller(ctlr);
2983 if (!ret) {
2984 *ptr = ctlr;
2985 devres_add(dev, ptr);
2986 } else {
2987 devres_free(ptr);
2988 }
2989
2990 return ret;
2991 }
2992 EXPORT_SYMBOL_GPL(devm_spi_register_controller);
2993
2994 static int __unregister(struct device *dev, void *null)
2995 {
2996 spi_unregister_device(to_spi_device(dev));
2997 return 0;
2998 }
2999
3000 /**
3001 * spi_unregister_controller - unregister SPI master or slave controller
3002 * @ctlr: the controller being unregistered
3003 * Context: can sleep
3004 *
3005 * This call is used only by SPI controller drivers, which are the
3006 * only ones directly touching chip registers.
3007 *
3008 * This must be called from context that can sleep.
3009 *
3010 * Note that this function also drops a reference to the controller.
3011 */
3012 void spi_unregister_controller(struct spi_controller *ctlr)
3013 {
3014 struct spi_controller *found;
3015 int id = ctlr->bus_num;
3016
3017 /* Prevent addition of new devices, unregister existing ones */
3018 if (IS_ENABLED(CONFIG_SPI_DYNAMIC))
3019 mutex_lock(&ctlr->add_lock);
3020
3021 device_for_each_child(&ctlr->dev, NULL, __unregister);
3022
3023 /* First make sure that this controller was ever added */
3024 mutex_lock(&board_lock);
3025 found = idr_find(&spi_master_idr, id);
3026 mutex_unlock(&board_lock);
3027 if (ctlr->queued) {
3028 if (spi_destroy_queue(ctlr))
3029 dev_err(&ctlr->dev, "queue remove failed\n");
3030 }
3031 mutex_lock(&board_lock);
3032 list_del(&ctlr->list);
3033 mutex_unlock(&board_lock);
3034
3035 device_del(&ctlr->dev);
3036
3037 /* free bus id */
3038 mutex_lock(&board_lock);
3039 if (found == ctlr)
3040 idr_remove(&spi_master_idr, id);
3041 mutex_unlock(&board_lock);
3042
3043 if (IS_ENABLED(CONFIG_SPI_DYNAMIC))
3044 mutex_unlock(&ctlr->add_lock);
3045
3046 /* Release the last reference on the controller if its driver
3047 * has not yet been converted to devm_spi_alloc_master/slave().
3048 */
3049 if (!ctlr->devm_allocated)
3050 put_device(&ctlr->dev);
3051 }
3052 EXPORT_SYMBOL_GPL(spi_unregister_controller);
3053
3054 int spi_controller_suspend(struct spi_controller *ctlr)
3055 {
3056 int ret;
3057
3058 /* Basically no-ops for non-queued controllers */
3059 if (!ctlr->queued)
3060 return 0;
3061
3062 ret = spi_stop_queue(ctlr);
3063 if (ret)
3064 dev_err(&ctlr->dev, "queue stop failed\n");
3065
3066 return ret;
3067 }
3068 EXPORT_SYMBOL_GPL(spi_controller_suspend);
3069
3070 int spi_controller_resume(struct spi_controller *ctlr)
3071 {
3072 int ret;
3073
3074 if (!ctlr->queued)
3075 return 0;
3076
3077 ret = spi_start_queue(ctlr);
3078 if (ret)
3079 dev_err(&ctlr->dev, "queue restart failed\n");
3080
3081 return ret;
3082 }
3083 EXPORT_SYMBOL_GPL(spi_controller_resume);
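
/*
 * Illustrative sketch: controller drivers usually call the two helpers
 * above from their PM callbacks, assuming the controller was stored as
 * drvdata; "my_hw_save"/"my_hw_restore" are hypothetical:
 *
 *	static int my_suspend(struct device *dev)
 *	{
 *		struct spi_controller *ctlr = dev_get_drvdata(dev);
 *		int ret = spi_controller_suspend(ctlr);
 *
 *		if (!ret)
 *			my_hw_save(ctlr);
 *		return ret;
 *	}
 *
 *	static int my_resume(struct device *dev)
 *	{
 *		struct spi_controller *ctlr = dev_get_drvdata(dev);
 *
 *		my_hw_restore(ctlr);
 *		return spi_controller_resume(ctlr);
 *	}
 */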
3084
3085 static int __spi_controller_match(struct device *dev, const void *data)
3086 {
3087 struct spi_controller *ctlr;
3088 const u16 *bus_num = data;
3089
3090 ctlr = container_of(dev, struct spi_controller, dev);
3091 return ctlr->bus_num == *bus_num;
3092 }
3093
3094 /**
3095 * spi_busnum_to_master - look up master associated with bus_num
3096 * @bus_num: the master's bus number
3097 * Context: can sleep
3098 *
3099 * This call may be used with devices that are registered after
3100 * arch init time. It returns a refcounted pointer to the relevant
3101 * spi_controller (which the caller must release), or NULL if there is
3102 * no such master registered.
3103 *
3104 * Return: the SPI master structure on success, else NULL.
3105 */
3106 struct spi_controller *spi_busnum_to_master(u16 bus_num)
3107 {
3108 struct device *dev;
3109 struct spi_controller *ctlr = NULL;
3110
3111 dev = class_find_device(&spi_master_class, NULL, &bus_num,
3112 __spi_controller_match);
3113 if (dev)
3114 ctlr = container_of(dev, struct spi_controller, dev);
3115 	/* the reference was taken in class_find_device() */
3116 return ctlr;
3117 }
3118 EXPORT_SYMBOL_GPL(spi_busnum_to_master);
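
/*
 * Illustrative sketch: the lookup takes a reference which the caller must
 * drop when done:
 *
 *	struct spi_controller *ctlr = spi_busnum_to_master(0);
 *
 *	if (ctlr) {
 *		// ... use ctlr ...
 *		put_device(&ctlr->dev);
 *	}
 */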
3119
3120 /*-------------------------------------------------------------------------*/
3121
3122 /* Core methods for SPI resource management */
3123
3124 /**
3125 * spi_res_alloc - allocate a spi resource that is life-cycle managed
3126 * during the processing of a spi_message while using
3127 * spi_transfer_one
3128 * @spi: the spi device for which we allocate memory
3129 * @release: the release code to execute for this resource
3130 * @size: size to alloc and return
3131 * @gfp: GFP allocation flags
3132 *
3133  * Return: the pointer to the allocated data, or NULL on allocation failure
3134 *
3135 * This may get enhanced in the future to allocate from a memory pool
3136 * of the @spi_device or @spi_controller to avoid repeated allocations.
3137 */
3138 void *spi_res_alloc(struct spi_device *spi,
3139 spi_res_release_t release,
3140 size_t size, gfp_t gfp)
3141 {
3142 struct spi_res *sres;
3143
3144 sres = kzalloc(sizeof(*sres) + size, gfp);
3145 if (!sres)
3146 return NULL;
3147
3148 INIT_LIST_HEAD(&sres->entry);
3149 sres->release = release;
3150
3151 return sres->data;
3152 }
3153 EXPORT_SYMBOL_GPL(spi_res_alloc);
3154
3155 /**
3156 * spi_res_free - free an spi resource
3157 * @res: pointer to the custom data of a resource
3159  */
3160 void spi_res_free(void *res)
3161 {
3162 struct spi_res *sres = container_of(res, struct spi_res, data);
3163
3164 if (!res)
3165 return;
3166
3167 WARN_ON(!list_empty(&sres->entry));
3168 kfree(sres);
3169 }
3170 EXPORT_SYMBOL_GPL(spi_res_free);
3171
3172 /**
3173 * spi_res_add - add a spi_res to the spi_message
3174 * @message: the spi message
3175 * @res: the spi_resource
3176 */
3177 void spi_res_add(struct spi_message *message, void *res)
3178 {
3179 struct spi_res *sres = container_of(res, struct spi_res, data);
3180
3181 WARN_ON(!list_empty(&sres->entry));
3182 list_add_tail(&sres->entry, &message->resources);
3183 }
3184 EXPORT_SYMBOL_GPL(spi_res_add);
3185
3186 /**
3187 * spi_res_release - release all spi resources for this message
3188 * @ctlr: the @spi_controller
3189 * @message: the @spi_message
3190 */
3191 void spi_res_release(struct spi_controller *ctlr, struct spi_message *message)
3192 {
3193 struct spi_res *res, *tmp;
3194
3195 list_for_each_entry_safe_reverse(res, tmp, &message->resources, entry) {
3196 if (res->release)
3197 res->release(ctlr, message, res->data);
3198
3199 list_del(&res->entry);
3200
3201 kfree(res);
3202 }
3203 }
3204 EXPORT_SYMBOL_GPL(spi_res_release);
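
/*
 * Illustrative sketch of the spi_res life cycle, with a hypothetical
 * spi_res_release_t callback "my_release" and per-message data
 * "struct my_state"; the core frees the resource automatically via
 * spi_res_release() when the message is finalized:
 *
 *	struct my_state *st;
 *
 *	st = spi_res_alloc(msg->spi, my_release, sizeof(*st), GFP_KERNEL);
 *	if (!st)
 *		return -ENOMEM;
 *	spi_res_add(msg, st);
 */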
3205
3206 /*-------------------------------------------------------------------------*/
3207
3208 /* Core methods for spi_message alterations */
3209
3210 static void __spi_replace_transfers_release(struct spi_controller *ctlr,
3211 struct spi_message *msg,
3212 void *res)
3213 {
3214 struct spi_replaced_transfers *rxfer = res;
3215 size_t i;
3216
3217 /* call extra callback if requested */
3218 if (rxfer->release)
3219 rxfer->release(ctlr, msg, res);
3220
3221 /* insert replaced transfers back into the message */
3222 list_splice(&rxfer->replaced_transfers, rxfer->replaced_after);
3223
3224 /* remove the formerly inserted entries */
3225 for (i = 0; i < rxfer->inserted; i++)
3226 list_del(&rxfer->inserted_transfers[i].transfer_list);
3227 }
3228
3229 /**
3230 * spi_replace_transfers - replace transfers with several transfers
3231 * and register change with spi_message.resources
3232 * @msg: the spi_message we work upon
3233 * @xfer_first: the first spi_transfer we want to replace
3234 * @remove: number of transfers to remove
3235 * @insert: the number of transfers we want to insert instead
3236 * @release: extra release code necessary in some circumstances
3237 * @extradatasize: extra data to allocate (with alignment guarantees
3238 * of struct @spi_transfer)
3239 * @gfp: gfp flags
3240 *
3241  * Return: pointer to @spi_replaced_transfers,
3242 * PTR_ERR(...) in case of errors.
3243 */
3244 struct spi_replaced_transfers *spi_replace_transfers(
3245 struct spi_message *msg,
3246 struct spi_transfer *xfer_first,
3247 size_t remove,
3248 size_t insert,
3249 spi_replaced_release_t release,
3250 size_t extradatasize,
3251 gfp_t gfp)
3252 {
3253 struct spi_replaced_transfers *rxfer;
3254 struct spi_transfer *xfer;
3255 size_t i;
3256
3257 /* allocate the structure using spi_res */
3258 rxfer = spi_res_alloc(msg->spi, __spi_replace_transfers_release,
3259 struct_size(rxfer, inserted_transfers, insert)
3260 + extradatasize,
3261 gfp);
3262 if (!rxfer)
3263 return ERR_PTR(-ENOMEM);
3264
3265 /* the release code to invoke before running the generic release */
3266 rxfer->release = release;
3267
3268 /* assign extradata */
3269 if (extradatasize)
3270 rxfer->extradata =
3271 &rxfer->inserted_transfers[insert];
3272
3273 /* init the replaced_transfers list */
3274 INIT_LIST_HEAD(&rxfer->replaced_transfers);
3275
3276 	/* assign the list_entry after which we should reinsert
3277 	 * the @replaced_transfers - it may be spi_message.transfers!
3278 	 */
3279 rxfer->replaced_after = xfer_first->transfer_list.prev;
3280
3281 /* remove the requested number of transfers */
3282 for (i = 0; i < remove; i++) {
3283 		/* if the entry after replaced_after is msg->transfers,
3284 		 * then we have been requested to remove more transfers
3285 		 * than are in the list
3286 		 */
3287 if (rxfer->replaced_after->next == &msg->transfers) {
3288 dev_err(&msg->spi->dev,
3289 "requested to remove more spi_transfers than are available\n");
3290 /* insert replaced transfers back into the message */
3291 list_splice(&rxfer->replaced_transfers,
3292 rxfer->replaced_after);
3293
3294 /* free the spi_replace_transfer structure */
3295 spi_res_free(rxfer);
3296
3297 /* and return with an error */
3298 return ERR_PTR(-EINVAL);
3299 }
3300
3301 /* remove the entry after replaced_after from list of
3302 * transfers and add it to list of replaced_transfers
3303 */
3304 list_move_tail(rxfer->replaced_after->next,
3305 &rxfer->replaced_transfers);
3306 }
3307
3308 	/* create copies of the given xfer with identical settings,
3309 	 * based on the first transfer to get removed
3310 	 */
3311 for (i = 0; i < insert; i++) {
3312 /* we need to run in reverse order */
3313 xfer = &rxfer->inserted_transfers[insert - 1 - i];
3314
3315 /* copy all spi_transfer data */
3316 memcpy(xfer, xfer_first, sizeof(*xfer));
3317
3318 /* add to list */
3319 list_add(&xfer->transfer_list, rxfer->replaced_after);
3320
3321 /* clear cs_change and delay for all but the last */
3322 if (i) {
3323 xfer->cs_change = false;
3324 xfer->delay.value = 0;
3325 }
3326 }
3327
3328 /* set up inserted */
3329 rxfer->inserted = insert;
3330
3331 /* and register it with spi_res/spi_message */
3332 spi_res_add(msg, rxfer);
3333
3334 return rxfer;
3335 }
3336 EXPORT_SYMBOL_GPL(spi_replace_transfers);
3337
3338 static int __spi_split_transfer_maxsize(struct spi_controller *ctlr,
3339 struct spi_message *msg,
3340 struct spi_transfer **xferp,
3341 size_t maxsize,
3342 gfp_t gfp)
3343 {
3344 struct spi_transfer *xfer = *xferp, *xfers;
3345 struct spi_replaced_transfers *srt;
3346 size_t offset;
3347 size_t count, i;
3348
3349 /* calculate how many we have to replace */
3350 count = DIV_ROUND_UP(xfer->len, maxsize);
3351
3352 /* create replacement */
3353 srt = spi_replace_transfers(msg, xfer, 1, count, NULL, 0, gfp);
3354 if (IS_ERR(srt))
3355 return PTR_ERR(srt);
3356 xfers = srt->inserted_transfers;
3357
3358 	/* now handle each of those newly inserted spi_transfers
3359 	 * note that the replacement spi_transfers are all preset
3360 	 * to the same values as *xferp, so tx_buf, rx_buf and len
3361 	 * are all identical (as well as most others)
3362 	 * so we just have to fix up len and the pointers.
3363 	 *
3364 	 * this also includes support for the deprecated
3365 	 * spi_message.is_dma_mapped interface
3366 	 */
3367
3368 /* the first transfer just needs the length modified, so we
3369 * run it outside the loop
3370 */
3371 xfers[0].len = min_t(size_t, maxsize, xfer[0].len);
3372
3373 /* all the others need rx_buf/tx_buf also set */
3374 for (i = 1, offset = maxsize; i < count; offset += maxsize, i++) {
3375 /* update rx_buf, tx_buf and dma */
3376 if (xfers[i].rx_buf)
3377 xfers[i].rx_buf += offset;
3378 if (xfers[i].rx_dma)
3379 xfers[i].rx_dma += offset;
3380 if (xfers[i].tx_buf)
3381 xfers[i].tx_buf += offset;
3382 if (xfers[i].tx_dma)
3383 xfers[i].tx_dma += offset;
3384
3385 /* update length */
3386 xfers[i].len = min(maxsize, xfers[i].len - offset);
3387 }
3388
3389 /* we set up xferp to the last entry we have inserted,
3390 * so that we skip those already split transfers
3391 */
3392 *xferp = &xfers[count - 1];
3393
3394 /* increment statistics counters */
3395 SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics,
3396 transfers_split_maxsize);
3397 SPI_STATISTICS_INCREMENT_FIELD(&msg->spi->statistics,
3398 transfers_split_maxsize);
3399
3400 return 0;
3401 }
3402
3403 /**
3404 * spi_split_transfers_maxsize - split spi transfers into multiple transfers
3405 * when an individual transfer exceeds a
3406 * certain size
3407 * @ctlr: the @spi_controller for this transfer
3408 * @msg: the @spi_message to transform
3409  * @maxsize: the maximum length a transfer may have before it is split
3410 * @gfp: GFP allocation flags
3411 *
3412 * Return: status of transformation
3413 */
3414 int spi_split_transfers_maxsize(struct spi_controller *ctlr,
3415 struct spi_message *msg,
3416 size_t maxsize,
3417 gfp_t gfp)
3418 {
3419 struct spi_transfer *xfer;
3420 int ret;
3421
3422 	/* iterate over the transfer_list,
3423 	 * but note that xfer is advanced to the last transfer inserted
3424 	 * to avoid checking sizes again unnecessarily (also xfer may
3425 	 * potentially belong to a different list by the time the
3426 	 * replacement has happened)
3427 	 */
3428 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
3429 if (xfer->len > maxsize) {
3430 ret = __spi_split_transfer_maxsize(ctlr, msg, &xfer,
3431 maxsize, gfp);
3432 if (ret)
3433 return ret;
3434 }
3435 }
3436
3437 return 0;
3438 }
3439 EXPORT_SYMBOL_GPL(spi_split_transfers_maxsize);
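
/*
 * Illustrative sketch: a controller with a hardware transfer-length limit
 * can split oversized transfers from its prepare_message() callback (the
 * 64-byte limit here is hypothetical):
 *
 *	static int my_prepare_message(struct spi_controller *ctlr,
 *				      struct spi_message *msg)
 *	{
 *		return spi_split_transfers_maxsize(ctlr, msg, 64, GFP_KERNEL);
 *	}
 */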
3440
3441 /*-------------------------------------------------------------------------*/
3442
3443 /* Core methods for SPI controller protocol drivers. Some of the
3444 * other core methods are currently defined as inline functions.
3445 */
3446
3447 static int __spi_validate_bits_per_word(struct spi_controller *ctlr,
3448 u8 bits_per_word)
3449 {
3450 if (ctlr->bits_per_word_mask) {
3451 /* Only 32 bits fit in the mask */
3452 if (bits_per_word > 32)
3453 return -EINVAL;
3454 if (!(ctlr->bits_per_word_mask & SPI_BPW_MASK(bits_per_word)))
3455 return -EINVAL;
3456 }
3457
3458 return 0;
3459 }
3460
3461 /**
3462 * spi_setup - setup SPI mode and clock rate
3463 * @spi: the device whose settings are being modified
3464 * Context: can sleep, and no requests are queued to the device
3465 *
3466 * SPI protocol drivers may need to update the transfer mode if the
3467 * device doesn't work with its default. They may likewise need
3468 * to update clock rates or word sizes from initial values. This function
3469 * changes those settings, and must be called from a context that can sleep.
3470 * Except for SPI_CS_HIGH, which takes effect immediately, the changes take
3471 * effect the next time the device is selected and data is transferred to
3472 * or from it. When this function returns, the spi device is deselected.
3473 *
3474 * Note that this call will fail if the protocol driver specifies an option
3475 * that the underlying controller or its driver does not support. For
3476 * example, not all hardware supports wire transfers using nine bit words,
3477 * LSB-first wire encoding, or active-high chipselects.
3478 *
3479 * Return: zero on success, else a negative error code.
3480 */
3481 int spi_setup(struct spi_device *spi)
3482 {
3483 unsigned bad_bits, ugly_bits;
3484 int status;
3485
3486 /*
3487 	 * check mode to prevent any two of DUAL, QUAD and NO_MOSI/MISO from
3488 	 * being set at the same time
3489 */
3490 if ((hweight_long(spi->mode &
3491 (SPI_TX_DUAL | SPI_TX_QUAD | SPI_NO_TX)) > 1) ||
3492 (hweight_long(spi->mode &
3493 (SPI_RX_DUAL | SPI_RX_QUAD | SPI_NO_RX)) > 1)) {
3494 dev_err(&spi->dev,
3495 "setup: can not select any two of dual, quad and no-rx/tx at the same time\n");
3496 return -EINVAL;
3497 }
3498 /* if it is SPI_3WIRE mode, DUAL and QUAD should be forbidden
3499 */
3500 if ((spi->mode & SPI_3WIRE) && (spi->mode &
3501 (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL |
3502 SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL)))
3503 return -EINVAL;
3504 /* help drivers fail *cleanly* when they need options
3505 	 * that aren't supported with their current controller.
3506 * SPI_CS_WORD has a fallback software implementation,
3507 * so it is ignored here.
3508 */
3509 bad_bits = spi->mode & ~(spi->controller->mode_bits | SPI_CS_WORD |
3510 SPI_NO_TX | SPI_NO_RX);
3511 	/* nothing prevents us from working with an active-high CS if it
3512 	 * is driven by a GPIO.
3513 */
3514 if (gpio_is_valid(spi->cs_gpio))
3515 bad_bits &= ~SPI_CS_HIGH;
3516 ugly_bits = bad_bits &
3517 (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL |
3518 SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL);
3519 if (ugly_bits) {
3520 dev_warn(&spi->dev,
3521 "setup: ignoring unsupported mode bits %x\n",
3522 ugly_bits);
3523 spi->mode &= ~ugly_bits;
3524 bad_bits &= ~ugly_bits;
3525 }
3526 if (bad_bits) {
3527 dev_err(&spi->dev, "setup: unsupported mode bits %x\n",
3528 bad_bits);
3529 return -EINVAL;
3530 }
3531
3532 if (!spi->bits_per_word)
3533 spi->bits_per_word = 8;
3534
3535 status = __spi_validate_bits_per_word(spi->controller,
3536 spi->bits_per_word);
3537 if (status)
3538 return status;
3539
3540 if (spi->controller->max_speed_hz &&
3541 (!spi->max_speed_hz ||
3542 spi->max_speed_hz > spi->controller->max_speed_hz))
3543 spi->max_speed_hz = spi->controller->max_speed_hz;
3544
3545 mutex_lock(&spi->controller->io_mutex);
3546
3547 if (spi->controller->setup) {
3548 status = spi->controller->setup(spi);
3549 if (status) {
3550 mutex_unlock(&spi->controller->io_mutex);
3551 dev_err(&spi->controller->dev, "Failed to setup device: %d\n",
3552 status);
3553 return status;
3554 }
3555 }
3556
3557 if (spi->controller->auto_runtime_pm && spi->controller->set_cs) {
3558 status = pm_runtime_get_sync(spi->controller->dev.parent);
3559 if (status < 0) {
3560 mutex_unlock(&spi->controller->io_mutex);
3561 pm_runtime_put_noidle(spi->controller->dev.parent);
3562 dev_err(&spi->controller->dev, "Failed to power device: %d\n",
3563 status);
3564 return status;
3565 }
3566
3567 /*
3568 * We do not want to return positive value from pm_runtime_get,
3569 * there are many instances of devices calling spi_setup() and
3570 * checking for a non-zero return value instead of a negative
3571 * return value.
3572 */
3573 status = 0;
3574
3575 spi_set_cs(spi, false, true);
3576 pm_runtime_mark_last_busy(spi->controller->dev.parent);
3577 pm_runtime_put_autosuspend(spi->controller->dev.parent);
3578 } else {
3579 spi_set_cs(spi, false, true);
3580 }
3581
3582 mutex_unlock(&spi->controller->io_mutex);
3583
3584 if (spi->rt && !spi->controller->rt) {
3585 spi->controller->rt = true;
3586 spi_set_thread_rt(spi->controller);
3587 }
3588
3589 trace_spi_setup(spi, status);
3590
3591 dev_dbg(&spi->dev, "setup mode %lu, %s%s%s%s%u bits/w, %u Hz max --> %d\n",
3592 spi->mode & SPI_MODE_X_MASK,
3593 (spi->mode & SPI_CS_HIGH) ? "cs_high, " : "",
3594 (spi->mode & SPI_LSB_FIRST) ? "lsb, " : "",
3595 (spi->mode & SPI_3WIRE) ? "3wire, " : "",
3596 (spi->mode & SPI_LOOP) ? "loopback, " : "",
3597 spi->bits_per_word, spi->max_speed_hz,
3598 status);
3599
3600 return status;
3601 }
3602 EXPORT_SYMBOL_GPL(spi_setup);
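/*
 * Example: typical spi_setup() usage from a protocol driver's probe()
 * path. An illustrative sketch only; the mode, word size and clock rate
 * shown are hypothetical values, not mandated by this file:
 *
 *	spi->mode = SPI_MODE_3;
 *	spi->bits_per_word = 8;
 *	spi->max_speed_hz = 1000000;
 *	ret = spi_setup(spi);
 *	if (ret < 0)
 *		return ret;
 */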

static int _spi_xfer_word_delay_update(struct spi_transfer *xfer,
				       struct spi_device *spi)
{
	int delay1, delay2;

	delay1 = spi_delay_to_ns(&xfer->word_delay, xfer);
	if (delay1 < 0)
		return delay1;

	delay2 = spi_delay_to_ns(&spi->word_delay, xfer);
	if (delay2 < 0)
		return delay2;

	if (delay1 < delay2)
		memcpy(&xfer->word_delay, &spi->word_delay,
		       sizeof(xfer->word_delay));

	return 0;
}
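/*
 * In other words, the effective word delay of a transfer is the longer of
 * the per-transfer and per-device delays. For example (hypothetical
 * values): if spi->word_delay works out to 100 ns and xfer->word_delay to
 * 20 ns, the device-level delay wins and is copied into the transfer.
 */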

static int __spi_validate(struct spi_device *spi, struct spi_message *message)
{
	struct spi_controller *ctlr = spi->controller;
	struct spi_transfer *xfer;
	int w_size;

	if (list_empty(&message->transfers))
		return -EINVAL;

	/* If an SPI controller does not support toggling the CS line on each
	 * transfer (indicated by the SPI_CS_WORD flag) or we are using a GPIO
	 * for the CS line, we can emulate the CS-per-word hardware function by
	 * splitting transfers into one-word transfers and ensuring that
	 * cs_change is set for each transfer.
	 */
	if ((spi->mode & SPI_CS_WORD) && (!(ctlr->mode_bits & SPI_CS_WORD) ||
					  spi->cs_gpiod ||
					  gpio_is_valid(spi->cs_gpio))) {
		size_t maxsize;
		int ret;

		maxsize = (spi->bits_per_word + 7) / 8;

		/* spi_split_transfers_maxsize() requires message->spi */
		message->spi = spi;

		ret = spi_split_transfers_maxsize(ctlr, message, maxsize,
						  GFP_KERNEL);
		if (ret)
			return ret;

		list_for_each_entry(xfer, &message->transfers, transfer_list) {
			/* Don't change cs_change on the last entry in the list */
			if (list_is_last(&xfer->transfer_list, &message->transfers))
				break;
			xfer->cs_change = 1;
		}
	}

	/* Half-duplex links include the original MicroWire, and ones with
	 * only one data pin like SPI_3WIRE (switches direction) or where
	 * either MOSI or MISO is missing. They can also be caused by
	 * software limitations.
	 */
	if ((ctlr->flags & SPI_CONTROLLER_HALF_DUPLEX) ||
	    (spi->mode & SPI_3WIRE)) {
		unsigned flags = ctlr->flags;

		list_for_each_entry(xfer, &message->transfers, transfer_list) {
			if (xfer->rx_buf && xfer->tx_buf)
				return -EINVAL;
			if ((flags & SPI_CONTROLLER_NO_TX) && xfer->tx_buf)
				return -EINVAL;
			if ((flags & SPI_CONTROLLER_NO_RX) && xfer->rx_buf)
				return -EINVAL;
		}
	}

	/*
	 * Set transfer bits_per_word and max speed as the spi device default
	 * if it is not set for this transfer.
	 * Set transfer tx_nbits and rx_nbits as single transfer default
	 * (SPI_NBITS_SINGLE) if it is not set for this transfer.
	 * Ensure transfer word_delay is at least as long as that required by
	 * the device itself.
	 */
	message->frame_length = 0;
	list_for_each_entry(xfer, &message->transfers, transfer_list) {
		xfer->effective_speed_hz = 0;
		message->frame_length += xfer->len;
		if (!xfer->bits_per_word)
			xfer->bits_per_word = spi->bits_per_word;

		if (!xfer->speed_hz)
			xfer->speed_hz = spi->max_speed_hz;

		if (ctlr->max_speed_hz && xfer->speed_hz > ctlr->max_speed_hz)
			xfer->speed_hz = ctlr->max_speed_hz;

		if (__spi_validate_bits_per_word(ctlr, xfer->bits_per_word))
			return -EINVAL;

		/*
		 * The SPI transfer length should be a multiple of the SPI
		 * word size, where the SPI word size is a power-of-two
		 * number of bytes.
		 */
		if (xfer->bits_per_word <= 8)
			w_size = 1;
		else if (xfer->bits_per_word <= 16)
			w_size = 2;
		else
			w_size = 4;

		/* No partial transfers accepted */
		if (xfer->len % w_size)
			return -EINVAL;

		if (xfer->speed_hz && ctlr->min_speed_hz &&
		    xfer->speed_hz < ctlr->min_speed_hz)
			return -EINVAL;

		if (xfer->tx_buf && !xfer->tx_nbits)
			xfer->tx_nbits = SPI_NBITS_SINGLE;
		if (xfer->rx_buf && !xfer->rx_nbits)
			xfer->rx_nbits = SPI_NBITS_SINGLE;
		/* Check transfer tx/rx_nbits:
		 * 1. check the value matches one of single, dual and quad
		 * 2. check tx/rx_nbits match the mode in spi_device
		 */
		if (xfer->tx_buf) {
			if (spi->mode & SPI_NO_TX)
				return -EINVAL;
			if (xfer->tx_nbits != SPI_NBITS_SINGLE &&
			    xfer->tx_nbits != SPI_NBITS_DUAL &&
			    xfer->tx_nbits != SPI_NBITS_QUAD)
				return -EINVAL;
			if ((xfer->tx_nbits == SPI_NBITS_DUAL) &&
			    !(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD)))
				return -EINVAL;
			if ((xfer->tx_nbits == SPI_NBITS_QUAD) &&
			    !(spi->mode & SPI_TX_QUAD))
				return -EINVAL;
		}
		/* Check transfer rx_nbits */
		if (xfer->rx_buf) {
			if (spi->mode & SPI_NO_RX)
				return -EINVAL;
			if (xfer->rx_nbits != SPI_NBITS_SINGLE &&
			    xfer->rx_nbits != SPI_NBITS_DUAL &&
			    xfer->rx_nbits != SPI_NBITS_QUAD)
				return -EINVAL;
			if ((xfer->rx_nbits == SPI_NBITS_DUAL) &&
			    !(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD)))
				return -EINVAL;
			if ((xfer->rx_nbits == SPI_NBITS_QUAD) &&
			    !(spi->mode & SPI_RX_QUAD))
				return -EINVAL;
		}

		if (_spi_xfer_word_delay_update(xfer, spi))
			return -EINVAL;
	}

	message->status = -EINPROGRESS;

	return 0;
}
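/*
 * Example of the transfer-length rule enforced above (hypothetical
 * numbers): with bits_per_word = 16 the word size is 2 bytes, so a
 * transfer with xfer->len = 5 is rejected with -EINVAL, while
 * xfer->len = 6 (three 16-bit words) passes validation.
 */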

static int __spi_async(struct spi_device *spi, struct spi_message *message)
{
	struct spi_controller *ctlr = spi->controller;
	struct spi_transfer *xfer;

	/*
	 * Some controllers do not support doing regular SPI transfers.
	 * Return -ENOTSUPP when this is the case.
	 */
	if (!ctlr->transfer)
		return -ENOTSUPP;

	message->spi = spi;

	SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics, spi_async);
	SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_async);

	trace_spi_message_submit(message);

	if (!ctlr->ptp_sts_supported) {
		list_for_each_entry(xfer, &message->transfers, transfer_list) {
			xfer->ptp_sts_word_pre = 0;
			ptp_read_system_prets(xfer->ptp_sts);
		}
	}

	return ctlr->transfer(spi, message);
}

/**
 * spi_async - asynchronous SPI transfer
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers, including completion callback
 * Context: any (irqs may be blocked, etc)
 *
 * This call may be used in_irq and other contexts which can't sleep,
 * as well as from task contexts which can sleep.
 *
 * The completion callback is invoked in a context which can't sleep.
 * Before that invocation, the value of message->status is undefined.
 * When the callback is issued, message->status holds either zero (to
 * indicate complete success) or a negative error code. After that
 * callback returns, the driver which issued the transfer request may
 * deallocate the associated memory; it's no longer in use by any SPI
 * core or controller driver code.
 *
 * Note that although all messages to a spi_device are handled in
 * FIFO order, messages may go to different devices in other orders.
 * Some device might be higher priority, or have various "hard" access
 * time requirements, for example.
 *
 * On detection of any fault during the transfer, processing of
 * the entire message is aborted, and the device is deselected.
 * Until returning from the associated message completion callback,
 * no other spi_message queued to that device will be processed.
 * (This rule applies equally to all the synchronous transfer calls,
 * which are wrappers around this core asynchronous primitive.)
 *
 * Return: zero on success, else a negative error code.
 */
int spi_async(struct spi_device *spi, struct spi_message *message)
{
	struct spi_controller *ctlr = spi->controller;
	int ret;
	unsigned long flags;

	ret = __spi_validate(spi, message);
	if (ret != 0)
		return ret;

	spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);

	if (ctlr->bus_lock_flag)
		ret = -EBUSY;
	else
		ret = __spi_async(spi, message);

	spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(spi_async);
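/*
 * Example: submitting a message asynchronously with a completion callback.
 * An illustrative sketch only; my_complete() and my_ctx are hypothetical
 * names, not defined in this file. The callback runs in a context that
 * cannot sleep and should inspect msg.status:
 *
 *	static void my_complete(void *context)
 *	{
 *		complete(context);
 *	}
 *
 *	spi_message_init_with_transfers(&msg, xfers, nxfers);
 *	msg.complete = my_complete;
 *	msg.context = my_ctx;
 *	ret = spi_async(spi, &msg);
 */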

/**
 * spi_async_locked - version of spi_async with exclusive bus usage
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers, including completion callback
 * Context: any (irqs may be blocked, etc)
 *
 * This call may be used in_irq and other contexts which can't sleep,
 * as well as from task contexts which can sleep.
 *
 * The completion callback is invoked in a context which can't sleep.
 * Before that invocation, the value of message->status is undefined.
 * When the callback is issued, message->status holds either zero (to
 * indicate complete success) or a negative error code. After that
 * callback returns, the driver which issued the transfer request may
 * deallocate the associated memory; it's no longer in use by any SPI
 * core or controller driver code.
 *
 * Note that although all messages to a spi_device are handled in
 * FIFO order, messages may go to different devices in other orders.
 * Some device might be higher priority, or have various "hard" access
 * time requirements, for example.
 *
 * On detection of any fault during the transfer, processing of
 * the entire message is aborted, and the device is deselected.
 * Until returning from the associated message completion callback,
 * no other spi_message queued to that device will be processed.
 * (This rule applies equally to all the synchronous transfer calls,
 * which are wrappers around this core asynchronous primitive.)
 *
 * This call must only be used while the SPI bus lock is held; see
 * spi_bus_lock().
 *
 * Return: zero on success, else a negative error code.
 */
int spi_async_locked(struct spi_device *spi, struct spi_message *message)
{
	struct spi_controller *ctlr = spi->controller;
	int ret;
	unsigned long flags;

	ret = __spi_validate(spi, message);
	if (ret != 0)
		return ret;

	spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);

	ret = __spi_async(spi, message);

	spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(spi_async_locked);

/*-------------------------------------------------------------------------*/

/* Utility methods for SPI protocol drivers, layered on
 * top of the core. Some other utility methods are defined as
 * inline functions.
 */

static void spi_complete(void *arg)
{
	complete(arg);
}

static int __spi_sync(struct spi_device *spi, struct spi_message *message)
{
	DECLARE_COMPLETION_ONSTACK(done);
	int status;
	struct spi_controller *ctlr = spi->controller;
	unsigned long flags;

	status = __spi_validate(spi, message);
	if (status != 0)
		return status;

	message->complete = spi_complete;
	message->context = &done;
	message->spi = spi;

	SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics, spi_sync);
	SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_sync);

	/* If we're not using the legacy transfer method then we will
	 * try to transfer in the calling context, so this path is a
	 * special case. This code would be less tricky if we could
	 * remove the support for driver implemented message queues.
	 */
	if (ctlr->transfer == spi_queued_transfer) {
		spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);

		trace_spi_message_submit(message);

		status = __spi_queued_transfer(spi, message, false);

		spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
	} else {
		status = spi_async_locked(spi, message);
	}

	if (status == 0) {
		/* Push out the messages in the calling context if we
		 * can.
		 */
		if (ctlr->transfer == spi_queued_transfer) {
			SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics,
						       spi_sync_immediate);
			SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics,
						       spi_sync_immediate);
			__spi_pump_messages(ctlr, false);
		}

		wait_for_completion(&done);
		status = message->status;
	}
	message->context = NULL;
	return status;
}

/**
 * spi_sync - blocking/synchronous SPI data transfers
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep. The sleep
 * is non-interruptible, and has no timeout. Low-overhead controller
 * drivers may DMA directly into and out of the message buffers.
 *
 * Note that the SPI device's chip select is active during the message,
 * and then is normally disabled between messages. Drivers for some
 * frequently-used devices may want to minimize costs of selecting a chip,
 * by leaving it selected in anticipation that the next message will go
 * to the same chip. (That may increase power usage.)
 *
 * Also, the caller is guaranteeing that the memory associated with the
 * message will not be freed before this call returns.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_sync(struct spi_device *spi, struct spi_message *message)
{
	int ret;

	mutex_lock(&spi->controller->bus_lock_mutex);
	ret = __spi_sync(spi, message);
	mutex_unlock(&spi->controller->bus_lock_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(spi_sync);
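/*
 * Example: a synchronous full-duplex exchange built by hand. An
 * illustrative sketch only; the tx/rx buffers and their length are
 * hypothetical:
 *
 *	struct spi_transfer xfer = {
 *		.tx_buf = tx,
 *		.rx_buf = rx,
 *		.len = 4,
 *	};
 *	struct spi_message msg;
 *
 *	spi_message_init(&msg);
 *	spi_message_add_tail(&xfer, &msg);
 *	ret = spi_sync(spi, &msg);
 */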

/**
 * spi_sync_locked - version of spi_sync with exclusive bus usage
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep. The sleep
 * is non-interruptible, and has no timeout. Low-overhead controller
 * drivers may DMA directly into and out of the message buffers.
 *
 * This call should be used by drivers that require exclusive access to the
 * SPI bus. It has to be preceded by a spi_bus_lock call. The SPI bus must
 * be released by a spi_bus_unlock call when the exclusive access is over.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_sync_locked(struct spi_device *spi, struct spi_message *message)
{
	return __spi_sync(spi, message);
}
EXPORT_SYMBOL_GPL(spi_sync_locked);

/**
 * spi_bus_lock - obtain a lock for exclusive SPI bus usage
 * @ctlr: SPI bus master that should be locked for exclusive bus access
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep. The sleep
 * is non-interruptible, and has no timeout.
 *
 * This call should be used by drivers that require exclusive access to the
 * SPI bus. The SPI bus must be released by a spi_bus_unlock call when the
 * exclusive access is over. Data transfer must be done by spi_sync_locked
 * and spi_async_locked calls when the SPI bus lock is held.
 *
 * Return: always zero.
 */
int spi_bus_lock(struct spi_controller *ctlr)
{
	unsigned long flags;

	mutex_lock(&ctlr->bus_lock_mutex);

	spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
	ctlr->bus_lock_flag = 1;
	spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);

	/* The mutex remains locked until spi_bus_unlock() is called */

	return 0;
}
EXPORT_SYMBOL_GPL(spi_bus_lock);

/**
 * spi_bus_unlock - release the lock for exclusive SPI bus usage
 * @ctlr: SPI bus master that was locked for exclusive bus access
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep. The sleep
 * is non-interruptible, and has no timeout.
 *
 * This call releases an SPI bus lock previously obtained by an spi_bus_lock
 * call.
 *
 * Return: always zero.
 */
int spi_bus_unlock(struct spi_controller *ctlr)
{
	ctlr->bus_lock_flag = 0;

	mutex_unlock(&ctlr->bus_lock_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(spi_bus_unlock);
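/*
 * Example: performing several messages atomically with respect to other
 * users of the same bus. An illustrative sketch only; msg1 and msg2 are
 * hypothetical, already-initialized messages:
 *
 *	spi_bus_lock(spi->controller);
 *	ret = spi_sync_locked(spi, &msg1);
 *	if (!ret)
 *		ret = spi_sync_locked(spi, &msg2);
 *	spi_bus_unlock(spi->controller);
 */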

/* Portable code must never pass more than 32 bytes */
#define	SPI_BUFSIZ	max(32, SMP_CACHE_BYTES)

static u8	*buf;

/**
 * spi_write_then_read - SPI synchronous write followed by read
 * @spi: device with which data will be exchanged
 * @txbuf: data to be written (need not be dma-safe)
 * @n_tx: size of txbuf, in bytes
 * @rxbuf: buffer into which data will be read (need not be dma-safe)
 * @n_rx: size of rxbuf, in bytes
 * Context: can sleep
 *
 * This performs a half duplex MicroWire style transaction with the
 * device, sending txbuf and then reading rxbuf. The return value
 * is zero for success, else a negative errno status code.
 * This call may only be used from a context that may sleep.
 *
 * Parameters to this routine are always copied using a small buffer.
 * Performance-sensitive or bulk transfer code should instead use
 * spi_{async,sync}() calls with dma-safe buffers.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_write_then_read(struct spi_device *spi,
			const void *txbuf, unsigned n_tx,
			void *rxbuf, unsigned n_rx)
{
	static DEFINE_MUTEX(lock);

	int			status;
	struct spi_message	message;
	struct spi_transfer	x[2];
	u8			*local_buf;

	/* Use the pre-allocated DMA-safe buffer if we can. We can't avoid
	 * copying here (it is a pure convenience), but we can keep heap
	 * costs out of the hot path unless someone else is using the
	 * pre-allocated buffer or the transfer is too large.
	 */
	if ((n_tx + n_rx) > SPI_BUFSIZ || !mutex_trylock(&lock)) {
		local_buf = kmalloc(max((unsigned)SPI_BUFSIZ, n_tx + n_rx),
				    GFP_KERNEL | GFP_DMA);
		if (!local_buf)
			return -ENOMEM;
	} else {
		local_buf = buf;
	}

	spi_message_init(&message);
	memset(x, 0, sizeof(x));
	if (n_tx) {
		x[0].len = n_tx;
		spi_message_add_tail(&x[0], &message);
	}
	if (n_rx) {
		x[1].len = n_rx;
		spi_message_add_tail(&x[1], &message);
	}

	memcpy(local_buf, txbuf, n_tx);
	x[0].tx_buf = local_buf;
	x[1].rx_buf = local_buf + n_tx;

	/* Do the I/O */
	status = spi_sync(spi, &message);
	if (status == 0)
		memcpy(rxbuf, x[1].rx_buf, n_rx);

	if (x[0].tx_buf == buf)
		mutex_unlock(&lock);
	else
		kfree(local_buf);

	return status;
}
EXPORT_SYMBOL_GPL(spi_write_then_read);
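/*
 * Example: reading a single register with a one-byte command, the common
 * use case for this helper. An illustrative sketch only; the read-flag
 * encoding (0x80) and register width are hypothetical, device-specific
 * details:
 *
 *	u8 cmd = 0x80 | reg;
 *	u8 val;
 *
 *	ret = spi_write_then_read(spi, &cmd, 1, &val, 1);
 */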

/*-------------------------------------------------------------------------*/

#if IS_ENABLED(CONFIG_OF)
/* Must call put_device() when done with the returned spi_device */
struct spi_device *of_find_spi_device_by_node(struct device_node *node)
{
	struct device *dev = bus_find_device_by_of_node(&spi_bus_type, node);

	return dev ? to_spi_device(dev) : NULL;
}
EXPORT_SYMBOL_GPL(of_find_spi_device_by_node);
#endif /* IS_ENABLED(CONFIG_OF) */
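/*
 * Example: looking up a device by its device-tree node and dropping the
 * reference afterwards. An illustrative sketch only; "np" is a
 * hypothetical, already-resolved device_node pointer:
 *
 *	struct spi_device *spi = of_find_spi_device_by_node(np);
 *
 *	if (spi) {
 *		... use spi ...
 *		put_device(&spi->dev);
 *	}
 */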

#if IS_ENABLED(CONFIG_OF_DYNAMIC)
/* The SPI controllers are not on the spi_bus, so we must find them another way */
static struct spi_controller *of_find_spi_controller_by_node(struct device_node *node)
{
	struct device *dev;

	dev = class_find_device_by_of_node(&spi_master_class, node);
	if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE))
		dev = class_find_device_by_of_node(&spi_slave_class, node);
	if (!dev)
		return NULL;

	/* Reference got in class_find_device */
	return container_of(dev, struct spi_controller, dev);
}

static int of_spi_notify(struct notifier_block *nb, unsigned long action,
			 void *arg)
{
	struct of_reconfig_data *rd = arg;
	struct spi_controller *ctlr;
	struct spi_device *spi;

	switch (of_reconfig_get_state_change(action, arg)) {
	case OF_RECONFIG_CHANGE_ADD:
		ctlr = of_find_spi_controller_by_node(rd->dn->parent);
		if (ctlr == NULL)
			return NOTIFY_OK;	/* Not for us */

		if (of_node_test_and_set_flag(rd->dn, OF_POPULATED)) {
			put_device(&ctlr->dev);
			return NOTIFY_OK;
		}

		spi = of_register_spi_device(ctlr, rd->dn);
		put_device(&ctlr->dev);

		if (IS_ERR(spi)) {
			pr_err("%s: failed to create for '%pOF'\n",
			       __func__, rd->dn);
			of_node_clear_flag(rd->dn, OF_POPULATED);
			return notifier_from_errno(PTR_ERR(spi));
		}
		break;

	case OF_RECONFIG_CHANGE_REMOVE:
		/* Already depopulated? */
		if (!of_node_check_flag(rd->dn, OF_POPULATED))
			return NOTIFY_OK;

		/* Find our device by node */
		spi = of_find_spi_device_by_node(rd->dn);
		if (spi == NULL)
			return NOTIFY_OK;	/* No? Not meant for us */

		/* Unregister takes one ref away */
		spi_unregister_device(spi);

		/* And put the reference taken by the find */
		put_device(&spi->dev);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block spi_of_notifier = {
	.notifier_call = of_spi_notify,
};
#else /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
extern struct notifier_block spi_of_notifier;
#endif /* IS_ENABLED(CONFIG_OF_DYNAMIC) */

#if IS_ENABLED(CONFIG_ACPI)
static int spi_acpi_controller_match(struct device *dev, const void *data)
{
	return ACPI_COMPANION(dev->parent) == data;
}

static struct spi_controller *acpi_spi_find_controller_by_adev(struct acpi_device *adev)
{
	struct device *dev;

	dev = class_find_device(&spi_master_class, NULL, adev,
				spi_acpi_controller_match);
	if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE))
		dev = class_find_device(&spi_slave_class, NULL, adev,
					spi_acpi_controller_match);
	if (!dev)
		return NULL;

	return container_of(dev, struct spi_controller, dev);
}

static struct spi_device *acpi_spi_find_device_by_adev(struct acpi_device *adev)
{
	struct device *dev;

	dev = bus_find_device_by_acpi_dev(&spi_bus_type, adev);
	return to_spi_device(dev);
}

static int acpi_spi_notify(struct notifier_block *nb, unsigned long value,
			   void *arg)
{
	struct acpi_device *adev = arg;
	struct spi_controller *ctlr;
	struct spi_device *spi;

	switch (value) {
	case ACPI_RECONFIG_DEVICE_ADD:
		ctlr = acpi_spi_find_controller_by_adev(adev->parent);
		if (!ctlr)
			break;

		acpi_register_spi_device(ctlr, adev);
		put_device(&ctlr->dev);
		break;
	case ACPI_RECONFIG_DEVICE_REMOVE:
		if (!acpi_device_enumerated(adev))
			break;

		spi = acpi_spi_find_device_by_adev(adev);
		if (!spi)
			break;

		spi_unregister_device(spi);
		put_device(&spi->dev);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block spi_acpi_notifier = {
	.notifier_call = acpi_spi_notify,
};
#else
extern struct notifier_block spi_acpi_notifier;
#endif

static int __init spi_init(void)
{
	int status;

	buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL);
	if (!buf) {
		status = -ENOMEM;
		goto err0;
	}

	status = bus_register(&spi_bus_type);
	if (status < 0)
		goto err1;

	status = class_register(&spi_master_class);
	if (status < 0)
		goto err2;

	if (IS_ENABLED(CONFIG_SPI_SLAVE)) {
		status = class_register(&spi_slave_class);
		if (status < 0)
			goto err3;
	}

	if (IS_ENABLED(CONFIG_OF_DYNAMIC))
		WARN_ON(of_reconfig_notifier_register(&spi_of_notifier));
	if (IS_ENABLED(CONFIG_ACPI))
		WARN_ON(acpi_reconfig_notifier_register(&spi_acpi_notifier));

	return 0;

err3:
	class_unregister(&spi_master_class);
err2:
	bus_unregister(&spi_bus_type);
err1:
	kfree(buf);
	buf = NULL;
err0:
	return status;
}

/* board_info is normally registered in arch_initcall(),
 * but even essential drivers wait till later.
 *
 * REVISIT only boardinfo really needs static linking. The rest (device and
 * driver registration) _could_ be dynamically linked (modular) ... costs
 * include needing to have boardinfo data structures be much more public.
 */
postcore_initcall(spi_init);