// SPDX-License-Identifier: GPL-2.0-or-later
// SPI init/core code
//
// Copyright (C) 2005 David Brownell
// Copyright (C) 2008 Secret Lab Technologies Ltd.

#include <linux/acpi.h>
#include <linux/cache.h>
#include <linux/clk/clk-conf.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <linux/gpio/consumer.h>
#include <linux/highmem.h>
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/mod_devicetable.h>
#include <linux/mutex.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/percpu.h>
#include <linux/platform_data/x86/apple.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/property.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/sched/rt.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
#include <uapi/linux/sched/types.h>

#define CREATE_TRACE_POINTS
#include <trace/events/spi.h>
EXPORT_TRACEPOINT_SYMBOL(spi_transfer_start);
EXPORT_TRACEPOINT_SYMBOL(spi_transfer_stop);

#include "internals.h"

static DEFINE_IDR(spi_master_idr);

static void spidev_release(struct device *dev)
{
	struct spi_device *spi = to_spi_device(dev);

	spi_controller_put(spi->controller);
	kfree(spi->driver_override);
	free_percpu(spi->pcpu_statistics);
	kfree(spi);
}

static ssize_t
modalias_show(struct device *dev, struct device_attribute *a, char *buf)
{
	const struct spi_device *spi = to_spi_device(dev);
	int len;

	len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
	if (len != -ENODEV)
		return len;

	return sysfs_emit(buf, "%s%s\n", SPI_MODULE_PREFIX, spi->modalias);
}
static DEVICE_ATTR_RO(modalias);

static ssize_t driver_override_store(struct device *dev,
				     struct device_attribute *a,
				     const char *buf, size_t count)
{
	struct spi_device *spi = to_spi_device(dev);
	int ret;

	ret = driver_set_override(dev, &spi->driver_override, buf, count);
	if (ret)
		return ret;

	return count;
}

static ssize_t driver_override_show(struct device *dev,
				    struct device_attribute *a, char *buf)
{
	const struct spi_device *spi = to_spi_device(dev);
	ssize_t len;

	device_lock(dev);
	len = sysfs_emit(buf, "%s\n", spi->driver_override ? : "");
	device_unlock(dev);
	return len;
}
static DEVICE_ATTR_RW(driver_override);

static struct spi_statistics __percpu *spi_alloc_pcpu_stats(struct device *dev)
{
	struct spi_statistics __percpu *pcpu_stats;

	if (dev)
		pcpu_stats = devm_alloc_percpu(dev, struct spi_statistics);
	else
		pcpu_stats = alloc_percpu_gfp(struct spi_statistics, GFP_KERNEL);

	if (pcpu_stats) {
		int cpu;

		for_each_possible_cpu(cpu) {
			struct spi_statistics *stat;

			stat = per_cpu_ptr(pcpu_stats, cpu);
			u64_stats_init(&stat->syncp);
		}
	}
	return pcpu_stats;
}

static ssize_t spi_emit_pcpu_stats(struct spi_statistics __percpu *stat,
				   char *buf, size_t offset)
{
	u64 val = 0;
	int i;

	for_each_possible_cpu(i) {
		const struct spi_statistics *pcpu_stats;
		u64_stats_t *field;
		unsigned int start;
		u64 inc;

		pcpu_stats = per_cpu_ptr(stat, i);
		field = (void *)pcpu_stats + offset;
		do {
			start = u64_stats_fetch_begin(&pcpu_stats->syncp);
			inc = u64_stats_read(field);
		} while (u64_stats_fetch_retry(&pcpu_stats->syncp, start));
		val += inc;
	}
	return sysfs_emit(buf, "%llu\n", val);
}

#define SPI_STATISTICS_ATTRS(field, file)				\
static ssize_t spi_controller_##field##_show(struct device *dev,	\
					     struct device_attribute *attr, \
					     char *buf)			\
{									\
	struct spi_controller *ctlr = container_of(dev,			\
					 struct spi_controller, dev);	\
	return spi_statistics_##field##_show(ctlr->pcpu_statistics, buf); \
}									\
static struct device_attribute dev_attr_spi_controller_##field = {	\
	.attr = { .name = file, .mode = 0444 },				\
	.show = spi_controller_##field##_show,				\
};									\
static ssize_t spi_device_##field##_show(struct device *dev,		\
					 struct device_attribute *attr,	\
					 char *buf)			\
{									\
	struct spi_device *spi = to_spi_device(dev);			\
	return spi_statistics_##field##_show(spi->pcpu_statistics, buf); \
}									\
static struct device_attribute dev_attr_spi_device_##field = {		\
	.attr = { .name = file, .mode = 0444 },				\
	.show = spi_device_##field##_show,				\
}

#define SPI_STATISTICS_SHOW_NAME(name, file, field)			\
static ssize_t spi_statistics_##name##_show(struct spi_statistics __percpu *stat, \
					    char *buf)			\
{									\
	return spi_emit_pcpu_stats(stat, buf,				\
				   offsetof(struct spi_statistics, field)); \
}									\
SPI_STATISTICS_ATTRS(name, file)

#define SPI_STATISTICS_SHOW(field)					\
	SPI_STATISTICS_SHOW_NAME(field, __stringify(field),		\
				 field)

SPI_STATISTICS_SHOW(messages);
SPI_STATISTICS_SHOW(transfers);
SPI_STATISTICS_SHOW(errors);
SPI_STATISTICS_SHOW(timedout);

SPI_STATISTICS_SHOW(spi_sync);
SPI_STATISTICS_SHOW(spi_sync_immediate);
SPI_STATISTICS_SHOW(spi_async);

SPI_STATISTICS_SHOW(bytes);
SPI_STATISTICS_SHOW(bytes_rx);
SPI_STATISTICS_SHOW(bytes_tx);

#define SPI_STATISTICS_TRANSFER_BYTES_HISTO(index, number)		\
	SPI_STATISTICS_SHOW_NAME(transfer_bytes_histo##index,		\
				 "transfer_bytes_histo_" number,	\
				 transfer_bytes_histo[index])
SPI_STATISTICS_TRANSFER_BYTES_HISTO(0,  "0-1");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(1,  "2-3");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(2,  "4-7");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(3,  "8-15");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(4,  "16-31");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(5,  "32-63");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(6,  "64-127");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(7,  "128-255");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(8,  "256-511");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(9,  "512-1023");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(10, "1024-2047");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(11, "2048-4095");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(12, "4096-8191");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(13, "8192-16383");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(14, "16384-32767");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(15, "32768-65535");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(16, "65536+");

SPI_STATISTICS_SHOW(transfers_split_maxsize);

static struct attribute *spi_dev_attrs[] = {
	&dev_attr_modalias.attr,
	&dev_attr_driver_override.attr,
	NULL,
};

static const struct attribute_group spi_dev_group = {
	.attrs  = spi_dev_attrs,
};

static struct attribute *spi_device_statistics_attrs[] = {
	&dev_attr_spi_device_messages.attr,
	&dev_attr_spi_device_transfers.attr,
	&dev_attr_spi_device_errors.attr,
	&dev_attr_spi_device_timedout.attr,
	&dev_attr_spi_device_spi_sync.attr,
	&dev_attr_spi_device_spi_sync_immediate.attr,
	&dev_attr_spi_device_spi_async.attr,
	&dev_attr_spi_device_bytes.attr,
	&dev_attr_spi_device_bytes_rx.attr,
	&dev_attr_spi_device_bytes_tx.attr,
	&dev_attr_spi_device_transfer_bytes_histo0.attr,
	&dev_attr_spi_device_transfer_bytes_histo1.attr,
	&dev_attr_spi_device_transfer_bytes_histo2.attr,
	&dev_attr_spi_device_transfer_bytes_histo3.attr,
	&dev_attr_spi_device_transfer_bytes_histo4.attr,
	&dev_attr_spi_device_transfer_bytes_histo5.attr,
	&dev_attr_spi_device_transfer_bytes_histo6.attr,
	&dev_attr_spi_device_transfer_bytes_histo7.attr,
	&dev_attr_spi_device_transfer_bytes_histo8.attr,
	&dev_attr_spi_device_transfer_bytes_histo9.attr,
	&dev_attr_spi_device_transfer_bytes_histo10.attr,
	&dev_attr_spi_device_transfer_bytes_histo11.attr,
	&dev_attr_spi_device_transfer_bytes_histo12.attr,
	&dev_attr_spi_device_transfer_bytes_histo13.attr,
	&dev_attr_spi_device_transfer_bytes_histo14.attr,
	&dev_attr_spi_device_transfer_bytes_histo15.attr,
	&dev_attr_spi_device_transfer_bytes_histo16.attr,
	&dev_attr_spi_device_transfers_split_maxsize.attr,
	NULL,
};

static const struct attribute_group spi_device_statistics_group = {
	.name  = "statistics",
	.attrs  = spi_device_statistics_attrs,
};

static const struct attribute_group *spi_dev_groups[] = {
	&spi_dev_group,
	&spi_device_statistics_group,
	NULL,
};

static struct attribute *spi_controller_statistics_attrs[] = {
	&dev_attr_spi_controller_messages.attr,
	&dev_attr_spi_controller_transfers.attr,
	&dev_attr_spi_controller_errors.attr,
	&dev_attr_spi_controller_timedout.attr,
	&dev_attr_spi_controller_spi_sync.attr,
	&dev_attr_spi_controller_spi_sync_immediate.attr,
	&dev_attr_spi_controller_spi_async.attr,
	&dev_attr_spi_controller_bytes.attr,
	&dev_attr_spi_controller_bytes_rx.attr,
	&dev_attr_spi_controller_bytes_tx.attr,
	&dev_attr_spi_controller_transfer_bytes_histo0.attr,
	&dev_attr_spi_controller_transfer_bytes_histo1.attr,
	&dev_attr_spi_controller_transfer_bytes_histo2.attr,
	&dev_attr_spi_controller_transfer_bytes_histo3.attr,
	&dev_attr_spi_controller_transfer_bytes_histo4.attr,
	&dev_attr_spi_controller_transfer_bytes_histo5.attr,
	&dev_attr_spi_controller_transfer_bytes_histo6.attr,
	&dev_attr_spi_controller_transfer_bytes_histo7.attr,
	&dev_attr_spi_controller_transfer_bytes_histo8.attr,
	&dev_attr_spi_controller_transfer_bytes_histo9.attr,
	&dev_attr_spi_controller_transfer_bytes_histo10.attr,
	&dev_attr_spi_controller_transfer_bytes_histo11.attr,
	&dev_attr_spi_controller_transfer_bytes_histo12.attr,
	&dev_attr_spi_controller_transfer_bytes_histo13.attr,
	&dev_attr_spi_controller_transfer_bytes_histo14.attr,
	&dev_attr_spi_controller_transfer_bytes_histo15.attr,
	&dev_attr_spi_controller_transfer_bytes_histo16.attr,
	&dev_attr_spi_controller_transfers_split_maxsize.attr,
	NULL,
};

static const struct attribute_group spi_controller_statistics_group = {
	.name  = "statistics",
	.attrs  = spi_controller_statistics_attrs,
};

static const struct attribute_group *spi_master_groups[] = {
	&spi_controller_statistics_group,
	NULL,
};

static void spi_statistics_add_transfer_stats(struct spi_statistics __percpu *pcpu_stats,
					      struct spi_transfer *xfer,
					      struct spi_controller *ctlr)
{
	int l2len = min(fls(xfer->len), SPI_STATISTICS_HISTO_SIZE) - 1;
	struct spi_statistics *stats;

	if (l2len < 0)
		l2len = 0;

	get_cpu();
	stats = this_cpu_ptr(pcpu_stats);
	u64_stats_update_begin(&stats->syncp);

	u64_stats_inc(&stats->transfers);
	u64_stats_inc(&stats->transfer_bytes_histo[l2len]);

	u64_stats_add(&stats->bytes, xfer->len);
	if ((xfer->tx_buf) &&
	    (xfer->tx_buf != ctlr->dummy_tx))
		u64_stats_add(&stats->bytes_tx, xfer->len);
	if ((xfer->rx_buf) &&
	    (xfer->rx_buf != ctlr->dummy_rx))
		u64_stats_add(&stats->bytes_rx, xfer->len);

	u64_stats_update_end(&stats->syncp);
	put_cpu();
}

/*
 * modalias support makes "modprobe $MODALIAS" new-style hotplug work,
 * and the sysfs version makes coldplug work too.
 */
static const struct spi_device_id *spi_match_id(const struct spi_device_id *id, const char *name)
{
	while (id->name[0]) {
		if (!strcmp(name, id->name))
			return id;
		id++;
	}
	return NULL;
}

const struct spi_device_id *spi_get_device_id(const struct spi_device *sdev)
{
	const struct spi_driver *sdrv = to_spi_driver(sdev->dev.driver);

	return spi_match_id(sdrv->id_table, sdev->modalias);
}
EXPORT_SYMBOL_GPL(spi_get_device_id);

const void *spi_get_device_match_data(const struct spi_device *sdev)
{
	const void *match;

	match = device_get_match_data(&sdev->dev);
	if (match)
		return match;

	return (const void *)spi_get_device_id(sdev)->driver_data;
}
EXPORT_SYMBOL_GPL(spi_get_device_match_data);

static int spi_match_device(struct device *dev, struct device_driver *drv)
{
	const struct spi_device *spi = to_spi_device(dev);
	const struct spi_driver *sdrv = to_spi_driver(drv);

	/* Check override first, and if set, only use the named driver */
	if (spi->driver_override)
		return strcmp(spi->driver_override, drv->name) == 0;

	/* Attempt an OF style match */
	if (of_driver_match_device(dev, drv))
		return 1;

	/* Then try ACPI */
	if (acpi_driver_match_device(dev, drv))
		return 1;

	if (sdrv->id_table)
		return !!spi_match_id(sdrv->id_table, spi->modalias);

	return strcmp(spi->modalias, drv->name) == 0;
}

static int spi_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
	const struct spi_device *spi = to_spi_device(dev);
	int rc;

	rc = acpi_device_uevent_modalias(dev, env);
	if (rc != -ENODEV)
		return rc;

	return add_uevent_var(env, "MODALIAS=%s%s", SPI_MODULE_PREFIX, spi->modalias);
}

static int spi_probe(struct device *dev)
{
	const struct spi_driver *sdrv = to_spi_driver(dev->driver);
	struct spi_device *spi = to_spi_device(dev);
	int ret;

	ret = of_clk_set_defaults(dev->of_node, false);
	if (ret)
		return ret;

	if (dev->of_node) {
		spi->irq = of_irq_get(dev->of_node, 0);
		if (spi->irq == -EPROBE_DEFER)
			return -EPROBE_DEFER;
		if (spi->irq < 0)
			spi->irq = 0;
	}

	ret = dev_pm_domain_attach(dev, true);
	if (ret)
		return ret;

	if (sdrv->probe) {
		ret = sdrv->probe(spi);
		if (ret)
			dev_pm_domain_detach(dev, true);
	}

	return ret;
}

static void spi_remove(struct device *dev)
{
	const struct spi_driver *sdrv = to_spi_driver(dev->driver);

	if (sdrv->remove)
		sdrv->remove(to_spi_device(dev));

	dev_pm_domain_detach(dev, true);
}

static void spi_shutdown(struct device *dev)
{
	if (dev->driver) {
		const struct spi_driver *sdrv = to_spi_driver(dev->driver);

		if (sdrv->shutdown)
			sdrv->shutdown(to_spi_device(dev));
	}
}

struct bus_type spi_bus_type = {
	.name		= "spi",
	.dev_groups	= spi_dev_groups,
	.match		= spi_match_device,
	.uevent		= spi_uevent,
	.probe		= spi_probe,
	.remove		= spi_remove,
	.shutdown	= spi_shutdown,
};
EXPORT_SYMBOL_GPL(spi_bus_type);

/**
 * __spi_register_driver - register a SPI driver
 * @owner: owner module of the driver to register
 * @sdrv: the driver to register
 * Context: can sleep
 *
 * Return: zero on success, else a negative error code.
 */
int __spi_register_driver(struct module *owner, struct spi_driver *sdrv)
{
	sdrv->driver.owner = owner;
	sdrv->driver.bus = &spi_bus_type;

	/*
	 * For Really Good Reasons we use spi: modaliases not of:
	 * modaliases for DT so module autoloading won't work if we
	 * don't have a spi_device_id as well as a compatible string.
	 */
	if (sdrv->driver.of_match_table) {
		const struct of_device_id *of_id;

		for (of_id = sdrv->driver.of_match_table; of_id->compatible[0];
		     of_id++) {
			const char *of_name;

			/* Strip off any vendor prefix */
			of_name = strnchr(of_id->compatible,
					  sizeof(of_id->compatible), ',');
			if (of_name)
				of_name++;
			else
				of_name = of_id->compatible;

			if (sdrv->id_table) {
				const struct spi_device_id *spi_id;

				spi_id = spi_match_id(sdrv->id_table, of_name);
				if (spi_id)
					continue;
			} else {
				if (strcmp(sdrv->driver.name, of_name) == 0)
					continue;
			}

			pr_warn("SPI driver %s has no spi_device_id for %s\n",
				sdrv->driver.name, of_id->compatible);
		}
	}

	return driver_register(&sdrv->driver);
}
EXPORT_SYMBOL_GPL(__spi_register_driver);
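
/*
 * Illustrative sketch, not part of this file: a minimal spi_driver that
 * carries both an of_match_table and an id_table, so the autoload warning
 * emitted above never triggers. All "acme"/"my-chip" names below are
 * hypothetical; note that the spi_device_id name must match the compatible
 * string with the vendor prefix stripped.
 *
 *	static const struct of_device_id my_chip_of_match[] = {
 *		{ .compatible = "acme,my-chip" },
 *		{ }
 *	};
 *	MODULE_DEVICE_TABLE(of, my_chip_of_match);
 *
 *	static const struct spi_device_id my_chip_spi_id[] = {
 *		{ "my-chip" },
 *		{ }
 *	};
 *	MODULE_DEVICE_TABLE(spi, my_chip_spi_id);
 *
 *	static struct spi_driver my_chip_driver = {
 *		.driver = {
 *			.name = "my-chip",
 *			.of_match_table = my_chip_of_match,
 *		},
 *		.id_table = my_chip_spi_id,
 *		.probe = my_chip_probe,
 *	};
 *	module_spi_driver(my_chip_driver);
 */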

/*-------------------------------------------------------------------------*/

/*
 * SPI devices should normally not be created by SPI device drivers; that
 * would make them board-specific. Similarly with SPI controller drivers.
 * Device registration normally goes into code like arch/.../mach.../board-YYY.c,
 * alongside other read-only (flashable) information about mainboard devices.
 */

struct boardinfo {
	struct list_head	list;
	struct spi_board_info	board_info;
};

static LIST_HEAD(board_list);
static LIST_HEAD(spi_controller_list);

/*
 * Used to protect add/del operations on the board_info list and the
 * spi_controller list, and their matching process. Also used to protect
 * objects of type struct idr.
 */
static DEFINE_MUTEX(board_lock);

/**
 * spi_alloc_device - Allocate a new SPI device
 * @ctlr: Controller to which device is connected
 * Context: can sleep
 *
 * Allows a driver to allocate and initialize a spi_device without
 * registering it immediately. This allows a driver to directly
 * fill the spi_device with device parameters before calling
 * spi_add_device() on it.
 *
 * Caller is responsible to call spi_add_device() on the returned
 * spi_device structure to add it to the SPI controller. If the caller
 * needs to discard the spi_device without adding it, then it should
 * call spi_dev_put() on it.
 *
 * Return: a pointer to the new device, or NULL.
 */
struct spi_device *spi_alloc_device(struct spi_controller *ctlr)
{
	struct spi_device *spi;

	if (!spi_controller_get(ctlr))
		return NULL;

	spi = kzalloc(sizeof(*spi), GFP_KERNEL);
	if (!spi) {
		spi_controller_put(ctlr);
		return NULL;
	}

	spi->pcpu_statistics = spi_alloc_pcpu_stats(NULL);
	if (!spi->pcpu_statistics) {
		kfree(spi);
		spi_controller_put(ctlr);
		return NULL;
	}

	spi->master = spi->controller = ctlr;
	spi->dev.parent = &ctlr->dev;
	spi->dev.bus = &spi_bus_type;
	spi->dev.release = spidev_release;
	spi->mode = ctlr->buswidth_override_bits;

	device_initialize(&spi->dev);
	return spi;
}
EXPORT_SYMBOL_GPL(spi_alloc_device);

static void spi_dev_set_name(struct spi_device *spi)
{
	struct acpi_device *adev = ACPI_COMPANION(&spi->dev);

	if (adev) {
		dev_set_name(&spi->dev, "spi-%s", acpi_dev_name(adev));
		return;
	}

	dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->controller->dev),
		     spi_get_chipselect(spi, 0));
}

static int spi_dev_check(struct device *dev, void *data)
{
	struct spi_device *spi = to_spi_device(dev);
	struct spi_device *new_spi = data;

	if (spi->controller == new_spi->controller &&
	    spi_get_chipselect(spi, 0) == spi_get_chipselect(new_spi, 0))
		return -EBUSY;
	return 0;
}

static void spi_cleanup(struct spi_device *spi)
{
	if (spi->controller->cleanup)
		spi->controller->cleanup(spi);
}

static int __spi_add_device(struct spi_device *spi)
{
	struct spi_controller *ctlr = spi->controller;
	struct device *dev = ctlr->dev.parent;
	int status;

	/* Chipselects are numbered 0..max; validate. */
	if (spi_get_chipselect(spi, 0) >= ctlr->num_chipselect) {
		dev_err(dev, "cs%d >= max %d\n", spi_get_chipselect(spi, 0),
			ctlr->num_chipselect);
		return -EINVAL;
	}

	/* Set the bus ID string */
	spi_dev_set_name(spi);

	/*
	 * We need to make sure there's no other device with this
	 * chipselect **BEFORE** we call setup(), else we'll trash
	 * its configuration.
	 */
	status = bus_for_each_dev(&spi_bus_type, NULL, spi, spi_dev_check);
	if (status) {
		dev_err(dev, "chipselect %d already in use\n",
			spi_get_chipselect(spi, 0));
		return status;
	}

	/* Controller may unregister concurrently */
	if (IS_ENABLED(CONFIG_SPI_DYNAMIC) &&
	    !device_is_registered(&ctlr->dev)) {
		return -ENODEV;
	}

	if (ctlr->cs_gpiods)
		spi_set_csgpiod(spi, 0, ctlr->cs_gpiods[spi_get_chipselect(spi, 0)]);

	/*
	 * Drivers may modify this initial i/o setup, but will
	 * normally rely on the device being setup. Devices
	 * using SPI_CS_HIGH can't coexist well otherwise...
	 */
	status = spi_setup(spi);
	if (status < 0) {
		dev_err(dev, "can't setup %s, status %d\n",
			dev_name(&spi->dev), status);
		return status;
	}

	/* Device may be bound to an active driver when this returns */
	status = device_add(&spi->dev);
	if (status < 0) {
		dev_err(dev, "can't add %s, status %d\n",
			dev_name(&spi->dev), status);
		spi_cleanup(spi);
	} else {
		dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev));
	}

	return status;
}

/**
 * spi_add_device - Add spi_device allocated with spi_alloc_device
 * @spi: spi_device to register
 *
 * Companion function to spi_alloc_device. Devices allocated with
 * spi_alloc_device can be added onto the SPI bus with this function.
 *
 * Return: 0 on success; negative errno on failure
 */
int spi_add_device(struct spi_device *spi)
{
	struct spi_controller *ctlr = spi->controller;
	int status;

	mutex_lock(&ctlr->add_lock);
	status = __spi_add_device(spi);
	mutex_unlock(&ctlr->add_lock);
	return status;
}
EXPORT_SYMBOL_GPL(spi_add_device);
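
/*
 * Illustrative sketch, not part of this file: the two-step
 * spi_alloc_device()/spi_add_device() pattern described in the kernel-doc
 * above, as a hypothetical adapter driver might use it. Note the
 * spi_dev_put() on the error path, which discards a device that was
 * never added.
 *
 *	struct spi_device *spi;
 *	int ret;
 *
 *	spi = spi_alloc_device(ctlr);
 *	if (!spi)
 *		return -ENOMEM;
 *
 *	strscpy(spi->modalias, "my-chip", sizeof(spi->modalias));
 *	spi_set_chipselect(spi, 0, 3);
 *	spi->max_speed_hz = 10000000;
 *	spi->mode = SPI_MODE_0;
 *
 *	ret = spi_add_device(spi);
 *	if (ret)
 *		spi_dev_put(spi);	// never added, drop the reference
 */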

/**
 * spi_new_device - instantiate one new SPI device
 * @ctlr: Controller to which device is connected
 * @chip: Describes the SPI device
 * Context: can sleep
 *
 * On typical mainboards, this is purely internal, and it's not needed
 * after board init creates the hard-wired devices. Some development
 * platforms may not be able to use spi_register_board_info though, and
 * this is exported so that for example a USB or parport based adapter
 * driver could add devices (which it would learn about out-of-band).
 *
 * Return: the new device, or NULL.
 */
struct spi_device *spi_new_device(struct spi_controller *ctlr,
				  struct spi_board_info *chip)
{
	struct spi_device *proxy;
	int status;

	/*
	 * NOTE: caller did any chip->bus_num checks necessary.
	 *
	 * Also, unless we change the return value convention to use
	 * error-or-pointer (not NULL-or-pointer), troubleshootability
	 * suggests syslogged diagnostics are best here (ugh).
	 */

	proxy = spi_alloc_device(ctlr);
	if (!proxy)
		return NULL;

	WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias));

	spi_set_chipselect(proxy, 0, chip->chip_select);
	proxy->max_speed_hz = chip->max_speed_hz;
	proxy->mode = chip->mode;
	proxy->irq = chip->irq;
	strscpy(proxy->modalias, chip->modalias, sizeof(proxy->modalias));
	proxy->dev.platform_data = (void *) chip->platform_data;
	proxy->controller_data = chip->controller_data;
	proxy->controller_state = NULL;

	if (chip->swnode) {
		status = device_add_software_node(&proxy->dev, chip->swnode);
		if (status) {
			dev_err(&ctlr->dev, "failed to add software node to '%s': %d\n",
				chip->modalias, status);
			goto err_dev_put;
		}
	}

	status = spi_add_device(proxy);
	if (status < 0)
		goto err_dev_put;

	return proxy;

err_dev_put:
	device_remove_software_node(&proxy->dev);
	spi_dev_put(proxy);
	return NULL;
}
EXPORT_SYMBOL_GPL(spi_new_device);

/**
 * spi_unregister_device - unregister a single SPI device
 * @spi: spi_device to unregister
 *
 * Start making the passed SPI device vanish. Normally this would be handled
 * by spi_unregister_controller().
 */
void spi_unregister_device(struct spi_device *spi)
{
	if (!spi)
		return;

	if (spi->dev.of_node) {
		of_node_clear_flag(spi->dev.of_node, OF_POPULATED);
		of_node_put(spi->dev.of_node);
	}
	if (ACPI_COMPANION(&spi->dev))
		acpi_device_clear_enumerated(ACPI_COMPANION(&spi->dev));
	device_remove_software_node(&spi->dev);
	device_del(&spi->dev);
	spi_cleanup(spi);
	put_device(&spi->dev);
}
EXPORT_SYMBOL_GPL(spi_unregister_device);

static void spi_match_controller_to_boardinfo(struct spi_controller *ctlr,
					      struct spi_board_info *bi)
{
	struct spi_device *dev;

	if (ctlr->bus_num != bi->bus_num)
		return;

	dev = spi_new_device(ctlr, bi);
	if (!dev)
		dev_err(ctlr->dev.parent, "can't create new device for %s\n",
			bi->modalias);
}

/**
 * spi_register_board_info - register SPI devices for a given board
 * @info: array of chip descriptors
 * @n: how many descriptors are provided
 * Context: can sleep
 *
 * Board-specific early init code calls this (probably during arch_initcall)
 * with segments of the SPI device table. Any device nodes are created later,
 * after the relevant parent SPI controller (bus_num) is defined. We keep
 * this table of devices forever, so that reloading a controller driver will
 * not make Linux forget about these hard-wired devices.
 *
 * Other code can also call this, e.g. a particular add-on board might provide
 * SPI devices through its expansion connector, so code initializing that board
 * would naturally declare its SPI devices.
 *
 * The board info passed can safely be __initdata ... but be careful of
 * any embedded pointers (platform_data, etc), they're copied as-is.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_register_board_info(struct spi_board_info const *info, unsigned n)
{
	struct boardinfo *bi;
	int i;

	if (!n)
		return 0;

	bi = kcalloc(n, sizeof(*bi), GFP_KERNEL);
	if (!bi)
		return -ENOMEM;

	for (i = 0; i < n; i++, bi++, info++) {
		struct spi_controller *ctlr;

		memcpy(&bi->board_info, info, sizeof(*info));

		mutex_lock(&board_lock);
		list_add_tail(&bi->list, &board_list);
		list_for_each_entry(ctlr, &spi_controller_list, list)
			spi_match_controller_to_boardinfo(ctlr,
							  &bi->board_info);
		mutex_unlock(&board_lock);
	}

	return 0;
}
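
/*
 * Illustrative sketch, not part of this file: declaring a hard-wired
 * device from board init code, as the kernel-doc above describes. The
 * modalias and numbers are hypothetical.
 *
 *	static struct spi_board_info my_board_info[] __initdata = {
 *		{
 *			.modalias	= "my-chip",
 *			.max_speed_hz	= 5000000,
 *			.bus_num	= 0,
 *			.chip_select	= 1,
 *			.mode		= SPI_MODE_3,
 *		},
 *	};
 *
 *	// typically called from an arch_initcall():
 *	spi_register_board_info(my_board_info, ARRAY_SIZE(my_board_info));
 */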

/*-------------------------------------------------------------------------*/

/* Core methods for SPI resource management */

/**
 * spi_res_alloc - allocate a spi resource that is life-cycle managed
 *                 during the processing of a spi_message while using
 *                 spi_transfer_one
 * @spi:     the SPI device for which we allocate memory
 * @release: the release code to execute for this resource
 * @size:    size to alloc and return
 * @gfp:     GFP allocation flags
 *
 * Return: the pointer to the allocated data
 *
 * This may get enhanced in the future to allocate from a memory pool
 * of the @spi_device or @spi_controller to avoid repeated allocations.
 */
static void *spi_res_alloc(struct spi_device *spi, spi_res_release_t release,
			   size_t size, gfp_t gfp)
{
	struct spi_res *sres;

	sres = kzalloc(sizeof(*sres) + size, gfp);
	if (!sres)
		return NULL;

	INIT_LIST_HEAD(&sres->entry);
	sres->release = release;

	return sres->data;
}

/**
 * spi_res_free - free an SPI resource
 * @res: pointer to the custom data of a resource
 */
static void spi_res_free(void *res)
{
	struct spi_res *sres = container_of(res, struct spi_res, data);

	if (!res)
		return;

	WARN_ON(!list_empty(&sres->entry));
	kfree(sres);
}

/**
 * spi_res_add - add a spi_res to the spi_message
 * @message: the SPI message
 * @res:     the spi_resource
 */
static void spi_res_add(struct spi_message *message, void *res)
{
	struct spi_res *sres = container_of(res, struct spi_res, data);

	WARN_ON(!list_empty(&sres->entry));
	list_add_tail(&sres->entry, &message->resources);
}

/**
 * spi_res_release - release all SPI resources for this message
 * @ctlr:    the @spi_controller
 * @message: the @spi_message
 */
static void spi_res_release(struct spi_controller *ctlr, struct spi_message *message)
{
	struct spi_res *res, *tmp;

	list_for_each_entry_safe_reverse(res, tmp, &message->resources, entry) {
		if (res->release)
			res->release(ctlr, message, res->data);

		list_del(&res->entry);

		kfree(res);
	}
}

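/*
 * Illustrative sketch, not part of this file: how core code (e.g. the
 * transfer-splitting helpers) uses the spi_res functions above. A resource
 * is allocated with a release callback and attached to a message; it is
 * then torn down automatically by spi_res_release() when the message
 * completes. my_release() and struct my_data are hypothetical.
 *
 *	static void my_release(struct spi_controller *ctlr,
 *			       struct spi_message *msg, void *res)
 *	{
 *		// undo whatever the resource represents
 *	}
 *
 *	struct my_data *data;
 *
 *	data = spi_res_alloc(msg->spi, my_release, sizeof(*data), GFP_KERNEL);
 *	if (!data)
 *		return -ENOMEM;
 *	spi_res_add(msg, data);	// released and freed with the message
 */
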
/*-------------------------------------------------------------------------*/

static void spi_set_cs(struct spi_device *spi, bool enable, bool force)
{
	bool activate = enable;

	/*
	 * Avoid calling into the driver (or doing delays) if the chip select
	 * isn't actually changing from the last time this was called.
	 */
	if (!force && ((enable && spi->controller->last_cs == spi_get_chipselect(spi, 0)) ||
		       (!enable && spi->controller->last_cs != spi_get_chipselect(spi, 0))) &&
	    (spi->controller->last_cs_mode_high == (spi->mode & SPI_CS_HIGH)))
		return;

	trace_spi_set_cs(spi, activate);

	spi->controller->last_cs = enable ? spi_get_chipselect(spi, 0) : -1;
	spi->controller->last_cs_mode_high = spi->mode & SPI_CS_HIGH;

	if ((spi_get_csgpiod(spi, 0) || !spi->controller->set_cs_timing) && !activate)
		spi_delay_exec(&spi->cs_hold, NULL);

	if (spi->mode & SPI_CS_HIGH)
		enable = !enable;

	if (spi_get_csgpiod(spi, 0)) {
		if (!(spi->mode & SPI_NO_CS)) {
			/*
			 * Historically ACPI has no means of expressing the GPIO
			 * polarity, so the SPISerialBus() resource defines it on
			 * a per-chip basis. In order to avoid a chain of
			 * negations, the GPIO polarity is considered to be
			 * Active High. Even for the cases when _DSD() is
			 * involved (in the updated versions of ACPI) the GPIO CS
			 * polarity must be defined Active High to avoid
			 * ambiguity. That's why we use enable, which takes
			 * SPI_CS_HIGH into account.
			 */
			if (has_acpi_companion(&spi->dev))
				gpiod_set_value_cansleep(spi_get_csgpiod(spi, 0), !enable);
			else
				/* Polarity handled by GPIO library */
				gpiod_set_value_cansleep(spi_get_csgpiod(spi, 0), activate);
		}
		/* Some SPI masters need both GPIO CS & slave_select */
		if ((spi->controller->flags & SPI_CONTROLLER_GPIO_SS) &&
		    spi->controller->set_cs)
			spi->controller->set_cs(spi, !enable);
	} else if (spi->controller->set_cs) {
		spi->controller->set_cs(spi, !enable);
	}

	if (spi_get_csgpiod(spi, 0) || !spi->controller->set_cs_timing) {
		if (activate)
			spi_delay_exec(&spi->cs_setup, NULL);
		else
			spi_delay_exec(&spi->cs_inactive, NULL);
	}
}

#ifdef CONFIG_HAS_DMA
static int spi_map_buf_attrs(struct spi_controller *ctlr, struct device *dev,
			     struct sg_table *sgt, void *buf, size_t len,
			     enum dma_data_direction dir, unsigned long attrs)
{
	const bool vmalloced_buf = is_vmalloc_addr(buf);
	unsigned int max_seg_size = dma_get_max_seg_size(dev);
#ifdef CONFIG_HIGHMEM
	const bool kmap_buf = ((unsigned long)buf >= PKMAP_BASE &&
			       (unsigned long)buf < (PKMAP_BASE +
						     (LAST_PKMAP * PAGE_SIZE)));
#else
	const bool kmap_buf = false;
#endif
	int desc_len;
	int sgs;
	struct page *vm_page;
	struct scatterlist *sg;
	void *sg_buf;
	size_t min;
	int i, ret;

	if (vmalloced_buf || kmap_buf) {
		desc_len = min_t(unsigned long, max_seg_size, PAGE_SIZE);
		sgs = DIV_ROUND_UP(len + offset_in_page(buf), desc_len);
	} else if (virt_addr_valid(buf)) {
		desc_len = min_t(size_t, max_seg_size, ctlr->max_dma_len);
		sgs = DIV_ROUND_UP(len, desc_len);
	} else {
		return -EINVAL;
	}

	ret = sg_alloc_table(sgt, sgs, GFP_KERNEL);
	if (ret != 0)
		return ret;

	sg = &sgt->sgl[0];
	for (i = 0; i < sgs; i++) {

		if (vmalloced_buf || kmap_buf) {
			/*
			 * Next scatterlist entry size is the minimum between
			 * the desc_len and the remaining buffer length that
			 * fits in a page.
			 */
			min = min_t(size_t, desc_len,
				    min_t(size_t, len,
					  PAGE_SIZE - offset_in_page(buf)));
			if (vmalloced_buf)
				vm_page = vmalloc_to_page(buf);
			else
				vm_page = kmap_to_page(buf);
			if (!vm_page) {
				sg_free_table(sgt);
				return -ENOMEM;
			}
			sg_set_page(sg, vm_page,
				    min, offset_in_page(buf));
		} else {
			min = min_t(size_t, len, desc_len);
			sg_buf = buf;
			sg_set_buf(sg, sg_buf, min);
		}

		buf += min;
		len -= min;
		sg = sg_next(sg);
	}

	ret = dma_map_sgtable(dev, sgt, dir, attrs);
	if (ret < 0) {
		sg_free_table(sgt);
		return ret;
	}

	return 0;
}

int spi_map_buf(struct spi_controller *ctlr, struct device *dev,
		struct sg_table *sgt, void *buf, size_t len,
		enum dma_data_direction dir)
{
	return spi_map_buf_attrs(ctlr, dev, sgt, buf, len, dir, 0);
}

static void spi_unmap_buf_attrs(struct spi_controller *ctlr,
				struct device *dev, struct sg_table *sgt,
				enum dma_data_direction dir,
				unsigned long attrs)
{
	if (sgt->orig_nents) {
		dma_unmap_sgtable(dev, sgt, dir, attrs);
		sg_free_table(sgt);
		sgt->orig_nents = 0;
		sgt->nents = 0;
	}
}

void spi_unmap_buf(struct spi_controller *ctlr, struct device *dev,
		   struct sg_table *sgt, enum dma_data_direction dir)
{
	spi_unmap_buf_attrs(ctlr, dev, sgt, dir, 0);
}
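
/*
 * Illustrative sketch, not part of this file: pairing spi_map_buf() and
 * spi_unmap_buf(), as an in-kernel caller such as spi-mem might. The
 * sg_table is filled by spi_map_buf() and must be released with
 * spi_unmap_buf() once the DMA has completed; dma_dev/buf/len are
 * hypothetical.
 *
 *	struct sg_table sgt;
 *	int ret;
 *
 *	ret = spi_map_buf(ctlr, dma_dev, &sgt, buf, len, DMA_TO_DEVICE);
 *	if (ret)
 *		return ret;
 *
 *	// ... issue the DMA using sgt.sgl and sgt.nents ...
 *
 *	spi_unmap_buf(ctlr, dma_dev, &sgt, DMA_TO_DEVICE);
 */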

static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
{
	struct device *tx_dev, *rx_dev;
	struct spi_transfer *xfer;
	int ret;

	if (!ctlr->can_dma)
		return 0;

	if (ctlr->dma_tx)
		tx_dev = ctlr->dma_tx->device->dev;
	else if (ctlr->dma_map_dev)
		tx_dev = ctlr->dma_map_dev;
	else
		tx_dev = ctlr->dev.parent;

	if (ctlr->dma_rx)
		rx_dev = ctlr->dma_rx->device->dev;
	else if (ctlr->dma_map_dev)
		rx_dev = ctlr->dma_map_dev;
	else
		rx_dev = ctlr->dev.parent;

	ret = -ENOMSG;
	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		/* The sync is done before each transfer. */
		unsigned long attrs = DMA_ATTR_SKIP_CPU_SYNC;

		if (!ctlr->can_dma(ctlr, msg->spi, xfer))
			continue;

		if (xfer->tx_buf != NULL) {
			ret = spi_map_buf_attrs(ctlr, tx_dev, &xfer->tx_sg,
						(void *)xfer->tx_buf,
						xfer->len, DMA_TO_DEVICE,
						attrs);
			if (ret != 0)
				return ret;
		}

		if (xfer->rx_buf != NULL) {
			ret = spi_map_buf_attrs(ctlr, rx_dev, &xfer->rx_sg,
						xfer->rx_buf, xfer->len,
						DMA_FROM_DEVICE, attrs);
			if (ret != 0) {
				spi_unmap_buf_attrs(ctlr, tx_dev,
						    &xfer->tx_sg, DMA_TO_DEVICE,
						    attrs);

				return ret;
			}
		}
	}
	/* No transfer has been mapped, bail out with success */
	if (ret)
		return 0;

	ctlr->cur_rx_dma_dev = rx_dev;
	ctlr->cur_tx_dma_dev = tx_dev;
	ctlr->cur_msg_mapped = true;

	return 0;
}

static int __spi_unmap_msg(struct spi_controller *ctlr, struct spi_message *msg)
{
	struct device *rx_dev = ctlr->cur_rx_dma_dev;
	struct device *tx_dev = ctlr->cur_tx_dma_dev;
	struct spi_transfer *xfer;

	if (!ctlr->cur_msg_mapped || !ctlr->can_dma)
		return 0;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		/* The sync has already been done after each transfer. */
		unsigned long attrs = DMA_ATTR_SKIP_CPU_SYNC;

		if (!ctlr->can_dma(ctlr, msg->spi, xfer))
			continue;

		spi_unmap_buf_attrs(ctlr, rx_dev, &xfer->rx_sg,
				    DMA_FROM_DEVICE, attrs);
		spi_unmap_buf_attrs(ctlr, tx_dev, &xfer->tx_sg,
				    DMA_TO_DEVICE, attrs);
	}

	ctlr->cur_msg_mapped = false;

	return 0;
}

static void spi_dma_sync_for_device(struct spi_controller *ctlr,
				    struct spi_transfer *xfer)
{
	struct device *rx_dev = ctlr->cur_rx_dma_dev;
	struct device *tx_dev = ctlr->cur_tx_dma_dev;

	if (!ctlr->cur_msg_mapped)
		return;

	if (xfer->tx_sg.orig_nents)
		dma_sync_sgtable_for_device(tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
	if (xfer->rx_sg.orig_nents)
		dma_sync_sgtable_for_device(rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
}

static void spi_dma_sync_for_cpu(struct spi_controller *ctlr,
				 struct spi_transfer *xfer)
{
	struct device *rx_dev = ctlr->cur_rx_dma_dev;
	struct device *tx_dev = ctlr->cur_tx_dma_dev;

	if (!ctlr->cur_msg_mapped)
		return;

	if (xfer->rx_sg.orig_nents)
		dma_sync_sgtable_for_cpu(rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
	if (xfer->tx_sg.orig_nents)
		dma_sync_sgtable_for_cpu(tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
}
#else /* !CONFIG_HAS_DMA */
static inline int __spi_map_msg(struct spi_controller *ctlr,
				struct spi_message *msg)
{
	return 0;
}

static inline int __spi_unmap_msg(struct spi_controller *ctlr,
				  struct spi_message *msg)
{
	return 0;
}

static void spi_dma_sync_for_device(struct spi_controller *ctrl,
				    struct spi_transfer *xfer)
{
}

static void spi_dma_sync_for_cpu(struct spi_controller *ctrl,
				 struct spi_transfer *xfer)
{
}
#endif /* !CONFIG_HAS_DMA */

static inline int spi_unmap_msg(struct spi_controller *ctlr,
				struct spi_message *msg)
{
	struct spi_transfer *xfer;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		/*
		 * Restore the original value of tx_buf or rx_buf if they are
		 * NULL.
		 */
		if (xfer->tx_buf == ctlr->dummy_tx)
			xfer->tx_buf = NULL;
		if (xfer->rx_buf == ctlr->dummy_rx)
			xfer->rx_buf = NULL;
	}

	return __spi_unmap_msg(ctlr, msg);
}

static int spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
{
	struct spi_transfer *xfer;
	void *tmp;
	unsigned int max_tx, max_rx;

	if ((ctlr->flags & (SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX))
		&& !(msg->spi->mode & SPI_3WIRE)) {
		max_tx = 0;
		max_rx = 0;

		list_for_each_entry(xfer, &msg->transfers, transfer_list) {
			if ((ctlr->flags & SPI_CONTROLLER_MUST_TX) &&
			    !xfer->tx_buf)
				max_tx = max(xfer->len, max_tx);
			if ((ctlr->flags & SPI_CONTROLLER_MUST_RX) &&
			    !xfer->rx_buf)
				max_rx = max(xfer->len, max_rx);
		}

		if (max_tx) {
			tmp = krealloc(ctlr->dummy_tx, max_tx,
				       GFP_KERNEL | GFP_DMA | __GFP_ZERO);
			if (!tmp)
				return -ENOMEM;
			ctlr->dummy_tx = tmp;
		}

		if (max_rx) {
			tmp = krealloc(ctlr->dummy_rx, max_rx,
				       GFP_KERNEL | GFP_DMA);
			if (!tmp)
				return -ENOMEM;
			ctlr->dummy_rx = tmp;
		}

		if (max_tx || max_rx) {
			list_for_each_entry(xfer, &msg->transfers,
					    transfer_list) {
				if (!xfer->len)
					continue;
				if (!xfer->tx_buf)
					xfer->tx_buf = ctlr->dummy_tx;
				if (!xfer->rx_buf)
					xfer->rx_buf = ctlr->dummy_rx;
			}
		}
	}

	return __spi_map_msg(ctlr, msg);
}

static int spi_transfer_wait(struct spi_controller *ctlr,
			     struct spi_message *msg,
			     struct spi_transfer *xfer)
{
	struct spi_statistics __percpu *statm = ctlr->pcpu_statistics;
	struct spi_statistics __percpu *stats = msg->spi->pcpu_statistics;
	u32 speed_hz = xfer->speed_hz;
	unsigned long long ms;

	if (spi_controller_is_slave(ctlr)) {
		if (wait_for_completion_interruptible(&ctlr->xfer_completion)) {
			dev_dbg(&msg->spi->dev, "SPI transfer interrupted\n");
			return -EINTR;
		}
	} else {
		if (!speed_hz)
			speed_hz = 100000;

		/*
		 * For each byte we wait for 8 cycles of the SPI clock.
		 * Since speed is defined in Hz and we want milliseconds,
		 * use respective multiplier, but before the division,
		 * otherwise we may get 0 for short transfers.
		 */
		ms = 8LL * MSEC_PER_SEC * xfer->len;
		do_div(ms, speed_hz);

		/*
		 * Increase it twice and add 200 ms tolerance, use
		 * predefined maximum in case of overflow.
		 */
		ms += ms + 200;
		if (ms > UINT_MAX)
			ms = UINT_MAX;

		ms = wait_for_completion_timeout(&ctlr->xfer_completion,
						 msecs_to_jiffies(ms));

		if (ms == 0) {
			SPI_STATISTICS_INCREMENT_FIELD(statm, timedout);
			SPI_STATISTICS_INCREMENT_FIELD(stats, timedout);
			dev_err(&msg->spi->dev,
				"SPI transfer timed out\n");
			return -ETIMEDOUT;
		}
	}

	return 0;
}
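
/*
 * Worked example of the timeout math above (illustrative numbers): a
 * 4096 byte transfer at 1 MHz gives ms = 8 * 1000 * 4096 / 1000000,
 * which do_div() truncates to 32 ms; the tolerance step then yields
 * 32 + 32 + 200 = 264 ms passed to msecs_to_jiffies().
 */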

static void _spi_transfer_delay_ns(u32 ns)
{
	if (!ns)
		return;
	if (ns <= NSEC_PER_USEC) {
		ndelay(ns);
	} else {
		u32 us = DIV_ROUND_UP(ns, NSEC_PER_USEC);

		if (us <= 10)
			udelay(us);
		else
			usleep_range(us, us + DIV_ROUND_UP(us, 10));
	}
}

int spi_delay_to_ns(struct spi_delay *_delay, struct spi_transfer *xfer)
{
	u32 delay = _delay->value;
	u32 unit = _delay->unit;
	u32 hz;

	if (!delay)
		return 0;

	switch (unit) {
	case SPI_DELAY_UNIT_USECS:
		delay *= NSEC_PER_USEC;
		break;
	case SPI_DELAY_UNIT_NSECS:
		/* Nothing to do here */
		break;
	case SPI_DELAY_UNIT_SCK:
		/* Clock cycles need to be obtained from spi_transfer */
		if (!xfer)
			return -EINVAL;
		/*
		 * If the effective speed is unknown, approximate it
		 * by underestimating with half of the requested Hz.
		 */
		hz = xfer->effective_speed_hz ?: xfer->speed_hz / 2;
		if (!hz)
			return -EINVAL;

		/* Convert delay to nanoseconds */
		delay *= DIV_ROUND_UP(NSEC_PER_SEC, hz);
		break;
	default:
		return -EINVAL;
	}

	return delay;
}
EXPORT_SYMBOL_GPL(spi_delay_to_ns);

int spi_delay_exec(struct spi_delay *_delay, struct spi_transfer *xfer)
{
	int delay;

	might_sleep();

	if (!_delay)
		return -EINVAL;

	delay = spi_delay_to_ns(_delay, xfer);
	if (delay < 0)
		return delay;

	_spi_transfer_delay_ns(delay);

	return 0;
}
EXPORT_SYMBOL_GPL(spi_delay_exec);
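
/*
 * Illustrative sketch, not part of this file: how a caller might execute
 * a delay expressed in SPI clock cycles, plus a worked conversion. With
 * value = 4 and SPI_DELAY_UNIT_SCK at an effective speed of 10 MHz,
 * spi_delay_to_ns() returns 4 * DIV_ROUND_UP(1000000000, 10000000) =
 * 400 ns.
 *
 *	struct spi_delay d = {
 *		.value	= 4,
 *		.unit	= SPI_DELAY_UNIT_SCK,
 *	};
 *	int ret = spi_delay_exec(&d, xfer);	// delays for ~400 ns
 *	if (ret)
 *		return ret;
 */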

static void _spi_transfer_cs_change_delay(struct spi_message *msg,
					  struct spi_transfer *xfer)
{
	u32 default_delay_ns = 10 * NSEC_PER_USEC;
	u32 delay = xfer->cs_change_delay.value;
	u32 unit = xfer->cs_change_delay.unit;
	int ret;

	/* Return early on "fast" mode - for everything but USECS */
	if (!delay) {
		if (unit == SPI_DELAY_UNIT_USECS)
			_spi_transfer_delay_ns(default_delay_ns);
		return;
	}

	ret = spi_delay_exec(&xfer->cs_change_delay, xfer);
	if (ret) {
		dev_err_once(&msg->spi->dev,
			     "Use of unsupported delay unit %i, using default of %luus\n",
			     unit, default_delay_ns / NSEC_PER_USEC);
		_spi_transfer_delay_ns(default_delay_ns);
	}
}

void spi_transfer_cs_change_delay_exec(struct spi_message *msg,
				       struct spi_transfer *xfer)
{
	_spi_transfer_cs_change_delay(msg, xfer);
}
EXPORT_SYMBOL_GPL(spi_transfer_cs_change_delay_exec);

/*
 * spi_transfer_one_message - Default implementation of transfer_one_message()
 *
 * This is a standard implementation of transfer_one_message() for
 * drivers which implement a transfer_one() operation. It provides
 * standard handling of delays and chip select management.
 */
static int spi_transfer_one_message(struct spi_controller *ctlr,
				    struct spi_message *msg)
{
	struct spi_transfer *xfer;
	bool keep_cs = false;
	int ret = 0;
	struct spi_statistics __percpu *statm = ctlr->pcpu_statistics;
	struct spi_statistics __percpu *stats = msg->spi->pcpu_statistics;

	xfer = list_first_entry(&msg->transfers, struct spi_transfer, transfer_list);
	spi_set_cs(msg->spi, !xfer->cs_off, false);

	SPI_STATISTICS_INCREMENT_FIELD(statm, messages);
	SPI_STATISTICS_INCREMENT_FIELD(stats, messages);

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		trace_spi_transfer_start(msg, xfer);

		spi_statistics_add_transfer_stats(statm, xfer, ctlr);
		spi_statistics_add_transfer_stats(stats, xfer, ctlr);

		if (!ctlr->ptp_sts_supported) {
			xfer->ptp_sts_word_pre = 0;
			ptp_read_system_prets(xfer->ptp_sts);
		}

		if ((xfer->tx_buf || xfer->rx_buf) && xfer->len) {
			reinit_completion(&ctlr->xfer_completion);

fallback_pio:
			spi_dma_sync_for_device(ctlr, xfer);
			ret = ctlr->transfer_one(ctlr, msg->spi, xfer);
			if (ret < 0) {
				spi_dma_sync_for_cpu(ctlr, xfer);

				if (ctlr->cur_msg_mapped &&
				    (xfer->error & SPI_TRANS_FAIL_NO_START)) {
					__spi_unmap_msg(ctlr, msg);
					ctlr->fallback = true;
					xfer->error &= ~SPI_TRANS_FAIL_NO_START;
					goto fallback_pio;
				}

				SPI_STATISTICS_INCREMENT_FIELD(statm,
							       errors);
				SPI_STATISTICS_INCREMENT_FIELD(stats,
							       errors);
				dev_err(&msg->spi->dev,
					"SPI transfer failed: %d\n", ret);
				goto out;
			}

			if (ret > 0) {
				ret = spi_transfer_wait(ctlr, msg, xfer);
				if (ret < 0)
					msg->status = ret;
			}

			spi_dma_sync_for_cpu(ctlr, xfer);
		} else {
			if (xfer->len)
				dev_err(&msg->spi->dev,
					"Bufferless transfer has length %u\n",
					xfer->len);
		}

		if (!ctlr->ptp_sts_supported) {
			ptp_read_system_postts(xfer->ptp_sts);
			xfer->ptp_sts_word_post = xfer->len;
		}

		trace_spi_transfer_stop(msg, xfer);

		if (msg->status != -EINPROGRESS)
			goto out;

		spi_transfer_delay_exec(xfer);

		if (xfer->cs_change) {
			if (list_is_last(&xfer->transfer_list,
					 &msg->transfers)) {
				keep_cs = true;
			} else {
				if (!xfer->cs_off)
					spi_set_cs(msg->spi, false, false);
				_spi_transfer_cs_change_delay(msg, xfer);
				if (!list_next_entry(xfer, transfer_list)->cs_off)
					spi_set_cs(msg->spi, true, false);
			}
		} else if (!list_is_last(&xfer->transfer_list, &msg->transfers) &&
			   xfer->cs_off != list_next_entry(xfer, transfer_list)->cs_off) {
			spi_set_cs(msg->spi, xfer->cs_off, false);
		}

		msg->actual_length += xfer->len;
	}

out:
	if (ret != 0 || !keep_cs)
		spi_set_cs(msg->spi, false, false);

	if (msg->status == -EINPROGRESS)
		msg->status = ret;

	if (msg->status && ctlr->handle_err)
		ctlr->handle_err(ctlr, msg);

	spi_finalize_current_message(ctlr);

	return ret;
}

/**
 * spi_finalize_current_transfer - report completion of a transfer
 * @ctlr: the controller reporting completion
 *
 * Called by SPI drivers using the core transfer_one_message()
 * implementation to notify it that the current interrupt driven
 * transfer has finished and the next one may be scheduled.
 */
void spi_finalize_current_transfer(struct spi_controller *ctlr)
{
	complete(&ctlr->xfer_completion);
}
EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);
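
/*
 * Illustrative sketch, not part of this file: the interrupt-driven pattern
 * the kernel-doc above refers to. A driver's transfer_one() starts the
 * hardware and returns 1 to indicate the transfer is still in flight; its
 * IRQ handler later calls spi_finalize_current_transfer() so that
 * spi_transfer_wait() wakes up. All my_*() names are hypothetical.
 *
 *	static int my_transfer_one(struct spi_controller *ctlr,
 *				   struct spi_device *spi,
 *				   struct spi_transfer *xfer)
 *	{
 *		my_hw_start(ctlr, xfer);
 *		return 1;	// still in progress, core will wait
 *	}
 *
 *	static irqreturn_t my_irq(int irq, void *dev_id)
 *	{
 *		struct spi_controller *ctlr = dev_id;
 *
 *		my_hw_ack_irq(ctlr);
 *		spi_finalize_current_transfer(ctlr);
 *		return IRQ_HANDLED;
 *	}
 */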

static void spi_idle_runtime_pm(struct spi_controller *ctlr)
{
	if (ctlr->auto_runtime_pm) {
		pm_runtime_mark_last_busy(ctlr->dev.parent);
		pm_runtime_put_autosuspend(ctlr->dev.parent);
	}
}

static int __spi_pump_transfer_message(struct spi_controller *ctlr,
				       struct spi_message *msg, bool was_busy)
{
	struct spi_transfer *xfer;
	int ret;

	if (!was_busy && ctlr->auto_runtime_pm) {
		ret = pm_runtime_get_sync(ctlr->dev.parent);
		if (ret < 0) {
			pm_runtime_put_noidle(ctlr->dev.parent);
			dev_err(&ctlr->dev, "Failed to power device: %d\n",
				ret);

			msg->status = ret;
			spi_finalize_current_message(ctlr);

			return ret;
		}
	}

	if (!was_busy)
		trace_spi_controller_busy(ctlr);

	if (!was_busy && ctlr->prepare_transfer_hardware) {
		ret = ctlr->prepare_transfer_hardware(ctlr);
		if (ret) {
			dev_err(&ctlr->dev,
				"failed to prepare transfer hardware: %d\n",
				ret);

			if (ctlr->auto_runtime_pm)
				pm_runtime_put(ctlr->dev.parent);

			msg->status = ret;
			spi_finalize_current_message(ctlr);

			return ret;
		}
	}

	trace_spi_message_start(msg);

	ret = spi_split_transfers_maxsize(ctlr, msg,
					  spi_max_transfer_size(msg->spi),
					  GFP_KERNEL | GFP_DMA);
	if (ret) {
		msg->status = ret;
		spi_finalize_current_message(ctlr);
		return ret;
	}

	if (ctlr->prepare_message) {
		ret = ctlr->prepare_message(ctlr, msg);
		if (ret) {
			dev_err(&ctlr->dev, "failed to prepare message: %d\n",
				ret);
			msg->status = ret;
			spi_finalize_current_message(ctlr);
			return ret;
		}
		msg->prepared = true;
	}

	ret = spi_map_msg(ctlr, msg);
	if (ret) {
		msg->status = ret;
		spi_finalize_current_message(ctlr);
		return ret;
	}

	if (!ctlr->ptp_sts_supported && !ctlr->transfer_one) {
		list_for_each_entry(xfer, &msg->transfers, transfer_list) {
			xfer->ptp_sts_word_pre = 0;
			ptp_read_system_prets(xfer->ptp_sts);
		}
	}

	/*
	 * A driver's implementation of transfer_one_message() must arrange
	 * for spi_finalize_current_message() to get called. Most drivers
	 * will do this in the calling context, but some don't. For those
	 * cases, a completion is used to guarantee that this function does
	 * not return until spi_finalize_current_message() is done accessing
	 * ctlr->cur_msg.
	 * Use of the following two flags enables us to opportunistically
	 * skip the use of the completion, since its use involves expensive
	 * spin locks. In case of a race with the context that calls
	 * spi_finalize_current_message() the completion will always be used,
	 * due to strict ordering of these flags using barriers.
	 */
	WRITE_ONCE(ctlr->cur_msg_incomplete, true);
	WRITE_ONCE(ctlr->cur_msg_need_completion, false);
	reinit_completion(&ctlr->cur_msg_completion);
	smp_wmb(); /* Make these available to spi_finalize_current_message() */

	ret = ctlr->transfer_one_message(ctlr, msg);
	if (ret) {
		dev_err(&ctlr->dev,
			"failed to transfer one message from queue\n");
		return ret;
	}

	WRITE_ONCE(ctlr->cur_msg_need_completion, true);
	smp_mb(); /* See spi_finalize_current_message()... */
	if (READ_ONCE(ctlr->cur_msg_incomplete))
		wait_for_completion(&ctlr->cur_msg_completion);

	return 0;
}
1728
1729 /**
1730 * __spi_pump_messages - function which processes SPI message queue
1731 * @ctlr: controller to process queue for
1732 * @in_kthread: true if we are in the context of the message pump thread
1733 *
1734 * This function checks if there is any SPI message in the queue that
1735 * needs processing and if so call out to the driver to initialize hardware
1736 * and transfer each message.
1737 *
1738 * Note that it is called both from the kthread itself and also from
1739 * inside spi_sync(); the queue extraction handling at the top of the
1740 * function should deal with this safely.
1741 */
__spi_pump_messages(struct spi_controller * ctlr,bool in_kthread)1742 static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
1743 {
1744 struct spi_message *msg;
1745 bool was_busy = false;
1746 unsigned long flags;
1747 int ret;
1748
1749 /* Take the I/O mutex */
1750 mutex_lock(&ctlr->io_mutex);
1751
1752 /* Lock queue */
1753 spin_lock_irqsave(&ctlr->queue_lock, flags);
1754
1755 /* Make sure we are not already running a message */
1756 if (ctlr->cur_msg)
1757 goto out_unlock;
1758
1759 /* Check if the queue is idle */
1760 if (list_empty(&ctlr->queue) || !ctlr->running) {
1761 if (!ctlr->busy)
1762 goto out_unlock;
1763
1764 /* Defer any non-atomic teardown to the thread */
1765 if (!in_kthread) {
1766 if (!ctlr->dummy_rx && !ctlr->dummy_tx &&
1767 !ctlr->unprepare_transfer_hardware) {
1768 spi_idle_runtime_pm(ctlr);
1769 ctlr->busy = false;
1770 ctlr->queue_empty = true;
1771 trace_spi_controller_idle(ctlr);
1772 } else {
1773 kthread_queue_work(ctlr->kworker,
1774 &ctlr->pump_messages);
1775 }
1776 goto out_unlock;
1777 }
1778
1779 ctlr->busy = false;
1780 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1781
1782 kfree(ctlr->dummy_rx);
1783 ctlr->dummy_rx = NULL;
1784 kfree(ctlr->dummy_tx);
1785 ctlr->dummy_tx = NULL;
1786 if (ctlr->unprepare_transfer_hardware &&
1787 ctlr->unprepare_transfer_hardware(ctlr))
1788 dev_err(&ctlr->dev,
1789 "failed to unprepare transfer hardware\n");
1790 spi_idle_runtime_pm(ctlr);
1791 trace_spi_controller_idle(ctlr);
1792
1793 spin_lock_irqsave(&ctlr->queue_lock, flags);
1794 ctlr->queue_empty = true;
1795 goto out_unlock;
1796 }
1797
1798 /* Extract head of queue */
1799 msg = list_first_entry(&ctlr->queue, struct spi_message, queue);
1800 ctlr->cur_msg = msg;
1801
1802 list_del_init(&msg->queue);
1803 if (ctlr->busy)
1804 was_busy = true;
1805 else
1806 ctlr->busy = true;
1807 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1808
1809 ret = __spi_pump_transfer_message(ctlr, msg, was_busy);
1810 kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
1811
1812 ctlr->cur_msg = NULL;
1813 ctlr->fallback = false;
1814
1815 mutex_unlock(&ctlr->io_mutex);
1816
1817 /* Prod the scheduler in case transfer_one() was busy waiting */
1818 if (!ret)
1819 cond_resched();
1820 return;
1821
1822 out_unlock:
1823 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1824 mutex_unlock(&ctlr->io_mutex);
1825 }
1826
1827 /**
1828 * spi_pump_messages - kthread work function which processes spi message queue
1829 * @work: pointer to kthread work struct contained in the controller struct
1830 */
1831 static void spi_pump_messages(struct kthread_work *work)
1832 {
1833 struct spi_controller *ctlr =
1834 container_of(work, struct spi_controller, pump_messages);
1835
1836 __spi_pump_messages(ctlr, true);
1837 }
1838
1839 /**
1840 * spi_take_timestamp_pre - helper to collect the beginning of the TX timestamp
1841 * @ctlr: Pointer to the spi_controller structure of the driver
1842 * @xfer: Pointer to the transfer being timestamped
1843 * @progress: How many words (not bytes) have been transferred so far
1844 * @irqs_off: If true, will disable IRQs and preemption for the duration of the
1845 * transfer, for less jitter in time measurement. Only compatible
1846 * with PIO drivers. If true, it must be followed by a call to
1847 * spi_take_timestamp_post() or the system will crash.
1848 * WARNING: for fully predictable results, the CPU frequency must
1849 * also be under control (governor).
1850 *
1851 * This is a helper for drivers to collect the beginning of the TX timestamp
1852 * for the requested byte from the SPI transfer. The frequency with which this
1853 * function must be called (once per word, once for the whole transfer, once
1854 * per batch of words etc) is arbitrary as long as the @tx buffer offset is
1855 * greater than or equal to the requested byte at the time of the call. The
1856 * timestamp is only taken once, at the first such call. It is assumed that
1857 * the driver advances its @tx buffer pointer monotonically.
1858 */
1859 void spi_take_timestamp_pre(struct spi_controller *ctlr,
1860 struct spi_transfer *xfer,
1861 size_t progress, bool irqs_off)
1862 {
1863 if (!xfer->ptp_sts)
1864 return;
1865
1866 if (xfer->timestamped)
1867 return;
1868
1869 if (progress > xfer->ptp_sts_word_pre)
1870 return;
1871
1872 /* Capture the resolution of the timestamp */
1873 xfer->ptp_sts_word_pre = progress;
1874
1875 if (irqs_off) {
1876 local_irq_save(ctlr->irq_flags);
1877 preempt_disable();
1878 }
1879
1880 ptp_read_system_prets(xfer->ptp_sts);
1881 }
1882 EXPORT_SYMBOL_GPL(spi_take_timestamp_pre);
1883
1884 /**
1885 * spi_take_timestamp_post - helper to collect the end of the TX timestamp
1886 * @ctlr: Pointer to the spi_controller structure of the driver
1887 * @xfer: Pointer to the transfer being timestamped
1888 * @progress: How many words (not bytes) have been transferred so far
1889 * @irqs_off: If true, will re-enable IRQs and preemption for the local CPU.
1890 *
1891 * This is a helper for drivers to collect the end of the TX timestamp for
1892 * the requested byte from the SPI transfer. Can be called with an arbitrary
1893 * frequency: only the first call where @tx exceeds or is equal to the
1894 * requested word will be timestamped.
1895 */
1896 void spi_take_timestamp_post(struct spi_controller *ctlr,
1897 struct spi_transfer *xfer,
1898 size_t progress, bool irqs_off)
1899 {
1900 if (!xfer->ptp_sts)
1901 return;
1902
1903 if (xfer->timestamped)
1904 return;
1905
1906 if (progress < xfer->ptp_sts_word_post)
1907 return;
1908
1909 ptp_read_system_postts(xfer->ptp_sts);
1910
1911 if (irqs_off) {
1912 local_irq_restore(ctlr->irq_flags);
1913 preempt_enable();
1914 }
1915
1916 /* Capture the resolution of the timestamp */
1917 xfer->ptp_sts_word_post = progress;
1918
1919 xfer->timestamped = 1;
1920 }
1921 EXPORT_SYMBOL_GPL(spi_take_timestamp_post);
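
/*
 * Example (illustrative only): a minimal sketch of a PIO transmit loop
 * bracketing each word with the two helpers above. The FIFO address is a
 * hypothetical parameter; only spi_take_timestamp_pre()/_post() and
 * writeb() are real kernel API.
 */
static void __maybe_unused example_pio_tx_timestamped(struct spi_controller *ctlr,
						      struct spi_transfer *xfer,
						      void __iomem *fifo)
{
	const u8 *tx = xfer->tx_buf;
	size_t i;

	for (i = 0; i < xfer->len; i++) {
		/* Arm the "pre" timestamp before pushing the word... */
		spi_take_timestamp_pre(ctlr, xfer, i, false);
		writeb(tx[i], fifo);
		/* ...and close it once the word has been pushed out */
		spi_take_timestamp_post(ctlr, xfer, i, false);
	}
}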
1922
1923 /**
1924 * spi_set_thread_rt - set the controller to pump at realtime priority
1925 * @ctlr: controller to boost priority of
1926 *
1927 * This can be called because the controller requested realtime priority
1928 * (by setting the ->rt value before calling spi_register_controller()) or
1929 * because a device on the bus said that its transfers needed realtime
1930 * priority.
1931 *
1932 * NOTE: at the moment if any device on a bus says it needs realtime then
1933 * the thread will be at realtime priority for all transfers on that
1934 * controller. If this eventually becomes a problem we may see if we can
1935 * find a way to boost the priority only temporarily during relevant
1936 * transfers.
1937 */
1938 static void spi_set_thread_rt(struct spi_controller *ctlr)
1939 {
1940 dev_info(&ctlr->dev,
1941 "will run message pump with realtime priority\n");
1942 sched_set_fifo(ctlr->kworker->task);
1943 }
1944
1945 static int spi_init_queue(struct spi_controller *ctlr)
1946 {
1947 ctlr->running = false;
1948 ctlr->busy = false;
1949 ctlr->queue_empty = true;
1950
1951 ctlr->kworker = kthread_create_worker(0, dev_name(&ctlr->dev));
1952 if (IS_ERR(ctlr->kworker)) {
1953 dev_err(&ctlr->dev, "failed to create message pump kworker\n");
1954 return PTR_ERR(ctlr->kworker);
1955 }
1956
1957 kthread_init_work(&ctlr->pump_messages, spi_pump_messages);
1958
1959 /*
1960 * Controller config will indicate if this controller should run the
1961 * message pump with high (realtime) priority to reduce the transfer
1962 * latency on the bus by minimising the delay between a transfer
1963 * request and the scheduling of the message pump thread. Without this
1964 * setting the message pump thread will remain at default priority.
1965 */
1966 if (ctlr->rt)
1967 spi_set_thread_rt(ctlr);
1968
1969 return 0;
1970 }
1971
1972 /**
1973 * spi_get_next_queued_message() - called by driver to check for queued
1974 * messages
1975 * @ctlr: the controller to check for queued messages
1976 *
1977 * If there are more messages in the queue, the next message is returned from
1978 * this call.
1979 *
1980 * Return: the next message in the queue, else NULL if the queue is empty.
1981 */
1982 struct spi_message *spi_get_next_queued_message(struct spi_controller *ctlr)
1983 {
1984 struct spi_message *next;
1985 unsigned long flags;
1986
1987 /* Get a pointer to the next message, if any */
1988 spin_lock_irqsave(&ctlr->queue_lock, flags);
1989 next = list_first_entry_or_null(&ctlr->queue, struct spi_message,
1990 queue);
1991 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1992
1993 return next;
1994 }
1995 EXPORT_SYMBOL_GPL(spi_get_next_queued_message);
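
/*
 * Example (illustrative only): a hypothetical driver peeking at the queue
 * with the helper above to decide whether the hardware is worth keeping
 * prepared between messages.
 */
static bool __maybe_unused example_more_messages_pending(struct spi_controller *ctlr)
{
	return spi_get_next_queued_message(ctlr) != NULL;
}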
1996
1997 /**
1998 * spi_finalize_current_message() - the current message is complete
1999 * @ctlr: the controller to return the message to
2000 *
2001 * Called by the driver to notify the core that the message in the front of the
2002 * queue is complete and can be removed from the queue.
2003 */
2004 void spi_finalize_current_message(struct spi_controller *ctlr)
2005 {
2006 struct spi_transfer *xfer;
2007 struct spi_message *mesg;
2008 int ret;
2009
2010 mesg = ctlr->cur_msg;
2011
2012 if (!ctlr->ptp_sts_supported && !ctlr->transfer_one) {
2013 list_for_each_entry(xfer, &mesg->transfers, transfer_list) {
2014 ptp_read_system_postts(xfer->ptp_sts);
2015 xfer->ptp_sts_word_post = xfer->len;
2016 }
2017 }
2018
2019 if (unlikely(ctlr->ptp_sts_supported))
2020 list_for_each_entry(xfer, &mesg->transfers, transfer_list)
2021 WARN_ON_ONCE(xfer->ptp_sts && !xfer->timestamped);
2022
2023 spi_unmap_msg(ctlr, mesg);
2024
2025 /*
2026 * In the prepare_messages callback the SPI bus has the opportunity
2027 * to split a transfer to smaller chunks.
2028 *
2029 * Release the split transfers here since spi_map_msg() is done on
2030 * the split transfers.
2031 */
2032 spi_res_release(ctlr, mesg);
2033
2034 if (mesg->prepared && ctlr->unprepare_message) {
2035 ret = ctlr->unprepare_message(ctlr, mesg);
2036 if (ret) {
2037 dev_err(&ctlr->dev, "failed to unprepare message: %d\n",
2038 ret);
2039 }
2040 }
2041
2042 mesg->prepared = false;
2043
2044 WRITE_ONCE(ctlr->cur_msg_incomplete, false);
2045 smp_mb(); /* See __spi_pump_transfer_message()... */
2046 if (READ_ONCE(ctlr->cur_msg_need_completion))
2047 complete(&ctlr->cur_msg_completion);
2048
2049 trace_spi_message_done(mesg);
2050
2051 mesg->state = NULL;
2052 if (mesg->complete)
2053 mesg->complete(mesg->context);
2054 }
2055 EXPORT_SYMBOL_GPL(spi_finalize_current_message);
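
/*
 * Example (illustrative only): sketch of how a driver that implements its
 * own transfer_one_message() hands the message back to the core once its
 * hardware signals completion. The helper name and the status plumbing
 * are hypothetical; spi_finalize_current_message() is the real API.
 */
static void __maybe_unused example_complete_current_message(struct spi_controller *ctlr,
							     int status)
{
	ctlr->cur_msg->status = status;	/* 0 on success, else -errno */
	spi_finalize_current_message(ctlr);
}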
2056
2057 static int spi_start_queue(struct spi_controller *ctlr)
2058 {
2059 unsigned long flags;
2060
2061 spin_lock_irqsave(&ctlr->queue_lock, flags);
2062
2063 if (ctlr->running || ctlr->busy) {
2064 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2065 return -EBUSY;
2066 }
2067
2068 ctlr->running = true;
2069 ctlr->cur_msg = NULL;
2070 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2071
2072 kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
2073
2074 return 0;
2075 }
2076
2077 static int spi_stop_queue(struct spi_controller *ctlr)
2078 {
2079 unsigned long flags;
2080 unsigned limit = 500;
2081 int ret = 0;
2082
2083 spin_lock_irqsave(&ctlr->queue_lock, flags);
2084
2085 /*
2086 * This is a bit lame, but is optimized for the common execution path.
2087 * A wait_queue on the ctlr->busy could be used, but then the common
2088 * execution path (pump_messages) would be required to call wake_up or
2089 * friends on every SPI message. Do this instead.
2090 */
2091 while ((!list_empty(&ctlr->queue) || ctlr->busy) && limit--) {
2092 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2093 usleep_range(10000, 11000);
2094 spin_lock_irqsave(&ctlr->queue_lock, flags);
2095 }
2096
2097 if (!list_empty(&ctlr->queue) || ctlr->busy)
2098 ret = -EBUSY;
2099 else
2100 ctlr->running = false;
2101
2102 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2103
2104 if (ret) {
2105 dev_warn(&ctlr->dev, "could not stop message queue\n");
2106 return ret;
2107 }
2108 return ret;
2109 }
2110
2111 static int spi_destroy_queue(struct spi_controller *ctlr)
2112 {
2113 int ret;
2114
2115 ret = spi_stop_queue(ctlr);
2116
2117 /*
2118 * kthread_flush_worker will block until all work is done.
2119 * If the reason that stop_queue timed out is that the work will never
2120 * finish, then there is no point in flushing or stopping the worker,
2121 * so just return the error.
2122 */
2123 if (ret) {
2124 dev_err(&ctlr->dev, "problem destroying queue\n");
2125 return ret;
2126 }
2127
2128 kthread_destroy_worker(ctlr->kworker);
2129
2130 return 0;
2131 }
2132
2133 static int __spi_queued_transfer(struct spi_device *spi,
2134 struct spi_message *msg,
2135 bool need_pump)
2136 {
2137 struct spi_controller *ctlr = spi->controller;
2138 unsigned long flags;
2139
2140 spin_lock_irqsave(&ctlr->queue_lock, flags);
2141
2142 if (!ctlr->running) {
2143 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2144 return -ESHUTDOWN;
2145 }
2146 msg->actual_length = 0;
2147 msg->status = -EINPROGRESS;
2148
2149 list_add_tail(&msg->queue, &ctlr->queue);
2150 ctlr->queue_empty = false;
2151 if (!ctlr->busy && need_pump)
2152 kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
2153
2154 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2155 return 0;
2156 }
2157
2158 /**
2159 * spi_queued_transfer - transfer function for queued transfers
2160 * @spi: SPI device which is requesting transfer
2161 * @msg: SPI message to be handled; it is queued onto the driver queue
2162 *
2163 * Return: zero on success, else a negative error code.
2164 */
2165 static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
2166 {
2167 return __spi_queued_transfer(spi, msg, true);
2168 }
2169
2170 static int spi_controller_initialize_queue(struct spi_controller *ctlr)
2171 {
2172 int ret;
2173
2174 ctlr->transfer = spi_queued_transfer;
2175 if (!ctlr->transfer_one_message)
2176 ctlr->transfer_one_message = spi_transfer_one_message;
2177
2178 /* Initialize and start queue */
2179 ret = spi_init_queue(ctlr);
2180 if (ret) {
2181 dev_err(&ctlr->dev, "problem initializing queue\n");
2182 goto err_init_queue;
2183 }
2184 ctlr->queued = true;
2185 ret = spi_start_queue(ctlr);
2186 if (ret) {
2187 dev_err(&ctlr->dev, "problem starting queue\n");
2188 goto err_start_queue;
2189 }
2190
2191 return 0;
2192
2193 err_start_queue:
2194 spi_destroy_queue(ctlr);
2195 err_init_queue:
2196 return ret;
2197 }
2198
2199 /**
2200 * spi_flush_queue - Send all pending messages in the queue from the caller's
2201 * context
2202 * @ctlr: controller to process queue for
2203 *
2204 * This should be used when one wants to ensure all pending messages have been
2205 * sent before doing something. It is used by the spi-mem code to make sure SPI
2206 * memory operations do not preempt regular SPI transfers that have been queued
2207 * before the spi-mem operation.
2208 */
2209 void spi_flush_queue(struct spi_controller *ctlr)
2210 {
2211 if (ctlr->transfer == spi_queued_transfer)
2212 __spi_pump_messages(ctlr, false);
2213 }
2214
2215 /*-------------------------------------------------------------------------*/
2216
2217 #if defined(CONFIG_OF)
2218 static void of_spi_parse_dt_cs_delay(struct device_node *nc,
2219 struct spi_delay *delay, const char *prop)
2220 {
2221 u32 value;
2222
2223 if (!of_property_read_u32(nc, prop, &value)) {
2224 if (value > U16_MAX) {
2225 delay->value = DIV_ROUND_UP(value, 1000);
2226 delay->unit = SPI_DELAY_UNIT_USECS;
2227 } else {
2228 delay->value = value;
2229 delay->unit = SPI_DELAY_UNIT_NSECS;
2230 }
2231 }
2232 }
2233
2234 static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi,
2235 struct device_node *nc)
2236 {
2237 u32 value;
2238 int rc;
2239
2240 /* Mode (clock phase/polarity/etc.) */
2241 if (of_property_read_bool(nc, "spi-cpha"))
2242 spi->mode |= SPI_CPHA;
2243 if (of_property_read_bool(nc, "spi-cpol"))
2244 spi->mode |= SPI_CPOL;
2245 if (of_property_read_bool(nc, "spi-3wire"))
2246 spi->mode |= SPI_3WIRE;
2247 if (of_property_read_bool(nc, "spi-lsb-first"))
2248 spi->mode |= SPI_LSB_FIRST;
2249 if (of_property_read_bool(nc, "spi-cs-high"))
2250 spi->mode |= SPI_CS_HIGH;
2251
2252 /* Device DUAL/QUAD mode */
2253 if (!of_property_read_u32(nc, "spi-tx-bus-width", &value)) {
2254 switch (value) {
2255 case 0:
2256 spi->mode |= SPI_NO_TX;
2257 break;
2258 case 1:
2259 break;
2260 case 2:
2261 spi->mode |= SPI_TX_DUAL;
2262 break;
2263 case 4:
2264 spi->mode |= SPI_TX_QUAD;
2265 break;
2266 case 8:
2267 spi->mode |= SPI_TX_OCTAL;
2268 break;
2269 default:
2270 dev_warn(&ctlr->dev,
2271 "spi-tx-bus-width %d not supported\n",
2272 value);
2273 break;
2274 }
2275 }
2276
2277 if (!of_property_read_u32(nc, "spi-rx-bus-width", &value)) {
2278 switch (value) {
2279 case 0:
2280 spi->mode |= SPI_NO_RX;
2281 break;
2282 case 1:
2283 break;
2284 case 2:
2285 spi->mode |= SPI_RX_DUAL;
2286 break;
2287 case 4:
2288 spi->mode |= SPI_RX_QUAD;
2289 break;
2290 case 8:
2291 spi->mode |= SPI_RX_OCTAL;
2292 break;
2293 default:
2294 dev_warn(&ctlr->dev,
2295 "spi-rx-bus-width %d not supported\n",
2296 value);
2297 break;
2298 }
2299 }
2300
2301 if (spi_controller_is_slave(ctlr)) {
2302 if (!of_node_name_eq(nc, "slave")) {
2303 dev_err(&ctlr->dev, "%pOF is not called 'slave'\n",
2304 nc);
2305 return -EINVAL;
2306 }
2307 return 0;
2308 }
2309
2310 /* Device address */
2311 rc = of_property_read_u32(nc, "reg", &value);
2312 if (rc) {
2313 dev_err(&ctlr->dev, "%pOF has no valid 'reg' property (%d)\n",
2314 nc, rc);
2315 return rc;
2316 }
2317 spi_set_chipselect(spi, 0, value);
2318
2319 /* Device speed */
2320 if (!of_property_read_u32(nc, "spi-max-frequency", &value))
2321 spi->max_speed_hz = value;
2322
2323 /* Device CS delays */
2324 of_spi_parse_dt_cs_delay(nc, &spi->cs_setup, "spi-cs-setup-delay-ns");
2325 of_spi_parse_dt_cs_delay(nc, &spi->cs_hold, "spi-cs-hold-delay-ns");
2326 of_spi_parse_dt_cs_delay(nc, &spi->cs_inactive, "spi-cs-inactive-delay-ns");
2327
2328 return 0;
2329 }
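
/*
 * Example (illustrative only): a device tree node exercising the
 * properties parsed above; the compatible string and values are made up.
 *
 *	flash@0 {
 *		compatible = "jedec,spi-nor";
 *		reg = <0>;
 *		spi-max-frequency = <50000000>;
 *		spi-tx-bus-width = <4>;
 *		spi-rx-bus-width = <4>;
 *		spi-cs-setup-delay-ns = <100>;
 *	};
 */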
2330
2331 static struct spi_device *
2332 of_register_spi_device(struct spi_controller *ctlr, struct device_node *nc)
2333 {
2334 struct spi_device *spi;
2335 int rc;
2336
2337 /* Alloc an spi_device */
2338 spi = spi_alloc_device(ctlr);
2339 if (!spi) {
2340 dev_err(&ctlr->dev, "spi_device alloc error for %pOF\n", nc);
2341 rc = -ENOMEM;
2342 goto err_out;
2343 }
2344
2345 /* Select device driver */
2346 rc = of_alias_from_compatible(nc, spi->modalias,
2347 sizeof(spi->modalias));
2348 if (rc < 0) {
2349 dev_err(&ctlr->dev, "cannot find modalias for %pOF\n", nc);
2350 goto err_out;
2351 }
2352
2353 rc = of_spi_parse_dt(ctlr, spi, nc);
2354 if (rc)
2355 goto err_out;
2356
2357 /* Store a pointer to the node in the device structure */
2358 of_node_get(nc);
2359
2360 device_set_node(&spi->dev, of_fwnode_handle(nc));
2361
2362 /* Register the new device */
2363 rc = spi_add_device(spi);
2364 if (rc) {
2365 dev_err(&ctlr->dev, "spi_device register error %pOF\n", nc);
2366 goto err_of_node_put;
2367 }
2368
2369 return spi;
2370
2371 err_of_node_put:
2372 of_node_put(nc);
2373 err_out:
2374 spi_dev_put(spi);
2375 return ERR_PTR(rc);
2376 }
2377
2378 /**
2379 * of_register_spi_devices() - Register child devices onto the SPI bus
2380 * @ctlr: Pointer to spi_controller device
2381 *
2382 * Registers an spi_device for each child node of the controller node that
2383 * represents a valid SPI slave.
2384 */
2385 static void of_register_spi_devices(struct spi_controller *ctlr)
2386 {
2387 struct spi_device *spi;
2388 struct device_node *nc;
2389
2390 for_each_available_child_of_node(ctlr->dev.of_node, nc) {
2391 if (of_node_test_and_set_flag(nc, OF_POPULATED))
2392 continue;
2393 spi = of_register_spi_device(ctlr, nc);
2394 if (IS_ERR(spi)) {
2395 dev_warn(&ctlr->dev,
2396 "Failed to create SPI device for %pOF\n", nc);
2397 of_node_clear_flag(nc, OF_POPULATED);
2398 }
2399 }
2400 }
2401 #else
2402 static void of_register_spi_devices(struct spi_controller *ctlr) { }
2403 #endif
2404
2405 /**
2406 * spi_new_ancillary_device() - Register ancillary SPI device
2407 * @spi: Pointer to the main SPI device registering the ancillary device
2408 * @chip_select: Chip Select of the ancillary device
2409 *
2410 * Register an ancillary SPI device; for example, some chips have a chip-select
2411 * for normal device usage and another one for setup/firmware upload.
2412 *
2413 * This may only be called from the main SPI device's probe routine.
2414 *
2415 * Return: a pointer to the new device, or ERR_PTR(-errno) on failure
2416 */
2417 struct spi_device *spi_new_ancillary_device(struct spi_device *spi,
2418 u8 chip_select)
2419 {
2420 struct spi_controller *ctlr = spi->controller;
2421 struct spi_device *ancillary;
2422 int rc = 0;
2423
2424 /* Alloc an spi_device */
2425 ancillary = spi_alloc_device(ctlr);
2426 if (!ancillary) {
2427 rc = -ENOMEM;
2428 goto err_out;
2429 }
2430
2431 strscpy(ancillary->modalias, "dummy", sizeof(ancillary->modalias));
2432
2433 /* Use provided chip-select for ancillary device */
2434 spi_set_chipselect(ancillary, 0, chip_select);
2435
2436 /* Take over SPI mode/speed from SPI main device */
2437 ancillary->max_speed_hz = spi->max_speed_hz;
2438 ancillary->mode = spi->mode;
2439
2440 WARN_ON(!mutex_is_locked(&ctlr->add_lock));
2441
2442 /* Register the new device */
2443 rc = __spi_add_device(ancillary);
2444 if (rc) {
2445 dev_err(&spi->dev, "failed to register ancillary device\n");
2446 goto err_out;
2447 }
2448
2449 return ancillary;
2450
2451 err_out:
2452 spi_dev_put(ancillary);
2453 return ERR_PTR(rc);
2454 }
2455 EXPORT_SYMBOL_GPL(spi_new_ancillary_device);
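
/*
 * Example (illustrative only): sketch of a client driver's probe routine
 * grabbing a second chip select for firmware upload, as described above.
 * The chip-select number is hypothetical.
 */
static int __maybe_unused example_client_probe(struct spi_device *spi)
{
	struct spi_device *ancillary;

	ancillary = spi_new_ancillary_device(spi, 1);
	if (IS_ERR(ancillary))
		return PTR_ERR(ancillary);

	/* use @spi for normal I/O and @ancillary for setup/firmware */
	return 0;
}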
2456
2457 #ifdef CONFIG_ACPI
2458 struct acpi_spi_lookup {
2459 struct spi_controller *ctlr;
2460 u32 max_speed_hz;
2461 u32 mode;
2462 int irq;
2463 u8 bits_per_word;
2464 u8 chip_select;
2465 int n;
2466 int index;
2467 };
2468
2469 static int acpi_spi_count(struct acpi_resource *ares, void *data)
2470 {
2471 struct acpi_resource_spi_serialbus *sb;
2472 int *count = data;
2473
2474 if (ares->type != ACPI_RESOURCE_TYPE_SERIAL_BUS)
2475 return 1;
2476
2477 sb = &ares->data.spi_serial_bus;
2478 if (sb->type != ACPI_RESOURCE_SERIAL_TYPE_SPI)
2479 return 1;
2480
2481 *count = *count + 1;
2482
2483 return 1;
2484 }
2485
2486 /**
2487 * acpi_spi_count_resources - Count the number of SpiSerialBus resources
2488 * @adev: ACPI device
2489 *
2490 * Return: the number of SpiSerialBus resources in the ACPI-device's
2491 * resource-list; or a negative error code.
2492 */
2493 int acpi_spi_count_resources(struct acpi_device *adev)
2494 {
2495 LIST_HEAD(r);
2496 int count = 0;
2497 int ret;
2498
2499 ret = acpi_dev_get_resources(adev, &r, acpi_spi_count, &count);
2500 if (ret < 0)
2501 return ret;
2502
2503 acpi_dev_free_resource_list(&r);
2504
2505 return count;
2506 }
2507 EXPORT_SYMBOL_GPL(acpi_spi_count_resources);
2508
2509 static void acpi_spi_parse_apple_properties(struct acpi_device *dev,
2510 struct acpi_spi_lookup *lookup)
2511 {
2512 const union acpi_object *obj;
2513
2514 if (!x86_apple_machine)
2515 return;
2516
2517 if (!acpi_dev_get_property(dev, "spiSclkPeriod", ACPI_TYPE_BUFFER, &obj)
2518 && obj->buffer.length >= 4)
2519 lookup->max_speed_hz = NSEC_PER_SEC / *(u32 *)obj->buffer.pointer;
2520
2521 if (!acpi_dev_get_property(dev, "spiWordSize", ACPI_TYPE_BUFFER, &obj)
2522 && obj->buffer.length == 8)
2523 lookup->bits_per_word = *(u64 *)obj->buffer.pointer;
2524
2525 if (!acpi_dev_get_property(dev, "spiBitOrder", ACPI_TYPE_BUFFER, &obj)
2526 && obj->buffer.length == 8 && !*(u64 *)obj->buffer.pointer)
2527 lookup->mode |= SPI_LSB_FIRST;
2528
2529 if (!acpi_dev_get_property(dev, "spiSPO", ACPI_TYPE_BUFFER, &obj)
2530 && obj->buffer.length == 8 && *(u64 *)obj->buffer.pointer)
2531 lookup->mode |= SPI_CPOL;
2532
2533 if (!acpi_dev_get_property(dev, "spiSPH", ACPI_TYPE_BUFFER, &obj)
2534 && obj->buffer.length == 8 && *(u64 *)obj->buffer.pointer)
2535 lookup->mode |= SPI_CPHA;
2536 }
2537
2538 static struct spi_controller *acpi_spi_find_controller_by_adev(struct acpi_device *adev);
2539
2540 static int acpi_spi_add_resource(struct acpi_resource *ares, void *data)
2541 {
2542 struct acpi_spi_lookup *lookup = data;
2543 struct spi_controller *ctlr = lookup->ctlr;
2544
2545 if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
2546 struct acpi_resource_spi_serialbus *sb;
2547 acpi_handle parent_handle;
2548 acpi_status status;
2549
2550 sb = &ares->data.spi_serial_bus;
2551 if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_SPI) {
2552
2553 if (lookup->index != -1 && lookup->n++ != lookup->index)
2554 return 1;
2555
2556 status = acpi_get_handle(NULL,
2557 sb->resource_source.string_ptr,
2558 &parent_handle);
2559
2560 if (ACPI_FAILURE(status))
2561 return -ENODEV;
2562
2563 if (ctlr) {
2564 if (ACPI_HANDLE(ctlr->dev.parent) != parent_handle)
2565 return -ENODEV;
2566 } else {
2567 struct acpi_device *adev;
2568
2569 adev = acpi_fetch_acpi_dev(parent_handle);
2570 if (!adev)
2571 return -ENODEV;
2572
2573 ctlr = acpi_spi_find_controller_by_adev(adev);
2574 if (!ctlr)
2575 return -EPROBE_DEFER;
2576
2577 lookup->ctlr = ctlr;
2578 }
2579
2580 /*
2581 * ACPI DeviceSelection numbering is handled by the
2582 * host controller driver in Windows and can vary
2583 * from driver to driver. In Linux we always expect
2584 * 0 .. max - 1 so we need to ask the driver to
2585 * translate between the two schemes.
2586 */
2587 if (ctlr->fw_translate_cs) {
2588 int cs = ctlr->fw_translate_cs(ctlr,
2589 sb->device_selection);
2590 if (cs < 0)
2591 return cs;
2592 lookup->chip_select = cs;
2593 } else {
2594 lookup->chip_select = sb->device_selection;
2595 }
2596
2597 lookup->max_speed_hz = sb->connection_speed;
2598 lookup->bits_per_word = sb->data_bit_length;
2599
2600 if (sb->clock_phase == ACPI_SPI_SECOND_PHASE)
2601 lookup->mode |= SPI_CPHA;
2602 if (sb->clock_polarity == ACPI_SPI_START_HIGH)
2603 lookup->mode |= SPI_CPOL;
2604 if (sb->device_polarity == ACPI_SPI_ACTIVE_HIGH)
2605 lookup->mode |= SPI_CS_HIGH;
2606 }
2607 } else if (lookup->irq < 0) {
2608 struct resource r;
2609
2610 if (acpi_dev_resource_interrupt(ares, 0, &r))
2611 lookup->irq = r.start;
2612 }
2613
2614 /* Always tell the ACPI core to skip this resource */
2615 return 1;
2616 }
2617
2618 /**
2619 * acpi_spi_device_alloc - Allocate a spi device, and fill it in with ACPI information
2620 * @ctlr: controller to which the spi device belongs
2621 * @adev: ACPI Device for the spi device
2622 * @index: Index of the spi resource inside the ACPI Node
2623 *
2624 * This should be used to allocate a new SPI device from an ACPI device node.
2625 * The caller is responsible for calling spi_add_device() to register the SPI device.
2626 *
2627 * If ctlr is set to NULL, the Controller for the SPI device will be looked up
2628 * using the resource.
2629 * If index is set to -1, index is not used.
2630 * Note: If index is -1, ctlr must be set.
2631 *
2632 * Return: a pointer to the new device, or ERR_PTR on error.
2633 */
2634 struct spi_device *acpi_spi_device_alloc(struct spi_controller *ctlr,
2635 struct acpi_device *adev,
2636 int index)
2637 {
2638 acpi_handle parent_handle = NULL;
2639 struct list_head resource_list;
2640 struct acpi_spi_lookup lookup = {};
2641 struct spi_device *spi;
2642 int ret;
2643
2644 if (!ctlr && index == -1)
2645 return ERR_PTR(-EINVAL);
2646
2647 lookup.ctlr = ctlr;
2648 lookup.irq = -1;
2649 lookup.index = index;
2650 lookup.n = 0;
2651
2652 INIT_LIST_HEAD(&resource_list);
2653 ret = acpi_dev_get_resources(adev, &resource_list,
2654 acpi_spi_add_resource, &lookup);
2655 acpi_dev_free_resource_list(&resource_list);
2656
2657 if (ret < 0)
2658 /* Found SPI in _CRS but it points to another controller */
2659 return ERR_PTR(ret);
2660
2661 if (!lookup.max_speed_hz &&
2662 ACPI_SUCCESS(acpi_get_parent(adev->handle, &parent_handle)) &&
2663 ACPI_HANDLE(lookup.ctlr->dev.parent) == parent_handle) {
2664 /* Apple does not use _CRS but nested devices for SPI slaves */
2665 acpi_spi_parse_apple_properties(adev, &lookup);
2666 }
2667
2668 if (!lookup.max_speed_hz)
2669 return ERR_PTR(-ENODEV);
2670
2671 spi = spi_alloc_device(lookup.ctlr);
2672 if (!spi) {
2673 dev_err(&lookup.ctlr->dev, "failed to allocate SPI device for %s\n",
2674 dev_name(&adev->dev));
2675 return ERR_PTR(-ENOMEM);
2676 }
2677
2678 ACPI_COMPANION_SET(&spi->dev, adev);
2679 spi->max_speed_hz = lookup.max_speed_hz;
2680 spi->mode |= lookup.mode;
2681 spi->irq = lookup.irq;
2682 spi->bits_per_word = lookup.bits_per_word;
2683 spi_set_chipselect(spi, 0, lookup.chip_select);
2684
2685 return spi;
2686 }
2687 EXPORT_SYMBOL_GPL(acpi_spi_device_alloc);
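
/*
 * Example (illustrative only): sketch of instantiating the second
 * SpiSerialBus resource of an ACPI node, letting the core look up the
 * controller from the resource itself (ctlr == NULL). Pairs with
 * acpi_spi_count_resources() above; error handling is abbreviated.
 */
static int __maybe_unused example_add_second_acpi_resource(struct acpi_device *adev)
{
	struct spi_device *spi;
	int ret;

	spi = acpi_spi_device_alloc(NULL, adev, 1);
	if (IS_ERR(spi))
		return PTR_ERR(spi);

	ret = spi_add_device(spi);
	if (ret)
		spi_dev_put(spi);

	return ret;
}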
2688
2689 static acpi_status acpi_register_spi_device(struct spi_controller *ctlr,
2690 struct acpi_device *adev)
2691 {
2692 struct spi_device *spi;
2693
2694 if (acpi_bus_get_status(adev) || !adev->status.present ||
2695 acpi_device_enumerated(adev))
2696 return AE_OK;
2697
2698 spi = acpi_spi_device_alloc(ctlr, adev, -1);
2699 if (IS_ERR(spi)) {
2700 if (PTR_ERR(spi) == -ENOMEM)
2701 return AE_NO_MEMORY;
2702 else
2703 return AE_OK;
2704 }
2705
2706 acpi_set_modalias(adev, acpi_device_hid(adev), spi->modalias,
2707 sizeof(spi->modalias));
2708
2709 if (spi->irq < 0)
2710 spi->irq = acpi_dev_gpio_irq_get(adev, 0);
2711
2712 acpi_device_set_enumerated(adev);
2713
2714 adev->power.flags.ignore_parent = true;
2715 if (spi_add_device(spi)) {
2716 adev->power.flags.ignore_parent = false;
2717 dev_err(&ctlr->dev, "failed to add SPI device %s from ACPI\n",
2718 dev_name(&adev->dev));
2719 spi_dev_put(spi);
2720 }
2721
2722 return AE_OK;
2723 }
2724
2725 static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level,
2726 void *data, void **return_value)
2727 {
2728 struct acpi_device *adev = acpi_fetch_acpi_dev(handle);
2729 struct spi_controller *ctlr = data;
2730
2731 if (!adev)
2732 return AE_OK;
2733
2734 return acpi_register_spi_device(ctlr, adev);
2735 }
2736
2737 #define SPI_ACPI_ENUMERATE_MAX_DEPTH 32
2738
2739 static void acpi_register_spi_devices(struct spi_controller *ctlr)
2740 {
2741 acpi_status status;
2742 acpi_handle handle;
2743
2744 handle = ACPI_HANDLE(ctlr->dev.parent);
2745 if (!handle)
2746 return;
2747
2748 status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
2749 SPI_ACPI_ENUMERATE_MAX_DEPTH,
2750 acpi_spi_add_device, NULL, ctlr, NULL);
2751 if (ACPI_FAILURE(status))
2752 dev_warn(&ctlr->dev, "failed to enumerate SPI slaves\n");
2753 }
2754 #else
2755 static inline void acpi_register_spi_devices(struct spi_controller *ctlr) {}
2756 #endif /* CONFIG_ACPI */
2757
2758 static void spi_controller_release(struct device *dev)
2759 {
2760 struct spi_controller *ctlr;
2761
2762 ctlr = container_of(dev, struct spi_controller, dev);
2763 kfree(ctlr);
2764 }
2765
2766 static struct class spi_master_class = {
2767 .name = "spi_master",
2768 .dev_release = spi_controller_release,
2769 .dev_groups = spi_master_groups,
2770 };
2771
2772 #ifdef CONFIG_SPI_SLAVE
2773 /**
2774 * spi_slave_abort - abort the ongoing transfer request on an SPI slave
2775 * controller
2776 * @spi: device used for the current transfer
2777 */
2778 int spi_slave_abort(struct spi_device *spi)
2779 {
2780 struct spi_controller *ctlr = spi->controller;
2781
2782 if (spi_controller_is_slave(ctlr) && ctlr->slave_abort)
2783 return ctlr->slave_abort(ctlr);
2784
2785 return -ENOTSUPP;
2786 }
2787 EXPORT_SYMBOL_GPL(spi_slave_abort);
2788
2789 int spi_target_abort(struct spi_device *spi)
2790 {
2791 struct spi_controller *ctlr = spi->controller;
2792
2793 if (spi_controller_is_target(ctlr) && ctlr->target_abort)
2794 return ctlr->target_abort(ctlr);
2795
2796 return -ENOTSUPP;
2797 }
2798 EXPORT_SYMBOL_GPL(spi_target_abort);
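
/*
 * Example (illustrative only): a target/slave protocol driver bailing out
 * of a transfer that the remote controller abandoned; the trigger is
 * hypothetical, spi_target_abort() is the real API.
 */
static void __maybe_unused example_handle_remote_gone(struct spi_device *spi)
{
	if (spi_target_abort(spi))
		dev_warn(&spi->dev, "could not abort ongoing transfer\n");
}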
2799
2800 static ssize_t slave_show(struct device *dev, struct device_attribute *attr,
2801 char *buf)
2802 {
2803 struct spi_controller *ctlr = container_of(dev, struct spi_controller,
2804 dev);
2805 struct device *child;
2806
2807 child = device_find_any_child(&ctlr->dev);
2808 return sysfs_emit(buf, "%s\n", child ? to_spi_device(child)->modalias : NULL);
2809 }
2810
2811 static ssize_t slave_store(struct device *dev, struct device_attribute *attr,
2812 const char *buf, size_t count)
2813 {
2814 struct spi_controller *ctlr = container_of(dev, struct spi_controller,
2815 dev);
2816 struct spi_device *spi;
2817 struct device *child;
2818 char name[32];
2819 int rc;
2820
2821 rc = sscanf(buf, "%31s", name);
2822 if (rc != 1 || !name[0])
2823 return -EINVAL;
2824
2825 child = device_find_any_child(&ctlr->dev);
2826 if (child) {
2827 /* Remove registered slave */
2828 device_unregister(child);
2829 put_device(child);
2830 }
2831
2832 if (strcmp(name, "(null)")) {
2833 /* Register new slave */
2834 spi = spi_alloc_device(ctlr);
2835 if (!spi)
2836 return -ENOMEM;
2837
2838 strscpy(spi->modalias, name, sizeof(spi->modalias));
2839
2840 rc = spi_add_device(spi);
2841 if (rc) {
2842 spi_dev_put(spi);
2843 return rc;
2844 }
2845 }
2846
2847 return count;
2848 }
2849
2850 static DEVICE_ATTR_RW(slave);
2851
2852 static struct attribute *spi_slave_attrs[] = {
2853 &dev_attr_slave.attr,
2854 NULL,
2855 };
2856
2857 static const struct attribute_group spi_slave_group = {
2858 .attrs = spi_slave_attrs,
2859 };
2860
2861 static const struct attribute_group *spi_slave_groups[] = {
2862 &spi_controller_statistics_group,
2863 &spi_slave_group,
2864 NULL,
2865 };
2866
2867 static struct class spi_slave_class = {
2868 .name = "spi_slave",
2869 .dev_release = spi_controller_release,
2870 .dev_groups = spi_slave_groups,
2871 };
2872 #else
2873 extern struct class spi_slave_class; /* dummy */
2874 #endif
2875
2876 /**
2877 * __spi_alloc_controller - allocate an SPI master or slave controller
2878 * @dev: the controller, possibly using the platform_bus
2879 * @size: how much zeroed driver-private data to allocate; the pointer to this
2880 * memory is in the driver_data field of the returned device, accessible
2881 * with spi_controller_get_devdata(); the memory is cacheline aligned;
2882 * drivers granting DMA access to portions of their private data need to
2883 * round up @size using ALIGN(size, dma_get_cache_alignment()).
2884 * @slave: flag indicating whether to allocate an SPI master (false) or SPI
2885 * slave (true) controller
2886 * Context: can sleep
2887 *
2888 * This call is used only by SPI controller drivers, which are the
2889 * only ones directly touching chip registers. It's how they allocate
2890 * an spi_controller structure, prior to calling spi_register_controller().
2891 *
2892 * This must be called from context that can sleep.
2893 *
2894 * The caller is responsible for assigning the bus number and initializing the
2895 * controller's methods before calling spi_register_controller(); and (after
2896 * errors adding the device) calling spi_controller_put() to prevent a memory
2897 * leak.
2898 *
2899 * Return: the SPI controller structure on success, else NULL.
2900 */
2901 struct spi_controller *__spi_alloc_controller(struct device *dev,
2902 unsigned int size, bool slave)
2903 {
2904 struct spi_controller *ctlr;
2905 size_t ctlr_size = ALIGN(sizeof(*ctlr), dma_get_cache_alignment());
2906
2907 if (!dev)
2908 return NULL;
2909
2910 ctlr = kzalloc(size + ctlr_size, GFP_KERNEL);
2911 if (!ctlr)
2912 return NULL;
2913
2914 device_initialize(&ctlr->dev);
2915 INIT_LIST_HEAD(&ctlr->queue);
2916 spin_lock_init(&ctlr->queue_lock);
2917 spin_lock_init(&ctlr->bus_lock_spinlock);
2918 mutex_init(&ctlr->bus_lock_mutex);
2919 mutex_init(&ctlr->io_mutex);
2920 mutex_init(&ctlr->add_lock);
2921 ctlr->bus_num = -1;
2922 ctlr->num_chipselect = 1;
2923 ctlr->slave = slave;
2924 if (IS_ENABLED(CONFIG_SPI_SLAVE) && slave)
2925 ctlr->dev.class = &spi_slave_class;
2926 else
2927 ctlr->dev.class = &spi_master_class;
2928 ctlr->dev.parent = dev;
2929 pm_suspend_ignore_children(&ctlr->dev, true);
2930 spi_controller_set_devdata(ctlr, (void *)ctlr + ctlr_size);
2931
2932 return ctlr;
2933 }
2934 EXPORT_SYMBOL_GPL(__spi_alloc_controller);
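
/*
 * Example (illustrative only): the classic, non-devres allocation pattern
 * described above, via the spi_alloc_master() wrapper. The amount of
 * driver-private data (64 bytes) and the chipselect count are made up.
 */
static struct spi_controller *__maybe_unused example_alloc_master(struct device *dev)
{
	struct spi_controller *ctlr;

	ctlr = spi_alloc_master(dev, 64);
	if (!ctlr)
		return NULL;

	ctlr->num_chipselect = 4;
	/* on any later error path: spi_controller_put(ctlr) */
	return ctlr;
}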
2935
2936 static void devm_spi_release_controller(struct device *dev, void *ctlr)
2937 {
2938 spi_controller_put(*(struct spi_controller **)ctlr);
2939 }
2940
2941 /**
2942 * __devm_spi_alloc_controller - resource-managed __spi_alloc_controller()
2943 * @dev: physical device of SPI controller
2944 * @size: how much zeroed driver-private data to allocate
2945 * @slave: whether to allocate an SPI master (false) or SPI slave (true)
2946 * Context: can sleep
2947 *
2948 * Allocate an SPI controller and automatically release a reference on it
2949 * when @dev is unbound from its driver. Drivers are thus relieved from
2950 * having to call spi_controller_put().
2951 *
2952 * The arguments to this function are identical to __spi_alloc_controller().
2953 *
2954 * Return: the SPI controller structure on success, else NULL.
2955 */
2956 struct spi_controller *__devm_spi_alloc_controller(struct device *dev,
2957 unsigned int size,
2958 bool slave)
2959 {
2960 struct spi_controller **ptr, *ctlr;
2961
2962 ptr = devres_alloc(devm_spi_release_controller, sizeof(*ptr),
2963 GFP_KERNEL);
2964 if (!ptr)
2965 return NULL;
2966
2967 ctlr = __spi_alloc_controller(dev, size, slave);
2968 if (ctlr) {
2969 ctlr->devm_allocated = true;
2970 *ptr = ctlr;
2971 devres_add(dev, ptr);
2972 } else {
2973 devres_free(ptr);
2974 }
2975
2976 return ctlr;
2977 }
2978 EXPORT_SYMBOL_GPL(__devm_spi_alloc_controller);
2979
2980 /**
2981 * spi_get_gpio_descs() - grab chip select GPIOs for the master
2982 * @ctlr: The SPI master to grab GPIO descriptors for
2983 */
2984 static int spi_get_gpio_descs(struct spi_controller *ctlr)
2985 {
2986 int nb, i;
2987 struct gpio_desc **cs;
2988 struct device *dev = &ctlr->dev;
2989 unsigned long native_cs_mask = 0;
2990 unsigned int num_cs_gpios = 0;
2991
2992 nb = gpiod_count(dev, "cs");
2993 if (nb < 0) {
2994 /* No GPIOs at all is fine, else return the error */
2995 if (nb == -ENOENT)
2996 return 0;
2997 return nb;
2998 }
2999
3000 ctlr->num_chipselect = max_t(int, nb, ctlr->num_chipselect);
3001
3002 cs = devm_kcalloc(dev, ctlr->num_chipselect, sizeof(*cs),
3003 GFP_KERNEL);
3004 if (!cs)
3005 return -ENOMEM;
3006 ctlr->cs_gpiods = cs;
3007
3008 for (i = 0; i < nb; i++) {
3009 /*
3010 * Most chipselects are active low, the inverted
3011 * semantics are handled by special quirks in gpiolib,
3012 * so initializing them GPIOD_OUT_LOW here means
3013 * "unasserted", in most cases this will drive the physical
3014 * line high.
3015 */
3016 cs[i] = devm_gpiod_get_index_optional(dev, "cs", i,
3017 GPIOD_OUT_LOW);
3018 if (IS_ERR(cs[i]))
3019 return PTR_ERR(cs[i]);
3020
3021 if (cs[i]) {
3022 /*
3023 * If we find a CS GPIO, name it after the device and
3024 * chip select line.
3025 */
3026 char *gpioname;
3027
3028 gpioname = devm_kasprintf(dev, GFP_KERNEL, "%s CS%d",
3029 dev_name(dev), i);
3030 if (!gpioname)
3031 return -ENOMEM;
3032 gpiod_set_consumer_name(cs[i], gpioname);
3033 num_cs_gpios++;
3034 continue;
3035 }
3036
3037 if (ctlr->max_native_cs && i >= ctlr->max_native_cs) {
3038 dev_err(dev, "Invalid native chip select %d\n", i);
3039 return -EINVAL;
3040 }
3041 native_cs_mask |= BIT(i);
3042 }
3043
3044 ctlr->unused_native_cs = ffs(~native_cs_mask) - 1;
3045
3046 if ((ctlr->flags & SPI_CONTROLLER_GPIO_SS) && num_cs_gpios &&
3047 ctlr->max_native_cs && ctlr->unused_native_cs >= ctlr->max_native_cs) {
3048 dev_err(dev, "No unused native chip select available\n");
3049 return -EINVAL;
3050 }
3051
3052 return 0;
3053 }
3054
3055 static int spi_controller_check_ops(struct spi_controller *ctlr)
3056 {
3057 /*
3058 * The controller may implement only the high-level SPI-memory like
3059 * operations if it does not support regular SPI transfers, and this is
3060 * a valid use case.
3061 * If ->mem_ops or ->mem_ops->exec_op is NULL, we request that at least
3062 * one of the ->transfer_xxx() methods be implemented.
3063 */
3064 if (!ctlr->mem_ops || !ctlr->mem_ops->exec_op) {
3065 if (!ctlr->transfer && !ctlr->transfer_one &&
3066 !ctlr->transfer_one_message) {
3067 return -EINVAL;
3068 }
3069 }
3070
3071 return 0;
3072 }
3073
3074 /* Allocate dynamic bus number using Linux idr */
3075 static int spi_controller_id_alloc(struct spi_controller *ctlr, int start, int end)
3076 {
3077 int id;
3078
3079 mutex_lock(&board_lock);
3080 id = idr_alloc(&spi_master_idr, ctlr, start, end, GFP_KERNEL);
3081 mutex_unlock(&board_lock);
3082 if (WARN(id < 0, "couldn't get idr"))
3083 return id == -ENOSPC ? -EBUSY : id;
3084 ctlr->bus_num = id;
3085 return 0;
3086 }
3087
3088 /**
3089 * spi_register_controller - register SPI master or slave controller
3090 * @ctlr: initialized master, originally from spi_alloc_master() or
3091 * spi_alloc_slave()
3092 * Context: can sleep
3093 *
3094 * SPI controllers connect to their drivers using some non-SPI bus,
3095 * such as the platform bus. The final stage of probe() in that code
3096 * includes calling spi_register_controller() to hook up to this SPI bus glue.
3097 *
3098 * SPI controllers use board specific (often SOC specific) bus numbers,
3099 * and board-specific addressing for SPI devices combines those numbers
3100 * with chip select numbers. Since SPI does not directly support dynamic
3101 * device identification, boards need configuration tables telling which
3102 * chip is at which address.
3103 *
3104 * This must be called from context that can sleep. It returns zero on
3105 * success, else a negative error code (dropping the controller's refcount).
3106 * After a successful return, the caller is responsible for calling
3107 * spi_unregister_controller().
3108 *
3109 * Return: zero on success, else a negative error code.
3110 */
3111 int spi_register_controller(struct spi_controller *ctlr)
3112 {
3113 struct device *dev = ctlr->dev.parent;
3114 struct boardinfo *bi;
3115 int first_dynamic;
3116 int status;
3117
3118 if (!dev)
3119 return -ENODEV;
3120
3121 /*
3122 * Make sure all necessary hooks are implemented before registering
3123 * the SPI controller.
3124 */
3125 status = spi_controller_check_ops(ctlr);
3126 if (status)
3127 return status;
3128
3129 if (ctlr->bus_num < 0)
3130 ctlr->bus_num = of_alias_get_id(ctlr->dev.of_node, "spi");
3131 if (ctlr->bus_num >= 0) {
3132 /* Controllers with a fixed bus number must claim exactly that number */
3133 status = spi_controller_id_alloc(ctlr, ctlr->bus_num, ctlr->bus_num + 1);
3134 if (status)
3135 return status;
3136 }
3137 if (ctlr->bus_num < 0) {
3138 first_dynamic = of_alias_get_highest_id("spi");
3139 if (first_dynamic < 0)
3140 first_dynamic = 0;
3141 else
3142 first_dynamic++;
3143
3144 status = spi_controller_id_alloc(ctlr, first_dynamic, 0);
3145 if (status)
3146 return status;
3147 }
3148 ctlr->bus_lock_flag = 0;
3149 init_completion(&ctlr->xfer_completion);
3150 init_completion(&ctlr->cur_msg_completion);
3151 if (!ctlr->max_dma_len)
3152 ctlr->max_dma_len = INT_MAX;
3153
3154 /*
3155 * Register the device, then userspace will see it.
3156 * Registration fails if the bus ID is in use.
3157 */
3158 dev_set_name(&ctlr->dev, "spi%u", ctlr->bus_num);
3159
3160 if (!spi_controller_is_slave(ctlr) && ctlr->use_gpio_descriptors) {
3161 status = spi_get_gpio_descs(ctlr);
3162 if (status)
3163 goto free_bus_id;
3164 /*
3165 * A controller using GPIO descriptors always
3166 * supports SPI_CS_HIGH if need be.
3167 */
3168 ctlr->mode_bits |= SPI_CS_HIGH;
3169 }
3170
3171 /*
3172 * Even if it's just one always-selected device, there must
3173 * be at least one chipselect.
3174 */
3175 if (!ctlr->num_chipselect) {
3176 status = -EINVAL;
3177 goto free_bus_id;
3178 }
3179
3180 /* Setting last_cs to -1 means no chip selected */
3181 ctlr->last_cs = -1;
3182
3183 status = device_add(&ctlr->dev);
3184 if (status < 0)
3185 goto free_bus_id;
3186 dev_dbg(dev, "registered %s %s\n",
3187 spi_controller_is_slave(ctlr) ? "slave" : "master",
3188 dev_name(&ctlr->dev));
3189
3190 /*
3191 * If we're using a queued driver, start the queue. Note that we don't
3192 * need the queueing logic if the driver is only supporting high-level
3193 * memory operations.
3194 */
3195 if (ctlr->transfer) {
3196 dev_info(dev, "controller is unqueued, this is deprecated\n");
3197 } else if (ctlr->transfer_one || ctlr->transfer_one_message) {
3198 status = spi_controller_initialize_queue(ctlr);
3199 if (status) {
3200 device_del(&ctlr->dev);
3201 goto free_bus_id;
3202 }
3203 }
3204 /* Add statistics */
3205 ctlr->pcpu_statistics = spi_alloc_pcpu_stats(dev);
3206 if (!ctlr->pcpu_statistics) {
3207 dev_err(dev, "Error allocating per-cpu statistics\n");
3208 status = -ENOMEM;
3209 goto destroy_queue;
3210 }
3211
3212 mutex_lock(&board_lock);
3213 list_add_tail(&ctlr->list, &spi_controller_list);
3214 list_for_each_entry(bi, &board_list, list)
3215 spi_match_controller_to_boardinfo(ctlr, &bi->board_info);
3216 mutex_unlock(&board_lock);
3217
3218 /* Register devices from the device tree and ACPI */
3219 of_register_spi_devices(ctlr);
3220 acpi_register_spi_devices(ctlr);
3221 return status;
3222
3223 destroy_queue:
3224 spi_destroy_queue(ctlr);
3225 free_bus_id:
3226 mutex_lock(&board_lock);
3227 idr_remove(&spi_master_idr, ctlr->bus_num);
3228 mutex_unlock(&board_lock);
3229 return status;
3230 }
3231 EXPORT_SYMBOL_GPL(spi_register_controller);
3232
3233 static void devm_spi_unregister(struct device *dev, void *res)
3234 {
3235 spi_unregister_controller(*(struct spi_controller **)res);
3236 }
3237
3238 /**
3239 * devm_spi_register_controller - register managed SPI master or slave
3240 * controller
3241 * @dev: device managing SPI controller
3242 * @ctlr: initialized controller, originally from spi_alloc_master() or
3243 * spi_alloc_slave()
3244 * Context: can sleep
3245 *
3246 * Register an SPI controller as with spi_register_controller(); the controller
3247 * is automatically unregistered and freed when @dev is unbound.
3248 *
3249 * Return: zero on success, else a negative error code.
3250 */
3251 int devm_spi_register_controller(struct device *dev,
3252 struct spi_controller *ctlr)
3253 {
3254 struct spi_controller **ptr;
3255 int ret;
3256
3257 ptr = devres_alloc(devm_spi_unregister, sizeof(*ptr), GFP_KERNEL);
3258 if (!ptr)
3259 return -ENOMEM;
3260
3261 ret = spi_register_controller(ctlr);
3262 if (!ret) {
3263 *ptr = ctlr;
3264 devres_add(dev, ptr);
3265 } else {
3266 devres_free(ptr);
3267 }
3268
3269 return ret;
3270 }
3271 EXPORT_SYMBOL_GPL(devm_spi_register_controller);
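
/*
 * Example (illustrative only): sketch of a fully device-managed probe
 * pairing devm_spi_alloc_master() with devm_spi_register_controller(), so
 * neither spi_controller_put() nor spi_unregister_controller() needs to be
 * called by hand. A real driver would also fill in its transfer hooks.
 */
static int __maybe_unused example_devm_probe(struct device *dev)
{
	struct spi_controller *ctlr;

	ctlr = devm_spi_alloc_master(dev, 0);
	if (!ctlr)
		return -ENOMEM;

	/* hypothetical: ctlr->transfer_one = example_transfer_one; */

	return devm_spi_register_controller(dev, ctlr);
}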
3272
3273 static int __unregister(struct device *dev, void *null)
3274 {
3275 spi_unregister_device(to_spi_device(dev));
3276 return 0;
3277 }
3278
3279 /**
3280 * spi_unregister_controller - unregister SPI master or slave controller
3281 * @ctlr: the controller being unregistered
3282 * Context: can sleep
3283 *
3284 * This call is used only by SPI controller drivers, which are the
3285 * only ones directly touching chip registers.
3286 *
3287 * This must be called from context that can sleep.
3288 *
3289 * Note that this function also drops a reference to the controller.
3290 */
3291 void spi_unregister_controller(struct spi_controller *ctlr)
3292 {
3293 struct spi_controller *found;
3294 int id = ctlr->bus_num;
3295
3296 /* Prevent addition of new devices, unregister existing ones */
3297 if (IS_ENABLED(CONFIG_SPI_DYNAMIC))
3298 mutex_lock(&ctlr->add_lock);
3299
3300 device_for_each_child(&ctlr->dev, NULL, __unregister);
3301
3302 /* First make sure that this controller was ever added */
3303 mutex_lock(&board_lock);
3304 found = idr_find(&spi_master_idr, id);
3305 mutex_unlock(&board_lock);
3306 if (ctlr->queued) {
3307 if (spi_destroy_queue(ctlr))
3308 dev_err(&ctlr->dev, "queue remove failed\n");
3309 }
3310 mutex_lock(&board_lock);
3311 list_del(&ctlr->list);
3312 mutex_unlock(&board_lock);
3313
3314 device_del(&ctlr->dev);
3315
3316 /* Free bus id */
3317 mutex_lock(&board_lock);
3318 if (found == ctlr)
3319 idr_remove(&spi_master_idr, id);
3320 mutex_unlock(&board_lock);
3321
3322 if (IS_ENABLED(CONFIG_SPI_DYNAMIC))
3323 mutex_unlock(&ctlr->add_lock);
3324
3325 /*
3326 * Release the last reference on the controller if its driver
3327 * has not yet been converted to devm_spi_alloc_master/slave().
3328 */
3329 if (!ctlr->devm_allocated)
3330 put_device(&ctlr->dev);
3331 }
3332 EXPORT_SYMBOL_GPL(spi_unregister_controller);
3333
3334 static inline int __spi_check_suspended(const struct spi_controller *ctlr)
3335 {
3336 return ctlr->flags & SPI_CONTROLLER_SUSPENDED ? -ESHUTDOWN : 0;
3337 }
3338
3339 static inline void __spi_mark_suspended(struct spi_controller *ctlr)
3340 {
3341 mutex_lock(&ctlr->bus_lock_mutex);
3342 ctlr->flags |= SPI_CONTROLLER_SUSPENDED;
3343 mutex_unlock(&ctlr->bus_lock_mutex);
3344 }
3345
3346 static inline void __spi_mark_resumed(struct spi_controller *ctlr)
3347 {
3348 mutex_lock(&ctlr->bus_lock_mutex);
3349 ctlr->flags &= ~SPI_CONTROLLER_SUSPENDED;
3350 mutex_unlock(&ctlr->bus_lock_mutex);
3351 }
3352
3353 int spi_controller_suspend(struct spi_controller *ctlr)
3354 {
3355 int ret = 0;
3356
3357 /* Basically no-ops for non-queued controllers */
3358 if (ctlr->queued) {
3359 ret = spi_stop_queue(ctlr);
3360 if (ret)
3361 dev_err(&ctlr->dev, "queue stop failed\n");
3362 }
3363
3364 __spi_mark_suspended(ctlr);
3365 return ret;
3366 }
3367 EXPORT_SYMBOL_GPL(spi_controller_suspend);
3368
3369 int spi_controller_resume(struct spi_controller *ctlr)
3370 {
3371 int ret = 0;
3372
3373 __spi_mark_resumed(ctlr);
3374
3375 if (ctlr->queued) {
3376 ret = spi_start_queue(ctlr);
3377 if (ret)
3378 dev_err(&ctlr->dev, "queue restart failed\n");
3379 }
3380 return ret;
3381 }
3382 EXPORT_SYMBOL_GPL(spi_controller_resume);
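
/*
 * Example (illustrative only): driver PM callbacks built directly on the
 * two helpers above, assuming the controller was stored as driver data.
 */
static int __maybe_unused example_pm_suspend(struct device *dev)
{
	struct spi_controller *ctlr = dev_get_drvdata(dev);

	return spi_controller_suspend(ctlr);
}

static int __maybe_unused example_pm_resume(struct device *dev)
{
	struct spi_controller *ctlr = dev_get_drvdata(dev);

	return spi_controller_resume(ctlr);
}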
3383
3384 /*-------------------------------------------------------------------------*/
3385
3386 /* Core methods for spi_message alterations */
3387
3388 static void __spi_replace_transfers_release(struct spi_controller *ctlr,
3389 struct spi_message *msg,
3390 void *res)
3391 {
3392 struct spi_replaced_transfers *rxfer = res;
3393 size_t i;
3394
3395 /* Call extra callback if requested */
3396 if (rxfer->release)
3397 rxfer->release(ctlr, msg, res);
3398
3399 /* Insert replaced transfers back into the message */
3400 list_splice(&rxfer->replaced_transfers, rxfer->replaced_after);
3401
3402 /* Remove the formerly inserted entries */
3403 for (i = 0; i < rxfer->inserted; i++)
3404 list_del(&rxfer->inserted_transfers[i].transfer_list);
3405 }
3406
3407 /**
3408 * spi_replace_transfers - replace transfers with several transfers
3409 * and register change with spi_message.resources
3410 * @msg: the spi_message we work upon
3411 * @xfer_first: the first spi_transfer we want to replace
3412 * @remove: number of transfers to remove
3413 * @insert: the number of transfers we want to insert instead
3414 * @release: extra release code necessary in some circumstances
3415 * @extradatasize: extra data to allocate (with alignment guarantees
3416 * of struct @spi_transfer)
3417 * @gfp: gfp flags
3418 *
3419 * Returns: pointer to @spi_replaced_transfers,
3420 * PTR_ERR(...) in case of errors.
3421 */
3422 static struct spi_replaced_transfers *spi_replace_transfers(
3423 struct spi_message *msg,
3424 struct spi_transfer *xfer_first,
3425 size_t remove,
3426 size_t insert,
3427 spi_replaced_release_t release,
3428 size_t extradatasize,
3429 gfp_t gfp)
3430 {
3431 struct spi_replaced_transfers *rxfer;
3432 struct spi_transfer *xfer;
3433 size_t i;
3434
3435 /* Allocate the structure using spi_res */
3436 rxfer = spi_res_alloc(msg->spi, __spi_replace_transfers_release,
3437 struct_size(rxfer, inserted_transfers, insert)
3438 + extradatasize,
3439 gfp);
3440 if (!rxfer)
3441 return ERR_PTR(-ENOMEM);
3442
3443 /* The release code to invoke before running the generic release */
3444 rxfer->release = release;
3445
3446 /* Assign extradata */
3447 if (extradatasize)
3448 rxfer->extradata =
3449 &rxfer->inserted_transfers[insert];
3450
3451 /* Init the replaced_transfers list */
3452 INIT_LIST_HEAD(&rxfer->replaced_transfers);
3453
3454 /*
3455 * Assign the list_entry after which we should reinsert
3456 * the @replaced_transfers - it may be spi_message.transfers!
3457 */
3458 rxfer->replaced_after = xfer_first->transfer_list.prev;
3459
3460 /* Remove the requested number of transfers */
3461 for (i = 0; i < remove; i++) {
3462 /*
3463 * If the entry after replaced_after is msg->transfers,
3464 * then we have been requested to remove more transfers
3465 * than are in the list.
3466 */
3467 if (rxfer->replaced_after->next == &msg->transfers) {
3468 dev_err(&msg->spi->dev,
3469 "requested to remove more spi_transfers than are available\n");
3470 /* Insert replaced transfers back into the message */
3471 list_splice(&rxfer->replaced_transfers,
3472 rxfer->replaced_after);
3473
3474 /* Free the spi_replace_transfer structure... */
3475 spi_res_free(rxfer);
3476
3477 /* ...and return with an error */
3478 return ERR_PTR(-EINVAL);
3479 }
3480
3481 /*
3482 * Remove the entry after replaced_after from list of
3483 * transfers and add it to list of replaced_transfers.
3484 */
3485 list_move_tail(rxfer->replaced_after->next,
3486 &rxfer->replaced_transfers);
3487 }
3488
3489 /*
3490 * Create copies of the given xfer with identical settings,
3491 * based on the first transfer that gets removed.
3492 */
3493 for (i = 0; i < insert; i++) {
3494 /* We need to run in reverse order */
3495 xfer = &rxfer->inserted_transfers[insert - 1 - i];
3496
3497 /* Copy all spi_transfer data */
3498 memcpy(xfer, xfer_first, sizeof(*xfer));
3499
3500 /* Add to list */
3501 list_add(&xfer->transfer_list, rxfer->replaced_after);
3502
3503 /* Clear cs_change and delay for all but the last */
3504 if (i) {
3505 xfer->cs_change = false;
3506 xfer->delay.value = 0;
3507 }
3508 }
3509
3510 /* Set up inserted... */
3511 rxfer->inserted = insert;
3512
3513 /* ...and register it with spi_res/spi_message */
3514 spi_res_add(msg, rxfer);
3515
3516 return rxfer;
3517 }
3518
3519 static int __spi_split_transfer_maxsize(struct spi_controller *ctlr,
3520 struct spi_message *msg,
3521 struct spi_transfer **xferp,
3522 size_t maxsize,
3523 gfp_t gfp)
3524 {
3525 struct spi_transfer *xfer = *xferp, *xfers;
3526 struct spi_replaced_transfers *srt;
3527 size_t offset;
3528 size_t count, i;
3529
3530 /* Calculate how many we have to replace */
3531 count = DIV_ROUND_UP(xfer->len, maxsize);
3532
3533 /* Create replacement */
3534 srt = spi_replace_transfers(msg, xfer, 1, count, NULL, 0, gfp);
3535 if (IS_ERR(srt))
3536 return PTR_ERR(srt);
3537 xfers = srt->inserted_transfers;
3538
3539 /*
3540 * Now handle each of those newly inserted spi_transfers.
3541 * Note that the replacement spi_transfers are all preset
3542 * to the same values as *xferp, so tx_buf, rx_buf and len
3543 * are all identical (as well as most others)
3544 * so we just have to fix up len and the pointers.
3545 *
3546 * This also includes support for the deprecated
3547 * spi_message.is_dma_mapped interface.
3548 */
3549
3550 /*
3551 * The first transfer just needs the length modified, so we
3552 * run it outside the loop.
3553 */
3554 xfers[0].len = min_t(size_t, maxsize, xfer[0].len);
3555
3556 /* All the others need rx_buf/tx_buf also set */
3557 for (i = 1, offset = maxsize; i < count; offset += maxsize, i++) {
3558 /* Update rx_buf, tx_buf and DMA */
3559 if (xfers[i].rx_buf)
3560 xfers[i].rx_buf += offset;
3561 if (xfers[i].rx_dma)
3562 xfers[i].rx_dma += offset;
3563 if (xfers[i].tx_buf)
3564 xfers[i].tx_buf += offset;
3565 if (xfers[i].tx_dma)
3566 xfers[i].tx_dma += offset;
3567
3568 /* Update length */
3569 xfers[i].len = min(maxsize, xfers[i].len - offset);
3570 }
3571
3572 /*
3573 * We set up xferp to the last entry we have inserted,
3574 * so that we skip those already split transfers.
3575 */
3576 *xferp = &xfers[count - 1];
3577
3578 /* Increment statistics counters */
3579 SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics,
3580 transfers_split_maxsize);
3581 SPI_STATISTICS_INCREMENT_FIELD(msg->spi->pcpu_statistics,
3582 transfers_split_maxsize);
3583
3584 return 0;
3585 }
3586
3587 /**
3588  * spi_split_transfers_maxsize - split SPI transfers into multiple transfers
3589  *                               when an individual transfer exceeds a
3590  *                               certain size
3591  * @ctlr: the @spi_controller for this transfer
3592  * @msg: the @spi_message to transform
3593  * @maxsize: the maximum length, in bytes, that any individual transfer may have
3594  * @gfp: GFP allocation flags
3595  *
3596  * Return: zero on success, else a negative error code
3597 */
3598 int spi_split_transfers_maxsize(struct spi_controller *ctlr,
3599 struct spi_message *msg,
3600 size_t maxsize,
3601 gfp_t gfp)
3602 {
3603 struct spi_transfer *xfer;
3604 int ret;
3605
3606 /*
3607 * Iterate over the transfer_list,
3608 * but note that xfer is advanced to the last transfer inserted
3609 	 * to avoid checking sizes again unnecessarily (note also that
3610 	 * xfer may belong to a different list by the time the
3611 	 * replacement has happened).
3612 */
3613 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
3614 if (xfer->len > maxsize) {
3615 ret = __spi_split_transfer_maxsize(ctlr, msg, &xfer,
3616 maxsize, gfp);
3617 if (ret)
3618 return ret;
3619 }
3620 }
3621
3622 return 0;
3623 }
3624 EXPORT_SYMBOL_GPL(spi_split_transfers_maxsize);
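
/*
 * Example (an illustrative sketch, not used by this file): a controller
 * driver whose DMA engine cannot handle segments larger than 64 KiB might
 * cap transfer sizes from its prepare_message() callback. The 64 KiB limit
 * and the callback placement are assumptions:
 *
 *	static int foo_prepare_message(struct spi_controller *ctlr,
 *				       struct spi_message *msg)
 *	{
 *		return spi_split_transfers_maxsize(ctlr, msg, SZ_64K,
 *						   GFP_KERNEL);
 *	}
 */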
3625
3626
3627 /**
3628 * spi_split_transfers_maxwords - split SPI transfers into multiple transfers
3629 * when an individual transfer exceeds a
3630 * certain number of SPI words
3631 * @ctlr: the @spi_controller for this transfer
3632 * @msg: the @spi_message to transform
3633 * @maxwords: the number of words to limit each transfer to
3634 * @gfp: GFP allocation flags
3635 *
3636  * Return: zero on success, else a negative error code
3637 */
3638 int spi_split_transfers_maxwords(struct spi_controller *ctlr,
3639 struct spi_message *msg,
3640 size_t maxwords,
3641 gfp_t gfp)
3642 {
3643 struct spi_transfer *xfer;
3644
3645 /*
3646 * Iterate over the transfer_list,
3647 * but note that xfer is advanced to the last transfer inserted
3648 	 * to avoid checking sizes again unnecessarily (note also that
3649 	 * xfer may belong to a different list by the time the
3650 	 * replacement has happened).
3651 */
3652 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
3653 size_t maxsize;
3654 int ret;
3655
3656 maxsize = maxwords * roundup_pow_of_two(BITS_TO_BYTES(xfer->bits_per_word));
3657 if (xfer->len > maxsize) {
3658 ret = __spi_split_transfer_maxsize(ctlr, msg, &xfer,
3659 maxsize, gfp);
3660 if (ret)
3661 return ret;
3662 }
3663 }
3664
3665 return 0;
3666 }
3667 EXPORT_SYMBOL_GPL(spi_split_transfers_maxwords);
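
/*
 * Example (an illustrative sketch): a controller whose FIFO holds at most
 * 16 words, whatever the word size, could split its messages to fit. The
 * depth of 16 is hypothetical, and ctlr/msg/ret are assumed to be in scope:
 *
 *	ret = spi_split_transfers_maxwords(ctlr, msg, 16, GFP_KERNEL);
 */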
3668
3669 /*-------------------------------------------------------------------------*/
3670
3671 /*
3672 * Core methods for SPI controller protocol drivers. Some of the
3673 * other core methods are currently defined as inline functions.
3674 */
3675
3676 static int __spi_validate_bits_per_word(struct spi_controller *ctlr,
3677 u8 bits_per_word)
3678 {
3679 if (ctlr->bits_per_word_mask) {
3680 /* Only 32 bits fit in the mask */
3681 if (bits_per_word > 32)
3682 return -EINVAL;
3683 if (!(ctlr->bits_per_word_mask & SPI_BPW_MASK(bits_per_word)))
3684 return -EINVAL;
3685 }
3686
3687 return 0;
3688 }
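
/*
 * Example (an illustrative sketch): a controller driver that handles 8-
 * and 16-bit words passes this check by advertising the corresponding
 * mask when it registers:
 *
 *	ctlr->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16);
 */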
3689
3690 /**
3691 * spi_set_cs_timing - configure CS setup, hold, and inactive delays
3692 * @spi: the device that requires specific CS timing configuration
3693 *
3694 * Return: zero on success, else a negative error code.
3695 */
3696 static int spi_set_cs_timing(struct spi_device *spi)
3697 {
3698 struct device *parent = spi->controller->dev.parent;
3699 int status = 0;
3700
3701 if (spi->controller->set_cs_timing && !spi_get_csgpiod(spi, 0)) {
3702 if (spi->controller->auto_runtime_pm) {
3703 status = pm_runtime_get_sync(parent);
3704 if (status < 0) {
3705 pm_runtime_put_noidle(parent);
3706 dev_err(&spi->controller->dev, "Failed to power device: %d\n",
3707 status);
3708 return status;
3709 }
3710
3711 status = spi->controller->set_cs_timing(spi);
3712 pm_runtime_mark_last_busy(parent);
3713 pm_runtime_put_autosuspend(parent);
3714 } else {
3715 status = spi->controller->set_cs_timing(spi);
3716 }
3717 }
3718 return status;
3719 }
3720
3721 /**
3722 * spi_setup - setup SPI mode and clock rate
3723 * @spi: the device whose settings are being modified
3724 * Context: can sleep, and no requests are queued to the device
3725 *
3726 * SPI protocol drivers may need to update the transfer mode if the
3727 * device doesn't work with its default. They may likewise need
3728 * to update clock rates or word sizes from initial values. This function
3729 * changes those settings, and must be called from a context that can sleep.
3730 * Except for SPI_CS_HIGH, which takes effect immediately, the changes take
3731 * effect the next time the device is selected and data is transferred to
3732 * or from it. When this function returns, the SPI device is deselected.
3733 *
3734 * Note that this call will fail if the protocol driver specifies an option
3735 * that the underlying controller or its driver does not support. For
3736 * example, not all hardware supports wire transfers using nine bit words,
3737 * LSB-first wire encoding, or active-high chipselects.
3738 *
3739 * Return: zero on success, else a negative error code.
3740 */
3741 int spi_setup(struct spi_device *spi)
3742 {
3743 unsigned bad_bits, ugly_bits;
3744 int status = 0;
3745
3746 /*
3747 	 * Check the mode to prevent any two of DUAL, QUAD and NO_MOSI/MISO
3748 	 * from being set at the same time.
3749 */
3750 if ((hweight_long(spi->mode &
3751 (SPI_TX_DUAL | SPI_TX_QUAD | SPI_NO_TX)) > 1) ||
3752 (hweight_long(spi->mode &
3753 (SPI_RX_DUAL | SPI_RX_QUAD | SPI_NO_RX)) > 1)) {
3754 dev_err(&spi->dev,
3755 			"setup: cannot select any two of dual, quad and no-rx/tx at the same time\n");
3756 return -EINVAL;
3757 }
3758 	/* In SPI_3WIRE mode, DUAL and QUAD modes are forbidden */
3759 if ((spi->mode & SPI_3WIRE) && (spi->mode &
3760 (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL |
3761 SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL)))
3762 return -EINVAL;
3763 /*
3764 * Help drivers fail *cleanly* when they need options
3765 * that aren't supported with their current controller.
3766 * SPI_CS_WORD has a fallback software implementation,
3767 * so it is ignored here.
3768 */
3769 bad_bits = spi->mode & ~(spi->controller->mode_bits | SPI_CS_WORD |
3770 SPI_NO_TX | SPI_NO_RX);
3771 ugly_bits = bad_bits &
3772 (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL |
3773 SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL);
3774 if (ugly_bits) {
3775 dev_warn(&spi->dev,
3776 "setup: ignoring unsupported mode bits %x\n",
3777 ugly_bits);
3778 spi->mode &= ~ugly_bits;
3779 bad_bits &= ~ugly_bits;
3780 }
3781 if (bad_bits) {
3782 dev_err(&spi->dev, "setup: unsupported mode bits %x\n",
3783 bad_bits);
3784 return -EINVAL;
3785 }
3786
3787 if (!spi->bits_per_word) {
3788 spi->bits_per_word = 8;
3789 } else {
3790 /*
3791 * Some controllers may not support the default 8 bits-per-word
3792 * so only perform the check when this is explicitly provided.
3793 */
3794 status = __spi_validate_bits_per_word(spi->controller,
3795 spi->bits_per_word);
3796 if (status)
3797 return status;
3798 }
3799
3800 if (spi->controller->max_speed_hz &&
3801 (!spi->max_speed_hz ||
3802 spi->max_speed_hz > spi->controller->max_speed_hz))
3803 spi->max_speed_hz = spi->controller->max_speed_hz;
3804
3805 mutex_lock(&spi->controller->io_mutex);
3806
3807 if (spi->controller->setup) {
3808 status = spi->controller->setup(spi);
3809 if (status) {
3810 mutex_unlock(&spi->controller->io_mutex);
3811 dev_err(&spi->controller->dev, "Failed to setup device: %d\n",
3812 status);
3813 return status;
3814 }
3815 }
3816
3817 status = spi_set_cs_timing(spi);
3818 if (status) {
3819 mutex_unlock(&spi->controller->io_mutex);
3820 return status;
3821 }
3822
3823 if (spi->controller->auto_runtime_pm && spi->controller->set_cs) {
3824 status = pm_runtime_resume_and_get(spi->controller->dev.parent);
3825 if (status < 0) {
3826 mutex_unlock(&spi->controller->io_mutex);
3827 dev_err(&spi->controller->dev, "Failed to power device: %d\n",
3828 status);
3829 return status;
3830 }
3831
3832 /*
3833 		 * We do not want to return a positive value from pm_runtime_get;
3834 * there are many instances of devices calling spi_setup() and
3835 * checking for a non-zero return value instead of a negative
3836 * return value.
3837 */
3838 status = 0;
3839
3840 spi_set_cs(spi, false, true);
3841 pm_runtime_mark_last_busy(spi->controller->dev.parent);
3842 pm_runtime_put_autosuspend(spi->controller->dev.parent);
3843 } else {
3844 spi_set_cs(spi, false, true);
3845 }
3846
3847 mutex_unlock(&spi->controller->io_mutex);
3848
3849 if (spi->rt && !spi->controller->rt) {
3850 spi->controller->rt = true;
3851 spi_set_thread_rt(spi->controller);
3852 }
3853
3854 trace_spi_setup(spi, status);
3855
3856 dev_dbg(&spi->dev, "setup mode %lu, %s%s%s%s%u bits/w, %u Hz max --> %d\n",
3857 spi->mode & SPI_MODE_X_MASK,
3858 (spi->mode & SPI_CS_HIGH) ? "cs_high, " : "",
3859 (spi->mode & SPI_LSB_FIRST) ? "lsb, " : "",
3860 (spi->mode & SPI_3WIRE) ? "3wire, " : "",
3861 (spi->mode & SPI_LOOP) ? "loopback, " : "",
3862 spi->bits_per_word, spi->max_speed_hz,
3863 status);
3864
3865 return status;
3866 }
3867 EXPORT_SYMBOL_GPL(spi_setup);
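
/*
 * Example (an illustrative sketch): a protocol driver's probe() asking for
 * SPI mode 3 and a slower clock before first talking to its chip. The
 * 1 MHz ceiling and 8-bit words are hypothetical device requirements:
 *
 *	spi->mode = SPI_MODE_3;
 *	spi->bits_per_word = 8;
 *	spi->max_speed_hz = 1000000;
 *	ret = spi_setup(spi);
 *	if (ret < 0)
 *		return ret;
 */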
3868
3869 static int _spi_xfer_word_delay_update(struct spi_transfer *xfer,
3870 struct spi_device *spi)
3871 {
3872 int delay1, delay2;
3873
3874 delay1 = spi_delay_to_ns(&xfer->word_delay, xfer);
3875 if (delay1 < 0)
3876 return delay1;
3877
3878 delay2 = spi_delay_to_ns(&spi->word_delay, xfer);
3879 if (delay2 < 0)
3880 return delay2;
3881
3882 if (delay1 < delay2)
3883 memcpy(&xfer->word_delay, &spi->word_delay,
3884 sizeof(xfer->word_delay));
3885
3886 return 0;
3887 }
3888
3889 static int __spi_validate(struct spi_device *spi, struct spi_message *message)
3890 {
3891 struct spi_controller *ctlr = spi->controller;
3892 struct spi_transfer *xfer;
3893 int w_size;
3894
3895 if (list_empty(&message->transfers))
3896 return -EINVAL;
3897
3898 /*
3899 * If an SPI controller does not support toggling the CS line on each
3900 * transfer (indicated by the SPI_CS_WORD flag) or we are using a GPIO
3901 * for the CS line, we can emulate the CS-per-word hardware function by
3902 * splitting transfers into one-word transfers and ensuring that
3903 * cs_change is set for each transfer.
3904 */
3905 if ((spi->mode & SPI_CS_WORD) && (!(ctlr->mode_bits & SPI_CS_WORD) ||
3906 spi_get_csgpiod(spi, 0))) {
3907 size_t maxsize = BITS_TO_BYTES(spi->bits_per_word);
3908 int ret;
3909
3910 /* spi_split_transfers_maxsize() requires message->spi */
3911 message->spi = spi;
3912
3913 ret = spi_split_transfers_maxsize(ctlr, message, maxsize,
3914 GFP_KERNEL);
3915 if (ret)
3916 return ret;
3917
3918 list_for_each_entry(xfer, &message->transfers, transfer_list) {
3919 /* Don't change cs_change on the last entry in the list */
3920 if (list_is_last(&xfer->transfer_list, &message->transfers))
3921 break;
3922 xfer->cs_change = 1;
3923 }
3924 }
3925
3926 /*
3927 * Half-duplex links include original MicroWire, and ones with
3928 * only one data pin like SPI_3WIRE (switches direction) or where
3929 * either MOSI or MISO is missing. They can also be caused by
3930 * software limitations.
3931 */
3932 if ((ctlr->flags & SPI_CONTROLLER_HALF_DUPLEX) ||
3933 (spi->mode & SPI_3WIRE)) {
3934 unsigned flags = ctlr->flags;
3935
3936 list_for_each_entry(xfer, &message->transfers, transfer_list) {
3937 if (xfer->rx_buf && xfer->tx_buf)
3938 return -EINVAL;
3939 if ((flags & SPI_CONTROLLER_NO_TX) && xfer->tx_buf)
3940 return -EINVAL;
3941 if ((flags & SPI_CONTROLLER_NO_RX) && xfer->rx_buf)
3942 return -EINVAL;
3943 }
3944 }
3945
3946 /*
3947 	 * Set transfer bits_per_word and max speed to the SPI device
3948 	 * defaults if they are not set for this transfer.
3949 	 * Set transfer tx_nbits and rx_nbits to the single-transfer default
3950 	 * (SPI_NBITS_SINGLE) if they are not set for this transfer.
3951 	 * Ensure transfer word_delay is at least as long as that required
3952 	 * by the device itself.
3953 */
3954 message->frame_length = 0;
3955 list_for_each_entry(xfer, &message->transfers, transfer_list) {
3956 xfer->effective_speed_hz = 0;
3957 message->frame_length += xfer->len;
3958 if (!xfer->bits_per_word)
3959 xfer->bits_per_word = spi->bits_per_word;
3960
3961 if (!xfer->speed_hz)
3962 xfer->speed_hz = spi->max_speed_hz;
3963
3964 if (ctlr->max_speed_hz && xfer->speed_hz > ctlr->max_speed_hz)
3965 xfer->speed_hz = ctlr->max_speed_hz;
3966
3967 if (__spi_validate_bits_per_word(ctlr, xfer->bits_per_word))
3968 return -EINVAL;
3969
3970 /*
3971 		 * The SPI transfer length should be a multiple of the SPI word
3972 		 * size, where the word size is rounded up to a power-of-two number of bytes.
3973 */
3974 if (xfer->bits_per_word <= 8)
3975 w_size = 1;
3976 else if (xfer->bits_per_word <= 16)
3977 w_size = 2;
3978 else
3979 w_size = 4;
3980
3981 /* No partial transfers accepted */
3982 if (xfer->len % w_size)
3983 return -EINVAL;
3984
3985 if (xfer->speed_hz && ctlr->min_speed_hz &&
3986 xfer->speed_hz < ctlr->min_speed_hz)
3987 return -EINVAL;
3988
3989 if (xfer->tx_buf && !xfer->tx_nbits)
3990 xfer->tx_nbits = SPI_NBITS_SINGLE;
3991 if (xfer->rx_buf && !xfer->rx_nbits)
3992 xfer->rx_nbits = SPI_NBITS_SINGLE;
3993 /*
3994 * Check transfer tx/rx_nbits:
3995 * 1. check the value matches one of single, dual and quad
3996 * 2. check tx/rx_nbits match the mode in spi_device
3997 */
3998 if (xfer->tx_buf) {
3999 if (spi->mode & SPI_NO_TX)
4000 return -EINVAL;
4001 if (xfer->tx_nbits != SPI_NBITS_SINGLE &&
4002 xfer->tx_nbits != SPI_NBITS_DUAL &&
4003 xfer->tx_nbits != SPI_NBITS_QUAD)
4004 return -EINVAL;
4005 if ((xfer->tx_nbits == SPI_NBITS_DUAL) &&
4006 !(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD)))
4007 return -EINVAL;
4008 if ((xfer->tx_nbits == SPI_NBITS_QUAD) &&
4009 !(spi->mode & SPI_TX_QUAD))
4010 return -EINVAL;
4011 }
4012 /* Check transfer rx_nbits */
4013 if (xfer->rx_buf) {
4014 if (spi->mode & SPI_NO_RX)
4015 return -EINVAL;
4016 if (xfer->rx_nbits != SPI_NBITS_SINGLE &&
4017 xfer->rx_nbits != SPI_NBITS_DUAL &&
4018 xfer->rx_nbits != SPI_NBITS_QUAD)
4019 return -EINVAL;
4020 if ((xfer->rx_nbits == SPI_NBITS_DUAL) &&
4021 !(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD)))
4022 return -EINVAL;
4023 if ((xfer->rx_nbits == SPI_NBITS_QUAD) &&
4024 !(spi->mode & SPI_RX_QUAD))
4025 return -EINVAL;
4026 }
4027
4028 if (_spi_xfer_word_delay_update(xfer, spi))
4029 return -EINVAL;
4030 }
4031
4032 message->status = -EINPROGRESS;
4033
4034 return 0;
4035 }
4036
__spi_async(struct spi_device * spi,struct spi_message * message)4037 static int __spi_async(struct spi_device *spi, struct spi_message *message)
4038 {
4039 struct spi_controller *ctlr = spi->controller;
4040 struct spi_transfer *xfer;
4041
4042 /*
4043 * Some controllers do not support doing regular SPI transfers. Return
4044 	 * -ENOTSUPP when this is the case.
4045 */
4046 if (!ctlr->transfer)
4047 return -ENOTSUPP;
4048
4049 message->spi = spi;
4050
4051 SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics, spi_async);
4052 SPI_STATISTICS_INCREMENT_FIELD(spi->pcpu_statistics, spi_async);
4053
4054 trace_spi_message_submit(message);
4055
4056 if (!ctlr->ptp_sts_supported) {
4057 list_for_each_entry(xfer, &message->transfers, transfer_list) {
4058 xfer->ptp_sts_word_pre = 0;
4059 ptp_read_system_prets(xfer->ptp_sts);
4060 }
4061 }
4062
4063 return ctlr->transfer(spi, message);
4064 }
4065
4066 /**
4067 * spi_async - asynchronous SPI transfer
4068 * @spi: device with which data will be exchanged
4069 * @message: describes the data transfers, including completion callback
4070 * Context: any (IRQs may be blocked, etc)
4071 *
4072  * This call may be used from IRQ and other contexts which can't sleep,
4073 * as well as from task contexts which can sleep.
4074 *
4075 * The completion callback is invoked in a context which can't sleep.
4076 * Before that invocation, the value of message->status is undefined.
4077 * When the callback is issued, message->status holds either zero (to
4078 * indicate complete success) or a negative error code. After that
4079 * callback returns, the driver which issued the transfer request may
4080 * deallocate the associated memory; it's no longer in use by any SPI
4081 * core or controller driver code.
4082 *
4083 * Note that although all messages to a spi_device are handled in
4084 * FIFO order, messages may go to different devices in other orders.
4085  * Some devices might be higher priority, or have various "hard" access
4086 * time requirements, for example.
4087 *
4088 * On detection of any fault during the transfer, processing of
4089 * the entire message is aborted, and the device is deselected.
4090 * Until returning from the associated message completion callback,
4091 * no other spi_message queued to that device will be processed.
4092 * (This rule applies equally to all the synchronous transfer calls,
4093 * which are wrappers around this core asynchronous primitive.)
4094 *
4095 * Return: zero on success, else a negative error code.
4096 */
4097 int spi_async(struct spi_device *spi, struct spi_message *message)
4098 {
4099 struct spi_controller *ctlr = spi->controller;
4100 int ret;
4101 unsigned long flags;
4102
4103 ret = __spi_validate(spi, message);
4104 if (ret != 0)
4105 return ret;
4106
4107 spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
4108
4109 if (ctlr->bus_lock_flag)
4110 ret = -EBUSY;
4111 else
4112 ret = __spi_async(spi, message);
4113
4114 spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
4115
4116 return ret;
4117 }
4118 EXPORT_SYMBOL_GPL(spi_async);
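
/*
 * Example (an illustrative sketch): submitting a message asynchronously
 * and signalling a waiter from the completion callback, which runs in a
 * context that can't sleep. The foo_msg_complete() name and the priv
 * structure are hypothetical:
 *
 *	static void foo_msg_complete(void *context)
 *	{
 *		struct completion *done = context;
 *
 *		complete(done);
 *	}
 *
 *	msg->complete = foo_msg_complete;
 *	msg->context = &priv->done;
 *	ret = spi_async(spi, msg);
 */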
4119
4120 /**
4121 * spi_async_locked - version of spi_async with exclusive bus usage
4122 * @spi: device with which data will be exchanged
4123 * @message: describes the data transfers, including completion callback
4124 * Context: any (IRQs may be blocked, etc)
4125 *
4126  * This call may be used from IRQ and other contexts which can't sleep,
4127 * as well as from task contexts which can sleep.
4128 *
4129 * The completion callback is invoked in a context which can't sleep.
4130 * Before that invocation, the value of message->status is undefined.
4131 * When the callback is issued, message->status holds either zero (to
4132 * indicate complete success) or a negative error code. After that
4133 * callback returns, the driver which issued the transfer request may
4134 * deallocate the associated memory; it's no longer in use by any SPI
4135 * core or controller driver code.
4136 *
4137 * Note that although all messages to a spi_device are handled in
4138 * FIFO order, messages may go to different devices in other orders.
4139  * Some devices might be higher priority, or have various "hard" access
4140 * time requirements, for example.
4141 *
4142 * On detection of any fault during the transfer, processing of
4143 * the entire message is aborted, and the device is deselected.
4144 * Until returning from the associated message completion callback,
4145 * no other spi_message queued to that device will be processed.
4146 * (This rule applies equally to all the synchronous transfer calls,
4147 * which are wrappers around this core asynchronous primitive.)
4148 *
4149 * Return: zero on success, else a negative error code.
4150 */
4151 static int spi_async_locked(struct spi_device *spi, struct spi_message *message)
4152 {
4153 struct spi_controller *ctlr = spi->controller;
4154 int ret;
4155 unsigned long flags;
4156
4157 ret = __spi_validate(spi, message);
4158 if (ret != 0)
4159 return ret;
4160
4161 spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
4162
4163 ret = __spi_async(spi, message);
4164
4165 spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
4166
4167 return ret;
4169 }
4170
4171 static void __spi_transfer_message_noqueue(struct spi_controller *ctlr, struct spi_message *msg)
4172 {
4173 bool was_busy;
4174 int ret;
4175
4176 mutex_lock(&ctlr->io_mutex);
4177
4178 was_busy = ctlr->busy;
4179
4180 ctlr->cur_msg = msg;
4181 ret = __spi_pump_transfer_message(ctlr, msg, was_busy);
4182 if (ret)
4183 dev_err(&ctlr->dev, "noqueue transfer failed\n");
4184 ctlr->cur_msg = NULL;
4185 ctlr->fallback = false;
4186
4187 if (!was_busy) {
4188 kfree(ctlr->dummy_rx);
4189 ctlr->dummy_rx = NULL;
4190 kfree(ctlr->dummy_tx);
4191 ctlr->dummy_tx = NULL;
4192 if (ctlr->unprepare_transfer_hardware &&
4193 ctlr->unprepare_transfer_hardware(ctlr))
4194 dev_err(&ctlr->dev,
4195 "failed to unprepare transfer hardware\n");
4196 spi_idle_runtime_pm(ctlr);
4197 }
4198
4199 mutex_unlock(&ctlr->io_mutex);
4200 }
4201
4202 /*-------------------------------------------------------------------------*/
4203
4204 /*
4205 * Utility methods for SPI protocol drivers, layered on
4206 * top of the core. Some other utility methods are defined as
4207 * inline functions.
4208 */
4209
4210 static void spi_complete(void *arg)
4211 {
4212 complete(arg);
4213 }
4214
4215 static int __spi_sync(struct spi_device *spi, struct spi_message *message)
4216 {
4217 DECLARE_COMPLETION_ONSTACK(done);
4218 int status;
4219 struct spi_controller *ctlr = spi->controller;
4220
4221 if (__spi_check_suspended(ctlr)) {
4222 		dev_warn_once(&spi->dev, "Attempted to sync while suspended\n");
4223 return -ESHUTDOWN;
4224 }
4225
4226 status = __spi_validate(spi, message);
4227 if (status != 0)
4228 return status;
4229
4230 message->spi = spi;
4231
4232 SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics, spi_sync);
4233 SPI_STATISTICS_INCREMENT_FIELD(spi->pcpu_statistics, spi_sync);
4234
4235 /*
4236 * Checking queue_empty here only guarantees async/sync message
4237 * ordering when coming from the same context. It does not need to
4238 * guard against reentrancy from a different context. The io_mutex
4239 * will catch those cases.
4240 */
4241 if (READ_ONCE(ctlr->queue_empty) && !ctlr->must_async) {
4242 message->actual_length = 0;
4243 message->status = -EINPROGRESS;
4244
4245 trace_spi_message_submit(message);
4246
4247 SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics, spi_sync_immediate);
4248 SPI_STATISTICS_INCREMENT_FIELD(spi->pcpu_statistics, spi_sync_immediate);
4249
4250 __spi_transfer_message_noqueue(ctlr, message);
4251
4252 return message->status;
4253 }
4254
4255 /*
4256 * There are messages in the async queue that could have originated
4257 * from the same context, so we need to preserve ordering.
4258 	 * Therefore we send the message to the async queue and wait until
4259 	 * it has completed.
4260 */
4261 message->complete = spi_complete;
4262 message->context = &done;
4263 status = spi_async_locked(spi, message);
4264 if (status == 0) {
4265 wait_for_completion(&done);
4266 status = message->status;
4267 }
4268 message->complete = NULL;
4269 message->context = NULL;
4270
4271 return status;
4272 }
4273
4274 /**
4275 * spi_sync - blocking/synchronous SPI data transfers
4276 * @spi: device with which data will be exchanged
4277 * @message: describes the data transfers
4278 * Context: can sleep
4279 *
4280 * This call may only be used from a context that may sleep. The sleep
4281 * is non-interruptible, and has no timeout. Low-overhead controller
4282 * drivers may DMA directly into and out of the message buffers.
4283 *
4284 * Note that the SPI device's chip select is active during the message,
4285 * and then is normally disabled between messages. Drivers for some
4286 * frequently-used devices may want to minimize costs of selecting a chip,
4287 * by leaving it selected in anticipation that the next message will go
4288 * to the same chip. (That may increase power usage.)
4289 *
4290 * Also, the caller is guaranteeing that the memory associated with the
4291 * message will not be freed before this call returns.
4292 *
4293 * Return: zero on success, else a negative error code.
4294 */
4295 int spi_sync(struct spi_device *spi, struct spi_message *message)
4296 {
4297 int ret;
4298
4299 mutex_lock(&spi->controller->bus_lock_mutex);
4300 ret = __spi_sync(spi, message);
4301 mutex_unlock(&spi->controller->bus_lock_mutex);
4302
4303 return ret;
4304 }
4305 EXPORT_SYMBOL_GPL(spi_sync);
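
/*
 * Example (an illustrative sketch): a blocking two-transfer message that
 * writes a register address and reads the value back. The one-byte
 * address/value protocol is a hypothetical device convention:
 *
 *	struct spi_transfer xfers[2] = {
 *		{ .tx_buf = &reg_addr, .len = 1, },
 *		{ .rx_buf = &reg_val,  .len = 1, },
 *	};
 *	struct spi_message msg;
 *
 *	spi_message_init_with_transfers(&msg, xfers, ARRAY_SIZE(xfers));
 *	ret = spi_sync(spi, &msg);
 */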
4306
4307 /**
4308 * spi_sync_locked - version of spi_sync with exclusive bus usage
4309 * @spi: device with which data will be exchanged
4310 * @message: describes the data transfers
4311 * Context: can sleep
4312 *
4313 * This call may only be used from a context that may sleep. The sleep
4314 * is non-interruptible, and has no timeout. Low-overhead controller
4315 * drivers may DMA directly into and out of the message buffers.
4316 *
4317 * This call should be used by drivers that require exclusive access to the
4318 * SPI bus. It has to be preceded by a spi_bus_lock call. The SPI bus must
4319 * be released by a spi_bus_unlock call when the exclusive access is over.
4320 *
4321 * Return: zero on success, else a negative error code.
4322 */
4323 int spi_sync_locked(struct spi_device *spi, struct spi_message *message)
4324 {
4325 return __spi_sync(spi, message);
4326 }
4327 EXPORT_SYMBOL_GPL(spi_sync_locked);
4328
4329 /**
4330 * spi_bus_lock - obtain a lock for exclusive SPI bus usage
4331 * @ctlr: SPI bus master that should be locked for exclusive bus access
4332 * Context: can sleep
4333 *
4334 * This call may only be used from a context that may sleep. The sleep
4335 * is non-interruptible, and has no timeout.
4336 *
4337 * This call should be used by drivers that require exclusive access to the
4338 * SPI bus. The SPI bus must be released by a spi_bus_unlock call when the
4339 * exclusive access is over. Data transfer must be done by spi_sync_locked
4340 * and spi_async_locked calls when the SPI bus lock is held.
4341 *
4342 * Return: always zero.
4343 */
4344 int spi_bus_lock(struct spi_controller *ctlr)
4345 {
4346 unsigned long flags;
4347
4348 mutex_lock(&ctlr->bus_lock_mutex);
4349
4350 spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
4351 ctlr->bus_lock_flag = 1;
4352 spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
4353
4354 /* Mutex remains locked until spi_bus_unlock() is called */
4355
4356 return 0;
4357 }
4358 EXPORT_SYMBOL_GPL(spi_bus_lock);
4359
4360 /**
4361 * spi_bus_unlock - release the lock for exclusive SPI bus usage
4362 * @ctlr: SPI bus master that was locked for exclusive bus access
4363 * Context: can sleep
4364 *
4365 * This call may only be used from a context that may sleep. The sleep
4366 * is non-interruptible, and has no timeout.
4367 *
4368 * This call releases an SPI bus lock previously obtained by an spi_bus_lock
4369 * call.
4370 *
4371 * Return: always zero.
4372 */
4373 int spi_bus_unlock(struct spi_controller *ctlr)
4374 {
4375 ctlr->bus_lock_flag = 0;
4376
4377 mutex_unlock(&ctlr->bus_lock_mutex);
4378
4379 return 0;
4380 }
4381 EXPORT_SYMBOL_GPL(spi_bus_unlock);
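
/*
 * Example (an illustrative sketch): issuing two messages back to back with
 * no other client's traffic allowed in between. first_msg and second_msg
 * are assumed to be fully initialized spi_messages:
 *
 *	spi_bus_lock(spi->controller);
 *	ret = spi_sync_locked(spi, &first_msg);
 *	if (!ret)
 *		ret = spi_sync_locked(spi, &second_msg);
 *	spi_bus_unlock(spi->controller);
 */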
4382
4383 /* Portable code must never pass more than 32 bytes */
4384 #define SPI_BUFSIZ max(32, SMP_CACHE_BYTES)
4385
4386 static u8 *buf;
4387
4388 /**
4389 * spi_write_then_read - SPI synchronous write followed by read
4390 * @spi: device with which data will be exchanged
4391 * @txbuf: data to be written (need not be DMA-safe)
4392 * @n_tx: size of txbuf, in bytes
4393 * @rxbuf: buffer into which data will be read (need not be DMA-safe)
4394 * @n_rx: size of rxbuf, in bytes
4395 * Context: can sleep
4396 *
4397  * This performs a half-duplex MicroWire-style transaction with the
4398 * device, sending txbuf and then reading rxbuf. The return value
4399 * is zero for success, else a negative errno status code.
4400 * This call may only be used from a context that may sleep.
4401 *
4402 * Parameters to this routine are always copied using a small buffer.
4403 * Performance-sensitive or bulk transfer code should instead use
4404 * spi_{async,sync}() calls with DMA-safe buffers.
4405 *
4406 * Return: zero on success, else a negative error code.
4407 */
4408 int spi_write_then_read(struct spi_device *spi,
4409 const void *txbuf, unsigned n_tx,
4410 void *rxbuf, unsigned n_rx)
4411 {
4412 static DEFINE_MUTEX(lock);
4413
4414 int status;
4415 struct spi_message message;
4416 struct spi_transfer x[2];
4417 u8 *local_buf;
4418
4419 /*
4420 	 * Use the preallocated DMA-safe buffer if we can. We can't avoid
4421 	 * copying here (it is purely a convenience), but we can
4422 * keep heap costs out of the hot path unless someone else is
4423 * using the pre-allocated buffer or the transfer is too large.
4424 */
4425 if ((n_tx + n_rx) > SPI_BUFSIZ || !mutex_trylock(&lock)) {
4426 local_buf = kmalloc(max((unsigned)SPI_BUFSIZ, n_tx + n_rx),
4427 GFP_KERNEL | GFP_DMA);
4428 if (!local_buf)
4429 return -ENOMEM;
4430 } else {
4431 local_buf = buf;
4432 }
4433
4434 spi_message_init(&message);
4435 memset(x, 0, sizeof(x));
4436 if (n_tx) {
4437 x[0].len = n_tx;
4438 spi_message_add_tail(&x[0], &message);
4439 }
4440 if (n_rx) {
4441 x[1].len = n_rx;
4442 spi_message_add_tail(&x[1], &message);
4443 }
4444
4445 memcpy(local_buf, txbuf, n_tx);
4446 x[0].tx_buf = local_buf;
4447 x[1].rx_buf = local_buf + n_tx;
4448
4449 /* Do the I/O */
4450 status = spi_sync(spi, &message);
4451 if (status == 0)
4452 memcpy(rxbuf, x[1].rx_buf, n_rx);
4453
4454 if (x[0].tx_buf == buf)
4455 mutex_unlock(&lock);
4456 else
4457 kfree(local_buf);
4458
4459 return status;
4460 }
4461 EXPORT_SYMBOL_GPL(spi_write_then_read);
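
/*
 * Example (an illustrative sketch): a one-byte command followed by a
 * three-byte response, in the style of a JEDEC read-ID; the 0x9f opcode
 * is used purely as an illustration:
 *
 *	u8 cmd = 0x9f;
 *	u8 id[3];
 *
 *	ret = spi_write_then_read(spi, &cmd, 1, id, sizeof(id));
 */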
4462
4463 /*-------------------------------------------------------------------------*/
4464
4465 #if IS_ENABLED(CONFIG_OF_DYNAMIC)
4466 /* Must call put_device() when done with the returned spi_device */
4467 static struct spi_device *of_find_spi_device_by_node(struct device_node *node)
4468 {
4469 struct device *dev = bus_find_device_by_of_node(&spi_bus_type, node);
4470
4471 return dev ? to_spi_device(dev) : NULL;
4472 }
4473
4474 /* SPI controllers are not on the spi_bus, so we must find them another way */
4475 static struct spi_controller *of_find_spi_controller_by_node(struct device_node *node)
4476 {
4477 struct device *dev;
4478
4479 dev = class_find_device_by_of_node(&spi_master_class, node);
4480 if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE))
4481 dev = class_find_device_by_of_node(&spi_slave_class, node);
4482 if (!dev)
4483 return NULL;
4484
4485 	/* Reference obtained in class_find_device() */
4486 return container_of(dev, struct spi_controller, dev);
4487 }
4488
4489 static int of_spi_notify(struct notifier_block *nb, unsigned long action,
4490 void *arg)
4491 {
4492 struct of_reconfig_data *rd = arg;
4493 struct spi_controller *ctlr;
4494 struct spi_device *spi;
4495
4496 switch (of_reconfig_get_state_change(action, arg)) {
4497 case OF_RECONFIG_CHANGE_ADD:
4498 ctlr = of_find_spi_controller_by_node(rd->dn->parent);
4499 if (ctlr == NULL)
4500 return NOTIFY_OK; /* Not for us */
4501
4502 if (of_node_test_and_set_flag(rd->dn, OF_POPULATED)) {
4503 put_device(&ctlr->dev);
4504 return NOTIFY_OK;
4505 }
4506
4507 /*
4508 * Clear the flag before adding the device so that fw_devlink
4509 * doesn't skip adding consumers to this device.
4510 */
4511 rd->dn->fwnode.flags &= ~FWNODE_FLAG_NOT_DEVICE;
4512 spi = of_register_spi_device(ctlr, rd->dn);
4513 put_device(&ctlr->dev);
4514
4515 if (IS_ERR(spi)) {
4516 			pr_err("%s: failed to create SPI device for '%pOF'\n",
4517 __func__, rd->dn);
4518 of_node_clear_flag(rd->dn, OF_POPULATED);
4519 return notifier_from_errno(PTR_ERR(spi));
4520 }
4521 break;
4522
4523 case OF_RECONFIG_CHANGE_REMOVE:
4524 /* Already depopulated? */
4525 if (!of_node_check_flag(rd->dn, OF_POPULATED))
4526 return NOTIFY_OK;
4527
4528 /* Find our device by node */
4529 spi = of_find_spi_device_by_node(rd->dn);
4530 if (spi == NULL)
4531 			return NOTIFY_OK; /* Not meant for us */
4532
4533 /* Unregister takes one ref away */
4534 spi_unregister_device(spi);
4535
4536 		/* And drop the reference taken by the find */
4537 put_device(&spi->dev);
4538 break;
4539 }
4540
4541 return NOTIFY_OK;
4542 }
4543
4544 static struct notifier_block spi_of_notifier = {
4545 .notifier_call = of_spi_notify,
4546 };
4547 #else /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
4548 extern struct notifier_block spi_of_notifier;
4549 #endif /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
4550
4551 #if IS_ENABLED(CONFIG_ACPI)
4552 static int spi_acpi_controller_match(struct device *dev, const void *data)
4553 {
4554 return ACPI_COMPANION(dev->parent) == data;
4555 }
4556
4557 static struct spi_controller *acpi_spi_find_controller_by_adev(struct acpi_device *adev)
4558 {
4559 struct device *dev;
4560
4561 dev = class_find_device(&spi_master_class, NULL, adev,
4562 spi_acpi_controller_match);
4563 if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE))
4564 dev = class_find_device(&spi_slave_class, NULL, adev,
4565 spi_acpi_controller_match);
4566 if (!dev)
4567 return NULL;
4568
4569 return container_of(dev, struct spi_controller, dev);
4570 }
4571
4572 static struct spi_device *acpi_spi_find_device_by_adev(struct acpi_device *adev)
4573 {
4574 struct device *dev;
4575
4576 dev = bus_find_device_by_acpi_dev(&spi_bus_type, adev);
4577 return to_spi_device(dev);
4578 }
4579
4580 static int acpi_spi_notify(struct notifier_block *nb, unsigned long value,
4581 void *arg)
4582 {
4583 struct acpi_device *adev = arg;
4584 struct spi_controller *ctlr;
4585 struct spi_device *spi;
4586
4587 switch (value) {
4588 case ACPI_RECONFIG_DEVICE_ADD:
4589 ctlr = acpi_spi_find_controller_by_adev(acpi_dev_parent(adev));
4590 if (!ctlr)
4591 break;
4592
4593 acpi_register_spi_device(ctlr, adev);
4594 put_device(&ctlr->dev);
4595 break;
4596 case ACPI_RECONFIG_DEVICE_REMOVE:
4597 if (!acpi_device_enumerated(adev))
4598 break;
4599
4600 spi = acpi_spi_find_device_by_adev(adev);
4601 if (!spi)
4602 break;
4603
4604 spi_unregister_device(spi);
4605 put_device(&spi->dev);
4606 break;
4607 }
4608
4609 return NOTIFY_OK;
4610 }
4611
4612 static struct notifier_block spi_acpi_notifier = {
4613 .notifier_call = acpi_spi_notify,
4614 };
4615 #else
4616 extern struct notifier_block spi_acpi_notifier;
4617 #endif
4618
4619 static int __init spi_init(void)
4620 {
4621 int status;
4622
4623 buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL);
4624 if (!buf) {
4625 status = -ENOMEM;
4626 goto err0;
4627 }
4628
4629 status = bus_register(&spi_bus_type);
4630 if (status < 0)
4631 goto err1;
4632
4633 status = class_register(&spi_master_class);
4634 if (status < 0)
4635 goto err2;
4636
4637 if (IS_ENABLED(CONFIG_SPI_SLAVE)) {
4638 status = class_register(&spi_slave_class);
4639 if (status < 0)
4640 goto err3;
4641 }
4642
4643 if (IS_ENABLED(CONFIG_OF_DYNAMIC))
4644 WARN_ON(of_reconfig_notifier_register(&spi_of_notifier));
4645 if (IS_ENABLED(CONFIG_ACPI))
4646 WARN_ON(acpi_reconfig_notifier_register(&spi_acpi_notifier));
4647
4648 return 0;
4649
4650 err3:
4651 class_unregister(&spi_master_class);
4652 err2:
4653 bus_unregister(&spi_bus_type);
4654 err1:
4655 kfree(buf);
4656 buf = NULL;
4657 err0:
4658 return status;
4659 }
4660
4661 /*
4662 * A board_info is normally registered in arch_initcall(),
4663 * but even essential drivers wait till later.
4664 *
4665 * REVISIT only boardinfo really needs static linking. The rest (device and
4666 * driver registration) _could_ be dynamically linked (modular) ... Costs
4667 * include needing to have boardinfo data structures be much more public.
4668 */
4669 postcore_initcall(spi_init);
4670