// SPDX-License-Identifier: GPL-2.0-or-later
// SPI init/core code
//
// Copyright (C) 2005 David Brownell
// Copyright (C) 2008 Secret Lab Technologies Ltd.

#include <linux/acpi.h>
#include <linux/cache.h>
#include <linux/clk/clk-conf.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <linux/gpio/consumer.h>
#include <linux/highmem.h>
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/mod_devicetable.h>
#include <linux/mutex.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/percpu.h>
#include <linux/platform_data/x86/apple.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/property.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/sched/rt.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
#include <uapi/linux/sched/types.h>

#define CREATE_TRACE_POINTS
#include <trace/events/spi.h>
EXPORT_TRACEPOINT_SYMBOL(spi_transfer_start);
EXPORT_TRACEPOINT_SYMBOL(spi_transfer_stop);

#include "internals.h"

static DEFINE_IDR(spi_master_idr);

static void spidev_release(struct device *dev)
{
        struct spi_device *spi = to_spi_device(dev);

        spi_controller_put(spi->controller);
        kfree(spi->driver_override);
        free_percpu(spi->pcpu_statistics);
        kfree(spi);
}

static ssize_t
modalias_show(struct device *dev, struct device_attribute *a, char *buf)
{
        const struct spi_device *spi = to_spi_device(dev);
        int len;

        len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
        if (len != -ENODEV)
                return len;

        return sysfs_emit(buf, "%s%s\n", SPI_MODULE_PREFIX, spi->modalias);
}
static DEVICE_ATTR_RO(modalias);

static ssize_t driver_override_store(struct device *dev,
                                     struct device_attribute *a,
                                     const char *buf, size_t count)
{
        struct spi_device *spi = to_spi_device(dev);
        int ret;

        ret = driver_set_override(dev, &spi->driver_override, buf, count);
        if (ret)
                return ret;

        return count;
}

static ssize_t driver_override_show(struct device *dev,
                                    struct device_attribute *a, char *buf)
{
        const struct spi_device *spi = to_spi_device(dev);
        ssize_t len;

        device_lock(dev);
        len = sysfs_emit(buf, "%s\n", spi->driver_override ? : "");
        device_unlock(dev);
        return len;
}
static DEVICE_ATTR_RW(driver_override);

static struct spi_statistics __percpu *spi_alloc_pcpu_stats(struct device *dev)
{
        struct spi_statistics __percpu *pcpu_stats;

        if (dev)
                pcpu_stats = devm_alloc_percpu(dev, struct spi_statistics);
        else
                pcpu_stats = alloc_percpu_gfp(struct spi_statistics, GFP_KERNEL);

        if (pcpu_stats) {
                int cpu;

                for_each_possible_cpu(cpu) {
                        struct spi_statistics *stat;

                        stat = per_cpu_ptr(pcpu_stats, cpu);
                        u64_stats_init(&stat->syncp);
                }
        }
        return pcpu_stats;
}

static ssize_t spi_emit_pcpu_stats(struct spi_statistics __percpu *stat,
                                   char *buf, size_t offset)
{
        u64 val = 0;
        int i;

        for_each_possible_cpu(i) {
                const struct spi_statistics *pcpu_stats;
                u64_stats_t *field;
                unsigned int start;
                u64 inc;

                pcpu_stats = per_cpu_ptr(stat, i);
                field = (void *)pcpu_stats + offset;
                do {
                        start = u64_stats_fetch_begin(&pcpu_stats->syncp);
                        inc = u64_stats_read(field);
                } while (u64_stats_fetch_retry(&pcpu_stats->syncp, start));
                val += inc;
        }
        return sysfs_emit(buf, "%llu\n", val);
}
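
/*
 * Note on the read loop above: on 32-bit SMP kernels the
 * u64_stats_fetch_begin()/u64_stats_fetch_retry() pair forms a seqcount
 * read side, so each 64-bit counter is re-read until a consistent snapshot
 * is seen; on 64-bit kernels the pair compiles away. Summing the per-CPU
 * snapshots yields the totals exposed through the sysfs "statistics"
 * groups defined below.
 */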

#define SPI_STATISTICS_ATTRS(field, file)                                 \
static ssize_t spi_controller_##field##_show(struct device *dev,         \
                                             struct device_attribute *attr, \
                                             char *buf)                   \
{                                                                         \
        struct spi_controller *ctlr = container_of(dev,                  \
                                                   struct spi_controller, dev); \
        return spi_statistics_##field##_show(ctlr->pcpu_statistics, buf); \
}                                                                         \
static struct device_attribute dev_attr_spi_controller_##field = {       \
        .attr = { .name = file, .mode = 0444 },                           \
        .show = spi_controller_##field##_show,                            \
};                                                                        \
static ssize_t spi_device_##field##_show(struct device *dev,             \
                                         struct device_attribute *attr,  \
                                         char *buf)                       \
{                                                                         \
        struct spi_device *spi = to_spi_device(dev);                      \
        return spi_statistics_##field##_show(spi->pcpu_statistics, buf);  \
}                                                                         \
static struct device_attribute dev_attr_spi_device_##field = {           \
        .attr = { .name = file, .mode = 0444 },                           \
        .show = spi_device_##field##_show,                                \
}

#define SPI_STATISTICS_SHOW_NAME(name, file, field)                       \
static ssize_t spi_statistics_##name##_show(struct spi_statistics __percpu *stat, \
                                            char *buf)                    \
{                                                                         \
        return spi_emit_pcpu_stats(stat, buf,                             \
                                   offsetof(struct spi_statistics, field)); \
}                                                                         \
SPI_STATISTICS_ATTRS(name, file)

#define SPI_STATISTICS_SHOW(field)                                        \
        SPI_STATISTICS_SHOW_NAME(field, __stringify(field),               \
                                 field)
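
/*
 * As a concrete illustration (not extra code): SPI_STATISTICS_SHOW(messages)
 * expands to a spi_statistics_messages_show() helper plus two read-only
 * attributes, dev_attr_spi_controller_messages and dev_attr_spi_device_messages,
 * both reading the "messages" counter out of struct spi_statistics.
 */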

SPI_STATISTICS_SHOW(messages);
SPI_STATISTICS_SHOW(transfers);
SPI_STATISTICS_SHOW(errors);
SPI_STATISTICS_SHOW(timedout);

SPI_STATISTICS_SHOW(spi_sync);
SPI_STATISTICS_SHOW(spi_sync_immediate);
SPI_STATISTICS_SHOW(spi_async);

SPI_STATISTICS_SHOW(bytes);
SPI_STATISTICS_SHOW(bytes_rx);
SPI_STATISTICS_SHOW(bytes_tx);

#define SPI_STATISTICS_TRANSFER_BYTES_HISTO(index, number)                \
        SPI_STATISTICS_SHOW_NAME(transfer_bytes_histo##index,             \
                                 "transfer_bytes_histo_" number,          \
                                 transfer_bytes_histo[index])
SPI_STATISTICS_TRANSFER_BYTES_HISTO(0,  "0-1");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(1,  "2-3");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(2,  "4-7");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(3,  "8-15");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(4,  "16-31");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(5,  "32-63");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(6,  "64-127");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(7,  "128-255");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(8,  "256-511");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(9,  "512-1023");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(10, "1024-2047");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(11, "2048-4095");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(12, "4096-8191");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(13, "8192-16383");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(14, "16384-32767");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(15, "32768-65535");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(16, "65536+");

SPI_STATISTICS_SHOW(transfers_split_maxsize);

static struct attribute *spi_dev_attrs[] = {
        &dev_attr_modalias.attr,
        &dev_attr_driver_override.attr,
        NULL,
};

static const struct attribute_group spi_dev_group = {
        .attrs = spi_dev_attrs,
};

static struct attribute *spi_device_statistics_attrs[] = {
        &dev_attr_spi_device_messages.attr,
        &dev_attr_spi_device_transfers.attr,
        &dev_attr_spi_device_errors.attr,
        &dev_attr_spi_device_timedout.attr,
        &dev_attr_spi_device_spi_sync.attr,
        &dev_attr_spi_device_spi_sync_immediate.attr,
        &dev_attr_spi_device_spi_async.attr,
        &dev_attr_spi_device_bytes.attr,
        &dev_attr_spi_device_bytes_rx.attr,
        &dev_attr_spi_device_bytes_tx.attr,
        &dev_attr_spi_device_transfer_bytes_histo0.attr,
        &dev_attr_spi_device_transfer_bytes_histo1.attr,
        &dev_attr_spi_device_transfer_bytes_histo2.attr,
        &dev_attr_spi_device_transfer_bytes_histo3.attr,
        &dev_attr_spi_device_transfer_bytes_histo4.attr,
        &dev_attr_spi_device_transfer_bytes_histo5.attr,
        &dev_attr_spi_device_transfer_bytes_histo6.attr,
        &dev_attr_spi_device_transfer_bytes_histo7.attr,
        &dev_attr_spi_device_transfer_bytes_histo8.attr,
        &dev_attr_spi_device_transfer_bytes_histo9.attr,
        &dev_attr_spi_device_transfer_bytes_histo10.attr,
        &dev_attr_spi_device_transfer_bytes_histo11.attr,
        &dev_attr_spi_device_transfer_bytes_histo12.attr,
        &dev_attr_spi_device_transfer_bytes_histo13.attr,
        &dev_attr_spi_device_transfer_bytes_histo14.attr,
        &dev_attr_spi_device_transfer_bytes_histo15.attr,
        &dev_attr_spi_device_transfer_bytes_histo16.attr,
        &dev_attr_spi_device_transfers_split_maxsize.attr,
        NULL,
};

static const struct attribute_group spi_device_statistics_group = {
        .name = "statistics",
        .attrs = spi_device_statistics_attrs,
};

static const struct attribute_group *spi_dev_groups[] = {
        &spi_dev_group,
        &spi_device_statistics_group,
        NULL,
};

static struct attribute *spi_controller_statistics_attrs[] = {
        &dev_attr_spi_controller_messages.attr,
        &dev_attr_spi_controller_transfers.attr,
        &dev_attr_spi_controller_errors.attr,
        &dev_attr_spi_controller_timedout.attr,
        &dev_attr_spi_controller_spi_sync.attr,
        &dev_attr_spi_controller_spi_sync_immediate.attr,
        &dev_attr_spi_controller_spi_async.attr,
        &dev_attr_spi_controller_bytes.attr,
        &dev_attr_spi_controller_bytes_rx.attr,
        &dev_attr_spi_controller_bytes_tx.attr,
        &dev_attr_spi_controller_transfer_bytes_histo0.attr,
        &dev_attr_spi_controller_transfer_bytes_histo1.attr,
        &dev_attr_spi_controller_transfer_bytes_histo2.attr,
        &dev_attr_spi_controller_transfer_bytes_histo3.attr,
        &dev_attr_spi_controller_transfer_bytes_histo4.attr,
        &dev_attr_spi_controller_transfer_bytes_histo5.attr,
        &dev_attr_spi_controller_transfer_bytes_histo6.attr,
        &dev_attr_spi_controller_transfer_bytes_histo7.attr,
        &dev_attr_spi_controller_transfer_bytes_histo8.attr,
        &dev_attr_spi_controller_transfer_bytes_histo9.attr,
        &dev_attr_spi_controller_transfer_bytes_histo10.attr,
        &dev_attr_spi_controller_transfer_bytes_histo11.attr,
        &dev_attr_spi_controller_transfer_bytes_histo12.attr,
        &dev_attr_spi_controller_transfer_bytes_histo13.attr,
        &dev_attr_spi_controller_transfer_bytes_histo14.attr,
        &dev_attr_spi_controller_transfer_bytes_histo15.attr,
        &dev_attr_spi_controller_transfer_bytes_histo16.attr,
        &dev_attr_spi_controller_transfers_split_maxsize.attr,
        NULL,
};

static const struct attribute_group spi_controller_statistics_group = {
        .name = "statistics",
        .attrs = spi_controller_statistics_attrs,
};

static const struct attribute_group *spi_master_groups[] = {
        &spi_controller_statistics_group,
        NULL,
};

static void spi_statistics_add_transfer_stats(struct spi_statistics __percpu *pcpu_stats,
                                              struct spi_transfer *xfer,
                                              struct spi_message *msg)
{
        int l2len = min(fls(xfer->len), SPI_STATISTICS_HISTO_SIZE) - 1;
        struct spi_statistics *stats;

        if (l2len < 0)
                l2len = 0;

        get_cpu();
        stats = this_cpu_ptr(pcpu_stats);
        u64_stats_update_begin(&stats->syncp);

        u64_stats_inc(&stats->transfers);
        u64_stats_inc(&stats->transfer_bytes_histo[l2len]);

        u64_stats_add(&stats->bytes, xfer->len);
        if (spi_valid_txbuf(msg, xfer))
                u64_stats_add(&stats->bytes_tx, xfer->len);
        if (spi_valid_rxbuf(msg, xfer))
                u64_stats_add(&stats->bytes_rx, xfer->len);

        u64_stats_update_end(&stats->syncp);
        put_cpu();
}

/*
 * modalias support makes "modprobe $MODALIAS" new-style hotplug work,
 * and the sysfs version makes coldplug work too.
 */
static const struct spi_device_id *spi_match_id(const struct spi_device_id *id, const char *name)
{
        while (id->name[0]) {
                if (!strcmp(name, id->name))
                        return id;
                id++;
        }
        return NULL;
}

const struct spi_device_id *spi_get_device_id(const struct spi_device *sdev)
{
        const struct spi_driver *sdrv = to_spi_driver(sdev->dev.driver);

        return spi_match_id(sdrv->id_table, sdev->modalias);
}
EXPORT_SYMBOL_GPL(spi_get_device_id);

const void *spi_get_device_match_data(const struct spi_device *sdev)
{
        const void *match;

        match = device_get_match_data(&sdev->dev);
        if (match)
                return match;

        return (const void *)spi_get_device_id(sdev)->driver_data;
}
EXPORT_SYMBOL_GPL(spi_get_device_match_data);
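
/*
 * Sketch of typical use in a driver's probe path (struct chip_cfg and the
 * variable name are hypothetical): firmware match data, when present, wins
 * over the legacy spi_device_id ->driver_data fallback:
 *
 *      const struct chip_cfg *cfg = spi_get_device_match_data(spi);
 */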

static int spi_match_device(struct device *dev, const struct device_driver *drv)
{
        const struct spi_device *spi = to_spi_device(dev);
        const struct spi_driver *sdrv = to_spi_driver(drv);

        /* Check override first, and if set, only use the named driver */
        if (spi->driver_override)
                return strcmp(spi->driver_override, drv->name) == 0;

        /* Attempt an OF style match */
        if (of_driver_match_device(dev, drv))
                return 1;

        /* Then try ACPI */
        if (acpi_driver_match_device(dev, drv))
                return 1;

        if (sdrv->id_table)
                return !!spi_match_id(sdrv->id_table, spi->modalias);

        return strcmp(spi->modalias, drv->name) == 0;
}

static int spi_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
        const struct spi_device *spi = to_spi_device(dev);
        int rc;

        rc = acpi_device_uevent_modalias(dev, env);
        if (rc != -ENODEV)
                return rc;

        return add_uevent_var(env, "MODALIAS=%s%s", SPI_MODULE_PREFIX, spi->modalias);
}

static int spi_probe(struct device *dev)
{
        const struct spi_driver *sdrv = to_spi_driver(dev->driver);
        struct spi_device *spi = to_spi_device(dev);
        int ret;

        ret = of_clk_set_defaults(dev->of_node, false);
        if (ret)
                return ret;

        if (dev->of_node) {
                spi->irq = of_irq_get(dev->of_node, 0);
                if (spi->irq == -EPROBE_DEFER)
                        return -EPROBE_DEFER;
                if (spi->irq < 0)
                        spi->irq = 0;
        }

        if (has_acpi_companion(dev) && spi->irq < 0) {
                struct acpi_device *adev = to_acpi_device_node(dev->fwnode);

                spi->irq = acpi_dev_gpio_irq_get(adev, 0);
                if (spi->irq == -EPROBE_DEFER)
                        return -EPROBE_DEFER;
                if (spi->irq < 0)
                        spi->irq = 0;
        }

        ret = dev_pm_domain_attach(dev, true);
        if (ret)
                return ret;

        if (sdrv->probe) {
                ret = sdrv->probe(spi);
                if (ret)
                        dev_pm_domain_detach(dev, true);
        }

        return ret;
}

static void spi_remove(struct device *dev)
{
        const struct spi_driver *sdrv = to_spi_driver(dev->driver);

        if (sdrv->remove)
                sdrv->remove(to_spi_device(dev));

        dev_pm_domain_detach(dev, true);
}

static void spi_shutdown(struct device *dev)
{
        if (dev->driver) {
                const struct spi_driver *sdrv = to_spi_driver(dev->driver);

                if (sdrv->shutdown)
                        sdrv->shutdown(to_spi_device(dev));
        }
}

const struct bus_type spi_bus_type = {
        .name           = "spi",
        .dev_groups     = spi_dev_groups,
        .match          = spi_match_device,
        .uevent         = spi_uevent,
        .probe          = spi_probe,
        .remove         = spi_remove,
        .shutdown       = spi_shutdown,
};
EXPORT_SYMBOL_GPL(spi_bus_type);

/**
 * __spi_register_driver - register a SPI driver
 * @owner: owner module of the driver to register
 * @sdrv: the driver to register
 * Context: can sleep
 *
 * Return: zero on success, else a negative error code.
 */
int __spi_register_driver(struct module *owner, struct spi_driver *sdrv)
{
        sdrv->driver.owner = owner;
        sdrv->driver.bus = &spi_bus_type;

        /*
         * For Really Good Reasons we use spi: modaliases not of:
         * modaliases for DT so module autoloading won't work if we
         * don't have a spi_device_id as well as a compatible string.
         */
        if (sdrv->driver.of_match_table) {
                const struct of_device_id *of_id;

                for (of_id = sdrv->driver.of_match_table; of_id->compatible[0];
                     of_id++) {
                        const char *of_name;

                        /* Strip off any vendor prefix */
                        of_name = strnchr(of_id->compatible,
                                          sizeof(of_id->compatible), ',');
                        if (of_name)
                                of_name++;
                        else
                                of_name = of_id->compatible;

                        if (sdrv->id_table) {
                                const struct spi_device_id *spi_id;

                                spi_id = spi_match_id(sdrv->id_table, of_name);
                                if (spi_id)
                                        continue;
                        } else {
                                if (strcmp(sdrv->driver.name, of_name) == 0)
                                        continue;
                        }

                        pr_warn("SPI driver %s has no spi_device_id for %s\n",
                                sdrv->driver.name, of_id->compatible);
                }
        }

        return driver_register(&sdrv->driver);
}
EXPORT_SYMBOL_GPL(__spi_register_driver);
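
/*
 * Hypothetical driver skeleton illustrating the check above: listing the
 * chip in both tables keeps "spi:chipname" module autoloading working on
 * DT systems (all identifiers below are made up for illustration):
 *
 *      static const struct of_device_id chipname_of_match[] = {
 *              { .compatible = "vendor,chipname" },
 *              { }
 *      };
 *      MODULE_DEVICE_TABLE(of, chipname_of_match);
 *
 *      static const struct spi_device_id chipname_spi_ids[] = {
 *              { "chipname" },
 *              { }
 *      };
 *      MODULE_DEVICE_TABLE(spi, chipname_spi_ids);
 *
 *      static struct spi_driver chipname_driver = {
 *              .driver = {
 *                      .name           = "chipname",
 *                      .of_match_table = chipname_of_match,
 *              },
 *              .id_table       = chipname_spi_ids,
 *              .probe          = chipname_probe,
 *      };
 *      module_spi_driver(chipname_driver);
 */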

/*-------------------------------------------------------------------------*/

/*
 * SPI devices should normally not be created by SPI device drivers; that
 * would make them board-specific. Similarly with SPI controller drivers.
 * Device registration normally goes into files like arch/.../mach.../board-YYY.c
 * with other readonly (flashable) information about mainboard devices.
 */

struct boardinfo {
        struct list_head        list;
        struct spi_board_info   board_info;
};

static LIST_HEAD(board_list);
static LIST_HEAD(spi_controller_list);

/*
 * Used to protect add/del operations on the board_info list and the
 * spi_controller list and their matching process; also used to protect
 * the struct idr objects (the bus-number IDR).
 */
static DEFINE_MUTEX(board_lock);

/**
 * spi_alloc_device - Allocate a new SPI device
 * @ctlr: Controller to which device is connected
 * Context: can sleep
 *
 * Allows a driver to allocate and initialize a spi_device without
 * registering it immediately. This allows a driver to directly
 * fill the spi_device with device parameters before calling
 * spi_add_device() on it.
 *
 * Caller is responsible to call spi_add_device() on the returned
 * spi_device structure to add it to the SPI controller. If the caller
 * needs to discard the spi_device without adding it, then it should
 * call spi_dev_put() on it.
 *
 * Return: a pointer to the new device, or NULL.
 */
struct spi_device *spi_alloc_device(struct spi_controller *ctlr)
{
        struct spi_device *spi;

        if (!spi_controller_get(ctlr))
                return NULL;

        spi = kzalloc(sizeof(*spi), GFP_KERNEL);
        if (!spi) {
                spi_controller_put(ctlr);
                return NULL;
        }

        spi->pcpu_statistics = spi_alloc_pcpu_stats(NULL);
        if (!spi->pcpu_statistics) {
                kfree(spi);
                spi_controller_put(ctlr);
                return NULL;
        }

        spi->controller = ctlr;
        spi->dev.parent = &ctlr->dev;
        spi->dev.bus = &spi_bus_type;
        spi->dev.release = spidev_release;
        spi->mode = ctlr->buswidth_override_bits;

        device_initialize(&spi->dev);
        return spi;
}
EXPORT_SYMBOL_GPL(spi_alloc_device);
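
/*
 * Minimal usage sketch for the allocate-then-add flow (hypothetical caller,
 * error handling abbreviated):
 *
 *      struct spi_device *spi = spi_alloc_device(ctlr);
 *
 *      if (!spi)
 *              return -ENOMEM;
 *      strscpy(spi->modalias, "chipname", sizeof(spi->modalias));
 *      spi_set_chipselect(spi, 0, 0);
 *      if (spi_add_device(spi))
 *              spi_dev_put(spi);
 */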

static void spi_dev_set_name(struct spi_device *spi)
{
        struct device *dev = &spi->dev;
        struct fwnode_handle *fwnode = dev_fwnode(dev);

        if (is_acpi_device_node(fwnode)) {
                dev_set_name(dev, "spi-%s", acpi_dev_name(to_acpi_device_node(fwnode)));
                return;
        }

        if (is_software_node(fwnode)) {
                dev_set_name(dev, "spi-%pfwP", fwnode);
                return;
        }

        dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->controller->dev),
                     spi_get_chipselect(spi, 0));
}

/*
 * Zero(0) is a valid physical CS value and can be located at any
 * logical CS in the spi->chip_select[]. If all the physical CS
 * are initialized to 0 then it would be difficult to differentiate
 * between a valid physical CS 0 & an unused logical CS whose physical
 * CS can be 0. As a solution to this issue initialize all the CS to -1.
 * Now all the unused logical CS will have -1 physical CS value & can be
 * ignored while performing physical CS validity checks.
 */
#define SPI_INVALID_CS          ((s8)-1)

static inline bool is_valid_cs(s8 chip_select)
{
        return chip_select != SPI_INVALID_CS;
}

static inline int spi_dev_check_cs(struct device *dev,
                                   struct spi_device *spi, u8 idx,
                                   struct spi_device *new_spi, u8 new_idx)
{
        u8 cs, cs_new;
        u8 idx_new;

        cs = spi_get_chipselect(spi, idx);
        for (idx_new = new_idx; idx_new < SPI_CS_CNT_MAX; idx_new++) {
                cs_new = spi_get_chipselect(new_spi, idx_new);
                if (is_valid_cs(cs) && is_valid_cs(cs_new) && cs == cs_new) {
                        dev_err(dev, "chipselect %u already in use\n", cs_new);
                        return -EBUSY;
                }
        }
        return 0;
}

static int spi_dev_check(struct device *dev, void *data)
{
        struct spi_device *spi = to_spi_device(dev);
        struct spi_device *new_spi = data;
        int status, idx;

        if (spi->controller == new_spi->controller) {
                for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) {
                        status = spi_dev_check_cs(dev, spi, idx, new_spi, 0);
                        if (status)
                                return status;
                }
        }
        return 0;
}

static void spi_cleanup(struct spi_device *spi)
{
        if (spi->controller->cleanup)
                spi->controller->cleanup(spi);
}

static int __spi_add_device(struct spi_device *spi)
{
        struct spi_controller *ctlr = spi->controller;
        struct device *dev = ctlr->dev.parent;
        int status, idx;
        u8 cs;

        for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) {
                /* Chipselects are numbered 0..max; validate. */
                cs = spi_get_chipselect(spi, idx);
                if (is_valid_cs(cs) && cs >= ctlr->num_chipselect) {
                        dev_err(dev, "cs%d >= max %d\n", spi_get_chipselect(spi, idx),
                                ctlr->num_chipselect);
                        return -EINVAL;
                }
        }

        /*
         * Make sure that multiple logical CS doesn't map to the same physical CS.
         * For example, spi->chip_select[0] != spi->chip_select[1] and so on.
         */
        if (!spi_controller_is_target(ctlr)) {
                for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) {
                        status = spi_dev_check_cs(dev, spi, idx, spi, idx + 1);
                        if (status)
                                return status;
                }
        }

        /* Set the bus ID string */
        spi_dev_set_name(spi);

        /*
         * We need to make sure there's no other device with this
         * chipselect **BEFORE** we call setup(), else we'll trash
         * its configuration.
         */
        status = bus_for_each_dev(&spi_bus_type, NULL, spi, spi_dev_check);
        if (status)
                return status;

        /* Controller may unregister concurrently */
        if (IS_ENABLED(CONFIG_SPI_DYNAMIC) &&
            !device_is_registered(&ctlr->dev)) {
                return -ENODEV;
        }

        if (ctlr->cs_gpiods) {
                u8 cs;

                for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) {
                        cs = spi_get_chipselect(spi, idx);
                        if (is_valid_cs(cs))
                                spi_set_csgpiod(spi, idx, ctlr->cs_gpiods[cs]);
                }
        }

        /*
         * Drivers may modify this initial i/o setup, but will
         * normally rely on the device being setup. Devices
         * using SPI_CS_HIGH can't coexist well otherwise...
         */
        status = spi_setup(spi);
        if (status < 0) {
                dev_err(dev, "can't setup %s, status %d\n",
                        dev_name(&spi->dev), status);
                return status;
        }

        /* Device may be bound to an active driver when this returns */
        status = device_add(&spi->dev);
        if (status < 0) {
                dev_err(dev, "can't add %s, status %d\n",
                        dev_name(&spi->dev), status);
                spi_cleanup(spi);
        } else {
                dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev));
        }

        return status;
}

/**
 * spi_add_device - Add spi_device allocated with spi_alloc_device
 * @spi: spi_device to register
 *
 * Companion function to spi_alloc_device. Devices allocated with
 * spi_alloc_device can be added onto the SPI bus with this function.
 *
 * Return: 0 on success; negative errno on failure
 */
int spi_add_device(struct spi_device *spi)
{
        struct spi_controller *ctlr = spi->controller;
        int status;

        /* Set the bus ID string */
        spi_dev_set_name(spi);

        mutex_lock(&ctlr->add_lock);
        status = __spi_add_device(spi);
        mutex_unlock(&ctlr->add_lock);
        return status;
}
EXPORT_SYMBOL_GPL(spi_add_device);

static void spi_set_all_cs_unused(struct spi_device *spi)
{
        u8 idx;

        for (idx = 0; idx < SPI_CS_CNT_MAX; idx++)
                spi_set_chipselect(spi, idx, SPI_INVALID_CS);
}

/**
 * spi_new_device - instantiate one new SPI device
 * @ctlr: Controller to which device is connected
 * @chip: Describes the SPI device
 * Context: can sleep
 *
 * On typical mainboards, this is purely internal; and it's not needed
 * after board init creates the hard-wired devices. Some development
 * platforms may not be able to use spi_register_board_info though, and
 * this is exported so that for example a USB or parport based adapter
 * driver could add devices (which it would learn about out-of-band).
 *
 * Return: the new device, or NULL.
 */
struct spi_device *spi_new_device(struct spi_controller *ctlr,
                                  struct spi_board_info *chip)
{
        struct spi_device *proxy;
        int status;

        /*
         * NOTE: caller did any chip->bus_num checks necessary.
         *
         * Also, unless we change the return value convention to use
         * error-or-pointer (not NULL-or-pointer), troubleshootability
         * suggests syslogged diagnostics are best here (ugh).
         */

        proxy = spi_alloc_device(ctlr);
        if (!proxy)
                return NULL;

        WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias));

        /* Use provided chip-select for proxy device */
        spi_set_all_cs_unused(proxy);
        spi_set_chipselect(proxy, 0, chip->chip_select);

        proxy->max_speed_hz = chip->max_speed_hz;
        proxy->mode = chip->mode;
        proxy->irq = chip->irq;
        strscpy(proxy->modalias, chip->modalias, sizeof(proxy->modalias));
        proxy->dev.platform_data = (void *) chip->platform_data;
        proxy->controller_data = chip->controller_data;
        proxy->controller_state = NULL;
        /*
         * By default spi->chip_select[0] will hold the physical CS number,
         * so set bit 0 in spi->cs_index_mask.
         */
        proxy->cs_index_mask = BIT(0);

        if (chip->swnode) {
                status = device_add_software_node(&proxy->dev, chip->swnode);
                if (status) {
                        dev_err(&ctlr->dev, "failed to add software node to '%s': %d\n",
                                chip->modalias, status);
                        goto err_dev_put;
                }
        }

        status = spi_add_device(proxy);
        if (status < 0)
                goto err_dev_put;

        return proxy;

err_dev_put:
        device_remove_software_node(&proxy->dev);
        spi_dev_put(proxy);
        return NULL;
}
EXPORT_SYMBOL_GPL(spi_new_device);

/**
 * spi_unregister_device - unregister a single SPI device
 * @spi: spi_device to unregister
 *
 * Start making the passed SPI device vanish. Normally this would be handled
 * by spi_unregister_controller().
 */
void spi_unregister_device(struct spi_device *spi)
{
        if (!spi)
                return;

        if (spi->dev.of_node) {
                of_node_clear_flag(spi->dev.of_node, OF_POPULATED);
                of_node_put(spi->dev.of_node);
        }
        if (ACPI_COMPANION(&spi->dev))
                acpi_device_clear_enumerated(ACPI_COMPANION(&spi->dev));
        device_remove_software_node(&spi->dev);
        device_del(&spi->dev);
        spi_cleanup(spi);
        put_device(&spi->dev);
}
EXPORT_SYMBOL_GPL(spi_unregister_device);

static void spi_match_controller_to_boardinfo(struct spi_controller *ctlr,
                                              struct spi_board_info *bi)
{
        struct spi_device *dev;

        if (ctlr->bus_num != bi->bus_num)
                return;

        dev = spi_new_device(ctlr, bi);
        if (!dev)
                dev_err(ctlr->dev.parent, "can't create new device for %s\n",
                        bi->modalias);
}

/**
 * spi_register_board_info - register SPI devices for a given board
 * @info: array of chip descriptors
 * @n: how many descriptors are provided
 * Context: can sleep
 *
 * Board-specific early init code calls this (probably during arch_initcall)
 * with segments of the SPI device table. Any device nodes are created later,
 * after the relevant parent SPI controller (bus_num) is defined. We keep
 * this table of devices forever, so that reloading a controller driver will
 * not make Linux forget about these hard-wired devices.
 *
 * Other code can also call this, e.g. a particular add-on board might provide
 * SPI devices through its expansion connector, so code initializing that board
 * would naturally declare its SPI devices.
 *
 * The board info passed can safely be __initdata ... but be careful of
 * any embedded pointers (platform_data, etc), they're copied as-is.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_register_board_info(struct spi_board_info const *info, unsigned n)
{
        struct boardinfo *bi;
        int i;

        if (!n)
                return 0;

        bi = kcalloc(n, sizeof(*bi), GFP_KERNEL);
        if (!bi)
                return -ENOMEM;

        for (i = 0; i < n; i++, bi++, info++) {
                struct spi_controller *ctlr;

                memcpy(&bi->board_info, info, sizeof(*info));

                mutex_lock(&board_lock);
                list_add_tail(&bi->list, &board_list);
                list_for_each_entry(ctlr, &spi_controller_list, list)
                        spi_match_controller_to_boardinfo(ctlr,
                                                          &bi->board_info);
                mutex_unlock(&board_lock);
        }

        return 0;
}
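
/*
 * A typical (hypothetical) board file feeds this with a static table:
 *
 *      static struct spi_board_info board_spi_devices[] __initdata = {
 *              {
 *                      .modalias       = "chipname",
 *                      .max_speed_hz   = 1000000,
 *                      .bus_num        = 0,
 *                      .chip_select    = 1,
 *                      .mode           = SPI_MODE_0,
 *              },
 *      };
 *
 *      spi_register_board_info(board_spi_devices,
 *                              ARRAY_SIZE(board_spi_devices));
 */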

/*-------------------------------------------------------------------------*/

/* Core methods for SPI resource management */

/**
 * spi_res_alloc - allocate a spi resource that is life-cycle managed
 *                 during the processing of a spi_message while using
 *                 spi_transfer_one
 * @spi: the SPI device for which we allocate memory
 * @release: the release code to execute for this resource
 * @size: size to alloc and return
 * @gfp: GFP allocation flags
 *
 * Return: the pointer to the allocated data
 *
 * This may get enhanced in the future to allocate from a memory pool
 * of the @spi_device or @spi_controller to avoid repeated allocations.
 */
static void *spi_res_alloc(struct spi_device *spi, spi_res_release_t release,
                           size_t size, gfp_t gfp)
{
        struct spi_res *sres;

        sres = kzalloc(sizeof(*sres) + size, gfp);
        if (!sres)
                return NULL;

        INIT_LIST_HEAD(&sres->entry);
        sres->release = release;

        return sres->data;
}

/**
 * spi_res_free - free an SPI resource
 * @res: pointer to the custom data of a resource
 */
static void spi_res_free(void *res)
{
        struct spi_res *sres = container_of(res, struct spi_res, data);

        if (!res)
                return;

        WARN_ON(!list_empty(&sres->entry));
        kfree(sres);
}

/**
 * spi_res_add - add a spi_res to the spi_message
 * @message: the SPI message
 * @res: the spi_resource
 */
static void spi_res_add(struct spi_message *message, void *res)
{
        struct spi_res *sres = container_of(res, struct spi_res, data);

        WARN_ON(!list_empty(&sres->entry));
        list_add_tail(&sres->entry, &message->resources);
}

/**
 * spi_res_release - release all SPI resources for this message
 * @ctlr: the @spi_controller
 * @message: the @spi_message
 */
static void spi_res_release(struct spi_controller *ctlr, struct spi_message *message)
{
        struct spi_res *res, *tmp;

        list_for_each_entry_safe_reverse(res, tmp, &message->resources, entry) {
                if (res->release)
                        res->release(ctlr, message, res->data);

                list_del(&res->entry);

                kfree(res);
        }
}
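
/*
 * Sketch of the intended life cycle, under the assumption of a helper
 * running in spi_transfer_one() context: the helper calls spi_res_alloc()
 * for a payload, queues it on the message with spi_res_add(), and once the
 * message is finalized spi_res_release() walks the list in reverse,
 * invoking each ->release() callback before freeing the resource.
 */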

/*-------------------------------------------------------------------------*/
#define spi_for_each_valid_cs(spi, idx)                         \
        for (idx = 0; idx < SPI_CS_CNT_MAX; idx++)              \
                if (!(spi->cs_index_mask & BIT(idx))) {} else
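
/*
 * The "{} else" in the macro above makes the expansion behave as a single
 * statement with no dangling-else surprise, so callers can write
 * "spi_for_each_valid_cs(spi, idx) stmt;" while chip selects masked out of
 * spi->cs_index_mask are skipped.
 */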

static inline bool spi_is_last_cs(struct spi_device *spi)
{
        u8 idx;
        bool last = false;

        spi_for_each_valid_cs(spi, idx) {
                if (spi->controller->last_cs[idx] == spi_get_chipselect(spi, idx))
                        last = true;
        }
        return last;
}

static void spi_toggle_csgpiod(struct spi_device *spi, u8 idx, bool enable, bool activate)
{
        /*
         * Historically ACPI has no means of expressing the GPIO polarity,
         * and thus the SPISerialBus() resource defines it on a per-chip
         * basis. In order to avoid a chain of negations, the GPIO
         * polarity is considered to be Active High. Even for the cases
         * when _DSD() is involved (in the updated versions of ACPI)
         * the GPIO CS polarity must be defined Active High to avoid
         * ambiguity. That's why we use "enable", which takes SPI_CS_HIGH
         * into account.
         */
        if (has_acpi_companion(&spi->dev))
                gpiod_set_value_cansleep(spi_get_csgpiod(spi, idx), !enable);
        else
                /* Polarity handled by GPIO library */
                gpiod_set_value_cansleep(spi_get_csgpiod(spi, idx), activate);

        if (activate)
                spi_delay_exec(&spi->cs_setup, NULL);
        else
                spi_delay_exec(&spi->cs_inactive, NULL);
}

static void spi_set_cs(struct spi_device *spi, bool enable, bool force)
{
        bool activate = enable;
        u8 idx;

        /*
         * Avoid calling into the driver (or doing delays) if the chip select
         * isn't actually changing from the last time this was called.
         */
        if (!force && ((enable && spi->controller->last_cs_index_mask == spi->cs_index_mask &&
                        spi_is_last_cs(spi)) ||
                       (!enable && spi->controller->last_cs_index_mask == spi->cs_index_mask &&
                        !spi_is_last_cs(spi))) &&
            (spi->controller->last_cs_mode_high == (spi->mode & SPI_CS_HIGH)))
                return;

        trace_spi_set_cs(spi, activate);

        spi->controller->last_cs_index_mask = spi->cs_index_mask;
        for (idx = 0; idx < SPI_CS_CNT_MAX; idx++)
                spi->controller->last_cs[idx] = enable ? spi_get_chipselect(spi, 0) : SPI_INVALID_CS;
        spi->controller->last_cs_mode_high = spi->mode & SPI_CS_HIGH;

        if (spi->mode & SPI_CS_HIGH)
                enable = !enable;

        /*
         * Handle chip select delays for GPIO based CS or controllers without
         * programmable chip select timing.
         */
        if ((spi_is_csgpiod(spi) || !spi->controller->set_cs_timing) && !activate)
                spi_delay_exec(&spi->cs_hold, NULL);

        if (spi_is_csgpiod(spi)) {
                if (!(spi->mode & SPI_NO_CS)) {
                        spi_for_each_valid_cs(spi, idx) {
                                if (spi_get_csgpiod(spi, idx))
                                        spi_toggle_csgpiod(spi, idx, enable, activate);
                        }
                }
                /* Some SPI masters need both GPIO CS & slave_select */
                if ((spi->controller->flags & SPI_CONTROLLER_GPIO_SS) &&
                    spi->controller->set_cs)
                        spi->controller->set_cs(spi, !enable);
        } else if (spi->controller->set_cs) {
                spi->controller->set_cs(spi, !enable);
        }

        if (spi_is_csgpiod(spi) || !spi->controller->set_cs_timing) {
                if (activate)
                        spi_delay_exec(&spi->cs_setup, NULL);
                else
                        spi_delay_exec(&spi->cs_inactive, NULL);
        }
}

#ifdef CONFIG_HAS_DMA
static int spi_map_buf_attrs(struct spi_controller *ctlr, struct device *dev,
                             struct sg_table *sgt, void *buf, size_t len,
                             enum dma_data_direction dir, unsigned long attrs)
{
        const bool vmalloced_buf = is_vmalloc_addr(buf);
        unsigned int max_seg_size = dma_get_max_seg_size(dev);
#ifdef CONFIG_HIGHMEM
        const bool kmap_buf = ((unsigned long)buf >= PKMAP_BASE &&
                               (unsigned long)buf < (PKMAP_BASE +
                                                     (LAST_PKMAP * PAGE_SIZE)));
#else
        const bool kmap_buf = false;
#endif
        int desc_len;
        int sgs;
        struct page *vm_page;
        struct scatterlist *sg;
        void *sg_buf;
        size_t min;
        int i, ret;

        if (vmalloced_buf || kmap_buf) {
                desc_len = min_t(unsigned long, max_seg_size, PAGE_SIZE);
                sgs = DIV_ROUND_UP(len + offset_in_page(buf), desc_len);
        } else if (virt_addr_valid(buf)) {
                desc_len = min_t(size_t, max_seg_size, ctlr->max_dma_len);
                sgs = DIV_ROUND_UP(len, desc_len);
        } else {
                return -EINVAL;
        }

        ret = sg_alloc_table(sgt, sgs, GFP_KERNEL);
        if (ret != 0)
                return ret;

        sg = &sgt->sgl[0];
        for (i = 0; i < sgs; i++) {

                if (vmalloced_buf || kmap_buf) {
                        /*
                         * Next scatterlist entry size is the minimum between
                         * the desc_len and the remaining buffer length that
                         * fits in a page.
                         */
                        min = min_t(size_t, desc_len,
                                    min_t(size_t, len,
                                          PAGE_SIZE - offset_in_page(buf)));
                        if (vmalloced_buf)
                                vm_page = vmalloc_to_page(buf);
                        else
                                vm_page = kmap_to_page(buf);
                        if (!vm_page) {
                                sg_free_table(sgt);
                                return -ENOMEM;
                        }
                        sg_set_page(sg, vm_page,
                                    min, offset_in_page(buf));
                } else {
                        min = min_t(size_t, len, desc_len);
                        sg_buf = buf;
                        sg_set_buf(sg, sg_buf, min);
                }

                buf += min;
                len -= min;
                sg = sg_next(sg);
        }

        ret = dma_map_sgtable(dev, sgt, dir, attrs);
        if (ret < 0) {
                sg_free_table(sgt);
                return ret;
        }

        return 0;
}

int spi_map_buf(struct spi_controller *ctlr, struct device *dev,
                struct sg_table *sgt, void *buf, size_t len,
                enum dma_data_direction dir)
{
        return spi_map_buf_attrs(ctlr, dev, sgt, buf, len, dir, 0);
}

static void spi_unmap_buf_attrs(struct spi_controller *ctlr,
                                struct device *dev, struct sg_table *sgt,
                                enum dma_data_direction dir,
                                unsigned long attrs)
{
        dma_unmap_sgtable(dev, sgt, dir, attrs);
        sg_free_table(sgt);
        sgt->orig_nents = 0;
        sgt->nents = 0;
}

void spi_unmap_buf(struct spi_controller *ctlr, struct device *dev,
                   struct sg_table *sgt, enum dma_data_direction dir)
{
        spi_unmap_buf_attrs(ctlr, dev, sgt, dir, 0);
}

static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
{
        struct device *tx_dev, *rx_dev;
        struct spi_transfer *xfer;
        int ret;

        if (!ctlr->can_dma)
                return 0;

        if (ctlr->dma_tx)
                tx_dev = ctlr->dma_tx->device->dev;
        else if (ctlr->dma_map_dev)
                tx_dev = ctlr->dma_map_dev;
        else
                tx_dev = ctlr->dev.parent;

        if (ctlr->dma_rx)
                rx_dev = ctlr->dma_rx->device->dev;
        else if (ctlr->dma_map_dev)
                rx_dev = ctlr->dma_map_dev;
        else
                rx_dev = ctlr->dev.parent;

        ret = -ENOMSG;
        list_for_each_entry(xfer, &msg->transfers, transfer_list) {
                /* The sync is done before each transfer. */
                unsigned long attrs = DMA_ATTR_SKIP_CPU_SYNC;

                if (!ctlr->can_dma(ctlr, msg->spi, xfer))
                        continue;

                if (xfer->tx_buf != NULL) {
                        ret = spi_map_buf_attrs(ctlr, tx_dev, &xfer->tx_sg,
                                                (void *)xfer->tx_buf,
                                                xfer->len, DMA_TO_DEVICE,
                                                attrs);
                        if (ret != 0)
                                return ret;

                        xfer->tx_sg_mapped = true;
                }

                if (xfer->rx_buf != NULL) {
                        ret = spi_map_buf_attrs(ctlr, rx_dev, &xfer->rx_sg,
                                                xfer->rx_buf, xfer->len,
                                                DMA_FROM_DEVICE, attrs);
                        if (ret != 0) {
                                spi_unmap_buf_attrs(ctlr, tx_dev,
                                                    &xfer->tx_sg, DMA_TO_DEVICE,
                                                    attrs);

                                return ret;
                        }

                        xfer->rx_sg_mapped = true;
                }
        }
        /* No transfer has been mapped, bail out with success */
        if (ret)
                return 0;

        ctlr->cur_rx_dma_dev = rx_dev;
        ctlr->cur_tx_dma_dev = tx_dev;

        return 0;
}

static int __spi_unmap_msg(struct spi_controller *ctlr, struct spi_message *msg)
{
        struct device *rx_dev = ctlr->cur_rx_dma_dev;
        struct device *tx_dev = ctlr->cur_tx_dma_dev;
        struct spi_transfer *xfer;

        list_for_each_entry(xfer, &msg->transfers, transfer_list) {
                /* The sync has already been done after each transfer. */
                unsigned long attrs = DMA_ATTR_SKIP_CPU_SYNC;

                if (xfer->rx_sg_mapped)
                        spi_unmap_buf_attrs(ctlr, rx_dev, &xfer->rx_sg,
                                            DMA_FROM_DEVICE, attrs);
                xfer->rx_sg_mapped = false;

                if (xfer->tx_sg_mapped)
                        spi_unmap_buf_attrs(ctlr, tx_dev, &xfer->tx_sg,
                                            DMA_TO_DEVICE, attrs);
                xfer->tx_sg_mapped = false;
        }

        return 0;
}

static void spi_dma_sync_for_device(struct spi_controller *ctlr,
                                    struct spi_transfer *xfer)
{
        struct device *rx_dev = ctlr->cur_rx_dma_dev;
        struct device *tx_dev = ctlr->cur_tx_dma_dev;

        if (xfer->tx_sg_mapped)
                dma_sync_sgtable_for_device(tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
        if (xfer->rx_sg_mapped)
                dma_sync_sgtable_for_device(rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
}

static void spi_dma_sync_for_cpu(struct spi_controller *ctlr,
                                 struct spi_transfer *xfer)
{
        struct device *rx_dev = ctlr->cur_rx_dma_dev;
        struct device *tx_dev = ctlr->cur_tx_dma_dev;

        if (xfer->rx_sg_mapped)
                dma_sync_sgtable_for_cpu(rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
        if (xfer->tx_sg_mapped)
                dma_sync_sgtable_for_cpu(tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
}
#else /* !CONFIG_HAS_DMA */
static inline int __spi_map_msg(struct spi_controller *ctlr,
                                struct spi_message *msg)
{
        return 0;
}

static inline int __spi_unmap_msg(struct spi_controller *ctlr,
                                  struct spi_message *msg)
{
        return 0;
}

static void spi_dma_sync_for_device(struct spi_controller *ctrl,
                                    struct spi_transfer *xfer)
{
}

static void spi_dma_sync_for_cpu(struct spi_controller *ctrl,
                                 struct spi_transfer *xfer)
{
}
#endif /* !CONFIG_HAS_DMA */

static inline int spi_unmap_msg(struct spi_controller *ctlr,
                                struct spi_message *msg)
{
        struct spi_transfer *xfer;

        list_for_each_entry(xfer, &msg->transfers, transfer_list) {
                /*
                 * Restore the original value of tx_buf or rx_buf if they are
                 * NULL.
                 */
                if (xfer->tx_buf == ctlr->dummy_tx)
                        xfer->tx_buf = NULL;
                if (xfer->rx_buf == ctlr->dummy_rx)
                        xfer->rx_buf = NULL;
        }

        return __spi_unmap_msg(ctlr, msg);
}

static int spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
{
        struct spi_transfer *xfer;
        void *tmp;
        unsigned int max_tx, max_rx;

        if ((ctlr->flags & (SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX))
                && !(msg->spi->mode & SPI_3WIRE)) {
                max_tx = 0;
                max_rx = 0;

                list_for_each_entry(xfer, &msg->transfers, transfer_list) {
                        if ((ctlr->flags & SPI_CONTROLLER_MUST_TX) &&
                            !xfer->tx_buf)
                                max_tx = max(xfer->len, max_tx);
                        if ((ctlr->flags & SPI_CONTROLLER_MUST_RX) &&
                            !xfer->rx_buf)
                                max_rx = max(xfer->len, max_rx);
                }

                if (max_tx) {
                        tmp = krealloc(ctlr->dummy_tx, max_tx,
                                       GFP_KERNEL | GFP_DMA | __GFP_ZERO);
                        if (!tmp)
                                return -ENOMEM;
                        ctlr->dummy_tx = tmp;
                }

                if (max_rx) {
                        tmp = krealloc(ctlr->dummy_rx, max_rx,
                                       GFP_KERNEL | GFP_DMA);
                        if (!tmp)
                                return -ENOMEM;
                        ctlr->dummy_rx = tmp;
                }

                if (max_tx || max_rx) {
                        list_for_each_entry(xfer, &msg->transfers,
                                            transfer_list) {
                                if (!xfer->len)
                                        continue;
                                if (!xfer->tx_buf)
                                        xfer->tx_buf = ctlr->dummy_tx;
                                if (!xfer->rx_buf)
                                        xfer->rx_buf = ctlr->dummy_rx;
                        }
                }
        }

        return __spi_map_msg(ctlr, msg);
}

static int spi_transfer_wait(struct spi_controller *ctlr,
                             struct spi_message *msg,
                             struct spi_transfer *xfer)
{
        struct spi_statistics __percpu *statm = ctlr->pcpu_statistics;
        struct spi_statistics __percpu *stats = msg->spi->pcpu_statistics;
        u32 speed_hz = xfer->speed_hz;
        unsigned long long ms;

        if (spi_controller_is_target(ctlr)) {
                if (wait_for_completion_interruptible(&ctlr->xfer_completion)) {
                        dev_dbg(&msg->spi->dev, "SPI transfer interrupted\n");
                        return -EINTR;
                }
        } else {
                if (!speed_hz)
                        speed_hz = 100000;

                /*
                 * For each byte we wait for 8 cycles of the SPI clock.
                 * Since speed is defined in Hz and we want milliseconds,
                 * use respective multiplier, but before the division,
                 * otherwise we may get 0 for short transfers.
                 */
                ms = 8LL * MSEC_PER_SEC * xfer->len;
                do_div(ms, speed_hz);

                /*
                 * Increase it twice and add 200 ms tolerance, use
                 * predefined maximum in case of overflow.
                 */
                ms += ms + 200;
                if (ms > UINT_MAX)
                        ms = UINT_MAX;

                ms = wait_for_completion_timeout(&ctlr->xfer_completion,
                                                 msecs_to_jiffies(ms));

                if (ms == 0) {
                        SPI_STATISTICS_INCREMENT_FIELD(statm, timedout);
                        SPI_STATISTICS_INCREMENT_FIELD(stats, timedout);
                        dev_err(&msg->spi->dev,
                                "SPI transfer timed out\n");
                        return -ETIMEDOUT;
                }

                if (xfer->error & SPI_TRANS_FAIL_IO)
                        return -EIO;
        }

        return 0;
}
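
/*
 * Worked example of the tolerance math above: a 100-byte transfer at
 * 100 kHz gives ms = 8 * 1000 * 100 / 100000 = 8, which is then doubled
 * and padded to 8 + 8 + 200 = 216 ms before being handed to
 * wait_for_completion_timeout().
 */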

static void _spi_transfer_delay_ns(u32 ns)
{
        if (!ns)
                return;
        if (ns <= NSEC_PER_USEC) {
                ndelay(ns);
        } else {
                u32 us = DIV_ROUND_UP(ns, NSEC_PER_USEC);

                if (us <= 10)
                        udelay(us);
                else
                        usleep_range(us, us + DIV_ROUND_UP(us, 10));
        }
}

int spi_delay_to_ns(struct spi_delay *_delay, struct spi_transfer *xfer)
{
        u32 delay = _delay->value;
        u32 unit = _delay->unit;
        u32 hz;

        if (!delay)
                return 0;

        switch (unit) {
        case SPI_DELAY_UNIT_USECS:
                delay *= NSEC_PER_USEC;
                break;
        case SPI_DELAY_UNIT_NSECS:
                /* Nothing to do here */
                break;
        case SPI_DELAY_UNIT_SCK:
                /* Clock cycles need to be obtained from spi_transfer */
                if (!xfer)
                        return -EINVAL;
                /*
                 * If there is unknown effective speed, approximate it
                 * by underestimating with half of the requested Hz.
                 */
                hz = xfer->effective_speed_hz ?: xfer->speed_hz / 2;
                if (!hz)
                        return -EINVAL;

                /* Convert delay to nanoseconds */
                delay *= DIV_ROUND_UP(NSEC_PER_SEC, hz);
                break;
        default:
                return -EINVAL;
        }

        return delay;
}
EXPORT_SYMBOL_GPL(spi_delay_to_ns);
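
/*
 * Example of the SCK-based conversion above: with {.value = 4, .unit =
 * SPI_DELAY_UNIT_SCK} at an effective speed of 10 MHz, one clock cycle
 * rounds up to 100 ns, so spi_delay_to_ns() returns 4 * 100 = 400 ns.
 */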

int spi_delay_exec(struct spi_delay *_delay, struct spi_transfer *xfer)
{
        int delay;

        might_sleep();

        if (!_delay)
                return -EINVAL;

        delay = spi_delay_to_ns(_delay, xfer);
        if (delay < 0)
                return delay;

        _spi_transfer_delay_ns(delay);

        return 0;
}
EXPORT_SYMBOL_GPL(spi_delay_exec);

static void _spi_transfer_cs_change_delay(struct spi_message *msg,
                                          struct spi_transfer *xfer)
{
        u32 default_delay_ns = 10 * NSEC_PER_USEC;
        u32 delay = xfer->cs_change_delay.value;
        u32 unit = xfer->cs_change_delay.unit;
        int ret;

        /* Return early on "fast" mode - for everything but USECS */
        if (!delay) {
                if (unit == SPI_DELAY_UNIT_USECS)
                        _spi_transfer_delay_ns(default_delay_ns);
                return;
        }

        ret = spi_delay_exec(&xfer->cs_change_delay, xfer);
        if (ret) {
                dev_err_once(&msg->spi->dev,
                             "Use of unsupported delay unit %i, using default of %luus\n",
                             unit, default_delay_ns / NSEC_PER_USEC);
                _spi_transfer_delay_ns(default_delay_ns);
        }
}

void spi_transfer_cs_change_delay_exec(struct spi_message *msg,
                                       struct spi_transfer *xfer)
{
        _spi_transfer_cs_change_delay(msg, xfer);
}
EXPORT_SYMBOL_GPL(spi_transfer_cs_change_delay_exec);

/*
 * spi_transfer_one_message - Default implementation of transfer_one_message()
 *
 * This is a standard implementation of transfer_one_message() for
 * drivers which implement a transfer_one() operation. It provides
 * standard handling of delays and chip select management.
 */
static int spi_transfer_one_message(struct spi_controller *ctlr,
                                    struct spi_message *msg)
{
        struct spi_transfer *xfer;
        bool keep_cs = false;
        int ret = 0;
        struct spi_statistics __percpu *statm = ctlr->pcpu_statistics;
        struct spi_statistics __percpu *stats = msg->spi->pcpu_statistics;

        xfer = list_first_entry(&msg->transfers, struct spi_transfer, transfer_list);
        spi_set_cs(msg->spi, !xfer->cs_off, false);

        SPI_STATISTICS_INCREMENT_FIELD(statm, messages);
        SPI_STATISTICS_INCREMENT_FIELD(stats, messages);

        list_for_each_entry(xfer, &msg->transfers, transfer_list) {
                trace_spi_transfer_start(msg, xfer);

                spi_statistics_add_transfer_stats(statm, xfer, msg);
                spi_statistics_add_transfer_stats(stats, xfer, msg);

                if (!ctlr->ptp_sts_supported) {
                        xfer->ptp_sts_word_pre = 0;
                        ptp_read_system_prets(xfer->ptp_sts);
                }

                if ((xfer->tx_buf || xfer->rx_buf) && xfer->len) {
                        reinit_completion(&ctlr->xfer_completion);

fallback_pio:
                        spi_dma_sync_for_device(ctlr, xfer);
                        ret = ctlr->transfer_one(ctlr, msg->spi, xfer);
                        if (ret < 0) {
                                spi_dma_sync_for_cpu(ctlr, xfer);

                                if ((xfer->tx_sg_mapped || xfer->rx_sg_mapped) &&
                                    (xfer->error & SPI_TRANS_FAIL_NO_START)) {
                                        __spi_unmap_msg(ctlr, msg);
                                        ctlr->fallback = true;
                                        xfer->error &= ~SPI_TRANS_FAIL_NO_START;
                                        goto fallback_pio;
                                }

                                SPI_STATISTICS_INCREMENT_FIELD(statm,
                                                               errors);
                                SPI_STATISTICS_INCREMENT_FIELD(stats,
                                                               errors);
                                dev_err(&msg->spi->dev,
                                        "SPI transfer failed: %d\n", ret);
                                goto out;
                        }

                        if (ret > 0) {
                                ret = spi_transfer_wait(ctlr, msg, xfer);
                                if (ret < 0)
                                        msg->status = ret;
                        }

                        spi_dma_sync_for_cpu(ctlr, xfer);
                } else {
                        if (xfer->len)
                                dev_err(&msg->spi->dev,
                                        "Bufferless transfer has length %u\n",
                                        xfer->len);
                }

                if (!ctlr->ptp_sts_supported) {
                        ptp_read_system_postts(xfer->ptp_sts);
                        xfer->ptp_sts_word_post = xfer->len;
                }

                trace_spi_transfer_stop(msg, xfer);

                if (msg->status != -EINPROGRESS)
                        goto out;

                spi_transfer_delay_exec(xfer);

                if (xfer->cs_change) {
                        if (list_is_last(&xfer->transfer_list,
                                         &msg->transfers)) {
                                keep_cs = true;
                        } else {
                                if (!xfer->cs_off)
                                        spi_set_cs(msg->spi, false, false);
                                _spi_transfer_cs_change_delay(msg, xfer);
                                if (!list_next_entry(xfer, transfer_list)->cs_off)
                                        spi_set_cs(msg->spi, true, false);
                        }
                } else if (!list_is_last(&xfer->transfer_list, &msg->transfers) &&
                           xfer->cs_off != list_next_entry(xfer, transfer_list)->cs_off) {
                        spi_set_cs(msg->spi, xfer->cs_off, false);
                }

                msg->actual_length += xfer->len;
        }

out:
        if (ret != 0 || !keep_cs)
                spi_set_cs(msg->spi, false, false);

        if (msg->status == -EINPROGRESS)
                msg->status = ret;

        if (msg->status && ctlr->handle_err)
                ctlr->handle_err(ctlr, msg);

        spi_finalize_current_message(ctlr);

        return ret;
}

/**
 * spi_finalize_current_transfer - report completion of a transfer
 * @ctlr: the controller reporting completion
 *
 * Called by SPI drivers using the core transfer_one_message()
 * implementation to notify it that the current interrupt driven
 * transfer has finished and the next one may be scheduled.
 */
void spi_finalize_current_transfer(struct spi_controller *ctlr)
{
        complete(&ctlr->xfer_completion);
}
EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);
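
/*
 * A minimal sketch of the intended pairing (handler and helper names are
 * hypothetical): a driver's transfer_one() starts the hardware and returns
 * a positive value, and its completion interrupt ends the wait performed
 * by spi_transfer_wait():
 *
 *      static irqreturn_t chipname_spi_irq(int irq, void *dev_id)
 *      {
 *              struct spi_controller *ctlr = dev_id;
 *
 *              chipname_clear_irq(ctlr);
 *              spi_finalize_current_transfer(ctlr);
 *              return IRQ_HANDLED;
 *      }
 */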

static void spi_idle_runtime_pm(struct spi_controller *ctlr)
{
    if (ctlr->auto_runtime_pm) {
        pm_runtime_mark_last_busy(ctlr->dev.parent);
        pm_runtime_put_autosuspend(ctlr->dev.parent);
    }
}

static int __spi_pump_transfer_message(struct spi_controller *ctlr,
                                       struct spi_message *msg, bool was_busy)
{
    struct spi_transfer *xfer;
    int ret;

    if (!was_busy && ctlr->auto_runtime_pm) {
        ret = pm_runtime_get_sync(ctlr->dev.parent);
        if (ret < 0) {
            pm_runtime_put_noidle(ctlr->dev.parent);
            dev_err(&ctlr->dev, "Failed to power device: %d\n",
                    ret);

            msg->status = ret;
            spi_finalize_current_message(ctlr);

            return ret;
        }
    }

    if (!was_busy)
        trace_spi_controller_busy(ctlr);

    if (!was_busy && ctlr->prepare_transfer_hardware) {
        ret = ctlr->prepare_transfer_hardware(ctlr);
        if (ret) {
            dev_err(&ctlr->dev,
                    "failed to prepare transfer hardware: %d\n",
                    ret);

            if (ctlr->auto_runtime_pm)
                pm_runtime_put(ctlr->dev.parent);

            msg->status = ret;
            spi_finalize_current_message(ctlr);

            return ret;
        }
    }

    trace_spi_message_start(msg);

    if (ctlr->prepare_message) {
        ret = ctlr->prepare_message(ctlr, msg);
        if (ret) {
            dev_err(&ctlr->dev, "failed to prepare message: %d\n",
                    ret);
            msg->status = ret;
            spi_finalize_current_message(ctlr);
            return ret;
        }
        msg->prepared = true;
    }

    ret = spi_map_msg(ctlr, msg);
    if (ret) {
        msg->status = ret;
        spi_finalize_current_message(ctlr);
        return ret;
    }

    if (!ctlr->ptp_sts_supported && !ctlr->transfer_one) {
        list_for_each_entry(xfer, &msg->transfers, transfer_list) {
            xfer->ptp_sts_word_pre = 0;
            ptp_read_system_prets(xfer->ptp_sts);
        }
    }

    /*
     * A driver's implementation of transfer_one_message() must arrange for
     * spi_finalize_current_message() to get called. Most drivers will do
     * this in the calling context, but some don't. For those cases, a
     * completion is used to guarantee that this function does not return
     * until spi_finalize_current_message() is done accessing
     * ctlr->cur_msg.
     * The following two flags make it possible to opportunistically skip
     * the completion, since taking it involves expensive spin locks. In
     * case of a race with the context that calls
     * spi_finalize_current_message() the completion will always be used,
     * due to strict ordering of these flags using barriers.
     */
    WRITE_ONCE(ctlr->cur_msg_incomplete, true);
    WRITE_ONCE(ctlr->cur_msg_need_completion, false);
    reinit_completion(&ctlr->cur_msg_completion);
    smp_wmb(); /* Make these available to spi_finalize_current_message() */

    ret = ctlr->transfer_one_message(ctlr, msg);
    if (ret) {
        dev_err(&ctlr->dev,
                "failed to transfer one message from queue\n");
        return ret;
    }

    WRITE_ONCE(ctlr->cur_msg_need_completion, true);
    smp_mb(); /* See spi_finalize_current_message()... */
    if (READ_ONCE(ctlr->cur_msg_incomplete))
        wait_for_completion(&ctlr->cur_msg_completion);

    return 0;
}

/**
 * __spi_pump_messages - function which processes SPI message queue
 * @ctlr: controller to process queue for
 * @in_kthread: true if we are in the context of the message pump thread
 *
 * This function checks if there is any SPI message in the queue that
 * needs processing and if so calls out to the driver to initialize hardware
 * and transfer each message.
 *
 * Note that it is called both from the kthread itself and also from
 * inside spi_sync(); the queue extraction handling at the top of the
 * function should deal with this safely.
 */
static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
{
    struct spi_message *msg;
    bool was_busy = false;
    unsigned long flags;
    int ret;

    /* Take the I/O mutex */
    mutex_lock(&ctlr->io_mutex);

    /* Lock queue */
    spin_lock_irqsave(&ctlr->queue_lock, flags);

    /* Make sure we are not already running a message */
    if (ctlr->cur_msg)
        goto out_unlock;

    /* Check if the queue is idle */
    if (list_empty(&ctlr->queue) || !ctlr->running) {
        if (!ctlr->busy)
            goto out_unlock;

        /* Defer any non-atomic teardown to the thread */
        if (!in_kthread) {
            if (!ctlr->dummy_rx && !ctlr->dummy_tx &&
                !ctlr->unprepare_transfer_hardware) {
                spi_idle_runtime_pm(ctlr);
                ctlr->busy = false;
                ctlr->queue_empty = true;
                trace_spi_controller_idle(ctlr);
            } else {
                kthread_queue_work(ctlr->kworker,
                                   &ctlr->pump_messages);
            }
            goto out_unlock;
        }

        ctlr->busy = false;
        spin_unlock_irqrestore(&ctlr->queue_lock, flags);

        kfree(ctlr->dummy_rx);
        ctlr->dummy_rx = NULL;
        kfree(ctlr->dummy_tx);
        ctlr->dummy_tx = NULL;
        if (ctlr->unprepare_transfer_hardware &&
            ctlr->unprepare_transfer_hardware(ctlr))
            dev_err(&ctlr->dev,
                    "failed to unprepare transfer hardware\n");
        spi_idle_runtime_pm(ctlr);
        trace_spi_controller_idle(ctlr);

        spin_lock_irqsave(&ctlr->queue_lock, flags);
        ctlr->queue_empty = true;
        goto out_unlock;
    }

    /* Extract head of queue */
    msg = list_first_entry(&ctlr->queue, struct spi_message, queue);
    ctlr->cur_msg = msg;

    list_del_init(&msg->queue);
    if (ctlr->busy)
        was_busy = true;
    else
        ctlr->busy = true;
    spin_unlock_irqrestore(&ctlr->queue_lock, flags);

    ret = __spi_pump_transfer_message(ctlr, msg, was_busy);
    kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);

    ctlr->cur_msg = NULL;
    ctlr->fallback = false;

    mutex_unlock(&ctlr->io_mutex);

    /* Prod the scheduler in case transfer_one() was busy waiting */
    if (!ret)
        cond_resched();
    return;

out_unlock:
    spin_unlock_irqrestore(&ctlr->queue_lock, flags);
    mutex_unlock(&ctlr->io_mutex);
}

/**
 * spi_pump_messages - kthread work function which processes spi message queue
 * @work: pointer to kthread work struct contained in the controller struct
 */
static void spi_pump_messages(struct kthread_work *work)
{
    struct spi_controller *ctlr =
        container_of(work, struct spi_controller, pump_messages);

    __spi_pump_messages(ctlr, true);
}

/**
 * spi_take_timestamp_pre - helper to collect the beginning of the TX timestamp
 * @ctlr: Pointer to the spi_controller structure of the driver
 * @xfer: Pointer to the transfer being timestamped
 * @progress: How many words (not bytes) have been transferred so far
 * @irqs_off: If true, will disable IRQs and preemption for the duration of the
 *            transfer, for less jitter in time measurement. Only compatible
 *            with PIO drivers. If true, the call must be followed up with
 *            spi_take_timestamp_post or otherwise the system will crash.
 *            WARNING: for fully predictable results, the CPU frequency must
 *            also be under control (governor).
 *
 * This is a helper for drivers to collect the beginning of the TX timestamp
 * for the requested byte from the SPI transfer. The frequency with which this
 * function must be called (once per word, once for the whole transfer, once
 * per batch of words etc.) is arbitrary as long as the @tx buffer offset is
 * greater than or equal to the requested byte at the time of the call. The
 * timestamp is only taken once, at the first such call. It is assumed that
 * the driver advances its @tx buffer pointer monotonically.
 */
void spi_take_timestamp_pre(struct spi_controller *ctlr,
                            struct spi_transfer *xfer,
                            size_t progress, bool irqs_off)
{
    if (!xfer->ptp_sts)
        return;

    if (xfer->timestamped)
        return;

    if (progress > xfer->ptp_sts_word_pre)
        return;

    /* Capture the resolution of the timestamp */
    xfer->ptp_sts_word_pre = progress;

    if (irqs_off) {
        local_irq_save(ctlr->irq_flags);
        preempt_disable();
    }

    ptp_read_system_prets(xfer->ptp_sts);
}
EXPORT_SYMBOL_GPL(spi_take_timestamp_pre);

/**
 * spi_take_timestamp_post - helper to collect the end of the TX timestamp
 * @ctlr: Pointer to the spi_controller structure of the driver
 * @xfer: Pointer to the transfer being timestamped
 * @progress: How many words (not bytes) have been transferred so far
 * @irqs_off: If true, will re-enable IRQs and preemption for the local CPU.
 *
 * This is a helper for drivers to collect the end of the TX timestamp for
 * the requested byte from the SPI transfer. Can be called with an arbitrary
 * frequency: only the first call where @tx exceeds or is equal to the
 * requested word will be timestamped.
 */
void spi_take_timestamp_post(struct spi_controller *ctlr,
                             struct spi_transfer *xfer,
                             size_t progress, bool irqs_off)
{
    if (!xfer->ptp_sts)
        return;

    if (xfer->timestamped)
        return;

    if (progress < xfer->ptp_sts_word_post)
        return;

    ptp_read_system_postts(xfer->ptp_sts);

    if (irqs_off) {
        local_irq_restore(ctlr->irq_flags);
        preempt_enable();
    }

    /* Capture the resolution of the timestamp */
    xfer->ptp_sts_word_post = progress;

    xfer->timestamped = 1;
}
EXPORT_SYMBOL_GPL(spi_take_timestamp_post);
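
/*
 * Example: a minimal sketch of a PIO TX loop using the two helpers above,
 * calling them once per word (the simplest, if not the cheapest, pattern).
 * num_words, tx_words and foo_write_word are hypothetical:
 *
 *	unsigned int i;
 *
 *	for (i = 0; i < num_words; i++) {
 *		spi_take_timestamp_pre(ctlr, xfer, i, false);
 *		foo_write_word(ctlr, tx_words[i]);	// hypothetical FIFO write
 *		spi_take_timestamp_post(ctlr, xfer, i, false);
 *	}
 */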

/**
 * spi_set_thread_rt - set the controller to pump at realtime priority
 * @ctlr: controller to boost priority of
 *
 * This can be called because the controller requested realtime priority
 * (by setting the ->rt value before calling spi_register_controller()) or
 * because a device on the bus said that its transfers needed realtime
 * priority.
 *
 * NOTE: at the moment if any device on a bus says it needs realtime then
 * the thread will be at realtime priority for all transfers on that
 * controller. If this eventually becomes a problem we may see if we can
 * find a way to boost the priority only temporarily during relevant
 * transfers.
 */
static void spi_set_thread_rt(struct spi_controller *ctlr)
{
    dev_info(&ctlr->dev,
             "will run message pump with realtime priority\n");
    sched_set_fifo(ctlr->kworker->task);
}

static int spi_init_queue(struct spi_controller *ctlr)
{
    ctlr->running = false;
    ctlr->busy = false;
    ctlr->queue_empty = true;

    ctlr->kworker = kthread_create_worker(0, dev_name(&ctlr->dev));
    if (IS_ERR(ctlr->kworker)) {
        dev_err(&ctlr->dev, "failed to create message pump kworker\n");
        return PTR_ERR(ctlr->kworker);
    }

    kthread_init_work(&ctlr->pump_messages, spi_pump_messages);

    /*
     * Controller config will indicate if this controller should run the
     * message pump with high (realtime) priority to reduce the transfer
     * latency on the bus by minimising the delay between a transfer
     * request and the scheduling of the message pump thread. Without this
     * setting the message pump thread will remain at default priority.
     */
    if (ctlr->rt)
        spi_set_thread_rt(ctlr);

    return 0;
}

/**
 * spi_get_next_queued_message() - called by driver to check for queued
 * messages
 * @ctlr: the controller to check for queued messages
 *
 * If there are more messages in the queue, the next message is returned from
 * this call.
 *
 * Return: the next message in the queue, else NULL if the queue is empty.
 */
struct spi_message *spi_get_next_queued_message(struct spi_controller *ctlr)
{
    struct spi_message *next;
    unsigned long flags;

    /* Get a pointer to the next message, if any */
    spin_lock_irqsave(&ctlr->queue_lock, flags);
    next = list_first_entry_or_null(&ctlr->queue, struct spi_message,
                                    queue);
    spin_unlock_irqrestore(&ctlr->queue_lock, flags);

    return next;
}
EXPORT_SYMBOL_GPL(spi_get_next_queued_message);
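
/*
 * Example: a driver finishing work in interrupt context can peek at the
 * queue to decide whether to keep the hardware primed. A sketch only;
 * foo_power_down_fifo is hypothetical:
 *
 *	if (!spi_get_next_queued_message(ctlr))
 *		foo_power_down_fifo(ctlr);	// nothing pending, save power
 */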

/*
 * __spi_unoptimize_message - shared implementation of spi_unoptimize_message()
 *                            and spi_maybe_unoptimize_message()
 * @msg: the message to unoptimize
 *
 * Peripheral drivers should use spi_unoptimize_message() and callers inside
 * the core should use spi_maybe_unoptimize_message() rather than calling this
 * function directly.
 *
 * It is not valid to call this on a message that is not currently optimized.
 */
static void __spi_unoptimize_message(struct spi_message *msg)
{
    struct spi_controller *ctlr = msg->spi->controller;

    if (ctlr->unoptimize_message)
        ctlr->unoptimize_message(msg);

    spi_res_release(ctlr, msg);

    msg->optimized = false;
    msg->opt_state = NULL;
}

/*
 * spi_maybe_unoptimize_message - unoptimize msg not managed by a peripheral
 * @msg: the message to unoptimize
 *
 * This function is used to unoptimize a message if and only if it was
 * optimized by the core (via spi_maybe_optimize_message()).
 */
static void spi_maybe_unoptimize_message(struct spi_message *msg)
{
    if (!msg->pre_optimized && msg->optimized &&
        !msg->spi->controller->defer_optimize_message)
        __spi_unoptimize_message(msg);
}

/**
 * spi_finalize_current_message() - the current message is complete
 * @ctlr: the controller to return the message to
 *
 * Called by the driver to notify the core that the message in the front of the
 * queue is complete and can be removed from the queue.
 */
void spi_finalize_current_message(struct spi_controller *ctlr)
{
    struct spi_transfer *xfer;
    struct spi_message *mesg;
    int ret;

    mesg = ctlr->cur_msg;

    if (!ctlr->ptp_sts_supported && !ctlr->transfer_one) {
        list_for_each_entry(xfer, &mesg->transfers, transfer_list) {
            ptp_read_system_postts(xfer->ptp_sts);
            xfer->ptp_sts_word_post = xfer->len;
        }
    }

    if (unlikely(ctlr->ptp_sts_supported))
        list_for_each_entry(xfer, &mesg->transfers, transfer_list)
            WARN_ON_ONCE(xfer->ptp_sts && !xfer->timestamped);

    spi_unmap_msg(ctlr, mesg);

    if (mesg->prepared && ctlr->unprepare_message) {
        ret = ctlr->unprepare_message(ctlr, mesg);
        if (ret) {
            dev_err(&ctlr->dev, "failed to unprepare message: %d\n",
                    ret);
        }
    }

    mesg->prepared = false;

    spi_maybe_unoptimize_message(mesg);

    WRITE_ONCE(ctlr->cur_msg_incomplete, false);
    smp_mb(); /* See __spi_pump_transfer_message()... */
    if (READ_ONCE(ctlr->cur_msg_need_completion))
        complete(&ctlr->cur_msg_completion);

    trace_spi_message_done(mesg);

    mesg->state = NULL;
    if (mesg->complete)
        mesg->complete(mesg->context);
}
EXPORT_SYMBOL_GPL(spi_finalize_current_message);
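
/*
 * Example: a driver that overrides transfer_one_message() must end every
 * message with this call, whatever the outcome. A minimal sketch;
 * foo_do_transfers is hypothetical:
 *
 *	static int foo_transfer_one_message(struct spi_controller *ctlr,
 *					    struct spi_message *msg)
 *	{
 *		msg->status = foo_do_transfers(ctlr, msg);
 *		spi_finalize_current_message(ctlr);
 *		return 0;
 *	}
 */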

static int spi_start_queue(struct spi_controller *ctlr)
{
    unsigned long flags;

    spin_lock_irqsave(&ctlr->queue_lock, flags);

    if (ctlr->running || ctlr->busy) {
        spin_unlock_irqrestore(&ctlr->queue_lock, flags);
        return -EBUSY;
    }

    ctlr->running = true;
    ctlr->cur_msg = NULL;
    spin_unlock_irqrestore(&ctlr->queue_lock, flags);

    kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);

    return 0;
}

static int spi_stop_queue(struct spi_controller *ctlr)
{
    unsigned int limit = 500;
    unsigned long flags;

    /*
     * This is a bit lame, but is optimized for the common execution path.
     * A wait_queue on the ctlr->busy could be used, but then the common
     * execution path (pump_messages) would be required to call wake_up or
     * friends on every SPI message. Do this instead.
     */
    do {
        spin_lock_irqsave(&ctlr->queue_lock, flags);
        if (list_empty(&ctlr->queue) && !ctlr->busy) {
            ctlr->running = false;
            spin_unlock_irqrestore(&ctlr->queue_lock, flags);
            return 0;
        }
        spin_unlock_irqrestore(&ctlr->queue_lock, flags);
        usleep_range(10000, 11000);
    } while (--limit);

    return -EBUSY;
}

static int spi_destroy_queue(struct spi_controller *ctlr)
{
    int ret;

    ret = spi_stop_queue(ctlr);

    /*
     * kthread_flush_worker will block until all work is done.
     * If the reason that stop_queue timed out is that the work will never
     * finish, then it does no good to call flush/stop thread, so
     * return anyway.
     */
    if (ret) {
        dev_err(&ctlr->dev, "problem destroying queue\n");
        return ret;
    }

    kthread_destroy_worker(ctlr->kworker);

    return 0;
}

static int __spi_queued_transfer(struct spi_device *spi,
                                 struct spi_message *msg,
                                 bool need_pump)
{
    struct spi_controller *ctlr = spi->controller;
    unsigned long flags;

    spin_lock_irqsave(&ctlr->queue_lock, flags);

    if (!ctlr->running) {
        spin_unlock_irqrestore(&ctlr->queue_lock, flags);
        return -ESHUTDOWN;
    }
    msg->actual_length = 0;
    msg->status = -EINPROGRESS;

    list_add_tail(&msg->queue, &ctlr->queue);
    ctlr->queue_empty = false;
    if (!ctlr->busy && need_pump)
        kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);

    spin_unlock_irqrestore(&ctlr->queue_lock, flags);
    return 0;
}

/**
 * spi_queued_transfer - transfer function for queued transfers
 * @spi: SPI device which is requesting transfer
 * @msg: SPI message to be handled; it is queued onto the driver queue
 *
 * Return: zero on success, else a negative error code.
 */
static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
{
    return __spi_queued_transfer(spi, msg, true);
}

static int spi_controller_initialize_queue(struct spi_controller *ctlr)
{
    int ret;

    ctlr->transfer = spi_queued_transfer;
    if (!ctlr->transfer_one_message)
        ctlr->transfer_one_message = spi_transfer_one_message;

    /* Initialize and start queue */
    ret = spi_init_queue(ctlr);
    if (ret) {
        dev_err(&ctlr->dev, "problem initializing queue\n");
        goto err_init_queue;
    }
    ctlr->queued = true;
    ret = spi_start_queue(ctlr);
    if (ret) {
        dev_err(&ctlr->dev, "problem starting queue\n");
        goto err_start_queue;
    }

    return 0;

err_start_queue:
    spi_destroy_queue(ctlr);
err_init_queue:
    return ret;
}

/**
 * spi_flush_queue - send all pending messages in the queue from the caller's
 *                   context
 * @ctlr: controller to process queue for
 *
 * This should be used when one wants to ensure all pending messages have been
 * sent before doing something. It is used by the spi-mem code to make sure SPI
 * memory operations do not preempt regular SPI transfers that have been queued
 * before the spi-mem operation.
 */
void spi_flush_queue(struct spi_controller *ctlr)
{
    if (ctlr->transfer == spi_queued_transfer)
        __spi_pump_messages(ctlr, false);
}

/*-------------------------------------------------------------------------*/

#if defined(CONFIG_OF)
static void of_spi_parse_dt_cs_delay(struct device_node *nc,
                                     struct spi_delay *delay, const char *prop)
{
    u32 value;

    if (!of_property_read_u32(nc, prop, &value)) {
        if (value > U16_MAX) {
            delay->value = DIV_ROUND_UP(value, 1000);
            delay->unit = SPI_DELAY_UNIT_USECS;
        } else {
            delay->value = value;
            delay->unit = SPI_DELAY_UNIT_NSECS;
        }
    }
}
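
/*
 * Worked example: given "spi-cs-setup-delay-ns = <100000>" in the devicetree,
 * the value exceeds U16_MAX (65535), so the helper above stores it rounded up
 * to microseconds:
 *
 *	spi->cs_setup.value == 100;
 *	spi->cs_setup.unit  == SPI_DELAY_UNIT_USECS;
 *
 * A 500 ns setting, by contrast, is kept as-is with SPI_DELAY_UNIT_NSECS.
 */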

static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi,
                           struct device_node *nc)
{
    u32 value, cs[SPI_CS_CNT_MAX];
    int rc, idx;

    /* Mode (clock phase/polarity/etc.) */
    if (of_property_read_bool(nc, "spi-cpha"))
        spi->mode |= SPI_CPHA;
    if (of_property_read_bool(nc, "spi-cpol"))
        spi->mode |= SPI_CPOL;
    if (of_property_read_bool(nc, "spi-3wire"))
        spi->mode |= SPI_3WIRE;
    if (of_property_read_bool(nc, "spi-lsb-first"))
        spi->mode |= SPI_LSB_FIRST;
    if (of_property_read_bool(nc, "spi-cs-high"))
        spi->mode |= SPI_CS_HIGH;

    /* Device DUAL/QUAD mode */
    if (!of_property_read_u32(nc, "spi-tx-bus-width", &value)) {
        switch (value) {
        case 0:
            spi->mode |= SPI_NO_TX;
            break;
        case 1:
            break;
        case 2:
            spi->mode |= SPI_TX_DUAL;
            break;
        case 4:
            spi->mode |= SPI_TX_QUAD;
            break;
        case 8:
            spi->mode |= SPI_TX_OCTAL;
            break;
        default:
            dev_warn(&ctlr->dev,
                     "spi-tx-bus-width %d not supported\n",
                     value);
            break;
        }
    }

    if (!of_property_read_u32(nc, "spi-rx-bus-width", &value)) {
        switch (value) {
        case 0:
            spi->mode |= SPI_NO_RX;
            break;
        case 1:
            break;
        case 2:
            spi->mode |= SPI_RX_DUAL;
            break;
        case 4:
            spi->mode |= SPI_RX_QUAD;
            break;
        case 8:
            spi->mode |= SPI_RX_OCTAL;
            break;
        default:
            dev_warn(&ctlr->dev,
                     "spi-rx-bus-width %d not supported\n",
                     value);
            break;
        }
    }

    if (spi_controller_is_target(ctlr)) {
        if (!of_node_name_eq(nc, "slave")) {
            dev_err(&ctlr->dev, "%pOF is not called 'slave'\n",
                    nc);
            return -EINVAL;
        }
        return 0;
    }

    if (ctlr->num_chipselect > SPI_CS_CNT_MAX) {
        dev_err(&ctlr->dev, "No. of CS is more than max. no. of supported CS\n");
        return -EINVAL;
    }

    spi_set_all_cs_unused(spi);

    /* Device address */
    rc = of_property_read_variable_u32_array(nc, "reg", &cs[0], 1,
                                             SPI_CS_CNT_MAX);
    if (rc < 0) {
        dev_err(&ctlr->dev, "%pOF has no valid 'reg' property (%d)\n",
                nc, rc);
        return rc;
    }
    if (rc > ctlr->num_chipselect) {
        dev_err(&ctlr->dev, "%pOF has number of CS > ctlr->num_chipselect (%d)\n",
                nc, rc);
        return rc;
    }
    if ((of_property_read_bool(nc, "parallel-memories")) &&
        (!(ctlr->flags & SPI_CONTROLLER_MULTI_CS))) {
        dev_err(&ctlr->dev, "SPI controller doesn't support multi CS\n");
        return -EINVAL;
    }
    for (idx = 0; idx < rc; idx++)
        spi_set_chipselect(spi, idx, cs[idx]);

    /*
     * By default spi->chip_select[0] will hold the physical CS number,
     * so set bit 0 in spi->cs_index_mask.
     */
    spi->cs_index_mask = BIT(0);

    /* Device speed */
    if (!of_property_read_u32(nc, "spi-max-frequency", &value))
        spi->max_speed_hz = value;

    /* Device CS delays */
    of_spi_parse_dt_cs_delay(nc, &spi->cs_setup, "spi-cs-setup-delay-ns");
    of_spi_parse_dt_cs_delay(nc, &spi->cs_hold, "spi-cs-hold-delay-ns");
    of_spi_parse_dt_cs_delay(nc, &spi->cs_inactive, "spi-cs-inactive-delay-ns");

    return 0;
}

static struct spi_device *
of_register_spi_device(struct spi_controller *ctlr, struct device_node *nc)
{
    struct spi_device *spi;
    int rc;

    /* Alloc an spi_device */
    spi = spi_alloc_device(ctlr);
    if (!spi) {
        dev_err(&ctlr->dev, "spi_device alloc error for %pOF\n", nc);
        rc = -ENOMEM;
        goto err_out;
    }

    /* Select device driver */
    rc = of_alias_from_compatible(nc, spi->modalias,
                                  sizeof(spi->modalias));
    if (rc < 0) {
        dev_err(&ctlr->dev, "cannot find modalias for %pOF\n", nc);
        goto err_out;
    }

    rc = of_spi_parse_dt(ctlr, spi, nc);
    if (rc)
        goto err_out;

    /* Store a pointer to the node in the device structure */
    of_node_get(nc);

    device_set_node(&spi->dev, of_fwnode_handle(nc));

    /* Register the new device */
    rc = spi_add_device(spi);
    if (rc) {
        dev_err(&ctlr->dev, "spi_device register error %pOF\n", nc);
        goto err_of_node_put;
    }

    return spi;

err_of_node_put:
    of_node_put(nc);
err_out:
    spi_dev_put(spi);
    return ERR_PTR(rc);
}

/**
 * of_register_spi_devices() - Register child devices onto the SPI bus
 * @ctlr: Pointer to spi_controller device
 *
 * Registers an spi_device for each child node of the controller node that
 * represents a valid SPI slave.
 */
static void of_register_spi_devices(struct spi_controller *ctlr)
{
    struct spi_device *spi;
    struct device_node *nc;

    for_each_available_child_of_node(ctlr->dev.of_node, nc) {
        if (of_node_test_and_set_flag(nc, OF_POPULATED))
            continue;
        spi = of_register_spi_device(ctlr, nc);
        if (IS_ERR(spi)) {
            dev_warn(&ctlr->dev,
                     "Failed to create SPI device for %pOF\n", nc);
            of_node_clear_flag(nc, OF_POPULATED);
        }
    }
}
#else
static void of_register_spi_devices(struct spi_controller *ctlr) { }
#endif

/**
 * spi_new_ancillary_device() - Register ancillary SPI device
 * @spi: Pointer to the main SPI device registering the ancillary device
 * @chip_select: Chip Select of the ancillary device
 *
 * Register an ancillary SPI device; for example some chips have a chip-select
 * for normal device usage and another one for setup/firmware upload.
 *
 * This may only be called from the main SPI device's probe routine.
 *
 * Return: the new ancillary spi_device on success; ERR_PTR() on failure.
 */
struct spi_device *spi_new_ancillary_device(struct spi_device *spi,
                                            u8 chip_select)
{
    struct spi_controller *ctlr = spi->controller;
    struct spi_device *ancillary;
    int rc;

    /* Alloc an spi_device */
    ancillary = spi_alloc_device(ctlr);
    if (!ancillary) {
        rc = -ENOMEM;
        goto err_out;
    }

    strscpy(ancillary->modalias, "dummy", sizeof(ancillary->modalias));

    /* Use provided chip-select for ancillary device */
    spi_set_all_cs_unused(ancillary);
    spi_set_chipselect(ancillary, 0, chip_select);

    /* Take over SPI mode/speed from SPI main device */
    ancillary->max_speed_hz = spi->max_speed_hz;
    ancillary->mode = spi->mode;
    /*
     * By default spi->chip_select[0] will hold the physical CS number,
     * so set bit 0 in spi->cs_index_mask.
     */
    ancillary->cs_index_mask = BIT(0);

    WARN_ON(!mutex_is_locked(&ctlr->add_lock));

    /* Register the new device */
    rc = __spi_add_device(ancillary);
    if (rc) {
        dev_err(&spi->dev, "failed to register ancillary device\n");
        goto err_out;
    }

    return ancillary;

err_out:
    spi_dev_put(ancillary);
    return ERR_PTR(rc);
}
EXPORT_SYMBOL_GPL(spi_new_ancillary_device);
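
/*
 * Example: a probe routine registering a second chip select for firmware
 * upload, as described above. A minimal sketch (foo_probe is hypothetical,
 * further error handling elided):
 *
 *	static int foo_probe(struct spi_device *spi)
 *	{
 *		struct spi_device *fw_spi;
 *
 *		fw_spi = spi_new_ancillary_device(spi, 1);
 *		if (IS_ERR(fw_spi))
 *			return PTR_ERR(fw_spi);
 *		...
 *	}
 */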

#ifdef CONFIG_ACPI
struct acpi_spi_lookup {
    struct spi_controller *ctlr;
    u32 max_speed_hz;
    u32 mode;
    int irq;
    u8 bits_per_word;
    u8 chip_select;
    int n;
    int index;
};

static int acpi_spi_count(struct acpi_resource *ares, void *data)
{
    struct acpi_resource_spi_serialbus *sb;
    int *count = data;

    if (ares->type != ACPI_RESOURCE_TYPE_SERIAL_BUS)
        return 1;

    sb = &ares->data.spi_serial_bus;
    if (sb->type != ACPI_RESOURCE_SERIAL_TYPE_SPI)
        return 1;

    *count = *count + 1;

    return 1;
}

/**
 * acpi_spi_count_resources - Count the number of SpiSerialBus resources
 * @adev: ACPI device
 *
 * Return: the number of SpiSerialBus resources in the ACPI-device's
 * resource-list; or a negative error code.
 */
int acpi_spi_count_resources(struct acpi_device *adev)
{
    LIST_HEAD(r);
    int count = 0;
    int ret;

    ret = acpi_dev_get_resources(adev, &r, acpi_spi_count, &count);
    if (ret < 0)
        return ret;

    acpi_dev_free_resource_list(&r);

    return count;
}
EXPORT_SYMBOL_GPL(acpi_spi_count_resources);
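
/*
 * Example: a caller enumerating every SpiSerialBus entry of an ACPI node can
 * combine this with acpi_spi_device_alloc() below. A sketch only:
 *
 *	int i, n = acpi_spi_count_resources(adev);
 *
 *	if (n < 0)
 *		return n;
 *	for (i = 0; i < n; i++) {
 *		spi = acpi_spi_device_alloc(NULL, adev, i);
 *		...
 *	}
 */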

static void acpi_spi_parse_apple_properties(struct acpi_device *dev,
                                            struct acpi_spi_lookup *lookup)
{
    const union acpi_object *obj;

    if (!x86_apple_machine)
        return;

    if (!acpi_dev_get_property(dev, "spiSclkPeriod", ACPI_TYPE_BUFFER, &obj)
        && obj->buffer.length >= 4)
        lookup->max_speed_hz = NSEC_PER_SEC / *(u32 *)obj->buffer.pointer;

    if (!acpi_dev_get_property(dev, "spiWordSize", ACPI_TYPE_BUFFER, &obj)
        && obj->buffer.length == 8)
        lookup->bits_per_word = *(u64 *)obj->buffer.pointer;

    if (!acpi_dev_get_property(dev, "spiBitOrder", ACPI_TYPE_BUFFER, &obj)
        && obj->buffer.length == 8 && !*(u64 *)obj->buffer.pointer)
        lookup->mode |= SPI_LSB_FIRST;

    if (!acpi_dev_get_property(dev, "spiSPO", ACPI_TYPE_BUFFER, &obj)
        && obj->buffer.length == 8 && *(u64 *)obj->buffer.pointer)
        lookup->mode |= SPI_CPOL;

    if (!acpi_dev_get_property(dev, "spiSPH", ACPI_TYPE_BUFFER, &obj)
        && obj->buffer.length == 8 && *(u64 *)obj->buffer.pointer)
        lookup->mode |= SPI_CPHA;
}

static int acpi_spi_add_resource(struct acpi_resource *ares, void *data)
{
    struct acpi_spi_lookup *lookup = data;
    struct spi_controller *ctlr = lookup->ctlr;

    if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
        struct acpi_resource_spi_serialbus *sb;
        acpi_handle parent_handle;
        acpi_status status;

        sb = &ares->data.spi_serial_bus;
        if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_SPI) {

            if (lookup->index != -1 && lookup->n++ != lookup->index)
                return 1;

            status = acpi_get_handle(NULL,
                                     sb->resource_source.string_ptr,
                                     &parent_handle);

            if (ACPI_FAILURE(status))
                return -ENODEV;

            if (ctlr) {
                if (!device_match_acpi_handle(ctlr->dev.parent, parent_handle))
                    return -ENODEV;
            } else {
                struct acpi_device *adev;

                adev = acpi_fetch_acpi_dev(parent_handle);
                if (!adev)
                    return -ENODEV;

                ctlr = acpi_spi_find_controller_by_adev(adev);
                if (!ctlr)
                    return -EPROBE_DEFER;

                lookup->ctlr = ctlr;
            }

            /*
             * ACPI DeviceSelection numbering is handled by the
             * host controller driver in Windows and can vary
             * from driver to driver. In Linux we always expect
             * 0 .. max - 1 so we need to ask the driver to
             * translate between the two schemes.
             */
            if (ctlr->fw_translate_cs) {
                int cs = ctlr->fw_translate_cs(ctlr,
                                               sb->device_selection);
                if (cs < 0)
                    return cs;
                lookup->chip_select = cs;
            } else {
                lookup->chip_select = sb->device_selection;
            }

            lookup->max_speed_hz = sb->connection_speed;
            lookup->bits_per_word = sb->data_bit_length;

            if (sb->clock_phase == ACPI_SPI_SECOND_PHASE)
                lookup->mode |= SPI_CPHA;
            if (sb->clock_polarity == ACPI_SPI_START_HIGH)
                lookup->mode |= SPI_CPOL;
            if (sb->device_polarity == ACPI_SPI_ACTIVE_HIGH)
                lookup->mode |= SPI_CS_HIGH;
        }
    } else if (lookup->irq < 0) {
        struct resource r;

        if (acpi_dev_resource_interrupt(ares, 0, &r))
            lookup->irq = r.start;
    }

    /* Always tell the ACPI core to skip this resource */
    return 1;
}

/**
 * acpi_spi_device_alloc - Allocate a spi device, and fill it in with ACPI information
 * @ctlr: controller to which the spi device belongs
 * @adev: ACPI Device for the spi device
 * @index: Index of the spi resource inside the ACPI Node
 *
 * This should be used to allocate a new SPI device from an ACPI Device node.
 * The caller is responsible for calling spi_add_device to register the SPI device.
 *
 * If ctlr is set to NULL, the Controller for the SPI device will be looked up
 * using the resource.
 * If index is set to -1, index is not used.
 * Note: If index is -1, ctlr must be set.
 *
 * Return: a pointer to the new device, or ERR_PTR on error.
 */
struct spi_device *acpi_spi_device_alloc(struct spi_controller *ctlr,
                                         struct acpi_device *adev,
                                         int index)
{
    acpi_handle parent_handle = NULL;
    struct list_head resource_list;
    struct acpi_spi_lookup lookup = {};
    struct spi_device *spi;
    int ret;

    if (!ctlr && index == -1)
        return ERR_PTR(-EINVAL);

    lookup.ctlr = ctlr;
    lookup.irq = -1;
    lookup.index = index;
    lookup.n = 0;

    INIT_LIST_HEAD(&resource_list);
    ret = acpi_dev_get_resources(adev, &resource_list,
                                 acpi_spi_add_resource, &lookup);
    acpi_dev_free_resource_list(&resource_list);

    if (ret < 0)
        /* Found SPI in _CRS but it points to another controller */
        return ERR_PTR(ret);

    if (!lookup.max_speed_hz &&
        ACPI_SUCCESS(acpi_get_parent(adev->handle, &parent_handle)) &&
        device_match_acpi_handle(lookup.ctlr->dev.parent, parent_handle)) {
        /* Apple does not use _CRS but nested devices for SPI slaves */
        acpi_spi_parse_apple_properties(adev, &lookup);
    }

    if (!lookup.max_speed_hz)
        return ERR_PTR(-ENODEV);

    spi = spi_alloc_device(lookup.ctlr);
    if (!spi) {
        dev_err(&lookup.ctlr->dev, "failed to allocate SPI device for %s\n",
                dev_name(&adev->dev));
        return ERR_PTR(-ENOMEM);
    }

    spi_set_all_cs_unused(spi);
    spi_set_chipselect(spi, 0, lookup.chip_select);

    ACPI_COMPANION_SET(&spi->dev, adev);
    spi->max_speed_hz = lookup.max_speed_hz;
    spi->mode |= lookup.mode;
    spi->irq = lookup.irq;
    spi->bits_per_word = lookup.bits_per_word;
    /*
     * By default spi->chip_select[0] will hold the physical CS number,
     * so set bit 0 in spi->cs_index_mask.
     */
    spi->cs_index_mask = BIT(0);

    return spi;
}
EXPORT_SYMBOL_GPL(acpi_spi_device_alloc);
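
/*
 * Example: allocating and registering the first SpiSerialBus resource of an
 * ACPI node on a known controller. A sketch only, error paths elided:
 *
 *	spi = acpi_spi_device_alloc(ctlr, adev, 0);
 *	if (IS_ERR(spi))
 *		return PTR_ERR(spi);
 *	ret = spi_add_device(spi);
 *	if (ret)
 *		spi_dev_put(spi);
 */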

static acpi_status acpi_register_spi_device(struct spi_controller *ctlr,
                                            struct acpi_device *adev)
{
    struct spi_device *spi;

    if (acpi_bus_get_status(adev) || !adev->status.present ||
        acpi_device_enumerated(adev))
        return AE_OK;

    spi = acpi_spi_device_alloc(ctlr, adev, -1);
    if (IS_ERR(spi)) {
        if (PTR_ERR(spi) == -ENOMEM)
            return AE_NO_MEMORY;
        else
            return AE_OK;
    }

    acpi_set_modalias(adev, acpi_device_hid(adev), spi->modalias,
                      sizeof(spi->modalias));

    acpi_device_set_enumerated(adev);

    adev->power.flags.ignore_parent = true;
    if (spi_add_device(spi)) {
        adev->power.flags.ignore_parent = false;
        dev_err(&ctlr->dev, "failed to add SPI device %s from ACPI\n",
                dev_name(&adev->dev));
        spi_dev_put(spi);
    }

    return AE_OK;
}

static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level,
                                       void *data, void **return_value)
{
    struct acpi_device *adev = acpi_fetch_acpi_dev(handle);
    struct spi_controller *ctlr = data;

    if (!adev)
        return AE_OK;

    return acpi_register_spi_device(ctlr, adev);
}

#define SPI_ACPI_ENUMERATE_MAX_DEPTH 32

static void acpi_register_spi_devices(struct spi_controller *ctlr)
{
    acpi_status status;
    acpi_handle handle;

    handle = ACPI_HANDLE(ctlr->dev.parent);
    if (!handle)
        return;

    status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
                                 SPI_ACPI_ENUMERATE_MAX_DEPTH,
                                 acpi_spi_add_device, NULL, ctlr, NULL);
    if (ACPI_FAILURE(status))
        dev_warn(&ctlr->dev, "failed to enumerate SPI slaves\n");
}
#else
static inline void acpi_register_spi_devices(struct spi_controller *ctlr) {}
#endif /* CONFIG_ACPI */

static void spi_controller_release(struct device *dev)
{
    struct spi_controller *ctlr;

    ctlr = container_of(dev, struct spi_controller, dev);
    kfree(ctlr);
}

static struct class spi_master_class = {
    .name = "spi_master",
    .dev_release = spi_controller_release,
    .dev_groups = spi_master_groups,
};

#ifdef CONFIG_SPI_SLAVE
/**
 * spi_target_abort - abort the ongoing transfer request on an SPI slave
 *                    controller
 * @spi: device used for the current transfer
 */
int spi_target_abort(struct spi_device *spi)
{
    struct spi_controller *ctlr = spi->controller;

    if (spi_controller_is_target(ctlr) && ctlr->target_abort)
        return ctlr->target_abort(ctlr);

    return -ENOTSUPP;
}
EXPORT_SYMBOL_GPL(spi_target_abort);

static ssize_t slave_show(struct device *dev, struct device_attribute *attr,
                          char *buf)
{
    struct spi_controller *ctlr = container_of(dev, struct spi_controller,
                                               dev);
    struct device *child;

    child = device_find_any_child(&ctlr->dev);
    return sysfs_emit(buf, "%s\n", child ? to_spi_device(child)->modalias : NULL);
}

static ssize_t slave_store(struct device *dev, struct device_attribute *attr,
                           const char *buf, size_t count)
{
    struct spi_controller *ctlr = container_of(dev, struct spi_controller,
                                               dev);
    struct spi_device *spi;
    struct device *child;
    char name[32];
    int rc;

    rc = sscanf(buf, "%31s", name);
    if (rc != 1 || !name[0])
        return -EINVAL;

    child = device_find_any_child(&ctlr->dev);
    if (child) {
        /* Remove registered slave */
        device_unregister(child);
        put_device(child);
    }

    if (strcmp(name, "(null)")) {
        /* Register new slave */
        spi = spi_alloc_device(ctlr);
        if (!spi)
            return -ENOMEM;

        strscpy(spi->modalias, name, sizeof(spi->modalias));

        rc = spi_add_device(spi);
        if (rc) {
            spi_dev_put(spi);
            return rc;
        }
    }

    return count;
}

static DEVICE_ATTR_RW(slave);

static struct attribute *spi_slave_attrs[] = {
    &dev_attr_slave.attr,
    NULL,
};

static const struct attribute_group spi_slave_group = {
    .attrs = spi_slave_attrs,
};

static const struct attribute_group *spi_slave_groups[] = {
    &spi_controller_statistics_group,
    &spi_slave_group,
    NULL,
};

static struct class spi_slave_class = {
    .name = "spi_slave",
    .dev_release = spi_controller_release,
    .dev_groups = spi_slave_groups,
};
#else
extern struct class spi_slave_class; /* dummy */
#endif

/**
 * __spi_alloc_controller - allocate an SPI master or slave controller
 * @dev: the controller, possibly using the platform_bus
 * @size: how much zeroed driver-private data to allocate; the pointer to this
 *    memory is in the driver_data field of the returned device, accessible
 *    with spi_controller_get_devdata(); the memory is cacheline aligned;
 *    drivers granting DMA access to portions of their private data need to
 *    round up @size using ALIGN(size, dma_get_cache_alignment()).
 * @slave: flag indicating whether to allocate an SPI master (false) or SPI
 *    slave (true) controller
 * Context: can sleep
 *
 * This call is used only by SPI controller drivers, which are the
 * only ones directly touching chip registers. It's how they allocate
 * an spi_controller structure, prior to calling spi_register_controller().
 *
 * This must be called from context that can sleep.
 *
 * The caller is responsible for assigning the bus number and initializing the
 * controller's methods before calling spi_register_controller(); and (after
 * errors adding the device) calling spi_controller_put() to prevent a memory
 * leak.
 *
 * Return: the SPI controller structure on success, else NULL.
 */
struct spi_controller *__spi_alloc_controller(struct device *dev,
                                              unsigned int size, bool slave)
{
    struct spi_controller *ctlr;
    size_t ctlr_size = ALIGN(sizeof(*ctlr), dma_get_cache_alignment());

    if (!dev)
        return NULL;

    ctlr = kzalloc(size + ctlr_size, GFP_KERNEL);
    if (!ctlr)
        return NULL;

    device_initialize(&ctlr->dev);
    INIT_LIST_HEAD(&ctlr->queue);
    spin_lock_init(&ctlr->queue_lock);
    spin_lock_init(&ctlr->bus_lock_spinlock);
    mutex_init(&ctlr->bus_lock_mutex);
    mutex_init(&ctlr->io_mutex);
    mutex_init(&ctlr->add_lock);
    ctlr->bus_num = -1;
    ctlr->num_chipselect = 1;
    ctlr->slave = slave;
    if (IS_ENABLED(CONFIG_SPI_SLAVE) && slave)
        ctlr->dev.class = &spi_slave_class;
    else
        ctlr->dev.class = &spi_master_class;
    ctlr->dev.parent = dev;
    pm_suspend_ignore_children(&ctlr->dev, true);
    spi_controller_set_devdata(ctlr, (void *)ctlr + ctlr_size);

    return ctlr;
}
EXPORT_SYMBOL_GPL(__spi_alloc_controller);
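
/*
 * Example: controller drivers normally reach this through a wrapper such as
 * spi_alloc_master(). A minimal probe-time sketch (struct foo_priv is
 * hypothetical):
 *
 *	ctlr = spi_alloc_master(&pdev->dev, sizeof(struct foo_priv));
 *	if (!ctlr)
 *		return -ENOMEM;
 *	priv = spi_controller_get_devdata(ctlr);	// the zeroed @size area
 */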

static void devm_spi_release_controller(struct device *dev, void *ctlr)
{
    spi_controller_put(*(struct spi_controller **)ctlr);
}

/**
 * __devm_spi_alloc_controller - resource-managed __spi_alloc_controller()
 * @dev: physical device of SPI controller
 * @size: how much zeroed driver-private data to allocate
 * @slave: whether to allocate an SPI master (false) or SPI slave (true)
 * Context: can sleep
 *
 * Allocate an SPI controller and automatically release a reference on it
 * when @dev is unbound from its driver. Drivers are thus relieved from
 * having to call spi_controller_put().
 *
 * The arguments to this function are identical to __spi_alloc_controller().
 *
 * Return: the SPI controller structure on success, else NULL.
 */
struct spi_controller *__devm_spi_alloc_controller(struct device *dev,
                                                   unsigned int size,
                                                   bool slave)
{
    struct spi_controller **ptr, *ctlr;

    ptr = devres_alloc(devm_spi_release_controller, sizeof(*ptr),
                       GFP_KERNEL);
    if (!ptr)
        return NULL;

    ctlr = __spi_alloc_controller(dev, size, slave);
    if (ctlr) {
        ctlr->devm_allocated = true;
        *ptr = ctlr;
        devres_add(dev, ptr);
    } else {
        devres_free(ptr);
    }

    return ctlr;
}
EXPORT_SYMBOL_GPL(__devm_spi_alloc_controller);

/**
 * spi_get_gpio_descs() - grab chip select GPIOs for the master
 * @ctlr: The SPI master to grab GPIO descriptors for
 */
static int spi_get_gpio_descs(struct spi_controller *ctlr)
{
    int nb, i;
    struct gpio_desc **cs;
    struct device *dev = &ctlr->dev;
    unsigned long native_cs_mask = 0;
    unsigned int num_cs_gpios = 0;

    nb = gpiod_count(dev, "cs");
    if (nb < 0) {
        /* No GPIOs at all is fine, else return the error */
        if (nb == -ENOENT)
            return 0;
        return nb;
    }

    ctlr->num_chipselect = max_t(int, nb, ctlr->num_chipselect);

    cs = devm_kcalloc(dev, ctlr->num_chipselect, sizeof(*cs),
                      GFP_KERNEL);
    if (!cs)
        return -ENOMEM;
    ctlr->cs_gpiods = cs;

    for (i = 0; i < nb; i++) {
        /*
         * Most chipselects are active low, the inverted
         * semantics are handled by special quirks in gpiolib,
         * so initializing them GPIOD_OUT_LOW here means
         * "unasserted", in most cases this will drive the physical
         * line high.
         */
        cs[i] = devm_gpiod_get_index_optional(dev, "cs", i,
                                              GPIOD_OUT_LOW);
        if (IS_ERR(cs[i]))
            return PTR_ERR(cs[i]);

        if (cs[i]) {
            /*
             * If we find a CS GPIO, name it after the device and
             * chip select line.
             */
            char *gpioname;

            gpioname = devm_kasprintf(dev, GFP_KERNEL, "%s CS%d",
                                      dev_name(dev), i);
            if (!gpioname)
                return -ENOMEM;
            gpiod_set_consumer_name(cs[i], gpioname);
            num_cs_gpios++;
            continue;
        }

        if (ctlr->max_native_cs && i >= ctlr->max_native_cs) {
            dev_err(dev, "Invalid native chip select %d\n", i);
            return -EINVAL;
        }
        native_cs_mask |= BIT(i);
    }

    ctlr->unused_native_cs = ffs(~native_cs_mask) - 1;

    if ((ctlr->flags & SPI_CONTROLLER_GPIO_SS) && num_cs_gpios &&
        ctlr->max_native_cs && ctlr->unused_native_cs >= ctlr->max_native_cs) {
        dev_err(dev, "No unused native chip select available\n");
        return -EINVAL;
    }

    return 0;
}

static int spi_controller_check_ops(struct spi_controller *ctlr)
{
    /*
     * The controller may implement only the high-level SPI-memory like
     * operations if it does not support regular SPI transfers, and this
     * is a valid use case.
     * If ->mem_ops or ->mem_ops->exec_op is NULL, we request that at least
     * one of the ->transfer_xxx() methods be implemented.
     */
    if (!ctlr->mem_ops || !ctlr->mem_ops->exec_op) {
        if (!ctlr->transfer && !ctlr->transfer_one &&
            !ctlr->transfer_one_message) {
            return -EINVAL;
        }
    }

    return 0;
}

/* Allocate dynamic bus number using Linux idr */
static int spi_controller_id_alloc(struct spi_controller *ctlr, int start, int end)
{
    int id;

    mutex_lock(&board_lock);
    id = idr_alloc(&spi_master_idr, ctlr, start, end, GFP_KERNEL);
    mutex_unlock(&board_lock);
    if (WARN(id < 0, "couldn't get idr"))
        return id == -ENOSPC ? -EBUSY : id;
    ctlr->bus_num = id;
    return 0;
}

/**
 * spi_register_controller - register SPI master or slave controller
 * @ctlr: initialized master, originally from spi_alloc_master() or
 *    spi_alloc_slave()
 * Context: can sleep
 *
 * SPI controllers connect to their drivers using some non-SPI bus,
 * such as the platform bus. The final stage of probe() in that code
 * includes calling spi_register_controller() to hook up to this SPI bus glue.
 *
 * SPI controllers use board specific (often SOC specific) bus numbers,
 * and board-specific addressing for SPI devices combines those numbers
 * with chip select numbers. Since SPI does not directly support dynamic
 * device identification, boards need configuration tables telling which
 * chip is at which address.
 *
 * This must be called from context that can sleep. It returns zero on
 * success, else a negative error code (dropping the controller's refcount).
 * After a successful return, the caller is responsible for calling
 * spi_unregister_controller().
 *
 * Return: zero on success, else a negative error code.
 */
int spi_register_controller(struct spi_controller *ctlr)
{
    struct device *dev = ctlr->dev.parent;
    struct boardinfo *bi;
    int first_dynamic;
    int status;
    int idx;

    if (!dev)
        return -ENODEV;

    /*
     * Make sure all necessary hooks are implemented before registering
     * the SPI controller.
     */
    status = spi_controller_check_ops(ctlr);
    if (status)
        return status;

    if (ctlr->bus_num < 0)
        ctlr->bus_num = of_alias_get_id(ctlr->dev.of_node, "spi");
    if (ctlr->bus_num >= 0) {
        /* Devices with a fixed bus number must register with that number */
        status = spi_controller_id_alloc(ctlr, ctlr->bus_num, ctlr->bus_num + 1);
        if (status)
            return status;
    }
    if (ctlr->bus_num < 0) {
        first_dynamic = of_alias_get_highest_id("spi");
        if (first_dynamic < 0)
            first_dynamic = 0;
        else
            first_dynamic++;

        status = spi_controller_id_alloc(ctlr, first_dynamic, 0);
        if (status)
            return status;
    }
    ctlr->bus_lock_flag = 0;
    init_completion(&ctlr->xfer_completion);
    init_completion(&ctlr->cur_msg_completion);
    if (!ctlr->max_dma_len)
        ctlr->max_dma_len = INT_MAX;

    /*
     * Register the device, then userspace will see it.
     * Registration fails if the bus ID is in use.
     */
    dev_set_name(&ctlr->dev, "spi%u", ctlr->bus_num);

    if (!spi_controller_is_target(ctlr) && ctlr->use_gpio_descriptors) {
        status = spi_get_gpio_descs(ctlr);
        if (status)
            goto free_bus_id;
        /*
         * A controller using GPIO descriptors always
         * supports SPI_CS_HIGH if need be.
         */
        ctlr->mode_bits |= SPI_CS_HIGH;
    }

    /*
     * Even if it's just one always-selected device, there must
     * be at least one chipselect.
     */
    if (!ctlr->num_chipselect) {
        status = -EINVAL;
        goto free_bus_id;
    }

    /* Setting last_cs to SPI_INVALID_CS means no chip selected */
    for (idx = 0; idx < SPI_CS_CNT_MAX; idx++)
        ctlr->last_cs[idx] = SPI_INVALID_CS;

    status = device_add(&ctlr->dev);
    if (status < 0)
        goto free_bus_id;
    dev_dbg(dev, "registered %s %s\n",
            spi_controller_is_target(ctlr) ? "target" : "host",
            dev_name(&ctlr->dev));

    /*
     * If we're using a queued driver, start the queue. Note that we don't
     * need the queueing logic if the driver is only supporting high-level
     * memory operations.
     */
    if (ctlr->transfer) {
        dev_info(dev, "controller is unqueued, this is deprecated\n");
    } else if (ctlr->transfer_one || ctlr->transfer_one_message) {
        status = spi_controller_initialize_queue(ctlr);
        if (status) {
            device_del(&ctlr->dev);
            goto free_bus_id;
        }
    }
    /* Add statistics */
    ctlr->pcpu_statistics = spi_alloc_pcpu_stats(dev);
    if (!ctlr->pcpu_statistics) {
        dev_err(dev, "Error allocating per-cpu statistics\n");
        status = -ENOMEM;
        goto destroy_queue;
    }

    mutex_lock(&board_lock);
    list_add_tail(&ctlr->list, &spi_controller_list);
    list_for_each_entry(bi, &board_list, list)
        spi_match_controller_to_boardinfo(ctlr, &bi->board_info);
    mutex_unlock(&board_lock);

    /* Register devices from the device tree and ACPI */
    of_register_spi_devices(ctlr);
    acpi_register_spi_devices(ctlr);
    return status;

destroy_queue:
    spi_destroy_queue(ctlr);
free_bus_id:
    mutex_lock(&board_lock);
    idr_remove(&spi_master_idr, ctlr->bus_num);
    mutex_unlock(&board_lock);
    return status;
}
EXPORT_SYMBOL_GPL(spi_register_controller);
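
/*
 * Example: the tail of a typical controller probe(), pairing the allocation
 * shown earlier with registration via the managed variant below. A sketch
 * only; foo_transfer_one is hypothetical:
 *
 *	ctlr->num_chipselect = 4;
 *	ctlr->transfer_one = foo_transfer_one;
 *	ret = devm_spi_register_controller(&pdev->dev, ctlr);
 *	if (ret)
 *		return ret;
 */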

static void devm_spi_unregister(struct device *dev, void *res)
{
    spi_unregister_controller(*(struct spi_controller **)res);
}

/**
 * devm_spi_register_controller - register managed SPI master or slave
 *                                controller
 * @dev: device managing SPI controller
 * @ctlr: initialized controller, originally from spi_alloc_master() or
 *    spi_alloc_slave()
 * Context: can sleep
 *
 * Register an SPI controller as with spi_register_controller(); the
 * controller will automatically be unregistered and freed when @dev is
 * unbound from its driver.
 *
 * Return: zero on success, else a negative error code.
 */
int devm_spi_register_controller(struct device *dev,
                                 struct spi_controller *ctlr)
{
    struct spi_controller **ptr;
    int ret;

    ptr = devres_alloc(devm_spi_unregister, sizeof(*ptr), GFP_KERNEL);
    if (!ptr)
        return -ENOMEM;

    ret = spi_register_controller(ctlr);
    if (!ret) {
        *ptr = ctlr;
        devres_add(dev, ptr);
    } else {
        devres_free(ptr);
    }

    return ret;
}
EXPORT_SYMBOL_GPL(devm_spi_register_controller);
3433
__unregister(struct device * dev,void * null)3434 static int __unregister(struct device *dev, void *null)
3435 {
3436 spi_unregister_device(to_spi_device(dev));
3437 return 0;
3438 }
3439
3440 /**
3441 * spi_unregister_controller - unregister SPI master or slave controller
3442 * @ctlr: the controller being unregistered
3443 * Context: can sleep
3444 *
3445 * This call is used only by SPI controller drivers, which are the
3446 * only ones directly touching chip registers.
3447 *
3448 * This must be called from context that can sleep.
3449 *
3450 * Note that this function also drops a reference to the controller.
3451 */
spi_unregister_controller(struct spi_controller * ctlr)3452 void spi_unregister_controller(struct spi_controller *ctlr)
3453 {
3454 struct spi_controller *found;
3455 int id = ctlr->bus_num;
3456
3457 /* Prevent addition of new devices, unregister existing ones */
3458 if (IS_ENABLED(CONFIG_SPI_DYNAMIC))
3459 mutex_lock(&ctlr->add_lock);
3460
3461 device_for_each_child(&ctlr->dev, NULL, __unregister);
3462
3463 /* First make sure that this controller was ever added */
3464 mutex_lock(&board_lock);
3465 found = idr_find(&spi_master_idr, id);
3466 mutex_unlock(&board_lock);
3467 if (ctlr->queued) {
3468 if (spi_destroy_queue(ctlr))
3469 dev_err(&ctlr->dev, "queue remove failed\n");
3470 }
3471 mutex_lock(&board_lock);
3472 list_del(&ctlr->list);
3473 mutex_unlock(&board_lock);
3474
3475 device_del(&ctlr->dev);
3476
3477 /* Free bus id */
3478 mutex_lock(&board_lock);
3479 if (found == ctlr)
3480 idr_remove(&spi_master_idr, id);
3481 mutex_unlock(&board_lock);
3482
3483 if (IS_ENABLED(CONFIG_SPI_DYNAMIC))
3484 mutex_unlock(&ctlr->add_lock);
3485
3486 /*
3487 * Release the last reference on the controller if its driver
3488 * has not yet been converted to devm_spi_alloc_master/slave().
3489 */
3490 if (!ctlr->devm_allocated)
3491 put_device(&ctlr->dev);
3492 }
3493 EXPORT_SYMBOL_GPL(spi_unregister_controller);
3494
__spi_check_suspended(const struct spi_controller * ctlr)3495 static inline int __spi_check_suspended(const struct spi_controller *ctlr)
3496 {
3497 return ctlr->flags & SPI_CONTROLLER_SUSPENDED ? -ESHUTDOWN : 0;
3498 }
3499
__spi_mark_suspended(struct spi_controller * ctlr)3500 static inline void __spi_mark_suspended(struct spi_controller *ctlr)
3501 {
3502 mutex_lock(&ctlr->bus_lock_mutex);
3503 ctlr->flags |= SPI_CONTROLLER_SUSPENDED;
3504 mutex_unlock(&ctlr->bus_lock_mutex);
3505 }
3506
__spi_mark_resumed(struct spi_controller * ctlr)3507 static inline void __spi_mark_resumed(struct spi_controller *ctlr)
3508 {
3509 mutex_lock(&ctlr->bus_lock_mutex);
3510 ctlr->flags &= ~SPI_CONTROLLER_SUSPENDED;
3511 mutex_unlock(&ctlr->bus_lock_mutex);
3512 }
3513
spi_controller_suspend(struct spi_controller * ctlr)3514 int spi_controller_suspend(struct spi_controller *ctlr)
3515 {
3516 int ret = 0;
3517
3518 /* Basically no-ops for non-queued controllers */
3519 if (ctlr->queued) {
3520 ret = spi_stop_queue(ctlr);
3521 if (ret)
3522 dev_err(&ctlr->dev, "queue stop failed\n");
3523 }
3524
3525 __spi_mark_suspended(ctlr);
3526 return ret;
3527 }
3528 EXPORT_SYMBOL_GPL(spi_controller_suspend);
3529
spi_controller_resume(struct spi_controller * ctlr)3530 int spi_controller_resume(struct spi_controller *ctlr)
3531 {
3532 int ret = 0;
3533
3534 __spi_mark_resumed(ctlr);
3535
3536 if (ctlr->queued) {
3537 ret = spi_start_queue(ctlr);
3538 if (ret)
3539 dev_err(&ctlr->dev, "queue restart failed\n");
3540 }
3541 return ret;
3542 }
3543 EXPORT_SYMBOL_GPL(spi_controller_resume);

/*-------------------------------------------------------------------------*/

/* Core methods for spi_message alterations */

static void __spi_replace_transfers_release(struct spi_controller *ctlr,
					    struct spi_message *msg,
					    void *res)
{
	struct spi_replaced_transfers *rxfer = res;
	size_t i;

	/* Call extra callback if requested */
	if (rxfer->release)
		rxfer->release(ctlr, msg, res);

	/* Insert replaced transfers back into the message */
	list_splice(&rxfer->replaced_transfers, rxfer->replaced_after);

	/* Remove the formerly inserted entries */
	for (i = 0; i < rxfer->inserted; i++)
		list_del(&rxfer->inserted_transfers[i].transfer_list);
}

/**
 * spi_replace_transfers - replace transfers with several transfers
 *                         and register change with spi_message.resources
 * @msg: the spi_message we work upon
 * @xfer_first: the first spi_transfer we want to replace
 * @remove: number of transfers to remove
 * @insert: the number of transfers we want to insert instead
 * @release: extra release code necessary in some circumstances
 * @extradatasize: extra data to allocate (with alignment guarantees
 *                 of struct @spi_transfer)
 * @gfp: gfp flags
 *
 * Returns: pointer to @spi_replaced_transfers,
 *          PTR_ERR(...) in case of errors.
 */
static struct spi_replaced_transfers *spi_replace_transfers(
	struct spi_message *msg,
	struct spi_transfer *xfer_first,
	size_t remove,
	size_t insert,
	spi_replaced_release_t release,
	size_t extradatasize,
	gfp_t gfp)
{
	struct spi_replaced_transfers *rxfer;
	struct spi_transfer *xfer;
	size_t i;

	/* Allocate the structure using spi_res */
	rxfer = spi_res_alloc(msg->spi, __spi_replace_transfers_release,
			      struct_size(rxfer, inserted_transfers, insert)
			      + extradatasize,
			      gfp);
	if (!rxfer)
		return ERR_PTR(-ENOMEM);

	/* The release code to invoke before running the generic release */
	rxfer->release = release;

	/* Assign extradata */
	if (extradatasize)
		rxfer->extradata =
			&rxfer->inserted_transfers[insert];

	/* Init the replaced_transfers list */
	INIT_LIST_HEAD(&rxfer->replaced_transfers);

	/*
	 * Assign the list_entry after which we should reinsert
	 * the @replaced_transfers - it may be spi_message.transfers!
	 */
	rxfer->replaced_after = xfer_first->transfer_list.prev;

	/* Remove the requested number of transfers */
	for (i = 0; i < remove; i++) {
		/*
		 * If the entry after replaced_after is msg->transfers,
		 * then we have been requested to remove more transfers
		 * than are in the list.
		 */
		if (rxfer->replaced_after->next == &msg->transfers) {
			dev_err(&msg->spi->dev,
				"requested to remove more spi_transfers than are available\n");
			/* Insert replaced transfers back into the message */
			list_splice(&rxfer->replaced_transfers,
				    rxfer->replaced_after);

			/* Free the spi_replace_transfer structure... */
			spi_res_free(rxfer);

			/* ...and return with an error */
			return ERR_PTR(-EINVAL);
		}

		/*
		 * Remove the entry after replaced_after from the list of
		 * transfers and add it to the list of replaced_transfers.
		 */
		list_move_tail(rxfer->replaced_after->next,
			       &rxfer->replaced_transfers);
	}

	/*
	 * Create copies of the first transfer being removed, with
	 * identical settings, for each of the inserted transfers.
	 */
	for (i = 0; i < insert; i++) {
		/* We need to run in reverse order */
		xfer = &rxfer->inserted_transfers[insert - 1 - i];

		/* Copy all spi_transfer data */
		memcpy(xfer, xfer_first, sizeof(*xfer));

		/* Add to list */
		list_add(&xfer->transfer_list, rxfer->replaced_after);

		/* Clear cs_change and delay for all but the last */
		if (i) {
			xfer->cs_change = false;
			xfer->delay.value = 0;
		}
	}

	/* Set up inserted... */
	rxfer->inserted = insert;

	/* ...and register it with spi_res/spi_message */
	spi_res_add(msg, rxfer);

	return rxfer;
}

static int __spi_split_transfer_maxsize(struct spi_controller *ctlr,
					struct spi_message *msg,
					struct spi_transfer **xferp,
					size_t maxsize)
{
	struct spi_transfer *xfer = *xferp, *xfers;
	struct spi_replaced_transfers *srt;
	size_t offset;
	size_t count, i;

	/* Calculate how many we have to replace */
	count = DIV_ROUND_UP(xfer->len, maxsize);

	/* Create replacement */
	srt = spi_replace_transfers(msg, xfer, 1, count, NULL, 0, GFP_KERNEL);
	if (IS_ERR(srt))
		return PTR_ERR(srt);
	xfers = srt->inserted_transfers;

	/*
	 * Now handle each of those newly inserted spi_transfers.
	 * Note that the replacement spi_transfers are all preset
	 * to the same values as *xferp, so tx_buf, rx_buf and len
	 * are all identical (as well as most others),
	 * so we just have to fix up len and the pointers.
	 */

	/*
	 * The first transfer just needs the length modified, so we
	 * run it outside the loop.
	 */
	xfers[0].len = min_t(size_t, maxsize, xfer[0].len);

	/* All the others need rx_buf/tx_buf also set */
	for (i = 1, offset = maxsize; i < count; offset += maxsize, i++) {
		/* Update rx_buf, tx_buf and DMA */
		if (xfers[i].rx_buf)
			xfers[i].rx_buf += offset;
		if (xfers[i].tx_buf)
			xfers[i].tx_buf += offset;

		/* Update length */
		xfers[i].len = min(maxsize, xfers[i].len - offset);
	}

	/*
	 * We set up xferp to the last entry we have inserted,
	 * so that we skip those already split transfers.
	 */
	*xferp = &xfers[count - 1];

	/* Increment statistics counters */
	SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics,
				       transfers_split_maxsize);
	SPI_STATISTICS_INCREMENT_FIELD(msg->spi->pcpu_statistics,
				       transfers_split_maxsize);

	return 0;
}

/**
 * spi_split_transfers_maxsize - split spi transfers into multiple transfers
 *                               when an individual transfer exceeds a
 *                               certain size
 * @ctlr: the @spi_controller for this transfer
 * @msg: the @spi_message to transform
 * @maxsize: the maximum transfer size, in bytes, above which a transfer
 *           is split
 *
 * This function allocates resources that are automatically freed during the
 * spi message unoptimize phase, so this function should only be called from
 * optimize_message callbacks.
 *
 * Return: status of transformation
 */
int spi_split_transfers_maxsize(struct spi_controller *ctlr,
				struct spi_message *msg,
				size_t maxsize)
{
	struct spi_transfer *xfer;
	int ret;

	/*
	 * Iterate over the transfer_list,
	 * but note that xfer is advanced to the last transfer inserted
	 * to avoid checking sizes again unnecessarily (also xfer does
	 * potentially belong to a different list by the time the
	 * replacement has happened).
	 */
	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		if (xfer->len > maxsize) {
			ret = __spi_split_transfer_maxsize(ctlr, msg, &xfer,
							   maxsize);
			if (ret)
				return ret;
		}
	}

	return 0;
}
EXPORT_SYMBOL_GPL(spi_split_transfers_maxsize);
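
/*
 * Example (illustrative sketch): a controller whose DMA engine cannot move
 * more than 64 KiB per transfer could enforce that limit from its
 * optimize_message() callback. The "my_" name is hypothetical.
 *
 *	static int my_optimize_message(struct spi_message *msg)
 *	{
 *		return spi_split_transfers_maxsize(msg->spi->controller,
 *						   msg, SZ_64K);
 *	}
 */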

/**
 * spi_split_transfers_maxwords - split SPI transfers into multiple transfers
 *                                when an individual transfer exceeds a
 *                                certain number of SPI words
 * @ctlr: the @spi_controller for this transfer
 * @msg: the @spi_message to transform
 * @maxwords: the number of words to limit each transfer to
 *
 * This function allocates resources that are automatically freed during the
 * spi message unoptimize phase, so this function should only be called from
 * optimize_message callbacks.
 *
 * Return: status of transformation
 */
int spi_split_transfers_maxwords(struct spi_controller *ctlr,
				 struct spi_message *msg,
				 size_t maxwords)
{
	struct spi_transfer *xfer;

	/*
	 * Iterate over the transfer_list,
	 * but note that xfer is advanced to the last transfer inserted
	 * to avoid checking sizes again unnecessarily (also xfer does
	 * potentially belong to a different list by the time the
	 * replacement has happened).
	 */
	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		size_t maxsize;
		int ret;

		maxsize = maxwords * roundup_pow_of_two(BITS_TO_BYTES(xfer->bits_per_word));
		if (xfer->len > maxsize) {
			ret = __spi_split_transfer_maxsize(ctlr, msg, &xfer,
							   maxsize);
			if (ret)
				return ret;
		}
	}

	return 0;
}
EXPORT_SYMBOL_GPL(spi_split_transfers_maxwords);
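
/*
 * Example (illustrative sketch): the word-based variant suits hardware whose
 * limit is expressed in words rather than bytes, e.g. a 16-word FIFO:
 *
 *	ret = spi_split_transfers_maxwords(ctlr, msg, 16);
 */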

/*-------------------------------------------------------------------------*/

/*
 * Core methods for SPI controller protocol drivers. Some of the
 * other core methods are currently defined as inline functions.
 */

static int __spi_validate_bits_per_word(struct spi_controller *ctlr,
					u8 bits_per_word)
{
	if (ctlr->bits_per_word_mask) {
		/* Only 32 bits fit in the mask */
		if (bits_per_word > 32)
			return -EINVAL;
		if (!(ctlr->bits_per_word_mask & SPI_BPW_MASK(bits_per_word)))
			return -EINVAL;
	}

	return 0;
}

/**
 * spi_set_cs_timing - configure CS setup, hold, and inactive delays
 * @spi: the device that requires specific CS timing configuration
 *
 * Return: zero on success, else a negative error code.
 */
static int spi_set_cs_timing(struct spi_device *spi)
{
	struct device *parent = spi->controller->dev.parent;
	int status = 0;

	if (spi->controller->set_cs_timing && !spi_get_csgpiod(spi, 0)) {
		if (spi->controller->auto_runtime_pm) {
			status = pm_runtime_get_sync(parent);
			if (status < 0) {
				pm_runtime_put_noidle(parent);
				dev_err(&spi->controller->dev, "Failed to power device: %d\n",
					status);
				return status;
			}

			status = spi->controller->set_cs_timing(spi);
			pm_runtime_mark_last_busy(parent);
			pm_runtime_put_autosuspend(parent);
		} else {
			status = spi->controller->set_cs_timing(spi);
		}
	}
	return status;
}

/**
 * spi_setup - setup SPI mode and clock rate
 * @spi: the device whose settings are being modified
 * Context: can sleep, and no requests are queued to the device
 *
 * SPI protocol drivers may need to update the transfer mode if the
 * device doesn't work with its default. They may likewise need
 * to update clock rates or word sizes from initial values. This function
 * changes those settings, and must be called from a context that can sleep.
 * Except for SPI_CS_HIGH, which takes effect immediately, the changes take
 * effect the next time the device is selected and data is transferred to
 * or from it. When this function returns, the SPI device is deselected.
 *
 * Note that this call will fail if the protocol driver specifies an option
 * that the underlying controller or its driver does not support. For
 * example, not all hardware supports wire transfers using nine bit words,
 * LSB-first wire encoding, or active-high chipselects.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_setup(struct spi_device *spi)
{
	unsigned bad_bits, ugly_bits;
	int status;

	/*
	 * Check mode to prevent any two of DUAL, QUAD and NO_MOSI/MISO
	 * from being set at the same time.
	 */
	if ((hweight_long(spi->mode &
		(SPI_TX_DUAL | SPI_TX_QUAD | SPI_NO_TX)) > 1) ||
	    (hweight_long(spi->mode &
		(SPI_RX_DUAL | SPI_RX_QUAD | SPI_NO_RX)) > 1)) {
		dev_err(&spi->dev,
			"setup: can not select any two of dual, quad and no-rx/tx at the same time\n");
		return -EINVAL;
	}
	/* If it is SPI_3WIRE mode, DUAL and QUAD should be forbidden */
	if ((spi->mode & SPI_3WIRE) && (spi->mode &
		(SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL |
		 SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL)))
		return -EINVAL;
	/* Check against conflicting MOSI idle configuration */
	if ((spi->mode & SPI_MOSI_IDLE_LOW) && (spi->mode & SPI_MOSI_IDLE_HIGH)) {
		dev_err(&spi->dev,
			"setup: MOSI configured to idle low and high at the same time.\n");
		return -EINVAL;
	}
	/*
	 * Help drivers fail *cleanly* when they need options
	 * that aren't supported with their current controller.
	 * SPI_CS_WORD has a fallback software implementation,
	 * so it is ignored here.
	 */
	bad_bits = spi->mode & ~(spi->controller->mode_bits | SPI_CS_WORD |
				 SPI_NO_TX | SPI_NO_RX);
	ugly_bits = bad_bits &
		    (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL |
		     SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL);
	if (ugly_bits) {
		dev_warn(&spi->dev,
			 "setup: ignoring unsupported mode bits %x\n",
			 ugly_bits);
		spi->mode &= ~ugly_bits;
		bad_bits &= ~ugly_bits;
	}
	if (bad_bits) {
		dev_err(&spi->dev, "setup: unsupported mode bits %x\n",
			bad_bits);
		return -EINVAL;
	}

	if (!spi->bits_per_word) {
		spi->bits_per_word = 8;
	} else {
		/*
		 * Some controllers may not support the default 8 bits-per-word,
		 * so only perform the check when this is explicitly provided.
		 */
		status = __spi_validate_bits_per_word(spi->controller,
						      spi->bits_per_word);
		if (status)
			return status;
	}

	if (spi->controller->max_speed_hz &&
	    (!spi->max_speed_hz ||
	     spi->max_speed_hz > spi->controller->max_speed_hz))
		spi->max_speed_hz = spi->controller->max_speed_hz;

	mutex_lock(&spi->controller->io_mutex);

	if (spi->controller->setup) {
		status = spi->controller->setup(spi);
		if (status) {
			mutex_unlock(&spi->controller->io_mutex);
			dev_err(&spi->controller->dev, "Failed to setup device: %d\n",
				status);
			return status;
		}
	}

	status = spi_set_cs_timing(spi);
	if (status) {
		mutex_unlock(&spi->controller->io_mutex);
		return status;
	}

	if (spi->controller->auto_runtime_pm && spi->controller->set_cs) {
		status = pm_runtime_resume_and_get(spi->controller->dev.parent);
		if (status < 0) {
			mutex_unlock(&spi->controller->io_mutex);
			dev_err(&spi->controller->dev, "Failed to power device: %d\n",
				status);
			return status;
		}

		/*
		 * We do not want to return a positive value from pm_runtime_get:
		 * there are many instances of devices calling spi_setup() and
		 * checking for a non-zero return value instead of a negative
		 * return value.
		 */
		status = 0;

		spi_set_cs(spi, false, true);
		pm_runtime_mark_last_busy(spi->controller->dev.parent);
		pm_runtime_put_autosuspend(spi->controller->dev.parent);
	} else {
		spi_set_cs(spi, false, true);
	}

	mutex_unlock(&spi->controller->io_mutex);

	if (spi->rt && !spi->controller->rt) {
		spi->controller->rt = true;
		spi_set_thread_rt(spi->controller);
	}

	trace_spi_setup(spi, status);

	dev_dbg(&spi->dev, "setup mode %lu, %s%s%s%s%u bits/w, %u Hz max --> %d\n",
		spi->mode & SPI_MODE_X_MASK,
		(spi->mode & SPI_CS_HIGH) ? "cs_high, " : "",
		(spi->mode & SPI_LSB_FIRST) ? "lsb, " : "",
		(spi->mode & SPI_3WIRE) ? "3wire, " : "",
		(spi->mode & SPI_LOOP) ? "loopback, " : "",
		spi->bits_per_word, spi->max_speed_hz,
		status);

	return status;
}
EXPORT_SYMBOL_GPL(spi_setup);
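
/*
 * Example (illustrative sketch): a peripheral driver that needs mode 3,
 * 16-bit words and a 1 MHz ceiling adjusts the spi_device in probe() and
 * then calls spi_setup(). The "my_" name is hypothetical.
 *
 *	static int my_chip_probe(struct spi_device *spi)
 *	{
 *		int ret;
 *
 *		spi->mode = SPI_MODE_3;
 *		spi->bits_per_word = 16;
 *		spi->max_speed_hz = 1000000;
 *
 *		ret = spi_setup(spi);
 *		if (ret)
 *			return dev_err_probe(&spi->dev, ret, "spi_setup failed\n");
 *
 *		return 0;
 *	}
 */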

static int _spi_xfer_word_delay_update(struct spi_transfer *xfer,
				       struct spi_device *spi)
{
	int delay1, delay2;

	delay1 = spi_delay_to_ns(&xfer->word_delay, xfer);
	if (delay1 < 0)
		return delay1;

	delay2 = spi_delay_to_ns(&spi->word_delay, xfer);
	if (delay2 < 0)
		return delay2;

	if (delay1 < delay2)
		memcpy(&xfer->word_delay, &spi->word_delay,
		       sizeof(xfer->word_delay));

	return 0;
}

static int __spi_validate(struct spi_device *spi, struct spi_message *message)
{
	struct spi_controller *ctlr = spi->controller;
	struct spi_transfer *xfer;
	int w_size;

	if (list_empty(&message->transfers))
		return -EINVAL;

	message->spi = spi;

	/*
	 * Half-duplex links include original MicroWire, and ones with
	 * only one data pin like SPI_3WIRE (switches direction) or where
	 * either MOSI or MISO is missing. They can also be caused by
	 * software limitations.
	 */
	if ((ctlr->flags & SPI_CONTROLLER_HALF_DUPLEX) ||
	    (spi->mode & SPI_3WIRE)) {
		unsigned flags = ctlr->flags;

		list_for_each_entry(xfer, &message->transfers, transfer_list) {
			if (xfer->rx_buf && xfer->tx_buf)
				return -EINVAL;
			if ((flags & SPI_CONTROLLER_NO_TX) && xfer->tx_buf)
				return -EINVAL;
			if ((flags & SPI_CONTROLLER_NO_RX) && xfer->rx_buf)
				return -EINVAL;
		}
	}

	/*
	 * Set transfer bits_per_word and max speed as spi device default if
	 * it is not set for this transfer.
	 * Set transfer tx_nbits and rx_nbits as single transfer default
	 * (SPI_NBITS_SINGLE) if it is not set for this transfer.
	 * Ensure transfer word_delay is at least as long as that required by
	 * device itself.
	 */
	message->frame_length = 0;
	list_for_each_entry(xfer, &message->transfers, transfer_list) {
		xfer->effective_speed_hz = 0;
		message->frame_length += xfer->len;
		if (!xfer->bits_per_word)
			xfer->bits_per_word = spi->bits_per_word;

		if (!xfer->speed_hz)
			xfer->speed_hz = spi->max_speed_hz;

		if (ctlr->max_speed_hz && xfer->speed_hz > ctlr->max_speed_hz)
			xfer->speed_hz = ctlr->max_speed_hz;

		if (__spi_validate_bits_per_word(ctlr, xfer->bits_per_word))
			return -EINVAL;

		/*
		 * The SPI transfer length should be a multiple of the SPI
		 * word size, where the word size is rounded up to a
		 * power-of-two number of bytes.
		 */
		if (xfer->bits_per_word <= 8)
			w_size = 1;
		else if (xfer->bits_per_word <= 16)
			w_size = 2;
		else
			w_size = 4;

		/* No partial transfers accepted */
		if (xfer->len % w_size)
			return -EINVAL;

		if (xfer->speed_hz && ctlr->min_speed_hz &&
		    xfer->speed_hz < ctlr->min_speed_hz)
			return -EINVAL;

		if (xfer->tx_buf && !xfer->tx_nbits)
			xfer->tx_nbits = SPI_NBITS_SINGLE;
		if (xfer->rx_buf && !xfer->rx_nbits)
			xfer->rx_nbits = SPI_NBITS_SINGLE;
		/*
		 * Check transfer tx/rx_nbits:
		 * 1. check the value matches one of single, dual, quad or octal
		 * 2. check tx/rx_nbits match the mode in spi_device
		 */
		if (xfer->tx_buf) {
			if (spi->mode & SPI_NO_TX)
				return -EINVAL;
			if (xfer->tx_nbits != SPI_NBITS_SINGLE &&
			    xfer->tx_nbits != SPI_NBITS_DUAL &&
			    xfer->tx_nbits != SPI_NBITS_QUAD &&
			    xfer->tx_nbits != SPI_NBITS_OCTAL)
				return -EINVAL;
			if ((xfer->tx_nbits == SPI_NBITS_DUAL) &&
			    !(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL)))
				return -EINVAL;
			if ((xfer->tx_nbits == SPI_NBITS_QUAD) &&
			    !(spi->mode & (SPI_TX_QUAD | SPI_TX_OCTAL)))
				return -EINVAL;
			if ((xfer->tx_nbits == SPI_NBITS_OCTAL) &&
			    !(spi->mode & SPI_TX_OCTAL))
				return -EINVAL;
		}
		/* Check transfer rx_nbits */
		if (xfer->rx_buf) {
			if (spi->mode & SPI_NO_RX)
				return -EINVAL;
			if (xfer->rx_nbits != SPI_NBITS_SINGLE &&
			    xfer->rx_nbits != SPI_NBITS_DUAL &&
			    xfer->rx_nbits != SPI_NBITS_QUAD &&
			    xfer->rx_nbits != SPI_NBITS_OCTAL)
				return -EINVAL;
			if ((xfer->rx_nbits == SPI_NBITS_DUAL) &&
			    !(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL)))
				return -EINVAL;
			if ((xfer->rx_nbits == SPI_NBITS_QUAD) &&
			    !(spi->mode & (SPI_RX_QUAD | SPI_RX_OCTAL)))
				return -EINVAL;
			if ((xfer->rx_nbits == SPI_NBITS_OCTAL) &&
			    !(spi->mode & SPI_RX_OCTAL))
				return -EINVAL;
		}

		if (_spi_xfer_word_delay_update(xfer, spi))
			return -EINVAL;
	}

	message->status = -EINPROGRESS;

	return 0;
}

/*
 * spi_split_transfers - generic handling of transfer splitting
 * @msg: the message to split
 *
 * Under certain conditions, a SPI controller may not support arbitrary
 * transfer sizes or other features required by a peripheral. This function
 * will split the transfers in the message into smaller transfers that are
 * supported by the controller.
 *
 * Controllers with special requirements not covered here can also split
 * transfers in the optimize_message() callback.
 *
 * Context: can sleep
 * Return: zero on success, else a negative error code
 */
static int spi_split_transfers(struct spi_message *msg)
{
	struct spi_controller *ctlr = msg->spi->controller;
	struct spi_transfer *xfer;
	int ret;

	/*
	 * If an SPI controller does not support toggling the CS line on each
	 * transfer (indicated by the SPI_CS_WORD flag) or we are using a GPIO
	 * for the CS line, we can emulate the CS-per-word hardware function by
	 * splitting transfers into one-word transfers and ensuring that
	 * cs_change is set for each transfer.
	 */
	if ((msg->spi->mode & SPI_CS_WORD) &&
	    (!(ctlr->mode_bits & SPI_CS_WORD) || spi_is_csgpiod(msg->spi))) {
		ret = spi_split_transfers_maxwords(ctlr, msg, 1);
		if (ret)
			return ret;

		list_for_each_entry(xfer, &msg->transfers, transfer_list) {
			/* Don't change cs_change on the last entry in the list */
			if (list_is_last(&xfer->transfer_list, &msg->transfers))
				break;

			xfer->cs_change = 1;
		}
	} else {
		ret = spi_split_transfers_maxsize(ctlr, msg,
						  spi_max_transfer_size(msg->spi));
		if (ret)
			return ret;
	}

	return 0;
}

/*
 * __spi_optimize_message - shared implementation for spi_optimize_message()
 *                          and spi_maybe_optimize_message()
 * @spi: the device that will be used for the message
 * @msg: the message to optimize
 *
 * Peripheral drivers will call spi_optimize_message() and the spi core will
 * call spi_maybe_optimize_message() instead of calling this directly.
 *
 * It is not valid to call this on a message that has already been optimized.
 *
 * Return: zero on success, else a negative error code
 */
static int __spi_optimize_message(struct spi_device *spi,
				  struct spi_message *msg)
{
	struct spi_controller *ctlr = spi->controller;
	int ret;

	ret = __spi_validate(spi, msg);
	if (ret)
		return ret;

	ret = spi_split_transfers(msg);
	if (ret)
		return ret;

	if (ctlr->optimize_message) {
		ret = ctlr->optimize_message(msg);
		if (ret) {
			spi_res_release(ctlr, msg);
			return ret;
		}
	}

	msg->optimized = true;

	return 0;
}

/*
 * spi_maybe_optimize_message - optimize message if it isn't already pre-optimized
 * @spi: the device that will be used for the message
 * @msg: the message to optimize
 * Return: zero on success, else a negative error code
 */
static int spi_maybe_optimize_message(struct spi_device *spi,
				      struct spi_message *msg)
{
	if (spi->controller->defer_optimize_message) {
		msg->spi = spi;
		return 0;
	}

	if (msg->pre_optimized)
		return 0;

	return __spi_optimize_message(spi, msg);
}

/**
 * spi_optimize_message - do any one-time validation and setup for a SPI message
 * @spi: the device that will be used for the message
 * @msg: the message to optimize
 *
 * Peripheral drivers that reuse the same message repeatedly may call this to
 * perform as much message prep as possible once, rather than repeating it each
 * time a message transfer is performed, to improve throughput and reduce CPU
 * usage.
 *
 * Once a message has been optimized, it cannot be modified with the exception
 * of updating the contents of any xfer->tx_buf (the pointer can't be changed,
 * only the data in the memory it points to).
 *
 * Calls to this function must be balanced with calls to spi_unoptimize_message()
 * to avoid leaking resources.
 *
 * Context: can sleep
 * Return: zero on success, else a negative error code
 */
int spi_optimize_message(struct spi_device *spi, struct spi_message *msg)
{
	int ret;

	/*
	 * Pre-optimization is not supported and optimization is deferred e.g.
	 * when using spi-mux.
	 */
	if (spi->controller->defer_optimize_message)
		return 0;

	ret = __spi_optimize_message(spi, msg);
	if (ret)
		return ret;

	/*
	 * This flag indicates that the peripheral driver called spi_optimize_message()
	 * and therefore we shouldn't unoptimize the message automatically when
	 * finalizing it, but rather wait until spi_unoptimize_message() is called
	 * by the peripheral driver.
	 */
	msg->pre_optimized = true;

	return 0;
}
EXPORT_SYMBOL_GPL(spi_optimize_message);
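
/*
 * Example (illustrative sketch): a driver that sends the same message layout
 * repeatedly pays the validation and splitting cost once. The "my_" helper,
 * the transfer array, its buffers and the loop bound are all hypothetical;
 * only the data behind xfer->tx_buf may change between iterations.
 *
 *	spi_message_init_with_transfers(&msg, xfers, ARRAY_SIZE(xfers));
 *
 *	ret = spi_optimize_message(spi, &msg);
 *	if (ret)
 *		return ret;
 *
 *	for (i = 0; i < n_frames; i++) {
 *		my_fill_tx_buffer(tx_buf, i);
 *		ret = spi_sync(spi, &msg);
 *		if (ret)
 *			break;
 *	}
 *
 *	spi_unoptimize_message(&msg);
 */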

/**
 * spi_unoptimize_message - releases any resources allocated by spi_optimize_message()
 * @msg: the message to unoptimize
 *
 * Calls to this function must be balanced with calls to spi_optimize_message().
 *
 * Context: can sleep
 */
void spi_unoptimize_message(struct spi_message *msg)
{
	if (msg->spi->controller->defer_optimize_message)
		return;

	__spi_unoptimize_message(msg);
	msg->pre_optimized = false;
}
EXPORT_SYMBOL_GPL(spi_unoptimize_message);

static int __spi_async(struct spi_device *spi, struct spi_message *message)
{
	struct spi_controller *ctlr = spi->controller;
	struct spi_transfer *xfer;

	/*
	 * Some controllers do not support doing regular SPI transfers. Return
	 * ENOTSUPP when this is the case.
	 */
	if (!ctlr->transfer)
		return -ENOTSUPP;

	SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics, spi_async);
	SPI_STATISTICS_INCREMENT_FIELD(spi->pcpu_statistics, spi_async);

	trace_spi_message_submit(message);

	if (!ctlr->ptp_sts_supported) {
		list_for_each_entry(xfer, &message->transfers, transfer_list) {
			xfer->ptp_sts_word_pre = 0;
			ptp_read_system_prets(xfer->ptp_sts);
		}
	}

	return ctlr->transfer(spi, message);
}

static void devm_spi_unoptimize_message(void *msg)
{
	spi_unoptimize_message(msg);
}

/**
 * devm_spi_optimize_message - managed version of spi_optimize_message()
 * @dev: the device that manages @msg (usually @spi->dev)
 * @spi: the device that will be used for the message
 * @msg: the message to optimize
 * Return: zero on success, else a negative error code
 *
 * spi_unoptimize_message() will automatically be called when the device is
 * removed.
 */
int devm_spi_optimize_message(struct device *dev, struct spi_device *spi,
			      struct spi_message *msg)
{
	int ret;

	ret = spi_optimize_message(spi, msg);
	if (ret)
		return ret;

	return devm_add_action_or_reset(dev, devm_spi_unoptimize_message, msg);
}
EXPORT_SYMBOL_GPL(devm_spi_optimize_message);

/**
 * spi_async - asynchronous SPI transfer
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers, including completion callback
 * Context: any (IRQs may be blocked, etc)
 *
 * This call may be used in_irq and other contexts which can't sleep,
 * as well as from task contexts which can sleep.
 *
 * The completion callback is invoked in a context which can't sleep.
 * Before that invocation, the value of message->status is undefined.
 * When the callback is issued, message->status holds either zero (to
 * indicate complete success) or a negative error code. After that
 * callback returns, the driver which issued the transfer request may
 * deallocate the associated memory; it's no longer in use by any SPI
 * core or controller driver code.
 *
 * Note that although all messages to a spi_device are handled in
 * FIFO order, messages may go to different devices in other orders.
 * Some devices might be higher priority, or have various "hard" access
 * time requirements, for example.
 *
 * On detection of any fault during the transfer, processing of
 * the entire message is aborted, and the device is deselected.
 * Until returning from the associated message completion callback,
 * no other spi_message queued to that device will be processed.
 * (This rule applies equally to all the synchronous transfer calls,
 * which are wrappers around this core asynchronous primitive.)
 *
 * Return: zero on success, else a negative error code.
 */
int spi_async(struct spi_device *spi, struct spi_message *message)
{
	struct spi_controller *ctlr = spi->controller;
	int ret;
	unsigned long flags;

	ret = spi_maybe_optimize_message(spi, message);
	if (ret)
		return ret;

	spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);

	if (ctlr->bus_lock_flag)
		ret = -EBUSY;
	else
		ret = __spi_async(spi, message);

	spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(spi_async);
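
/*
 * Example (illustrative sketch): submitting from atomic context and reaping
 * the result later from a sleepable context. The completion object and the
 * "my_" name are hypothetical.
 *
 *	static void my_msg_done(void *ctx)
 *	{
 *		complete(ctx);
 *	}
 *
 *	...
 *	msg->complete = my_msg_done;
 *	msg->context = &done;
 *	ret = spi_async(spi, msg);
 *
 *	...later, from a context that may sleep:
 *	wait_for_completion(&done);
 *	status = msg->status;
 */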

static void __spi_transfer_message_noqueue(struct spi_controller *ctlr, struct spi_message *msg)
{
	bool was_busy;
	int ret;

	mutex_lock(&ctlr->io_mutex);

	was_busy = ctlr->busy;

	ctlr->cur_msg = msg;
	ret = __spi_pump_transfer_message(ctlr, msg, was_busy);
	if (ret)
		dev_err(&ctlr->dev, "noqueue transfer failed\n");
	ctlr->cur_msg = NULL;
	ctlr->fallback = false;

	if (!was_busy) {
		kfree(ctlr->dummy_rx);
		ctlr->dummy_rx = NULL;
		kfree(ctlr->dummy_tx);
		ctlr->dummy_tx = NULL;
		if (ctlr->unprepare_transfer_hardware &&
		    ctlr->unprepare_transfer_hardware(ctlr))
			dev_err(&ctlr->dev,
				"failed to unprepare transfer hardware\n");
		spi_idle_runtime_pm(ctlr);
	}

	mutex_unlock(&ctlr->io_mutex);
}

/*-------------------------------------------------------------------------*/

/*
 * Utility methods for SPI protocol drivers, layered on
 * top of the core. Some other utility methods are defined as
 * inline functions.
 */

static void spi_complete(void *arg)
{
	complete(arg);
}

static int __spi_sync(struct spi_device *spi, struct spi_message *message)
{
	DECLARE_COMPLETION_ONSTACK(done);
	unsigned long flags;
	int status;
	struct spi_controller *ctlr = spi->controller;

	if (__spi_check_suspended(ctlr)) {
		dev_warn_once(&spi->dev, "Attempted to sync while suspended\n");
		return -ESHUTDOWN;
	}

	status = spi_maybe_optimize_message(spi, message);
	if (status)
		return status;

	SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics, spi_sync);
	SPI_STATISTICS_INCREMENT_FIELD(spi->pcpu_statistics, spi_sync);

	/*
	 * Checking queue_empty here only guarantees async/sync message
	 * ordering when coming from the same context. It does not need to
	 * guard against reentrancy from a different context. The io_mutex
	 * will catch those cases.
	 */
	if (READ_ONCE(ctlr->queue_empty) && !ctlr->must_async) {
		message->actual_length = 0;
		message->status = -EINPROGRESS;

		trace_spi_message_submit(message);

		SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics, spi_sync_immediate);
		SPI_STATISTICS_INCREMENT_FIELD(spi->pcpu_statistics, spi_sync_immediate);

		__spi_transfer_message_noqueue(ctlr, message);

		return message->status;
	}

	/*
	 * There are messages in the async queue that could have originated
	 * from the same context, so we need to preserve ordering.
	 * Therefore we send the message to the async queue and wait until
	 * it is completed.
	 */
	message->complete = spi_complete;
	message->context = &done;

	spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
	status = __spi_async(spi, message);
	spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);

	if (status == 0) {
		wait_for_completion(&done);
		status = message->status;
	}
	message->complete = NULL;
	message->context = NULL;

	return status;
}

/**
 * spi_sync - blocking/synchronous SPI data transfers
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep. The sleep
 * is non-interruptible, and has no timeout. Low-overhead controller
 * drivers may DMA directly into and out of the message buffers.
 *
 * Note that the SPI device's chip select is active during the message,
 * and then is normally disabled between messages. Drivers for some
 * frequently-used devices may want to minimize costs of selecting a chip,
 * by leaving it selected in anticipation that the next message will go
 * to the same chip. (That may increase power usage.)
 *
 * Also, the caller is guaranteeing that the memory associated with the
 * message will not be freed before this call returns.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_sync(struct spi_device *spi, struct spi_message *message)
{
	int ret;

	mutex_lock(&spi->controller->bus_lock_mutex);
	ret = __spi_sync(spi, message);
	mutex_unlock(&spi->controller->bus_lock_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(spi_sync);
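
/*
 * Example (illustrative sketch): a full-duplex 4-byte exchange. The tx/rx
 * buffers are hypothetical and must remain valid until spi_sync() returns.
 *
 *	struct spi_transfer xfer = {
 *		.tx_buf = tx,
 *		.rx_buf = rx,
 *		.len = 4,
 *	};
 *	struct spi_message msg;
 *
 *	spi_message_init(&msg);
 *	spi_message_add_tail(&xfer, &msg);
 *	ret = spi_sync(spi, &msg);
 */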

/**
 * spi_sync_locked - version of spi_sync with exclusive bus usage
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep. The sleep
 * is non-interruptible, and has no timeout. Low-overhead controller
 * drivers may DMA directly into and out of the message buffers.
 *
 * This call should be used by drivers that require exclusive access to the
 * SPI bus. It has to be preceded by a spi_bus_lock call. The SPI bus must
 * be released by a spi_bus_unlock call when the exclusive access is over.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_sync_locked(struct spi_device *spi, struct spi_message *message)
{
	return __spi_sync(spi, message);
}
EXPORT_SYMBOL_GPL(spi_sync_locked);

/**
 * spi_bus_lock - obtain a lock for exclusive SPI bus usage
 * @ctlr: SPI bus master that should be locked for exclusive bus access
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep. The sleep
 * is non-interruptible, and has no timeout.
 *
 * This call should be used by drivers that require exclusive access to the
 * SPI bus. The SPI bus must be released by a spi_bus_unlock call when the
 * exclusive access is over. Data transfer must be done by spi_sync_locked
 * and spi_async_locked calls when the SPI bus lock is held.
 *
 * Return: always zero.
 */
int spi_bus_lock(struct spi_controller *ctlr)
{
	unsigned long flags;

	mutex_lock(&ctlr->bus_lock_mutex);

	spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
	ctlr->bus_lock_flag = 1;
	spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);

	/* Mutex remains locked until spi_bus_unlock() is called */

	return 0;
}
EXPORT_SYMBOL_GPL(spi_bus_lock);

/**
 * spi_bus_unlock - release the lock for exclusive SPI bus usage
 * @ctlr: SPI bus master that was locked for exclusive bus access
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep. The sleep
 * is non-interruptible, and has no timeout.
 *
 * This call releases an SPI bus lock previously obtained by an spi_bus_lock
 * call.
 *
 * Return: always zero.
 */
int spi_bus_unlock(struct spi_controller *ctlr)
{
	ctlr->bus_lock_flag = 0;

	mutex_unlock(&ctlr->bus_lock_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(spi_bus_unlock);
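
/*
 * Example (illustrative sketch): performing two messages back to back with
 * no other bus traffic allowed in between. cmd_msg and data_msg are
 * hypothetical, previously initialized messages.
 *
 *	spi_bus_lock(spi->controller);
 *	ret = spi_sync_locked(spi, &cmd_msg);
 *	if (!ret)
 *		ret = spi_sync_locked(spi, &data_msg);
 *	spi_bus_unlock(spi->controller);
 */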

/* Portable code must never pass more than 32 bytes */
#define SPI_BUFSIZ	max(32, SMP_CACHE_BYTES)

static u8	*buf;

/**
 * spi_write_then_read - SPI synchronous write followed by read
 * @spi: device with which data will be exchanged
 * @txbuf: data to be written (need not be DMA-safe)
 * @n_tx: size of txbuf, in bytes
 * @rxbuf: buffer into which data will be read (need not be DMA-safe)
 * @n_rx: size of rxbuf, in bytes
 * Context: can sleep
 *
 * This performs a half duplex MicroWire style transaction with the
 * device, sending txbuf and then reading rxbuf. The return value
 * is zero for success, else a negative errno status code.
 * This call may only be used from a context that may sleep.
 *
 * Parameters to this routine are always copied using a small buffer.
 * Performance-sensitive or bulk transfer code should instead use
 * spi_{async,sync}() calls with DMA-safe buffers.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_write_then_read(struct spi_device *spi,
			const void *txbuf, unsigned n_tx,
			void *rxbuf, unsigned n_rx)
{
	static DEFINE_MUTEX(lock);

	int status;
	struct spi_message message;
	struct spi_transfer x[2];
	u8 *local_buf;

	/*
	 * Use the preallocated DMA-safe buffer if we can. We can't avoid
	 * copying here (as a pure convenience thing), but we can
	 * keep heap costs out of the hot path unless someone else is
	 * using the pre-allocated buffer or the transfer is too large.
	 */
	if ((n_tx + n_rx) > SPI_BUFSIZ || !mutex_trylock(&lock)) {
		local_buf = kmalloc(max((unsigned)SPI_BUFSIZ, n_tx + n_rx),
				    GFP_KERNEL | GFP_DMA);
		if (!local_buf)
			return -ENOMEM;
	} else {
		local_buf = buf;
	}

	spi_message_init(&message);
	memset(x, 0, sizeof(x));
	if (n_tx) {
		x[0].len = n_tx;
		spi_message_add_tail(&x[0], &message);
	}
	if (n_rx) {
		x[1].len = n_rx;
		spi_message_add_tail(&x[1], &message);
	}

	memcpy(local_buf, txbuf, n_tx);
	x[0].tx_buf = local_buf;
	x[1].rx_buf = local_buf + n_tx;

	/* Do the I/O */
	status = spi_sync(spi, &message);
	if (status == 0)
		memcpy(rxbuf, x[1].rx_buf, n_rx);

	if (x[0].tx_buf == buf)
		mutex_unlock(&lock);
	else
		kfree(local_buf);

	return status;
}
EXPORT_SYMBOL_GPL(spi_write_then_read);
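
/*
 * Example (illustrative sketch): reading one register of a hypothetical
 * chip, where the MY_REG_ID command byte is made up for illustration:
 *
 *	u8 cmd = MY_REG_ID;
 *	u8 val;
 *	int ret = spi_write_then_read(spi, &cmd, 1, &val, 1);
 */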

/*-------------------------------------------------------------------------*/

#if IS_ENABLED(CONFIG_OF_DYNAMIC)
/* Must call put_device() when done with returned spi_device device */
static struct spi_device *of_find_spi_device_by_node(struct device_node *node)
{
	struct device *dev = bus_find_device_by_of_node(&spi_bus_type, node);

	return dev ? to_spi_device(dev) : NULL;
}

/* The spi controllers are not using spi_bus, so we find them another way */
static struct spi_controller *of_find_spi_controller_by_node(struct device_node *node)
{
	struct device *dev;

	dev = class_find_device_by_of_node(&spi_master_class, node);
	if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE))
		dev = class_find_device_by_of_node(&spi_slave_class, node);
	if (!dev)
		return NULL;

	/* Reference got in class_find_device */
	return container_of(dev, struct spi_controller, dev);
}

static int of_spi_notify(struct notifier_block *nb, unsigned long action,
			 void *arg)
{
	struct of_reconfig_data *rd = arg;
	struct spi_controller *ctlr;
	struct spi_device *spi;

	switch (of_reconfig_get_state_change(action, arg)) {
	case OF_RECONFIG_CHANGE_ADD:
		ctlr = of_find_spi_controller_by_node(rd->dn->parent);
		if (ctlr == NULL)
			return NOTIFY_OK;	/* Not for us */

		if (of_node_test_and_set_flag(rd->dn, OF_POPULATED)) {
			put_device(&ctlr->dev);
			return NOTIFY_OK;
		}

		/*
		 * Clear the flag before adding the device so that fw_devlink
		 * doesn't skip adding consumers to this device.
		 */
		rd->dn->fwnode.flags &= ~FWNODE_FLAG_NOT_DEVICE;
		spi = of_register_spi_device(ctlr, rd->dn);
		put_device(&ctlr->dev);

		if (IS_ERR(spi)) {
			pr_err("%s: failed to create for '%pOF'\n",
			       __func__, rd->dn);
			of_node_clear_flag(rd->dn, OF_POPULATED);
			return notifier_from_errno(PTR_ERR(spi));
		}
		break;

	case OF_RECONFIG_CHANGE_REMOVE:
		/* Already depopulated? */
		if (!of_node_check_flag(rd->dn, OF_POPULATED))
			return NOTIFY_OK;

		/* Find our device by node */
		spi = of_find_spi_device_by_node(rd->dn);
		if (spi == NULL)
			return NOTIFY_OK;	/* Not meant for us */

		/* Unregister takes one ref away */
		spi_unregister_device(spi);

		/* And put the reference of the find */
		put_device(&spi->dev);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block spi_of_notifier = {
	.notifier_call = of_spi_notify,
};
#else /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
extern struct notifier_block spi_of_notifier;
#endif /* IS_ENABLED(CONFIG_OF_DYNAMIC) */

#if IS_ENABLED(CONFIG_ACPI)
static int spi_acpi_controller_match(struct device *dev, const void *data)
{
	return ACPI_COMPANION(dev->parent) == data;
}

struct spi_controller *acpi_spi_find_controller_by_adev(struct acpi_device *adev)
{
	struct device *dev;

	dev = class_find_device(&spi_master_class, NULL, adev,
				spi_acpi_controller_match);
	if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE))
		dev = class_find_device(&spi_slave_class, NULL, adev,
					spi_acpi_controller_match);
	if (!dev)
		return NULL;

	return container_of(dev, struct spi_controller, dev);
}
EXPORT_SYMBOL_GPL(acpi_spi_find_controller_by_adev);

static struct spi_device *acpi_spi_find_device_by_adev(struct acpi_device *adev)
{
	struct device *dev;

	dev = bus_find_device_by_acpi_dev(&spi_bus_type, adev);
	return to_spi_device(dev);
}

static int acpi_spi_notify(struct notifier_block *nb, unsigned long value,
			   void *arg)
{
	struct acpi_device *adev = arg;
	struct spi_controller *ctlr;
	struct spi_device *spi;

	switch (value) {
	case ACPI_RECONFIG_DEVICE_ADD:
		ctlr = acpi_spi_find_controller_by_adev(acpi_dev_parent(adev));
		if (!ctlr)
			break;

		acpi_register_spi_device(ctlr, adev);
		put_device(&ctlr->dev);
		break;
	case ACPI_RECONFIG_DEVICE_REMOVE:
		if (!acpi_device_enumerated(adev))
			break;

		spi = acpi_spi_find_device_by_adev(adev);
		if (!spi)
			break;

		spi_unregister_device(spi);
		put_device(&spi->dev);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block spi_acpi_notifier = {
	.notifier_call = acpi_spi_notify,
};
#else
extern struct notifier_block spi_acpi_notifier;
#endif

static int __init spi_init(void)
{
	int status;

	buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL);
	if (!buf) {
		status = -ENOMEM;
		goto err0;
	}

	status = bus_register(&spi_bus_type);
	if (status < 0)
		goto err1;

	status = class_register(&spi_master_class);
	if (status < 0)
		goto err2;

	if (IS_ENABLED(CONFIG_SPI_SLAVE)) {
		status = class_register(&spi_slave_class);
		if (status < 0)
			goto err3;
	}

	if (IS_ENABLED(CONFIG_OF_DYNAMIC))
		WARN_ON(of_reconfig_notifier_register(&spi_of_notifier));
	if (IS_ENABLED(CONFIG_ACPI))
		WARN_ON(acpi_reconfig_notifier_register(&spi_acpi_notifier));

	return 0;

err3:
	class_unregister(&spi_master_class);
err2:
	bus_unregister(&spi_bus_type);
err1:
	kfree(buf);
	buf = NULL;
err0:
	return status;
}

/*
 * A board_info is normally registered in arch_initcall(),
 * but even essential drivers wait till later.
 *
 * REVISIT only boardinfo really needs static linking. The rest (device and
 * driver registration) _could_ be dynamically linked (modular) ... Costs
 * include needing to have boardinfo data structures be much more public.
 */
postcore_initcall(spi_init);