// SPDX-License-Identifier: GPL-2.0-or-later
// SPI init/core code
//
// Copyright (C) 2005 David Brownell
// Copyright (C) 2008 Secret Lab Technologies Ltd.

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/cache.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/mutex.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/clk/clk-conf.h>
#include <linux/slab.h>
#include <linux/mod_devicetable.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
#include <linux/gpio/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/property.h>
#include <linux/export.h>
#include <linux/sched/rt.h>
#include <uapi/linux/sched/types.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/ioport.h>
#include <linux/acpi.h>
#include <linux/highmem.h>
#include <linux/idr.h>
#include <linux/platform_data/x86/apple.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/percpu.h>

#define CREATE_TRACE_POINTS
#include <trace/events/spi.h>
EXPORT_TRACEPOINT_SYMBOL(spi_transfer_start);
EXPORT_TRACEPOINT_SYMBOL(spi_transfer_stop);

#include "internals.h"

static DEFINE_IDR(spi_master_idr);

static void spidev_release(struct device *dev)
{
	struct spi_device	*spi = to_spi_device(dev);

	spi_controller_put(spi->controller);
	kfree(spi->driver_override);
	free_percpu(spi->pcpu_statistics);
	kfree(spi);
}

static ssize_t
modalias_show(struct device *dev, struct device_attribute *a, char *buf)
{
	const struct spi_device	*spi = to_spi_device(dev);
	int len;

	len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
	if (len != -ENODEV)
		return len;

	return sprintf(buf, "%s%s\n", SPI_MODULE_PREFIX, spi->modalias);
}
static DEVICE_ATTR_RO(modalias);

static ssize_t driver_override_store(struct device *dev,
				     struct device_attribute *a,
				     const char *buf, size_t count)
{
	struct spi_device *spi = to_spi_device(dev);
	int ret;

	ret = driver_set_override(dev, &spi->driver_override, buf, count);
	if (ret)
		return ret;

	return count;
}

static ssize_t driver_override_show(struct device *dev,
				    struct device_attribute *a, char *buf)
{
	const struct spi_device *spi = to_spi_device(dev);
	ssize_t len;

	device_lock(dev);
	len = snprintf(buf, PAGE_SIZE, "%s\n", spi->driver_override ? : "");
	device_unlock(dev);
	return len;
}
static DEVICE_ATTR_RW(driver_override);

static struct spi_statistics __percpu *spi_alloc_pcpu_stats(struct device *dev)
{
	struct spi_statistics __percpu *pcpu_stats;

	if (dev)
		pcpu_stats = devm_alloc_percpu(dev, struct spi_statistics);
	else
		pcpu_stats = alloc_percpu_gfp(struct spi_statistics, GFP_KERNEL);

	if (pcpu_stats) {
		int cpu;

		for_each_possible_cpu(cpu) {
			struct spi_statistics *stat;

			stat = per_cpu_ptr(pcpu_stats, cpu);
			u64_stats_init(&stat->syncp);
		}
	}
	return pcpu_stats;
}

#define spi_pcpu_stats_totalize(ret, in, field)				\
do {									\
	int i;								\
	ret = 0;							\
	for_each_possible_cpu(i) {					\
		const struct spi_statistics *pcpu_stats;		\
		u64 inc;						\
		unsigned int start;					\
		pcpu_stats = per_cpu_ptr(in, i);			\
		do {							\
			start = u64_stats_fetch_begin_irq(		\
					&pcpu_stats->syncp);		\
			inc = u64_stats_read(&pcpu_stats->field);	\
		} while (u64_stats_fetch_retry_irq(			\
					&pcpu_stats->syncp, start));	\
		ret += inc;						\
	}								\
} while (0)

#define SPI_STATISTICS_ATTRS(field, file)				\
static ssize_t spi_controller_##field##_show(struct device *dev,	\
					     struct device_attribute *attr, \
					     char *buf)			\
{									\
	struct spi_controller *ctlr = container_of(dev,			\
					 struct spi_controller, dev);	\
	return spi_statistics_##field##_show(ctlr->pcpu_statistics, buf); \
}									\
static struct device_attribute dev_attr_spi_controller_##field = {	\
	.attr = { .name = file, .mode = 0444 },				\
	.show = spi_controller_##field##_show,				\
};									\
static ssize_t spi_device_##field##_show(struct device *dev,		\
					 struct device_attribute *attr,	\
					char *buf)			\
{									\
	struct spi_device *spi = to_spi_device(dev);			\
	return spi_statistics_##field##_show(spi->pcpu_statistics, buf); \
}									\
static struct device_attribute dev_attr_spi_device_##field = {		\
	.attr = { .name = file, .mode = 0444 },				\
	.show = spi_device_##field##_show,				\
}

#define SPI_STATISTICS_SHOW_NAME(name, file, field)			\
static ssize_t spi_statistics_##name##_show(struct spi_statistics __percpu *stat, \
					    char *buf)			\
{									\
	ssize_t len;							\
	u64 val;							\
	spi_pcpu_stats_totalize(val, stat, field);			\
	len = sysfs_emit(buf, "%llu\n", val);				\
	return len;							\
}									\
SPI_STATISTICS_ATTRS(name, file)

#define SPI_STATISTICS_SHOW(field)					\
	SPI_STATISTICS_SHOW_NAME(field, __stringify(field),		\
				 field)

SPI_STATISTICS_SHOW(messages);
SPI_STATISTICS_SHOW(transfers);
SPI_STATISTICS_SHOW(errors);
SPI_STATISTICS_SHOW(timedout);

SPI_STATISTICS_SHOW(spi_sync);
SPI_STATISTICS_SHOW(spi_sync_immediate);
SPI_STATISTICS_SHOW(spi_async);

SPI_STATISTICS_SHOW(bytes);
SPI_STATISTICS_SHOW(bytes_rx);
SPI_STATISTICS_SHOW(bytes_tx);

#define SPI_STATISTICS_TRANSFER_BYTES_HISTO(index, number)		\
	SPI_STATISTICS_SHOW_NAME(transfer_bytes_histo##index,		\
				 "transfer_bytes_histo_" number,	\
				 transfer_bytes_histo[index])
SPI_STATISTICS_TRANSFER_BYTES_HISTO(0,  "0-1");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(1,  "2-3");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(2,  "4-7");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(3,  "8-15");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(4,  "16-31");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(5,  "32-63");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(6,  "64-127");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(7,  "128-255");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(8,  "256-511");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(9,  "512-1023");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(10, "1024-2047");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(11, "2048-4095");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(12, "4096-8191");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(13, "8192-16383");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(14, "16384-32767");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(15, "32768-65535");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(16, "65536+");

SPI_STATISTICS_SHOW(transfers_split_maxsize);
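
/*
 * The attribute groups defined below expose these counters in sysfs.
 * Illustrative sketch of the resulting layout (paths assume sysfs is
 * mounted at /sys and a hypothetical device "spi0.0" on controller
 * "spi0"):
 *
 *	/sys/bus/spi/devices/spi0.0/statistics/bytes_tx
 *	/sys/class/spi_master/spi0/statistics/transfer_bytes_histo_4-7
 */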

static struct attribute *spi_dev_attrs[] = {
	&dev_attr_modalias.attr,
	&dev_attr_driver_override.attr,
	NULL,
};

static const struct attribute_group spi_dev_group = {
	.attrs  = spi_dev_attrs,
};

static struct attribute *spi_device_statistics_attrs[] = {
	&dev_attr_spi_device_messages.attr,
	&dev_attr_spi_device_transfers.attr,
	&dev_attr_spi_device_errors.attr,
	&dev_attr_spi_device_timedout.attr,
	&dev_attr_spi_device_spi_sync.attr,
	&dev_attr_spi_device_spi_sync_immediate.attr,
	&dev_attr_spi_device_spi_async.attr,
	&dev_attr_spi_device_bytes.attr,
	&dev_attr_spi_device_bytes_rx.attr,
	&dev_attr_spi_device_bytes_tx.attr,
	&dev_attr_spi_device_transfer_bytes_histo0.attr,
	&dev_attr_spi_device_transfer_bytes_histo1.attr,
	&dev_attr_spi_device_transfer_bytes_histo2.attr,
	&dev_attr_spi_device_transfer_bytes_histo3.attr,
	&dev_attr_spi_device_transfer_bytes_histo4.attr,
	&dev_attr_spi_device_transfer_bytes_histo5.attr,
	&dev_attr_spi_device_transfer_bytes_histo6.attr,
	&dev_attr_spi_device_transfer_bytes_histo7.attr,
	&dev_attr_spi_device_transfer_bytes_histo8.attr,
	&dev_attr_spi_device_transfer_bytes_histo9.attr,
	&dev_attr_spi_device_transfer_bytes_histo10.attr,
	&dev_attr_spi_device_transfer_bytes_histo11.attr,
	&dev_attr_spi_device_transfer_bytes_histo12.attr,
	&dev_attr_spi_device_transfer_bytes_histo13.attr,
	&dev_attr_spi_device_transfer_bytes_histo14.attr,
	&dev_attr_spi_device_transfer_bytes_histo15.attr,
	&dev_attr_spi_device_transfer_bytes_histo16.attr,
	&dev_attr_spi_device_transfers_split_maxsize.attr,
	NULL,
};

static const struct attribute_group spi_device_statistics_group = {
	.name  = "statistics",
	.attrs  = spi_device_statistics_attrs,
};

static const struct attribute_group *spi_dev_groups[] = {
	&spi_dev_group,
	&spi_device_statistics_group,
	NULL,
};

static struct attribute *spi_controller_statistics_attrs[] = {
	&dev_attr_spi_controller_messages.attr,
	&dev_attr_spi_controller_transfers.attr,
	&dev_attr_spi_controller_errors.attr,
	&dev_attr_spi_controller_timedout.attr,
	&dev_attr_spi_controller_spi_sync.attr,
	&dev_attr_spi_controller_spi_sync_immediate.attr,
	&dev_attr_spi_controller_spi_async.attr,
	&dev_attr_spi_controller_bytes.attr,
	&dev_attr_spi_controller_bytes_rx.attr,
	&dev_attr_spi_controller_bytes_tx.attr,
	&dev_attr_spi_controller_transfer_bytes_histo0.attr,
	&dev_attr_spi_controller_transfer_bytes_histo1.attr,
	&dev_attr_spi_controller_transfer_bytes_histo2.attr,
	&dev_attr_spi_controller_transfer_bytes_histo3.attr,
	&dev_attr_spi_controller_transfer_bytes_histo4.attr,
	&dev_attr_spi_controller_transfer_bytes_histo5.attr,
	&dev_attr_spi_controller_transfer_bytes_histo6.attr,
	&dev_attr_spi_controller_transfer_bytes_histo7.attr,
	&dev_attr_spi_controller_transfer_bytes_histo8.attr,
	&dev_attr_spi_controller_transfer_bytes_histo9.attr,
	&dev_attr_spi_controller_transfer_bytes_histo10.attr,
	&dev_attr_spi_controller_transfer_bytes_histo11.attr,
	&dev_attr_spi_controller_transfer_bytes_histo12.attr,
	&dev_attr_spi_controller_transfer_bytes_histo13.attr,
	&dev_attr_spi_controller_transfer_bytes_histo14.attr,
	&dev_attr_spi_controller_transfer_bytes_histo15.attr,
	&dev_attr_spi_controller_transfer_bytes_histo16.attr,
	&dev_attr_spi_controller_transfers_split_maxsize.attr,
	NULL,
};

static const struct attribute_group spi_controller_statistics_group = {
	.name  = "statistics",
	.attrs  = spi_controller_statistics_attrs,
};

static const struct attribute_group *spi_master_groups[] = {
	&spi_controller_statistics_group,
	NULL,
};

static void spi_statistics_add_transfer_stats(struct spi_statistics __percpu *pcpu_stats,
					      struct spi_transfer *xfer,
					      struct spi_controller *ctlr)
{
	int l2len = min(fls(xfer->len), SPI_STATISTICS_HISTO_SIZE) - 1;
	struct spi_statistics *stats;

	if (l2len < 0)
		l2len = 0;

	get_cpu();
	stats = this_cpu_ptr(pcpu_stats);
	u64_stats_update_begin(&stats->syncp);

	u64_stats_inc(&stats->transfers);
	u64_stats_inc(&stats->transfer_bytes_histo[l2len]);

	u64_stats_add(&stats->bytes, xfer->len);
	if ((xfer->tx_buf) &&
	    (xfer->tx_buf != ctlr->dummy_tx))
		u64_stats_add(&stats->bytes_tx, xfer->len);
	if ((xfer->rx_buf) &&
	    (xfer->rx_buf != ctlr->dummy_rx))
		u64_stats_add(&stats->bytes_rx, xfer->len);

	u64_stats_update_end(&stats->syncp);
	put_cpu();
}
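
/*
 * Worked example of the bucket arithmetic above (illustrative): a
 * transfer with len = 4 gives fls(4) = 3, so l2len = 2, which is the
 * "transfer_bytes_histo_4-7" bucket; len = 0 gives fls(0) - 1 = -1 and
 * is clamped to bucket 0 ("0-1").
 */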

/*
 * modalias support makes "modprobe $MODALIAS" new-style hotplug work,
 * and the sysfs version makes coldplug work too.
 */
static const struct spi_device_id *spi_match_id(const struct spi_device_id *id, const char *name)
{
	while (id->name[0]) {
		if (!strcmp(name, id->name))
			return id;
		id++;
	}
	return NULL;
}

const struct spi_device_id *spi_get_device_id(const struct spi_device *sdev)
{
	const struct spi_driver *sdrv = to_spi_driver(sdev->dev.driver);

	return spi_match_id(sdrv->id_table, sdev->modalias);
}
EXPORT_SYMBOL_GPL(spi_get_device_id);

const void *spi_get_device_match_data(const struct spi_device *sdev)
{
	const void *match;

	match = device_get_match_data(&sdev->dev);
	if (match)
		return match;

	return (const void *)spi_get_device_id(sdev)->driver_data;
}
EXPORT_SYMBOL_GPL(spi_get_device_match_data);
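
/*
 * Usage sketch (illustrative, not part of this file): a probe() routine
 * for a hypothetical "acme" driver can fetch per-variant data with
 * spi_get_device_match_data() regardless of whether the device matched
 * via DT/ACPI properties or the spi_device_id table:
 *
 *	static int acme_probe(struct spi_device *spi)
 *	{
 *		const struct acme_chip_info *info;	// Hypothetical type
 *
 *		info = spi_get_device_match_data(spi);
 *		if (!info)
 *			return -ENODEV;
 *		...
 *	}
 */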

static int spi_match_device(struct device *dev, struct device_driver *drv)
{
	const struct spi_device	*spi = to_spi_device(dev);
	const struct spi_driver	*sdrv = to_spi_driver(drv);

	/* Check override first, and if set, only use the named driver */
	if (spi->driver_override)
		return strcmp(spi->driver_override, drv->name) == 0;

	/* Attempt an OF style match */
	if (of_driver_match_device(dev, drv))
		return 1;

	/* Then try ACPI */
	if (acpi_driver_match_device(dev, drv))
		return 1;

	if (sdrv->id_table)
		return !!spi_match_id(sdrv->id_table, spi->modalias);

	return strcmp(spi->modalias, drv->name) == 0;
}

static int spi_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	const struct spi_device		*spi = to_spi_device(dev);
	int rc;

	rc = acpi_device_uevent_modalias(dev, env);
	if (rc != -ENODEV)
		return rc;

	return add_uevent_var(env, "MODALIAS=%s%s", SPI_MODULE_PREFIX, spi->modalias);
}

static int spi_probe(struct device *dev)
{
	const struct spi_driver		*sdrv = to_spi_driver(dev->driver);
	struct spi_device		*spi = to_spi_device(dev);
	int ret;

	ret = of_clk_set_defaults(dev->of_node, false);
	if (ret)
		return ret;

	if (dev->of_node) {
		spi->irq = of_irq_get(dev->of_node, 0);
		if (spi->irq == -EPROBE_DEFER)
			return -EPROBE_DEFER;
		if (spi->irq < 0)
			spi->irq = 0;
	}

	ret = dev_pm_domain_attach(dev, true);
	if (ret)
		return ret;

	if (sdrv->probe) {
		ret = sdrv->probe(spi);
		if (ret)
			dev_pm_domain_detach(dev, true);
	}

	return ret;
}

static void spi_remove(struct device *dev)
{
	const struct spi_driver		*sdrv = to_spi_driver(dev->driver);

	if (sdrv->remove)
		sdrv->remove(to_spi_device(dev));

	dev_pm_domain_detach(dev, true);
}

static void spi_shutdown(struct device *dev)
{
	if (dev->driver) {
		const struct spi_driver	*sdrv = to_spi_driver(dev->driver);

		if (sdrv->shutdown)
			sdrv->shutdown(to_spi_device(dev));
	}
}

struct bus_type spi_bus_type = {
	.name		= "spi",
	.dev_groups	= spi_dev_groups,
	.match		= spi_match_device,
	.uevent		= spi_uevent,
	.probe		= spi_probe,
	.remove		= spi_remove,
	.shutdown	= spi_shutdown,
};
EXPORT_SYMBOL_GPL(spi_bus_type);

/**
 * __spi_register_driver - register a SPI driver
 * @owner: owner module of the driver to register
 * @sdrv: the driver to register
 * Context: can sleep
 *
 * Return: zero on success, else a negative error code.
 */
int __spi_register_driver(struct module *owner, struct spi_driver *sdrv)
{
	sdrv->driver.owner = owner;
	sdrv->driver.bus = &spi_bus_type;

	/*
	 * For Really Good Reasons we use spi: modaliases not of:
	 * modaliases for DT so module autoloading won't work if we
	 * don't have a spi_device_id as well as a compatible string.
	 */
	if (sdrv->driver.of_match_table) {
		const struct of_device_id *of_id;

		for (of_id = sdrv->driver.of_match_table; of_id->compatible[0];
		     of_id++) {
			const char *of_name;

			/* Strip off any vendor prefix */
			of_name = strnchr(of_id->compatible,
					  sizeof(of_id->compatible), ',');
			if (of_name)
				of_name++;
			else
				of_name = of_id->compatible;

			if (sdrv->id_table) {
				const struct spi_device_id *spi_id;

				spi_id = spi_match_id(sdrv->id_table, of_name);
				if (spi_id)
					continue;
			} else {
				if (strcmp(sdrv->driver.name, of_name) == 0)
					continue;
			}

			pr_warn("SPI driver %s has no spi_device_id for %s\n",
				sdrv->driver.name, of_id->compatible);
		}
	}

	return driver_register(&sdrv->driver);
}
EXPORT_SYMBOL_GPL(__spi_register_driver);
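
/*
 * Usage sketch (illustrative, not part of this file): drivers normally
 * reach this through the spi_register_driver() / module_spi_driver()
 * wrappers. A minimal registration for a hypothetical "acme" driver,
 * carrying both an OF compatible and a spi_device_id so module
 * autoloading works as the warning above requires:
 *
 *	static const struct of_device_id acme_of_match[] = {
 *		{ .compatible = "acme,widget" },
 *		{ }
 *	};
 *	MODULE_DEVICE_TABLE(of, acme_of_match);
 *
 *	static const struct spi_device_id acme_spi_ids[] = {
 *		{ "widget" },
 *		{ }
 *	};
 *	MODULE_DEVICE_TABLE(spi, acme_spi_ids);
 *
 *	static struct spi_driver acme_driver = {
 *		.driver = {
 *			.name		= "acme",
 *			.of_match_table	= acme_of_match,
 *		},
 *		.id_table	= acme_spi_ids,
 *		.probe		= acme_probe,
 *	};
 *	module_spi_driver(acme_driver);
 */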

/*-------------------------------------------------------------------------*/

/*
 * SPI devices should normally not be created by SPI device drivers; that
 * would make them board-specific.  Similarly with SPI controller drivers.
 * Device registration normally goes into files like
 * arch/.../mach.../board-YYY.c, alongside other read-only (flashable)
 * information about mainboard devices.
 */

struct boardinfo {
	struct list_head	list;
	struct spi_board_info	board_info;
};

static LIST_HEAD(board_list);
static LIST_HEAD(spi_controller_list);

/*
 * Used to protect add/del operations on the board_info list and the
 * spi_controller list, and their matching process. Also used to protect
 * the struct idr object (spi_master_idr).
 */
static DEFINE_MUTEX(board_lock);

/**
 * spi_alloc_device - Allocate a new SPI device
 * @ctlr: Controller to which device is connected
 * Context: can sleep
 *
 * Allows a driver to allocate and initialize a spi_device without
 * registering it immediately.  This allows a driver to directly
 * fill the spi_device with device parameters before calling
 * spi_add_device() on it.
 *
 * The caller is responsible for calling spi_add_device() on the returned
 * spi_device structure to add it to the SPI controller.  If the caller
 * needs to discard the spi_device without adding it, then it should
 * call spi_dev_put() on it.
 *
 * Return: a pointer to the new device, or NULL.
 */
struct spi_device *spi_alloc_device(struct spi_controller *ctlr)
{
	struct spi_device	*spi;

	if (!spi_controller_get(ctlr))
		return NULL;

	spi = kzalloc(sizeof(*spi), GFP_KERNEL);
	if (!spi) {
		spi_controller_put(ctlr);
		return NULL;
	}

	spi->pcpu_statistics = spi_alloc_pcpu_stats(NULL);
	if (!spi->pcpu_statistics) {
		kfree(spi);
		spi_controller_put(ctlr);
		return NULL;
	}

	spi->master = spi->controller = ctlr;
	spi->dev.parent = &ctlr->dev;
	spi->dev.bus = &spi_bus_type;
	spi->dev.release = spidev_release;
	spi->mode = ctlr->buswidth_override_bits;

	device_initialize(&spi->dev);
	return spi;
}
EXPORT_SYMBOL_GPL(spi_alloc_device);
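
/*
 * Usage sketch (illustrative, not part of this file): allocate, fill in
 * device parameters, then register; drop the reference if registration
 * fails or is abandoned. "acme-widget" is a hypothetical modalias:
 *
 *	struct spi_device *spi = spi_alloc_device(ctlr);
 *	int status;
 *
 *	if (!spi)
 *		return -ENOMEM;
 *	spi_set_chipselect(spi, 0, 0);
 *	spi->max_speed_hz = 1000000;
 *	strscpy(spi->modalias, "acme-widget", sizeof(spi->modalias));
 *	status = spi_add_device(spi);
 *	if (status) {
 *		spi_dev_put(spi);
 *		return status;
 *	}
 */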

static void spi_dev_set_name(struct spi_device *spi)
{
	struct acpi_device *adev = ACPI_COMPANION(&spi->dev);

	if (adev) {
		dev_set_name(&spi->dev, "spi-%s", acpi_dev_name(adev));
		return;
	}

	dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->controller->dev),
		     spi_get_chipselect(spi, 0));
}

static int spi_dev_check(struct device *dev, void *data)
{
	struct spi_device *spi = to_spi_device(dev);
	struct spi_device *new_spi = data;

	if (spi->controller == new_spi->controller &&
	    spi_get_chipselect(spi, 0) == spi_get_chipselect(new_spi, 0))
		return -EBUSY;
	return 0;
}

static void spi_cleanup(struct spi_device *spi)
{
	if (spi->controller->cleanup)
		spi->controller->cleanup(spi);
}

static int __spi_add_device(struct spi_device *spi)
{
	struct spi_controller *ctlr = spi->controller;
	struct device *dev = ctlr->dev.parent;
	int status;

	/*
	 * We need to make sure there's no other device with this
	 * chipselect **BEFORE** we call setup(), else we'll trash
	 * its configuration.
	 */
	status = bus_for_each_dev(&spi_bus_type, NULL, spi, spi_dev_check);
	if (status) {
		dev_err(dev, "chipselect %d already in use\n",
				spi_get_chipselect(spi, 0));
		return status;
	}

	/* Controller may unregister concurrently */
	if (IS_ENABLED(CONFIG_SPI_DYNAMIC) &&
	    !device_is_registered(&ctlr->dev)) {
		return -ENODEV;
	}

	if (ctlr->cs_gpiods)
		spi_set_csgpiod(spi, 0, ctlr->cs_gpiods[spi_get_chipselect(spi, 0)]);

	/*
	 * Drivers may modify this initial i/o setup, but will
	 * normally rely on the device being setup.  Devices
	 * using SPI_CS_HIGH can't coexist well otherwise...
	 */
	status = spi_setup(spi);
	if (status < 0) {
		dev_err(dev, "can't setup %s, status %d\n",
				dev_name(&spi->dev), status);
		return status;
	}

	/* Device may be bound to an active driver when this returns */
	status = device_add(&spi->dev);
	if (status < 0) {
		dev_err(dev, "can't add %s, status %d\n",
				dev_name(&spi->dev), status);
		spi_cleanup(spi);
	} else {
		dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev));
	}

	return status;
}

/**
 * spi_add_device - Add spi_device allocated with spi_alloc_device
 * @spi: spi_device to register
 *
 * Companion function to spi_alloc_device.  Devices allocated with
 * spi_alloc_device can be added onto the spi bus with this function.
 *
 * Return: 0 on success; negative errno on failure
 */
int spi_add_device(struct spi_device *spi)
{
	struct spi_controller *ctlr = spi->controller;
	struct device *dev = ctlr->dev.parent;
	int status;

	/* Chipselects are numbered 0..max; validate. */
	if (spi_get_chipselect(spi, 0) >= ctlr->num_chipselect) {
		dev_err(dev, "cs%d >= max %d\n", spi_get_chipselect(spi, 0),
			ctlr->num_chipselect);
		return -EINVAL;
	}

	/* Set the bus ID string */
	spi_dev_set_name(spi);

	mutex_lock(&ctlr->add_lock);
	status = __spi_add_device(spi);
	mutex_unlock(&ctlr->add_lock);
	return status;
}
EXPORT_SYMBOL_GPL(spi_add_device);

static int spi_add_device_locked(struct spi_device *spi)
{
	struct spi_controller *ctlr = spi->controller;
	struct device *dev = ctlr->dev.parent;

	/* Chipselects are numbered 0..max; validate. */
	if (spi_get_chipselect(spi, 0) >= ctlr->num_chipselect) {
		dev_err(dev, "cs%d >= max %d\n", spi_get_chipselect(spi, 0),
			ctlr->num_chipselect);
		return -EINVAL;
	}

	/* Set the bus ID string */
	spi_dev_set_name(spi);

	WARN_ON(!mutex_is_locked(&ctlr->add_lock));
	return __spi_add_device(spi);
}

/**
 * spi_new_device - instantiate one new SPI device
 * @ctlr: Controller to which device is connected
 * @chip: Describes the SPI device
 * Context: can sleep
 *
 * On typical mainboards, this is purely internal, and it's not needed
 * after board init creates the hard-wired devices.  Some development
 * platforms may not be able to use spi_register_board_info though, and
 * this is exported so that for example a USB or parport based adapter
 * driver could add devices (which it would learn about out-of-band).
 *
 * Return: the new device, or NULL.
 */
struct spi_device *spi_new_device(struct spi_controller *ctlr,
				  struct spi_board_info *chip)
{
	struct spi_device	*proxy;
	int			status;

	/*
	 * NOTE:  caller did any chip->bus_num checks necessary.
	 *
	 * Also, unless we change the return value convention to use
	 * error-or-pointer (not NULL-or-pointer), troubleshootability
	 * suggests syslogged diagnostics are best here (ugh).
	 */

	proxy = spi_alloc_device(ctlr);
	if (!proxy)
		return NULL;

	WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias));

	spi_set_chipselect(proxy, 0, chip->chip_select);
	proxy->max_speed_hz = chip->max_speed_hz;
	proxy->mode = chip->mode;
	proxy->irq = chip->irq;
	strscpy(proxy->modalias, chip->modalias, sizeof(proxy->modalias));
	proxy->dev.platform_data = (void *) chip->platform_data;
	proxy->controller_data = chip->controller_data;
	proxy->controller_state = NULL;

	if (chip->swnode) {
		status = device_add_software_node(&proxy->dev, chip->swnode);
		if (status) {
			dev_err(&ctlr->dev, "failed to add software node to '%s': %d\n",
				chip->modalias, status);
			goto err_dev_put;
		}
	}

	status = spi_add_device(proxy);
	if (status < 0)
		goto err_dev_put;

	return proxy;

err_dev_put:
	device_remove_software_node(&proxy->dev);
	spi_dev_put(proxy);
	return NULL;
}
EXPORT_SYMBOL_GPL(spi_new_device);
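
/*
 * Usage sketch (illustrative, not part of this file): an adapter driver
 * that discovers a chip out-of-band can instantiate it directly; NULL
 * means failure, with diagnostics already logged:
 *
 *	struct spi_board_info chip = {
 *		.modalias	= "acme-widget",	// Hypothetical chip
 *		.max_speed_hz	= 500000,
 *		.chip_select	= 1,
 *	};
 *	struct spi_device *dev = spi_new_device(ctlr, &chip);
 */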

/**
 * spi_unregister_device - unregister a single SPI device
 * @spi: spi_device to unregister
 *
 * Start making the passed SPI device vanish. Normally this would be handled
 * by spi_unregister_controller().
 */
void spi_unregister_device(struct spi_device *spi)
{
	if (!spi)
		return;

	if (spi->dev.of_node) {
		of_node_clear_flag(spi->dev.of_node, OF_POPULATED);
		of_node_put(spi->dev.of_node);
	}
	if (ACPI_COMPANION(&spi->dev))
		acpi_device_clear_enumerated(ACPI_COMPANION(&spi->dev));
	device_remove_software_node(&spi->dev);
	device_del(&spi->dev);
	spi_cleanup(spi);
	put_device(&spi->dev);
}
EXPORT_SYMBOL_GPL(spi_unregister_device);

static void spi_match_controller_to_boardinfo(struct spi_controller *ctlr,
					      struct spi_board_info *bi)
{
	struct spi_device *dev;

	if (ctlr->bus_num != bi->bus_num)
		return;

	dev = spi_new_device(ctlr, bi);
	if (!dev)
		dev_err(ctlr->dev.parent, "can't create new device for %s\n",
			bi->modalias);
}

/**
 * spi_register_board_info - register SPI devices for a given board
 * @info: array of chip descriptors
 * @n: how many descriptors are provided
 * Context: can sleep
 *
 * Board-specific early init code calls this (probably during arch_initcall)
 * with segments of the SPI device table.  Any device nodes are created later,
 * after the relevant parent SPI controller (bus_num) is defined.  We keep
 * this table of devices forever, so that reloading a controller driver will
 * not make Linux forget about these hard-wired devices.
 *
 * Other code can also call this, e.g. a particular add-on board might provide
 * SPI devices through its expansion connector, so code initializing that board
 * would naturally declare its SPI devices.
 *
 * The board info passed can safely be __initdata ... but be careful of
 * any embedded pointers (platform_data, etc); they're copied as-is.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_register_board_info(struct spi_board_info const *info, unsigned n)
{
	struct boardinfo *bi;
	int i;

	if (!n)
		return 0;

	bi = kcalloc(n, sizeof(*bi), GFP_KERNEL);
	if (!bi)
		return -ENOMEM;

	for (i = 0; i < n; i++, bi++, info++) {
		struct spi_controller *ctlr;

		memcpy(&bi->board_info, info, sizeof(*info));

		mutex_lock(&board_lock);
		list_add_tail(&bi->list, &board_list);
		list_for_each_entry(ctlr, &spi_controller_list, list)
			spi_match_controller_to_boardinfo(ctlr,
							  &bi->board_info);
		mutex_unlock(&board_lock);
	}

	return 0;
}
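
/*
 * Usage sketch (illustrative, not part of this file): board init code
 * registering hard-wired devices, typically from an arch_initcall.
 * "acme-flash" is a hypothetical chip:
 *
 *	static struct spi_board_info board_spi_devices[] __initdata = {
 *		{
 *			.modalias	= "acme-flash",
 *			.max_speed_hz	= 20000000,
 *			.bus_num	= 0,
 *			.chip_select	= 0,
 *		},
 *	};
 *
 *	spi_register_board_info(board_spi_devices,
 *				ARRAY_SIZE(board_spi_devices));
 */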

/*-------------------------------------------------------------------------*/

/* Core methods for SPI resource management */

/**
 * spi_res_alloc - allocate a spi resource that is life-cycle managed
 *                 during the processing of a spi_message while using
 *                 spi_transfer_one
 * @spi:     the spi device for which we allocate memory
 * @release: the release code to execute for this resource
 * @size:    size to alloc and return
 * @gfp:     GFP allocation flags
 *
 * Return: the pointer to the allocated data
 *
 * This may get enhanced in the future to allocate from a memory pool
 * of the @spi_device or @spi_controller to avoid repeated allocations.
 */
static void *spi_res_alloc(struct spi_device *spi, spi_res_release_t release,
			   size_t size, gfp_t gfp)
{
	struct spi_res *sres;

	sres = kzalloc(sizeof(*sres) + size, gfp);
	if (!sres)
		return NULL;

	INIT_LIST_HEAD(&sres->entry);
	sres->release = release;

	return sres->data;
}

/**
 * spi_res_free - free an spi resource
 * @res: pointer to the custom data of a resource
 */
static void spi_res_free(void *res)
{
	struct spi_res *sres = container_of(res, struct spi_res, data);

	if (!res)
		return;

	WARN_ON(!list_empty(&sres->entry));
	kfree(sres);
}

/**
 * spi_res_add - add a spi_res to the spi_message
 * @message: the spi message
 * @res:     the spi_resource
 */
static void spi_res_add(struct spi_message *message, void *res)
{
	struct spi_res *sres = container_of(res, struct spi_res, data);

	WARN_ON(!list_empty(&sres->entry));
	list_add_tail(&sres->entry, &message->resources);
}

/**
 * spi_res_release - release all spi resources for this message
 * @ctlr:  the @spi_controller
 * @message: the @spi_message
 */
static void spi_res_release(struct spi_controller *ctlr, struct spi_message *message)
{
	struct spi_res *res, *tmp;

	list_for_each_entry_safe_reverse(res, tmp, &message->resources, entry) {
		if (res->release)
			res->release(ctlr, message, res->data);

		list_del(&res->entry);

		kfree(res);
	}
}
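
/*
 * Pattern sketch (illustrative, not part of this file): core code that
 * temporarily rewrites a message allocates a spi_res carrying its undo
 * state, then lets spi_res_release() revert everything when the message
 * completes. my_state and my_release (a spi_res_release_t callback) are
 * hypothetical:
 *
 *	struct my_state *st = spi_res_alloc(msg->spi, my_release,
 *					    sizeof(*st), GFP_KERNEL);
 *	if (!st)
 *		return -ENOMEM;
 *	...	// Stash undo information in *st
 *	spi_res_add(msg, st);
 */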

/*-------------------------------------------------------------------------*/

static void spi_set_cs(struct spi_device *spi, bool enable, bool force)
{
	bool activate = enable;

	/*
	 * Avoid calling into the driver (or doing delays) if the chip select
	 * isn't actually changing from the last time this was called.
	 */
	if (!force && ((enable && spi->controller->last_cs == spi_get_chipselect(spi, 0)) ||
		       (!enable && spi->controller->last_cs != spi_get_chipselect(spi, 0))) &&
	    (spi->controller->last_cs_mode_high == (spi->mode & SPI_CS_HIGH)))
		return;

	trace_spi_set_cs(spi, activate);

	spi->controller->last_cs = enable ? spi_get_chipselect(spi, 0) : -1;
	spi->controller->last_cs_mode_high = spi->mode & SPI_CS_HIGH;

	if ((spi_get_csgpiod(spi, 0) || !spi->controller->set_cs_timing) && !activate)
		spi_delay_exec(&spi->cs_hold, NULL);

	if (spi->mode & SPI_CS_HIGH)
		enable = !enable;

	if (spi_get_csgpiod(spi, 0)) {
		if (!(spi->mode & SPI_NO_CS)) {
			/*
			 * Historically ACPI has no means of expressing GPIO
			 * polarity, and thus the SPISerialBus() resource
			 * defines it on a per-chip basis. In order to avoid
			 * a chain of negations, the GPIO polarity is
			 * considered being Active High. Even for the cases
			 * when _DSD() is involved (in the updated versions
			 * of ACPI) the GPIO CS polarity must be defined
			 * Active High to avoid ambiguity. That's why we use
			 * enable, which takes SPI_CS_HIGH into account.
			 */
			if (has_acpi_companion(&spi->dev))
				gpiod_set_value_cansleep(spi_get_csgpiod(spi, 0), !enable);
			else
				/* Polarity handled by GPIO library */
				gpiod_set_value_cansleep(spi_get_csgpiod(spi, 0), activate);
		}
		/* Some SPI masters need both GPIO CS & slave_select */
		if ((spi->controller->flags & SPI_MASTER_GPIO_SS) &&
		    spi->controller->set_cs)
			spi->controller->set_cs(spi, !enable);
	} else if (spi->controller->set_cs) {
		spi->controller->set_cs(spi, !enable);
	}

	if (spi_get_csgpiod(spi, 0) || !spi->controller->set_cs_timing) {
		if (activate)
			spi_delay_exec(&spi->cs_setup, NULL);
		else
			spi_delay_exec(&spi->cs_inactive, NULL);
	}
}

#ifdef CONFIG_HAS_DMA
static int spi_map_buf_attrs(struct spi_controller *ctlr, struct device *dev,
			     struct sg_table *sgt, void *buf, size_t len,
			     enum dma_data_direction dir, unsigned long attrs)
{
	const bool vmalloced_buf = is_vmalloc_addr(buf);
	unsigned int max_seg_size = dma_get_max_seg_size(dev);
#ifdef CONFIG_HIGHMEM
	const bool kmap_buf = ((unsigned long)buf >= PKMAP_BASE &&
				(unsigned long)buf < (PKMAP_BASE +
					(LAST_PKMAP * PAGE_SIZE)));
#else
	const bool kmap_buf = false;
#endif
	int desc_len;
	int sgs;
	struct page *vm_page;
	struct scatterlist *sg;
	void *sg_buf;
	size_t min;
	int i, ret;

	if (vmalloced_buf || kmap_buf) {
		desc_len = min_t(unsigned long, max_seg_size, PAGE_SIZE);
		sgs = DIV_ROUND_UP(len + offset_in_page(buf), desc_len);
	} else if (virt_addr_valid(buf)) {
		desc_len = min_t(size_t, max_seg_size, ctlr->max_dma_len);
		sgs = DIV_ROUND_UP(len, desc_len);
	} else {
		return -EINVAL;
	}

	ret = sg_alloc_table(sgt, sgs, GFP_KERNEL);
	if (ret != 0)
		return ret;

	sg = &sgt->sgl[0];
	for (i = 0; i < sgs; i++) {

		if (vmalloced_buf || kmap_buf) {
			/*
			 * Next scatterlist entry size is the minimum between
			 * the desc_len and the remaining buffer length that
			 * fits in a page.
			 */
			min = min_t(size_t, desc_len,
				    min_t(size_t, len,
					  PAGE_SIZE - offset_in_page(buf)));
			if (vmalloced_buf)
				vm_page = vmalloc_to_page(buf);
			else
				vm_page = kmap_to_page(buf);
			if (!vm_page) {
				sg_free_table(sgt);
				return -ENOMEM;
			}
			sg_set_page(sg, vm_page,
				    min, offset_in_page(buf));
		} else {
			min = min_t(size_t, len, desc_len);
			sg_buf = buf;
			sg_set_buf(sg, sg_buf, min);
		}

		buf += min;
		len -= min;
		sg = sg_next(sg);
	}

	ret = dma_map_sgtable(dev, sgt, dir, attrs);
	if (ret < 0) {
		sg_free_table(sgt);
		return ret;
	}

	return 0;
}

int spi_map_buf(struct spi_controller *ctlr, struct device *dev,
		struct sg_table *sgt, void *buf, size_t len,
		enum dma_data_direction dir)
{
	return spi_map_buf_attrs(ctlr, dev, sgt, buf, len, dir, 0);
}

static void spi_unmap_buf_attrs(struct spi_controller *ctlr,
				struct device *dev, struct sg_table *sgt,
				enum dma_data_direction dir,
				unsigned long attrs)
{
	if (sgt->orig_nents) {
		dma_unmap_sgtable(dev, sgt, dir, attrs);
		sg_free_table(sgt);
		sgt->orig_nents = 0;
		sgt->nents = 0;
	}
}

void spi_unmap_buf(struct spi_controller *ctlr, struct device *dev,
		   struct sg_table *sgt, enum dma_data_direction dir)
{
	spi_unmap_buf_attrs(ctlr, dev, sgt, dir, 0);
}

static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
{
	struct device *tx_dev, *rx_dev;
	struct spi_transfer *xfer;
	int ret;

	if (!ctlr->can_dma)
		return 0;

	if (ctlr->dma_tx)
		tx_dev = ctlr->dma_tx->device->dev;
	else if (ctlr->dma_map_dev)
		tx_dev = ctlr->dma_map_dev;
	else
		tx_dev = ctlr->dev.parent;

	if (ctlr->dma_rx)
		rx_dev = ctlr->dma_rx->device->dev;
	else if (ctlr->dma_map_dev)
		rx_dev = ctlr->dma_map_dev;
	else
		rx_dev = ctlr->dev.parent;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		/* The sync is done before each transfer. */
		unsigned long attrs = DMA_ATTR_SKIP_CPU_SYNC;

		if (!ctlr->can_dma(ctlr, msg->spi, xfer))
			continue;

		if (xfer->tx_buf != NULL) {
			ret = spi_map_buf_attrs(ctlr, tx_dev, &xfer->tx_sg,
						(void *)xfer->tx_buf,
						xfer->len, DMA_TO_DEVICE,
						attrs);
			if (ret != 0)
				return ret;
		}

		if (xfer->rx_buf != NULL) {
			ret = spi_map_buf_attrs(ctlr, rx_dev, &xfer->rx_sg,
						xfer->rx_buf, xfer->len,
						DMA_FROM_DEVICE, attrs);
			if (ret != 0) {
				spi_unmap_buf_attrs(ctlr, tx_dev,
						&xfer->tx_sg, DMA_TO_DEVICE,
						attrs);

				return ret;
			}
		}
	}

	ctlr->cur_rx_dma_dev = rx_dev;
	ctlr->cur_tx_dma_dev = tx_dev;
	ctlr->cur_msg_mapped = true;

	return 0;
}

static int __spi_unmap_msg(struct spi_controller *ctlr, struct spi_message *msg)
{
	struct device *rx_dev = ctlr->cur_rx_dma_dev;
	struct device *tx_dev = ctlr->cur_tx_dma_dev;
	struct spi_transfer *xfer;

	if (!ctlr->cur_msg_mapped || !ctlr->can_dma)
		return 0;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		/* The sync has already been done after each transfer. */
		unsigned long attrs = DMA_ATTR_SKIP_CPU_SYNC;

		if (!ctlr->can_dma(ctlr, msg->spi, xfer))
			continue;

		spi_unmap_buf_attrs(ctlr, rx_dev, &xfer->rx_sg,
				    DMA_FROM_DEVICE, attrs);
		spi_unmap_buf_attrs(ctlr, tx_dev, &xfer->tx_sg,
				    DMA_TO_DEVICE, attrs);
	}

	ctlr->cur_msg_mapped = false;

	return 0;
}

static void spi_dma_sync_for_device(struct spi_controller *ctlr,
				    struct spi_transfer *xfer)
{
	struct device *rx_dev = ctlr->cur_rx_dma_dev;
	struct device *tx_dev = ctlr->cur_tx_dma_dev;

	if (!ctlr->cur_msg_mapped)
		return;

	if (xfer->tx_sg.orig_nents)
		dma_sync_sgtable_for_device(tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
	if (xfer->rx_sg.orig_nents)
		dma_sync_sgtable_for_device(rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
}

static void spi_dma_sync_for_cpu(struct spi_controller *ctlr,
				 struct spi_transfer *xfer)
{
	struct device *rx_dev = ctlr->cur_rx_dma_dev;
	struct device *tx_dev = ctlr->cur_tx_dma_dev;

	if (!ctlr->cur_msg_mapped)
		return;

	if (xfer->rx_sg.orig_nents)
		dma_sync_sgtable_for_cpu(rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
	if (xfer->tx_sg.orig_nents)
		dma_sync_sgtable_for_cpu(tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
}
#else /* !CONFIG_HAS_DMA */
static inline int __spi_map_msg(struct spi_controller *ctlr,
				struct spi_message *msg)
{
	return 0;
}

static inline int __spi_unmap_msg(struct spi_controller *ctlr,
				  struct spi_message *msg)
{
	return 0;
}

static void spi_dma_sync_for_device(struct spi_controller *ctrl,
				    struct spi_transfer *xfer)
{
}

static void spi_dma_sync_for_cpu(struct spi_controller *ctrl,
				 struct spi_transfer *xfer)
{
}
#endif /* !CONFIG_HAS_DMA */

static inline int spi_unmap_msg(struct spi_controller *ctlr,
				struct spi_message *msg)
{
	struct spi_transfer *xfer;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		/*
		 * Restore the original value (NULL) of tx_buf or rx_buf if
		 * they were replaced with the dummy buffers.
		 */
		if (xfer->tx_buf == ctlr->dummy_tx)
			xfer->tx_buf = NULL;
		if (xfer->rx_buf == ctlr->dummy_rx)
			xfer->rx_buf = NULL;
	}

	return __spi_unmap_msg(ctlr, msg);
}

static int spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
{
	struct spi_transfer *xfer;
	void *tmp;
	unsigned int max_tx, max_rx;

	if ((ctlr->flags & (SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX))
		&& !(msg->spi->mode & SPI_3WIRE)) {
		max_tx = 0;
		max_rx = 0;

		list_for_each_entry(xfer, &msg->transfers, transfer_list) {
			if ((ctlr->flags & SPI_CONTROLLER_MUST_TX) &&
			    !xfer->tx_buf)
				max_tx = max(xfer->len, max_tx);
			if ((ctlr->flags & SPI_CONTROLLER_MUST_RX) &&
			    !xfer->rx_buf)
				max_rx = max(xfer->len, max_rx);
		}

		if (max_tx) {
			tmp = krealloc(ctlr->dummy_tx, max_tx,
				       GFP_KERNEL | GFP_DMA | __GFP_ZERO);
			if (!tmp)
				return -ENOMEM;
			ctlr->dummy_tx = tmp;
		}

		if (max_rx) {
			tmp = krealloc(ctlr->dummy_rx, max_rx,
				       GFP_KERNEL | GFP_DMA);
			if (!tmp)
				return -ENOMEM;
			ctlr->dummy_rx = tmp;
		}

		if (max_tx || max_rx) {
			list_for_each_entry(xfer, &msg->transfers,
					    transfer_list) {
				if (!xfer->len)
					continue;
				if (!xfer->tx_buf)
					xfer->tx_buf = ctlr->dummy_tx;
				if (!xfer->rx_buf)
					xfer->rx_buf = ctlr->dummy_rx;
			}
		}
	}

	return __spi_map_msg(ctlr, msg);
}

static int spi_transfer_wait(struct spi_controller *ctlr,
			     struct spi_message *msg,
			     struct spi_transfer *xfer)
{
	struct spi_statistics __percpu *statm = ctlr->pcpu_statistics;
	struct spi_statistics __percpu *stats = msg->spi->pcpu_statistics;
	u32 speed_hz = xfer->speed_hz;
	unsigned long long ms;

	if (spi_controller_is_slave(ctlr)) {
		if (wait_for_completion_interruptible(&ctlr->xfer_completion)) {
			dev_dbg(&msg->spi->dev, "SPI transfer interrupted\n");
			return -EINTR;
		}
	} else {
		if (!speed_hz)
			speed_hz = 100000;

		/*
		 * For each byte we wait for 8 cycles of the SPI clock.
		 * Since speed is defined in Hz and we want milliseconds,
		 * use the respective multiplier, but before the division,
		 * otherwise we may get 0 for short transfers.
		 */
		ms = 8LL * MSEC_PER_SEC * xfer->len;
		do_div(ms, speed_hz);

		/*
		 * Increase it twice and add 200 ms tolerance, use
		 * predefined maximum in case of overflow.
		 */
		ms += ms + 200;
		if (ms > UINT_MAX)
			ms = UINT_MAX;

		ms = wait_for_completion_timeout(&ctlr->xfer_completion,
						 msecs_to_jiffies(ms));

		if (ms == 0) {
			SPI_STATISTICS_INCREMENT_FIELD(statm, timedout);
			SPI_STATISTICS_INCREMENT_FIELD(stats, timedout);
			dev_err(&msg->spi->dev,
				"SPI transfer timed out\n");
			return -ETIMEDOUT;
		}
	}

	return 0;
}
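
/*
 * Worked example of the timeout math above (illustrative): a 256-byte
 * transfer at 1 MHz gives ms = 8 * 1000 * 256 / 1000000 = 2, and
 * ms += ms + 200 then yields a 204 ms timeout, so even short transfers
 * get a generous margin before -ETIMEDOUT is reported.
 */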

static void _spi_transfer_delay_ns(u32 ns)
{
	if (!ns)
		return;
	if (ns <= NSEC_PER_USEC) {
		ndelay(ns);
	} else {
		u32 us = DIV_ROUND_UP(ns, NSEC_PER_USEC);

		if (us <= 10)
			udelay(us);
		else
			usleep_range(us, us + DIV_ROUND_UP(us, 10));
	}
}

int spi_delay_to_ns(struct spi_delay *_delay, struct spi_transfer *xfer)
{
	u32 delay = _delay->value;
	u32 unit = _delay->unit;
	u32 hz;

	if (!delay)
		return 0;

	switch (unit) {
	case SPI_DELAY_UNIT_USECS:
		delay *= NSEC_PER_USEC;
		break;
	case SPI_DELAY_UNIT_NSECS:
		/* Nothing to do here */
		break;
	case SPI_DELAY_UNIT_SCK:
		/* Clock cycles need to be obtained from spi_transfer */
		if (!xfer)
			return -EINVAL;
		/*
		 * If the effective speed is unknown, approximate it
		 * by underestimating with half of the requested Hz.
		 */
		hz = xfer->effective_speed_hz ?: xfer->speed_hz / 2;
		if (!hz)
			return -EINVAL;

		/* Convert delay to nanoseconds */
		delay *= DIV_ROUND_UP(NSEC_PER_SEC, hz);
		break;
	default:
		return -EINVAL;
	}

	return delay;
}
EXPORT_SYMBOL_GPL(spi_delay_to_ns);
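
/*
 * Worked example (illustrative): a delay of 4 clock cycles
 * (SPI_DELAY_UNIT_SCK) on a transfer with an effective speed of 10 MHz
 * converts to 4 * DIV_ROUND_UP(1000000000, 10000000) = 4 * 100 = 400 ns.
 */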

int spi_delay_exec(struct spi_delay *_delay, struct spi_transfer *xfer)
{
	int delay;

	might_sleep();

	if (!_delay)
		return -EINVAL;

	delay = spi_delay_to_ns(_delay, xfer);
	if (delay < 0)
		return delay;

	_spi_transfer_delay_ns(delay);

	return 0;
}
EXPORT_SYMBOL_GPL(spi_delay_exec);

static void _spi_transfer_cs_change_delay(struct spi_message *msg,
					  struct spi_transfer *xfer)
{
	u32 default_delay_ns = 10 * NSEC_PER_USEC;
	u32 delay = xfer->cs_change_delay.value;
	u32 unit = xfer->cs_change_delay.unit;
	int ret;

	/* Return early on "fast" mode - for everything but USECS */
	if (!delay) {
		if (unit == SPI_DELAY_UNIT_USECS)
			_spi_transfer_delay_ns(default_delay_ns);
		return;
	}

	ret = spi_delay_exec(&xfer->cs_change_delay, xfer);
	if (ret) {
		dev_err_once(&msg->spi->dev,
			     "Use of unsupported delay unit %i, using default of %luus\n",
			     unit, default_delay_ns / NSEC_PER_USEC);
		_spi_transfer_delay_ns(default_delay_ns);
	}
}

/*
 * spi_transfer_one_message - Default implementation of transfer_one_message()
 *
 * This is a standard implementation of transfer_one_message() for
 * drivers which implement a transfer_one() operation.  It provides
 * standard handling of delays and chip select management.
 */
static int spi_transfer_one_message(struct spi_controller *ctlr,
				    struct spi_message *msg)
{
	struct spi_transfer *xfer;
	bool keep_cs = false;
	int ret = 0;
	struct spi_statistics __percpu *statm = ctlr->pcpu_statistics;
	struct spi_statistics __percpu *stats = msg->spi->pcpu_statistics;

	xfer = list_first_entry(&msg->transfers, struct spi_transfer, transfer_list);
	spi_set_cs(msg->spi, !xfer->cs_off, false);

	SPI_STATISTICS_INCREMENT_FIELD(statm, messages);
	SPI_STATISTICS_INCREMENT_FIELD(stats, messages);

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		trace_spi_transfer_start(msg, xfer);

		spi_statistics_add_transfer_stats(statm, xfer, ctlr);
		spi_statistics_add_transfer_stats(stats, xfer, ctlr);

		if (!ctlr->ptp_sts_supported) {
			xfer->ptp_sts_word_pre = 0;
			ptp_read_system_prets(xfer->ptp_sts);
		}

		if ((xfer->tx_buf || xfer->rx_buf) && xfer->len) {
			reinit_completion(&ctlr->xfer_completion);

fallback_pio:
			spi_dma_sync_for_device(ctlr, xfer);
			ret = ctlr->transfer_one(ctlr, msg->spi, xfer);
			if (ret < 0) {
				spi_dma_sync_for_cpu(ctlr, xfer);

				if (ctlr->cur_msg_mapped &&
				   (xfer->error & SPI_TRANS_FAIL_NO_START)) {
					__spi_unmap_msg(ctlr, msg);
					ctlr->fallback = true;
					xfer->error &= ~SPI_TRANS_FAIL_NO_START;
					goto fallback_pio;
				}

				SPI_STATISTICS_INCREMENT_FIELD(statm,
							       errors);
				SPI_STATISTICS_INCREMENT_FIELD(stats,
							       errors);
				dev_err(&msg->spi->dev,
					"SPI transfer failed: %d\n", ret);
				goto out;
			}

			if (ret > 0) {
				ret = spi_transfer_wait(ctlr, msg, xfer);
				if (ret < 0)
					msg->status = ret;
			}

			spi_dma_sync_for_cpu(ctlr, xfer);
		} else {
			if (xfer->len)
				dev_err(&msg->spi->dev,
					"Bufferless transfer has length %u\n",
					xfer->len);
		}

		if (!ctlr->ptp_sts_supported) {
			ptp_read_system_postts(xfer->ptp_sts);
			xfer->ptp_sts_word_post = xfer->len;
		}

		trace_spi_transfer_stop(msg, xfer);

		if (msg->status != -EINPROGRESS)
			goto out;

		spi_transfer_delay_exec(xfer);

		if (xfer->cs_change) {
			if (list_is_last(&xfer->transfer_list,
					 &msg->transfers)) {
				keep_cs = true;
			} else {
				if (!xfer->cs_off)
					spi_set_cs(msg->spi, false, false);
				_spi_transfer_cs_change_delay(msg, xfer);
				if (!list_next_entry(xfer, transfer_list)->cs_off)
					spi_set_cs(msg->spi, true, false);
			}
		} else if (!list_is_last(&xfer->transfer_list, &msg->transfers) &&
			   xfer->cs_off != list_next_entry(xfer, transfer_list)->cs_off) {
			spi_set_cs(msg->spi, xfer->cs_off, false);
		}

		msg->actual_length += xfer->len;
	}

out:
	if (ret != 0 || !keep_cs)
		spi_set_cs(msg->spi, false, false);

	if (msg->status == -EINPROGRESS)
		msg->status = ret;

	if (msg->status && ctlr->handle_err)
		ctlr->handle_err(ctlr, msg);

	spi_finalize_current_message(ctlr);

	return ret;
}

/**
 * spi_finalize_current_transfer - report completion of a transfer
 * @ctlr: the controller reporting completion
 *
 * Called by SPI drivers using the core transfer_one_message()
 * implementation to notify it that the current interrupt driven
 * transfer has finished and the next one may be scheduled.
 */
void spi_finalize_current_transfer(struct spi_controller *ctlr)
{
	complete(&ctlr->xfer_completion);
}
EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);
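
/*
 * Usage sketch (illustrative, not part of this file): an interrupt
 * driven controller driver returns a positive value from its
 * transfer_one() callback and completes the transfer from its IRQ
 * handler; acme_spi_irq is hypothetical:
 *
 *	static irqreturn_t acme_spi_irq(int irq, void *dev_id)
 *	{
 *		struct spi_controller *ctlr = dev_id;
 *
 *		...	// Drain FIFOs, check for errors
 *		spi_finalize_current_transfer(ctlr);
 *		return IRQ_HANDLED;
 *	}
 */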
1618 
spi_idle_runtime_pm(struct spi_controller * ctlr)1619 static void spi_idle_runtime_pm(struct spi_controller *ctlr)
1620 {
1621 	if (ctlr->auto_runtime_pm) {
1622 		pm_runtime_mark_last_busy(ctlr->dev.parent);
1623 		pm_runtime_put_autosuspend(ctlr->dev.parent);
1624 	}
1625 }
1626 
__spi_pump_transfer_message(struct spi_controller * ctlr,struct spi_message * msg,bool was_busy)1627 static int __spi_pump_transfer_message(struct spi_controller *ctlr,
1628 		struct spi_message *msg, bool was_busy)
1629 {
1630 	struct spi_transfer *xfer;
1631 	int ret;
1632 
1633 	if (!was_busy && ctlr->auto_runtime_pm) {
1634 		ret = pm_runtime_get_sync(ctlr->dev.parent);
1635 		if (ret < 0) {
1636 			pm_runtime_put_noidle(ctlr->dev.parent);
1637 			dev_err(&ctlr->dev, "Failed to power device: %d\n",
1638 				ret);
1639 
1640 			msg->status = ret;
1641 			spi_finalize_current_message(ctlr);
1642 
1643 			return ret;
1644 		}
1645 	}
1646 
1647 	if (!was_busy)
1648 		trace_spi_controller_busy(ctlr);
1649 
1650 	if (!was_busy && ctlr->prepare_transfer_hardware) {
1651 		ret = ctlr->prepare_transfer_hardware(ctlr);
1652 		if (ret) {
1653 			dev_err(&ctlr->dev,
1654 				"failed to prepare transfer hardware: %d\n",
1655 				ret);
1656 
1657 			if (ctlr->auto_runtime_pm)
1658 				pm_runtime_put(ctlr->dev.parent);
1659 
1660 			msg->status = ret;
1661 			spi_finalize_current_message(ctlr);
1662 
1663 			return ret;
1664 		}
1665 	}
1666 
1667 	trace_spi_message_start(msg);
1668 
1669 	ret = spi_split_transfers_maxsize(ctlr, msg,
1670 					  spi_max_transfer_size(msg->spi),
1671 					  GFP_KERNEL | GFP_DMA);
1672 	if (ret) {
1673 		msg->status = ret;
1674 		spi_finalize_current_message(ctlr);
1675 		return ret;
1676 	}
1677 
1678 	if (ctlr->prepare_message) {
1679 		ret = ctlr->prepare_message(ctlr, msg);
1680 		if (ret) {
1681 			dev_err(&ctlr->dev, "failed to prepare message: %d\n",
1682 				ret);
1683 			msg->status = ret;
1684 			spi_finalize_current_message(ctlr);
1685 			return ret;
1686 		}
1687 		msg->prepared = true;
1688 	}
1689 
1690 	ret = spi_map_msg(ctlr, msg);
1691 	if (ret) {
1692 		msg->status = ret;
1693 		spi_finalize_current_message(ctlr);
1694 		return ret;
1695 	}
1696 
1697 	if (!ctlr->ptp_sts_supported && !ctlr->transfer_one) {
1698 		list_for_each_entry(xfer, &msg->transfers, transfer_list) {
1699 			xfer->ptp_sts_word_pre = 0;
1700 			ptp_read_system_prets(xfer->ptp_sts);
1701 		}
1702 	}
1703 
1704 	/*
1705 	 * Drivers implementation of transfer_one_message() must arrange for
1706 	 * spi_finalize_current_message() to get called. Most drivers will do
1707 	 * this in the calling context, but some don't. For those cases, a
1708 	 * completion is used to guarantee that this function does not return
1709 	 * until spi_finalize_current_message() is done accessing
1710 	 * ctlr->cur_msg.
1711 	 * The following two flags allow us to opportunistically skip the use
1712 	 * of the completion, since taking it involves expensive spin locks.
1713 	 * In case of a race with the context that calls
1714 	 * spi_finalize_current_message() the completion will always be used,
1715 	 * due to strict ordering of these flags using barriers.
1716 	 */
1717 	WRITE_ONCE(ctlr->cur_msg_incomplete, true);
1718 	WRITE_ONCE(ctlr->cur_msg_need_completion, false);
1719 	reinit_completion(&ctlr->cur_msg_completion);
1720 	smp_wmb(); /* Make these available to spi_finalize_current_message() */
1721 
1722 	ret = ctlr->transfer_one_message(ctlr, msg);
1723 	if (ret) {
1724 		dev_err(&ctlr->dev,
1725 			"failed to transfer one message from queue\n");
1726 		return ret;
1727 	}
1728 
1729 	WRITE_ONCE(ctlr->cur_msg_need_completion, true);
1730 	smp_mb(); /* See spi_finalize_current_message()... */
1731 	if (READ_ONCE(ctlr->cur_msg_incomplete))
1732 		wait_for_completion(&ctlr->cur_msg_completion);
1733 
1734 	return 0;
1735 }
1736 
1737 /**
1738  * __spi_pump_messages - function which processes spi message queue
1739  * @ctlr: controller to process queue for
1740  * @in_kthread: true if we are in the context of the message pump thread
1741  *
1742  * This function checks if there is any spi message in the queue that
1743  * needs processing and, if so, calls out to the driver to initialize the
1744  * hardware and transfer each message.
1745  *
1746  * Note that it is called both from the kthread itself and also from
1747  * inside spi_sync(); the queue extraction handling at the top of the
1748  * function should deal with this safely.
1749  */
1750 static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
1751 {
1752 	struct spi_message *msg;
1753 	bool was_busy = false;
1754 	unsigned long flags;
1755 	int ret;
1756 
1757 	/* Take the IO mutex */
1758 	mutex_lock(&ctlr->io_mutex);
1759 
1760 	/* Lock queue */
1761 	spin_lock_irqsave(&ctlr->queue_lock, flags);
1762 
1763 	/* Make sure we are not already running a message */
1764 	if (ctlr->cur_msg)
1765 		goto out_unlock;
1766 
1767 	/* Check if the queue is idle */
1768 	if (list_empty(&ctlr->queue) || !ctlr->running) {
1769 		if (!ctlr->busy)
1770 			goto out_unlock;
1771 
1772 		/* Defer any non-atomic teardown to the thread */
1773 		if (!in_kthread) {
1774 			if (!ctlr->dummy_rx && !ctlr->dummy_tx &&
1775 			    !ctlr->unprepare_transfer_hardware) {
1776 				spi_idle_runtime_pm(ctlr);
1777 				ctlr->busy = false;
1778 				ctlr->queue_empty = true;
1779 				trace_spi_controller_idle(ctlr);
1780 			} else {
1781 				kthread_queue_work(ctlr->kworker,
1782 						   &ctlr->pump_messages);
1783 			}
1784 			goto out_unlock;
1785 		}
1786 
1787 		ctlr->busy = false;
1788 		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1789 
1790 		kfree(ctlr->dummy_rx);
1791 		ctlr->dummy_rx = NULL;
1792 		kfree(ctlr->dummy_tx);
1793 		ctlr->dummy_tx = NULL;
1794 		if (ctlr->unprepare_transfer_hardware &&
1795 		    ctlr->unprepare_transfer_hardware(ctlr))
1796 			dev_err(&ctlr->dev,
1797 				"failed to unprepare transfer hardware\n");
1798 		spi_idle_runtime_pm(ctlr);
1799 		trace_spi_controller_idle(ctlr);
1800 
1801 		spin_lock_irqsave(&ctlr->queue_lock, flags);
1802 		ctlr->queue_empty = true;
1803 		goto out_unlock;
1804 	}
1805 
1806 	/* Extract head of queue */
1807 	msg = list_first_entry(&ctlr->queue, struct spi_message, queue);
1808 	ctlr->cur_msg = msg;
1809 
1810 	list_del_init(&msg->queue);
1811 	if (ctlr->busy)
1812 		was_busy = true;
1813 	else
1814 		ctlr->busy = true;
1815 	spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1816 
1817 	ret = __spi_pump_transfer_message(ctlr, msg, was_busy);
1818 	kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
1819 
1820 	ctlr->cur_msg = NULL;
1821 	ctlr->fallback = false;
1822 
1823 	mutex_unlock(&ctlr->io_mutex);
1824 
1825 	/* Prod the scheduler in case transfer_one() was busy waiting */
1826 	if (!ret)
1827 		cond_resched();
1828 	return;
1829 
1830 out_unlock:
1831 	spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1832 	mutex_unlock(&ctlr->io_mutex);
1833 }
1834 
1835 /**
1836  * spi_pump_messages - kthread work function which processes spi message queue
1837  * @work: pointer to kthread work struct contained in the controller struct
1838  */
1839 static void spi_pump_messages(struct kthread_work *work)
1840 {
1841 	struct spi_controller *ctlr =
1842 		container_of(work, struct spi_controller, pump_messages);
1843 
1844 	__spi_pump_messages(ctlr, true);
1845 }
1846 
1847 /**
1848  * spi_take_timestamp_pre - helper to collect the beginning of the TX timestamp
1849  * @ctlr: Pointer to the spi_controller structure of the driver
1850  * @xfer: Pointer to the transfer being timestamped
1851  * @progress: How many words (not bytes) have been transferred so far
1852  * @irqs_off: If true, will disable IRQs and preemption for the duration of the
1853  *	      transfer, for less jitter in time measurement. Only compatible
1854  *	      with PIO drivers. If true, the caller must follow up with
1855  *	      spi_take_timestamp_post() or the system will crash.
1856  *	      WARNING: for fully predictable results, the CPU frequency must
1857  *	      also be under control (governor).
1858  *
1859  * This is a helper for drivers to collect the beginning of the TX timestamp
1860  * for the requested byte from the SPI transfer. The frequency with which this
1861  * function must be called (once per word, once for the whole transfer, once
1862  * per batch of words etc) is arbitrary as long as the @tx buffer offset is
1863  * greater than or equal to the requested byte at the time of the call. The
1864  * timestamp is only taken once, at the first such call. It is assumed that
1865  * the driver advances its @tx buffer pointer monotonically.
1866  */
1867 void spi_take_timestamp_pre(struct spi_controller *ctlr,
1868 			    struct spi_transfer *xfer,
1869 			    size_t progress, bool irqs_off)
1870 {
1871 	if (!xfer->ptp_sts)
1872 		return;
1873 
1874 	if (xfer->timestamped)
1875 		return;
1876 
1877 	if (progress > xfer->ptp_sts_word_pre)
1878 		return;
1879 
1880 	/* Capture the resolution of the timestamp */
1881 	xfer->ptp_sts_word_pre = progress;
1882 
1883 	if (irqs_off) {
1884 		local_irq_save(ctlr->irq_flags);
1885 		preempt_disable();
1886 	}
1887 
1888 	ptp_read_system_prets(xfer->ptp_sts);
1889 }
1890 EXPORT_SYMBOL_GPL(spi_take_timestamp_pre);
1891 
1892 /**
1893  * spi_take_timestamp_post - helper to collect the end of the TX timestamp
1894  * @ctlr: Pointer to the spi_controller structure of the driver
1895  * @xfer: Pointer to the transfer being timestamped
1896  * @progress: How many words (not bytes) have been transferred so far
1897  * @irqs_off: If true, will re-enable IRQs and preemption for the local CPU.
1898  *
1899  * This is a helper for drivers to collect the end of the TX timestamp for
1900  * the requested byte from the SPI transfer. Can be called with an arbitrary
1901  * frequency: only the first call where @tx exceeds or is equal to the
1902  * requested word will be timestamped.
1903  */
1904 void spi_take_timestamp_post(struct spi_controller *ctlr,
1905 			     struct spi_transfer *xfer,
1906 			     size_t progress, bool irqs_off)
1907 {
1908 	if (!xfer->ptp_sts)
1909 		return;
1910 
1911 	if (xfer->timestamped)
1912 		return;
1913 
1914 	if (progress < xfer->ptp_sts_word_post)
1915 		return;
1916 
1917 	ptp_read_system_postts(xfer->ptp_sts);
1918 
1919 	if (irqs_off) {
1920 		local_irq_restore(ctlr->irq_flags);
1921 		preempt_enable();
1922 	}
1923 
1924 	/* Capture the resolution of the timestamp */
1925 	xfer->ptp_sts_word_post = progress;
1926 
1927 	xfer->timestamped = true;
1928 }
1929 EXPORT_SYMBOL_GPL(spi_take_timestamp_post);
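
/*
 * Example (illustrative sketch, not part of this file): a PIO driver
 * bracketing each word with the two timestamp helpers, assuming 8 bits
 * per word so one word equals one byte.  All "foo_"/"FOO_" names are
 * hypothetical.
 *
 *	static void foo_spi_tx_words(struct spi_controller *ctlr,
 *				     struct spi_transfer *xfer)
 *	{
 *		struct foo_spi *fs = spi_controller_get_devdata(ctlr);
 *		const u8 *tx = xfer->tx_buf;
 *		size_t i;
 *
 *		for (i = 0; i < xfer->len; i++) {
 *			// "pre" snapshot before pushing the requested word
 *			spi_take_timestamp_pre(ctlr, xfer, i, false);
 *			writeb(tx[i], fs->base + FOO_TXDATA);
 *			// "post" snapshot once the word has been pushed
 *			spi_take_timestamp_post(ctlr, xfer, i + 1, false);
 *		}
 *	}
 */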
1930 
1931 /**
1932  * spi_set_thread_rt - set the controller to pump at realtime priority
1933  * @ctlr: controller to boost priority of
1934  *
1935  * This can be called because the controller requested realtime priority
1936  * (by setting the ->rt value before calling spi_register_controller()) or
1937  * because a device on the bus said that its transfers needed realtime
1938  * priority.
1939  *
1940  * NOTE: at the moment if any device on a bus says it needs realtime then
1941  * the thread will be at realtime priority for all transfers on that
1942  * controller.  If this eventually becomes a problem we may see if we can
1943  * find a way to boost the priority only temporarily during relevant
1944  * transfers.
1945  */
1946 static void spi_set_thread_rt(struct spi_controller *ctlr)
1947 {
1948 	dev_info(&ctlr->dev,
1949 		"will run message pump with realtime priority\n");
1950 	sched_set_fifo(ctlr->kworker->task);
1951 }
1952 
1953 static int spi_init_queue(struct spi_controller *ctlr)
1954 {
1955 	ctlr->running = false;
1956 	ctlr->busy = false;
1957 	ctlr->queue_empty = true;
1958 
1959 	ctlr->kworker = kthread_create_worker(0, dev_name(&ctlr->dev));
1960 	if (IS_ERR(ctlr->kworker)) {
1961 		dev_err(&ctlr->dev, "failed to create message pump kworker\n");
1962 		return PTR_ERR(ctlr->kworker);
1963 	}
1964 
1965 	kthread_init_work(&ctlr->pump_messages, spi_pump_messages);
1966 
1967 	/*
1968 	 * Controller config will indicate if this controller should run the
1969 	 * message pump with high (realtime) priority to reduce the transfer
1970 	 * latency on the bus by minimising the delay between a transfer
1971 	 * request and the scheduling of the message pump thread. Without this
1972 	 * setting the message pump thread will remain at default priority.
1973 	 */
1974 	if (ctlr->rt)
1975 		spi_set_thread_rt(ctlr);
1976 
1977 	return 0;
1978 }
1979 
1980 /**
1981  * spi_get_next_queued_message() - called by driver to check for queued
1982  * messages
1983  * @ctlr: the controller to check for queued messages
1984  *
1985  * If there are more messages in the queue, the next message is returned from
1986  * this call.
1987  *
1988  * Return: the next message in the queue, else NULL if the queue is empty.
1989  */
1990 struct spi_message *spi_get_next_queued_message(struct spi_controller *ctlr)
1991 {
1992 	struct spi_message *next;
1993 	unsigned long flags;
1994 
1995 	/* Get a pointer to the next message, if any */
1996 	spin_lock_irqsave(&ctlr->queue_lock, flags);
1997 	next = list_first_entry_or_null(&ctlr->queue, struct spi_message,
1998 					queue);
1999 	spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2000 
2001 	return next;
2002 }
2003 EXPORT_SYMBOL_GPL(spi_get_next_queued_message);
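
/*
 * Example (illustrative sketch, not part of this file): a driver that
 * runs its own completion path may peek at the queue to chain the next
 * message without bouncing through the kworker.  "foo_" names are
 * hypothetical.
 *
 *	static void foo_spi_msg_done(struct spi_controller *ctlr)
 *	{
 *		struct spi_message *next;
 *
 *		spi_finalize_current_message(ctlr);
 *		next = spi_get_next_queued_message(ctlr);
 *		if (next)
 *			foo_spi_start(ctlr, next);	// hypothetical
 *	}
 */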
2004 
2005 /**
2006  * spi_finalize_current_message() - the current message is complete
2007  * @ctlr: the controller to return the message to
2008  *
2009  * Called by the driver to notify the core that the message in the front of the
2010  * queue is complete and can be removed from the queue.
2011  */
2012 void spi_finalize_current_message(struct spi_controller *ctlr)
2013 {
2014 	struct spi_transfer *xfer;
2015 	struct spi_message *mesg;
2016 	int ret;
2017 
2018 	mesg = ctlr->cur_msg;
2019 
2020 	if (!ctlr->ptp_sts_supported && !ctlr->transfer_one) {
2021 		list_for_each_entry(xfer, &mesg->transfers, transfer_list) {
2022 			ptp_read_system_postts(xfer->ptp_sts);
2023 			xfer->ptp_sts_word_post = xfer->len;
2024 		}
2025 	}
2026 
2027 	if (unlikely(ctlr->ptp_sts_supported))
2028 		list_for_each_entry(xfer, &mesg->transfers, transfer_list)
2029 			WARN_ON_ONCE(xfer->ptp_sts && !xfer->timestamped);
2030 
2031 	spi_unmap_msg(ctlr, mesg);
2032 
2033 	/*
2034 	 * In the prepare_message() callback the SPI bus has the opportunity
2035 	 * to split a transfer to smaller chunks.
2036 	 *
2037 	 * Release the split transfers here since spi_map_msg() is done on
2038 	 * the split transfers.
2039 	 */
2040 	spi_res_release(ctlr, mesg);
2041 
2042 	if (mesg->prepared && ctlr->unprepare_message) {
2043 		ret = ctlr->unprepare_message(ctlr, mesg);
2044 		if (ret) {
2045 			dev_err(&ctlr->dev, "failed to unprepare message: %d\n",
2046 				ret);
2047 		}
2048 	}
2049 
2050 	mesg->prepared = false;
2051 
2052 	WRITE_ONCE(ctlr->cur_msg_incomplete, false);
2053 	smp_mb(); /* See __spi_pump_transfer_message()... */
2054 	if (READ_ONCE(ctlr->cur_msg_need_completion))
2055 		complete(&ctlr->cur_msg_completion);
2056 
2057 	trace_spi_message_done(mesg);
2058 
2059 	mesg->state = NULL;
2060 	if (mesg->complete)
2061 		mesg->complete(mesg->context);
2062 }
2063 EXPORT_SYMBOL_GPL(spi_finalize_current_message);
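
/*
 * Example (illustrative sketch, not part of this file): a driver that
 * supplies its own transfer_one_message() must eventually hand the
 * message back through spi_finalize_current_message().  "foo_" names
 * are hypothetical.
 *
 *	static int foo_transfer_one_message(struct spi_controller *ctlr,
 *					    struct spi_message *msg)
 *	{
 *		struct spi_transfer *xfer;
 *		int ret = 0;
 *
 *		list_for_each_entry(xfer, &msg->transfers, transfer_list) {
 *			ret = foo_do_one_xfer(ctlr, msg->spi, xfer);
 *			if (ret)
 *				break;
 *			msg->actual_length += xfer->len;
 *		}
 *
 *		msg->status = ret;
 *		spi_finalize_current_message(ctlr);
 *		return ret;
 *	}
 */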
2064 
2065 static int spi_start_queue(struct spi_controller *ctlr)
2066 {
2067 	unsigned long flags;
2068 
2069 	spin_lock_irqsave(&ctlr->queue_lock, flags);
2070 
2071 	if (ctlr->running || ctlr->busy) {
2072 		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2073 		return -EBUSY;
2074 	}
2075 
2076 	ctlr->running = true;
2077 	ctlr->cur_msg = NULL;
2078 	spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2079 
2080 	kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
2081 
2082 	return 0;
2083 }
2084 
2085 static int spi_stop_queue(struct spi_controller *ctlr)
2086 {
2087 	unsigned long flags;
2088 	unsigned limit = 500;
2089 	int ret = 0;
2090 
2091 	spin_lock_irqsave(&ctlr->queue_lock, flags);
2092 
2093 	/*
2094 	 * This is a bit lame, but is optimized for the common execution path.
2095 	 * A wait_queue on the ctlr->busy could be used, but then the common
2096 	 * execution path (pump_messages) would be required to call wake_up or
2097 	 * friends on every SPI message. Do this instead.
2098 	 */
2099 	while ((!list_empty(&ctlr->queue) || ctlr->busy) && limit--) {
2100 		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2101 		usleep_range(10000, 11000);
2102 		spin_lock_irqsave(&ctlr->queue_lock, flags);
2103 	}
2104 
2105 	if (!list_empty(&ctlr->queue) || ctlr->busy)
2106 		ret = -EBUSY;
2107 	else
2108 		ctlr->running = false;
2109 
2110 	spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2111 
2112 	if (ret) {
2113 		dev_warn(&ctlr->dev, "could not stop message queue\n");
2114 		return ret;
2115 	}
2116 	return ret;
2117 }
2118 
2119 static int spi_destroy_queue(struct spi_controller *ctlr)
2120 {
2121 	int ret;
2122 
2123 	ret = spi_stop_queue(ctlr);
2124 
2125 	/*
2126 	 * kthread_flush_worker will block until all work is done.
2127 	 * If the reason that stop_queue timed out is that the work will never
2128 	 * finish, then it does no good to call flush/stop thread, so
2129 	 * return anyway.
2130 	 */
2131 	if (ret) {
2132 		dev_err(&ctlr->dev, "problem destroying queue\n");
2133 		return ret;
2134 	}
2135 
2136 	kthread_destroy_worker(ctlr->kworker);
2137 
2138 	return 0;
2139 }
2140 
2141 static int __spi_queued_transfer(struct spi_device *spi,
2142 				 struct spi_message *msg,
2143 				 bool need_pump)
2144 {
2145 	struct spi_controller *ctlr = spi->controller;
2146 	unsigned long flags;
2147 
2148 	spin_lock_irqsave(&ctlr->queue_lock, flags);
2149 
2150 	if (!ctlr->running) {
2151 		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2152 		return -ESHUTDOWN;
2153 	}
2154 	msg->actual_length = 0;
2155 	msg->status = -EINPROGRESS;
2156 
2157 	list_add_tail(&msg->queue, &ctlr->queue);
2158 	ctlr->queue_empty = false;
2159 	if (!ctlr->busy && need_pump)
2160 		kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
2161 
2162 	spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2163 	return 0;
2164 }
2165 
2166 /**
2167  * spi_queued_transfer - transfer function for queued transfers
2168  * @spi: spi device which is requesting transfer
2169  * @msg: spi message which is to be queued to the driver queue
2170  *
2171  * Return: zero on success, else a negative error code.
2172  */
2173 static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
2174 {
2175 	return __spi_queued_transfer(spi, msg, true);
2176 }
2177 
2178 static int spi_controller_initialize_queue(struct spi_controller *ctlr)
2179 {
2180 	int ret;
2181 
2182 	ctlr->transfer = spi_queued_transfer;
2183 	if (!ctlr->transfer_one_message)
2184 		ctlr->transfer_one_message = spi_transfer_one_message;
2185 
2186 	/* Initialize and start queue */
2187 	ret = spi_init_queue(ctlr);
2188 	if (ret) {
2189 		dev_err(&ctlr->dev, "problem initializing queue\n");
2190 		goto err_init_queue;
2191 	}
2192 	ctlr->queued = true;
2193 	ret = spi_start_queue(ctlr);
2194 	if (ret) {
2195 		dev_err(&ctlr->dev, "problem starting queue\n");
2196 		goto err_start_queue;
2197 	}
2198 
2199 	return 0;
2200 
2201 err_start_queue:
2202 	spi_destroy_queue(ctlr);
2203 err_init_queue:
2204 	return ret;
2205 }
2206 
2207 /**
2208  * spi_flush_queue - Send all pending messages in the queue from the caller's
2209  *		     context
2210  * @ctlr: controller to process queue for
2211  *
2212  * This should be used when one wants to ensure all pending messages have been
2213  * sent before doing something. It is used by the spi-mem code to make sure SPI
2214  * memory operations do not preempt regular SPI transfers that have been queued
2215  * before the spi-mem operation.
2216  */
2217 void spi_flush_queue(struct spi_controller *ctlr)
2218 {
2219 	if (ctlr->transfer == spi_queued_transfer)
2220 		__spi_pump_messages(ctlr, false);
2221 }
2222 
2223 /*-------------------------------------------------------------------------*/
2224 
2225 #if defined(CONFIG_OF)
2226 static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi,
2227 			   struct device_node *nc)
2228 {
2229 	u32 value;
2230 	int rc;
2231 
2232 	/* Mode (clock phase/polarity/etc.) */
2233 	if (of_property_read_bool(nc, "spi-cpha"))
2234 		spi->mode |= SPI_CPHA;
2235 	if (of_property_read_bool(nc, "spi-cpol"))
2236 		spi->mode |= SPI_CPOL;
2237 	if (of_property_read_bool(nc, "spi-3wire"))
2238 		spi->mode |= SPI_3WIRE;
2239 	if (of_property_read_bool(nc, "spi-lsb-first"))
2240 		spi->mode |= SPI_LSB_FIRST;
2241 	if (of_property_read_bool(nc, "spi-cs-high"))
2242 		spi->mode |= SPI_CS_HIGH;
2243 
2244 	/* Device DUAL/QUAD mode */
2245 	if (!of_property_read_u32(nc, "spi-tx-bus-width", &value)) {
2246 		switch (value) {
2247 		case 0:
2248 			spi->mode |= SPI_NO_TX;
2249 			break;
2250 		case 1:
2251 			break;
2252 		case 2:
2253 			spi->mode |= SPI_TX_DUAL;
2254 			break;
2255 		case 4:
2256 			spi->mode |= SPI_TX_QUAD;
2257 			break;
2258 		case 8:
2259 			spi->mode |= SPI_TX_OCTAL;
2260 			break;
2261 		default:
2262 			dev_warn(&ctlr->dev,
2263 				"spi-tx-bus-width %d not supported\n",
2264 				value);
2265 			break;
2266 		}
2267 	}
2268 
2269 	if (!of_property_read_u32(nc, "spi-rx-bus-width", &value)) {
2270 		switch (value) {
2271 		case 0:
2272 			spi->mode |= SPI_NO_RX;
2273 			break;
2274 		case 1:
2275 			break;
2276 		case 2:
2277 			spi->mode |= SPI_RX_DUAL;
2278 			break;
2279 		case 4:
2280 			spi->mode |= SPI_RX_QUAD;
2281 			break;
2282 		case 8:
2283 			spi->mode |= SPI_RX_OCTAL;
2284 			break;
2285 		default:
2286 			dev_warn(&ctlr->dev,
2287 				"spi-rx-bus-width %d not supported\n",
2288 				value);
2289 			break;
2290 		}
2291 	}
2292 
2293 	if (spi_controller_is_slave(ctlr)) {
2294 		if (!of_node_name_eq(nc, "slave")) {
2295 			dev_err(&ctlr->dev, "%pOF is not called 'slave'\n",
2296 				nc);
2297 			return -EINVAL;
2298 		}
2299 		return 0;
2300 	}
2301 
2302 	/* Device address */
2303 	rc = of_property_read_u32(nc, "reg", &value);
2304 	if (rc) {
2305 		dev_err(&ctlr->dev, "%pOF has no valid 'reg' property (%d)\n",
2306 			nc, rc);
2307 		return rc;
2308 	}
2309 	spi_set_chipselect(spi, 0, value);
2310 
2311 	/* Device speed */
2312 	if (!of_property_read_u32(nc, "spi-max-frequency", &value))
2313 		spi->max_speed_hz = value;
2314 
2315 	return 0;
2316 }
2317 
2318 static struct spi_device *
2319 of_register_spi_device(struct spi_controller *ctlr, struct device_node *nc)
2320 {
2321 	struct spi_device *spi;
2322 	int rc;
2323 
2324 	/* Alloc an spi_device */
2325 	spi = spi_alloc_device(ctlr);
2326 	if (!spi) {
2327 		dev_err(&ctlr->dev, "spi_device alloc error for %pOF\n", nc);
2328 		rc = -ENOMEM;
2329 		goto err_out;
2330 	}
2331 
2332 	/* Select device driver */
2333 	rc = of_modalias_node(nc, spi->modalias,
2334 				sizeof(spi->modalias));
2335 	if (rc < 0) {
2336 		dev_err(&ctlr->dev, "cannot find modalias for %pOF\n", nc);
2337 		goto err_out;
2338 	}
2339 
2340 	rc = of_spi_parse_dt(ctlr, spi, nc);
2341 	if (rc)
2342 		goto err_out;
2343 
2344 	/* Store a pointer to the node in the device structure */
2345 	of_node_get(nc);
2346 	spi->dev.of_node = nc;
2347 	spi->dev.fwnode = of_fwnode_handle(nc);
2348 
2349 	/* Register the new device */
2350 	rc = spi_add_device(spi);
2351 	if (rc) {
2352 		dev_err(&ctlr->dev, "spi_device register error %pOF\n", nc);
2353 		goto err_of_node_put;
2354 	}
2355 
2356 	return spi;
2357 
2358 err_of_node_put:
2359 	of_node_put(nc);
2360 err_out:
2361 	spi_dev_put(spi);
2362 	return ERR_PTR(rc);
2363 }
2364 
2365 /**
2366  * of_register_spi_devices() - Register child devices onto the SPI bus
2367  * @ctlr:	Pointer to spi_controller device
2368  *
2369  * Registers an spi_device for each child node of the controller node that
2370  * represents a valid SPI slave.
2371  */
2372 static void of_register_spi_devices(struct spi_controller *ctlr)
2373 {
2374 	struct spi_device *spi;
2375 	struct device_node *nc;
2376 
2377 	if (!ctlr->dev.of_node)
2378 		return;
2379 
2380 	for_each_available_child_of_node(ctlr->dev.of_node, nc) {
2381 		if (of_node_test_and_set_flag(nc, OF_POPULATED))
2382 			continue;
2383 		spi = of_register_spi_device(ctlr, nc);
2384 		if (IS_ERR(spi)) {
2385 			dev_warn(&ctlr->dev,
2386 				 "Failed to create SPI device for %pOF\n", nc);
2387 			of_node_clear_flag(nc, OF_POPULATED);
2388 		}
2389 	}
2390 }
2391 #else
2392 static void of_register_spi_devices(struct spi_controller *ctlr) { }
2393 #endif
2394 
2395 /**
2396  * spi_new_ancillary_device() - Register ancillary SPI device
2397  * @spi:         Pointer to the main SPI device registering the ancillary device
2398  * @chip_select: Chip Select of the ancillary device
2399  *
2400  * Register an ancillary SPI device; for example some chips have a chip-select
2401  * for normal device usage and another one for setup/firmware upload.
2402  *
2403  * This may only be called from main SPI device's probe routine.
2404  *
2405  * Return: the new spi_device on success; ERR_PTR() encoding errno on failure
2406  */
2407 struct spi_device *spi_new_ancillary_device(struct spi_device *spi,
2408 					     u8 chip_select)
2409 {
2410 	struct spi_device *ancillary;
2411 	int rc = 0;
2412 
2413 	/* Alloc an spi_device */
2414 	ancillary = spi_alloc_device(spi->controller);
2415 	if (!ancillary) {
2416 		rc = -ENOMEM;
2417 		goto err_out;
2418 	}
2419 
2420 	strscpy(ancillary->modalias, "dummy", sizeof(ancillary->modalias));
2421 
2422 	/* Use provided chip-select for ancillary device */
2423 	spi_set_chipselect(ancillary, 0, chip_select);
2424 
2425 	/* Take over SPI mode/speed from SPI main device */
2426 	ancillary->max_speed_hz = spi->max_speed_hz;
2427 	ancillary->mode = spi->mode;
2428 
2429 	/* Register the new device */
2430 	rc = spi_add_device_locked(ancillary);
2431 	if (rc) {
2432 		dev_err(&spi->dev, "failed to register ancillary device\n");
2433 		goto err_out;
2434 	}
2435 
2436 	return ancillary;
2437 
2438 err_out:
2439 	spi_dev_put(ancillary);
2440 	return ERR_PTR(rc);
2441 }
2442 EXPORT_SYMBOL_GPL(spi_new_ancillary_device);
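
/*
 * Example (illustrative sketch, not part of this file): a probe routine
 * claiming a second chip select that the chip uses only for firmware
 * upload.  The use of chip select 1 here is a made-up board detail.
 *
 *	static int foo_probe(struct spi_device *spi)
 *	{
 *		struct spi_device *fw_spi;
 *
 *		fw_spi = spi_new_ancillary_device(spi, 1);
 *		if (IS_ERR(fw_spi))
 *			return PTR_ERR(fw_spi);
 *
 *		spi_set_drvdata(spi, fw_spi);
 *		return 0;
 *	}
 */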
2443 
2444 #ifdef CONFIG_ACPI
2445 struct acpi_spi_lookup {
2446 	struct spi_controller 	*ctlr;
2447 	u32			max_speed_hz;
2448 	u32			mode;
2449 	int			irq;
2450 	u8			bits_per_word;
2451 	u8			chip_select;
2452 	int			n;
2453 	int			index;
2454 };
2455 
2456 static int acpi_spi_count(struct acpi_resource *ares, void *data)
2457 {
2458 	struct acpi_resource_spi_serialbus *sb;
2459 	int *count = data;
2460 
2461 	if (ares->type != ACPI_RESOURCE_TYPE_SERIAL_BUS)
2462 		return 1;
2463 
2464 	sb = &ares->data.spi_serial_bus;
2465 	if (sb->type != ACPI_RESOURCE_SERIAL_TYPE_SPI)
2466 		return 1;
2467 
2468 	*count = *count + 1;
2469 
2470 	return 1;
2471 }
2472 
2473 /**
2474  * acpi_spi_count_resources - Count the number of SpiSerialBus resources
2475  * @adev:	ACPI device
2476  *
2477  * Returns the number of SpiSerialBus resources in the ACPI-device's
2478  * resource-list; or a negative error code.
2479  */
2480 int acpi_spi_count_resources(struct acpi_device *adev)
2481 {
2482 	LIST_HEAD(r);
2483 	int count = 0;
2484 	int ret;
2485 
2486 	ret = acpi_dev_get_resources(adev, &r, acpi_spi_count, &count);
2487 	if (ret < 0)
2488 		return ret;
2489 
2490 	acpi_dev_free_resource_list(&r);
2491 
2492 	return count;
2493 }
2494 EXPORT_SYMBOL_GPL(acpi_spi_count_resources);
2495 
2496 static void acpi_spi_parse_apple_properties(struct acpi_device *dev,
2497 					    struct acpi_spi_lookup *lookup)
2498 {
2499 	const union acpi_object *obj;
2500 
2501 	if (!x86_apple_machine)
2502 		return;
2503 
2504 	if (!acpi_dev_get_property(dev, "spiSclkPeriod", ACPI_TYPE_BUFFER, &obj)
2505 	    && obj->buffer.length >= 4)
2506 		lookup->max_speed_hz  = NSEC_PER_SEC / *(u32 *)obj->buffer.pointer;
2507 
2508 	if (!acpi_dev_get_property(dev, "spiWordSize", ACPI_TYPE_BUFFER, &obj)
2509 	    && obj->buffer.length == 8)
2510 		lookup->bits_per_word = *(u64 *)obj->buffer.pointer;
2511 
2512 	if (!acpi_dev_get_property(dev, "spiBitOrder", ACPI_TYPE_BUFFER, &obj)
2513 	    && obj->buffer.length == 8 && !*(u64 *)obj->buffer.pointer)
2514 		lookup->mode |= SPI_LSB_FIRST;
2515 
2516 	if (!acpi_dev_get_property(dev, "spiSPO", ACPI_TYPE_BUFFER, &obj)
2517 	    && obj->buffer.length == 8 &&  *(u64 *)obj->buffer.pointer)
2518 		lookup->mode |= SPI_CPOL;
2519 
2520 	if (!acpi_dev_get_property(dev, "spiSPH", ACPI_TYPE_BUFFER, &obj)
2521 	    && obj->buffer.length == 8 &&  *(u64 *)obj->buffer.pointer)
2522 		lookup->mode |= SPI_CPHA;
2523 }
2524 
2525 static struct spi_controller *acpi_spi_find_controller_by_adev(struct acpi_device *adev);
2526 
2527 static int acpi_spi_add_resource(struct acpi_resource *ares, void *data)
2528 {
2529 	struct acpi_spi_lookup *lookup = data;
2530 	struct spi_controller *ctlr = lookup->ctlr;
2531 
2532 	if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
2533 		struct acpi_resource_spi_serialbus *sb;
2534 		acpi_handle parent_handle;
2535 		acpi_status status;
2536 
2537 		sb = &ares->data.spi_serial_bus;
2538 		if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_SPI) {
2539 
2540 			if (lookup->index != -1 && lookup->n++ != lookup->index)
2541 				return 1;
2542 
2543 			status = acpi_get_handle(NULL,
2544 						 sb->resource_source.string_ptr,
2545 						 &parent_handle);
2546 
2547 			if (ACPI_FAILURE(status))
2548 				return -ENODEV;
2549 
2550 			if (ctlr) {
2551 				if (ACPI_HANDLE(ctlr->dev.parent) != parent_handle)
2552 					return -ENODEV;
2553 			} else {
2554 				struct acpi_device *adev;
2555 
2556 				adev = acpi_fetch_acpi_dev(parent_handle);
2557 				if (!adev)
2558 					return -ENODEV;
2559 
2560 				ctlr = acpi_spi_find_controller_by_adev(adev);
2561 				if (!ctlr)
2562 					return -EPROBE_DEFER;
2563 
2564 				lookup->ctlr = ctlr;
2565 			}
2566 
2567 			/*
2568 			 * ACPI DeviceSelection numbering is handled by the
2569 			 * host controller driver in Windows and can vary
2570 			 * from driver to driver. In Linux we always expect
2571 			 * 0 .. max - 1 so we need to ask the driver to
2572 			 * translate between the two schemes.
2573 			 */
2574 			if (ctlr->fw_translate_cs) {
2575 				int cs = ctlr->fw_translate_cs(ctlr,
2576 						sb->device_selection);
2577 				if (cs < 0)
2578 					return cs;
2579 				lookup->chip_select = cs;
2580 			} else {
2581 				lookup->chip_select = sb->device_selection;
2582 			}
2583 
2584 			lookup->max_speed_hz = sb->connection_speed;
2585 			lookup->bits_per_word = sb->data_bit_length;
2586 
2587 			if (sb->clock_phase == ACPI_SPI_SECOND_PHASE)
2588 				lookup->mode |= SPI_CPHA;
2589 			if (sb->clock_polarity == ACPI_SPI_START_HIGH)
2590 				lookup->mode |= SPI_CPOL;
2591 			if (sb->device_polarity == ACPI_SPI_ACTIVE_HIGH)
2592 				lookup->mode |= SPI_CS_HIGH;
2593 		}
2594 	} else if (lookup->irq < 0) {
2595 		struct resource r;
2596 
2597 		if (acpi_dev_resource_interrupt(ares, 0, &r))
2598 			lookup->irq = r.start;
2599 	}
2600 
2601 	/* Always tell the ACPI core to skip this resource */
2602 	return 1;
2603 }
2604 
2605 /**
2606  * acpi_spi_device_alloc - Allocate a spi device, and fill it in with ACPI information
2607  * @ctlr: controller to which the spi device belongs
2608  * @adev: ACPI Device for the spi device
2609  * @index: Index of the spi resource inside the ACPI Node
2610  *
2611  * This should be used to allocate a new spi device from an ACPI node.
2612  * The caller is responsible for calling spi_add_device to register the spi device.
2613  *
2614  * If ctlr is set to NULL, the Controller for the spi device will be looked up
2615  * using the resource.
2616  * If index is set to -1, index is not used.
2617  * Note: If index is -1, ctlr must be set.
2618  *
2619  * Return: a pointer to the new device, or ERR_PTR on error.
2620  */
2621 struct spi_device *acpi_spi_device_alloc(struct spi_controller *ctlr,
2622 					 struct acpi_device *adev,
2623 					 int index)
2624 {
2625 	acpi_handle parent_handle = NULL;
2626 	struct list_head resource_list;
2627 	struct acpi_spi_lookup lookup = {};
2628 	struct spi_device *spi;
2629 	int ret;
2630 
2631 	if (!ctlr && index == -1)
2632 		return ERR_PTR(-EINVAL);
2633 
2634 	lookup.ctlr		= ctlr;
2635 	lookup.irq		= -1;
2636 	lookup.index		= index;
2637 	lookup.n		= 0;
2638 
2639 	INIT_LIST_HEAD(&resource_list);
2640 	ret = acpi_dev_get_resources(adev, &resource_list,
2641 				     acpi_spi_add_resource, &lookup);
2642 	acpi_dev_free_resource_list(&resource_list);
2643 
2644 	if (ret < 0)
2645 		/* Found SPI in _CRS but it points to another controller */
2646 		return ERR_PTR(ret);
2647 
2648 	if (!lookup.max_speed_hz &&
2649 	    ACPI_SUCCESS(acpi_get_parent(adev->handle, &parent_handle)) &&
2650 	    ACPI_HANDLE(lookup.ctlr->dev.parent) == parent_handle) {
2651 		/* Apple does not use _CRS but nested devices for SPI slaves */
2652 		acpi_spi_parse_apple_properties(adev, &lookup);
2653 	}
2654 
2655 	if (!lookup.max_speed_hz)
2656 		return ERR_PTR(-ENODEV);
2657 
2658 	spi = spi_alloc_device(lookup.ctlr);
2659 	if (!spi) {
2660 		dev_err(&lookup.ctlr->dev, "failed to allocate SPI device for %s\n",
2661 			dev_name(&adev->dev));
2662 		return ERR_PTR(-ENOMEM);
2663 	}
2664 
2665 	ACPI_COMPANION_SET(&spi->dev, adev);
2666 	spi->max_speed_hz	= lookup.max_speed_hz;
2667 	spi->mode		|= lookup.mode;
2668 	spi->irq		= lookup.irq;
2669 	spi->bits_per_word	= lookup.bits_per_word;
2670 	spi_set_chipselect(spi, 0, lookup.chip_select);
2671 
2672 	return spi;
2673 }
2674 EXPORT_SYMBOL_GPL(acpi_spi_device_alloc);
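
/*
 * Example (illustrative sketch, not part of this file): instantiating a
 * device for every SpiSerialBus resource of an ACPI node, letting the
 * core look the controller up from _CRS by passing ctlr == NULL.
 * "foo_" names are hypothetical.
 *
 *	static int foo_register_from_acpi(struct acpi_device *adev)
 *	{
 *		int i, n = acpi_spi_count_resources(adev);
 *
 *		if (n <= 0)
 *			return n ? n : -ENODEV;
 *
 *		for (i = 0; i < n; i++) {
 *			struct spi_device *spi;
 *
 *			spi = acpi_spi_device_alloc(NULL, adev, i);
 *			if (IS_ERR(spi))
 *				return PTR_ERR(spi);
 *			if (spi_add_device(spi)) {
 *				spi_dev_put(spi);
 *				return -ENODEV;
 *			}
 *		}
 *		return 0;
 *	}
 */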
2675 
2676 static acpi_status acpi_register_spi_device(struct spi_controller *ctlr,
2677 					    struct acpi_device *adev)
2678 {
2679 	struct spi_device *spi;
2680 
2681 	if (acpi_bus_get_status(adev) || !adev->status.present ||
2682 	    acpi_device_enumerated(adev))
2683 		return AE_OK;
2684 
2685 	spi = acpi_spi_device_alloc(ctlr, adev, -1);
2686 	if (IS_ERR(spi)) {
2687 		if (PTR_ERR(spi) == -ENOMEM)
2688 			return AE_NO_MEMORY;
2689 		else
2690 			return AE_OK;
2691 	}
2692 
2693 	acpi_set_modalias(adev, acpi_device_hid(adev), spi->modalias,
2694 			  sizeof(spi->modalias));
2695 
2696 	if (spi->irq < 0)
2697 		spi->irq = acpi_dev_gpio_irq_get(adev, 0);
2698 
2699 	acpi_device_set_enumerated(adev);
2700 
2701 	adev->power.flags.ignore_parent = true;
2702 	if (spi_add_device(spi)) {
2703 		adev->power.flags.ignore_parent = false;
2704 		dev_err(&ctlr->dev, "failed to add SPI device %s from ACPI\n",
2705 			dev_name(&adev->dev));
2706 		spi_dev_put(spi);
2707 	}
2708 
2709 	return AE_OK;
2710 }
2711 
2712 static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level,
2713 				       void *data, void **return_value)
2714 {
2715 	struct acpi_device *adev = acpi_fetch_acpi_dev(handle);
2716 	struct spi_controller *ctlr = data;
2717 
2718 	if (!adev)
2719 		return AE_OK;
2720 
2721 	return acpi_register_spi_device(ctlr, adev);
2722 }
2723 
2724 #define SPI_ACPI_ENUMERATE_MAX_DEPTH		32
2725 
2726 static void acpi_register_spi_devices(struct spi_controller *ctlr)
2727 {
2728 	acpi_status status;
2729 	acpi_handle handle;
2730 
2731 	handle = ACPI_HANDLE(ctlr->dev.parent);
2732 	if (!handle)
2733 		return;
2734 
2735 	status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
2736 				     SPI_ACPI_ENUMERATE_MAX_DEPTH,
2737 				     acpi_spi_add_device, NULL, ctlr, NULL);
2738 	if (ACPI_FAILURE(status))
2739 		dev_warn(&ctlr->dev, "failed to enumerate SPI slaves\n");
2740 }
2741 #else
2742 static inline void acpi_register_spi_devices(struct spi_controller *ctlr) {}
2743 #endif /* CONFIG_ACPI */
2744 
2745 static void spi_controller_release(struct device *dev)
2746 {
2747 	struct spi_controller *ctlr;
2748 
2749 	ctlr = container_of(dev, struct spi_controller, dev);
2750 	kfree(ctlr);
2751 }
2752 
2753 static struct class spi_master_class = {
2754 	.name		= "spi_master",
2755 	.owner		= THIS_MODULE,
2756 	.dev_release	= spi_controller_release,
2757 	.dev_groups	= spi_master_groups,
2758 };
2759 
2760 #ifdef CONFIG_SPI_SLAVE
2761 /**
2762  * spi_slave_abort - abort the ongoing transfer request on an SPI slave
2763  *		     controller
2764  * @spi: device used for the current transfer
2765  */
2766 int spi_slave_abort(struct spi_device *spi)
2767 {
2768 	struct spi_controller *ctlr = spi->controller;
2769 
2770 	if (spi_controller_is_slave(ctlr) && ctlr->slave_abort)
2771 		return ctlr->slave_abort(ctlr);
2772 
2773 	return -ENOTSUPP;
2774 }
2775 EXPORT_SYMBOL_GPL(spi_slave_abort);
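
/*
 * Example (illustrative sketch, not part of this file): a slave protocol
 * driver giving up on a request the remote master never clocked out.
 * "foo_" names are hypothetical.
 *
 *	static void foo_slave_timeout(struct spi_device *spi)
 *	{
 *		int ret = spi_slave_abort(spi);
 *
 *		if (ret && ret != -ENOTSUPP)
 *			dev_warn(&spi->dev, "abort failed: %d\n", ret);
 *	}
 */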
2776 
2777 static ssize_t slave_show(struct device *dev, struct device_attribute *attr,
2778 			  char *buf)
2779 {
2780 	struct spi_controller *ctlr = container_of(dev, struct spi_controller,
2781 						   dev);
2782 	struct device *child;
2783 
2784 	child = device_find_any_child(&ctlr->dev);
2785 	return sprintf(buf, "%s\n",
2786 		       child ? to_spi_device(child)->modalias : NULL);
2787 }
2788 
2789 static ssize_t slave_store(struct device *dev, struct device_attribute *attr,
2790 			   const char *buf, size_t count)
2791 {
2792 	struct spi_controller *ctlr = container_of(dev, struct spi_controller,
2793 						   dev);
2794 	struct spi_device *spi;
2795 	struct device *child;
2796 	char name[32];
2797 	int rc;
2798 
2799 	rc = sscanf(buf, "%31s", name);
2800 	if (rc != 1 || !name[0])
2801 		return -EINVAL;
2802 
2803 	child = device_find_any_child(&ctlr->dev);
2804 	if (child) {
2805 		/* Remove registered slave */
2806 		device_unregister(child);
2807 		put_device(child);
2808 	}
2809 
2810 	if (strcmp(name, "(null)")) {
2811 		/* Register new slave */
2812 		spi = spi_alloc_device(ctlr);
2813 		if (!spi)
2814 			return -ENOMEM;
2815 
2816 		strscpy(spi->modalias, name, sizeof(spi->modalias));
2817 
2818 		rc = spi_add_device(spi);
2819 		if (rc) {
2820 			spi_dev_put(spi);
2821 			return rc;
2822 		}
2823 	}
2824 
2825 	return count;
2826 }
2827 
2828 static DEVICE_ATTR_RW(slave);
2829 
2830 static struct attribute *spi_slave_attrs[] = {
2831 	&dev_attr_slave.attr,
2832 	NULL,
2833 };
2834 
2835 static const struct attribute_group spi_slave_group = {
2836 	.attrs = spi_slave_attrs,
2837 };
2838 
2839 static const struct attribute_group *spi_slave_groups[] = {
2840 	&spi_controller_statistics_group,
2841 	&spi_slave_group,
2842 	NULL,
2843 };
2844 
2845 static struct class spi_slave_class = {
2846 	.name		= "spi_slave",
2847 	.owner		= THIS_MODULE,
2848 	.dev_release	= spi_controller_release,
2849 	.dev_groups	= spi_slave_groups,
2850 };
2851 #else
2852 extern struct class spi_slave_class;	/* dummy */
2853 #endif
2854 
2855 /**
2856  * __spi_alloc_controller - allocate an SPI master or slave controller
2857  * @dev: the controller, possibly using the platform_bus
2858  * @size: how much zeroed driver-private data to allocate; the pointer to this
2859  *	memory is in the driver_data field of the returned device, accessible
2860  *	with spi_controller_get_devdata(); the memory is cacheline aligned;
2861  *	drivers granting DMA access to portions of their private data need to
2862  *	round up @size using ALIGN(size, dma_get_cache_alignment()).
2863  * @slave: flag indicating whether to allocate an SPI master (false) or SPI
2864  *	slave (true) controller
2865  * Context: can sleep
2866  *
2867  * This call is used only by SPI controller drivers, which are the
2868  * only ones directly touching chip registers.  It's how they allocate
2869  * an spi_controller structure, prior to calling spi_register_controller().
2870  *
2871  * This must be called from context that can sleep.
2872  *
2873  * The caller is responsible for assigning the bus number and initializing the
2874  * controller's methods before calling spi_register_controller(); and (after
2875  * errors adding the device) calling spi_controller_put() to prevent a memory
2876  * leak.
2877  *
2878  * Return: the SPI controller structure on success, else NULL.
2879  */
2880 struct spi_controller *__spi_alloc_controller(struct device *dev,
2881 					      unsigned int size, bool slave)
2882 {
2883 	struct spi_controller	*ctlr;
2884 	size_t ctlr_size = ALIGN(sizeof(*ctlr), dma_get_cache_alignment());
2885 
2886 	if (!dev)
2887 		return NULL;
2888 
2889 	ctlr = kzalloc(size + ctlr_size, GFP_KERNEL);
2890 	if (!ctlr)
2891 		return NULL;
2892 
2893 	device_initialize(&ctlr->dev);
2894 	INIT_LIST_HEAD(&ctlr->queue);
2895 	spin_lock_init(&ctlr->queue_lock);
2896 	spin_lock_init(&ctlr->bus_lock_spinlock);
2897 	mutex_init(&ctlr->bus_lock_mutex);
2898 	mutex_init(&ctlr->io_mutex);
2899 	mutex_init(&ctlr->add_lock);
2900 	ctlr->bus_num = -1;
2901 	ctlr->num_chipselect = 1;
2902 	ctlr->slave = slave;
2903 	if (IS_ENABLED(CONFIG_SPI_SLAVE) && slave)
2904 		ctlr->dev.class = &spi_slave_class;
2905 	else
2906 		ctlr->dev.class = &spi_master_class;
2907 	ctlr->dev.parent = dev;
2908 	pm_suspend_ignore_children(&ctlr->dev, true);
2909 	spi_controller_set_devdata(ctlr, (void *)ctlr + ctlr_size);
2910 
2911 	return ctlr;
2912 }
2913 EXPORT_SYMBOL_GPL(__spi_alloc_controller);
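
/*
 * Example (illustrative sketch, not part of this file): allocation from
 * a platform driver's probe via the spi_alloc_master() wrapper; the
 * driver-private struct is carved out right behind the controller.
 * "foo_" names are hypothetical.
 *
 *	struct foo_spi {
 *		void __iomem *base;
 *	};
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct spi_controller *ctlr;
 *
 *		ctlr = spi_alloc_master(&pdev->dev, sizeof(struct foo_spi));
 *		if (!ctlr)
 *			return -ENOMEM;
 *
 *		platform_set_drvdata(pdev, ctlr);
 *		return foo_init_and_register(pdev, ctlr);	// hypothetical
 *	}
 */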
2914 
2915 static void devm_spi_release_controller(struct device *dev, void *ctlr)
2916 {
2917 	spi_controller_put(*(struct spi_controller **)ctlr);
2918 }
2919 
2920 /**
2921  * __devm_spi_alloc_controller - resource-managed __spi_alloc_controller()
2922  * @dev: physical device of SPI controller
2923  * @size: how much zeroed driver-private data to allocate
2924  * @slave: whether to allocate an SPI master (false) or SPI slave (true)
2925  * Context: can sleep
2926  *
2927  * Allocate an SPI controller and automatically release a reference on it
2928  * when @dev is unbound from its driver.  Drivers are thus relieved from
2929  * having to call spi_controller_put().
2930  *
2931  * The arguments to this function are identical to __spi_alloc_controller().
2932  *
2933  * Return: the SPI controller structure on success, else NULL.
2934  */
2935 struct spi_controller *__devm_spi_alloc_controller(struct device *dev,
2936 						   unsigned int size,
2937 						   bool slave)
2938 {
2939 	struct spi_controller **ptr, *ctlr;
2940 
2941 	ptr = devres_alloc(devm_spi_release_controller, sizeof(*ptr),
2942 			   GFP_KERNEL);
2943 	if (!ptr)
2944 		return NULL;
2945 
2946 	ctlr = __spi_alloc_controller(dev, size, slave);
2947 	if (ctlr) {
2948 		ctlr->devm_allocated = true;
2949 		*ptr = ctlr;
2950 		devres_add(dev, ptr);
2951 	} else {
2952 		devres_free(ptr);
2953 	}
2954 
2955 	return ctlr;
2956 }
2957 EXPORT_SYMBOL_GPL(__devm_spi_alloc_controller);
2958 
2959 /**
2960  * spi_get_gpio_descs() - grab chip select GPIOs for the master
2961  * @ctlr: The SPI master to grab GPIO descriptors for
2962  */
2963 static int spi_get_gpio_descs(struct spi_controller *ctlr)
2964 {
2965 	int nb, i;
2966 	struct gpio_desc **cs;
2967 	struct device *dev = &ctlr->dev;
2968 	unsigned long native_cs_mask = 0;
2969 	unsigned int num_cs_gpios = 0;
2970 
2971 	nb = gpiod_count(dev, "cs");
2972 	if (nb < 0) {
2973 		/* No GPIOs at all is fine, else return the error */
2974 		if (nb == -ENOENT)
2975 			return 0;
2976 		return nb;
2977 	}
2978 
2979 	ctlr->num_chipselect = max_t(int, nb, ctlr->num_chipselect);
2980 
2981 	cs = devm_kcalloc(dev, ctlr->num_chipselect, sizeof(*cs),
2982 			  GFP_KERNEL);
2983 	if (!cs)
2984 		return -ENOMEM;
2985 	ctlr->cs_gpiods = cs;
2986 
2987 	for (i = 0; i < nb; i++) {
2988 		/*
2989 		 * Most chipselects are active low, the inverted
2990 		 * semantics are handled by special quirks in gpiolib,
2991 		 * so initializing them to GPIOD_OUT_LOW here means
2992 		 * "unasserted"; in most cases this will drive the physical
2993 		 * line high.
2994 		 */
2995 		cs[i] = devm_gpiod_get_index_optional(dev, "cs", i,
2996 						      GPIOD_OUT_LOW);
2997 		if (IS_ERR(cs[i]))
2998 			return PTR_ERR(cs[i]);
2999 
3000 		if (cs[i]) {
3001 			/*
3002 			 * If we find a CS GPIO, name it after the device and
3003 			 * chip select line.
3004 			 */
3005 			char *gpioname;
3006 
3007 			gpioname = devm_kasprintf(dev, GFP_KERNEL, "%s CS%d",
3008 						  dev_name(dev), i);
3009 			if (!gpioname)
3010 				return -ENOMEM;
3011 			gpiod_set_consumer_name(cs[i], gpioname);
3012 			num_cs_gpios++;
3013 			continue;
3014 		}
3015 
3016 		if (ctlr->max_native_cs && i >= ctlr->max_native_cs) {
3017 			dev_err(dev, "Invalid native chip select %d\n", i);
3018 			return -EINVAL;
3019 		}
3020 		native_cs_mask |= BIT(i);
3021 	}
3022 
3023 	ctlr->unused_native_cs = ffs(~native_cs_mask) - 1;
3024 
3025 	if ((ctlr->flags & SPI_MASTER_GPIO_SS) && num_cs_gpios &&
3026 	    ctlr->max_native_cs && ctlr->unused_native_cs >= ctlr->max_native_cs) {
3027 		dev_err(dev, "No unused native chip select available\n");
3028 		return -EINVAL;
3029 	}
3030 
3031 	return 0;
3032 }
3033 
3034 static int spi_controller_check_ops(struct spi_controller *ctlr)
3035 {
3036 	/*
3037 	 * The controller may implement only the high-level SPI-memory-like
3038 	 * operations if it does not support regular SPI transfers, and this is
3039 	 * a valid use case.
3040 	 * If ->mem_ops is NULL, we request that at least one of the
3041 	 * ->transfer_xxx() methods be implemented.
3042 	 */
3043 	if (ctlr->mem_ops) {
3044 		if (!ctlr->mem_ops->exec_op)
3045 			return -EINVAL;
3046 	} else if (!ctlr->transfer && !ctlr->transfer_one &&
3047 		   !ctlr->transfer_one_message) {
3048 		return -EINVAL;
3049 	}
3050 
3051 	return 0;
3052 }
3053 
3054 /**
3055  * spi_register_controller - register SPI master or slave controller
3056  * @ctlr: initialized master, originally from spi_alloc_master() or
3057  *	spi_alloc_slave()
3058  * Context: can sleep
3059  *
3060  * SPI controllers connect to their drivers using some non-SPI bus,
3061  * such as the platform bus.  The final stage of probe() in that code
3062  * includes calling spi_register_controller() to hook up to this SPI bus glue.
3063  *
3064  * SPI controllers use board specific (often SOC specific) bus numbers,
3065  * and board-specific addressing for SPI devices combines those numbers
3066  * with chip select numbers.  Since SPI does not directly support dynamic
3067  * device identification, boards need configuration tables telling which
3068  * chip is at which address.
3069  *
3070  * This must be called from context that can sleep.  It returns zero on
3071  * success, else a negative error code (dropping the controller's refcount).
3072  * After a successful return, the caller is responsible for calling
3073  * spi_unregister_controller().
3074  *
3075  * Return: zero on success, else a negative error code.
3076  */
3077 int spi_register_controller(struct spi_controller *ctlr)
3078 {
3079 	struct device		*dev = ctlr->dev.parent;
3080 	struct boardinfo	*bi;
3081 	int			status;
3082 	int			id, first_dynamic;
3083 
3084 	if (!dev)
3085 		return -ENODEV;
3086 
3087 	/*
3088 	 * Make sure all necessary hooks are implemented before registering
3089 	 * the SPI controller.
3090 	 */
3091 	status = spi_controller_check_ops(ctlr);
3092 	if (status)
3093 		return status;
3094 
3095 	if (ctlr->bus_num >= 0) {
3096 		/* Devices with a fixed bus num must check in with that num */
3097 		mutex_lock(&board_lock);
3098 		id = idr_alloc(&spi_master_idr, ctlr, ctlr->bus_num,
3099 			ctlr->bus_num + 1, GFP_KERNEL);
3100 		mutex_unlock(&board_lock);
3101 		if (WARN(id < 0, "couldn't get idr"))
3102 			return id == -ENOSPC ? -EBUSY : id;
3103 		ctlr->bus_num = id;
3104 	} else if (ctlr->dev.of_node) {
3105 		/* Allocate dynamic bus number using Linux idr */
3106 		id = of_alias_get_id(ctlr->dev.of_node, "spi");
3107 		if (id >= 0) {
3108 			ctlr->bus_num = id;
3109 			mutex_lock(&board_lock);
3110 			id = idr_alloc(&spi_master_idr, ctlr, ctlr->bus_num,
3111 				       ctlr->bus_num + 1, GFP_KERNEL);
3112 			mutex_unlock(&board_lock);
3113 			if (WARN(id < 0, "couldn't get idr"))
3114 				return id == -ENOSPC ? -EBUSY : id;
3115 		}
3116 	}
3117 	if (ctlr->bus_num < 0) {
3118 		first_dynamic = of_alias_get_highest_id("spi");
3119 		if (first_dynamic < 0)
3120 			first_dynamic = 0;
3121 		else
3122 			first_dynamic++;
3123 
3124 		mutex_lock(&board_lock);
3125 		id = idr_alloc(&spi_master_idr, ctlr, first_dynamic,
3126 			       0, GFP_KERNEL);
3127 		mutex_unlock(&board_lock);
3128 		if (WARN(id < 0, "couldn't get idr"))
3129 			return id;
3130 		ctlr->bus_num = id;
3131 	}
3132 	ctlr->bus_lock_flag = 0;
3133 	init_completion(&ctlr->xfer_completion);
3134 	init_completion(&ctlr->cur_msg_completion);
3135 	if (!ctlr->max_dma_len)
3136 		ctlr->max_dma_len = INT_MAX;
3137 
3138 	/*
3139 	 * Register the device, then userspace will see it.
3140 	 * Registration fails if the bus ID is in use.
3141 	 */
3142 	dev_set_name(&ctlr->dev, "spi%u", ctlr->bus_num);
3143 
3144 	if (!spi_controller_is_slave(ctlr) && ctlr->use_gpio_descriptors) {
3145 		status = spi_get_gpio_descs(ctlr);
3146 		if (status)
3147 			goto free_bus_id;
3148 		/*
3149 		 * A controller using GPIO descriptors always
3150 		 * supports SPI_CS_HIGH if need be.
3151 		 */
3152 		ctlr->mode_bits |= SPI_CS_HIGH;
3153 	}
3154 
3155 	/*
3156 	 * Even if it's just one always-selected device, there must
3157 	 * be at least one chipselect.
3158 	 */
3159 	if (!ctlr->num_chipselect) {
3160 		status = -EINVAL;
3161 		goto free_bus_id;
3162 	}
3163 
3164 	/* Setting last_cs to -1 means no chip selected */
3165 	ctlr->last_cs = -1;
3166 
3167 	status = device_add(&ctlr->dev);
3168 	if (status < 0)
3169 		goto free_bus_id;
3170 	dev_dbg(dev, "registered %s %s\n",
3171 			spi_controller_is_slave(ctlr) ? "slave" : "master",
3172 			dev_name(&ctlr->dev));
3173 
3174 	/*
3175 	 * If we're using a queued driver, start the queue. Note that we don't
3176 	 * need the queueing logic if the driver is only supporting high-level
3177 	 * memory operations.
3178 	 */
3179 	if (ctlr->transfer) {
3180 		dev_info(dev, "controller is unqueued, this is deprecated\n");
3181 	} else if (ctlr->transfer_one || ctlr->transfer_one_message) {
3182 		status = spi_controller_initialize_queue(ctlr);
3183 		if (status) {
3184 			device_del(&ctlr->dev);
3185 			goto free_bus_id;
3186 		}
3187 	}
3188 	/* Add statistics */
3189 	ctlr->pcpu_statistics = spi_alloc_pcpu_stats(dev);
3190 	if (!ctlr->pcpu_statistics) {
3191 		dev_err(dev, "Error allocating per-cpu statistics\n");
3192 		status = -ENOMEM;
3193 		goto destroy_queue;
3194 	}
3195 
3196 	mutex_lock(&board_lock);
3197 	list_add_tail(&ctlr->list, &spi_controller_list);
3198 	list_for_each_entry(bi, &board_list, list)
3199 		spi_match_controller_to_boardinfo(ctlr, &bi->board_info);
3200 	mutex_unlock(&board_lock);
3201 
3202 	/* Register devices from the device tree and ACPI */
3203 	of_register_spi_devices(ctlr);
3204 	acpi_register_spi_devices(ctlr);
3205 	return status;
3206 
3207 destroy_queue:
3208 	spi_destroy_queue(ctlr);
3209 free_bus_id:
3210 	mutex_lock(&board_lock);
3211 	idr_remove(&spi_master_idr, ctlr->bus_num);
3212 	mutex_unlock(&board_lock);
3213 	return status;
3214 }
3215 EXPORT_SYMBOL_GPL(spi_register_controller);
3216 
3217 static void devm_spi_unregister(struct device *dev, void *res)
3218 {
3219 	spi_unregister_controller(*(struct spi_controller **)res);
3220 }
3221 
3222 /**
3223  * devm_spi_register_controller - register managed SPI master or slave
3224  *	controller
3225  * @dev:    device managing SPI controller
3226  * @ctlr: initialized controller, originally from spi_alloc_master() or
3227  *	spi_alloc_slave()
3228  * Context: can sleep
3229  *
3230  * Register an SPI controller as with spi_register_controller(), which will
3231  * automatically be unregistered and freed when @dev is unbound.
3232  *
3233  * Return: zero on success, else a negative error code.
3234  */
3235 int devm_spi_register_controller(struct device *dev,
3236 				 struct spi_controller *ctlr)
3237 {
3238 	struct spi_controller **ptr;
3239 	int ret;
3240 
3241 	ptr = devres_alloc(devm_spi_unregister, sizeof(*ptr), GFP_KERNEL);
3242 	if (!ptr)
3243 		return -ENOMEM;
3244 
3245 	ret = spi_register_controller(ctlr);
3246 	if (!ret) {
3247 		*ptr = ctlr;
3248 		devres_add(dev, ptr);
3249 	} else {
3250 		devres_free(ptr);
3251 	}
3252 
3253 	return ret;
3254 }
3255 EXPORT_SYMBOL_GPL(devm_spi_register_controller);
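
/*
 * Example (illustrative sketch, not part of this file): the fully
 * managed pattern, pairing devm_spi_alloc_master() with
 * devm_spi_register_controller() so that unbinding the parent device
 * unregisters and releases everything.  "foo_" hooks and the chip
 * select count are hypothetical.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct spi_controller *ctlr;
 *
 *		ctlr = devm_spi_alloc_master(&pdev->dev, 0);
 *		if (!ctlr)
 *			return -ENOMEM;
 *
 *		ctlr->dev.of_node = pdev->dev.of_node;
 *		ctlr->num_chipselect = 4;
 *		ctlr->setup = foo_setup;
 *		ctlr->transfer_one = foo_transfer_one;
 *
 *		return devm_spi_register_controller(&pdev->dev, ctlr);
 *	}
 */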
3256 
3257 static int __unregister(struct device *dev, void *null)
3258 {
3259 	spi_unregister_device(to_spi_device(dev));
3260 	return 0;
3261 }
3262 
3263 /**
3264  * spi_unregister_controller - unregister SPI master or slave controller
3265  * @ctlr: the controller being unregistered
3266  * Context: can sleep
3267  *
3268  * This call is used only by SPI controller drivers, which are the
3269  * only ones directly touching chip registers.
3270  *
3271  * This must be called from context that can sleep.
3272  *
3273  * Note that this function also drops a reference to the controller.
3274  */
3275 void spi_unregister_controller(struct spi_controller *ctlr)
3276 {
3277 	struct spi_controller *found;
3278 	int id = ctlr->bus_num;
3279 
3280 	/* Prevent addition of new devices, unregister existing ones */
3281 	if (IS_ENABLED(CONFIG_SPI_DYNAMIC))
3282 		mutex_lock(&ctlr->add_lock);
3283 
3284 	device_for_each_child(&ctlr->dev, NULL, __unregister);
3285 
3286 	/* First make sure that this controller was ever added */
3287 	mutex_lock(&board_lock);
3288 	found = idr_find(&spi_master_idr, id);
3289 	mutex_unlock(&board_lock);
3290 	if (ctlr->queued) {
3291 		if (spi_destroy_queue(ctlr))
3292 			dev_err(&ctlr->dev, "queue remove failed\n");
3293 	}
3294 	mutex_lock(&board_lock);
3295 	list_del(&ctlr->list);
3296 	mutex_unlock(&board_lock);
3297 
3298 	device_del(&ctlr->dev);
3299 
3300 	/* Free bus id */
3301 	mutex_lock(&board_lock);
3302 	if (found == ctlr)
3303 		idr_remove(&spi_master_idr, id);
3304 	mutex_unlock(&board_lock);
3305 
3306 	if (IS_ENABLED(CONFIG_SPI_DYNAMIC))
3307 		mutex_unlock(&ctlr->add_lock);
3308 
3309 	/* Release the last reference on the controller if its driver
3310 	 * has not yet been converted to devm_spi_alloc_master/slave().
3311 	 */
3312 	if (!ctlr->devm_allocated)
3313 		put_device(&ctlr->dev);
3314 }
3315 EXPORT_SYMBOL_GPL(spi_unregister_controller);
3316 
3317 static inline int __spi_check_suspended(const struct spi_controller *ctlr)
3318 {
3319 	return ctlr->flags & SPI_CONTROLLER_SUSPENDED ? -ESHUTDOWN : 0;
3320 }
3321 
3322 static inline void __spi_mark_suspended(struct spi_controller *ctlr)
3323 {
3324 	mutex_lock(&ctlr->bus_lock_mutex);
3325 	ctlr->flags |= SPI_CONTROLLER_SUSPENDED;
3326 	mutex_unlock(&ctlr->bus_lock_mutex);
3327 }
3328 
3329 static inline void __spi_mark_resumed(struct spi_controller *ctlr)
3330 {
3331 	mutex_lock(&ctlr->bus_lock_mutex);
3332 	ctlr->flags &= ~SPI_CONTROLLER_SUSPENDED;
3333 	mutex_unlock(&ctlr->bus_lock_mutex);
3334 }
3335 
3336 int spi_controller_suspend(struct spi_controller *ctlr)
3337 {
3338 	int ret = 0;
3339 
3340 	/* Basically no-ops for non-queued controllers */
3341 	if (ctlr->queued) {
3342 		ret = spi_stop_queue(ctlr);
3343 		if (ret)
3344 			dev_err(&ctlr->dev, "queue stop failed\n");
3345 	}
3346 
3347 	__spi_mark_suspended(ctlr);
3348 	return ret;
3349 }
3350 EXPORT_SYMBOL_GPL(spi_controller_suspend);
3351 
3352 int spi_controller_resume(struct spi_controller *ctlr)
3353 {
3354 	int ret = 0;
3355 
3356 	__spi_mark_resumed(ctlr);
3357 
3358 	if (ctlr->queued) {
3359 		ret = spi_start_queue(ctlr);
3360 		if (ret)
3361 			dev_err(&ctlr->dev, "queue restart failed\n");
3362 	}
3363 	return ret;
3364 }
3365 EXPORT_SYMBOL_GPL(spi_controller_resume);
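
/*
 * Example (illustrative sketch, not part of this file): system sleep
 * hooks that quiesce the message queue around the controller's own
 * context save/restore.  "foo_" names are hypothetical.
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		struct spi_controller *ctlr = dev_get_drvdata(dev);
 *		int ret = spi_controller_suspend(ctlr);
 *
 *		if (ret)
 *			return ret;
 *		return foo_hw_save_context(ctlr);
 *	}
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		struct spi_controller *ctlr = dev_get_drvdata(dev);
 *
 *		foo_hw_restore_context(ctlr);
 *		return spi_controller_resume(ctlr);
 *	}
 */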
3366 
3367 /*-------------------------------------------------------------------------*/
3368 
3369 /* Core methods for spi_message alterations */
3370 
3371 static void __spi_replace_transfers_release(struct spi_controller *ctlr,
3372 					    struct spi_message *msg,
3373 					    void *res)
3374 {
3375 	struct spi_replaced_transfers *rxfer = res;
3376 	size_t i;
3377 
3378 	/* Call extra callback if requested */
3379 	if (rxfer->release)
3380 		rxfer->release(ctlr, msg, res);
3381 
3382 	/* Insert replaced transfers back into the message */
3383 	list_splice(&rxfer->replaced_transfers, rxfer->replaced_after);
3384 
3385 	/* Remove the formerly inserted entries */
3386 	for (i = 0; i < rxfer->inserted; i++)
3387 		list_del(&rxfer->inserted_transfers[i].transfer_list);
3388 }
3389 
3390 /**
3391  * spi_replace_transfers - replace transfers with several transfers
3392  *                         and register change with spi_message.resources
3393  * @msg:           the spi_message we work upon
3394  * @xfer_first:    the first spi_transfer we want to replace
3395  * @remove:        number of transfers to remove
3396  * @insert:        the number of transfers we want to insert instead
3397  * @release:       extra release code necessary in some circumstances
3398  * @extradatasize: extra data to allocate (with the alignment guarantees
3399  *                 of &struct spi_transfer)
3400  * @gfp:           gfp flags
3401  *
3402  * Return: pointer to the new &struct spi_replaced_transfers,
3403  *         or an ERR_PTR() in case of errors.
3404  */
3405 static struct spi_replaced_transfers *spi_replace_transfers(
3406 	struct spi_message *msg,
3407 	struct spi_transfer *xfer_first,
3408 	size_t remove,
3409 	size_t insert,
3410 	spi_replaced_release_t release,
3411 	size_t extradatasize,
3412 	gfp_t gfp)
3413 {
3414 	struct spi_replaced_transfers *rxfer;
3415 	struct spi_transfer *xfer;
3416 	size_t i;
3417 
3418 	/* Allocate the structure using spi_res */
3419 	rxfer = spi_res_alloc(msg->spi, __spi_replace_transfers_release,
3420 			      struct_size(rxfer, inserted_transfers, insert)
3421 			      + extradatasize,
3422 			      gfp);
3423 	if (!rxfer)
3424 		return ERR_PTR(-ENOMEM);
3425 
3426 	/* The release code to invoke before running the generic release */
3427 	rxfer->release = release;
3428 
3429 	/* Assign extradata */
3430 	if (extradatasize)
3431 		rxfer->extradata =
3432 			&rxfer->inserted_transfers[insert];
3433 
3434 	/* Init the replaced_transfers list */
3435 	INIT_LIST_HEAD(&rxfer->replaced_transfers);
3436 
3437 	/*
3438 	 * Assign the list_entry after which we should reinsert
3439 	 * the @replaced_transfers - it may be spi_message.transfers!
3440 	 */
3441 	rxfer->replaced_after = xfer_first->transfer_list.prev;
3442 
3443 	/* Remove the requested number of transfers */
3444 	for (i = 0; i < remove; i++) {
3445 		/*
3446 		 * If the entry after replaced_after is msg->transfers,
3447 		 * then we have been requested to remove more transfers
3448 		 * than are in the list.
3449 		 */
3450 		if (rxfer->replaced_after->next == &msg->transfers) {
3451 			dev_err(&msg->spi->dev,
3452 				"requested to remove more spi_transfers than are available\n");
3453 			/* Insert replaced transfers back into the message */
3454 			list_splice(&rxfer->replaced_transfers,
3455 				    rxfer->replaced_after);
3456 
3457 			/* Free the spi_replaced_transfers structure... */
3458 			spi_res_free(rxfer);
3459 
3460 			/* ...and return with an error */
3461 			return ERR_PTR(-EINVAL);
3462 		}
3463 
3464 		/*
3465 		 * Remove the entry after replaced_after from list of
3466 		 * transfers and add it to list of replaced_transfers.
3467 		 */
3468 		list_move_tail(rxfer->replaced_after->next,
3469 			       &rxfer->replaced_transfers);
3470 	}
3471 
3472 	/*
3473 	 * Create copies of the given xfer with identical settings,
3474 	 * based on the first transfer that was removed.
3475 	 */
3476 	for (i = 0; i < insert; i++) {
3477 		/* We need to run in reverse order */
3478 		xfer = &rxfer->inserted_transfers[insert - 1 - i];
3479 
3480 		/* Copy all spi_transfer data */
3481 		memcpy(xfer, xfer_first, sizeof(*xfer));
3482 
3483 		/* Add to list */
3484 		list_add(&xfer->transfer_list, rxfer->replaced_after);
3485 
3486 		/* Clear cs_change and delay for all but the last */
3487 		if (i) {
3488 			xfer->cs_change = false;
3489 			xfer->delay.value = 0;
3490 		}
3491 	}
3492 
3493 	/* Set up inserted... */
3494 	rxfer->inserted = insert;
3495 
3496 	/* ...and register it with spi_res/spi_message */
3497 	spi_res_add(msg, rxfer);
3498 
3499 	return rxfer;
3500 }
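
/*
 * Illustrative note on the bookkeeping above: with remove = 1 and
 * insert = 3, the call turns a transfer list
 *
 *	A -> X -> B
 * into
 *	A -> X0 -> X1 -> X2 -> B
 *
 * where X is parked on rxfer->replaced_transfers until the spi_res
 * release callback splices it back and unlinks X0..X2 again.
 */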
3501 
3502 static int __spi_split_transfer_maxsize(struct spi_controller *ctlr,
3503 					struct spi_message *msg,
3504 					struct spi_transfer **xferp,
3505 					size_t maxsize,
3506 					gfp_t gfp)
3507 {
3508 	struct spi_transfer *xfer = *xferp, *xfers;
3509 	struct spi_replaced_transfers *srt;
3510 	size_t offset;
3511 	size_t count, i;
3512 
3513 	/* Calculate how many we have to replace */
3514 	count = DIV_ROUND_UP(xfer->len, maxsize);
3515 
3516 	/* Create replacement */
3517 	srt = spi_replace_transfers(msg, xfer, 1, count, NULL, 0, gfp);
3518 	if (IS_ERR(srt))
3519 		return PTR_ERR(srt);
3520 	xfers = srt->inserted_transfers;
3521 
3522 	/*
3523 	 * Now handle each of those newly inserted spi_transfers.
3524 	 * Note that the replacement spi_transfers are all preset
3525 	 * to the same values as *xferp, so tx_buf, rx_buf and len
3526 	 * are all identical (as well as most others),
3527 	 * so we just have to fix up len and the pointers.
3528 	 *
3529 	 * This also includes support for the deprecated
3530 	 * spi_message.is_dma_mapped interface.
3531 	 */
3532 
3533 	/*
3534 	 * The first transfer just needs the length modified, so we
3535 	 * run it outside the loop.
3536 	 */
3537 	xfers[0].len = min_t(size_t, maxsize, xfer[0].len);
3538 
3539 	/* All the others need rx_buf/tx_buf also set */
3540 	for (i = 1, offset = maxsize; i < count; offset += maxsize, i++) {
3541 		/* Update rx_buf, tx_buf and dma */
3542 		if (xfers[i].rx_buf)
3543 			xfers[i].rx_buf += offset;
3544 		if (xfers[i].rx_dma)
3545 			xfers[i].rx_dma += offset;
3546 		if (xfers[i].tx_buf)
3547 			xfers[i].tx_buf += offset;
3548 		if (xfers[i].tx_dma)
3549 			xfers[i].tx_dma += offset;
3550 
3551 		/* Update length */
3552 		xfers[i].len = min(maxsize, xfers[i].len - offset);
3553 	}
3554 
3555 	/*
3556 	 * We set up xferp to the last entry we have inserted,
3557 	 * so that we skip those already split transfers.
3558 	 */
3559 	*xferp = &xfers[count - 1];
3560 
3561 	/* Increment statistics counters */
3562 	SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics,
3563 				       transfers_split_maxsize);
3564 	SPI_STATISTICS_INCREMENT_FIELD(msg->spi->pcpu_statistics,
3565 				       transfers_split_maxsize);
3566 
3567 	return 0;
3568 }
3569 
3570 /**
3571  * spi_split_transfers_maxsize - split spi transfers into multiple transfers
3572  *                               when an individual transfer exceeds a
3573  *                               certain size
3574  * @ctlr:    the @spi_controller for this transfer
3575  * @msg:     the @spi_message to transform
3576  * @maxsize: the maximum length a transfer may have before it is split
3577  * @gfp: GFP allocation flags
3578  *
3579  * Return: status of transformation
3580  */
3581 int spi_split_transfers_maxsize(struct spi_controller *ctlr,
3582 				struct spi_message *msg,
3583 				size_t maxsize,
3584 				gfp_t gfp)
3585 {
3586 	struct spi_transfer *xfer;
3587 	int ret;
3588 
3589 	/*
3590 	 * Iterate over the transfer_list,
3591 	 * but note that xfer is advanced to the last transfer inserted
3592 	 * to avoid checking sizes again unnecessarily (also, xfer may
3593 	 * belong to a different list by the time the
3594 	 * replacement has happened).
3595 	 */
3596 	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
3597 		if (xfer->len > maxsize) {
3598 			ret = __spi_split_transfer_maxsize(ctlr, msg, &xfer,
3599 							   maxsize, gfp);
3600 			if (ret)
3601 				return ret;
3602 		}
3603 	}
3604 
3605 	return 0;
3606 }
3607 EXPORT_SYMBOL_GPL(spi_split_transfers_maxsize);
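
/*
 * Illustrative usage sketch: a controller driver whose hardware can only
 * move, say, 64 bytes per transfer could call this helper from its
 * prepare_message() hook.  The example_* name is hypothetical.
 */
static int example_prepare_message(struct spi_controller *ctlr,
				   struct spi_message *msg)
{
	/* Split any transfer larger than the 64-byte hardware limit */
	return spi_split_transfers_maxsize(ctlr, msg, 64, GFP_KERNEL);
}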
3608 
3609 /*-------------------------------------------------------------------------*/
3610 
3611 /* Core methods for SPI controller protocol drivers.  Some of the
3612  * other core methods are currently defined as inline functions.
3613  */
3614 
3615 static int __spi_validate_bits_per_word(struct spi_controller *ctlr,
3616 					u8 bits_per_word)
3617 {
3618 	if (ctlr->bits_per_word_mask) {
3619 		/* Only 32 bits fit in the mask */
3620 		if (bits_per_word > 32)
3621 			return -EINVAL;
3622 		if (!(ctlr->bits_per_word_mask & SPI_BPW_MASK(bits_per_word)))
3623 			return -EINVAL;
3624 	}
3625 
3626 	return 0;
3627 }
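
/*
 * Illustrative note: the mask consulted above is declared by controller
 * drivers at probe time.  A controller supporting 8- and 16-bit words
 * would set
 *
 *	ctlr->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16);
 *
 * while SPI_BPW_RANGE_MASK(4, 16) covers a contiguous range.
 */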
3628 
3629 /**
3630  * spi_set_cs_timing - configure CS setup, hold, and inactive delays
3631  * @spi: the device that requires specific CS timing configuration
3632  *
3633  * Return: zero on success, else a negative error code.
3634  */
3635 static int spi_set_cs_timing(struct spi_device *spi)
3636 {
3637 	struct device *parent = spi->controller->dev.parent;
3638 	int status = 0;
3639 
3640 	if (spi->controller->set_cs_timing && !spi_get_csgpiod(spi, 0)) {
3641 		if (spi->controller->auto_runtime_pm) {
3642 			status = pm_runtime_get_sync(parent);
3643 			if (status < 0) {
3644 				pm_runtime_put_noidle(parent);
3645 				dev_err(&spi->controller->dev, "Failed to power device: %d\n",
3646 					status);
3647 				return status;
3648 			}
3649 
3650 			status = spi->controller->set_cs_timing(spi);
3651 			pm_runtime_mark_last_busy(parent);
3652 			pm_runtime_put_autosuspend(parent);
3653 		} else {
3654 			status = spi->controller->set_cs_timing(spi);
3655 		}
3656 	}
3657 	return status;
3658 }
3659 
3660 /**
3661  * spi_setup - setup SPI mode and clock rate
3662  * @spi: the device whose settings are being modified
3663  * Context: can sleep, and no requests are queued to the device
3664  *
3665  * SPI protocol drivers may need to update the transfer mode if the
3666  * device doesn't work with its default.  They may likewise need
3667  * to update clock rates or word sizes from initial values.  This function
3668  * changes those settings, and must be called from a context that can sleep.
3669  * Except for SPI_CS_HIGH, which takes effect immediately, the changes take
3670  * effect the next time the device is selected and data is transferred to
3671  * or from it.  When this function returns, the spi device is deselected.
3672  *
3673  * Note that this call will fail if the protocol driver specifies an option
3674  * that the underlying controller or its driver does not support.  For
3675  * example, not all hardware supports wire transfers using nine bit words,
3676  * LSB-first wire encoding, or active-high chipselects.
3677  *
3678  * Return: zero on success, else a negative error code.
3679  */
3680 int spi_setup(struct spi_device *spi)
3681 {
3682 	unsigned	bad_bits, ugly_bits;
3683 	int		status = 0;
3684 
3685 	/*
3686 	 * Check the mode to ensure that no two of DUAL, QUAD and NO_MOSI/MISO
3687 	 * are set at the same time.
3688 	 */
3689 	if ((hweight_long(spi->mode &
3690 		(SPI_TX_DUAL | SPI_TX_QUAD | SPI_NO_TX)) > 1) ||
3691 	    (hweight_long(spi->mode &
3692 		(SPI_RX_DUAL | SPI_RX_QUAD | SPI_NO_RX)) > 1)) {
3693 		dev_err(&spi->dev,
3694 		"setup: cannot select any two of dual, quad and no-rx/tx at the same time\n");
3695 		return -EINVAL;
3696 	}
3697 	/* If it is SPI_3WIRE mode, DUAL and QUAD should be forbidden */
3698 	if ((spi->mode & SPI_3WIRE) && (spi->mode &
3699 		(SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL |
3700 		 SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL)))
3701 		return -EINVAL;
3702 	/*
3703 	 * Help drivers fail *cleanly* when they need options
3704 	 * that aren't supported with their current controller.
3705 	 * SPI_CS_WORD has a fallback software implementation,
3706 	 * so it is ignored here.
3707 	 */
3708 	bad_bits = spi->mode & ~(spi->controller->mode_bits | SPI_CS_WORD |
3709 				 SPI_NO_TX | SPI_NO_RX);
3710 	ugly_bits = bad_bits &
3711 		    (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL |
3712 		     SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL);
3713 	if (ugly_bits) {
3714 		dev_warn(&spi->dev,
3715 			 "setup: ignoring unsupported mode bits %x\n",
3716 			 ugly_bits);
3717 		spi->mode &= ~ugly_bits;
3718 		bad_bits &= ~ugly_bits;
3719 	}
3720 	if (bad_bits) {
3721 		dev_err(&spi->dev, "setup: unsupported mode bits %x\n",
3722 			bad_bits);
3723 		return -EINVAL;
3724 	}
3725 
3726 	if (!spi->bits_per_word) {
3727 		spi->bits_per_word = 8;
3728 	} else {
3729 		/*
3730 		 * Some controllers may not support the default 8 bits-per-word,
3731 		 * so only perform the check when this is explicitly provided.
3732 		 */
3733 		status = __spi_validate_bits_per_word(spi->controller,
3734 						      spi->bits_per_word);
3735 		if (status)
3736 			return status;
3737 	}
3738 
3739 	if (spi->controller->max_speed_hz &&
3740 	    (!spi->max_speed_hz ||
3741 	     spi->max_speed_hz > spi->controller->max_speed_hz))
3742 		spi->max_speed_hz = spi->controller->max_speed_hz;
3743 
3744 	mutex_lock(&spi->controller->io_mutex);
3745 
3746 	if (spi->controller->setup) {
3747 		status = spi->controller->setup(spi);
3748 		if (status) {
3749 			mutex_unlock(&spi->controller->io_mutex);
3750 			dev_err(&spi->controller->dev, "Failed to setup device: %d\n",
3751 				status);
3752 			return status;
3753 		}
3754 	}
3755 
3756 	status = spi_set_cs_timing(spi);
3757 	if (status) {
3758 		mutex_unlock(&spi->controller->io_mutex);
3759 		return status;
3760 	}
3761 
3762 	if (spi->controller->auto_runtime_pm && spi->controller->set_cs) {
3763 		status = pm_runtime_resume_and_get(spi->controller->dev.parent);
3764 		if (status < 0) {
3765 			mutex_unlock(&spi->controller->io_mutex);
3766 			dev_err(&spi->controller->dev, "Failed to power device: %d\n",
3767 				status);
3768 			return status;
3769 		}
3770 
3771 		/*
3772 		 * We do not want to return a positive value from pm_runtime_get();
3773 		 * there are many instances of devices calling spi_setup() and
3774 		 * checking for a non-zero return value instead of a negative
3775 		 * return value.
3776 		 */
3777 		status = 0;
3778 
3779 		spi_set_cs(spi, false, true);
3780 		pm_runtime_mark_last_busy(spi->controller->dev.parent);
3781 		pm_runtime_put_autosuspend(spi->controller->dev.parent);
3782 	} else {
3783 		spi_set_cs(spi, false, true);
3784 	}
3785 
3786 	mutex_unlock(&spi->controller->io_mutex);
3787 
3788 	if (spi->rt && !spi->controller->rt) {
3789 		spi->controller->rt = true;
3790 		spi_set_thread_rt(spi->controller);
3791 	}
3792 
3793 	trace_spi_setup(spi, status);
3794 
3795 	dev_dbg(&spi->dev, "setup mode %lu, %s%s%s%s%u bits/w, %u Hz max --> %d\n",
3796 			spi->mode & SPI_MODE_X_MASK,
3797 			(spi->mode & SPI_CS_HIGH) ? "cs_high, " : "",
3798 			(spi->mode & SPI_LSB_FIRST) ? "lsb, " : "",
3799 			(spi->mode & SPI_3WIRE) ? "3wire, " : "",
3800 			(spi->mode & SPI_LOOP) ? "loopback, " : "",
3801 			spi->bits_per_word, spi->max_speed_hz,
3802 			status);
3803 
3804 	return status;
3805 }
3806 EXPORT_SYMBOL_GPL(spi_setup);
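
/*
 * Illustrative usage sketch: a protocol driver adjusting device settings
 * in probe() before its first transfer.  The example_* name and the
 * device parameters shown are hypothetical.
 */
static int example_probe(struct spi_device *spi)
{
	int ret;

	spi->mode = SPI_MODE_3;		/* CPOL = 1, CPHA = 1 */
	spi->bits_per_word = 16;
	spi->max_speed_hz = 1000000;	/* Cap the clock at 1 MHz */

	ret = spi_setup(spi);
	if (ret)
		dev_err(&spi->dev, "spi_setup failed: %d\n", ret);
	return ret;
}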
3807 
3808 static int _spi_xfer_word_delay_update(struct spi_transfer *xfer,
3809 				       struct spi_device *spi)
3810 {
3811 	int delay1, delay2;
3812 
3813 	delay1 = spi_delay_to_ns(&xfer->word_delay, xfer);
3814 	if (delay1 < 0)
3815 		return delay1;
3816 
3817 	delay2 = spi_delay_to_ns(&spi->word_delay, xfer);
3818 	if (delay2 < 0)
3819 		return delay2;
3820 
3821 	if (delay1 < delay2)
3822 		memcpy(&xfer->word_delay, &spi->word_delay,
3823 		       sizeof(xfer->word_delay));
3824 
3825 	return 0;
3826 }
3827 
3828 static int __spi_validate(struct spi_device *spi, struct spi_message *message)
3829 {
3830 	struct spi_controller *ctlr = spi->controller;
3831 	struct spi_transfer *xfer;
3832 	int w_size;
3833 
3834 	if (list_empty(&message->transfers))
3835 		return -EINVAL;
3836 
3837 	/*
3838 	 * If an SPI controller does not support toggling the CS line on each
3839 	 * transfer (indicated by the SPI_CS_WORD flag) or we are using a GPIO
3840 	 * for the CS line, we can emulate the CS-per-word hardware function by
3841 	 * splitting transfers into one-word transfers and ensuring that
3842 	 * cs_change is set for each transfer.
3843 	 */
3844 	if ((spi->mode & SPI_CS_WORD) && (!(ctlr->mode_bits & SPI_CS_WORD) ||
3845 					  spi_get_csgpiod(spi, 0))) {
3846 		size_t maxsize;
3847 		int ret;
3848 
3849 		maxsize = (spi->bits_per_word + 7) / 8;
3850 
3851 		/* spi_split_transfers_maxsize() requires message->spi */
3852 		message->spi = spi;
3853 
3854 		ret = spi_split_transfers_maxsize(ctlr, message, maxsize,
3855 						  GFP_KERNEL);
3856 		if (ret)
3857 			return ret;
3858 
3859 		list_for_each_entry(xfer, &message->transfers, transfer_list) {
3860 			/* Don't change cs_change on the last entry in the list */
3861 			if (list_is_last(&xfer->transfer_list, &message->transfers))
3862 				break;
3863 			xfer->cs_change = 1;
3864 		}
3865 	}
3866 
3867 	/*
3868 	 * Half-duplex links include original MicroWire, and ones with
3869 	 * only one data pin like SPI_3WIRE (switches direction) or where
3870 	 * either MOSI or MISO is missing.  They can also be caused by
3871 	 * software limitations.
3872 	 */
3873 	if ((ctlr->flags & SPI_CONTROLLER_HALF_DUPLEX) ||
3874 	    (spi->mode & SPI_3WIRE)) {
3875 		unsigned flags = ctlr->flags;
3876 
3877 		list_for_each_entry(xfer, &message->transfers, transfer_list) {
3878 			if (xfer->rx_buf && xfer->tx_buf)
3879 				return -EINVAL;
3880 			if ((flags & SPI_CONTROLLER_NO_TX) && xfer->tx_buf)
3881 				return -EINVAL;
3882 			if ((flags & SPI_CONTROLLER_NO_RX) && xfer->rx_buf)
3883 				return -EINVAL;
3884 		}
3885 	}
3886 
3887 	/*
3888 	 * Set transfer bits_per_word and max speed to the spi device default
3889 	 * if they are not set for this transfer.
3890 	 * Set transfer tx_nbits and rx_nbits to the single-transfer default
3891 	 * (SPI_NBITS_SINGLE) if they are not set for this transfer.
3892 	 * Ensure transfer word_delay is at least as long as that required by
3893 	 * the device itself.
3894 	 */
3895 	message->frame_length = 0;
3896 	list_for_each_entry(xfer, &message->transfers, transfer_list) {
3897 		xfer->effective_speed_hz = 0;
3898 		message->frame_length += xfer->len;
3899 		if (!xfer->bits_per_word)
3900 			xfer->bits_per_word = spi->bits_per_word;
3901 
3902 		if (!xfer->speed_hz)
3903 			xfer->speed_hz = spi->max_speed_hz;
3904 
3905 		if (ctlr->max_speed_hz && xfer->speed_hz > ctlr->max_speed_hz)
3906 			xfer->speed_hz = ctlr->max_speed_hz;
3907 
3908 		if (__spi_validate_bits_per_word(ctlr, xfer->bits_per_word))
3909 			return -EINVAL;
3910 
3911 		/*
3912 		 * SPI transfer length should be a multiple of the SPI word size,
3913 		 * where the word size is rounded up to a power-of-two number of bytes.
3914 		 */
3915 		if (xfer->bits_per_word <= 8)
3916 			w_size = 1;
3917 		else if (xfer->bits_per_word <= 16)
3918 			w_size = 2;
3919 		else
3920 			w_size = 4;
3921 
3922 		/* No partial transfers accepted */
3923 		if (xfer->len % w_size)
3924 			return -EINVAL;
3925 
3926 		if (xfer->speed_hz && ctlr->min_speed_hz &&
3927 		    xfer->speed_hz < ctlr->min_speed_hz)
3928 			return -EINVAL;
3929 
3930 		if (xfer->tx_buf && !xfer->tx_nbits)
3931 			xfer->tx_nbits = SPI_NBITS_SINGLE;
3932 		if (xfer->rx_buf && !xfer->rx_nbits)
3933 			xfer->rx_nbits = SPI_NBITS_SINGLE;
3934 		/*
3935 		 * Check transfer tx/rx_nbits:
3936 		 * 1. check the value matches one of single, dual and quad
3937 		 * 2. check tx/rx_nbits match the mode in spi_device
3938 		 */
3939 		if (xfer->tx_buf) {
3940 			if (spi->mode & SPI_NO_TX)
3941 				return -EINVAL;
3942 			if (xfer->tx_nbits != SPI_NBITS_SINGLE &&
3943 				xfer->tx_nbits != SPI_NBITS_DUAL &&
3944 				xfer->tx_nbits != SPI_NBITS_QUAD)
3945 				return -EINVAL;
3946 			if ((xfer->tx_nbits == SPI_NBITS_DUAL) &&
3947 				!(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD)))
3948 				return -EINVAL;
3949 			if ((xfer->tx_nbits == SPI_NBITS_QUAD) &&
3950 				!(spi->mode & SPI_TX_QUAD))
3951 				return -EINVAL;
3952 		}
3953 		/* Check transfer rx_nbits */
3954 		if (xfer->rx_buf) {
3955 			if (spi->mode & SPI_NO_RX)
3956 				return -EINVAL;
3957 			if (xfer->rx_nbits != SPI_NBITS_SINGLE &&
3958 				xfer->rx_nbits != SPI_NBITS_DUAL &&
3959 				xfer->rx_nbits != SPI_NBITS_QUAD)
3960 				return -EINVAL;
3961 			if ((xfer->rx_nbits == SPI_NBITS_DUAL) &&
3962 				!(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD)))
3963 				return -EINVAL;
3964 			if ((xfer->rx_nbits == SPI_NBITS_QUAD) &&
3965 				!(spi->mode & SPI_RX_QUAD))
3966 				return -EINVAL;
3967 		}
3968 
3969 		if (_spi_xfer_word_delay_update(xfer, spi))
3970 			return -EINVAL;
3971 	}
3972 
3973 	message->status = -EINPROGRESS;
3974 
3975 	return 0;
3976 }
3977 
3978 static int __spi_async(struct spi_device *spi, struct spi_message *message)
3979 {
3980 	struct spi_controller *ctlr = spi->controller;
3981 	struct spi_transfer *xfer;
3982 
3983 	/*
3984 	 * Some controllers do not support doing regular SPI transfers. Return
3985 	 * ENOTSUPP when this is the case.
3986 	 */
3987 	if (!ctlr->transfer)
3988 		return -ENOTSUPP;
3989 
3990 	message->spi = spi;
3991 
3992 	SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics, spi_async);
3993 	SPI_STATISTICS_INCREMENT_FIELD(spi->pcpu_statistics, spi_async);
3994 
3995 	trace_spi_message_submit(message);
3996 
3997 	if (!ctlr->ptp_sts_supported) {
3998 		list_for_each_entry(xfer, &message->transfers, transfer_list) {
3999 			xfer->ptp_sts_word_pre = 0;
4000 			ptp_read_system_prets(xfer->ptp_sts);
4001 		}
4002 	}
4003 
4004 	return ctlr->transfer(spi, message);
4005 }
4006 
4007 /**
4008  * spi_async - asynchronous SPI transfer
4009  * @spi: device with which data will be exchanged
4010  * @message: describes the data transfers, including completion callback
4011  * Context: any (irqs may be blocked, etc)
4012  *
4013  * This call may be used in_irq and other contexts which can't sleep,
4014  * as well as from task contexts which can sleep.
4015  *
4016  * The completion callback is invoked in a context which can't sleep.
4017  * Before that invocation, the value of message->status is undefined.
4018  * When the callback is issued, message->status holds either zero (to
4019  * indicate complete success) or a negative error code.  After that
4020  * callback returns, the driver which issued the transfer request may
4021  * deallocate the associated memory; it's no longer in use by any SPI
4022  * core or controller driver code.
4023  *
4024  * Note that although all messages to a spi_device are handled in
4025  * FIFO order, messages may go to different devices in other orders.
4026  * Some device might be higher priority, or have various "hard" access
4027  * time requirements, for example.
4028  *
4029  * On detection of any fault during the transfer, processing of
4030  * the entire message is aborted, and the device is deselected.
4031  * Until returning from the associated message completion callback,
4032  * no other spi_message queued to that device will be processed.
4033  * (This rule applies equally to all the synchronous transfer calls,
4034  * which are wrappers around this core asynchronous primitive.)
4035  *
4036  * Return: zero on success, else a negative error code.
4037  */
4038 int spi_async(struct spi_device *spi, struct spi_message *message)
4039 {
4040 	struct spi_controller *ctlr = spi->controller;
4041 	int ret;
4042 	unsigned long flags;
4043 
4044 	ret = __spi_validate(spi, message);
4045 	if (ret != 0)
4046 		return ret;
4047 
4048 	spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
4049 
4050 	if (ctlr->bus_lock_flag)
4051 		ret = -EBUSY;
4052 	else
4053 		ret = __spi_async(spi, message);
4054 
4055 	spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
4056 
4057 	return ret;
4058 }
4059 EXPORT_SYMBOL_GPL(spi_async);
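
/*
 * Illustrative usage sketch: an asynchronous read that signals a
 * completion from the callback.  The example_* names are hypothetical;
 * rx is assumed to be a DMA-safe (e.g. kmalloc'd) buffer that stays
 * valid until the callback has run.
 */
static void example_complete(void *context)
{
	complete(context);	/* Runs in a context that cannot sleep */
}

static int example_async_read(struct spi_device *spi, u8 *rx, size_t len)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct spi_transfer t = { .rx_buf = rx, .len = len };
	struct spi_message m;
	int ret;

	spi_message_init(&m);
	spi_message_add_tail(&t, &m);
	m.complete = example_complete;
	m.context = &done;

	ret = spi_async(spi, &m);
	if (ret)
		return ret;

	/* The caller could do other work here before waiting */
	wait_for_completion(&done);
	return m.status;
}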
4060 
4061 /**
4062  * spi_async_locked - version of spi_async with exclusive bus usage
4063  * @spi: device with which data will be exchanged
4064  * @message: describes the data transfers, including completion callback
4065  * Context: any (irqs may be blocked, etc)
4066  *
4067  * This call may be used in_irq and other contexts which can't sleep,
4068  * as well as from task contexts which can sleep.
4069  *
4070  * The completion callback is invoked in a context which can't sleep.
4071  * Before that invocation, the value of message->status is undefined.
4072  * When the callback is issued, message->status holds either zero (to
4073  * indicate complete success) or a negative error code.  After that
4074  * callback returns, the driver which issued the transfer request may
4075  * deallocate the associated memory; it's no longer in use by any SPI
4076  * core or controller driver code.
4077  *
4078  * Note that although all messages to a spi_device are handled in
4079  * FIFO order, messages may go to different devices in other orders.
4080  * Some device might be higher priority, or have various "hard" access
4081  * time requirements, for example.
4082  *
4083  * On detection of any fault during the transfer, processing of
4084  * the entire message is aborted, and the device is deselected.
4085  * Until returning from the associated message completion callback,
4086  * no other spi_message queued to that device will be processed.
4087  * (This rule applies equally to all the synchronous transfer calls,
4088  * which are wrappers around this core asynchronous primitive.)
4089  *
4090  * Return: zero on success, else a negative error code.
4091  */
4092 static int spi_async_locked(struct spi_device *spi, struct spi_message *message)
4093 {
4094 	struct spi_controller *ctlr = spi->controller;
4095 	int ret;
4096 	unsigned long flags;
4097 
4098 	ret = __spi_validate(spi, message);
4099 	if (ret != 0)
4100 		return ret;
4101 
4102 	spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
4103 
4104 	ret = __spi_async(spi, message);
4105 
4106 	spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
4107 
4108 	return ret;
4109 
4110 }
4111 
4112 static void __spi_transfer_message_noqueue(struct spi_controller *ctlr, struct spi_message *msg)
4113 {
4114 	bool was_busy;
4115 	int ret;
4116 
4117 	mutex_lock(&ctlr->io_mutex);
4118 
4119 	was_busy = ctlr->busy;
4120 
4121 	ctlr->cur_msg = msg;
4122 	ret = __spi_pump_transfer_message(ctlr, msg, was_busy);
4123 	if (ret)
4124 		dev_err(&ctlr->dev, "noqueue transfer failed\n");
4125 	ctlr->cur_msg = NULL;
4126 	ctlr->fallback = false;
4127 
4128 	if (!was_busy) {
4129 		kfree(ctlr->dummy_rx);
4130 		ctlr->dummy_rx = NULL;
4131 		kfree(ctlr->dummy_tx);
4132 		ctlr->dummy_tx = NULL;
4133 		if (ctlr->unprepare_transfer_hardware &&
4134 		    ctlr->unprepare_transfer_hardware(ctlr))
4135 			dev_err(&ctlr->dev,
4136 				"failed to unprepare transfer hardware\n");
4137 		spi_idle_runtime_pm(ctlr);
4138 	}
4139 
4140 	mutex_unlock(&ctlr->io_mutex);
4141 }
4142 
4143 /*-------------------------------------------------------------------------*/
4144 
4145 /*
4146  * Utility methods for SPI protocol drivers, layered on
4147  * top of the core.  Some other utility methods are defined as
4148  * inline functions.
4149  */
4150 
4151 static void spi_complete(void *arg)
4152 {
4153 	complete(arg);
4154 }
4155 
4156 static int __spi_sync(struct spi_device *spi, struct spi_message *message)
4157 {
4158 	DECLARE_COMPLETION_ONSTACK(done);
4159 	int status;
4160 	struct spi_controller *ctlr = spi->controller;
4161 
4162 	if (__spi_check_suspended(ctlr)) {
4163 		dev_warn_once(&spi->dev, "Attempted to sync while suspended\n");
4164 		return -ESHUTDOWN;
4165 	}
4166 
4167 	status = __spi_validate(spi, message);
4168 	if (status != 0)
4169 		return status;
4170 
4171 	message->spi = spi;
4172 
4173 	SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics, spi_sync);
4174 	SPI_STATISTICS_INCREMENT_FIELD(spi->pcpu_statistics, spi_sync);
4175 
4176 	/*
4177 	 * Checking queue_empty here only guarantees async/sync message
4178 	 * ordering when coming from the same context. It does not need to
4179 	 * guard against reentrancy from a different context. The io_mutex
4180 	 * will catch those cases.
4181 	 */
4182 	if (READ_ONCE(ctlr->queue_empty) && !ctlr->must_async) {
4183 		message->actual_length = 0;
4184 		message->status = -EINPROGRESS;
4185 
4186 		trace_spi_message_submit(message);
4187 
4188 		SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics, spi_sync_immediate);
4189 		SPI_STATISTICS_INCREMENT_FIELD(spi->pcpu_statistics, spi_sync_immediate);
4190 
4191 		__spi_transfer_message_noqueue(ctlr, message);
4192 
4193 		return message->status;
4194 	}
4195 
4196 	/*
4197 	 * There are messages in the async queue that could have originated
4198 	 * from the same context, so we need to preserve ordering.
4199 	 * Therefore we send the message to the async queue and wait until it
4200 	 * has completed.
4201 	 */
4202 	message->complete = spi_complete;
4203 	message->context = &done;
4204 	status = spi_async_locked(spi, message);
4205 	if (status == 0) {
4206 		wait_for_completion(&done);
4207 		status = message->status;
4208 	}
4209 	message->context = NULL;
4210 
4211 	return status;
4212 }
4213 
4214 /**
4215  * spi_sync - blocking/synchronous SPI data transfers
4216  * @spi: device with which data will be exchanged
4217  * @message: describes the data transfers
4218  * Context: can sleep
4219  *
4220  * This call may only be used from a context that may sleep.  The sleep
4221  * is non-interruptible, and has no timeout.  Low-overhead controller
4222  * drivers may DMA directly into and out of the message buffers.
4223  *
4224  * Note that the SPI device's chip select is active during the message,
4225  * and then is normally disabled between messages.  Drivers for some
4226  * frequently-used devices may want to minimize costs of selecting a chip,
4227  * by leaving it selected in anticipation that the next message will go
4228  * to the same chip.  (That may increase power usage.)
4229  *
4230  * Also, the caller is guaranteeing that the memory associated with the
4231  * message will not be freed before this call returns.
4232  *
4233  * Return: zero on success, else a negative error code.
4234  */
4235 int spi_sync(struct spi_device *spi, struct spi_message *message)
4236 {
4237 	int ret;
4238 
4239 	mutex_lock(&spi->controller->bus_lock_mutex);
4240 	ret = __spi_sync(spi, message);
4241 	mutex_unlock(&spi->controller->bus_lock_mutex);
4242 
4243 	return ret;
4244 }
4245 EXPORT_SYMBOL_GPL(spi_sync);
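
/*
 * Illustrative usage sketch: a command-plus-read exchange built from two
 * transfers and run synchronously.  The example_* name is hypothetical,
 * and cmd/data should be DMA-safe buffers in a real driver (see
 * spi_write_then_read() below for a helper that copies through one).
 */
static int example_cmd_read(struct spi_device *spi, const u8 *cmd,
			    u8 *data, size_t len)
{
	struct spi_transfer t[2] = {
		{ .tx_buf = cmd, .len = 1 },
		{ .rx_buf = data, .len = len },
	};
	struct spi_message m;

	spi_message_init_with_transfers(&m, t, ARRAY_SIZE(t));
	return spi_sync(spi, &m);
}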
4246 
4247 /**
4248  * spi_sync_locked - version of spi_sync with exclusive bus usage
4249  * @spi: device with which data will be exchanged
4250  * @message: describes the data transfers
4251  * Context: can sleep
4252  *
4253  * This call may only be used from a context that may sleep.  The sleep
4254  * is non-interruptible, and has no timeout.  Low-overhead controller
4255  * drivers may DMA directly into and out of the message buffers.
4256  *
4257  * This call should be used by drivers that require exclusive access to the
4258  * SPI bus. It has to be preceded by a spi_bus_lock call. The SPI bus must
4259  * be released by a spi_bus_unlock call when the exclusive access is over.
4260  *
4261  * Return: zero on success, else a negative error code.
4262  */
4263 int spi_sync_locked(struct spi_device *spi, struct spi_message *message)
4264 {
4265 	return __spi_sync(spi, message);
4266 }
4267 EXPORT_SYMBOL_GPL(spi_sync_locked);
4268 
4269 /**
4270  * spi_bus_lock - obtain a lock for exclusive SPI bus usage
4271  * @ctlr: SPI bus master that should be locked for exclusive bus access
4272  * Context: can sleep
4273  *
4274  * This call may only be used from a context that may sleep.  The sleep
4275  * is non-interruptible, and has no timeout.
4276  *
4277  * This call should be used by drivers that require exclusive access to the
4278  * SPI bus. The SPI bus must be released by a spi_bus_unlock call when the
4279  * exclusive access is over. Data transfer must be done by spi_sync_locked
4280  * and spi_async_locked calls when the SPI bus lock is held.
4281  *
4282  * Return: always zero.
4283  */
4284 int spi_bus_lock(struct spi_controller *ctlr)
4285 {
4286 	unsigned long flags;
4287 
4288 	mutex_lock(&ctlr->bus_lock_mutex);
4289 
4290 	spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
4291 	ctlr->bus_lock_flag = 1;
4292 	spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
4293 
4294 	/* Mutex remains locked until spi_bus_unlock() is called */
4295 
4296 	return 0;
4297 }
4298 EXPORT_SYMBOL_GPL(spi_bus_lock);
4299 
4300 /**
4301  * spi_bus_unlock - release the lock for exclusive SPI bus usage
4302  * @ctlr: SPI bus master that was locked for exclusive bus access
4303  * Context: can sleep
4304  *
4305  * This call may only be used from a context that may sleep.  The sleep
4306  * is non-interruptible, and has no timeout.
4307  *
4308  * This call releases an SPI bus lock previously obtained by an spi_bus_lock
4309  * call.
4310  *
4311  * Return: always zero.
4312  */
4313 int spi_bus_unlock(struct spi_controller *ctlr)
4314 {
4315 	ctlr->bus_lock_flag = 0;
4316 
4317 	mutex_unlock(&ctlr->bus_lock_mutex);
4318 
4319 	return 0;
4320 }
4321 EXPORT_SYMBOL_GPL(spi_bus_unlock);
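
/*
 * Illustrative usage sketch: a driver that must issue two messages back
 * to back with no unrelated traffic in between brackets them with the
 * bus lock and uses the _locked variants.  The example_* name is
 * hypothetical.
 */
static int example_atomic_pair(struct spi_device *spi,
			       struct spi_message *first,
			       struct spi_message *second)
{
	struct spi_controller *ctlr = spi->controller;
	int ret;

	spi_bus_lock(ctlr);

	ret = spi_sync_locked(spi, first);
	if (!ret)
		ret = spi_sync_locked(spi, second);

	spi_bus_unlock(ctlr);
	return ret;
}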
4322 
4323 /* Portable code must never pass more than 32 bytes */
4324 #define	SPI_BUFSIZ	max(32, SMP_CACHE_BYTES)
4325 
4326 static u8	*buf;
4327 
4328 /**
4329  * spi_write_then_read - SPI synchronous write followed by read
4330  * @spi: device with which data will be exchanged
4331  * @txbuf: data to be written (need not be dma-safe)
4332  * @n_tx: size of txbuf, in bytes
4333  * @rxbuf: buffer into which data will be read (need not be dma-safe)
4334  * @n_rx: size of rxbuf, in bytes
4335  * Context: can sleep
4336  *
4337  * This performs a half duplex MicroWire style transaction with the
4338  * device, sending txbuf and then reading rxbuf.  The return value
4339  * is zero for success, else a negative errno status code.
4340  * This call may only be used from a context that may sleep.
4341  *
4342  * Parameters to this routine are always copied using a small buffer.
4343  * Performance-sensitive or bulk transfer code should instead use
4344  * spi_{async,sync}() calls with dma-safe buffers.
4345  *
4346  * Return: zero on success, else a negative error code.
4347  */
4348 int spi_write_then_read(struct spi_device *spi,
4349 		const void *txbuf, unsigned n_tx,
4350 		void *rxbuf, unsigned n_rx)
4351 {
4352 	static DEFINE_MUTEX(lock);
4353 
4354 	int			status;
4355 	struct spi_message	message;
4356 	struct spi_transfer	x[2];
4357 	u8			*local_buf;
4358 
4359 	/*
4360 	 * Use the preallocated DMA-safe buffer if we can. We can't avoid
4361 	 * copying here (this is purely a convenience thing), but we can
4362 	 * keep heap costs out of the hot path unless someone else is
4363 	 * using the preallocated buffer or the transfer is too large.
4364 	 */
4365 	if ((n_tx + n_rx) > SPI_BUFSIZ || !mutex_trylock(&lock)) {
4366 		local_buf = kmalloc(max((unsigned)SPI_BUFSIZ, n_tx + n_rx),
4367 				    GFP_KERNEL | GFP_DMA);
4368 		if (!local_buf)
4369 			return -ENOMEM;
4370 	} else {
4371 		local_buf = buf;
4372 	}
4373 
4374 	spi_message_init(&message);
4375 	memset(x, 0, sizeof(x));
4376 	if (n_tx) {
4377 		x[0].len = n_tx;
4378 		spi_message_add_tail(&x[0], &message);
4379 	}
4380 	if (n_rx) {
4381 		x[1].len = n_rx;
4382 		spi_message_add_tail(&x[1], &message);
4383 	}
4384 
4385 	memcpy(local_buf, txbuf, n_tx);
4386 	x[0].tx_buf = local_buf;
4387 	x[1].rx_buf = local_buf + n_tx;
4388 
4389 	/* Do the i/o */
4390 	status = spi_sync(spi, &message);
4391 	if (status == 0)
4392 		memcpy(rxbuf, x[1].rx_buf, n_rx);
4393 
4394 	if (x[0].tx_buf == buf)
4395 		mutex_unlock(&lock);
4396 	else
4397 		kfree(local_buf);
4398 
4399 	return status;
4400 }
4401 EXPORT_SYMBOL_GPL(spi_write_then_read);
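
/*
 * Illustrative usage sketch: reading one register of a hypothetical
 * device whose protocol is "send an 8-bit address, then read an 8-bit
 * value".  Because the helper copies through its own DMA-safe buffer,
 * plain stack variables are fine here.
 */
static int example_read_reg(struct spi_device *spi, u8 reg, u8 *val)
{
	return spi_write_then_read(spi, &reg, 1, val, 1);
}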
4402 
4403 /*-------------------------------------------------------------------------*/
4404 
4405 #if IS_ENABLED(CONFIG_OF_DYNAMIC)
4406 /* Must call put_device() when done with the returned spi_device */
4407 static struct spi_device *of_find_spi_device_by_node(struct device_node *node)
4408 {
4409 	struct device *dev = bus_find_device_by_of_node(&spi_bus_type, node);
4410 
4411 	return dev ? to_spi_device(dev) : NULL;
4412 }
4413 
4414 /* The SPI controllers are not on the spi_bus, so we have to find them another way */
4415 static struct spi_controller *of_find_spi_controller_by_node(struct device_node *node)
4416 {
4417 	struct device *dev;
4418 
4419 	dev = class_find_device_by_of_node(&spi_master_class, node);
4420 	if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE))
4421 		dev = class_find_device_by_of_node(&spi_slave_class, node);
4422 	if (!dev)
4423 		return NULL;
4424 
4425 	/* Reference obtained in class_find_device() */
4426 	return container_of(dev, struct spi_controller, dev);
4427 }
4428 
4429 static int of_spi_notify(struct notifier_block *nb, unsigned long action,
4430 			 void *arg)
4431 {
4432 	struct of_reconfig_data *rd = arg;
4433 	struct spi_controller *ctlr;
4434 	struct spi_device *spi;
4435 
4436 	switch (of_reconfig_get_state_change(action, arg)) {
4437 	case OF_RECONFIG_CHANGE_ADD:
4438 		ctlr = of_find_spi_controller_by_node(rd->dn->parent);
4439 		if (ctlr == NULL)
4440 			return NOTIFY_OK;	/* Not for us */
4441 
4442 		if (of_node_test_and_set_flag(rd->dn, OF_POPULATED)) {
4443 			put_device(&ctlr->dev);
4444 			return NOTIFY_OK;
4445 		}
4446 
4447 		/*
4448 		 * Clear the flag before adding the device so that fw_devlink
4449 		 * doesn't skip adding consumers to this device.
4450 		 */
4451 		rd->dn->fwnode.flags &= ~FWNODE_FLAG_NOT_DEVICE;
4452 		spi = of_register_spi_device(ctlr, rd->dn);
4453 		put_device(&ctlr->dev);
4454 
4455 		if (IS_ERR(spi)) {
4456 			pr_err("%s: failed to create for '%pOF'\n",
4457 					__func__, rd->dn);
4458 			of_node_clear_flag(rd->dn, OF_POPULATED);
4459 			return notifier_from_errno(PTR_ERR(spi));
4460 		}
4461 		break;
4462 
4463 	case OF_RECONFIG_CHANGE_REMOVE:
4464 		/* Already depopulated? */
4465 		if (!of_node_check_flag(rd->dn, OF_POPULATED))
4466 			return NOTIFY_OK;
4467 
4468 		/* Find our device by node */
4469 		spi = of_find_spi_device_by_node(rd->dn);
4470 		if (spi == NULL)
4471 			return NOTIFY_OK;	/* Not meant for us */
4472 
4473 		/* Unregister takes one ref away */
4474 		spi_unregister_device(spi);
4475 
4476 		/* And drop the reference taken by the find */
4477 		put_device(&spi->dev);
4478 		break;
4479 	}
4480 
4481 	return NOTIFY_OK;
4482 }
4483 
4484 static struct notifier_block spi_of_notifier = {
4485 	.notifier_call = of_spi_notify,
4486 };
4487 #else /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
4488 extern struct notifier_block spi_of_notifier;
4489 #endif /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
4490 
4491 #if IS_ENABLED(CONFIG_ACPI)
4492 static int spi_acpi_controller_match(struct device *dev, const void *data)
4493 {
4494 	return ACPI_COMPANION(dev->parent) == data;
4495 }
4496 
4497 static struct spi_controller *acpi_spi_find_controller_by_adev(struct acpi_device *adev)
4498 {
4499 	struct device *dev;
4500 
4501 	dev = class_find_device(&spi_master_class, NULL, adev,
4502 				spi_acpi_controller_match);
4503 	if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE))
4504 		dev = class_find_device(&spi_slave_class, NULL, adev,
4505 					spi_acpi_controller_match);
4506 	if (!dev)
4507 		return NULL;
4508 
4509 	return container_of(dev, struct spi_controller, dev);
4510 }
4511 
4512 static struct spi_device *acpi_spi_find_device_by_adev(struct acpi_device *adev)
4513 {
4514 	struct device *dev;
4515 
4516 	dev = bus_find_device_by_acpi_dev(&spi_bus_type, adev);
4517 	return to_spi_device(dev);
4518 }
4519 
4520 static int acpi_spi_notify(struct notifier_block *nb, unsigned long value,
4521 			   void *arg)
4522 {
4523 	struct acpi_device *adev = arg;
4524 	struct spi_controller *ctlr;
4525 	struct spi_device *spi;
4526 
4527 	switch (value) {
4528 	case ACPI_RECONFIG_DEVICE_ADD:
4529 		ctlr = acpi_spi_find_controller_by_adev(acpi_dev_parent(adev));
4530 		if (!ctlr)
4531 			break;
4532 
4533 		acpi_register_spi_device(ctlr, adev);
4534 		put_device(&ctlr->dev);
4535 		break;
4536 	case ACPI_RECONFIG_DEVICE_REMOVE:
4537 		if (!acpi_device_enumerated(adev))
4538 			break;
4539 
4540 		spi = acpi_spi_find_device_by_adev(adev);
4541 		if (!spi)
4542 			break;
4543 
4544 		spi_unregister_device(spi);
4545 		put_device(&spi->dev);
4546 		break;
4547 	}
4548 
4549 	return NOTIFY_OK;
4550 }
4551 
4552 static struct notifier_block spi_acpi_notifier = {
4553 	.notifier_call = acpi_spi_notify,
4554 };
4555 #else
4556 extern struct notifier_block spi_acpi_notifier;
4557 #endif
4558 
4559 static int __init spi_init(void)
4560 {
4561 	int	status;
4562 
4563 	buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL);
4564 	if (!buf) {
4565 		status = -ENOMEM;
4566 		goto err0;
4567 	}
4568 
4569 	status = bus_register(&spi_bus_type);
4570 	if (status < 0)
4571 		goto err1;
4572 
4573 	status = class_register(&spi_master_class);
4574 	if (status < 0)
4575 		goto err2;
4576 
4577 	if (IS_ENABLED(CONFIG_SPI_SLAVE)) {
4578 		status = class_register(&spi_slave_class);
4579 		if (status < 0)
4580 			goto err3;
4581 	}
4582 
4583 	if (IS_ENABLED(CONFIG_OF_DYNAMIC))
4584 		WARN_ON(of_reconfig_notifier_register(&spi_of_notifier));
4585 	if (IS_ENABLED(CONFIG_ACPI))
4586 		WARN_ON(acpi_reconfig_notifier_register(&spi_acpi_notifier));
4587 
4588 	return 0;
4589 
4590 err3:
4591 	class_unregister(&spi_master_class);
4592 err2:
4593 	bus_unregister(&spi_bus_type);
4594 err1:
4595 	kfree(buf);
4596 	buf = NULL;
4597 err0:
4598 	return status;
4599 }
4600 
4601 /*
4602  * A board_info is normally registered in arch_initcall(),
4603  * but even essential drivers wait till later.
4604  *
4605  * REVISIT only boardinfo really needs static linking. The rest (device and
4606  * driver registration) _could_ be dynamically linked (modular) ... Costs
4607  * include needing to have boardinfo data structures be much more public.
4608  */
4609 postcore_initcall(spi_init);
4610