/*
 * Greybus SPI library
 *
 * Copyright 2014-2016 Google Inc.
 * Copyright 2014-2016 Linaro Ltd.
 *
 * Released under the GPLv2 only.
 */

#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>

#include "greybus.h"
#include "spilib.h"

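/*
 * State for a single Greybus SPI controller connection. A single
 * spi_message may be too large for one Greybus operation, so the
 * first/last transfer pointers, offsets and sizes below track how far
 * into the message the current operation has progressed.
 */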
struct gb_spilib {
	struct gb_connection	*connection;
	struct device		*parent;
	struct spi_transfer	*first_xfer;
	struct spi_transfer	*last_xfer;
	struct spilib_ops	*ops;
	u32			rx_xfer_offset;
	u32			tx_xfer_offset;
	u32			last_xfer_size;
	unsigned int		op_timeout;
	u16			mode;
	u16			flags;
	u32			bits_per_word_mask;
	u8			num_chipselect;
	u32			min_speed_hz;
	u32			max_speed_hz;
};

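/*
 * Message state, stored in spi_message->state. Since ->state is a
 * void pointer, the values are small integers cast to pointers.
 */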
#define GB_SPI_STATE_MSG_DONE		((void *)0)
#define GB_SPI_STATE_MSG_IDLE		((void *)1)
#define GB_SPI_STATE_MSG_RUNNING	((void *)2)
#define GB_SPI_STATE_OP_READY		((void *)3)
#define GB_SPI_STATE_OP_DONE		((void *)4)
#define GB_SPI_STATE_MSG_ERROR		((void *)-1)

#define XFER_TIMEOUT_TOLERANCE		200

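/* The spi_master is stored as the connection's private data */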
static struct spi_master *get_master_from_spi(struct gb_spilib *spi)
{
	return gb_connection_get_data(spi->connection);
}

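/*
 * Check whether the tx data gathered so far, plus the gb_spi_transfer
 * headers for count + 1 transfers, still fits in the request payload.
 * Returns 1 if one more transfer fits, 0 otherwise.
 */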
static int tx_header_fit_operation(u32 tx_size, u32 count, size_t data_max)
{
	size_t headers_size;

	data_max -= sizeof(struct gb_spi_transfer_request);
	headers_size = (count + 1) * sizeof(struct gb_spi_transfer);

	return tx_size + headers_size > data_max ? 0 : 1;
}

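/*
 * Limit the rx length of the current transfer to what still fits in
 * the response payload. For full-duplex transfers the tx and rx sizes
 * are kept equal, shrinking the tx size if needed.
 */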
static size_t calc_rx_xfer_size(u32 rx_size, u32 *tx_xfer_size, u32 len,
				size_t data_max)
{
	size_t rx_xfer_size;

	data_max -= sizeof(struct gb_spi_transfer_response);

	if (rx_size + len > data_max)
		rx_xfer_size = data_max - rx_size;
	else
		rx_xfer_size = len;

	/* if this is a write_read, for symmetry read the same as write */
	if (*tx_xfer_size && rx_xfer_size > *tx_xfer_size)
		rx_xfer_size = *tx_xfer_size;
	if (*tx_xfer_size && rx_xfer_size < *tx_xfer_size)
		*tx_xfer_size = rx_xfer_size;

	return rx_xfer_size;
}

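/*
 * Limit the tx length of the current transfer to what still fits in
 * the request payload alongside the transfer headers.
 */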
static size_t calc_tx_xfer_size(u32 tx_size, u32 count, size_t len,
				size_t data_max)
{
	size_t headers_size;

	data_max -= sizeof(struct gb_spi_transfer_request);
	headers_size = (count + 1) * sizeof(struct gb_spi_transfer);

	if (tx_size + headers_size + len > data_max)
		return data_max - (tx_size + sizeof(struct gb_spi_transfer));

	return len;
}

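/* Reset the per-message transfer tracking state */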
static void clean_xfer_state(struct gb_spilib *spi)
{
	spi->first_xfer = NULL;
	spi->last_xfer = NULL;
	spi->rx_xfer_offset = 0;
	spi->tx_xfer_offset = 0;
	spi->last_xfer_size = 0;
	spi->op_timeout = 0;
}

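/*
 * A split transfer is complete once the data sent or received so far,
 * plus the size of the current chunk, covers the whole transfer.
 */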
static bool is_last_xfer_done(struct gb_spilib *spi)
{
	struct spi_transfer *last_xfer = spi->last_xfer;

	if ((spi->tx_xfer_offset + spi->last_xfer_size == last_xfer->len) ||
	    (spi->rx_xfer_offset + spi->last_xfer_size == last_xfer->len))
		return true;

	return false;
}

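/*
 * After an operation completes, either advance to the next transfer in
 * the message or bump the offsets so the remainder of a partially-sent
 * transfer goes out in the next operation.
 */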
static int setup_next_xfer(struct gb_spilib *spi, struct spi_message *msg)
{
	struct spi_transfer *last_xfer = spi->last_xfer;

	if (msg->state != GB_SPI_STATE_OP_DONE)
		return 0;

	/*
	 * if we transferred all content of the last transfer, reset values and
	 * check if this was the last transfer in the message
	 */
	if (is_last_xfer_done(spi)) {
		spi->tx_xfer_offset = 0;
		spi->rx_xfer_offset = 0;
		spi->op_timeout = 0;
		if (last_xfer == list_last_entry(&msg->transfers,
						 struct spi_transfer,
						 transfer_list))
			msg->state = GB_SPI_STATE_MSG_DONE;
		else
			spi->first_xfer = list_next_entry(last_xfer,
							  transfer_list);
		return 0;
	}

	spi->first_xfer = last_xfer;
	if (last_xfer->tx_buf)
		spi->tx_xfer_offset += spi->last_xfer_size;

	if (last_xfer->rx_buf)
		spi->rx_xfer_offset += spi->last_xfer_size;

	return 0;
}

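/* Return the next transfer in the message, or NULL if xfer is the last */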
static struct spi_transfer *get_next_xfer(struct spi_transfer *xfer,
					  struct spi_message *msg)
{
	if (xfer == list_last_entry(&msg->transfers, struct spi_transfer,
				    transfer_list))
		return NULL;

	return list_next_entry(xfer, transfer_list);
}

/* Routines to transfer data */
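/*
 * Build a single GB_SPI_TYPE_TRANSFER operation covering as much of
 * the message as fits in one request/response pair, starting at
 * spi->first_xfer. A transfer that does not fit completely is split
 * and flagged GB_SPI_XFER_INPROGRESS so that its remainder can follow
 * in a later operation.
 */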
static struct gb_operation *gb_spi_operation_create(struct gb_spilib *spi,
		struct gb_connection *connection, struct spi_message *msg)
{
	struct gb_spi_transfer_request *request;
	struct spi_device *dev = msg->spi;
	struct spi_transfer *xfer;
	struct gb_spi_transfer *gb_xfer;
	struct gb_operation *operation;
	u32 tx_size = 0, rx_size = 0, count = 0, xfer_len = 0, request_size;
	u32 tx_xfer_size = 0, rx_xfer_size = 0, len;
	u32 total_len = 0;
	unsigned int xfer_timeout;
	size_t data_max;
	void *tx_data;

	data_max = gb_operation_get_payload_size_max(connection);
	xfer = spi->first_xfer;

	/* Find number of transfers queued and tx/rx length in the message */

	while (msg->state != GB_SPI_STATE_OP_READY) {
		msg->state = GB_SPI_STATE_MSG_RUNNING;
		spi->last_xfer = xfer;

		if (!xfer->tx_buf && !xfer->rx_buf) {
			dev_err(spi->parent,
				"bufferless transfer, length %u\n", xfer->len);
			msg->state = GB_SPI_STATE_MSG_ERROR;
			return NULL;
		}

		tx_xfer_size = 0;
		rx_xfer_size = 0;

		if (xfer->tx_buf) {
			len = xfer->len - spi->tx_xfer_offset;
			if (!tx_header_fit_operation(tx_size, count, data_max))
				break;
			tx_xfer_size = calc_tx_xfer_size(tx_size, count,
							 len, data_max);
			spi->last_xfer_size = tx_xfer_size;
		}

		if (xfer->rx_buf) {
			len = xfer->len - spi->rx_xfer_offset;
			rx_xfer_size = calc_rx_xfer_size(rx_size, &tx_xfer_size,
							 len, data_max);
			spi->last_xfer_size = rx_xfer_size;
		}

		tx_size += tx_xfer_size;
		rx_size += rx_xfer_size;

		total_len += spi->last_xfer_size;
		count++;

		xfer = get_next_xfer(xfer, msg);
		if (!xfer || total_len >= data_max)
			msg->state = GB_SPI_STATE_OP_READY;
	}

	/*
	 * In addition to space for all message descriptors we need
	 * to have enough to hold all tx data.
	 */
	request_size = sizeof(*request);
	request_size += count * sizeof(*gb_xfer);
	request_size += tx_size;

	/* Response consists only of incoming data */
	operation = gb_operation_create(connection, GB_SPI_TYPE_TRANSFER,
					request_size, rx_size, GFP_KERNEL);
	if (!operation)
		return NULL;

	request = operation->request->payload;
	request->count = cpu_to_le16(count);
	request->mode = dev->mode;
	request->chip_select = dev->chip_select;

	gb_xfer = &request->transfers[0];
	tx_data = gb_xfer + count;	/* place tx data after last gb_xfer */

	/* Fill in the transfers array */
	xfer = spi->first_xfer;
	while (msg->state != GB_SPI_STATE_OP_DONE) {
		if (xfer == spi->last_xfer)
			xfer_len = spi->last_xfer_size;
		else
			xfer_len = xfer->len;

		/* make sure we do not timeout in a slow transfer */
		xfer_timeout = xfer_len * 8 * MSEC_PER_SEC / xfer->speed_hz;
		xfer_timeout += GB_OPERATION_TIMEOUT_DEFAULT;

		if (xfer_timeout > spi->op_timeout)
			spi->op_timeout = xfer_timeout;

		gb_xfer->speed_hz = cpu_to_le32(xfer->speed_hz);
		gb_xfer->len = cpu_to_le32(xfer_len);
		gb_xfer->delay_usecs = cpu_to_le16(xfer->delay_usecs);
		gb_xfer->cs_change = xfer->cs_change;
		gb_xfer->bits_per_word = xfer->bits_per_word;

		/* Copy tx data */
		if (xfer->tx_buf) {
			gb_xfer->xfer_flags |= GB_SPI_XFER_WRITE;
			memcpy(tx_data, xfer->tx_buf + spi->tx_xfer_offset,
			       xfer_len);
			tx_data += xfer_len;
		}

		if (xfer->rx_buf)
			gb_xfer->xfer_flags |= GB_SPI_XFER_READ;

		if (xfer == spi->last_xfer) {
			if (!is_last_xfer_done(spi))
				gb_xfer->xfer_flags |= GB_SPI_XFER_INPROGRESS;
			msg->state = GB_SPI_STATE_OP_DONE;
			continue;
		}

		gb_xfer++;
		xfer = get_next_xfer(xfer, msg);
	}

	msg->actual_length += total_len;

	return operation;
}

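/*
 * Copy the rx data returned in the response back into the rx buffers
 * of the transfers covered by the operation.
 */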
static void gb_spi_decode_response(struct gb_spilib *spi,
				   struct spi_message *msg,
				   struct gb_spi_transfer_response *response)
{
	struct spi_transfer *xfer = spi->first_xfer;
	void *rx_data = response->data;
	u32 xfer_len;

	while (xfer) {
		/* Copy rx data */
		if (xfer->rx_buf) {
			if (xfer == spi->first_xfer)
				xfer_len = xfer->len - spi->rx_xfer_offset;
			else if (xfer == spi->last_xfer)
				xfer_len = spi->last_xfer_size;
			else
				xfer_len = xfer->len;

			memcpy(xfer->rx_buf + spi->rx_xfer_offset, rx_data,
			       xfer_len);
			rx_data += xfer_len;
		}

		if (xfer == spi->last_xfer)
			break;

		xfer = list_next_entry(xfer, transfer_list);
	}
}

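/*
 * The spi_master ->transfer_one_message() callback: repeatedly build
 * and send Greybus transfer operations until the whole message has
 * been handled or an error occurs.
 */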
static int gb_spi_transfer_one_message(struct spi_master *master,
				       struct spi_message *msg)
{
	struct gb_spilib *spi = spi_master_get_devdata(master);
	struct gb_connection *connection = spi->connection;
	struct gb_spi_transfer_response *response;
	struct gb_operation *operation;
	int ret = 0;

	spi->first_xfer = list_first_entry_or_null(&msg->transfers,
						   struct spi_transfer,
						   transfer_list);
	if (!spi->first_xfer) {
		ret = -ENOMEM;
		goto out;
	}

	msg->state = GB_SPI_STATE_MSG_IDLE;

	while (msg->state != GB_SPI_STATE_MSG_DONE &&
	       msg->state != GB_SPI_STATE_MSG_ERROR) {
		operation = gb_spi_operation_create(spi, connection, msg);
		if (!operation) {
			msg->state = GB_SPI_STATE_MSG_ERROR;
			ret = -EINVAL;
			continue;
		}

		ret = gb_operation_request_send_sync_timeout(operation,
							     spi->op_timeout);
		if (!ret) {
			response = operation->response->payload;
			if (response)
				gb_spi_decode_response(spi, msg, response);
		} else {
			dev_err(spi->parent,
				"transfer operation failed: %d\n", ret);
			msg->state = GB_SPI_STATE_MSG_ERROR;
		}

		gb_operation_put(operation);
		setup_next_xfer(spi, msg);
	}

out:
	msg->status = ret;
	clean_xfer_state(spi);
	spi_finalize_current_message(master);

	return ret;
}

static int gb_spi_prepare_transfer_hardware(struct spi_master *master)
{
	struct gb_spilib *spi = spi_master_get_devdata(master);

	return spi->ops->prepare_transfer_hardware(spi->parent);
}

static int gb_spi_unprepare_transfer_hardware(struct spi_master *master)
{
	struct gb_spilib *spi = spi_master_get_devdata(master);

	spi->ops->unprepare_transfer_hardware(spi->parent);

	return 0;
}

static int gb_spi_setup(struct spi_device *spi)
{
	/* Nothing to do for now */
	return 0;
}

static void gb_spi_cleanup(struct spi_device *spi)
{
	/* Nothing to do for now */
}

/* Routines to get controller information */

/*
 * Map Greybus SPI mode bits/flags/bpw into Linux ones.
 * All bits are the same for now, so these macros return their argument
 * unchanged.
 */
#define gb_spi_mode_map(mode) mode
#define gb_spi_flags_map(flags) flags

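/*
 * Fetch the controller's mode, flags, bits-per-word mask, chip-select
 * count and speed range from the remote SPI controller.
 */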
static int gb_spi_get_master_config(struct gb_spilib *spi)
{
	struct gb_spi_master_config_response response;
	u16 mode, flags;
	int ret;

	ret = gb_operation_sync(spi->connection, GB_SPI_TYPE_MASTER_CONFIG,
				NULL, 0, &response, sizeof(response));
	if (ret < 0)
		return ret;

	mode = le16_to_cpu(response.mode);
	spi->mode = gb_spi_mode_map(mode);

	flags = le16_to_cpu(response.flags);
	spi->flags = gb_spi_flags_map(flags);

	spi->bits_per_word_mask = le32_to_cpu(response.bits_per_word_mask);
	spi->num_chipselect = response.num_chipselect;

	spi->min_speed_hz = le32_to_cpu(response.min_speed_hz);
	spi->max_speed_hz = le32_to_cpu(response.max_speed_hz);

	return 0;
}

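/*
 * Query the configuration of the device on the given chip select and
 * register a matching spi_device (spidev, spi-nor, or a raw modalias)
 * on the master.
 */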
static int gb_spi_setup_device(struct gb_spilib *spi, u8 cs)
{
	struct spi_master *master = get_master_from_spi(spi);
	struct gb_spi_device_config_request request;
	struct gb_spi_device_config_response response;
	struct spi_board_info spi_board = { {0} };
	struct spi_device *spidev;
	int ret;
	u8 dev_type;

	request.chip_select = cs;

	ret = gb_operation_sync(spi->connection, GB_SPI_TYPE_DEVICE_CONFIG,
				&request, sizeof(request),
				&response, sizeof(response));
	if (ret < 0)
		return ret;

	dev_type = response.device_type;

	if (dev_type == GB_SPI_SPI_DEV)
		strlcpy(spi_board.modalias, "spidev",
			sizeof(spi_board.modalias));
	else if (dev_type == GB_SPI_SPI_NOR)
		strlcpy(spi_board.modalias, "spi-nor",
			sizeof(spi_board.modalias));
	else if (dev_type == GB_SPI_SPI_MODALIAS)
		memcpy(spi_board.modalias, response.name,
		       sizeof(spi_board.modalias));
	else
		return -EINVAL;

	spi_board.mode		= le16_to_cpu(response.mode);
	spi_board.bus_num	= master->bus_num;
	spi_board.chip_select	= cs;
	spi_board.max_speed_hz	= le32_to_cpu(response.max_speed_hz);

	spidev = spi_new_device(master, &spi_board);
	if (!spidev)
		return -EINVAL;

	return 0;
}

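/*
 * Allocate and register an SPI master for a Greybus connection, then
 * instantiate a device for each chip select reported by the module.
 *
 * A minimal usage sketch from a host driver (the callback and variable
 * names here are illustrative, not part of this library):
 *
 *	static int my_prepare(struct device *dev) { return 0; }
 *	static void my_unprepare(struct device *dev) { }
 *
 *	static struct spilib_ops my_spilib_ops = {
 *		.prepare_transfer_hardware = my_prepare,
 *		.unprepare_transfer_hardware = my_unprepare,
 *	};
 *
 *	ret = gb_spilib_master_init(connection, parent_dev, &my_spilib_ops);
 *
 * Both callbacks are optional; a NULL ops pointer is also accepted.
 */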
int gb_spilib_master_init(struct gb_connection *connection, struct device *dev,
			  struct spilib_ops *ops)
{
	struct gb_spilib *spi;
	struct spi_master *master;
	int ret;
	u8 i;

	/* Allocate master with space for data */
	master = spi_alloc_master(dev, sizeof(*spi));
	if (!master) {
		dev_err(dev, "cannot alloc SPI master\n");
		return -ENOMEM;
	}

	spi = spi_master_get_devdata(master);
	spi->connection = connection;
	gb_connection_set_data(connection, master);
	spi->parent = dev;
	spi->ops = ops;

	/* get master configuration */
	ret = gb_spi_get_master_config(spi);
	if (ret)
		goto exit_spi_put;

	master->bus_num = -1; /* Allow spi-core to allocate it dynamically */
	master->num_chipselect = spi->num_chipselect;
	master->mode_bits = spi->mode;
	master->flags = spi->flags;
	master->bits_per_word_mask = spi->bits_per_word_mask;

	/* Attach methods */
	master->cleanup = gb_spi_cleanup;
	master->setup = gb_spi_setup;
	master->transfer_one_message = gb_spi_transfer_one_message;

	if (ops && ops->prepare_transfer_hardware) {
		master->prepare_transfer_hardware =
			gb_spi_prepare_transfer_hardware;
	}

	if (ops && ops->unprepare_transfer_hardware) {
		master->unprepare_transfer_hardware =
			gb_spi_unprepare_transfer_hardware;
	}

	master->auto_runtime_pm = true;

	ret = spi_register_master(master);
	if (ret < 0)
		goto exit_spi_put;

	/* now, fetch the devices' configuration */
	for (i = 0; i < spi->num_chipselect; i++) {
		ret = gb_spi_setup_device(spi, i);
		if (ret < 0) {
			dev_err(dev, "failed to allocate spi device %d: %d\n",
				i, ret);
			goto exit_spi_unregister;
		}
	}

	return 0;

exit_spi_put:
	spi_master_put(master);

	return ret;

exit_spi_unregister:
	spi_unregister_master(master);

	return ret;
}
EXPORT_SYMBOL_GPL(gb_spilib_master_init);

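/* Unregister the SPI master associated with this connection */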
void gb_spilib_master_exit(struct gb_connection *connection)
{
	struct spi_master *master = gb_connection_get_data(connection);

	spi_unregister_master(master);
}
EXPORT_SYMBOL_GPL(gb_spilib_master_exit);

MODULE_LICENSE("GPL v2");