• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
/*
 * Copyright (c) 2008-2014, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
13 
14 #include <linux/clk.h>
15 #include <linux/delay.h>
16 #include <linux/err.h>
17 #include <linux/interrupt.h>
18 #include <linux/io.h>
19 #include <linux/list.h>
20 #include <linux/module.h>
21 #include <linux/of.h>
22 #include <linux/platform_device.h>
23 #include <linux/pm_runtime.h>
24 #include <linux/spi/spi.h>
25 
/* QUP core register offsets (relative to the mapped controller base) */
#define QUP_CONFIG			0x0000
#define QUP_STATE			0x0004
#define QUP_IO_M_MODES			0x0008
#define QUP_SW_RESET			0x000c
#define QUP_OPERATIONAL			0x0018
#define QUP_ERROR_FLAGS			0x001c
#define QUP_ERROR_FLAGS_EN		0x0020
#define QUP_OPERATIONAL_MASK		0x0028
#define QUP_HW_VERSION			0x0030
#define QUP_MX_OUTPUT_CNT		0x0100
#define QUP_OUTPUT_FIFO			0x0110
#define QUP_MX_WRITE_CNT		0x0150
#define QUP_MX_INPUT_CNT		0x0200
#define QUP_MX_READ_CNT			0x0208
#define QUP_INPUT_FIFO			0x0218

/* SPI mini-core register offsets */
#define SPI_CONFIG			0x0300
#define SPI_IO_CONTROL			0x0304
#define SPI_ERROR_FLAGS			0x0308
#define SPI_ERROR_FLAGS_EN		0x030c

/* QUP_CONFIG fields */
#define QUP_CONFIG_SPI_MODE		(1 << 8)
#define QUP_CONFIG_CLOCK_AUTO_GATE	BIT(13)
#define QUP_CONFIG_NO_INPUT		BIT(7)
#define QUP_CONFIG_NO_OUTPUT		BIT(6)
#define QUP_CONFIG_N			0x001f	/* bits-per-word minus one */

/* QUP_STATE fields */
#define QUP_STATE_VALID			BIT(2)
#define QUP_STATE_RESET			0
#define QUP_STATE_RUN			1
#define QUP_STATE_PAUSE			3
#define QUP_STATE_MASK			3
#define QUP_STATE_CLEAR			2

#define QUP_HW_VERSION_2_1_1		0x20010001

/* QUP_IO_M_MODES fields */
#define QUP_IO_M_PACK_EN		BIT(15)
#define QUP_IO_M_UNPACK_EN		BIT(14)
#define QUP_IO_M_INPUT_MODE_MASK_SHIFT	12
#define QUP_IO_M_OUTPUT_MODE_MASK_SHIFT	10
#define QUP_IO_M_INPUT_MODE_MASK	(3 << QUP_IO_M_INPUT_MODE_MASK_SHIFT)
#define QUP_IO_M_OUTPUT_MODE_MASK	(3 << QUP_IO_M_OUTPUT_MODE_MASK_SHIFT)

/* extract the hardware block/FIFO geometry fields from QUP_IO_M_MODES */
#define QUP_IO_M_OUTPUT_BLOCK_SIZE(x)	(((x) & (0x03 << 0)) >> 0)
#define QUP_IO_M_OUTPUT_FIFO_SIZE(x)	(((x) & (0x07 << 2)) >> 2)
#define QUP_IO_M_INPUT_BLOCK_SIZE(x)	(((x) & (0x03 << 5)) >> 5)
#define QUP_IO_M_INPUT_FIFO_SIZE(x)	(((x) & (0x07 << 7)) >> 7)

/* input/output transfer modes programmed into QUP_IO_M_MODES */
#define QUP_IO_M_MODE_FIFO		0
#define QUP_IO_M_MODE_BLOCK		1
#define QUP_IO_M_MODE_DMOV		2
#define QUP_IO_M_MODE_BAM		3

/* QUP_OPERATIONAL fields */
#define QUP_OP_MAX_INPUT_DONE_FLAG	BIT(11)
#define QUP_OP_MAX_OUTPUT_DONE_FLAG	BIT(10)
#define QUP_OP_IN_SERVICE_FLAG		BIT(9)
#define QUP_OP_OUT_SERVICE_FLAG		BIT(8)
#define QUP_OP_IN_FIFO_FULL		BIT(7)
#define QUP_OP_OUT_FIFO_FULL		BIT(6)
#define QUP_OP_IN_FIFO_NOT_EMPTY	BIT(5)
#define QUP_OP_OUT_FIFO_NOT_EMPTY	BIT(4)

/* QUP_ERROR_FLAGS and QUP_ERROR_FLAGS_EN fields */
#define QUP_ERROR_OUTPUT_OVER_RUN	BIT(5)
#define QUP_ERROR_INPUT_UNDER_RUN	BIT(4)
#define QUP_ERROR_OUTPUT_UNDER_RUN	BIT(3)
#define QUP_ERROR_INPUT_OVER_RUN	BIT(2)

/* SPI_CONFIG fields */
#define SPI_CONFIG_HS_MODE		BIT(10)
#define SPI_CONFIG_INPUT_FIRST		BIT(9)
#define SPI_CONFIG_LOOPBACK		BIT(8)

/* SPI_IO_CONTROL fields */
#define SPI_IO_C_FORCE_CS		BIT(11)
#define SPI_IO_C_CLK_IDLE_HIGH		BIT(10)
#define SPI_IO_C_MX_CS_MODE		BIT(8)
#define SPI_IO_C_CS_N_POLARITY_0	BIT(4)
#define SPI_IO_C_CS_SELECT(x)		(((x) & 3) << 2)
#define SPI_IO_C_CS_SELECT_MASK		0x000c
#define SPI_IO_C_TRISTATE_CS		BIT(1)
#define SPI_IO_C_NO_TRI_STATE		BIT(0)

/* SPI_ERROR_FLAGS and SPI_ERROR_FLAGS_EN fields */
#define SPI_ERROR_CLK_OVER_RUN		BIT(1)
#define SPI_ERROR_CLK_UNDER_RUN		BIT(0)

#define SPI_NUM_CHIPSELECTS		4

/* high speed mode is when bus rate is greater then 26MHz */
#define SPI_HS_MIN_RATE			26000000
#define SPI_MAX_RATE			50000000

/* polling interval (us) and retry budget used by spi_qup_set_state() */
#define SPI_DELAY_THRESHOLD		1
#define SPI_DELAY_RETRY			10
/* Per-controller driver state */
struct spi_qup {
	void __iomem		*base;	/* mapped QUP register block */
	struct device		*dev;
	struct clk		*cclk;	/* core clock */
	struct clk		*iclk;	/* interface clock */
	int			irq;	/* QUP interrupt line */
	spinlock_t		lock;	/* guards xfer/error/tx_bytes/rx_bytes
					 * against the IRQ handler */

	/* FIFO and block geometry read back from QUP_IO_M_MODES, in bytes */
	int			in_fifo_sz;
	int			out_fifo_sz;
	int			in_blk_sz;
	int			out_blk_sz;

	struct spi_transfer	*xfer;	/* transfer in flight, NULL when idle */
	struct completion	done;	/* completed by IRQ handler on finish/error */
	int			error;	/* -EIO when error flags were raised, else 0 */
	int			w_size;	/* bytes per SPI word */
	int			tx_bytes;	/* bytes queued to the output FIFO so far */
	int			rx_bytes;	/* bytes drained from the input FIFO so far */
	int			qup_v1;	/* set for qcom,spi-qup-v1.1.1 hardware */
};
147 
148 
spi_qup_is_valid_state(struct spi_qup * controller)149 static inline bool spi_qup_is_valid_state(struct spi_qup *controller)
150 {
151 	u32 opstate = readl_relaxed(controller->base + QUP_STATE);
152 
153 	return opstate & QUP_STATE_VALID;
154 }
155 
spi_qup_set_state(struct spi_qup * controller,u32 state)156 static int spi_qup_set_state(struct spi_qup *controller, u32 state)
157 {
158 	unsigned long loop;
159 	u32 cur_state;
160 
161 	loop = 0;
162 	while (!spi_qup_is_valid_state(controller)) {
163 
164 		usleep_range(SPI_DELAY_THRESHOLD, SPI_DELAY_THRESHOLD * 2);
165 
166 		if (++loop > SPI_DELAY_RETRY)
167 			return -EIO;
168 	}
169 
170 	if (loop)
171 		dev_dbg(controller->dev, "invalid state for %ld,us %d\n",
172 			loop, state);
173 
174 	cur_state = readl_relaxed(controller->base + QUP_STATE);
175 	/*
176 	 * Per spec: for PAUSE_STATE to RESET_STATE, two writes
177 	 * of (b10) are required
178 	 */
179 	if (((cur_state & QUP_STATE_MASK) == QUP_STATE_PAUSE) &&
180 	    (state == QUP_STATE_RESET)) {
181 		writel_relaxed(QUP_STATE_CLEAR, controller->base + QUP_STATE);
182 		writel_relaxed(QUP_STATE_CLEAR, controller->base + QUP_STATE);
183 	} else {
184 		cur_state &= ~QUP_STATE_MASK;
185 		cur_state |= state;
186 		writel_relaxed(cur_state, controller->base + QUP_STATE);
187 	}
188 
189 	loop = 0;
190 	while (!spi_qup_is_valid_state(controller)) {
191 
192 		usleep_range(SPI_DELAY_THRESHOLD, SPI_DELAY_THRESHOLD * 2);
193 
194 		if (++loop > SPI_DELAY_RETRY)
195 			return -EIO;
196 	}
197 
198 	return 0;
199 }
200 
201 
spi_qup_fifo_read(struct spi_qup * controller,struct spi_transfer * xfer)202 static void spi_qup_fifo_read(struct spi_qup *controller,
203 			    struct spi_transfer *xfer)
204 {
205 	u8 *rx_buf = xfer->rx_buf;
206 	u32 word, state;
207 	int idx, shift, w_size;
208 
209 	w_size = controller->w_size;
210 
211 	while (controller->rx_bytes < xfer->len) {
212 
213 		state = readl_relaxed(controller->base + QUP_OPERATIONAL);
214 		if (0 == (state & QUP_OP_IN_FIFO_NOT_EMPTY))
215 			break;
216 
217 		word = readl_relaxed(controller->base + QUP_INPUT_FIFO);
218 
219 		if (!rx_buf) {
220 			controller->rx_bytes += w_size;
221 			continue;
222 		}
223 
224 		for (idx = 0; idx < w_size; idx++, controller->rx_bytes++) {
225 			/*
226 			 * The data format depends on bytes per SPI word:
227 			 *  4 bytes: 0x12345678
228 			 *  2 bytes: 0x00001234
229 			 *  1 byte : 0x00000012
230 			 */
231 			shift = BITS_PER_BYTE;
232 			shift *= (w_size - idx - 1);
233 			rx_buf[controller->rx_bytes] = word >> shift;
234 		}
235 	}
236 }
237 
/*
 * Fill the output FIFO from the transfer's tx buffer.
 *
 * Packs w_size bytes per FIFO word, most significant byte first (the
 * first byte of each SPI word lands in bits 31:24, see the (3 - idx)
 * shift below).  Stops when the whole transfer has been queued or the
 * FIFO reports full.  With a NULL tx_buf a zero word is written per SPI
 * word purely to clock the bus.
 */
static void spi_qup_fifo_write(struct spi_qup *controller,
			    struct spi_transfer *xfer)
{
	const u8 *tx_buf = xfer->tx_buf;
	u32 word, state, data;
	int idx, w_size;

	w_size = controller->w_size;

	while (controller->tx_bytes < xfer->len) {

		state = readl_relaxed(controller->base + QUP_OPERATIONAL);
		if (state & QUP_OP_OUT_FIFO_FULL)
			break;

		word = 0;
		for (idx = 0; idx < w_size; idx++, controller->tx_bytes++) {

			if (!tx_buf) {
				/*
				 * No data to send: account for one dummy word
				 * and break out.  Note: break skips the loop's
				 * tx_bytes++ increment, so only += w_size
				 * applies here.
				 */
				controller->tx_bytes += w_size;
				break;
			}

			data = tx_buf[controller->tx_bytes];
			/* idx 0 -> bits 31:24, idx 1 -> 23:16, ... */
			word |= data << (BITS_PER_BYTE * (3 - idx));
		}

		writel_relaxed(word, controller->base + QUP_OUTPUT_FIFO);
	}
}
268 
/*
 * QUP interrupt handler.
 *
 * Reads the QUP/SPI error flags and the operational flags, acknowledges
 * them by writing the read values back, services the FIFOs for the
 * in-flight transfer, and completes controller->done once every rx byte
 * has arrived or an error flag was raised.  controller->xfer is taken
 * out under the lock for the duration of the handler.
 */
static irqreturn_t spi_qup_qup_irq(int irq, void *dev_id)
{
	struct spi_qup *controller = dev_id;
	struct spi_transfer *xfer;
	u32 opflags, qup_err, spi_err;
	unsigned long flags;
	int error = 0;

	/* claim the current transfer while we service the hardware */
	spin_lock_irqsave(&controller->lock, flags);
	xfer = controller->xfer;
	controller->xfer = NULL;
	spin_unlock_irqrestore(&controller->lock, flags);

	qup_err = readl_relaxed(controller->base + QUP_ERROR_FLAGS);
	spi_err = readl_relaxed(controller->base + SPI_ERROR_FLAGS);
	opflags = readl_relaxed(controller->base + QUP_OPERATIONAL);

	/* ack every flag we just observed by writing the values back */
	writel_relaxed(qup_err, controller->base + QUP_ERROR_FLAGS);
	writel_relaxed(spi_err, controller->base + SPI_ERROR_FLAGS);
	writel_relaxed(opflags, controller->base + QUP_OPERATIONAL);

	/* no transfer in flight: spurious interrupt, already acked above */
	if (!xfer) {
		dev_err_ratelimited(controller->dev, "unexpected irq %08x %08x %08x\n",
				    qup_err, spi_err, opflags);
		return IRQ_HANDLED;
	}

	if (qup_err) {
		if (qup_err & QUP_ERROR_OUTPUT_OVER_RUN)
			dev_warn(controller->dev, "OUTPUT_OVER_RUN\n");
		if (qup_err & QUP_ERROR_INPUT_UNDER_RUN)
			dev_warn(controller->dev, "INPUT_UNDER_RUN\n");
		if (qup_err & QUP_ERROR_OUTPUT_UNDER_RUN)
			dev_warn(controller->dev, "OUTPUT_UNDER_RUN\n");
		if (qup_err & QUP_ERROR_INPUT_OVER_RUN)
			dev_warn(controller->dev, "INPUT_OVER_RUN\n");

		error = -EIO;
	}

	if (spi_err) {
		if (spi_err & SPI_ERROR_CLK_OVER_RUN)
			dev_warn(controller->dev, "CLK_OVER_RUN\n");
		if (spi_err & SPI_ERROR_CLK_UNDER_RUN)
			dev_warn(controller->dev, "CLK_UNDER_RUN\n");

		error = -EIO;
	}

	/* service the FIFO that requested attention */
	if (opflags & QUP_OP_IN_SERVICE_FLAG)
		spi_qup_fifo_read(controller, xfer);

	if (opflags & QUP_OP_OUT_SERVICE_FLAG)
		spi_qup_fifo_write(controller, xfer);

	/* hand the transfer back and publish any error */
	spin_lock_irqsave(&controller->lock, flags);
	controller->error = error;
	controller->xfer = xfer;
	spin_unlock_irqrestore(&controller->lock, flags);

	/* done once all rx bytes have been drained, or on any error */
	if (controller->rx_bytes == xfer->len || error)
		complete(&controller->done);

	return IRQ_HANDLED;
}
334 
335 
/*
 * Program the controller for one transfer: core clock rate, SPI word
 * size, FIFO vs BLOCK transfer mode, SPI mode bits (loopback, clock
 * phase, high-speed) and bits-per-word.  The controller is put into
 * RESET state before the registers are touched.
 *
 * Returns 0 on success, -EIO on any failure.
 */
static int spi_qup_io_config(struct spi_device *spi, struct spi_transfer *xfer)
{
	struct spi_qup *controller = spi_master_get_devdata(spi->master);
	u32 config, iomode, mode;
	int ret, n_words, w_size;

	/* loopback data must fit entirely in the input FIFO */
	if (spi->mode & SPI_LOOP && xfer->len > controller->in_fifo_sz) {
		dev_err(controller->dev, "too big size for loopback %d > %d\n",
			xfer->len, controller->in_fifo_sz);
		return -EIO;
	}

	ret = clk_set_rate(controller->cclk, xfer->speed_hz);
	if (ret) {
		dev_err(controller->dev, "fail to set frequency %d",
			xfer->speed_hz);
		return -EIO;
	}

	if (spi_qup_set_state(controller, QUP_STATE_RESET)) {
		dev_err(controller->dev, "cannot set RESET state\n");
		return -EIO;
	}

	/* bytes per SPI word: 1 for <=8 bpw, 2 for <=16, else 4 */
	w_size = 4;
	if (xfer->bits_per_word <= 8)
		w_size = 1;
	else if (xfer->bits_per_word <= 16)
		w_size = 2;

	n_words = xfer->len / w_size;
	controller->w_size = w_size;

	/* FIFO mode when the transfer fits in the FIFO, else BLOCK mode */
	if (n_words <= (controller->in_fifo_sz / sizeof(u32))) {
		mode = QUP_IO_M_MODE_FIFO;
		writel_relaxed(n_words, controller->base + QUP_MX_READ_CNT);
		writel_relaxed(n_words, controller->base + QUP_MX_WRITE_CNT);
		/* must be zero for FIFO */
		writel_relaxed(0, controller->base + QUP_MX_INPUT_CNT);
		writel_relaxed(0, controller->base + QUP_MX_OUTPUT_CNT);
	} else {
		mode = QUP_IO_M_MODE_BLOCK;
		writel_relaxed(n_words, controller->base + QUP_MX_INPUT_CNT);
		writel_relaxed(n_words, controller->base + QUP_MX_OUTPUT_CNT);
		/* must be zero for BLOCK and BAM */
		writel_relaxed(0, controller->base + QUP_MX_READ_CNT);
		writel_relaxed(0, controller->base + QUP_MX_WRITE_CNT);
	}

	iomode = readl_relaxed(controller->base + QUP_IO_M_MODES);
	/* Set input and output transfer mode */
	iomode &= ~(QUP_IO_M_INPUT_MODE_MASK | QUP_IO_M_OUTPUT_MODE_MASK);
	iomode &= ~(QUP_IO_M_PACK_EN | QUP_IO_M_UNPACK_EN);
	iomode |= (mode << QUP_IO_M_OUTPUT_MODE_MASK_SHIFT);
	iomode |= (mode << QUP_IO_M_INPUT_MODE_MASK_SHIFT);

	writel_relaxed(iomode, controller->base + QUP_IO_M_MODES);

	config = readl_relaxed(controller->base + SPI_CONFIG);

	if (spi->mode & SPI_LOOP)
		config |= SPI_CONFIG_LOOPBACK;
	else
		config &= ~SPI_CONFIG_LOOPBACK;

	if (spi->mode & SPI_CPHA)
		config &= ~SPI_CONFIG_INPUT_FIRST;
	else
		config |= SPI_CONFIG_INPUT_FIRST;

	/*
	 * HS_MODE improves signal stability for spi-clk high rates,
	 * but is invalid in loop back mode.
	 */
	if ((xfer->speed_hz >= SPI_HS_MIN_RATE) && !(spi->mode & SPI_LOOP))
		config |= SPI_CONFIG_HS_MODE;
	else
		config &= ~SPI_CONFIG_HS_MODE;

	writel_relaxed(config, controller->base + SPI_CONFIG);

	/* program bits-per-word (N field holds bpw - 1) and select SPI mode */
	config = readl_relaxed(controller->base + QUP_CONFIG);
	config &= ~(QUP_CONFIG_NO_INPUT | QUP_CONFIG_NO_OUTPUT | QUP_CONFIG_N);
	config |= xfer->bits_per_word - 1;
	config |= QUP_CONFIG_SPI_MODE;
	writel_relaxed(config, controller->base + QUP_CONFIG);

	/* only write to OPERATIONAL_MASK when register is present */
	if (!controller->qup_v1)
		writel_relaxed(0, controller->base + QUP_OPERATIONAL_MASK);
	return 0;
}
429 
spi_qup_transfer_one(struct spi_master * master,struct spi_device * spi,struct spi_transfer * xfer)430 static int spi_qup_transfer_one(struct spi_master *master,
431 			      struct spi_device *spi,
432 			      struct spi_transfer *xfer)
433 {
434 	struct spi_qup *controller = spi_master_get_devdata(master);
435 	unsigned long timeout, flags;
436 	int ret = -EIO;
437 
438 	ret = spi_qup_io_config(spi, xfer);
439 	if (ret)
440 		return ret;
441 
442 	timeout = DIV_ROUND_UP(xfer->speed_hz, MSEC_PER_SEC);
443 	timeout = DIV_ROUND_UP(xfer->len * 8, timeout);
444 	timeout = 100 * msecs_to_jiffies(timeout);
445 
446 	reinit_completion(&controller->done);
447 
448 	spin_lock_irqsave(&controller->lock, flags);
449 	controller->xfer     = xfer;
450 	controller->error    = 0;
451 	controller->rx_bytes = 0;
452 	controller->tx_bytes = 0;
453 	spin_unlock_irqrestore(&controller->lock, flags);
454 
455 	if (spi_qup_set_state(controller, QUP_STATE_RUN)) {
456 		dev_warn(controller->dev, "cannot set RUN state\n");
457 		goto exit;
458 	}
459 
460 	if (spi_qup_set_state(controller, QUP_STATE_PAUSE)) {
461 		dev_warn(controller->dev, "cannot set PAUSE state\n");
462 		goto exit;
463 	}
464 
465 	spi_qup_fifo_write(controller, xfer);
466 
467 	if (spi_qup_set_state(controller, QUP_STATE_RUN)) {
468 		dev_warn(controller->dev, "cannot set EXECUTE state\n");
469 		goto exit;
470 	}
471 
472 	if (!wait_for_completion_timeout(&controller->done, timeout))
473 		ret = -ETIMEDOUT;
474 exit:
475 	spi_qup_set_state(controller, QUP_STATE_RESET);
476 	spin_lock_irqsave(&controller->lock, flags);
477 	controller->xfer = NULL;
478 	if (!ret)
479 		ret = controller->error;
480 	spin_unlock_irqrestore(&controller->lock, flags);
481 	return ret;
482 }
483 
/*
 * Probe: map registers, acquire clocks and IRQ, read the FIFO/block
 * geometry from QUP_IO_M_MODES, reset and quiesce the controller, then
 * register the SPI master with runtime PM enabled.
 */
static int spi_qup_probe(struct platform_device *pdev)
{
	struct spi_master *master;
	struct clk *iclk, *cclk;
	struct spi_qup *controller;
	struct resource *res;
	struct device *dev;
	void __iomem *base;
	u32 max_freq, iomode, num_cs;
	int ret, irq, size;

	dev = &pdev->dev;
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	cclk = devm_clk_get(dev, "core");
	if (IS_ERR(cclk))
		return PTR_ERR(cclk);

	iclk = devm_clk_get(dev, "iface");
	if (IS_ERR(iclk))
		return PTR_ERR(iclk);

	/* This is optional parameter */
	if (of_property_read_u32(dev->of_node, "spi-max-frequency", &max_freq))
		max_freq = SPI_MAX_RATE;

	if (!max_freq || max_freq > SPI_MAX_RATE) {
		dev_err(dev, "invalid clock frequency %d\n", max_freq);
		return -ENXIO;
	}

	ret = clk_prepare_enable(cclk);
	if (ret) {
		dev_err(dev, "cannot enable core clock\n");
		return ret;
	}

	ret = clk_prepare_enable(iclk);
	if (ret) {
		clk_disable_unprepare(cclk);
		dev_err(dev, "cannot enable iface clock\n");
		return ret;
	}

	master = spi_alloc_master(dev, sizeof(struct spi_qup));
	if (!master) {
		clk_disable_unprepare(cclk);
		clk_disable_unprepare(iclk);
		dev_err(dev, "cannot allocate master\n");
		return -ENOMEM;
	}

	/* use num-cs unless not present or out of range */
	if (of_property_read_u32(dev->of_node, "num-cs", &num_cs) ||
	    num_cs > SPI_NUM_CHIPSELECTS)
		master->num_chipselect = SPI_NUM_CHIPSELECTS;
	else
		master->num_chipselect = num_cs;

	master->bus_num = pdev->id;
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP;
	master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
	master->max_speed_hz = max_freq;
	master->transfer_one = spi_qup_transfer_one;
	master->dev.of_node = pdev->dev.of_node;
	master->auto_runtime_pm = true;

	platform_set_drvdata(pdev, master);

	controller = spi_master_get_devdata(master);

	controller->dev = dev;
	controller->base = base;
	controller->iclk = iclk;
	controller->cclk = cclk;
	controller->irq = irq;

	/* set v1 flag if device is version 1 */
	if (of_device_is_compatible(dev->of_node, "qcom,spi-qup-v1.1.1"))
		controller->qup_v1 = 1;

	spin_lock_init(&controller->lock);
	init_completion(&controller->done);

	/* read back the hardware's FIFO/block geometry */
	iomode = readl_relaxed(base + QUP_IO_M_MODES);

	size = QUP_IO_M_OUTPUT_BLOCK_SIZE(iomode);
	if (size)
		controller->out_blk_sz = size * 16;
	else
		controller->out_blk_sz = 4;

	size = QUP_IO_M_INPUT_BLOCK_SIZE(iomode);
	if (size)
		controller->in_blk_sz = size * 16;
	else
		controller->in_blk_sz = 4;

	size = QUP_IO_M_OUTPUT_FIFO_SIZE(iomode);
	controller->out_fifo_sz = controller->out_blk_sz * (2 << size);

	size = QUP_IO_M_INPUT_FIFO_SIZE(iomode);
	controller->in_fifo_sz = controller->in_blk_sz * (2 << size);

	dev_info(dev, "IN:block:%d, fifo:%d, OUT:block:%d, fifo:%d\n",
		 controller->in_blk_sz, controller->in_fifo_sz,
		 controller->out_blk_sz, controller->out_fifo_sz);

	/* reset the core and bring it to a known quiescent state */
	writel_relaxed(1, base + QUP_SW_RESET);

	ret = spi_qup_set_state(controller, QUP_STATE_RESET);
	if (ret) {
		dev_err(dev, "cannot set RESET state\n");
		goto error;
	}

	writel_relaxed(0, base + QUP_OPERATIONAL);
	writel_relaxed(0, base + QUP_IO_M_MODES);

	if (!controller->qup_v1)
		writel_relaxed(0, base + QUP_OPERATIONAL_MASK);

	writel_relaxed(SPI_ERROR_CLK_UNDER_RUN | SPI_ERROR_CLK_OVER_RUN,
		       base + SPI_ERROR_FLAGS_EN);

	/* if earlier version of the QUP, disable INPUT_OVERRUN */
	if (controller->qup_v1)
		writel_relaxed(QUP_ERROR_OUTPUT_OVER_RUN |
			QUP_ERROR_INPUT_UNDER_RUN | QUP_ERROR_OUTPUT_UNDER_RUN,
			base + QUP_ERROR_FLAGS_EN);

	writel_relaxed(0, base + SPI_CONFIG);
	writel_relaxed(SPI_IO_C_NO_TRI_STATE, base + SPI_IO_CONTROL);

	ret = devm_request_irq(dev, irq, spi_qup_qup_irq,
			       IRQF_TRIGGER_HIGH, pdev->name, controller);
	if (ret)
		goto error;

	pm_runtime_set_autosuspend_delay(dev, MSEC_PER_SEC);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_set_active(dev);
	pm_runtime_enable(dev);

	ret = devm_spi_register_master(dev, master);
	if (ret)
		goto disable_pm;

	return 0;

disable_pm:
	pm_runtime_disable(&pdev->dev);
error:
	clk_disable_unprepare(cclk);
	clk_disable_unprepare(iclk);
	spi_master_put(master);
	return ret;
}
649 
650 #ifdef CONFIG_PM_RUNTIME
spi_qup_pm_suspend_runtime(struct device * device)651 static int spi_qup_pm_suspend_runtime(struct device *device)
652 {
653 	struct spi_master *master = dev_get_drvdata(device);
654 	struct spi_qup *controller = spi_master_get_devdata(master);
655 	u32 config;
656 
657 	/* Enable clocks auto gaiting */
658 	config = readl(controller->base + QUP_CONFIG);
659 	config |= QUP_CONFIG_CLOCK_AUTO_GATE;
660 	writel_relaxed(config, controller->base + QUP_CONFIG);
661 	return 0;
662 }
663 
spi_qup_pm_resume_runtime(struct device * device)664 static int spi_qup_pm_resume_runtime(struct device *device)
665 {
666 	struct spi_master *master = dev_get_drvdata(device);
667 	struct spi_qup *controller = spi_master_get_devdata(master);
668 	u32 config;
669 
670 	/* Disable clocks auto gaiting */
671 	config = readl_relaxed(controller->base + QUP_CONFIG);
672 	config &= ~QUP_CONFIG_CLOCK_AUTO_GATE;
673 	writel_relaxed(config, controller->base + QUP_CONFIG);
674 	return 0;
675 }
676 #endif /* CONFIG_PM_RUNTIME */
677 
678 #ifdef CONFIG_PM_SLEEP
spi_qup_suspend(struct device * device)679 static int spi_qup_suspend(struct device *device)
680 {
681 	struct spi_master *master = dev_get_drvdata(device);
682 	struct spi_qup *controller = spi_master_get_devdata(master);
683 	int ret;
684 
685 	ret = spi_master_suspend(master);
686 	if (ret)
687 		return ret;
688 
689 	ret = spi_qup_set_state(controller, QUP_STATE_RESET);
690 	if (ret)
691 		return ret;
692 
693 	clk_disable_unprepare(controller->cclk);
694 	clk_disable_unprepare(controller->iclk);
695 	return 0;
696 }
697 
spi_qup_resume(struct device * device)698 static int spi_qup_resume(struct device *device)
699 {
700 	struct spi_master *master = dev_get_drvdata(device);
701 	struct spi_qup *controller = spi_master_get_devdata(master);
702 	int ret;
703 
704 	ret = clk_prepare_enable(controller->iclk);
705 	if (ret)
706 		return ret;
707 
708 	ret = clk_prepare_enable(controller->cclk);
709 	if (ret)
710 		return ret;
711 
712 	ret = spi_qup_set_state(controller, QUP_STATE_RESET);
713 	if (ret)
714 		return ret;
715 
716 	return spi_master_resume(master);
717 }
718 #endif /* CONFIG_PM_SLEEP */
719 
spi_qup_remove(struct platform_device * pdev)720 static int spi_qup_remove(struct platform_device *pdev)
721 {
722 	struct spi_master *master = dev_get_drvdata(&pdev->dev);
723 	struct spi_qup *controller = spi_master_get_devdata(master);
724 	int ret;
725 
726 	ret = pm_runtime_get_sync(&pdev->dev);
727 	if (ret < 0)
728 		return ret;
729 
730 	ret = spi_qup_set_state(controller, QUP_STATE_RESET);
731 	if (ret)
732 		return ret;
733 
734 	clk_disable_unprepare(controller->cclk);
735 	clk_disable_unprepare(controller->iclk);
736 
737 	pm_runtime_put_noidle(&pdev->dev);
738 	pm_runtime_disable(&pdev->dev);
739 	return 0;
740 }
741 
/* Devicetree match table; v1.1.1 triggers the qup_v1 quirks in probe */
static const struct of_device_id spi_qup_dt_match[] = {
	{ .compatible = "qcom,spi-qup-v1.1.1", },
	{ .compatible = "qcom,spi-qup-v2.1.1", },
	{ .compatible = "qcom,spi-qup-v2.2.1", },
	{ }
};
MODULE_DEVICE_TABLE(of, spi_qup_dt_match);

/* System-sleep and runtime PM hooks defined in the CONFIG_PM_* sections above */
static const struct dev_pm_ops spi_qup_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(spi_qup_suspend, spi_qup_resume)
	SET_RUNTIME_PM_OPS(spi_qup_pm_suspend_runtime,
			   spi_qup_pm_resume_runtime,
			   NULL)
};

static struct platform_driver spi_qup_driver = {
	.driver = {
		.name		= "spi_qup",
		.owner		= THIS_MODULE,
		.pm		= &spi_qup_dev_pm_ops,
		.of_match_table = spi_qup_dt_match,
	},
	.probe = spi_qup_probe,
	.remove = spi_qup_remove,
};
module_platform_driver(spi_qup_driver);

MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:spi_qup");
771