// SPDX-License-Identifier: GPL-2.0
/*
 * Driver for Atmel QSPI Controller
 *
 * Copyright (C) 2015 Atmel Corporation
 * Copyright (C) 2018 Cryptera A/S
 *
 * Author: Cyrille Pitchen <cyrille.pitchen@atmel.com>
 * Author: Piotr Bugalski <bugalski.piotr@gmail.com>
 *
 * This driver is based on drivers/mtd/spi-nor/fsl-quadspi.c from Freescale.
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi-mem.h>

/* QSPI register offsets */
#define QSPI_CR 0x0000 /* Control Register */
#define QSPI_MR 0x0004 /* Mode Register */
#define QSPI_RD 0x0008 /* Receive Data Register */
#define QSPI_TD 0x000c /* Transmit Data Register */
#define QSPI_SR 0x0010 /* Status Register */
#define QSPI_IER 0x0014 /* Interrupt Enable Register */
#define QSPI_IDR 0x0018 /* Interrupt Disable Register */
#define QSPI_IMR 0x001c /* Interrupt Mask Register */
#define QSPI_SCR 0x0020 /* Serial Clock Register */

#define QSPI_IAR 0x0030 /* Instruction Address Register */
#define QSPI_ICR 0x0034 /* Instruction Code Register */
#define QSPI_WICR 0x0034 /* Write Instruction Code Register */
#define QSPI_IFR 0x0038 /* Instruction Frame Register */
#define QSPI_RICR 0x003C /* Read Instruction Code Register */

#define QSPI_SMR 0x0040 /* Scrambling Mode Register */
#define QSPI_SKR 0x0044 /* Scrambling Key Register */

#define QSPI_WPMR 0x00E4 /* Write Protection Mode Register */
#define QSPI_WPSR 0x00E8 /* Write Protection Status Register */

#define QSPI_VERSION 0x00FC /* Version Register */

/* Bitfields in QSPI_CR (Control Register) */
#define QSPI_CR_QSPIEN BIT(0)
#define QSPI_CR_QSPIDIS BIT(1)
#define QSPI_CR_SWRST BIT(7)
#define QSPI_CR_LASTXFER BIT(24)

/* Bitfields in QSPI_MR (Mode Register) */
#define QSPI_MR_SMM BIT(0)
#define QSPI_MR_LLB BIT(1)
#define QSPI_MR_WDRBT BIT(2)
#define QSPI_MR_SMRM BIT(3)
#define QSPI_MR_CSMODE_MASK GENMASK(5, 4)
#define QSPI_MR_CSMODE_NOT_RELOADED (0 << 4)
#define QSPI_MR_CSMODE_LASTXFER (1 << 4)
#define QSPI_MR_CSMODE_SYSTEMATICALLY (2 << 4)
#define QSPI_MR_NBBITS_MASK GENMASK(11, 8)
#define QSPI_MR_NBBITS(n) ((((n) - 8) << 8) & QSPI_MR_NBBITS_MASK)
#define QSPI_MR_DLYBCT_MASK GENMASK(23, 16)
#define QSPI_MR_DLYBCT(n) (((n) << 16) & QSPI_MR_DLYBCT_MASK)
#define QSPI_MR_DLYCS_MASK GENMASK(31, 24)
#define QSPI_MR_DLYCS(n) (((n) << 24) & QSPI_MR_DLYCS_MASK)

/* Bitfields in QSPI_SR/QSPI_IER/QSPI_IDR/QSPI_IMR */
#define QSPI_SR_RDRF BIT(0)
#define QSPI_SR_TDRE BIT(1)
#define QSPI_SR_TXEMPTY BIT(2)
#define QSPI_SR_OVRES BIT(3)
#define QSPI_SR_CSR BIT(8)
#define QSPI_SR_CSS BIT(9)
#define QSPI_SR_INSTRE BIT(10)
#define QSPI_SR_QSPIENS BIT(24)

#define QSPI_SR_CMD_COMPLETED (QSPI_SR_INSTRE | QSPI_SR_CSR)

/* Bitfields in QSPI_SCR (Serial Clock Register) */
#define QSPI_SCR_CPOL BIT(0)
#define QSPI_SCR_CPHA BIT(1)
#define QSPI_SCR_SCBR_MASK GENMASK(15, 8)
#define QSPI_SCR_SCBR(n) (((n) << 8) & QSPI_SCR_SCBR_MASK)
#define QSPI_SCR_DLYBS_MASK GENMASK(23, 16)
#define QSPI_SCR_DLYBS(n) (((n) << 16) & QSPI_SCR_DLYBS_MASK)

/* Bitfields in QSPI_ICR (Read/Write Instruction Code Register) */
#define QSPI_ICR_INST_MASK GENMASK(7, 0)
#define QSPI_ICR_INST(inst) (((inst) << 0) & QSPI_ICR_INST_MASK)
#define QSPI_ICR_OPT_MASK GENMASK(23, 16)
#define QSPI_ICR_OPT(opt) (((opt) << 16) & QSPI_ICR_OPT_MASK)

/* Bitfields in QSPI_IFR (Instruction Frame Register) */
#define QSPI_IFR_WIDTH_MASK GENMASK(2, 0)
#define QSPI_IFR_WIDTH_SINGLE_BIT_SPI (0 << 0)
#define QSPI_IFR_WIDTH_DUAL_OUTPUT (1 << 0)
#define QSPI_IFR_WIDTH_QUAD_OUTPUT (2 << 0)
#define QSPI_IFR_WIDTH_DUAL_IO (3 << 0)
#define QSPI_IFR_WIDTH_QUAD_IO (4 << 0)
#define QSPI_IFR_WIDTH_DUAL_CMD (5 << 0)
#define QSPI_IFR_WIDTH_QUAD_CMD (6 << 0)
#define QSPI_IFR_INSTEN BIT(4)
#define QSPI_IFR_ADDREN BIT(5)
#define QSPI_IFR_OPTEN BIT(6)
#define QSPI_IFR_DATAEN BIT(7)
#define QSPI_IFR_OPTL_MASK GENMASK(9, 8)
#define QSPI_IFR_OPTL_1BIT (0 << 8)
#define QSPI_IFR_OPTL_2BIT (1 << 8)
#define QSPI_IFR_OPTL_4BIT (2 << 8)
#define QSPI_IFR_OPTL_8BIT (3 << 8)
#define QSPI_IFR_ADDRL BIT(10)
#define QSPI_IFR_TFRTYP_MEM BIT(12)
#define QSPI_IFR_SAMA5D2_WRITE_TRSFR BIT(13)
#define QSPI_IFR_CRM BIT(14)
#define QSPI_IFR_NBDUM_MASK GENMASK(20, 16)
#define QSPI_IFR_NBDUM(n) (((n) << 16) & QSPI_IFR_NBDUM_MASK)
#define QSPI_IFR_APBTFRTYP_READ BIT(24) /* Defined in SAM9X60 */

/* Bitfields in QSPI_SMR (Scrambling Mode Register) */
#define QSPI_SMR_SCREN BIT(0)
#define QSPI_SMR_RVDIS BIT(1)

/* Bitfields in QSPI_WPMR (Write Protection Mode Register) */
#define QSPI_WPMR_WPEN BIT(0)
#define QSPI_WPMR_WPKEY_MASK GENMASK(31, 8)
#define QSPI_WPMR_WPKEY(wpkey) (((wpkey) << 8) & QSPI_WPMR_WPKEY_MASK)

/* Bitfields in QSPI_WPSR (Write Protection Status Register) */
#define QSPI_WPSR_WPVS BIT(0)
#define QSPI_WPSR_WPVSRC_MASK GENMASK(15, 8)
#define QSPI_WPSR_WPVSRC(src) (((src) << 8) & QSPI_WPSR_WPVSRC_MASK)

#define ATMEL_QSPI_TIMEOUT 1000 /* ms */

struct atmel_qspi_caps {
        bool has_qspick;
        bool has_ricr;
};

struct atmel_qspi_ops;

struct atmel_qspi {
        void __iomem *regs;
        void __iomem *mem;
        struct clk *pclk;
        struct clk *qspick;
        struct platform_device *pdev;
        const struct atmel_qspi_caps *caps;
        const struct atmel_qspi_ops *ops;
        resource_size_t mmap_size;
        u32 pending;
        u32 irq_mask;
        u32 mr;
        u32 scr;
        struct completion cmd_completion;
};

struct atmel_qspi_ops {
        int (*set_cfg)(struct atmel_qspi *aq, const struct spi_mem_op *op,
                       u32 *offset);
        int (*transfer)(struct spi_mem *mem, const struct spi_mem_op *op,
                        u32 offset);
};

struct atmel_qspi_mode {
        u8 cmd_buswidth;
        u8 addr_buswidth;
        u8 data_buswidth;
        u32 config;
};

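/*
 * Supported bus-width combinations, in (command, address, data) order, and
 * the matching WIDTH encoding programmed into the Instruction Frame Register.
 */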
static const struct atmel_qspi_mode atmel_qspi_modes[] = {
        { 1, 1, 1, QSPI_IFR_WIDTH_SINGLE_BIT_SPI },
        { 1, 1, 2, QSPI_IFR_WIDTH_DUAL_OUTPUT },
        { 1, 1, 4, QSPI_IFR_WIDTH_QUAD_OUTPUT },
        { 1, 2, 2, QSPI_IFR_WIDTH_DUAL_IO },
        { 1, 4, 4, QSPI_IFR_WIDTH_QUAD_IO },
        { 2, 2, 2, QSPI_IFR_WIDTH_DUAL_CMD },
        { 4, 4, 4, QSPI_IFR_WIDTH_QUAD_CMD },
};

#ifdef VERBOSE_DEBUG
static const char *atmel_qspi_reg_name(u32 offset, char *tmp, size_t sz)
{
        switch (offset) {
        case QSPI_CR:
                return "CR";
        case QSPI_MR:
                return "MR";
        case QSPI_RD:
                return "RD";
        case QSPI_TD:
                return "TD";
        case QSPI_SR:
                return "SR";
        case QSPI_IER:
                return "IER";
        case QSPI_IDR:
                return "IDR";
        case QSPI_IMR:
                return "IMR";
        case QSPI_SCR:
                return "SCR";
        case QSPI_IAR:
                return "IAR";
        case QSPI_ICR:
                return "ICR/WICR";
        case QSPI_IFR:
                return "IFR";
        case QSPI_RICR:
                return "RICR";
        case QSPI_SMR:
                return "SMR";
        case QSPI_SKR:
                return "SKR";
        case QSPI_WPMR:
                return "WPMR";
        case QSPI_WPSR:
                return "WPSR";
        case QSPI_VERSION:
                return "VERSION";
        default:
                snprintf(tmp, sz, "0x%02x", offset);
                break;
        }

        return tmp;
}
#endif /* VERBOSE_DEBUG */

static u32 atmel_qspi_read(struct atmel_qspi *aq, u32 offset)
{
        u32 value = readl_relaxed(aq->regs + offset);

#ifdef VERBOSE_DEBUG
        char tmp[8];

        dev_vdbg(&aq->pdev->dev, "read 0x%08x from %s\n", value,
                 atmel_qspi_reg_name(offset, tmp, sizeof(tmp)));
#endif /* VERBOSE_DEBUG */

        return value;
}

static void atmel_qspi_write(u32 value, struct atmel_qspi *aq, u32 offset)
{
#ifdef VERBOSE_DEBUG
        char tmp[8];

        dev_vdbg(&aq->pdev->dev, "write 0x%08x into %s\n", value,
                 atmel_qspi_reg_name(offset, tmp, sizeof(tmp)));
#endif /* VERBOSE_DEBUG */

        writel_relaxed(value, aq->regs + offset);
}

static inline bool atmel_qspi_is_compatible(const struct spi_mem_op *op,
                                            const struct atmel_qspi_mode *mode)
{
        if (op->cmd.buswidth != mode->cmd_buswidth)
                return false;

        if (op->addr.nbytes && op->addr.buswidth != mode->addr_buswidth)
                return false;

        if (op->data.nbytes && op->data.buswidth != mode->data_buswidth)
                return false;

        return true;
}

static int atmel_qspi_find_mode(const struct spi_mem_op *op)
{
        u32 i;

        for (i = 0; i < ARRAY_SIZE(atmel_qspi_modes); i++)
                if (atmel_qspi_is_compatible(op, &atmel_qspi_modes[i]))
                        return i;

        return -EOPNOTSUPP;
}

static bool atmel_qspi_supports_op(struct spi_mem *mem,
                                   const struct spi_mem_op *op)
{
        if (!spi_mem_default_supports_op(mem, op))
                return false;

        if (atmel_qspi_find_mode(op) < 0)
                return false;

        /*
         * Special case not supported by the hardware: a 2-byte address sent
         * on a different buswidth than the opcode requires at least one byte
         * worth of dummy cycles (see atmel_qspi_set_cfg()).
         */
        if (op->addr.nbytes == 2 && op->cmd.buswidth != op->addr.buswidth &&
            op->dummy.nbytes == 0)
                return false;

        return true;
}

static int atmel_qspi_set_cfg(struct atmel_qspi *aq,
                              const struct spi_mem_op *op, u32 *offset)
{
        u32 iar, icr, ifr;
        u32 dummy_cycles = 0;
        int mode;

        iar = 0;
        icr = QSPI_ICR_INST(op->cmd.opcode);
        ifr = QSPI_IFR_INSTEN;

        mode = atmel_qspi_find_mode(op);
        if (mode < 0)
                return mode;
        ifr |= atmel_qspi_modes[mode].config;

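        /*
         * op->dummy.nbytes counts bytes clocked out on op->dummy.buswidth
         * lines; convert that into a number of dummy SCK cycles.
         */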
        if (op->dummy.nbytes)
                dummy_cycles = op->dummy.nbytes * 8 / op->dummy.buswidth;

        /*
         * The controller allows 24- and 32-bit addressing, while NAND flash
         * requires 16-bit addresses. 8-bit addresses are handled through the
         * option field. For 16-bit addresses, the workaround depends on the
         * number of requested dummy cycles: if there are 8 or more, the
         * address is shifted up and sent together with the first dummy byte;
         * otherwise the opcode field is disabled and the first byte of the
         * address carries the command opcode (which only works if the opcode
         * and address use the same buswidth). The remaining limitation is a
         * 16-bit address without enough dummy cycles when the opcode uses a
         * different buswidth than the address.
         */
        if (op->addr.buswidth) {
                switch (op->addr.nbytes) {
                case 0:
                        break;
                case 1:
                        ifr |= QSPI_IFR_OPTEN | QSPI_IFR_OPTL_8BIT;
                        icr |= QSPI_ICR_OPT(op->addr.val & 0xff);
                        break;
                case 2:
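                        /*
                         * With fewer dummy cycles than one address-buswidth
                         * byte takes, send the opcode as the most significant
                         * address byte; otherwise shift the 16-bit address
                         * into the 24-bit field and let the spare byte absorb
                         * one byte worth of dummy cycles.
                         */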
                        if (dummy_cycles < 8 / op->addr.buswidth) {
                                ifr &= ~QSPI_IFR_INSTEN;
                                ifr |= QSPI_IFR_ADDREN;
                                iar = (op->cmd.opcode << 16) |
                                      (op->addr.val & 0xffff);
                        } else {
                                ifr |= QSPI_IFR_ADDREN;
                                iar = (op->addr.val << 8) & 0xffffff;
                                dummy_cycles -= 8 / op->addr.buswidth;
                        }
                        break;
                case 3:
                        ifr |= QSPI_IFR_ADDREN;
                        iar = op->addr.val & 0xffffff;
                        break;
                case 4:
                        ifr |= QSPI_IFR_ADDREN | QSPI_IFR_ADDRL;
                        iar = op->addr.val & 0x7ffffff;
                        break;
                default:
                        return -EOPNOTSUPP;
                }
        }

        /* offset of the data access in the QSPI memory space */
        *offset = iar;

        /* Set number of dummy cycles */
        if (dummy_cycles)
                ifr |= QSPI_IFR_NBDUM(dummy_cycles);

        /* Set data enable and data transfer type. */
        if (op->data.nbytes) {
                ifr |= QSPI_IFR_DATAEN;

                if (op->addr.nbytes)
                        ifr |= QSPI_IFR_TFRTYP_MEM;
        }

        /*
         * If the QSPI controller is set in regular SPI mode, set it in
         * Serial Memory Mode (SMM).
         */
        if (!(aq->mr & QSPI_MR_SMM)) {
                aq->mr |= QSPI_MR_SMM;
                atmel_qspi_write(aq->mr, aq, QSPI_MR);
        }

        /* Clear pending interrupts */
        (void)atmel_qspi_read(aq, QSPI_SR);

        /* Set QSPI Instruction Frame registers. */
        if (op->addr.nbytes && !op->data.nbytes)
                atmel_qspi_write(iar, aq, QSPI_IAR);

        if (aq->caps->has_ricr) {
                if (op->data.dir == SPI_MEM_DATA_IN)
                        atmel_qspi_write(icr, aq, QSPI_RICR);
                else
                        atmel_qspi_write(icr, aq, QSPI_WICR);
        } else {
                if (op->data.nbytes && op->data.dir == SPI_MEM_DATA_OUT)
                        ifr |= QSPI_IFR_SAMA5D2_WRITE_TRSFR;

                atmel_qspi_write(icr, aq, QSPI_ICR);
        }

        atmel_qspi_write(ifr, aq, QSPI_IFR);

        return 0;
}

static int atmel_qspi_wait_for_completion(struct atmel_qspi *aq, u32 irq_mask)
{
        int err = 0;
        u32 sr;

        /* Poll INSTRuction End status */
        sr = atmel_qspi_read(aq, QSPI_SR);
        if ((sr & irq_mask) == irq_mask)
                return 0;

        /* Wait for INSTRuction End interrupt */
        reinit_completion(&aq->cmd_completion);
        aq->pending = sr & irq_mask;
        aq->irq_mask = irq_mask;
        atmel_qspi_write(irq_mask, aq, QSPI_IER);
        if (!wait_for_completion_timeout(&aq->cmd_completion,
                                         msecs_to_jiffies(ATMEL_QSPI_TIMEOUT)))
                err = -ETIMEDOUT;
        atmel_qspi_write(irq_mask, aq, QSPI_IDR);

        return err;
}

static int atmel_qspi_transfer(struct spi_mem *mem,
                               const struct spi_mem_op *op, u32 offset)
{
        struct atmel_qspi *aq = spi_controller_get_devdata(mem->spi->controller);

        /* Skip to the final steps if there is no data */
        if (!op->data.nbytes)
                return atmel_qspi_wait_for_completion(aq,
                                                      QSPI_SR_CMD_COMPLETED);

        /* Dummy read of QSPI_IFR to synchronize APB and AHB accesses */
        (void)atmel_qspi_read(aq, QSPI_IFR);

        /* Send/Receive data */
        if (op->data.dir == SPI_MEM_DATA_IN) {
                memcpy_fromio(op->data.buf.in, aq->mem + offset,
                              op->data.nbytes);

                /* Synchronize AHB and APB accesses again */
                rmb();
        } else {
                memcpy_toio(aq->mem + offset, op->data.buf.out,
                            op->data.nbytes);

                /* Synchronize AHB and APB accesses again */
                wmb();
        }

        /* Release the chip-select */
        atmel_qspi_write(QSPI_CR_LASTXFER, aq, QSPI_CR);

        return atmel_qspi_wait_for_completion(aq, QSPI_SR_CMD_COMPLETED);
}

static int atmel_qspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
        struct atmel_qspi *aq = spi_controller_get_devdata(mem->spi->controller);
        u32 offset;
        int err;

        /*
         * Check if the address exceeds the MMIO window size. An improvement
         * would be to add support for regular SPI mode and fall back to it
         * when the flash memories overrun the controller's memory space.
         */
        if (op->addr.val + op->data.nbytes > aq->mmap_size)
                return -EOPNOTSUPP;

        if (op->addr.nbytes > 4)
                return -EOPNOTSUPP;

        err = pm_runtime_resume_and_get(&aq->pdev->dev);
        if (err < 0)
                return err;

        err = aq->ops->set_cfg(aq, op, &offset);
        if (err)
                goto pm_runtime_put;

        err = aq->ops->transfer(mem, op, offset);

pm_runtime_put:
        pm_runtime_mark_last_busy(&aq->pdev->dev);
        pm_runtime_put_autosuspend(&aq->pdev->dev);
        return err;
}

static const char *atmel_qspi_get_name(struct spi_mem *spimem)
{
        return dev_name(spimem->spi->dev.parent);
}

static const struct spi_controller_mem_ops atmel_qspi_mem_ops = {
        .supports_op = atmel_qspi_supports_op,
        .exec_op = atmel_qspi_exec_op,
        .get_name = atmel_qspi_get_name
};

static int atmel_qspi_setup(struct spi_device *spi)
{
        struct spi_controller *ctrl = spi->controller;
        struct atmel_qspi *aq = spi_controller_get_devdata(ctrl);
        unsigned long src_rate;
        u32 scbr;
        int ret;

        if (ctrl->busy)
                return -EBUSY;

        if (!spi->max_speed_hz)
                return -EINVAL;

        src_rate = clk_get_rate(aq->pclk);
        if (!src_rate)
                return -EINVAL;

        /* Compute the QSPI baudrate */
        scbr = DIV_ROUND_UP(src_rate, spi->max_speed_hz);
        if (scbr > 0)
                scbr--;
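        /*
         * SCBR holds the clock divisor minus one (SCK = pclk / (SCBR + 1)),
         * hence the round-up followed by the decrement above, so the
         * resulting rate never exceeds the requested max_speed_hz.
         */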

        ret = pm_runtime_resume_and_get(ctrl->dev.parent);
        if (ret < 0)
                return ret;

        aq->scr &= ~QSPI_SCR_SCBR_MASK;
        aq->scr |= QSPI_SCR_SCBR(scbr);
        atmel_qspi_write(aq->scr, aq, QSPI_SCR);

        pm_runtime_mark_last_busy(ctrl->dev.parent);
        pm_runtime_put_autosuspend(ctrl->dev.parent);

        return 0;
}

static int atmel_qspi_set_cs_timing(struct spi_device *spi)
{
        struct spi_controller *ctrl = spi->controller;
        struct atmel_qspi *aq = spi_controller_get_devdata(ctrl);
        unsigned long clk_rate;
        u32 cs_setup;
        int delay;
        int ret;

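        /*
         * spi_delay_to_ns() returns the requested chip-select setup time in
         * nanoseconds, zero if none was requested, or a negative errno.
         */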
        delay = spi_delay_to_ns(&spi->cs_setup, NULL);
        if (delay <= 0)
                return delay;

        clk_rate = clk_get_rate(aq->pclk);
        if (!clk_rate)
                return -EINVAL;

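        /* Convert the setup delay from nanoseconds to peripheral clock cycles. */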
        cs_setup = DIV_ROUND_UP((delay * DIV_ROUND_UP(clk_rate, 1000000)),
                                1000);

        ret = pm_runtime_resume_and_get(ctrl->dev.parent);
        if (ret < 0)
                return ret;

        aq->scr &= ~QSPI_SCR_DLYBS_MASK;
        aq->scr |= QSPI_SCR_DLYBS(cs_setup);
        atmel_qspi_write(aq->scr, aq, QSPI_SCR);

        pm_runtime_mark_last_busy(ctrl->dev.parent);
        pm_runtime_put_autosuspend(ctrl->dev.parent);

        return 0;
}

static void atmel_qspi_init(struct atmel_qspi *aq)
{
        /* Reset the QSPI controller */
        atmel_qspi_write(QSPI_CR_SWRST, aq, QSPI_CR);

        /* Set the QSPI controller by default in Serial Memory Mode */
        aq->mr |= QSPI_MR_SMM;
        atmel_qspi_write(aq->mr, aq, QSPI_MR);

        /* Enable the QSPI controller */
        atmel_qspi_write(QSPI_CR_QSPIEN, aq, QSPI_CR);
}

static irqreturn_t atmel_qspi_interrupt(int irq, void *dev_id)
{
        struct atmel_qspi *aq = dev_id;
        u32 status, mask, pending;

        status = atmel_qspi_read(aq, QSPI_SR);
        mask = atmel_qspi_read(aq, QSPI_IMR);
        pending = status & mask;

        if (!pending)
                return IRQ_NONE;

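        /*
         * Status flags may arrive across several interrupts; accumulate them
         * and only complete the command once every bit in irq_mask was seen.
         */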
        aq->pending |= pending;
        if ((aq->pending & aq->irq_mask) == aq->irq_mask)
                complete(&aq->cmd_completion);

        return IRQ_HANDLED;
}

static const struct atmel_qspi_ops atmel_qspi_ops = {
        .set_cfg = atmel_qspi_set_cfg,
        .transfer = atmel_qspi_transfer,
};

static int atmel_qspi_probe(struct platform_device *pdev)
{
        struct spi_controller *ctrl;
        struct atmel_qspi *aq;
        struct resource *res;
        int irq, err = 0;

        ctrl = devm_spi_alloc_host(&pdev->dev, sizeof(*aq));
        if (!ctrl)
                return -ENOMEM;

        ctrl->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD | SPI_TX_DUAL | SPI_TX_QUAD;
        ctrl->setup = atmel_qspi_setup;
        ctrl->set_cs_timing = atmel_qspi_set_cs_timing;
        ctrl->bus_num = -1;
        ctrl->mem_ops = &atmel_qspi_mem_ops;
        ctrl->num_chipselect = 1;
        ctrl->dev.of_node = pdev->dev.of_node;
        platform_set_drvdata(pdev, ctrl);

        aq = spi_controller_get_devdata(ctrl);

        init_completion(&aq->cmd_completion);
        aq->pdev = pdev;
        aq->ops = &atmel_qspi_ops;

        /* Map the registers */
        aq->regs = devm_platform_ioremap_resource_byname(pdev, "qspi_base");
        if (IS_ERR(aq->regs))
                return dev_err_probe(&pdev->dev, PTR_ERR(aq->regs),
                                     "missing registers\n");

        /* Map the AHB memory */
        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qspi_mmap");
        aq->mem = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(aq->mem))
                return dev_err_probe(&pdev->dev, PTR_ERR(aq->mem),
                                     "missing AHB memory\n");

        aq->mmap_size = resource_size(res);

        /* Get the peripheral clock */
        aq->pclk = devm_clk_get(&pdev->dev, "pclk");
        if (IS_ERR(aq->pclk))
                aq->pclk = devm_clk_get(&pdev->dev, NULL);

        if (IS_ERR(aq->pclk))
                return dev_err_probe(&pdev->dev, PTR_ERR(aq->pclk),
                                     "missing peripheral clock\n");

        /* Enable the peripheral clock */
        err = clk_prepare_enable(aq->pclk);
        if (err)
                return dev_err_probe(&pdev->dev, err,
                                     "failed to enable the peripheral clock\n");

        aq->caps = of_device_get_match_data(&pdev->dev);
        if (!aq->caps) {
                dev_err(&pdev->dev, "Could not retrieve QSPI caps\n");
                err = -EINVAL;
                goto disable_pclk;
        }

        if (aq->caps->has_qspick) {
                /* Get the QSPI system clock */
                aq->qspick = devm_clk_get(&pdev->dev, "qspick");
                if (IS_ERR(aq->qspick)) {
                        dev_err(&pdev->dev, "missing system clock\n");
                        err = PTR_ERR(aq->qspick);
                        goto disable_pclk;
                }

                /* Enable the QSPI system clock */
                err = clk_prepare_enable(aq->qspick);
                if (err) {
                        dev_err(&pdev->dev,
                                "failed to enable the QSPI system clock\n");
                        goto disable_pclk;
                }
        }

        /* Request the IRQ */
        irq = platform_get_irq(pdev, 0);
        if (irq < 0) {
                err = irq;
                goto disable_qspick;
        }
        err = devm_request_irq(&pdev->dev, irq, atmel_qspi_interrupt,
                               0, dev_name(&pdev->dev), aq);
        if (err)
                goto disable_qspick;

        pm_runtime_set_autosuspend_delay(&pdev->dev, 500);
        pm_runtime_use_autosuspend(&pdev->dev);
        pm_runtime_set_active(&pdev->dev);
        pm_runtime_enable(&pdev->dev);
        pm_runtime_get_noresume(&pdev->dev);

        atmel_qspi_init(aq);

        err = spi_register_controller(ctrl);
        if (err) {
                pm_runtime_put_noidle(&pdev->dev);
                pm_runtime_disable(&pdev->dev);
                pm_runtime_set_suspended(&pdev->dev);
                pm_runtime_dont_use_autosuspend(&pdev->dev);
                goto disable_qspick;
        }
        pm_runtime_mark_last_busy(&pdev->dev);
        pm_runtime_put_autosuspend(&pdev->dev);

        return 0;

disable_qspick:
        clk_disable_unprepare(aq->qspick);
disable_pclk:
        clk_disable_unprepare(aq->pclk);

        return err;
}

static void atmel_qspi_remove(struct platform_device *pdev)
{
        struct spi_controller *ctrl = platform_get_drvdata(pdev);
        struct atmel_qspi *aq = spi_controller_get_devdata(ctrl);
        int ret;

        spi_unregister_controller(ctrl);

        ret = pm_runtime_get_sync(&pdev->dev);
        if (ret >= 0) {
                atmel_qspi_write(QSPI_CR_QSPIDIS, aq, QSPI_CR);
                clk_disable(aq->qspick);
                clk_disable(aq->pclk);
        } else {
                /*
                 * atmel_qspi_runtime_{suspend,resume} just disable and enable
                 * the two clocks respectively. So if the resume failed, both
                 * clocks are already off: skip the hardware access and do not
                 * disable them again.
                 */
                dev_warn(&pdev->dev, "Failed to resume device on remove\n");
        }

        clk_unprepare(aq->qspick);
        clk_unprepare(aq->pclk);

        pm_runtime_disable(&pdev->dev);
        pm_runtime_dont_use_autosuspend(&pdev->dev);
        pm_runtime_put_noidle(&pdev->dev);
}

static int __maybe_unused atmel_qspi_suspend(struct device *dev)
{
        struct spi_controller *ctrl = dev_get_drvdata(dev);
        struct atmel_qspi *aq = spi_controller_get_devdata(ctrl);
        int ret;

        ret = pm_runtime_resume_and_get(dev);
        if (ret < 0)
                return ret;

        atmel_qspi_write(QSPI_CR_QSPIDIS, aq, QSPI_CR);

        pm_runtime_mark_last_busy(dev);
        pm_runtime_force_suspend(dev);

        clk_unprepare(aq->qspick);
        clk_unprepare(aq->pclk);

        return 0;
}

static int __maybe_unused atmel_qspi_resume(struct device *dev)
{
        struct spi_controller *ctrl = dev_get_drvdata(dev);
        struct atmel_qspi *aq = spi_controller_get_devdata(ctrl);
        int ret;

        ret = clk_prepare(aq->pclk);
        if (ret)
                return ret;

        ret = clk_prepare(aq->qspick);
        if (ret) {
                clk_unprepare(aq->pclk);
                return ret;
        }

        ret = pm_runtime_force_resume(dev);
        if (ret < 0)
                return ret;

        atmel_qspi_init(aq);

        atmel_qspi_write(aq->scr, aq, QSPI_SCR);

        pm_runtime_mark_last_busy(dev);
        pm_runtime_put_autosuspend(dev);

        return 0;
}

static int __maybe_unused atmel_qspi_runtime_suspend(struct device *dev)
{
        struct spi_controller *ctrl = dev_get_drvdata(dev);
        struct atmel_qspi *aq = spi_controller_get_devdata(ctrl);

        clk_disable(aq->qspick);
        clk_disable(aq->pclk);

        return 0;
}

static int __maybe_unused atmel_qspi_runtime_resume(struct device *dev)
{
        struct spi_controller *ctrl = dev_get_drvdata(dev);
        struct atmel_qspi *aq = spi_controller_get_devdata(ctrl);
        int ret;

        ret = clk_enable(aq->pclk);
        if (ret)
                return ret;

        ret = clk_enable(aq->qspick);
        if (ret)
                clk_disable(aq->pclk);

        return ret;
}

static const struct dev_pm_ops __maybe_unused atmel_qspi_pm_ops = {
        SET_SYSTEM_SLEEP_PM_OPS(atmel_qspi_suspend, atmel_qspi_resume)
        SET_RUNTIME_PM_OPS(atmel_qspi_runtime_suspend,
                           atmel_qspi_runtime_resume, NULL)
};

static const struct atmel_qspi_caps atmel_sama5d2_qspi_caps = {};

static const struct atmel_qspi_caps atmel_sam9x60_qspi_caps = {
        .has_qspick = true,
        .has_ricr = true,
};

static const struct of_device_id atmel_qspi_dt_ids[] = {
        {
                .compatible = "atmel,sama5d2-qspi",
                .data = &atmel_sama5d2_qspi_caps,
        },
        {
                .compatible = "microchip,sam9x60-qspi",
                .data = &atmel_sam9x60_qspi_caps,
        },
        { /* sentinel */ }
};

MODULE_DEVICE_TABLE(of, atmel_qspi_dt_ids);

static struct platform_driver atmel_qspi_driver = {
        .driver = {
                .name = "atmel_qspi",
                .of_match_table = atmel_qspi_dt_ids,
                .pm = pm_ptr(&atmel_qspi_pm_ops),
        },
        .probe = atmel_qspi_probe,
        .remove_new = atmel_qspi_remove,
};
module_platform_driver(atmel_qspi_driver);

MODULE_AUTHOR("Cyrille Pitchen <cyrille.pitchen@atmel.com>");
898 MODULE_AUTHOR("Piotr Bugalski <bugalski.piotr@gmail.com");
MODULE_DESCRIPTION("Atmel QSPI Controller driver");
MODULE_LICENSE("GPL v2");