// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright © 2010-2015 Broadcom Corporation
 */

#include <linux/clk.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/platform_data/brcmnand.h>
#include <linux/err.h>
#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/dma-mapping.h>
#include <linux/ioport.h>
#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/mm.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/slab.h>
#include <linux/static_key.h>
#include <linux/list.h>
#include <linux/log2.h>

#include "brcmnand.h"

/*
 * This flag controls if WP stays on between erase/write commands to mitigate
 * flash corruption due to power glitches. Values:
 * 0: NAND_WP is not used or not available
 * 1: NAND_WP is set by default, cleared for erase/write operations
 * 2: NAND_WP is always cleared
 */
static int wp_on = 1;
module_param(wp_on, int, 0444);

/***********************************************************************
 * Definitions
 ***********************************************************************/

#define DRV_NAME			"brcmnand"

#define CMD_NULL			0x00
#define CMD_PAGE_READ			0x01
#define CMD_SPARE_AREA_READ		0x02
#define CMD_STATUS_READ			0x03
#define CMD_PROGRAM_PAGE		0x04
#define CMD_PROGRAM_SPARE_AREA		0x05
#define CMD_COPY_BACK			0x06
#define CMD_DEVICE_ID_READ		0x07
#define CMD_BLOCK_ERASE			0x08
#define CMD_FLASH_RESET			0x09
#define CMD_BLOCKS_LOCK			0x0a
#define CMD_BLOCKS_LOCK_DOWN		0x0b
#define CMD_BLOCKS_UNLOCK		0x0c
#define CMD_READ_BLOCKS_LOCK_STATUS	0x0d
#define CMD_PARAMETER_READ		0x0e
#define CMD_PARAMETER_CHANGE_COL	0x0f
#define CMD_LOW_LEVEL_OP		0x10

struct brcm_nand_dma_desc {
	u32 next_desc;
	u32 next_desc_ext;
	u32 cmd_irq;
	u32 dram_addr;
	u32 dram_addr_ext;
	u32 tfr_len;
	u32 total_len;
	u32 flash_addr;
	u32 flash_addr_ext;
	u32 cs;
	u32 pad2[5];
	u32 status_valid;
} __packed;

/* Bitfields for brcm_nand_dma_desc::status_valid */
#define FLASH_DMA_ECC_ERROR	(1 << 8)
#define FLASH_DMA_CORR_ERROR	(1 << 9)

/* Bitfields for DMA_MODE */
#define FLASH_DMA_MODE_STOP_ON_ERROR	BIT(1) /* stop on uncorrectable ECC error */
#define FLASH_DMA_MODE_MODE		BIT(0) /* linked-list mode */
#define FLASH_DMA_MODE_MASK		(FLASH_DMA_MODE_STOP_ON_ERROR | \
					 FLASH_DMA_MODE_MODE)

/* 512B flash cache in the NAND controller HW */
#define FC_SHIFT	9U
#define FC_BYTES	512U
#define FC_WORDS	(FC_BYTES >> 2)
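/*
 * Note: the controller stages data in this FC_BYTES-sized flash cache, so
 * page I/O is handled as page_size / FC_BYTES sub-page transfers (e.g. a
 * 2048B page is four 512B flash-cache loads of FC_WORDS 32-bit words each).
 */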

#define BRCMNAND_MIN_PAGESIZE	512
#define BRCMNAND_MIN_BLOCKSIZE	(8 * 1024)
#define BRCMNAND_MIN_DEVSIZE	(4ULL * 1024 * 1024)

#define NAND_CTRL_RDY			(INTFC_CTLR_READY | INTFC_FLASH_READY)
#define NAND_POLL_STATUS_TIMEOUT_MS	100

#define EDU_CMD_WRITE		0x00
#define EDU_CMD_READ		0x01
#define EDU_STATUS_ACTIVE	BIT(0)
#define EDU_ERR_STATUS_ERRACK	BIT(0)
#define EDU_DONE_MASK		GENMASK(1, 0)

#define EDU_CONFIG_MODE_NAND	BIT(0)
#define EDU_CONFIG_SWAP_BYTE	BIT(1)
#ifdef CONFIG_CPU_BIG_ENDIAN
#define EDU_CONFIG_SWAP_CFG	EDU_CONFIG_SWAP_BYTE
#else
#define EDU_CONFIG_SWAP_CFG	0
#endif

/* edu registers */
enum edu_reg {
	EDU_CONFIG = 0,
	EDU_DRAM_ADDR,
	EDU_EXT_ADDR,
	EDU_LENGTH,
	EDU_CMD,
	EDU_STOP,
	EDU_STATUS,
	EDU_DONE,
	EDU_ERR_STATUS,
};

static const u16 edu_regs[] = {
	[EDU_CONFIG] = 0x00,
	[EDU_DRAM_ADDR] = 0x04,
	[EDU_EXT_ADDR] = 0x08,
	[EDU_LENGTH] = 0x0c,
	[EDU_CMD] = 0x10,
	[EDU_STOP] = 0x14,
	[EDU_STATUS] = 0x18,
	[EDU_DONE] = 0x1c,
	[EDU_ERR_STATUS] = 0x20,
};
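/*
 * The EDU engine DMAs data between system memory and the controller's flash
 * cache, one FC_BYTES chunk per EDU_CMD; brcmnand_edu_irq() below re-arms it
 * until the remaining edu_count chunks have all been transferred.
 */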

/* flash_dma registers */
enum flash_dma_reg {
	FLASH_DMA_REVISION = 0,
	FLASH_DMA_FIRST_DESC,
	FLASH_DMA_FIRST_DESC_EXT,
	FLASH_DMA_CTRL,
	FLASH_DMA_MODE,
	FLASH_DMA_STATUS,
	FLASH_DMA_INTERRUPT_DESC,
	FLASH_DMA_INTERRUPT_DESC_EXT,
	FLASH_DMA_ERROR_STATUS,
	FLASH_DMA_CURRENT_DESC,
	FLASH_DMA_CURRENT_DESC_EXT,
};

/* flash_dma registers v0 */
static const u16 flash_dma_regs_v0[] = {
	[FLASH_DMA_REVISION] = 0x00,
	[FLASH_DMA_FIRST_DESC] = 0x04,
	[FLASH_DMA_CTRL] = 0x08,
	[FLASH_DMA_MODE] = 0x0c,
	[FLASH_DMA_STATUS] = 0x10,
	[FLASH_DMA_INTERRUPT_DESC] = 0x14,
	[FLASH_DMA_ERROR_STATUS] = 0x18,
	[FLASH_DMA_CURRENT_DESC] = 0x1c,
};

/* flash_dma registers v1 */
static const u16 flash_dma_regs_v1[] = {
	[FLASH_DMA_REVISION] = 0x00,
	[FLASH_DMA_FIRST_DESC] = 0x04,
	[FLASH_DMA_FIRST_DESC_EXT] = 0x08,
	[FLASH_DMA_CTRL] = 0x0c,
	[FLASH_DMA_MODE] = 0x10,
	[FLASH_DMA_STATUS] = 0x14,
	[FLASH_DMA_INTERRUPT_DESC] = 0x18,
	[FLASH_DMA_INTERRUPT_DESC_EXT] = 0x1c,
	[FLASH_DMA_ERROR_STATUS] = 0x20,
	[FLASH_DMA_CURRENT_DESC] = 0x24,
	[FLASH_DMA_CURRENT_DESC_EXT] = 0x28,
};

/* flash_dma registers v4 */
static const u16 flash_dma_regs_v4[] = {
	[FLASH_DMA_REVISION] = 0x00,
	[FLASH_DMA_FIRST_DESC] = 0x08,
	[FLASH_DMA_FIRST_DESC_EXT] = 0x0c,
	[FLASH_DMA_CTRL] = 0x10,
	[FLASH_DMA_MODE] = 0x14,
	[FLASH_DMA_STATUS] = 0x18,
	[FLASH_DMA_INTERRUPT_DESC] = 0x20,
	[FLASH_DMA_INTERRUPT_DESC_EXT] = 0x24,
	[FLASH_DMA_ERROR_STATUS] = 0x28,
	[FLASH_DMA_CURRENT_DESC] = 0x30,
	[FLASH_DMA_CURRENT_DESC_EXT] = 0x34,
};
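/*
 * Which of the layouts above applies is a property of the controller
 * revision; brcmnand_flash_dma_revision_init() selects the right table at
 * probe time.
 */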

/* Controller feature flags */
enum {
	BRCMNAND_HAS_1K_SECTORS = BIT(0),
	BRCMNAND_HAS_PREFETCH = BIT(1),
	BRCMNAND_HAS_CACHE_MODE = BIT(2),
	BRCMNAND_HAS_WP = BIT(3),
};

struct brcmnand_host;

static DEFINE_STATIC_KEY_FALSE(brcmnand_soc_has_ops_key);

struct brcmnand_controller {
	struct device *dev;
	struct nand_controller controller;
	void __iomem *nand_base;
	void __iomem *nand_fc; /* flash cache */
	void __iomem *flash_dma_base;
	int irq;
	unsigned int dma_irq;
	int nand_version;

	/* Some SoCs provide custom interrupt status register(s) */
	struct brcmnand_soc *soc;

	/* Some SoCs have a gateable clock for the controller */
	struct clk *clk;

	int cmd_pending;
	bool dma_pending;
	bool edu_pending;
	struct completion done;
	struct completion dma_done;
	struct completion edu_done;

	/* List of NAND hosts (one for each chip-select) */
	struct list_head host_list;

	/* EDU info, per-transaction */
	const u16 *edu_offsets;
	void __iomem *edu_base;
	int edu_irq;
	int edu_count;
	u64 edu_dram_addr;
	u32 edu_ext_addr;
	u32 edu_cmd;
	u32 edu_config;
	int sas; /* spare area size, per flash cache */
	int sector_size_1k;
	u8 *oob;

	/* flash_dma reg */
	const u16 *flash_dma_offsets;
	struct brcm_nand_dma_desc *dma_desc;
	dma_addr_t dma_pa;

	int (*dma_trans)(struct brcmnand_host *host, u64 addr, u32 *buf,
			 u8 *oob, u32 len, u8 dma_cmd);

	/* in-memory cache of the FLASH_CACHE, used only for some commands */
	u8 flash_cache[FC_BYTES];

	/* Controller revision details */
	const u16 *reg_offsets;
	unsigned int reg_spacing; /* between CS1, CS2, ... regs */
	const u8 *cs_offsets; /* within each chip-select */
	const u8 *cs0_offsets; /* within CS0, if different */
	unsigned int max_block_size;
	const unsigned int *block_sizes;
	unsigned int max_page_size;
	const unsigned int *page_sizes;
	unsigned int page_size_shift;
	unsigned int max_oob;
	u32 ecc_level_shift;
	u32 features;

	/* for low-power standby/resume only */
	u32 nand_cs_nand_select;
	u32 nand_cs_nand_xor;
	u32 corr_stat_threshold;
	u32 flash_dma_mode;
	u32 flash_edu_mode;
	bool pio_poll_mode;
};

struct brcmnand_cfg {
	u64 device_size;
	unsigned int block_size;
	unsigned int page_size;
	unsigned int spare_area_size;
	unsigned int device_width;
	unsigned int col_adr_bytes;
	unsigned int blk_adr_bytes;
	unsigned int ful_adr_bytes;
	unsigned int sector_size_1k;
	unsigned int ecc_level;
	/* used for low-power standby/resume only */
	u32 acc_control;
	u32 config;
	u32 config_ext;
	u32 timing_1;
	u32 timing_2;
};

struct brcmnand_host {
	struct list_head node;

	struct nand_chip chip;
	struct platform_device *pdev;
	int cs;

	unsigned int last_cmd;
	unsigned int last_byte;
	u64 last_addr;
	struct brcmnand_cfg hwcfg;
	struct brcmnand_controller *ctrl;
};

enum brcmnand_reg {
	BRCMNAND_CMD_START = 0,
	BRCMNAND_CMD_EXT_ADDRESS,
	BRCMNAND_CMD_ADDRESS,
	BRCMNAND_INTFC_STATUS,
	BRCMNAND_CS_SELECT,
	BRCMNAND_CS_XOR,
	BRCMNAND_LL_OP,
	BRCMNAND_CS0_BASE,
	BRCMNAND_CS1_BASE,		/* CS1 regs, if non-contiguous */
	BRCMNAND_CORR_THRESHOLD,
	BRCMNAND_CORR_THRESHOLD_EXT,
	BRCMNAND_UNCORR_COUNT,
	BRCMNAND_CORR_COUNT,
	BRCMNAND_CORR_EXT_ADDR,
	BRCMNAND_CORR_ADDR,
	BRCMNAND_UNCORR_EXT_ADDR,
	BRCMNAND_UNCORR_ADDR,
	BRCMNAND_SEMAPHORE,
	BRCMNAND_ID,
	BRCMNAND_ID_EXT,
	BRCMNAND_LL_RDATA,
	BRCMNAND_OOB_READ_BASE,
	BRCMNAND_OOB_READ_10_BASE,	/* offset 0x10, if non-contiguous */
	BRCMNAND_OOB_WRITE_BASE,
	BRCMNAND_OOB_WRITE_10_BASE,	/* offset 0x10, if non-contiguous */
	BRCMNAND_FC_BASE,
};
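/*
 * A zero offset in the per-revision tables below means the register does
 * not exist on that revision: brcmnand_read_reg() then returns 0 and
 * brcmnand_write_reg() becomes a no-op, so callers need no per-version
 * special cases.
 */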

/* BRCMNAND v2.1-v2.2 */
static const u16 brcmnand_regs_v21[] = {
	[BRCMNAND_CMD_START] = 0x04,
	[BRCMNAND_CMD_EXT_ADDRESS] = 0x08,
	[BRCMNAND_CMD_ADDRESS] = 0x0c,
	[BRCMNAND_INTFC_STATUS] = 0x5c,
	[BRCMNAND_CS_SELECT] = 0x14,
	[BRCMNAND_CS_XOR] = 0x18,
	[BRCMNAND_LL_OP] = 0,
	[BRCMNAND_CS0_BASE] = 0x40,
	[BRCMNAND_CS1_BASE] = 0,
	[BRCMNAND_CORR_THRESHOLD] = 0,
	[BRCMNAND_CORR_THRESHOLD_EXT] = 0,
	[BRCMNAND_UNCORR_COUNT] = 0,
	[BRCMNAND_CORR_COUNT] = 0,
	[BRCMNAND_CORR_EXT_ADDR] = 0x60,
	[BRCMNAND_CORR_ADDR] = 0x64,
	[BRCMNAND_UNCORR_EXT_ADDR] = 0x68,
	[BRCMNAND_UNCORR_ADDR] = 0x6c,
	[BRCMNAND_SEMAPHORE] = 0x50,
	[BRCMNAND_ID] = 0x54,
	[BRCMNAND_ID_EXT] = 0,
	[BRCMNAND_LL_RDATA] = 0,
	[BRCMNAND_OOB_READ_BASE] = 0x20,
	[BRCMNAND_OOB_READ_10_BASE] = 0,
	[BRCMNAND_OOB_WRITE_BASE] = 0x30,
	[BRCMNAND_OOB_WRITE_10_BASE] = 0,
	[BRCMNAND_FC_BASE] = 0x200,
};

/* BRCMNAND v3.3-v4.0 */
static const u16 brcmnand_regs_v33[] = {
	[BRCMNAND_CMD_START] = 0x04,
	[BRCMNAND_CMD_EXT_ADDRESS] = 0x08,
	[BRCMNAND_CMD_ADDRESS] = 0x0c,
	[BRCMNAND_INTFC_STATUS] = 0x6c,
	[BRCMNAND_CS_SELECT] = 0x14,
	[BRCMNAND_CS_XOR] = 0x18,
	[BRCMNAND_LL_OP] = 0x178,
	[BRCMNAND_CS0_BASE] = 0x40,
	[BRCMNAND_CS1_BASE] = 0xd0,
	[BRCMNAND_CORR_THRESHOLD] = 0x84,
	[BRCMNAND_CORR_THRESHOLD_EXT] = 0,
	[BRCMNAND_UNCORR_COUNT] = 0,
	[BRCMNAND_CORR_COUNT] = 0,
	[BRCMNAND_CORR_EXT_ADDR] = 0x70,
	[BRCMNAND_CORR_ADDR] = 0x74,
	[BRCMNAND_UNCORR_EXT_ADDR] = 0x78,
	[BRCMNAND_UNCORR_ADDR] = 0x7c,
	[BRCMNAND_SEMAPHORE] = 0x58,
	[BRCMNAND_ID] = 0x60,
	[BRCMNAND_ID_EXT] = 0x64,
	[BRCMNAND_LL_RDATA] = 0x17c,
	[BRCMNAND_OOB_READ_BASE] = 0x20,
	[BRCMNAND_OOB_READ_10_BASE] = 0x130,
	[BRCMNAND_OOB_WRITE_BASE] = 0x30,
	[BRCMNAND_OOB_WRITE_10_BASE] = 0,
	[BRCMNAND_FC_BASE] = 0x200,
};

/* BRCMNAND v5.0 */
static const u16 brcmnand_regs_v50[] = {
	[BRCMNAND_CMD_START] = 0x04,
	[BRCMNAND_CMD_EXT_ADDRESS] = 0x08,
	[BRCMNAND_CMD_ADDRESS] = 0x0c,
	[BRCMNAND_INTFC_STATUS] = 0x6c,
	[BRCMNAND_CS_SELECT] = 0x14,
	[BRCMNAND_CS_XOR] = 0x18,
	[BRCMNAND_LL_OP] = 0x178,
	[BRCMNAND_CS0_BASE] = 0x40,
	[BRCMNAND_CS1_BASE] = 0xd0,
	[BRCMNAND_CORR_THRESHOLD] = 0x84,
	[BRCMNAND_CORR_THRESHOLD_EXT] = 0,
	[BRCMNAND_UNCORR_COUNT] = 0,
	[BRCMNAND_CORR_COUNT] = 0,
	[BRCMNAND_CORR_EXT_ADDR] = 0x70,
	[BRCMNAND_CORR_ADDR] = 0x74,
	[BRCMNAND_UNCORR_EXT_ADDR] = 0x78,
	[BRCMNAND_UNCORR_ADDR] = 0x7c,
	[BRCMNAND_SEMAPHORE] = 0x58,
	[BRCMNAND_ID] = 0x60,
	[BRCMNAND_ID_EXT] = 0x64,
	[BRCMNAND_LL_RDATA] = 0x17c,
	[BRCMNAND_OOB_READ_BASE] = 0x20,
	[BRCMNAND_OOB_READ_10_BASE] = 0x130,
	[BRCMNAND_OOB_WRITE_BASE] = 0x30,
	[BRCMNAND_OOB_WRITE_10_BASE] = 0x140,
	[BRCMNAND_FC_BASE] = 0x200,
};

/* BRCMNAND v6.0 - v7.1 */
static const u16 brcmnand_regs_v60[] = {
	[BRCMNAND_CMD_START] = 0x04,
	[BRCMNAND_CMD_EXT_ADDRESS] = 0x08,
	[BRCMNAND_CMD_ADDRESS] = 0x0c,
	[BRCMNAND_INTFC_STATUS] = 0x14,
	[BRCMNAND_CS_SELECT] = 0x18,
	[BRCMNAND_CS_XOR] = 0x1c,
	[BRCMNAND_LL_OP] = 0x20,
	[BRCMNAND_CS0_BASE] = 0x50,
	[BRCMNAND_CS1_BASE] = 0,
	[BRCMNAND_CORR_THRESHOLD] = 0xc0,
	[BRCMNAND_CORR_THRESHOLD_EXT] = 0xc4,
	[BRCMNAND_UNCORR_COUNT] = 0xfc,
	[BRCMNAND_CORR_COUNT] = 0x100,
	[BRCMNAND_CORR_EXT_ADDR] = 0x10c,
	[BRCMNAND_CORR_ADDR] = 0x110,
	[BRCMNAND_UNCORR_EXT_ADDR] = 0x114,
	[BRCMNAND_UNCORR_ADDR] = 0x118,
	[BRCMNAND_SEMAPHORE] = 0x150,
	[BRCMNAND_ID] = 0x194,
	[BRCMNAND_ID_EXT] = 0x198,
	[BRCMNAND_LL_RDATA] = 0x19c,
	[BRCMNAND_OOB_READ_BASE] = 0x200,
	[BRCMNAND_OOB_READ_10_BASE] = 0,
	[BRCMNAND_OOB_WRITE_BASE] = 0x280,
	[BRCMNAND_OOB_WRITE_10_BASE] = 0,
	[BRCMNAND_FC_BASE] = 0x400,
};

/* BRCMNAND v7.1 */
static const u16 brcmnand_regs_v71[] = {
	[BRCMNAND_CMD_START] = 0x04,
	[BRCMNAND_CMD_EXT_ADDRESS] = 0x08,
	[BRCMNAND_CMD_ADDRESS] = 0x0c,
	[BRCMNAND_INTFC_STATUS] = 0x14,
	[BRCMNAND_CS_SELECT] = 0x18,
	[BRCMNAND_CS_XOR] = 0x1c,
	[BRCMNAND_LL_OP] = 0x20,
	[BRCMNAND_CS0_BASE] = 0x50,
	[BRCMNAND_CS1_BASE] = 0,
	[BRCMNAND_CORR_THRESHOLD] = 0xdc,
	[BRCMNAND_CORR_THRESHOLD_EXT] = 0xe0,
	[BRCMNAND_UNCORR_COUNT] = 0xfc,
	[BRCMNAND_CORR_COUNT] = 0x100,
	[BRCMNAND_CORR_EXT_ADDR] = 0x10c,
	[BRCMNAND_CORR_ADDR] = 0x110,
	[BRCMNAND_UNCORR_EXT_ADDR] = 0x114,
	[BRCMNAND_UNCORR_ADDR] = 0x118,
	[BRCMNAND_SEMAPHORE] = 0x150,
	[BRCMNAND_ID] = 0x194,
	[BRCMNAND_ID_EXT] = 0x198,
	[BRCMNAND_LL_RDATA] = 0x19c,
	[BRCMNAND_OOB_READ_BASE] = 0x200,
	[BRCMNAND_OOB_READ_10_BASE] = 0,
	[BRCMNAND_OOB_WRITE_BASE] = 0x280,
	[BRCMNAND_OOB_WRITE_10_BASE] = 0,
	[BRCMNAND_FC_BASE] = 0x400,
};

/* BRCMNAND v7.2 */
static const u16 brcmnand_regs_v72[] = {
	[BRCMNAND_CMD_START] = 0x04,
	[BRCMNAND_CMD_EXT_ADDRESS] = 0x08,
	[BRCMNAND_CMD_ADDRESS] = 0x0c,
	[BRCMNAND_INTFC_STATUS] = 0x14,
	[BRCMNAND_CS_SELECT] = 0x18,
	[BRCMNAND_CS_XOR] = 0x1c,
	[BRCMNAND_LL_OP] = 0x20,
	[BRCMNAND_CS0_BASE] = 0x50,
	[BRCMNAND_CS1_BASE] = 0,
	[BRCMNAND_CORR_THRESHOLD] = 0xdc,
	[BRCMNAND_CORR_THRESHOLD_EXT] = 0xe0,
	[BRCMNAND_UNCORR_COUNT] = 0xfc,
	[BRCMNAND_CORR_COUNT] = 0x100,
	[BRCMNAND_CORR_EXT_ADDR] = 0x10c,
	[BRCMNAND_CORR_ADDR] = 0x110,
	[BRCMNAND_UNCORR_EXT_ADDR] = 0x114,
	[BRCMNAND_UNCORR_ADDR] = 0x118,
	[BRCMNAND_SEMAPHORE] = 0x150,
	[BRCMNAND_ID] = 0x194,
	[BRCMNAND_ID_EXT] = 0x198,
	[BRCMNAND_LL_RDATA] = 0x19c,
	[BRCMNAND_OOB_READ_BASE] = 0x200,
	[BRCMNAND_OOB_READ_10_BASE] = 0,
	[BRCMNAND_OOB_WRITE_BASE] = 0x400,
	[BRCMNAND_OOB_WRITE_10_BASE] = 0,
	[BRCMNAND_FC_BASE] = 0x600,
};

enum brcmnand_cs_reg {
	BRCMNAND_CS_CFG_EXT = 0,
	BRCMNAND_CS_CFG,
	BRCMNAND_CS_ACC_CONTROL,
	BRCMNAND_CS_TIMING1,
	BRCMNAND_CS_TIMING2,
};

/* Per chip-select offsets for v7.1 */
static const u8 brcmnand_cs_offsets_v71[] = {
	[BRCMNAND_CS_ACC_CONTROL] = 0x00,
	[BRCMNAND_CS_CFG_EXT] = 0x04,
	[BRCMNAND_CS_CFG] = 0x08,
	[BRCMNAND_CS_TIMING1] = 0x0c,
	[BRCMNAND_CS_TIMING2] = 0x10,
};

/* Per chip-select offsets for pre v7.1, except CS0 on <= v5.0 */
static const u8 brcmnand_cs_offsets[] = {
	[BRCMNAND_CS_ACC_CONTROL] = 0x00,
	[BRCMNAND_CS_CFG_EXT] = 0x04,
	[BRCMNAND_CS_CFG] = 0x04,
	[BRCMNAND_CS_TIMING1] = 0x08,
	[BRCMNAND_CS_TIMING2] = 0x0c,
};

/* Per chip-select offset for <= v5.0 on CS0 only */
static const u8 brcmnand_cs_offsets_cs0[] = {
	[BRCMNAND_CS_ACC_CONTROL] = 0x00,
	[BRCMNAND_CS_CFG_EXT] = 0x08,
	[BRCMNAND_CS_CFG] = 0x08,
	[BRCMNAND_CS_TIMING1] = 0x10,
	[BRCMNAND_CS_TIMING2] = 0x14,
};
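/*
 * A chip-select register address is composed as
 *   reg_offsets[BRCMNAND_CS0_BASE] + cs * reg_spacing + cs_offsets[reg]
 * (see brcmnand_cs_offset()), with cs0_offsets overriding the layout for
 * CS0 on <= v5.0 and BRCMNAND_CS1_BASE used when CS1+ is non-contiguous.
 */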

/*
 * Bitfields for the CFG and CFG_EXT registers. Pre-v7.1 controllers only had
 * one config register, but once the bitfields overflowed, newer controllers
 * (v7.1 and newer) added a CFG_EXT register and shuffled a few fields around.
 */
enum {
	CFG_BLK_ADR_BYTES_SHIFT		= 8,
	CFG_COL_ADR_BYTES_SHIFT		= 12,
	CFG_FUL_ADR_BYTES_SHIFT		= 16,
	CFG_BUS_WIDTH_SHIFT		= 23,
	CFG_BUS_WIDTH			= BIT(CFG_BUS_WIDTH_SHIFT),
	CFG_DEVICE_SIZE_SHIFT		= 24,

	/* Only for v2.1 */
	CFG_PAGE_SIZE_SHIFT_v2_1	= 30,

	/* Only for pre-v7.1 (with no CFG_EXT register) */
	CFG_PAGE_SIZE_SHIFT		= 20,
	CFG_BLK_SIZE_SHIFT		= 28,

	/* Only for v7.1+ (with CFG_EXT register) */
	CFG_EXT_PAGE_SIZE_SHIFT		= 0,
	CFG_EXT_BLK_SIZE_SHIFT		= 4,
};
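/*
 * For instance, the data-bus width can be tested directly with
 * (config & CFG_BUS_WIDTH) on any revision, while the page/block size
 * fields are decoded from CONFIG on pre-v7.1 parts but from CONFIG_EXT
 * (CFG_EXT_*_SHIFT) on v7.1+.
 */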

/* BRCMNAND_INTFC_STATUS */
enum {
	INTFC_FLASH_STATUS		= GENMASK(7, 0),

	INTFC_ERASED			= BIT(27),
	INTFC_OOB_VALID			= BIT(28),
	INTFC_CACHE_VALID		= BIT(29),
	INTFC_FLASH_READY		= BIT(30),
	INTFC_CTLR_READY		= BIT(31),
};

/***********************************************************************
 * NAND ACC CONTROL bitfield
 *
 * Some bits have remained constant across hardware revisions, while
 * others have shifted around.
 ***********************************************************************/

/* Constant for all versions (where supported) */
enum {
	/* See BRCMNAND_HAS_CACHE_MODE */
	ACC_CONTROL_CACHE_MODE		= BIT(22),

	/* See BRCMNAND_HAS_PREFETCH */
	ACC_CONTROL_PREFETCH		= BIT(23),

	ACC_CONTROL_PAGE_HIT		= BIT(24),
	ACC_CONTROL_WR_PREEMPT		= BIT(25),
	ACC_CONTROL_PARTIAL_PAGE	= BIT(26),
	ACC_CONTROL_RD_ERASED		= BIT(27),
	ACC_CONTROL_FAST_PGM_RDIN	= BIT(28),
	ACC_CONTROL_WR_ECC		= BIT(30),
	ACC_CONTROL_RD_ECC		= BIT(31),
};

#define ACC_CONTROL_ECC_SHIFT		16
/* Only for v7.2 */
#define ACC_CONTROL_ECC_EXT_SHIFT	13

static inline bool brcmnand_non_mmio_ops(struct brcmnand_controller *ctrl)
{
#if IS_ENABLED(CONFIG_MTD_NAND_BRCMNAND_BCMA)
	return static_branch_unlikely(&brcmnand_soc_has_ops_key);
#else
	return false;
#endif
}
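/*
 * The static key above is only ever enabled by the BCMA front-end, so on
 * MMIO platforms the branch is patched out at runtime and register access
 * stays a plain readl/writel fast path.
 */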

static inline u32 nand_readreg(struct brcmnand_controller *ctrl, u32 offs)
{
	if (brcmnand_non_mmio_ops(ctrl))
		return brcmnand_soc_read(ctrl->soc, offs);
	return brcmnand_readl(ctrl->nand_base + offs);
}

static inline void nand_writereg(struct brcmnand_controller *ctrl, u32 offs,
				 u32 val)
{
	if (brcmnand_non_mmio_ops(ctrl))
		brcmnand_soc_write(ctrl->soc, val, offs);
	else
		brcmnand_writel(val, ctrl->nand_base + offs);
}

static int brcmnand_revision_init(struct brcmnand_controller *ctrl)
{
	static const unsigned int block_sizes_v6[] = { 8, 16, 128, 256, 512, 1024, 2048, 0 };
	static const unsigned int block_sizes_v4[] = { 16, 128, 8, 512, 256, 1024, 2048, 0 };
	static const unsigned int block_sizes_v2_2[] = { 16, 128, 8, 512, 256, 0 };
	static const unsigned int block_sizes_v2_1[] = { 16, 128, 8, 512, 0 };
	static const unsigned int page_sizes_v3_4[] = { 512, 2048, 4096, 8192, 0 };
	static const unsigned int page_sizes_v2_2[] = { 512, 2048, 4096, 0 };
	static const unsigned int page_sizes_v2_1[] = { 512, 2048, 0 };

	ctrl->nand_version = nand_readreg(ctrl, 0) & 0xffff;

	/* Only support v2.1+ */
	if (ctrl->nand_version < 0x0201) {
		dev_err(ctrl->dev, "version %#x not supported\n",
			ctrl->nand_version);
		return -ENODEV;
	}

	/* Register offsets */
	if (ctrl->nand_version >= 0x0702)
		ctrl->reg_offsets = brcmnand_regs_v72;
	else if (ctrl->nand_version == 0x0701)
		ctrl->reg_offsets = brcmnand_regs_v71;
	else if (ctrl->nand_version >= 0x0600)
		ctrl->reg_offsets = brcmnand_regs_v60;
	else if (ctrl->nand_version >= 0x0500)
		ctrl->reg_offsets = brcmnand_regs_v50;
	else if (ctrl->nand_version >= 0x0303)
		ctrl->reg_offsets = brcmnand_regs_v33;
	else if (ctrl->nand_version >= 0x0201)
		ctrl->reg_offsets = brcmnand_regs_v21;

	/* Chip-select stride */
	if (ctrl->nand_version >= 0x0701)
		ctrl->reg_spacing = 0x14;
	else
		ctrl->reg_spacing = 0x10;

	/* Per chip-select registers */
	if (ctrl->nand_version >= 0x0701) {
		ctrl->cs_offsets = brcmnand_cs_offsets_v71;
	} else {
		ctrl->cs_offsets = brcmnand_cs_offsets;

		/* v3.3-5.0 have a different CS0 offset layout */
		if (ctrl->nand_version >= 0x0303 &&
		    ctrl->nand_version <= 0x0500)
			ctrl->cs0_offsets = brcmnand_cs_offsets_cs0;
	}

	/* Page / block sizes */
	if (ctrl->nand_version >= 0x0701) {
		/* >= v7.1 use nice power-of-2 values! */
		ctrl->max_page_size = 16 * 1024;
		ctrl->max_block_size = 2 * 1024 * 1024;
	} else {
		if (ctrl->nand_version >= 0x0304)
			ctrl->page_sizes = page_sizes_v3_4;
		else if (ctrl->nand_version >= 0x0202)
			ctrl->page_sizes = page_sizes_v2_2;
		else
			ctrl->page_sizes = page_sizes_v2_1;

		if (ctrl->nand_version >= 0x0202)
			ctrl->page_size_shift = CFG_PAGE_SIZE_SHIFT;
		else
			ctrl->page_size_shift = CFG_PAGE_SIZE_SHIFT_v2_1;

		if (ctrl->nand_version >= 0x0600)
			ctrl->block_sizes = block_sizes_v6;
		else if (ctrl->nand_version >= 0x0400)
			ctrl->block_sizes = block_sizes_v4;
		else if (ctrl->nand_version >= 0x0202)
			ctrl->block_sizes = block_sizes_v2_2;
		else
			ctrl->block_sizes = block_sizes_v2_1;

		if (ctrl->nand_version < 0x0400) {
			if (ctrl->nand_version < 0x0202)
				ctrl->max_page_size = 2048;
			else
				ctrl->max_page_size = 4096;
			ctrl->max_block_size = 512 * 1024;
		}
	}

	/* Maximum spare area sector size (per 512B) */
	if (ctrl->nand_version == 0x0702)
		ctrl->max_oob = 128;
	else if (ctrl->nand_version >= 0x0600)
		ctrl->max_oob = 64;
	else if (ctrl->nand_version >= 0x0500)
		ctrl->max_oob = 32;
	else
		ctrl->max_oob = 16;

	/* v6.0 and newer (except v6.1) have prefetch support */
	if (ctrl->nand_version >= 0x0600 && ctrl->nand_version != 0x0601)
		ctrl->features |= BRCMNAND_HAS_PREFETCH;

	/*
	 * v6.x has cache mode, but it's implemented differently. Ignore it for
	 * now.
	 */
	if (ctrl->nand_version >= 0x0700)
		ctrl->features |= BRCMNAND_HAS_CACHE_MODE;

	if (ctrl->nand_version >= 0x0500)
		ctrl->features |= BRCMNAND_HAS_1K_SECTORS;

	if (ctrl->nand_version >= 0x0700)
		ctrl->features |= BRCMNAND_HAS_WP;
	else if (of_property_read_bool(ctrl->dev->of_node, "brcm,nand-has-wp"))
		ctrl->features |= BRCMNAND_HAS_WP;

	/* v7.2 has different ecc level shift in the acc register */
	if (ctrl->nand_version == 0x0702)
		ctrl->ecc_level_shift = ACC_CONTROL_ECC_EXT_SHIFT;
	else
		ctrl->ecc_level_shift = ACC_CONTROL_ECC_SHIFT;

	return 0;
}

static void brcmnand_flash_dma_revision_init(struct brcmnand_controller *ctrl)
{
	/* flash_dma register offsets */
	if (ctrl->nand_version >= 0x0703)
		ctrl->flash_dma_offsets = flash_dma_regs_v4;
	else if (ctrl->nand_version == 0x0602)
		ctrl->flash_dma_offsets = flash_dma_regs_v0;
	else
		ctrl->flash_dma_offsets = flash_dma_regs_v1;
}

static inline u32 brcmnand_read_reg(struct brcmnand_controller *ctrl,
				    enum brcmnand_reg reg)
{
	u16 offs = ctrl->reg_offsets[reg];

	if (offs)
		return nand_readreg(ctrl, offs);
	else
		return 0;
}

static inline void brcmnand_write_reg(struct brcmnand_controller *ctrl,
				      enum brcmnand_reg reg, u32 val)
{
	u16 offs = ctrl->reg_offsets[reg];

	if (offs)
		nand_writereg(ctrl, offs, val);
}

static inline void brcmnand_rmw_reg(struct brcmnand_controller *ctrl,
				    enum brcmnand_reg reg, u32 mask,
				    unsigned int shift, u32 val)
{
	u32 tmp = brcmnand_read_reg(ctrl, reg);

	tmp &= ~mask;
	tmp |= val << shift;
	brcmnand_write_reg(ctrl, reg, tmp);
}

static inline u32 brcmnand_read_fc(struct brcmnand_controller *ctrl, int word)
{
	if (brcmnand_non_mmio_ops(ctrl))
		return brcmnand_soc_read(ctrl->soc, BRCMNAND_NON_MMIO_FC_ADDR);
	return __raw_readl(ctrl->nand_fc + word * 4);
}

static inline void brcmnand_write_fc(struct brcmnand_controller *ctrl,
				     int word, u32 val)
{
	if (brcmnand_non_mmio_ops(ctrl))
		brcmnand_soc_write(ctrl->soc, val, BRCMNAND_NON_MMIO_FC_ADDR);
	else
		__raw_writel(val, ctrl->nand_fc + word * 4);
}

static inline void edu_writel(struct brcmnand_controller *ctrl,
			      enum edu_reg reg, u32 val)
{
	u16 offs = ctrl->edu_offsets[reg];

	brcmnand_writel(val, ctrl->edu_base + offs);
}

static inline u32 edu_readl(struct brcmnand_controller *ctrl,
			    enum edu_reg reg)
{
	u16 offs = ctrl->edu_offsets[reg];

	return brcmnand_readl(ctrl->edu_base + offs);
}

static void brcmnand_clear_ecc_addr(struct brcmnand_controller *ctrl)
{
	/* Clear error addresses */
	brcmnand_write_reg(ctrl, BRCMNAND_UNCORR_ADDR, 0);
	brcmnand_write_reg(ctrl, BRCMNAND_CORR_ADDR, 0);
	brcmnand_write_reg(ctrl, BRCMNAND_UNCORR_EXT_ADDR, 0);
	brcmnand_write_reg(ctrl, BRCMNAND_CORR_EXT_ADDR, 0);
}

static u64 brcmnand_get_uncorrecc_addr(struct brcmnand_controller *ctrl)
{
	u64 err_addr;

	err_addr = brcmnand_read_reg(ctrl, BRCMNAND_UNCORR_ADDR);
	err_addr |= ((u64)(brcmnand_read_reg(ctrl,
					     BRCMNAND_UNCORR_EXT_ADDR)
			   & 0xffff) << 32);

	return err_addr;
}

static u64 brcmnand_get_correcc_addr(struct brcmnand_controller *ctrl)
{
	u64 err_addr;

	err_addr = brcmnand_read_reg(ctrl, BRCMNAND_CORR_ADDR);
	err_addr |= ((u64)(brcmnand_read_reg(ctrl,
					     BRCMNAND_CORR_EXT_ADDR)
			   & 0xffff) << 32);

	return err_addr;
}

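/*
 * CMD_EXT_ADDRESS carries the chip-select in bits 16+ alongside the upper
 * address bits; the register read-backs below make sure each write has
 * posted before a command is started against that address.
 */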
static void brcmnand_set_cmd_addr(struct mtd_info *mtd, u64 addr)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct brcmnand_host *host = nand_get_controller_data(chip);
	struct brcmnand_controller *ctrl = host->ctrl;

	brcmnand_write_reg(ctrl, BRCMNAND_CMD_EXT_ADDRESS,
			   (host->cs << 16) | ((addr >> 32) & 0xffff));
	(void)brcmnand_read_reg(ctrl, BRCMNAND_CMD_EXT_ADDRESS);
	brcmnand_write_reg(ctrl, BRCMNAND_CMD_ADDRESS,
			   lower_32_bits(addr));
	(void)brcmnand_read_reg(ctrl, BRCMNAND_CMD_ADDRESS);
}

static inline u16 brcmnand_cs_offset(struct brcmnand_controller *ctrl, int cs,
				     enum brcmnand_cs_reg reg)
{
	u16 offs_cs0 = ctrl->reg_offsets[BRCMNAND_CS0_BASE];
	u16 offs_cs1 = ctrl->reg_offsets[BRCMNAND_CS1_BASE];
	u8 cs_offs;

	if (cs == 0 && ctrl->cs0_offsets)
		cs_offs = ctrl->cs0_offsets[reg];
	else
		cs_offs = ctrl->cs_offsets[reg];

	if (cs && offs_cs1)
		return offs_cs1 + (cs - 1) * ctrl->reg_spacing + cs_offs;

	return offs_cs0 + cs * ctrl->reg_spacing + cs_offs;
}

static inline u32 brcmnand_count_corrected(struct brcmnand_controller *ctrl)
{
	if (ctrl->nand_version < 0x0600)
		return 1;
	return brcmnand_read_reg(ctrl, BRCMNAND_CORR_COUNT);
}

static void brcmnand_wr_corr_thresh(struct brcmnand_host *host, u8 val)
{
	struct brcmnand_controller *ctrl = host->ctrl;
	unsigned int shift = 0, bits;
	enum brcmnand_reg reg = BRCMNAND_CORR_THRESHOLD;
	int cs = host->cs;

	if (!ctrl->reg_offsets[reg])
		return;

	if (ctrl->nand_version == 0x0702)
		bits = 7;
	else if (ctrl->nand_version >= 0x0600)
		bits = 6;
	else if (ctrl->nand_version >= 0x0500)
		bits = 5;
	else
		bits = 4;

	if (ctrl->nand_version >= 0x0702) {
		if (cs >= 4)
			reg = BRCMNAND_CORR_THRESHOLD_EXT;
		shift = (cs % 4) * bits;
	} else if (ctrl->nand_version >= 0x0600) {
		if (cs >= 5)
			reg = BRCMNAND_CORR_THRESHOLD_EXT;
		shift = (cs % 5) * bits;
	}
	brcmnand_rmw_reg(ctrl, reg, (bits - 1) << shift, shift, val);
}

static inline int brcmnand_cmd_shift(struct brcmnand_controller *ctrl)
{
	/* Kludge for the BCMA-based NAND controller which does not actually
	 * shift the command
	 */
	if (ctrl->nand_version == 0x0304 && brcmnand_non_mmio_ops(ctrl))
		return 0;

	if (ctrl->nand_version < 0x0602)
		return 24;
	return 0;
}

static inline u32 brcmnand_spare_area_mask(struct brcmnand_controller *ctrl)
{
	if (ctrl->nand_version == 0x0702)
		return GENMASK(7, 0);
	else if (ctrl->nand_version >= 0x0600)
		return GENMASK(6, 0);
	else if (ctrl->nand_version >= 0x0303)
		return GENMASK(5, 0);
	else
		return GENMASK(4, 0);
}

static inline u32 brcmnand_ecc_level_mask(struct brcmnand_controller *ctrl)
{
	u32 mask = (ctrl->nand_version >= 0x0600) ? 0x1f : 0x0f;

	mask <<= ACC_CONTROL_ECC_SHIFT;

	/* v7.2 includes additional ECC levels */
	if (ctrl->nand_version == 0x0702)
		mask |= 0x7 << ACC_CONTROL_ECC_EXT_SHIFT;

	return mask;
}

static void brcmnand_set_ecc_enabled(struct brcmnand_host *host, int en)
{
	struct brcmnand_controller *ctrl = host->ctrl;
	u16 offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_ACC_CONTROL);
	u32 acc_control = nand_readreg(ctrl, offs);
	u32 ecc_flags = ACC_CONTROL_WR_ECC | ACC_CONTROL_RD_ECC;

	if (en) {
		acc_control |= ecc_flags; /* enable RD/WR ECC */
		acc_control &= ~brcmnand_ecc_level_mask(ctrl);
		acc_control |= host->hwcfg.ecc_level << ctrl->ecc_level_shift;
	} else {
		acc_control &= ~ecc_flags; /* disable RD/WR ECC */
		acc_control &= ~brcmnand_ecc_level_mask(ctrl);
	}

	nand_writereg(ctrl, offs, acc_control);
}

static inline int brcmnand_sector_1k_shift(struct brcmnand_controller *ctrl)
{
	if (ctrl->nand_version >= 0x0702)
		return 9;
	else if (ctrl->nand_version >= 0x0600)
		return 7;
	else if (ctrl->nand_version >= 0x0500)
		return 6;
	else
		return -1;
}

static int brcmnand_get_sector_size_1k(struct brcmnand_host *host)
{
	struct brcmnand_controller *ctrl = host->ctrl;
	int shift = brcmnand_sector_1k_shift(ctrl);
	u16 acc_control_offs = brcmnand_cs_offset(ctrl, host->cs,
						  BRCMNAND_CS_ACC_CONTROL);

	if (shift < 0)
		return 0;

	return (nand_readreg(ctrl, acc_control_offs) >> shift) & 0x1;
}

static void brcmnand_set_sector_size_1k(struct brcmnand_host *host, int val)
{
	struct brcmnand_controller *ctrl = host->ctrl;
	int shift = brcmnand_sector_1k_shift(ctrl);
	u16 acc_control_offs = brcmnand_cs_offset(ctrl, host->cs,
						  BRCMNAND_CS_ACC_CONTROL);
	u32 tmp;

	if (shift < 0)
		return;

	tmp = nand_readreg(ctrl, acc_control_offs);
	tmp &= ~(1 << shift);
	tmp |= (!!val) << shift;
	nand_writereg(ctrl, acc_control_offs, tmp);
}

/***********************************************************************
 * CS_NAND_SELECT
 ***********************************************************************/

enum {
	CS_SELECT_NAND_WP		= BIT(29),
	CS_SELECT_AUTO_DEVICE_ID_CFG	= BIT(30),
};

static int bcmnand_ctrl_poll_status(struct brcmnand_controller *ctrl,
				    u32 mask, u32 expected_val,
				    unsigned long timeout_ms)
{
	unsigned long limit;
	u32 val;

	if (!timeout_ms)
		timeout_ms = NAND_POLL_STATUS_TIMEOUT_MS;

	limit = jiffies + msecs_to_jiffies(timeout_ms);
	do {
		val = brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS);
		if ((val & mask) == expected_val)
			return 0;

		cpu_relax();
	} while (time_after(limit, jiffies));

	/*
	 * Do a final check after the timeout in case the CPU was busy and
	 * the driver did not get enough time to perform the polling; this
	 * avoids false alarms.
	 */
	val = brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS);
	if ((val & mask) == expected_val)
		return 0;

	dev_warn(ctrl->dev, "timeout on status poll (expected %x got %x)\n",
		 expected_val, val & mask);

	return -ETIMEDOUT;
}

static inline void brcmnand_set_wp(struct brcmnand_controller *ctrl, bool en)
{
	u32 val = en ? CS_SELECT_NAND_WP : 0;

	brcmnand_rmw_reg(ctrl, BRCMNAND_CS_SELECT, CS_SELECT_NAND_WP, 0, val);
}

/***********************************************************************
 * Flash DMA
 ***********************************************************************/

static inline bool has_flash_dma(struct brcmnand_controller *ctrl)
{
	return ctrl->flash_dma_base;
}

static inline bool has_edu(struct brcmnand_controller *ctrl)
{
	return ctrl->edu_base;
}

static inline bool use_dma(struct brcmnand_controller *ctrl)
{
	return has_flash_dma(ctrl) || has_edu(ctrl);
}

static inline void disable_ctrl_irqs(struct brcmnand_controller *ctrl)
{
	if (ctrl->pio_poll_mode)
		return;

	if (has_flash_dma(ctrl)) {
		ctrl->flash_dma_base = NULL;
		disable_irq(ctrl->dma_irq);
	}

	disable_irq(ctrl->irq);
	ctrl->pio_poll_mode = true;
}

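/*
 * DMA is only attempted on buffers that are physically contiguous and
 * 32-bit aligned; vmalloc'ed buffers fail this check and the transfer
 * falls back to PIO.
 */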
static inline bool flash_dma_buf_ok(const void *buf)
{
	return buf && !is_vmalloc_addr(buf) &&
		likely(IS_ALIGNED((uintptr_t)buf, 4));
}

static inline void flash_dma_writel(struct brcmnand_controller *ctrl,
				    enum flash_dma_reg dma_reg, u32 val)
{
	u16 offs = ctrl->flash_dma_offsets[dma_reg];

	brcmnand_writel(val, ctrl->flash_dma_base + offs);
}

static inline u32 flash_dma_readl(struct brcmnand_controller *ctrl,
				  enum flash_dma_reg dma_reg)
{
	u16 offs = ctrl->flash_dma_offsets[dma_reg];

	return brcmnand_readl(ctrl->flash_dma_base + offs);
}

/* Low-level operation types: command, address, write, or read */
enum brcmnand_llop_type {
	LL_OP_CMD,
	LL_OP_ADDR,
	LL_OP_WR,
	LL_OP_RD,
};

/***********************************************************************
 * Internal support functions
 ***********************************************************************/

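/*
 * The controller encodes Hamming ECC as ecc_level 15 with a 16-byte spare
 * area on a 512B sector; newer revisions (> v7.1) also accept the
 * 28-byte-spare/level-16 variant, as tested below.
 */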
static inline bool is_hamming_ecc(struct brcmnand_controller *ctrl,
				  struct brcmnand_cfg *cfg)
{
	if (ctrl->nand_version <= 0x0701)
		return cfg->sector_size_1k == 0 && cfg->spare_area_size == 16 &&
			cfg->ecc_level == 15;
	else
		return cfg->sector_size_1k == 0 && ((cfg->spare_area_size == 16 &&
			cfg->ecc_level == 15) ||
			(cfg->spare_area_size == 28 && cfg->ecc_level == 16));
}

/*
 * Set mtd->ooblayout to the appropriate mtd_ooblayout_ops given
 * the layout/configuration.
 * Returns -ERRCODE on failure.
 */
static int brcmnand_hamming_ooblayout_ecc(struct mtd_info *mtd, int section,
					  struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct brcmnand_host *host = nand_get_controller_data(chip);
	struct brcmnand_cfg *cfg = &host->hwcfg;
	int sas = cfg->spare_area_size << cfg->sector_size_1k;
	int sectors = cfg->page_size / (512 << cfg->sector_size_1k);

	if (section >= sectors)
		return -ERANGE;

	oobregion->offset = (section * sas) + 6;
	oobregion->length = 3;

	return 0;
}

static int brcmnand_hamming_ooblayout_free(struct mtd_info *mtd, int section,
					   struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct brcmnand_host *host = nand_get_controller_data(chip);
	struct brcmnand_cfg *cfg = &host->hwcfg;
	int sas = cfg->spare_area_size << cfg->sector_size_1k;
	int sectors = cfg->page_size / (512 << cfg->sector_size_1k);
	u32 next;

	if (section > sectors)
		return -ERANGE;

	next = (section * sas);
	if (section < sectors)
		next += 6;

	if (section) {
		oobregion->offset = ((section - 1) * sas) + 9;
	} else {
		if (cfg->page_size > 512) {
			/* Large page NAND uses first 2 bytes for BBI */
			oobregion->offset = 2;
		} else {
			/* Small page NAND uses last byte before ECC for BBI */
			oobregion->offset = 0;
			next--;
		}
	}

	oobregion->length = next - oobregion->offset;

	return 0;
}

static const struct mtd_ooblayout_ops brcmnand_hamming_ooblayout_ops = {
	.ecc = brcmnand_hamming_ooblayout_ecc,
	.free = brcmnand_hamming_ooblayout_free,
};
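/*
 * Example: a 2048B page with a 16B spare area per 512B sector yields four
 * OOB sections; per the .ecc callback above, the Hamming ECC for each
 * section sits at bytes 6-8 of its 16-byte spare region.
 */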

static int brcmnand_bch_ooblayout_ecc(struct mtd_info *mtd, int section,
				      struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct brcmnand_host *host = nand_get_controller_data(chip);
	struct brcmnand_cfg *cfg = &host->hwcfg;
	int sas = cfg->spare_area_size << cfg->sector_size_1k;
	int sectors = cfg->page_size / (512 << cfg->sector_size_1k);

	if (section >= sectors)
		return -ERANGE;

	oobregion->offset = ((section + 1) * sas) - chip->ecc.bytes;
	oobregion->length = chip->ecc.bytes;

	return 0;
}

static int brcmnand_bch_ooblayout_free_lp(struct mtd_info *mtd, int section,
					  struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct brcmnand_host *host = nand_get_controller_data(chip);
	struct brcmnand_cfg *cfg = &host->hwcfg;
	int sas = cfg->spare_area_size << cfg->sector_size_1k;
	int sectors = cfg->page_size / (512 << cfg->sector_size_1k);

	if (section >= sectors)
		return -ERANGE;

	if (sas <= chip->ecc.bytes)
		return 0;

	oobregion->offset = section * sas;
	oobregion->length = sas - chip->ecc.bytes;

	if (!section) {
		oobregion->offset++;
		oobregion->length--;
	}

	return 0;
}

static int brcmnand_bch_ooblayout_free_sp(struct mtd_info *mtd, int section,
					  struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct brcmnand_host *host = nand_get_controller_data(chip);
	struct brcmnand_cfg *cfg = &host->hwcfg;
	int sas = cfg->spare_area_size << cfg->sector_size_1k;

	if (section > 1 || sas - chip->ecc.bytes < 6 ||
	    (section && sas - chip->ecc.bytes == 6))
		return -ERANGE;

	if (!section) {
		oobregion->offset = 0;
		oobregion->length = 5;
	} else {
		oobregion->offset = 6;
		oobregion->length = sas - chip->ecc.bytes - 6;
	}

	return 0;
}

static const struct mtd_ooblayout_ops brcmnand_bch_lp_ooblayout_ops = {
	.ecc = brcmnand_bch_ooblayout_ecc,
	.free = brcmnand_bch_ooblayout_free_lp,
};

static const struct mtd_ooblayout_ops brcmnand_bch_sp_ooblayout_ops = {
	.ecc = brcmnand_bch_ooblayout_ecc,
	.free = brcmnand_bch_ooblayout_free_sp,
};

static int brcmstb_choose_ecc_layout(struct brcmnand_host *host)
{
	struct brcmnand_cfg *p = &host->hwcfg;
	struct mtd_info *mtd = nand_to_mtd(&host->chip);
	struct nand_ecc_ctrl *ecc = &host->chip.ecc;
	unsigned int ecc_level = p->ecc_level;
	int sas = p->spare_area_size << p->sector_size_1k;
	int sectors = p->page_size / (512 << p->sector_size_1k);

	if (p->sector_size_1k)
		ecc_level <<= 1;

	if (is_hamming_ecc(host->ctrl, p)) {
		ecc->bytes = 3 * sectors;
		mtd_set_ooblayout(mtd, &brcmnand_hamming_ooblayout_ops);
		return 0;
	}

	/*
	 * CONTROLLER_VERSION:
	 *   < v5.0: ECC_REQ = ceil(BCH_T * 13/8)
	 *  >= v5.0: ECC_REQ = ceil(BCH_T * 14/8)
	 * But we will just be conservative.
	 */
	ecc->bytes = DIV_ROUND_UP(ecc_level * 14, 8);
	if (p->page_size == 512)
		mtd_set_ooblayout(mtd, &brcmnand_bch_sp_ooblayout_ops);
	else
		mtd_set_ooblayout(mtd, &brcmnand_bch_lp_ooblayout_ops);

	if (ecc->bytes >= sas) {
		dev_err(&host->pdev->dev,
			"error: ECC too large for OOB (ECC bytes %d, spare sector %d)\n",
			ecc->bytes, sas);
		return -EINVAL;
	}

	return 0;
}

static void brcmnand_wp(struct mtd_info *mtd, int wp)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct brcmnand_host *host = nand_get_controller_data(chip);
	struct brcmnand_controller *ctrl = host->ctrl;

	if ((ctrl->features & BRCMNAND_HAS_WP) && wp_on == 1) {
		static int old_wp = -1;
		int ret;

		if (old_wp != wp) {
			dev_dbg(ctrl->dev, "WP %s\n", wp ? "on" : "off");
			old_wp = wp;
		}

		/*
		 * make sure ctrl/flash ready before and after
		 * changing state of #WP pin
		 */
		ret = bcmnand_ctrl_poll_status(ctrl, NAND_CTRL_RDY |
					       NAND_STATUS_READY,
					       NAND_CTRL_RDY |
					       NAND_STATUS_READY, 0);
		if (ret)
			return;

		brcmnand_set_wp(ctrl, wp);
		nand_status_op(chip, NULL);
		/* NAND_STATUS_WP 0x00 = protected, 0x80 = not protected */
		ret = bcmnand_ctrl_poll_status(ctrl,
					       NAND_CTRL_RDY |
					       NAND_STATUS_READY |
					       NAND_STATUS_WP,
					       NAND_CTRL_RDY |
					       NAND_STATUS_READY |
					       (wp ? 0 : NAND_STATUS_WP), 0);

		if (ret)
			dev_err_ratelimited(&host->pdev->dev,
					    "nand #WP expected %s\n",
					    wp ? "on" : "off");
	}
}

/* Helper functions for reading and writing OOB registers */
static inline u8 oob_reg_read(struct brcmnand_controller *ctrl, u32 offs)
{
	u16 offset0, offset10, reg_offs;

	offset0 = ctrl->reg_offsets[BRCMNAND_OOB_READ_BASE];
	offset10 = ctrl->reg_offsets[BRCMNAND_OOB_READ_10_BASE];

	if (offs >= ctrl->max_oob)
		return 0x77;

	if (offs >= 16 && offset10)
		reg_offs = offset10 + ((offs - 0x10) & ~0x03);
	else
		reg_offs = offset0 + (offs & ~0x03);

	return nand_readreg(ctrl, reg_offs) >> (24 - ((offs & 0x03) << 3));
}

static inline void oob_reg_write(struct brcmnand_controller *ctrl, u32 offs,
				 u32 data)
{
	u16 offset0, offset10, reg_offs;

	offset0 = ctrl->reg_offsets[BRCMNAND_OOB_WRITE_BASE];
	offset10 = ctrl->reg_offsets[BRCMNAND_OOB_WRITE_10_BASE];

	if (offs >= ctrl->max_oob)
		return;

	if (offs >= 16 && offset10)
		reg_offs = offset10 + ((offs - 0x10) & ~0x03);
	else
		reg_offs = offset0 + (offs & ~0x03);

	nand_writereg(ctrl, reg_offs, data);
}

/*
 * read_oob_from_regs - read data from OOB registers
 * @ctrl: NAND controller
 * @i: sub-page sector index
 * @oob: buffer to read to
 * @sas: spare area sector size (i.e., OOB size per FLASH_CACHE)
 * @sector_1k: 1 for 1KiB sectors, 0 for 512B, other values are illegal
 */
static int read_oob_from_regs(struct brcmnand_controller *ctrl, int i, u8 *oob,
			      int sas, int sector_1k)
{
	int tbytes = sas << sector_1k;
	int j;

	/* Adjust OOB values for 1K sector size */
	if (sector_1k && (i & 0x01))
		tbytes = max(0, tbytes - (int)ctrl->max_oob);
	tbytes = min_t(int, tbytes, ctrl->max_oob);

	for (j = 0; j < tbytes; j++)
		oob[j] = oob_reg_read(ctrl, j);
	return tbytes;
}

/*
 * write_oob_to_regs - write data to OOB registers
 * @i: sub-page sector index
 * @oob: buffer to write from
 * @sas: spare area sector size (i.e., OOB size per FLASH_CACHE)
 * @sector_1k: 1 for 1KiB sectors, 0 for 512B, other values are illegal
 */
static int write_oob_to_regs(struct brcmnand_controller *ctrl, int i,
			     const u8 *oob, int sas, int sector_1k)
{
	int tbytes = sas << sector_1k;
	int j, k = 0;
	u32 last = 0xffffffff;
	u8 *plast = (u8 *)&last;

	/* Adjust OOB values for 1K sector size */
	if (sector_1k && (i & 0x01))
		tbytes = max(0, tbytes - (int)ctrl->max_oob);
	tbytes = min_t(int, tbytes, ctrl->max_oob);

	/*
	 * tbytes may not be a multiple of 4 (words). Make sure we don't
	 * read past the end of the buffer: stop at the last full word and
	 * handle the tail separately.
	 */
	for (j = 0; (j + 3) < tbytes; j += 4)
		oob_reg_write(ctrl, j,
			      (oob[j + 0] << 24) |
			      (oob[j + 1] << 16) |
			      (oob[j + 2] << 8) |
			      (oob[j + 3] << 0));

	/* handle the remaining bytes */
	while (j < tbytes)
		plast[k++] = oob[j++];

	if (tbytes & 0x3)
		oob_reg_write(ctrl, (tbytes & ~0x3), (__force u32)cpu_to_be32(last));

	return tbytes;
}

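/*
 * Assumption: EDU_DONE holds a small count of completed commands (see
 * EDU_DONE_MASK), so it is written several times here to drain any stale
 * done events before the first transfer.
 */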
static void brcmnand_edu_init(struct brcmnand_controller *ctrl)
{
	/* initialize edu */
	edu_writel(ctrl, EDU_ERR_STATUS, 0);
	edu_readl(ctrl, EDU_ERR_STATUS);
	edu_writel(ctrl, EDU_DONE, 0);
	edu_writel(ctrl, EDU_DONE, 0);
	edu_writel(ctrl, EDU_DONE, 0);
	edu_writel(ctrl, EDU_DONE, 0);
	edu_readl(ctrl, EDU_DONE);
}

/* edu irq */
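/*
 * One EDU_CMD moves a single FC_BYTES chunk. While chunks remain
 * (edu_count), the handler advances the DRAM/flash addresses, copies the
 * matching OOB words, and re-issues the command; once the last chunk is
 * done it completes edu_done.
 */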
static irqreturn_t brcmnand_edu_irq(int irq, void *data)
{
	struct brcmnand_controller *ctrl = data;

	if (ctrl->edu_count) {
		ctrl->edu_count--;
		while (!(edu_readl(ctrl, EDU_DONE) & EDU_DONE_MASK))
			udelay(1);
		edu_writel(ctrl, EDU_DONE, 0);
		edu_readl(ctrl, EDU_DONE);
	}

	if (ctrl->edu_count) {
		ctrl->edu_dram_addr += FC_BYTES;
		ctrl->edu_ext_addr += FC_BYTES;

		edu_writel(ctrl, EDU_DRAM_ADDR, (u32)ctrl->edu_dram_addr);
		edu_readl(ctrl, EDU_DRAM_ADDR);
		edu_writel(ctrl, EDU_EXT_ADDR, ctrl->edu_ext_addr);
		edu_readl(ctrl, EDU_EXT_ADDR);

		if (ctrl->oob) {
			if (ctrl->edu_cmd == EDU_CMD_READ) {
				ctrl->oob += read_oob_from_regs(ctrl,
							ctrl->edu_count + 1,
							ctrl->oob, ctrl->sas,
							ctrl->sector_size_1k);
			} else {
				brcmnand_write_reg(ctrl, BRCMNAND_CMD_ADDRESS,
						   ctrl->edu_ext_addr);
				brcmnand_read_reg(ctrl, BRCMNAND_CMD_ADDRESS);
				ctrl->oob += write_oob_to_regs(ctrl,
							ctrl->edu_count,
							ctrl->oob, ctrl->sas,
							ctrl->sector_size_1k);
			}
		}

		mb(); /* flush previous writes */
		edu_writel(ctrl, EDU_CMD, ctrl->edu_cmd);
		edu_readl(ctrl, EDU_CMD);

		return IRQ_HANDLED;
	}

	complete(&ctrl->edu_done);

	return IRQ_HANDLED;
}

static irqreturn_t brcmnand_ctlrdy_irq(int irq, void *data)
{
	struct brcmnand_controller *ctrl = data;

	/* Discard all NAND_CTLRDY interrupts during DMA */
	if (ctrl->dma_pending)
		return IRQ_HANDLED;

	/* check if we need to piggyback on the ctlrdy irq */
	if (ctrl->edu_pending) {
		if (irq == ctrl->irq && ((int)ctrl->edu_irq >= 0))
			/* Discard interrupts while using dedicated edu irq */
			return IRQ_HANDLED;

		/* no registered edu irq, call handler */
		return brcmnand_edu_irq(irq, data);
	}

	complete(&ctrl->done);
	return IRQ_HANDLED;
}

/* Handle SoC-specific interrupt hardware */
static irqreturn_t brcmnand_irq(int irq, void *data)
{
	struct brcmnand_controller *ctrl = data;

	if (ctrl->soc->ctlrdy_ack(ctrl->soc))
		return brcmnand_ctlrdy_irq(irq, data);

	return IRQ_NONE;
}

static irqreturn_t brcmnand_dma_irq(int irq, void *data)
{
	struct brcmnand_controller *ctrl = data;

	complete(&ctrl->dma_done);

	return IRQ_HANDLED;
}

static void brcmnand_send_cmd(struct brcmnand_host *host, int cmd)
{
	struct brcmnand_controller *ctrl = host->ctrl;
	int ret;
	u64 cmd_addr;

	cmd_addr = brcmnand_read_reg(ctrl, BRCMNAND_CMD_ADDRESS);

	dev_dbg(ctrl->dev, "send native cmd %d addr 0x%llx\n", cmd, cmd_addr);

	/*
	 * If we came here through _panic_write and there is a pending
	 * command, try to wait for it. If it times out, rather than
	 * hitting BUG_ON, just return so we don't crash while crashing.
	 */
	if (oops_in_progress) {
		if (ctrl->cmd_pending &&
		    bcmnand_ctrl_poll_status(ctrl, NAND_CTRL_RDY, NAND_CTRL_RDY, 0))
			return;
	} else
		BUG_ON(ctrl->cmd_pending != 0);
	ctrl->cmd_pending = cmd;

	ret = bcmnand_ctrl_poll_status(ctrl, NAND_CTRL_RDY, NAND_CTRL_RDY, 0);
	WARN_ON(ret);

	mb(); /* flush previous writes */
	brcmnand_write_reg(ctrl, BRCMNAND_CMD_START,
			   cmd << brcmnand_cmd_shift(ctrl));
}

/***********************************************************************
 * NAND MTD API: read/program/erase
 ***********************************************************************/

static void brcmnand_cmd_ctrl(struct nand_chip *chip, int dat,
			      unsigned int ctrl)
{
	/* intentionally left blank */
}

static bool brcmstb_nand_wait_for_completion(struct nand_chip *chip)
{
	struct brcmnand_host *host = nand_get_controller_data(chip);
	struct brcmnand_controller *ctrl = host->ctrl;
	struct mtd_info *mtd = nand_to_mtd(chip);
	bool err = false;
	int sts;

	if (mtd->oops_panic_write || ctrl->irq < 0) {
		/* switch to interrupt polling and PIO mode */
		disable_ctrl_irqs(ctrl);
		sts = bcmnand_ctrl_poll_status(ctrl, NAND_CTRL_RDY,
					       NAND_CTRL_RDY, 0);
		err = (sts < 0) ? true : false;
	} else {
		unsigned long timeo = msecs_to_jiffies(
						NAND_POLL_STATUS_TIMEOUT_MS);
		/* wait for completion interrupt */
		sts = wait_for_completion_timeout(&ctrl->done, timeo);
		err = (sts <= 0) ? true : false;
	}

	return err;
}

static int brcmnand_waitfunc(struct nand_chip *chip)
{
	struct brcmnand_host *host = nand_get_controller_data(chip);
	struct brcmnand_controller *ctrl = host->ctrl;
	bool err = false;

	dev_dbg(ctrl->dev, "wait on native cmd %d\n", ctrl->cmd_pending);
	if (ctrl->cmd_pending)
		err = brcmstb_nand_wait_for_completion(chip);

	if (err) {
		u32 cmd = brcmnand_read_reg(ctrl, BRCMNAND_CMD_START)
					>> brcmnand_cmd_shift(ctrl);

		dev_err_ratelimited(ctrl->dev,
				    "timeout waiting for command %#02x\n", cmd);
		dev_err_ratelimited(ctrl->dev, "intfc status %08x\n",
				    brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS));
	}
	ctrl->cmd_pending = 0;
	return brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS) &
				 INTFC_FLASH_STATUS;
}

enum {
	LLOP_RE				= BIT(16),
	LLOP_WE				= BIT(17),
	LLOP_ALE			= BIT(18),
	LLOP_CLE			= BIT(19),
	LLOP_RETURN_IDLE		= BIT(31),

	LLOP_DATA_MASK			= GENMASK(15, 0),
};
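/*
 * Low-level ops drive raw CLE/ALE/RE/WE bus cycles one at a time; the
 * driver uses them for operations without a native controller command,
 * e.g. SET_FEATURES/GET_FEATURES in brcmnand_cmdfunc().
 */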

static int brcmnand_low_level_op(struct brcmnand_host *host,
				 enum brcmnand_llop_type type, u32 data,
				 bool last_op)
{
	struct nand_chip *chip = &host->chip;
	struct brcmnand_controller *ctrl = host->ctrl;
	u32 tmp;

	tmp = data & LLOP_DATA_MASK;
	switch (type) {
	case LL_OP_CMD:
		tmp |= LLOP_WE | LLOP_CLE;
		break;
	case LL_OP_ADDR:
		/* WE | ALE */
		tmp |= LLOP_WE | LLOP_ALE;
		break;
	case LL_OP_WR:
		/* WE */
		tmp |= LLOP_WE;
		break;
	case LL_OP_RD:
		/* RE */
		tmp |= LLOP_RE;
		break;
	}
	if (last_op)
		/* RETURN_IDLE */
		tmp |= LLOP_RETURN_IDLE;

	dev_dbg(ctrl->dev, "ll_op cmd %#x\n", tmp);

	brcmnand_write_reg(ctrl, BRCMNAND_LL_OP, tmp);
	(void)brcmnand_read_reg(ctrl, BRCMNAND_LL_OP);

	brcmnand_send_cmd(host, CMD_LOW_LEVEL_OP);
	return brcmnand_waitfunc(chip);
}

static void brcmnand_cmdfunc(struct nand_chip *chip, unsigned command,
			     int column, int page_addr)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct brcmnand_host *host = nand_get_controller_data(chip);
	struct brcmnand_controller *ctrl = host->ctrl;
	u64 addr = (u64)page_addr << chip->page_shift;
	int native_cmd = 0;

	if (command == NAND_CMD_READID || command == NAND_CMD_PARAM ||
	    command == NAND_CMD_RNDOUT)
		addr = (u64)column;
	/* Avoid propagating a negative, don't-care address */
	else if (page_addr < 0)
		addr = 0;

	dev_dbg(ctrl->dev, "cmd 0x%x addr 0x%llx\n", command,
		(unsigned long long)addr);

	host->last_cmd = command;
	host->last_byte = 0;
	host->last_addr = addr;

	switch (command) {
	case NAND_CMD_RESET:
		native_cmd = CMD_FLASH_RESET;
		break;
	case NAND_CMD_STATUS:
		native_cmd = CMD_STATUS_READ;
		break;
	case NAND_CMD_READID:
		native_cmd = CMD_DEVICE_ID_READ;
		break;
	case NAND_CMD_READOOB:
		native_cmd = CMD_SPARE_AREA_READ;
		break;
	case NAND_CMD_ERASE1:
		native_cmd = CMD_BLOCK_ERASE;
		brcmnand_wp(mtd, 0);
		break;
	case NAND_CMD_PARAM:
		native_cmd = CMD_PARAMETER_READ;
		break;
	case NAND_CMD_SET_FEATURES:
	case NAND_CMD_GET_FEATURES:
		brcmnand_low_level_op(host, LL_OP_CMD, command, false);
		brcmnand_low_level_op(host, LL_OP_ADDR, column, false);
		break;
	case NAND_CMD_RNDOUT:
		native_cmd = CMD_PARAMETER_CHANGE_COL;
		addr &= ~((u64)(FC_BYTES - 1));
		/*
		 * HW quirk: PARAMETER_CHANGE_COL requires SECTOR_SIZE_1K=0
		 * NB: hwcfg.sector_size_1k may not be initialized yet
		 */
		if (brcmnand_get_sector_size_1k(host)) {
			host->hwcfg.sector_size_1k =
				brcmnand_get_sector_size_1k(host);
			brcmnand_set_sector_size_1k(host, 0);
		}
		break;
	}

	if (!native_cmd)
		return;

	brcmnand_set_cmd_addr(mtd, addr);
	brcmnand_send_cmd(host, native_cmd);
	brcmnand_waitfunc(chip);

	if (native_cmd == CMD_PARAMETER_READ ||
	    native_cmd == CMD_PARAMETER_CHANGE_COL) {
		/* Copy flash cache word-wise */
		u32 *flash_cache = (u32 *)ctrl->flash_cache;
		int i;

		brcmnand_soc_data_bus_prepare(ctrl->soc, true);

		/*
		 * Must cache the FLASH_CACHE now, since changes in
		 * SECTOR_SIZE_1K may invalidate it
		 */
		for (i = 0; i < FC_WORDS; i++)
			/*
			 * Flash cache is big endian for parameter pages, at
			 * least on STB SoCs
			 */
			flash_cache[i] = be32_to_cpu(brcmnand_read_fc(ctrl, i));

		brcmnand_soc_data_bus_unprepare(ctrl->soc, true);

		/* Cleanup from HW quirk: restore SECTOR_SIZE_1K */
		if (host->hwcfg.sector_size_1k)
			brcmnand_set_sector_size_1k(host,
						    host->hwcfg.sector_size_1k);
	}

1851	/* Re-enabling protection is necessary only after erase */
1852 if (command == NAND_CMD_ERASE1)
1853 brcmnand_wp(mtd, 1);
1854 }
1855
1856 static uint8_t brcmnand_read_byte(struct nand_chip *chip)
1857 {
1858 struct brcmnand_host *host = nand_get_controller_data(chip);
1859 struct brcmnand_controller *ctrl = host->ctrl;
1860 uint8_t ret = 0;
1861 int addr, offs;
1862
1863 switch (host->last_cmd) {
1864 case NAND_CMD_READID:
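		/*
		 * The 8-byte device ID is packed MSB-first into two 32-bit
		 * registers: byte n (n < 4) comes from
		 * BRCMNAND_ID >> (24 - 8*n), bytes 4-7 from
		 * BRCMNAND_ID_EXT >> (56 - 8*n).
		 */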
1865 if (host->last_byte < 4)
1866 ret = brcmnand_read_reg(ctrl, BRCMNAND_ID) >>
1867 (24 - (host->last_byte << 3));
1868 else if (host->last_byte < 8)
1869 ret = brcmnand_read_reg(ctrl, BRCMNAND_ID_EXT) >>
1870 (56 - (host->last_byte << 3));
1871 break;
1872
1873 case NAND_CMD_READOOB:
1874 ret = oob_reg_read(ctrl, host->last_byte);
1875 break;
1876
1877 case NAND_CMD_STATUS:
1878 ret = brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS) &
1879 INTFC_FLASH_STATUS;
1880 if (wp_on) /* hide WP status */
1881 ret |= NAND_STATUS_WP;
1882 break;
1883
1884 case NAND_CMD_PARAM:
1885 case NAND_CMD_RNDOUT:
1886 addr = host->last_addr + host->last_byte;
1887 offs = addr & (FC_BYTES - 1);
1888
1889 /* At FC_BYTES boundary, switch to next column */
1890 if (host->last_byte > 0 && offs == 0)
1891 nand_change_read_column_op(chip, addr, NULL, 0, false);
1892
1893 ret = ctrl->flash_cache[offs];
1894 break;
1895 case NAND_CMD_GET_FEATURES:
1896 if (host->last_byte >= ONFI_SUBFEATURE_PARAM_LEN) {
1897 ret = 0;
1898 } else {
1899 bool last = host->last_byte ==
1900 ONFI_SUBFEATURE_PARAM_LEN - 1;
1901 brcmnand_low_level_op(host, LL_OP_RD, 0, last);
1902 ret = brcmnand_read_reg(ctrl, BRCMNAND_LL_RDATA) & 0xff;
1903 }
1904 }
1905
1906 dev_dbg(ctrl->dev, "read byte = 0x%02x\n", ret);
1907 host->last_byte++;
1908
1909 return ret;
1910 }
1911
1912 static void brcmnand_read_buf(struct nand_chip *chip, uint8_t *buf, int len)
1913 {
1914 int i;
1915
1916 for (i = 0; i < len; i++, buf++)
1917 *buf = brcmnand_read_byte(chip);
1918 }
1919
1920 static void brcmnand_write_buf(struct nand_chip *chip, const uint8_t *buf,
1921 int len)
1922 {
1923 int i;
1924 struct brcmnand_host *host = nand_get_controller_data(chip);
1925
1926 switch (host->last_cmd) {
1927 case NAND_CMD_SET_FEATURES:
1928 for (i = 0; i < len; i++)
1929 brcmnand_low_level_op(host, LL_OP_WR, buf[i],
1930 (i + 1) == len);
1931 break;
1932 default:
1933 BUG();
1934 break;
1935 }
1936 }
1937
1938 /*
1939 * Kick EDU engine
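 *
 * A sketch of the data flow in this function: the buffer is DMA-mapped
 * and transferred in 512B flash-cache (FC_BYTES) chunks; EDU_LENGTH is
 * programmed with one chunk and ctrl->edu_count with the number of
 * chunks, which the EDU interrupt path (not shown in this excerpt)
 * works through before completing ctrl->edu_done. OOB bytes, when
 * requested, still pass through the controller's OOB registers via
 * write_oob_to_regs()/read_oob_from_regs().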
1940 */
1941 static int brcmnand_edu_trans(struct brcmnand_host *host, u64 addr, u32 *buf,
1942 u8 *oob, u32 len, u8 cmd)
1943 {
1944 struct brcmnand_controller *ctrl = host->ctrl;
1945 struct brcmnand_cfg *cfg = &host->hwcfg;
1946 unsigned long timeo = msecs_to_jiffies(200);
1947 int ret = 0;
1948 int dir = (cmd == CMD_PAGE_READ ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
1949 u8 edu_cmd = (cmd == CMD_PAGE_READ ? EDU_CMD_READ : EDU_CMD_WRITE);
1950 unsigned int trans = len >> FC_SHIFT;
1951 dma_addr_t pa;
1952
1953 dev_dbg(ctrl->dev, "EDU %s %p:%p\n", ((edu_cmd == EDU_CMD_READ) ?
1954 "read" : "write"), buf, oob);
1955
1956 pa = dma_map_single(ctrl->dev, buf, len, dir);
1957 if (dma_mapping_error(ctrl->dev, pa)) {
1958 dev_err(ctrl->dev, "unable to map buffer for EDU DMA\n");
1959 return -ENOMEM;
1960 }
1961
1962 ctrl->edu_pending = true;
1963 ctrl->edu_dram_addr = pa;
1964 ctrl->edu_ext_addr = addr;
1965 ctrl->edu_cmd = edu_cmd;
1966 ctrl->edu_count = trans;
1967 ctrl->sas = cfg->spare_area_size;
1968 ctrl->oob = oob;
1969
1970 edu_writel(ctrl, EDU_DRAM_ADDR, (u32)ctrl->edu_dram_addr);
1971 edu_readl(ctrl, EDU_DRAM_ADDR);
1972 edu_writel(ctrl, EDU_EXT_ADDR, ctrl->edu_ext_addr);
1973 edu_readl(ctrl, EDU_EXT_ADDR);
1974 edu_writel(ctrl, EDU_LENGTH, FC_BYTES);
1975 edu_readl(ctrl, EDU_LENGTH);
1976
1977 if (ctrl->oob && (ctrl->edu_cmd == EDU_CMD_WRITE)) {
1978 brcmnand_write_reg(ctrl, BRCMNAND_CMD_ADDRESS,
1979 ctrl->edu_ext_addr);
1980 brcmnand_read_reg(ctrl, BRCMNAND_CMD_ADDRESS);
1981 ctrl->oob += write_oob_to_regs(ctrl,
1982 1,
1983 ctrl->oob, ctrl->sas,
1984 ctrl->sector_size_1k);
1985 }
1986
1987 /* Start edu engine */
1988 mb(); /* flush previous writes */
1989 edu_writel(ctrl, EDU_CMD, ctrl->edu_cmd);
1990 edu_readl(ctrl, EDU_CMD);
1991
1992 if (wait_for_completion_timeout(&ctrl->edu_done, timeo) <= 0) {
1993 dev_err(ctrl->dev,
1994 "timeout waiting for EDU; status %#x, error status %#x\n",
1995 edu_readl(ctrl, EDU_STATUS),
1996 edu_readl(ctrl, EDU_ERR_STATUS));
1997 }
1998
1999 dma_unmap_single(ctrl->dev, pa, len, dir);
2000
2001 /* read last subpage oob */
2002 if (ctrl->oob && (ctrl->edu_cmd == EDU_CMD_READ)) {
2003 ctrl->oob += read_oob_from_regs(ctrl,
2004 1,
2005 ctrl->oob, ctrl->sas,
2006 ctrl->sector_size_1k);
2007 }
2008
2009 /* for program page check NAND status */
2010 if (((brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS) &
2011 INTFC_FLASH_STATUS) & NAND_STATUS_FAIL) &&
2012 edu_cmd == EDU_CMD_WRITE) {
2013 dev_info(ctrl->dev, "program failed at %llx\n",
2014 (unsigned long long)addr);
2015 ret = -EIO;
2016 }
2017
2018 /* Make sure the EDU status is clean */
2019 if (edu_readl(ctrl, EDU_STATUS) & EDU_STATUS_ACTIVE)
2020 dev_warn(ctrl->dev, "EDU still active: %#x\n",
2021 edu_readl(ctrl, EDU_STATUS));
2022
2023 if (unlikely(edu_readl(ctrl, EDU_ERR_STATUS) & EDU_ERR_STATUS_ERRACK)) {
2024 dev_warn(ctrl->dev, "EDU RBUS error at addr %llx\n",
2025 (unsigned long long)addr);
2026 ret = -EIO;
2027 }
2028
2029 ctrl->edu_pending = false;
2030 brcmnand_edu_init(ctrl);
2031 edu_writel(ctrl, EDU_STOP, 0); /* force stop */
2032 edu_readl(ctrl, EDU_STOP);
2033
2034 if (!ret && edu_cmd == EDU_CMD_READ) {
2035 u64 err_addr = 0;
2036
2037 /*
2038		 * Check for ECC errors here; subpage ECC errors are
2039		 * retained in the ECC error address register.
2040 */
2041 err_addr = brcmnand_get_uncorrecc_addr(ctrl);
2042 if (!err_addr) {
2043 err_addr = brcmnand_get_correcc_addr(ctrl);
2044 if (err_addr)
2045 ret = -EUCLEAN;
2046 } else
2047 ret = -EBADMSG;
2048 }
2049
2050 return ret;
2051 }
2052
2053 /*
2054 * Construct a FLASH_DMA descriptor as part of a linked list. You must know the
2055 * following ahead of time:
2056 * - Is this descriptor the beginning or end of a linked list?
2057 * - What is the (DMA) address of the next descriptor in the linked list?
2058 */
2059 static int brcmnand_fill_dma_desc(struct brcmnand_host *host,
2060 struct brcm_nand_dma_desc *desc, u64 addr,
2061 dma_addr_t buf, u32 len, u8 dma_cmd,
2062 bool begin, bool end,
2063 dma_addr_t next_desc)
2064 {
2065 memset(desc, 0, sizeof(*desc));
2066 /* Descriptors are written in native byte order (wordwise) */
2067 desc->next_desc = lower_32_bits(next_desc);
2068 desc->next_desc_ext = upper_32_bits(next_desc);
2069 desc->cmd_irq = (dma_cmd << 24) |
2070 (end ? (0x03 << 8) : 0) | /* IRQ | STOP */
2071 (!!begin) | ((!!end) << 1); /* head, tail */
2072 #ifdef CONFIG_CPU_BIG_ENDIAN
2073 desc->cmd_irq |= 0x01 << 12;
2074 #endif
2075 desc->dram_addr = lower_32_bits(buf);
2076 desc->dram_addr_ext = upper_32_bits(buf);
2077 desc->tfr_len = len;
2078 desc->total_len = len;
2079 desc->flash_addr = lower_32_bits(addr);
2080 desc->flash_addr_ext = upper_32_bits(addr);
2081 desc->cs = host->cs;
2082 desc->status_valid = 0x01;
2083 return 0;
2084 }
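/*
 * Worked example (a sketch, not driver output): a single-descriptor page
 * read uses dma_cmd = CMD_PAGE_READ (0x01) with begin = end = true, so
 * cmd_irq = (0x01 << 24) | (0x03 << 8) | 0x03 = 0x01000303 on
 * little-endian systems; big-endian additionally sets bit 12.
 */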
2085
2086 /*
2087 * Kick the FLASH_DMA engine, with a given DMA descriptor
2088 */
2089 static void brcmnand_dma_run(struct brcmnand_host *host, dma_addr_t desc)
2090 {
2091 struct brcmnand_controller *ctrl = host->ctrl;
2092 unsigned long timeo = msecs_to_jiffies(100);
2093
2094 flash_dma_writel(ctrl, FLASH_DMA_FIRST_DESC, lower_32_bits(desc));
2095 (void)flash_dma_readl(ctrl, FLASH_DMA_FIRST_DESC);
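	/* Controllers newer than v6.02 accept a 64-bit descriptor address */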
2096 if (ctrl->nand_version > 0x0602) {
2097 flash_dma_writel(ctrl, FLASH_DMA_FIRST_DESC_EXT,
2098 upper_32_bits(desc));
2099 (void)flash_dma_readl(ctrl, FLASH_DMA_FIRST_DESC_EXT);
2100 }
2101
2102 /* Start FLASH_DMA engine */
2103 ctrl->dma_pending = true;
2104 mb(); /* flush previous writes */
2105 flash_dma_writel(ctrl, FLASH_DMA_CTRL, 0x03); /* wake | run */
2106
2107 if (wait_for_completion_timeout(&ctrl->dma_done, timeo) <= 0) {
2108 dev_err(ctrl->dev,
2109 "timeout waiting for DMA; status %#x, error status %#x\n",
2110 flash_dma_readl(ctrl, FLASH_DMA_STATUS),
2111 flash_dma_readl(ctrl, FLASH_DMA_ERROR_STATUS));
2112 }
2113 ctrl->dma_pending = false;
2114 flash_dma_writel(ctrl, FLASH_DMA_CTRL, 0); /* force stop */
2115 }
2116
2117 static int brcmnand_dma_trans(struct brcmnand_host *host, u64 addr, u32 *buf,
2118 u8 *oob, u32 len, u8 dma_cmd)
2119 {
2120 struct brcmnand_controller *ctrl = host->ctrl;
2121 dma_addr_t buf_pa;
2122 int dir = dma_cmd == CMD_PAGE_READ ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
2123
2124 buf_pa = dma_map_single(ctrl->dev, buf, len, dir);
2125 if (dma_mapping_error(ctrl->dev, buf_pa)) {
2126 dev_err(ctrl->dev, "unable to map buffer for DMA\n");
2127 return -ENOMEM;
2128 }
2129
2130 brcmnand_fill_dma_desc(host, ctrl->dma_desc, addr, buf_pa, len,
2131 dma_cmd, true, true, 0);
2132
2133 brcmnand_dma_run(host, ctrl->dma_pa);
2134
2135 dma_unmap_single(ctrl->dev, buf_pa, len, dir);
2136
2137 if (ctrl->dma_desc->status_valid & FLASH_DMA_ECC_ERROR)
2138 return -EBADMSG;
2139 else if (ctrl->dma_desc->status_valid & FLASH_DMA_CORR_ERROR)
2140 return -EUCLEAN;
2141
2142 return 0;
2143 }
2144
2145 /*
2146 * Assumes proper CS is already set
2147 */
2148 static int brcmnand_read_by_pio(struct mtd_info *mtd, struct nand_chip *chip,
2149 u64 addr, unsigned int trans, u32 *buf,
2150 u8 *oob, u64 *err_addr)
2151 {
2152 struct brcmnand_host *host = nand_get_controller_data(chip);
2153 struct brcmnand_controller *ctrl = host->ctrl;
2154 int i, j, ret = 0;
2155
2156 brcmnand_clear_ecc_addr(ctrl);
2157
2158 for (i = 0; i < trans; i++, addr += FC_BYTES) {
2159 brcmnand_set_cmd_addr(mtd, addr);
2160 /* SPARE_AREA_READ does not use ECC, so just use PAGE_READ */
2161 brcmnand_send_cmd(host, CMD_PAGE_READ);
2162 brcmnand_waitfunc(chip);
2163
2164 if (likely(buf)) {
2165 brcmnand_soc_data_bus_prepare(ctrl->soc, false);
2166
2167 for (j = 0; j < FC_WORDS; j++, buf++)
2168 *buf = brcmnand_read_fc(ctrl, j);
2169
2170 brcmnand_soc_data_bus_unprepare(ctrl->soc, false);
2171 }
2172
2173 if (oob)
2174 oob += read_oob_from_regs(ctrl, i, oob,
2175 mtd->oobsize / trans,
2176 host->hwcfg.sector_size_1k);
2177
2178 if (ret != -EBADMSG) {
2179 *err_addr = brcmnand_get_uncorrecc_addr(ctrl);
2180
2181 if (*err_addr)
2182 ret = -EBADMSG;
2183 }
2184
2185 if (!ret) {
2186 *err_addr = brcmnand_get_correcc_addr(ctrl);
2187
2188 if (*err_addr)
2189 ret = -EUCLEAN;
2190 }
2191 }
2192
2193 return ret;
2194 }
2195
2196 /*
2197 * Check a page to see if it is erased (w/ bitflips) after an uncorrectable ECC
2198 * error
2199 *
2200  * Because the HW ECC signals an ECC error if an erased page has even a single
2201 * bitflip, we must check each ECC error to see if it is actually an erased
2202 * page with bitflips, not a truly corrupted page.
2203 *
2204 * On a real error, return a negative error code (-EBADMSG for ECC error), and
2205 * buf will contain raw data.
2206  * Otherwise, buf is filled with 0xffs and the maximum number of
2207  * bitflips-per-ECC-sector is returned to the caller.
2208 *
2209 */
2210 static int brcmstb_nand_verify_erased_page(struct mtd_info *mtd,
2211 struct nand_chip *chip, void *buf, u64 addr)
2212 {
2213 struct mtd_oob_region ecc;
2214 int i;
2215 int bitflips = 0;
2216 int page = addr >> chip->page_shift;
2217 int ret;
2218 void *ecc_bytes;
2219 void *ecc_chunk;
2220
2221 if (!buf)
2222 buf = nand_get_data_buf(chip);
2223
2224 /* read without ecc for verification */
2225 ret = chip->ecc.read_page_raw(chip, buf, true, page);
2226 if (ret)
2227 return ret;
2228
2229 for (i = 0; i < chip->ecc.steps; i++) {
2230 ecc_chunk = buf + chip->ecc.size * i;
2231
2232 mtd_ooblayout_ecc(mtd, i, &ecc);
2233 ecc_bytes = chip->oob_poi + ecc.offset;
2234
2235 ret = nand_check_erased_ecc_chunk(ecc_chunk, chip->ecc.size,
2236 ecc_bytes, ecc.length,
2237 NULL, 0,
2238 chip->ecc.strength);
2239 if (ret < 0)
2240 return ret;
2241
2242 bitflips = max(bitflips, ret);
2243 }
2244
2245 return bitflips;
2246 }
2247
2248 static int brcmnand_read(struct mtd_info *mtd, struct nand_chip *chip,
2249 u64 addr, unsigned int trans, u32 *buf, u8 *oob)
2250 {
2251 struct brcmnand_host *host = nand_get_controller_data(chip);
2252 struct brcmnand_controller *ctrl = host->ctrl;
2253 u64 err_addr = 0;
2254 int err;
2255 bool retry = true;
2256 bool edu_err = false;
2257
2258 dev_dbg(ctrl->dev, "read %llx -> %p\n", (unsigned long long)addr, buf);
2259
2260 try_dmaread:
2261 brcmnand_clear_ecc_addr(ctrl);
2262
2263 if (ctrl->dma_trans && (has_edu(ctrl) || !oob) &&
2264 flash_dma_buf_ok(buf)) {
2265 err = ctrl->dma_trans(host, addr, buf, oob,
2266 trans * FC_BYTES,
2267 CMD_PAGE_READ);
2268
2269 if (err) {
2270 if (mtd_is_bitflip_or_eccerr(err))
2271 err_addr = addr;
2272 else
2273 return -EIO;
2274 }
2275
2276 if (has_edu(ctrl) && err_addr)
2277 edu_err = true;
2278
2279 } else {
2280 if (oob)
2281 memset(oob, 0x99, mtd->oobsize);
2282
2283 err = brcmnand_read_by_pio(mtd, chip, addr, trans, buf,
2284 oob, &err_addr);
2285 }
2286
2287 if (mtd_is_eccerr(err)) {
2288 /*
2289		 * On controller versions 7.0 and 7.1, a DMA read that follows a
2290		 * prior PIO read which reported an uncorrectable error can see a
2291		 * stale error captured by the DMA engine; it is cleared only on a
2292		 * subsequent DMA read.
2293		 * Just retry once to clear a possible false error reported for
2294		 * the current DMA read.
2295 */
2296 if ((ctrl->nand_version == 0x0700) ||
2297 (ctrl->nand_version == 0x0701)) {
2298 if (retry) {
2299 retry = false;
2300 goto try_dmaread;
2301 }
2302 }
2303
2304 /*
2305		 * Controller version 7.2 has a HW mechanism to detect erased-page
2306		 * bitflips; apply SW verification for older controllers only.
2307 */
2308 if (ctrl->nand_version < 0x0702) {
2309 err = brcmstb_nand_verify_erased_page(mtd, chip, buf,
2310 addr);
2311 /* erased page bitflips corrected */
2312 if (err >= 0)
2313 return err;
2314 }
2315
2316 dev_dbg(ctrl->dev, "uncorrectable error at 0x%llx\n",
2317 (unsigned long long)err_addr);
2318 mtd->ecc_stats.failed++;
2319 /* NAND layer expects zero on ECC errors */
2320 return 0;
2321 }
2322
2323 if (mtd_is_bitflip(err)) {
2324 unsigned int corrected = brcmnand_count_corrected(ctrl);
2325
2326 /* in case of EDU correctable error we read again using PIO */
2327 if (edu_err)
2328 err = brcmnand_read_by_pio(mtd, chip, addr, trans, buf,
2329 oob, &err_addr);
2330
2331 dev_dbg(ctrl->dev, "corrected error at 0x%llx\n",
2332 (unsigned long long)err_addr);
2333 mtd->ecc_stats.corrected += corrected;
2334 /* Always exceed the software-imposed threshold */
2335 return max(mtd->bitflip_threshold, corrected);
2336 }
2337
2338 return 0;
2339 }
2340
2341 static int brcmnand_read_page(struct nand_chip *chip, uint8_t *buf,
2342 int oob_required, int page)
2343 {
2344 struct mtd_info *mtd = nand_to_mtd(chip);
2345 struct brcmnand_host *host = nand_get_controller_data(chip);
2346 u8 *oob = oob_required ? (u8 *)chip->oob_poi : NULL;
2347
2348 nand_read_page_op(chip, page, 0, NULL, 0);
2349
2350 return brcmnand_read(mtd, chip, host->last_addr,
2351 mtd->writesize >> FC_SHIFT, (u32 *)buf, oob);
2352 }
2353
2354 static int brcmnand_read_page_raw(struct nand_chip *chip, uint8_t *buf,
2355 int oob_required, int page)
2356 {
2357 struct brcmnand_host *host = nand_get_controller_data(chip);
2358 struct mtd_info *mtd = nand_to_mtd(chip);
2359 u8 *oob = oob_required ? (u8 *)chip->oob_poi : NULL;
2360 int ret;
2361
2362 nand_read_page_op(chip, page, 0, NULL, 0);
2363
2364 brcmnand_set_ecc_enabled(host, 0);
2365 ret = brcmnand_read(mtd, chip, host->last_addr,
2366 mtd->writesize >> FC_SHIFT, (u32 *)buf, oob);
2367 brcmnand_set_ecc_enabled(host, 1);
2368 return ret;
2369 }
2370
2371 static int brcmnand_read_oob(struct nand_chip *chip, int page)
2372 {
2373 struct mtd_info *mtd = nand_to_mtd(chip);
2374
2375 return brcmnand_read(mtd, chip, (u64)page << chip->page_shift,
2376 mtd->writesize >> FC_SHIFT,
2377 NULL, (u8 *)chip->oob_poi);
2378 }
2379
2380 static int brcmnand_read_oob_raw(struct nand_chip *chip, int page)
2381 {
2382 struct mtd_info *mtd = nand_to_mtd(chip);
2383 struct brcmnand_host *host = nand_get_controller_data(chip);
2384
2385 brcmnand_set_ecc_enabled(host, 0);
2386 brcmnand_read(mtd, chip, (u64)page << chip->page_shift,
2387 mtd->writesize >> FC_SHIFT,
2388 NULL, (u8 *)chip->oob_poi);
2389 brcmnand_set_ecc_enabled(host, 1);
2390 return 0;
2391 }
2392
2393 static int brcmnand_write(struct mtd_info *mtd, struct nand_chip *chip,
2394 u64 addr, const u32 *buf, u8 *oob)
2395 {
2396 struct brcmnand_host *host = nand_get_controller_data(chip);
2397 struct brcmnand_controller *ctrl = host->ctrl;
2398 unsigned int i, j, trans = mtd->writesize >> FC_SHIFT;
2399 int status, ret = 0;
2400
2401 dev_dbg(ctrl->dev, "write %llx <- %p\n", (unsigned long long)addr, buf);
2402
2403 if (unlikely((unsigned long)buf & 0x03)) {
2404 dev_warn(ctrl->dev, "unaligned buffer: %p\n", buf);
2405 buf = (u32 *)((unsigned long)buf & ~0x03);
2406 }
2407
2408 brcmnand_wp(mtd, 0);
2409
2410 for (i = 0; i < ctrl->max_oob; i += 4)
2411 oob_reg_write(ctrl, i, 0xffffffff);
2412
2413 if (mtd->oops_panic_write)
2414 /* switch to interrupt polling and PIO mode */
2415 disable_ctrl_irqs(ctrl);
2416
2417 if (use_dma(ctrl) && (has_edu(ctrl) || !oob) && flash_dma_buf_ok(buf)) {
2418 if (ctrl->dma_trans(host, addr, (u32 *)buf, oob, mtd->writesize,
2419 CMD_PROGRAM_PAGE))
2420
2421 ret = -EIO;
2422
2423 goto out;
2424 }
2425
2426 for (i = 0; i < trans; i++, addr += FC_BYTES) {
2427 /* full address MUST be set before populating FC */
2428 brcmnand_set_cmd_addr(mtd, addr);
2429
2430 if (buf) {
2431 brcmnand_soc_data_bus_prepare(ctrl->soc, false);
2432
2433 for (j = 0; j < FC_WORDS; j++, buf++)
2434 brcmnand_write_fc(ctrl, j, *buf);
2435
2436 brcmnand_soc_data_bus_unprepare(ctrl->soc, false);
2437 } else if (oob) {
2438 for (j = 0; j < FC_WORDS; j++)
2439 brcmnand_write_fc(ctrl, j, 0xffffffff);
2440 }
2441
2442 if (oob) {
2443 oob += write_oob_to_regs(ctrl, i, oob,
2444 mtd->oobsize / trans,
2445 host->hwcfg.sector_size_1k);
2446 }
2447
2448 /* we cannot use SPARE_AREA_PROGRAM when PARTIAL_PAGE_EN=0 */
2449 brcmnand_send_cmd(host, CMD_PROGRAM_PAGE);
2450 status = brcmnand_waitfunc(chip);
2451
2452 if (status & NAND_STATUS_FAIL) {
2453 dev_info(ctrl->dev, "program failed at %llx\n",
2454 (unsigned long long)addr);
2455 ret = -EIO;
2456 goto out;
2457 }
2458 }
2459 out:
2460 brcmnand_wp(mtd, 1);
2461 return ret;
2462 }
2463
2464 static int brcmnand_write_page(struct nand_chip *chip, const uint8_t *buf,
2465 int oob_required, int page)
2466 {
2467 struct mtd_info *mtd = nand_to_mtd(chip);
2468 struct brcmnand_host *host = nand_get_controller_data(chip);
2469 void *oob = oob_required ? chip->oob_poi : NULL;
2470
2471 nand_prog_page_begin_op(chip, page, 0, NULL, 0);
2472 brcmnand_write(mtd, chip, host->last_addr, (const u32 *)buf, oob);
2473
2474 return nand_prog_page_end_op(chip);
2475 }
2476
2477 static int brcmnand_write_page_raw(struct nand_chip *chip, const uint8_t *buf,
2478 int oob_required, int page)
2479 {
2480 struct mtd_info *mtd = nand_to_mtd(chip);
2481 struct brcmnand_host *host = nand_get_controller_data(chip);
2482 void *oob = oob_required ? chip->oob_poi : NULL;
2483
2484 nand_prog_page_begin_op(chip, page, 0, NULL, 0);
2485 brcmnand_set_ecc_enabled(host, 0);
2486 brcmnand_write(mtd, chip, host->last_addr, (const u32 *)buf, oob);
2487 brcmnand_set_ecc_enabled(host, 1);
2488
2489 return nand_prog_page_end_op(chip);
2490 }
2491
2492 static int brcmnand_write_oob(struct nand_chip *chip, int page)
2493 {
2494 return brcmnand_write(nand_to_mtd(chip), chip,
2495 (u64)page << chip->page_shift, NULL,
2496 chip->oob_poi);
2497 }
2498
2499 static int brcmnand_write_oob_raw(struct nand_chip *chip, int page)
2500 {
2501 struct mtd_info *mtd = nand_to_mtd(chip);
2502 struct brcmnand_host *host = nand_get_controller_data(chip);
2503 int ret;
2504
2505 brcmnand_set_ecc_enabled(host, 0);
2506 ret = brcmnand_write(mtd, chip, (u64)page << chip->page_shift, NULL,
2507 (u8 *)chip->oob_poi);
2508 brcmnand_set_ecc_enabled(host, 1);
2509
2510 return ret;
2511 }
2512
2513 /***********************************************************************
2514 * Per-CS setup (1 NAND device)
2515 ***********************************************************************/
2516
2517 static int brcmnand_set_cfg(struct brcmnand_host *host,
2518 struct brcmnand_cfg *cfg)
2519 {
2520 struct brcmnand_controller *ctrl = host->ctrl;
2521 struct nand_chip *chip = &host->chip;
2522 u16 cfg_offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_CFG);
2523 u16 cfg_ext_offs = brcmnand_cs_offset(ctrl, host->cs,
2524 BRCMNAND_CS_CFG_EXT);
2525 u16 acc_control_offs = brcmnand_cs_offset(ctrl, host->cs,
2526 BRCMNAND_CS_ACC_CONTROL);
2527 u8 block_size = 0, page_size = 0, device_size = 0;
2528 u32 tmp;
2529
2530 if (ctrl->block_sizes) {
2531 int i, found;
2532
2533 for (i = 0, found = 0; ctrl->block_sizes[i]; i++)
2534 if (ctrl->block_sizes[i] * 1024 == cfg->block_size) {
2535 block_size = i;
2536 found = 1;
2537 }
2538 if (!found) {
2539 dev_warn(ctrl->dev, "invalid block size %u\n",
2540 cfg->block_size);
2541 return -EINVAL;
2542 }
2543 } else {
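		/*
		 * Encoded as the log2 distance from the minimum block size,
		 * e.g. 128KiB blocks: ffs(128 << 10) - ffs(8 << 10)
		 * = 18 - 14 = 4.
		 */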
2544 block_size = ffs(cfg->block_size) - ffs(BRCMNAND_MIN_BLOCKSIZE);
2545 }
2546
2547 if (cfg->block_size < BRCMNAND_MIN_BLOCKSIZE || (ctrl->max_block_size &&
2548 cfg->block_size > ctrl->max_block_size)) {
2549 dev_warn(ctrl->dev, "invalid block size %u\n",
2550 cfg->block_size);
2551 block_size = 0;
2552 }
2553
2554 if (ctrl->page_sizes) {
2555 int i, found;
2556
2557 for (i = 0, found = 0; ctrl->page_sizes[i]; i++)
2558 if (ctrl->page_sizes[i] == cfg->page_size) {
2559 page_size = i;
2560 found = 1;
2561 }
2562 if (!found) {
2563 dev_warn(ctrl->dev, "invalid page size %u\n",
2564 cfg->page_size);
2565 return -EINVAL;
2566 }
2567 } else {
2568 page_size = ffs(cfg->page_size) - ffs(BRCMNAND_MIN_PAGESIZE);
2569 }
2570
2571 if (cfg->page_size < BRCMNAND_MIN_PAGESIZE || (ctrl->max_page_size &&
2572 cfg->page_size > ctrl->max_page_size)) {
2573 dev_warn(ctrl->dev, "invalid page size %u\n", cfg->page_size);
2574 return -EINVAL;
2575 }
2576
2577 if (fls64(cfg->device_size) < fls64(BRCMNAND_MIN_DEVSIZE)) {
2578 dev_warn(ctrl->dev, "invalid device size 0x%llx\n",
2579 (unsigned long long)cfg->device_size);
2580 return -EINVAL;
2581 }
2582 device_size = fls64(cfg->device_size) - fls64(BRCMNAND_MIN_DEVSIZE);
2583
2584 tmp = (cfg->blk_adr_bytes << CFG_BLK_ADR_BYTES_SHIFT) |
2585 (cfg->col_adr_bytes << CFG_COL_ADR_BYTES_SHIFT) |
2586 (cfg->ful_adr_bytes << CFG_FUL_ADR_BYTES_SHIFT) |
2587 (!!(cfg->device_width == 16) << CFG_BUS_WIDTH_SHIFT) |
2588 (device_size << CFG_DEVICE_SIZE_SHIFT);
2589 if (cfg_offs == cfg_ext_offs) {
2590 tmp |= (page_size << ctrl->page_size_shift) |
2591 (block_size << CFG_BLK_SIZE_SHIFT);
2592 nand_writereg(ctrl, cfg_offs, tmp);
2593 } else {
2594 nand_writereg(ctrl, cfg_offs, tmp);
2595 tmp = (page_size << CFG_EXT_PAGE_SIZE_SHIFT) |
2596 (block_size << CFG_EXT_BLK_SIZE_SHIFT);
2597 nand_writereg(ctrl, cfg_ext_offs, tmp);
2598 }
2599
2600 tmp = nand_readreg(ctrl, acc_control_offs);
2601 tmp &= ~brcmnand_ecc_level_mask(ctrl);
2602 tmp &= ~brcmnand_spare_area_mask(ctrl);
2603 if (ctrl->nand_version >= 0x0302) {
2604 tmp |= cfg->ecc_level << ctrl->ecc_level_shift;
2605 tmp |= cfg->spare_area_size;
2606 }
2607 nand_writereg(ctrl, acc_control_offs, tmp);
2608
2609 brcmnand_set_sector_size_1k(host, cfg->sector_size_1k);
2610
2611 /* threshold = ceil(BCH-level * 0.75) */
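	/* e.g. BCH-4 -> 3, BCH-5 -> 4, BCH-8 -> 6 */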
2612 brcmnand_wr_corr_thresh(host, DIV_ROUND_UP(chip->ecc.strength * 3, 4));
2613
2614 return 0;
2615 }
2616
2617 static void brcmnand_print_cfg(struct brcmnand_host *host,
2618 char *buf, struct brcmnand_cfg *cfg)
2619 {
2620 buf += sprintf(buf,
2621 "%lluMiB total, %uKiB blocks, %u%s pages, %uB OOB, %u-bit",
2622 (unsigned long long)cfg->device_size >> 20,
2623 cfg->block_size >> 10,
2624 cfg->page_size >= 1024 ? cfg->page_size >> 10 : cfg->page_size,
2625 cfg->page_size >= 1024 ? "KiB" : "B",
2626 cfg->spare_area_size, cfg->device_width);
2627
2628 /* Account for Hamming ECC and for BCH 512B vs 1KiB sectors */
2629 if (is_hamming_ecc(host->ctrl, cfg))
2630 sprintf(buf, ", Hamming ECC");
2631 else if (cfg->sector_size_1k)
2632 sprintf(buf, ", BCH-%u (1KiB sector)", cfg->ecc_level << 1);
2633 else
2634 sprintf(buf, ", BCH-%u", cfg->ecc_level);
2635 }
2636
2637 /*
2638 * Minimum number of bytes to address a page. Calculated as:
2639 * roundup(log2(size / page-size) / 8)
2640 *
2641 * NB: the following does not "round up" for non-power-of-2 'size'; but this is
2642 * OK because many other things will break if 'size' is irregular...
2643 */
2644 static inline int get_blk_adr_bytes(u64 size, u32 writesize)
2645 {
2646 return ALIGN(ilog2(size) - ilog2(writesize), 8) >> 3;
2647 }
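/*
 * Worked example: a 1GiB device with 2KiB pages needs
 * ilog2(1 << 30) - ilog2(2048) = 30 - 11 = 19 row-address bits,
 * so ALIGN(19, 8) >> 3 = 24 >> 3 = 3 block-address bytes.
 */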
2648
2649 static int brcmnand_setup_dev(struct brcmnand_host *host)
2650 {
2651 struct mtd_info *mtd = nand_to_mtd(&host->chip);
2652 struct nand_chip *chip = &host->chip;
2653 const struct nand_ecc_props *requirements =
2654 nanddev_get_ecc_requirements(&chip->base);
2655 struct nand_memory_organization *memorg =
2656 nanddev_get_memorg(&chip->base);
2657 struct brcmnand_controller *ctrl = host->ctrl;
2658 struct brcmnand_cfg *cfg = &host->hwcfg;
2659 char msg[128];
2660 u32 offs, tmp, oob_sector;
2661 int ret;
2662
2663 memset(cfg, 0, sizeof(*cfg));
2664
2665 ret = of_property_read_u32(nand_get_flash_node(chip),
2666 "brcm,nand-oob-sector-size",
2667 &oob_sector);
2668 if (ret) {
2669 /* Use detected size */
2670 cfg->spare_area_size = mtd->oobsize /
2671 (mtd->writesize >> FC_SHIFT);
2672 } else {
2673 cfg->spare_area_size = oob_sector;
2674 }
2675 if (cfg->spare_area_size > ctrl->max_oob)
2676 cfg->spare_area_size = ctrl->max_oob;
2677 /*
2678 * Set mtd and memorg oobsize to be consistent with controller's
2679 * spare_area_size, as the rest is inaccessible.
2680 */
2681 mtd->oobsize = cfg->spare_area_size * (mtd->writesize >> FC_SHIFT);
2682 memorg->oobsize = mtd->oobsize;
2683
2684 cfg->device_size = mtd->size;
2685 cfg->block_size = mtd->erasesize;
2686 cfg->page_size = mtd->writesize;
2687 cfg->device_width = (chip->options & NAND_BUSWIDTH_16) ? 16 : 8;
2688 cfg->col_adr_bytes = 2;
2689 cfg->blk_adr_bytes = get_blk_adr_bytes(mtd->size, mtd->writesize);
2690
2691 if (chip->ecc.engine_type != NAND_ECC_ENGINE_TYPE_ON_HOST) {
2692 dev_err(ctrl->dev, "only HW ECC supported; selected: %d\n",
2693 chip->ecc.engine_type);
2694 return -EINVAL;
2695 }
2696
2697 if (chip->ecc.algo == NAND_ECC_ALGO_UNKNOWN) {
2698 if (chip->ecc.strength == 1 && chip->ecc.size == 512)
2699 /* Default to Hamming for 1-bit ECC, if unspecified */
2700 chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
2701 else
2702 /* Otherwise, BCH */
2703 chip->ecc.algo = NAND_ECC_ALGO_BCH;
2704 }
2705
2706 if (chip->ecc.algo == NAND_ECC_ALGO_HAMMING &&
2707 (chip->ecc.strength != 1 || chip->ecc.size != 512)) {
2708 dev_err(ctrl->dev, "invalid Hamming params: %d bits per %d bytes\n",
2709 chip->ecc.strength, chip->ecc.size);
2710 return -EINVAL;
2711 }
2712
2713 if (chip->ecc.engine_type != NAND_ECC_ENGINE_TYPE_NONE &&
2714 (!chip->ecc.size || !chip->ecc.strength)) {
2715 if (requirements->step_size && requirements->strength) {
2716 /* use detected ECC parameters */
2717 chip->ecc.size = requirements->step_size;
2718 chip->ecc.strength = requirements->strength;
2719 dev_info(ctrl->dev, "Using ECC step-size %d, strength %d\n",
2720 chip->ecc.size, chip->ecc.strength);
2721 }
2722 }
2723
2724 switch (chip->ecc.size) {
2725 case 512:
2726 if (chip->ecc.algo == NAND_ECC_ALGO_HAMMING)
2727 cfg->ecc_level = 15;
2728 else
2729 cfg->ecc_level = chip->ecc.strength;
2730 cfg->sector_size_1k = 0;
2731 break;
2732 case 1024:
2733 if (!(ctrl->features & BRCMNAND_HAS_1K_SECTORS)) {
2734 dev_err(ctrl->dev, "1KB sectors not supported\n");
2735 return -EINVAL;
2736 }
2737 if (chip->ecc.strength & 0x1) {
2738 dev_err(ctrl->dev,
2739 "odd ECC not supported with 1KB sectors\n");
2740 return -EINVAL;
2741 }
2742
2743 cfg->ecc_level = chip->ecc.strength >> 1;
2744 cfg->sector_size_1k = 1;
2745 break;
2746 default:
2747 dev_err(ctrl->dev, "unsupported ECC size: %d\n",
2748 chip->ecc.size);
2749 return -EINVAL;
2750 }
2751
2752 cfg->ful_adr_bytes = cfg->blk_adr_bytes;
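	/* Add column cycles: two for large-page devices, one for 512B pages */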
2753 if (mtd->writesize > 512)
2754 cfg->ful_adr_bytes += cfg->col_adr_bytes;
2755 else
2756 cfg->ful_adr_bytes += 1;
2757
2758 ret = brcmnand_set_cfg(host, cfg);
2759 if (ret)
2760 return ret;
2761
2762 brcmnand_set_ecc_enabled(host, 1);
2763
2764 brcmnand_print_cfg(host, msg, cfg);
2765 dev_info(ctrl->dev, "detected %s\n", msg);
2766
2767 /* Configure ACC_CONTROL */
2768 offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_ACC_CONTROL);
2769 tmp = nand_readreg(ctrl, offs);
2770 tmp &= ~ACC_CONTROL_PARTIAL_PAGE;
2771 tmp &= ~ACC_CONTROL_RD_ERASED;
2772
2773	/* We need to turn on reads from erased pages protected by ECC */
2774 if (ctrl->nand_version >= 0x0702)
2775 tmp |= ACC_CONTROL_RD_ERASED;
2776 tmp &= ~ACC_CONTROL_FAST_PGM_RDIN;
2777 if (ctrl->features & BRCMNAND_HAS_PREFETCH)
2778 tmp &= ~ACC_CONTROL_PREFETCH;
2779
2780 nand_writereg(ctrl, offs, tmp);
2781
2782 return 0;
2783 }
2784
2785 static int brcmnand_attach_chip(struct nand_chip *chip)
2786 {
2787 struct mtd_info *mtd = nand_to_mtd(chip);
2788 struct brcmnand_host *host = nand_get_controller_data(chip);
2789 int ret;
2790
2791 chip->options |= NAND_NO_SUBPAGE_WRITE;
2792 /*
2793 * Avoid (for instance) kmap()'d buffers from JFFS2, which we can't DMA
2794 * to/from, and have nand_base pass us a bounce buffer instead, as
2795 * needed.
2796 */
2797 chip->options |= NAND_USES_DMA;
2798
2799 if (chip->bbt_options & NAND_BBT_USE_FLASH)
2800 chip->bbt_options |= NAND_BBT_NO_OOB;
2801
2802 if (brcmnand_setup_dev(host))
2803 return -ENXIO;
2804
2805 chip->ecc.size = host->hwcfg.sector_size_1k ? 1024 : 512;
2806
2807 /* only use our internal HW threshold */
2808 mtd->bitflip_threshold = 1;
2809
2810 ret = brcmstb_choose_ecc_layout(host);
2811
2812 /* If OOB is written with ECC enabled it will cause ECC errors */
2813 if (is_hamming_ecc(host->ctrl, &host->hwcfg)) {
2814 chip->ecc.write_oob = brcmnand_write_oob_raw;
2815 chip->ecc.read_oob = brcmnand_read_oob_raw;
2816 }
2817
2818 return ret;
2819 }
2820
2821 static const struct nand_controller_ops brcmnand_controller_ops = {
2822 .attach_chip = brcmnand_attach_chip,
2823 };
2824
2825 static int brcmnand_init_cs(struct brcmnand_host *host,
2826 const char * const *part_probe_types)
2827 {
2828 struct brcmnand_controller *ctrl = host->ctrl;
2829 struct device *dev = ctrl->dev;
2830 struct mtd_info *mtd;
2831 struct nand_chip *chip;
2832 int ret;
2833 u16 cfg_offs;
2834
2835 mtd = nand_to_mtd(&host->chip);
2836 chip = &host->chip;
2837
2838 nand_set_controller_data(chip, host);
2839 mtd->name = devm_kasprintf(dev, GFP_KERNEL, "brcmnand.%d",
2840 host->cs);
2841 if (!mtd->name)
2842 return -ENOMEM;
2843
2844 mtd->owner = THIS_MODULE;
2845 mtd->dev.parent = dev;
2846
2847 chip->legacy.cmd_ctrl = brcmnand_cmd_ctrl;
2848 chip->legacy.cmdfunc = brcmnand_cmdfunc;
2849 chip->legacy.waitfunc = brcmnand_waitfunc;
2850 chip->legacy.read_byte = brcmnand_read_byte;
2851 chip->legacy.read_buf = brcmnand_read_buf;
2852 chip->legacy.write_buf = brcmnand_write_buf;
2853
2854 chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
2855 chip->ecc.read_page = brcmnand_read_page;
2856 chip->ecc.write_page = brcmnand_write_page;
2857 chip->ecc.read_page_raw = brcmnand_read_page_raw;
2858 chip->ecc.write_page_raw = brcmnand_write_page_raw;
2859 chip->ecc.write_oob_raw = brcmnand_write_oob_raw;
2860 chip->ecc.read_oob_raw = brcmnand_read_oob_raw;
2861 chip->ecc.read_oob = brcmnand_read_oob;
2862 chip->ecc.write_oob = brcmnand_write_oob;
2863
2864 chip->controller = &ctrl->controller;
2865
2866 /*
2867	 * The bootloader might have configured 16-bit mode, but the NAND
2868	 * READID command only works in 8-bit mode. We force 8-bit mode
2869	 * here to ensure that NAND READID commands work.
2870 */
2871 cfg_offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_CFG);
2872 nand_writereg(ctrl, cfg_offs,
2873 nand_readreg(ctrl, cfg_offs) & ~CFG_BUS_WIDTH);
2874
2875 ret = nand_scan(chip, 1);
2876 if (ret)
2877 return ret;
2878
2879 ret = mtd_device_parse_register(mtd, part_probe_types, NULL, NULL, 0);
2880 if (ret)
2881 nand_cleanup(chip);
2882
2883 return ret;
2884 }
2885
2886 static void brcmnand_save_restore_cs_config(struct brcmnand_host *host,
2887 int restore)
2888 {
2889 struct brcmnand_controller *ctrl = host->ctrl;
2890 u16 cfg_offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_CFG);
2891 u16 cfg_ext_offs = brcmnand_cs_offset(ctrl, host->cs,
2892 BRCMNAND_CS_CFG_EXT);
2893 u16 acc_control_offs = brcmnand_cs_offset(ctrl, host->cs,
2894 BRCMNAND_CS_ACC_CONTROL);
2895 u16 t1_offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_TIMING1);
2896 u16 t2_offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_TIMING2);
2897
2898 if (restore) {
2899 nand_writereg(ctrl, cfg_offs, host->hwcfg.config);
2900 if (cfg_offs != cfg_ext_offs)
2901 nand_writereg(ctrl, cfg_ext_offs,
2902 host->hwcfg.config_ext);
2903 nand_writereg(ctrl, acc_control_offs, host->hwcfg.acc_control);
2904 nand_writereg(ctrl, t1_offs, host->hwcfg.timing_1);
2905 nand_writereg(ctrl, t2_offs, host->hwcfg.timing_2);
2906 } else {
2907 host->hwcfg.config = nand_readreg(ctrl, cfg_offs);
2908 if (cfg_offs != cfg_ext_offs)
2909 host->hwcfg.config_ext =
2910 nand_readreg(ctrl, cfg_ext_offs);
2911 host->hwcfg.acc_control = nand_readreg(ctrl, acc_control_offs);
2912 host->hwcfg.timing_1 = nand_readreg(ctrl, t1_offs);
2913 host->hwcfg.timing_2 = nand_readreg(ctrl, t2_offs);
2914 }
2915 }
2916
2917 static int brcmnand_suspend(struct device *dev)
2918 {
2919 struct brcmnand_controller *ctrl = dev_get_drvdata(dev);
2920 struct brcmnand_host *host;
2921
2922 list_for_each_entry(host, &ctrl->host_list, node)
2923 brcmnand_save_restore_cs_config(host, 0);
2924
2925 ctrl->nand_cs_nand_select = brcmnand_read_reg(ctrl, BRCMNAND_CS_SELECT);
2926 ctrl->nand_cs_nand_xor = brcmnand_read_reg(ctrl, BRCMNAND_CS_XOR);
2927 ctrl->corr_stat_threshold =
2928 brcmnand_read_reg(ctrl, BRCMNAND_CORR_THRESHOLD);
2929
2930 if (has_flash_dma(ctrl))
2931 ctrl->flash_dma_mode = flash_dma_readl(ctrl, FLASH_DMA_MODE);
2932 else if (has_edu(ctrl))
2933 ctrl->edu_config = edu_readl(ctrl, EDU_CONFIG);
2934
2935 return 0;
2936 }
2937
2938 static int brcmnand_resume(struct device *dev)
2939 {
2940 struct brcmnand_controller *ctrl = dev_get_drvdata(dev);
2941 struct brcmnand_host *host;
2942
2943 if (has_flash_dma(ctrl)) {
2944 flash_dma_writel(ctrl, FLASH_DMA_MODE, ctrl->flash_dma_mode);
2945 flash_dma_writel(ctrl, FLASH_DMA_ERROR_STATUS, 0);
2946 }
2947
2948 if (has_edu(ctrl)) {
2949 ctrl->edu_config = edu_readl(ctrl, EDU_CONFIG);
2950 edu_writel(ctrl, EDU_CONFIG, ctrl->edu_config);
2951 edu_readl(ctrl, EDU_CONFIG);
2952 brcmnand_edu_init(ctrl);
2953 }
2954
2955 brcmnand_write_reg(ctrl, BRCMNAND_CS_SELECT, ctrl->nand_cs_nand_select);
2956 brcmnand_write_reg(ctrl, BRCMNAND_CS_XOR, ctrl->nand_cs_nand_xor);
2957 brcmnand_write_reg(ctrl, BRCMNAND_CORR_THRESHOLD,
2958 ctrl->corr_stat_threshold);
2959 if (ctrl->soc) {
2960 /* Clear/re-enable interrupt */
2961 ctrl->soc->ctlrdy_ack(ctrl->soc);
2962 ctrl->soc->ctlrdy_set_enabled(ctrl->soc, true);
2963 }
2964
2965 list_for_each_entry(host, &ctrl->host_list, node) {
2966 struct nand_chip *chip = &host->chip;
2967
2968 brcmnand_save_restore_cs_config(host, 1);
2969
2970 /* Reset the chip, required by some chips after power-up */
2971 nand_reset_op(chip);
2972 }
2973
2974 return 0;
2975 }
2976
2977 const struct dev_pm_ops brcmnand_pm_ops = {
2978 .suspend = brcmnand_suspend,
2979 .resume = brcmnand_resume,
2980 };
2981 EXPORT_SYMBOL_GPL(brcmnand_pm_ops);
2982
2983 static const struct of_device_id __maybe_unused brcmnand_of_match[] = {
2984 { .compatible = "brcm,brcmnand-v2.1" },
2985 { .compatible = "brcm,brcmnand-v2.2" },
2986 { .compatible = "brcm,brcmnand-v4.0" },
2987 { .compatible = "brcm,brcmnand-v5.0" },
2988 { .compatible = "brcm,brcmnand-v6.0" },
2989 { .compatible = "brcm,brcmnand-v6.1" },
2990 { .compatible = "brcm,brcmnand-v6.2" },
2991 { .compatible = "brcm,brcmnand-v7.0" },
2992 { .compatible = "brcm,brcmnand-v7.1" },
2993 { .compatible = "brcm,brcmnand-v7.2" },
2994 { .compatible = "brcm,brcmnand-v7.3" },
2995 {},
2996 };
2997 MODULE_DEVICE_TABLE(of, brcmnand_of_match);
2998
2999 /***********************************************************************
3000 * Platform driver setup (per controller)
3001 ***********************************************************************/
3002 static int brcmnand_edu_setup(struct platform_device *pdev)
3003 {
3004 struct device *dev = &pdev->dev;
3005 struct brcmnand_controller *ctrl = dev_get_drvdata(&pdev->dev);
3006 struct resource *res;
3007 int ret;
3008
3009 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "flash-edu");
3010 if (res) {
3011 ctrl->edu_base = devm_ioremap_resource(dev, res);
3012 if (IS_ERR(ctrl->edu_base))
3013 return PTR_ERR(ctrl->edu_base);
3014
3015 ctrl->edu_offsets = edu_regs;
3016
3017 edu_writel(ctrl, EDU_CONFIG, EDU_CONFIG_MODE_NAND |
3018 EDU_CONFIG_SWAP_CFG);
3019 edu_readl(ctrl, EDU_CONFIG);
3020
3021 /* initialize edu */
3022 brcmnand_edu_init(ctrl);
3023
3024 ctrl->edu_irq = platform_get_irq_optional(pdev, 1);
3025 if (ctrl->edu_irq < 0) {
3026 dev_warn(dev,
3027 "FLASH EDU enabled, using ctlrdy irq\n");
3028 } else {
3029 ret = devm_request_irq(dev, ctrl->edu_irq,
3030 brcmnand_edu_irq, 0,
3031 "brcmnand-edu", ctrl);
3032 if (ret < 0) {
3033 dev_err(ctrl->dev, "can't allocate IRQ %d: error %d\n",
3034 ctrl->edu_irq, ret);
3035 return ret;
3036 }
3037
3038 dev_info(dev, "FLASH EDU enabled using irq %u\n",
3039 ctrl->edu_irq);
3040 }
3041 }
3042
3043 return 0;
3044 }
3045
3046 int brcmnand_probe(struct platform_device *pdev, struct brcmnand_soc *soc)
3047 {
3048 struct brcmnand_platform_data *pd = dev_get_platdata(&pdev->dev);
3049 struct device *dev = &pdev->dev;
3050 struct device_node *dn = dev->of_node, *child;
3051 struct brcmnand_controller *ctrl;
3052 struct brcmnand_host *host;
3053 struct resource *res;
3054 int ret;
3055
3056 if (dn && !of_match_node(brcmnand_of_match, dn))
3057 return -ENODEV;
3058
3059 ctrl = devm_kzalloc(dev, sizeof(*ctrl), GFP_KERNEL);
3060 if (!ctrl)
3061 return -ENOMEM;
3062
3063 dev_set_drvdata(dev, ctrl);
3064 ctrl->dev = dev;
3065 ctrl->soc = soc;
3066
3067	/* Enable the static key if the SoC provides I/O operations, indicating
3068	 * that a non-memory-mapped I/O access path must be used
3069	 */
3070 if (brcmnand_soc_has_ops(ctrl->soc))
3071 static_branch_enable(&brcmnand_soc_has_ops_key);
3072
3073 init_completion(&ctrl->done);
3074 init_completion(&ctrl->dma_done);
3075 init_completion(&ctrl->edu_done);
3076 nand_controller_init(&ctrl->controller);
3077 ctrl->controller.ops = &brcmnand_controller_ops;
3078 INIT_LIST_HEAD(&ctrl->host_list);
3079
3080 /* NAND register range */
3081 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
3082 ctrl->nand_base = devm_ioremap_resource(dev, res);
3083 if (IS_ERR(ctrl->nand_base) && !brcmnand_soc_has_ops(soc))
3084 return PTR_ERR(ctrl->nand_base);
3085
3086 /* Enable clock before using NAND registers */
3087 ctrl->clk = devm_clk_get(dev, "nand");
3088 if (!IS_ERR(ctrl->clk)) {
3089 ret = clk_prepare_enable(ctrl->clk);
3090 if (ret)
3091 return ret;
3092 } else {
3093 ret = PTR_ERR(ctrl->clk);
3094 if (ret == -EPROBE_DEFER)
3095 return ret;
3096
3097 ctrl->clk = NULL;
3098 }
3099
3100 /* Initialize NAND revision */
3101 ret = brcmnand_revision_init(ctrl);
3102 if (ret)
3103 goto err;
3104
3105 /*
3106 * Most chips have this cache at a fixed offset within 'nand' block.
3107 * Some must specify this region separately.
3108 */
3109 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand-cache");
3110 if (res) {
3111 ctrl->nand_fc = devm_ioremap_resource(dev, res);
3112 if (IS_ERR(ctrl->nand_fc)) {
3113 ret = PTR_ERR(ctrl->nand_fc);
3114 goto err;
3115 }
3116 } else {
3117 ctrl->nand_fc = ctrl->nand_base +
3118 ctrl->reg_offsets[BRCMNAND_FC_BASE];
3119 }
3120
3121 /* FLASH_DMA */
3122 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "flash-dma");
3123 if (res) {
3124 ctrl->flash_dma_base = devm_ioremap_resource(dev, res);
3125 if (IS_ERR(ctrl->flash_dma_base)) {
3126 ret = PTR_ERR(ctrl->flash_dma_base);
3127 goto err;
3128 }
3129
3130 /* initialize the dma version */
3131 brcmnand_flash_dma_revision_init(ctrl);
3132
3133 ret = -EIO;
3134 if (ctrl->nand_version >= 0x0700)
3135 ret = dma_set_mask_and_coherent(&pdev->dev,
3136 DMA_BIT_MASK(40));
3137 if (ret)
3138 ret = dma_set_mask_and_coherent(&pdev->dev,
3139 DMA_BIT_MASK(32));
3140 if (ret)
3141 goto err;
3142
3143 /* linked-list and stop on error */
3144 flash_dma_writel(ctrl, FLASH_DMA_MODE, FLASH_DMA_MODE_MASK);
3145 flash_dma_writel(ctrl, FLASH_DMA_ERROR_STATUS, 0);
3146
3147 /* Allocate descriptor(s) */
3148 ctrl->dma_desc = dmam_alloc_coherent(dev,
3149 sizeof(*ctrl->dma_desc),
3150 &ctrl->dma_pa, GFP_KERNEL);
3151 if (!ctrl->dma_desc) {
3152 ret = -ENOMEM;
3153 goto err;
3154 }
3155
3156 ctrl->dma_irq = platform_get_irq(pdev, 1);
3157 if ((int)ctrl->dma_irq < 0) {
3158 dev_err(dev, "missing FLASH_DMA IRQ\n");
3159 ret = -ENODEV;
3160 goto err;
3161 }
3162
3163 ret = devm_request_irq(dev, ctrl->dma_irq,
3164 brcmnand_dma_irq, 0, DRV_NAME,
3165 ctrl);
3166 if (ret < 0) {
3167 dev_err(dev, "can't allocate IRQ %d: error %d\n",
3168 ctrl->dma_irq, ret);
3169 goto err;
3170 }
3171
3172 dev_info(dev, "enabling FLASH_DMA\n");
3173 /* set flash dma transfer function to call */
3174 ctrl->dma_trans = brcmnand_dma_trans;
3175 } else {
3176 ret = brcmnand_edu_setup(pdev);
3177 if (ret < 0)
3178 goto err;
3179
3180 if (has_edu(ctrl))
3181 /* set edu transfer function to call */
3182 ctrl->dma_trans = brcmnand_edu_trans;
3183 }
3184
3185 /* Disable automatic device ID config, direct addressing */
3186 brcmnand_rmw_reg(ctrl, BRCMNAND_CS_SELECT,
3187 CS_SELECT_AUTO_DEVICE_ID_CFG | 0xff, 0, 0);
3188 /* Disable XOR addressing */
3189 brcmnand_rmw_reg(ctrl, BRCMNAND_CS_XOR, 0xff, 0, 0);
3190
3191 if (ctrl->features & BRCMNAND_HAS_WP) {
3192 /* Permanently disable write protection */
3193 if (wp_on == 2)
3194 brcmnand_set_wp(ctrl, false);
3195 } else {
3196 wp_on = 0;
3197 }
3198
3199 /* IRQ */
3200 ctrl->irq = platform_get_irq_optional(pdev, 0);
3201 if (ctrl->irq > 0) {
3202 /*
3203 * Some SoCs integrate this controller (e.g., its interrupt bits) in
3204 * interesting ways
3205 */
3206 if (soc) {
3207 ret = devm_request_irq(dev, ctrl->irq, brcmnand_irq, 0,
3208 DRV_NAME, ctrl);
3209
3210 /* Enable interrupt */
3211 ctrl->soc->ctlrdy_ack(ctrl->soc);
3212 ctrl->soc->ctlrdy_set_enabled(ctrl->soc, true);
3213 } else {
3214 /* Use standard interrupt infrastructure */
3215 ret = devm_request_irq(dev, ctrl->irq, brcmnand_ctlrdy_irq, 0,
3216 DRV_NAME, ctrl);
3217 }
3218 if (ret < 0) {
3219 dev_err(dev, "can't allocate IRQ %d: error %d\n",
3220 ctrl->irq, ret);
3221 goto err;
3222 }
3223 }
3224
3225 for_each_available_child_of_node(dn, child) {
3226 if (of_device_is_compatible(child, "brcm,nandcs")) {
3227
3228 host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
3229 if (!host) {
3230 of_node_put(child);
3231 ret = -ENOMEM;
3232 goto err;
3233 }
3234 host->pdev = pdev;
3235 host->ctrl = ctrl;
3236
3237 ret = of_property_read_u32(child, "reg", &host->cs);
3238 if (ret) {
3239 dev_err(dev, "can't get chip-select\n");
3240 devm_kfree(dev, host);
3241 continue;
3242 }
3243
3244 nand_set_flash_node(&host->chip, child);
3245
3246 ret = brcmnand_init_cs(host, NULL);
3247 if (ret) {
3248 devm_kfree(dev, host);
3249 continue; /* Try all chip-selects */
3250 }
3251
3252 list_add_tail(&host->node, &ctrl->host_list);
3253 }
3254 }
3255
3256 if (!list_empty(&ctrl->host_list))
3257 return 0;
3258
3259 if (!pd) {
3260 ret = -ENODEV;
3261 goto err;
3262 }
3263
3264	/* If we got here, we must have been probing via platform data */
3265 host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
3266 if (!host) {
3267 ret = -ENOMEM;
3268 goto err;
3269 }
3270 host->pdev = pdev;
3271 host->ctrl = ctrl;
3272 host->cs = pd->chip_select;
3273 host->chip.ecc.size = pd->ecc_stepsize;
3274 host->chip.ecc.strength = pd->ecc_strength;
3275
3276 ret = brcmnand_init_cs(host, pd->part_probe_types);
3277 if (ret)
3278 goto err;
3279
3280 list_add_tail(&host->node, &ctrl->host_list);
3281
3282 /* No chip-selects could initialize properly */
3283 if (list_empty(&ctrl->host_list)) {
3284 ret = -ENODEV;
3285 goto err;
3286 }
3287
3288 return 0;
3289
3290 err:
3291 clk_disable_unprepare(ctrl->clk);
3292 return ret;
3293
3294 }
3295 EXPORT_SYMBOL_GPL(brcmnand_probe);
3296
3297 int brcmnand_remove(struct platform_device *pdev)
3298 {
3299 struct brcmnand_controller *ctrl = dev_get_drvdata(&pdev->dev);
3300 struct brcmnand_host *host;
3301 struct nand_chip *chip;
3302 int ret;
3303
3304 list_for_each_entry(host, &ctrl->host_list, node) {
3305 chip = &host->chip;
3306 ret = mtd_device_unregister(nand_to_mtd(chip));
3307 WARN_ON(ret);
3308 nand_cleanup(chip);
3309 }
3310
3311 clk_disable_unprepare(ctrl->clk);
3312
3313 dev_set_drvdata(&pdev->dev, NULL);
3314
3315 return 0;
3316 }
3317 EXPORT_SYMBOL_GPL(brcmnand_remove);
3318
3319 MODULE_LICENSE("GPL v2");
3320 MODULE_AUTHOR("Kevin Cernekee");
3321 MODULE_AUTHOR("Brian Norris");
3322 MODULE_DESCRIPTION("NAND driver for Broadcom chips");
3323 MODULE_ALIAS("platform:brcmnand");
3324