/*
 * Copyright (c) 2017-2020, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <errno.h>
#include <stdint.h>

#include <platform_def.h>

#include <arch_helpers.h>
#include <common/debug.h>
#include <drivers/io/io_block.h>
#include <lib/mmio.h>
#include <lib/utils_def.h>

#include "uniphier.h"

#define NAND_CMD_READ0			0
#define NAND_CMD_READSTART		0x30

#define DENALI_ECC_ENABLE		0x0e0
#define DENALI_PAGES_PER_BLOCK		0x150
#define DENALI_DEVICE_MAIN_AREA_SIZE	0x170
#define DENALI_DEVICE_SPARE_AREA_SIZE	0x180
#define DENALI_TWO_ROW_ADDR_CYCLES	0x190
#define DENALI_INTR_STATUS0		0x410
#define DENALI_INTR_ECC_UNCOR_ERR	BIT(1)
#define DENALI_INTR_DMA_CMD_COMP	BIT(2)
#define DENALI_INTR_INT_ACT		BIT(12)

#define DENALI_DMA_ENABLE		0x700

#define DENALI_HOST_ADDR		0x00
#define DENALI_HOST_DATA		0x10

#define DENALI_MAP01			(1 << 26)
#define DENALI_MAP10			(2 << 26)
#define DENALI_MAP11			(3 << 26)

#define DENALI_MAP11_CMD		((DENALI_MAP11) | 0)
#define DENALI_MAP11_ADDR		((DENALI_MAP11) | 1)
#define DENALI_MAP11_DATA		((DENALI_MAP11) | 2)

#define DENALI_ACCESS_DEFAULT_AREA	0x42

#define UNIPHIER_NAND_BBT_UNKNOWN	0xff

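/*
 * Driver context: Denali host/register base addresses, the device geometry
 * read back from the controller, and a small bad-block cache covering the
 * first ARRAY_SIZE(bbt) blocks (UNIPHIER_NAND_BBT_UNKNOWN = not probed yet).
 */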
struct uniphier_nand {
	uintptr_t host_base;
	uintptr_t reg_base;
	int pages_per_block;
	int page_size;
	int two_row_addr_cycles;
	uint8_t bbt[16];
};

struct uniphier_nand uniphier_nand;

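/*
 * The Denali host interface is accessed indirectly: write the target
 * (MAP01/MAP10/MAP11) address to DENALI_HOST_ADDR, then transfer the
 * payload through DENALI_HOST_DATA.
 */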
static void uniphier_nand_host_write(struct uniphier_nand *nand,
				     uint32_t addr, uint32_t data)
{
	mmio_write_32(nand->host_base + DENALI_HOST_ADDR, addr);
	mmio_write_32(nand->host_base + DENALI_HOST_DATA, data);
}

static uint32_t uniphier_nand_host_read(struct uniphier_nand *nand,
					uint32_t addr)
{
	mmio_write_32(nand->host_base + DENALI_HOST_ADDR, addr);
	return mmio_read_32(nand->host_base + DENALI_HOST_DATA);
}

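/*
 * Check the factory bad block marker: with ECC disabled, read the first
 * spare-area byte (column == page_size) of the first page in the block.
 * Any value other than 0xff marks the block as bad.  Results for the first
 * ARRAY_SIZE(bbt) blocks are cached to avoid re-reading the marker.
 */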
static int uniphier_nand_block_isbad(struct uniphier_nand *nand, int block)
{
	int page = nand->pages_per_block * block;
	int column = nand->page_size;
	uint8_t bbm;
	uint32_t status;
	int is_bad;

	/* use cache if available */
	if (block < ARRAY_SIZE(nand->bbt) &&
	    nand->bbt[block] != UNIPHIER_NAND_BBT_UNKNOWN)
		return nand->bbt[block];

	mmio_write_32(nand->reg_base + DENALI_ECC_ENABLE, 0);

	mmio_write_32(nand->reg_base + DENALI_INTR_STATUS0, -1);

	uniphier_nand_host_write(nand, DENALI_MAP11_CMD, NAND_CMD_READ0);
	uniphier_nand_host_write(nand, DENALI_MAP11_ADDR, column & 0xff);
	uniphier_nand_host_write(nand, DENALI_MAP11_ADDR, (column >> 8) & 0xff);
	uniphier_nand_host_write(nand, DENALI_MAP11_ADDR, page & 0xff);
	uniphier_nand_host_write(nand, DENALI_MAP11_ADDR, (page >> 8) & 0xff);
	if (!nand->two_row_addr_cycles)
		uniphier_nand_host_write(nand, DENALI_MAP11_ADDR,
					 (page >> 16) & 0xff);
	uniphier_nand_host_write(nand, DENALI_MAP11_CMD, NAND_CMD_READSTART);

	do {
		status = mmio_read_32(nand->reg_base + DENALI_INTR_STATUS0);
	} while (!(status & DENALI_INTR_INT_ACT));

	bbm = uniphier_nand_host_read(nand, DENALI_MAP11_DATA);

	is_bad = bbm != 0xff;

	/* if possible, save the result for future re-use */
	if (block < ARRAY_SIZE(nand->bbt))
		nand->bbt[block] = is_bad;

	if (is_bad)
		WARN("found bad block at %d. skip.\n", block);

	return is_bad;
}

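/*
 * Read page_count consecutive pages, starting at page_start, into buf using
 * the controller's MAP10 data DMA mode with ECC enabled.  Returns 0 on
 * success, or -EBADMSG on an uncorrectable ECC error.
 */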
static int uniphier_nand_read_pages(struct uniphier_nand *nand, uintptr_t buf,
				    int page_start, int page_count)
{
	uint32_t status;

	mmio_write_32(nand->reg_base + DENALI_ECC_ENABLE, 1);
	mmio_write_32(nand->reg_base + DENALI_DMA_ENABLE, 1);

	mmio_write_32(nand->reg_base + DENALI_INTR_STATUS0, -1);

	/* use Data DMA (64bit) */
	mmio_write_32(nand->host_base + DENALI_HOST_ADDR,
		      DENALI_MAP10 | page_start);

	/*
	 * 1. setup transfer type, interrupt when complete,
	 * burst len = 64 bytes, the number of pages
	 */
	mmio_write_32(nand->host_base + DENALI_HOST_DATA,
		      0x01002000 | (64 << 16) | page_count);

	/* 2. set memory low address */
	mmio_write_32(nand->host_base + DENALI_HOST_DATA, buf);

	/* 3. set memory high address */
	mmio_write_32(nand->host_base + DENALI_HOST_DATA, buf >> 32);

	do {
		status = mmio_read_32(nand->reg_base + DENALI_INTR_STATUS0);
	} while (!(status & DENALI_INTR_DMA_CMD_COMP));

	mmio_write_32(nand->reg_base + DENALI_DMA_ENABLE, 0);

	if (status & DENALI_INTR_ECC_UNCOR_ERR) {
		ERROR("uncorrectable error in page range %d-%d\n",
		      page_start, page_start + page_count - 1);
		return -EBADMSG;
	}

	return 0;
}

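/*
 * Core read loop: translate the logical block address (one io_block "block"
 * is one NAND page here) into a physical page by skipping NAND blocks marked
 * as bad, then read block-sized chunks of pages until 'size' bytes are
 * covered.  Returns the number of bytes actually read.
 */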
static size_t __uniphier_nand_read(struct uniphier_nand *nand, int lba,
				   uintptr_t buf, size_t size)
{
	int pages_per_block = nand->pages_per_block;
	int page_size = nand->page_size;
	int blocks_to_skip = lba / pages_per_block;
	int pages_to_read = div_round_up(size, page_size);
	int page = lba % pages_per_block;
	int block = 0;
	uintptr_t p = buf;
	int page_count, ret;

	while (blocks_to_skip) {
		ret = uniphier_nand_block_isbad(nand, block);
		if (ret < 0)
			goto out;

		if (!ret)
			blocks_to_skip--;

		block++;
	}

	while (pages_to_read) {
		ret = uniphier_nand_block_isbad(nand, block);
		if (ret < 0)
			goto out;

		if (ret) {
			block++;
			continue;
		}

		page_count = MIN(pages_per_block - page, pages_to_read);

		ret = uniphier_nand_read_pages(nand, p,
					       block * pages_per_block + page,
					       page_count);
		if (ret)
			goto out;

		block++;
		page = 0;
		p += page_size * page_count;
		pages_to_read -= page_count;
	}

out:
	/* number of read bytes */
	return MIN(size, p - buf);
}

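/*
 * io_block read hook.  The page data is transferred by DMA, bypassing the
 * CPU caches, so invalidate the destination range both before and after the
 * transfer to avoid consuming stale cache lines.
 */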
static size_t uniphier_nand_read(int lba, uintptr_t buf, size_t size)
{
	size_t count;

	inv_dcache_range(buf, size);

	count = __uniphier_nand_read(&uniphier_nand, lba, buf, size);

	inv_dcache_range(buf, size);

	return count;
}

static struct io_block_dev_spec uniphier_nand_dev_spec = {
	.ops = {
		.read = uniphier_nand_read,
	},
	/* fill .block_size at run-time */
};

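/*
 * One-time controller setup: reset the bad-block cache, read the device
 * geometry (pages per block, page size, number of row address cycles) back
 * from the Denali configuration registers, and select the default data area
 * for MAP10 accesses.
 */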
static int uniphier_nand_hw_init(struct uniphier_nand *nand)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(nand->bbt); i++)
		nand->bbt[i] = UNIPHIER_NAND_BBT_UNKNOWN;

	nand->host_base = 0x68000000;
	nand->reg_base = 0x68100000;

	nand->pages_per_block =
		mmio_read_32(nand->reg_base + DENALI_PAGES_PER_BLOCK);

	nand->page_size =
		mmio_read_32(nand->reg_base + DENALI_DEVICE_MAIN_AREA_SIZE);

	if (mmio_read_32(nand->reg_base + DENALI_TWO_ROW_ADDR_CYCLES) & BIT(0))
		nand->two_row_addr_cycles = 1;

	uniphier_nand_host_write(nand, DENALI_MAP10,
				 DENALI_ACCESS_DEFAULT_AREA);

	return 0;
}

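/*
 * Exported entry point: initialize the controller and hand back the io_block
 * device spec, using the NAND page size as the io_block block size.
 */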
int uniphier_nand_init(struct io_block_dev_spec **block_dev_spec)
{
	int ret;

	ret = uniphier_nand_hw_init(&uniphier_nand);
	if (ret)
		return ret;

	uniphier_nand_dev_spec.block_size = uniphier_nand.page_size;

	*block_dev_spec = &uniphier_nand_dev_spec;

	return 0;
}