/*
 * Copyright (c) 2021 HiSilicon (Shanghai) Technologies CO., LIMITED.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "hifmc100_spi_nor.h"
#include "asm/platform.h"
#include "hisoc/flash.h"
#include "los_vm_phys.h"
#include "securec.h"

#include "device_resource_if.h"
#include "hdf_device_desc.h"
#include "hdf_log.h"
#include "mtd_spi_nor.h"
#include "osal_io.h"
#include "osal_mem.h"
#include "osal_mutex.h"
#include "platform_core.h"

#include "hifmc100.h"
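
/* Parse one SPI NOR chip entry from the HCS device table node into a SpinorInfo structure. */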
static int32_t HifmcCntlrReadSpinorInfo(struct SpinorInfo *info, const struct DeviceResourceNode *node)
{
    int32_t ret;
    struct DeviceResourceIface *drsOps = NULL;

    drsOps = DeviceResourceGetIfaceInstance(HDF_CONFIG_SOURCE);
    if (drsOps == NULL) {
        HDF_LOGE("%s: invalid drs ops", __func__);
        return HDF_ERR_NOT_SUPPORT;
    }

    ret = drsOps->GetString(node, "name", &info->name, "unknown");
    if (ret != HDF_SUCCESS || info->name == NULL) {
        HDF_LOGE("%s: read name failed:%d", __func__, ret);
        return ret;
    }

    ret = drsOps->GetElemNum(node, "id");
    if (ret <= 0) {
        HDF_LOGE("%s: get id len failed:%d", __func__, ret);
        return ret;
    }
    info->idLen = ret;

    ret = drsOps->GetUint8Array(node, "id", info->id, info->idLen, 0);
    if (ret != HDF_SUCCESS) {
        HDF_LOGE("%s: read id failed:%d", __func__, ret);
        return ret;
    }

    ret = drsOps->GetUint32(node, "block_size", &info->blockSize, 0);
    if (ret != HDF_SUCCESS) {
        HDF_LOGE("%s: read block size failed:%d", __func__, ret);
        return ret;
    }

    ret = drsOps->GetUint32(node, "chip_size", &info->chipSize, 0);
    if (ret != HDF_SUCCESS) {
        HDF_LOGE("%s: read chip size failed:%d", __func__, ret);
        return ret;
    }

    ret = drsOps->GetUint32(node, "addr_cycle", &info->addrCycle, 0);
    if (ret != HDF_SUCCESS) {
        HDF_LOGE("%s: read addr cycle failed:%d", __func__, ret);
        return ret;
    }

    if ((ret = HifmcCntlrReadSpiOp(&info->eraseCfg, drsOps->GetChildNode(node, "erase_op"))) != HDF_SUCCESS ||
        (ret = HifmcCntlrReadSpiOp(&info->writeCfg, drsOps->GetChildNode(node, "write_op"))) != HDF_SUCCESS ||
        (ret = HifmcCntlrReadSpiOp(&info->readCfg, drsOps->GetChildNode(node, "read_op"))) != HDF_SUCCESS) {
        return ret;
    }

    return HDF_SUCCESS;
}
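
/* Walk the HCS device table and match the probed JEDEC ID against each entry; on a match, copy the chip parameters into the MTD/SPI flash structures and resolve the SPI ops. */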
int32_t HifmcCntlrSearchSpinorInfo(struct HifmcCntlr *cntlr, struct SpiFlash *spi)
{
    unsigned int i;
    int32_t ret;
    struct SpinorInfo spinorInfo;
    const struct DeviceResourceNode *childNode = NULL;
    const struct DeviceResourceNode *tableNode = NULL;

    tableNode = HifmcCntlrGetDevTableNode(cntlr);
    if (tableNode == NULL) {
        return HDF_ERR_NOT_SUPPORT;
    }

    DEV_RES_NODE_FOR_EACH_CHILD_NODE(tableNode, childNode) {
        ret = HifmcCntlrReadSpinorInfo(&spinorInfo, childNode);
        if (ret != HDF_SUCCESS) {
            return HDF_ERR_IO;
        }
        if (memcmp(spinorInfo.id, spi->mtd.id, spinorInfo.idLen) == 0) {
            spi->mtd.chipName = spinorInfo.name;
            spi->mtd.idLen = spinorInfo.idLen;
            spi->mtd.capacity = spinorInfo.chipSize;
            spi->mtd.eraseSize = spinorInfo.blockSize;
            spi->addrCycle = spinorInfo.addrCycle;
            spi->eraseCfg = spinorInfo.eraseCfg;
            spi->writeCfg = spinorInfo.writeCfg;
            spi->readCfg = spinorInfo.readCfg;
            return SpinorGetSpiOps(spi);
        }
    }

    HDF_LOGE("%s: dev id not supported", __func__);
    for (i = 0; i < sizeof(spi->mtd.id); i++) {
        HDF_LOGE("%s: mtd->id[%u] = 0x%x", __func__, i, spi->mtd.id[i]);
    }
    return HDF_ERR_NOT_SUPPORT;
}
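
/* Read a one-byte SPI NOR register value (e.g. the status register via RDSR) using the controller's register-operation mode. */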
uint8_t HifmcCntlrReadSpinorReg(struct HifmcCntlr *cntlr, struct SpiFlash *spi, uint8_t cmd)
{
    uint8_t status;
    unsigned long reg;

#ifdef MTD_DEBUG
    HDF_LOGD("%s: start read spi register:%#x", __func__, cmd);
#endif
    reg = HIFMC_OP_CFG_FM_CS(spi->cs);
    HIFMC_REG_WRITE(cntlr, reg, HIFMC_OP_CFG_REG_OFF);

    if (cmd == MTD_SPI_CMD_RDSR) {
        reg = HIFMC_OP_READ_STATUS_EN(1) | HIFMC_OP_REG_OP_START;
        goto __CMD_CFG_DONE;
    }

    reg = cmd;
    HIFMC_REG_WRITE(cntlr, reg, HIFMC_CMD_REG_OFF);

    reg = HIFMC_DATA_NUM_CNT(1);
    HIFMC_REG_WRITE(cntlr, reg, HIFMC_DATA_NUM_REG_OFF);

    reg = HIFMC_OP_CMD1_EN(1) |
          HIFMC_OP_READ_DATA_EN(1) |
          HIFMC_OP_REG_OP_START;

__CMD_CFG_DONE:
    HIFMC_REG_WRITE(cntlr, reg, HIFMC_OP_REG_OFF);

    HIFMC_CMD_WAIT_CPU_FINISH(cntlr);

    if (cmd == MTD_SPI_CMD_RDSR) {
        status = HIFMC_REG_READ(cntlr, HIFMC_FLASH_INFO_REG_OFF);
    } else {
        status = OSAL_READB(cntlr->memBase);
    }
#ifdef MTD_DEBUG
    HDF_LOGD("%s: end read spi register:%#x, val:%#x", __func__, cmd, status);
#endif

    return status;
}
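
/* Issue the RDID command on chip select 'cs' and copy MTD_FLASH_ID_LEN_MAX bytes of JEDEC ID from the controller buffer into 'id'. */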
int32_t HifmcCntlrReadIdSpiNor(struct HifmcCntlr *cntlr, uint8_t cs, uint8_t *id, size_t len)
{
    int32_t ret;
    unsigned long reg;

    if (len < MTD_FLASH_ID_LEN_MAX) {
        HDF_LOGE("%s: buf too small(len: %zu, expected: %u)", __func__, len, MTD_FLASH_ID_LEN_MAX);
        return HDF_ERR_INVALID_PARAM;
    }

    reg = HIFMC_CMD_CMD1(MTD_SPI_CMD_RDID);
    HIFMC_REG_WRITE(cntlr, reg, HIFMC_CMD_REG_OFF);

    reg = HIFMC_OP_CFG_FM_CS(cs);
    HIFMC_REG_WRITE(cntlr, reg, HIFMC_OP_CFG_REG_OFF);

    reg = HIFMC_DATA_NUM_CNT(MTD_FLASH_ID_LEN_MAX);
    HIFMC_REG_WRITE(cntlr, reg, HIFMC_DATA_NUM_REG_OFF);

    reg = HIFMC_OP_CMD1_EN(1) | HIFMC_OP_READ_DATA_EN(1) | HIFMC_OP_REG_OP_START;
    HIFMC_REG_WRITE(cntlr, reg, HIFMC_OP_REG_OFF);

    HIFMC_CMD_WAIT_CPU_FINISH(cntlr);

    ret = memcpy_s(id, MTD_FLASH_ID_LEN_MAX, (const void *)cntlr->memBase, MTD_FLASH_ID_LEN_MAX);
    if (ret != EOK) {
        HDF_LOGE("%s: copy id buf failed : %d", __func__, ret);
        return HDF_PLT_ERR_OS_API;
    }

    return HDF_SUCCESS;
}
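
/* Erase one block at flash offset 'from': wait until the chip is ready, send write enable, then launch the configured erase command in register-operation mode. */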
static int32_t HifmcCntlrEraseOneBlock(struct HifmcCntlr *cntlr, struct SpiFlash *spi, off_t from)
{
    int32_t ret;
    unsigned long reg;

    ret = SpiFlashWaitReady(spi);
    if (ret != HDF_SUCCESS) {
        return ret;
    }

    ret = SpiFlashWriteEnable(spi);
    if (ret != HDF_SUCCESS) {
        return ret;
    }

    // set system clock for erase
    ret = HifmcCntlrSetSysClock(cntlr, spi->eraseCfg.clock, 1);
    if (ret != HDF_SUCCESS) {
        return ret;
    }

    reg = HIFMC_CMD_CMD1(spi->eraseCfg.cmd);
    HIFMC_REG_WRITE(cntlr, reg, HIFMC_CMD_REG_OFF);

    reg = from;
    HIFMC_REG_WRITE(cntlr, reg, HIFMC_ADDRL_REG_OFF);

    reg = HIFMC_OP_CFG_FM_CS(spi->cs) |
          HIFMC_OP_CFG_MEM_IF_TYPE(spi->eraseCfg.ifType) |
          HIFMC_OP_CFG_ADDR_NUM(spi->addrCycle) |
          HIFMC_OP_CFG_DUMMY_NUM(spi->eraseCfg.dummy);
    HIFMC_REG_WRITE(cntlr, reg, HIFMC_OP_CFG_REG_OFF);

    reg = HIFMC_OP_CMD1_EN(1) |
          HIFMC_OP_ADDR_EN(1) |
          HIFMC_OP_REG_OP_START;
    HIFMC_REG_WRITE(cntlr, reg, HIFMC_OP_REG_OFF);

    HIFMC_CMD_WAIT_CPU_FINISH(cntlr);

    return HDF_SUCCESS;
}
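
/* MTD erase hook: erase [from, from + len) block by block while holding the controller mutex; on failure the failing block address is returned through failAddr. */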
static int32_t HifmcSpinorErase(struct MtdDevice *mtdDevice, off_t from, size_t len, off_t *failAddr)
{
    int32_t ret;
    struct HifmcCntlr *cntlr = NULL;
    struct SpiFlash *spi = NULL;

    if (mtdDevice == NULL || mtdDevice->cntlr == NULL) {
        HDF_LOGE("%s: mtd or cntlr is null", __func__);
        return HDF_ERR_INVALID_OBJECT;
    }
    cntlr = mtdDevice->cntlr;

    spi = CONTAINER_OF(mtdDevice, struct SpiFlash, mtd);
    // lock cntlr
    (void)OsalMutexLock(&cntlr->mutex);
    while (len) {
#ifdef MTD_DEBUG
        HDF_LOGD("%s: start erase one block, addr=[%jd]", __func__, from);
#endif
        ret = HifmcCntlrEraseOneBlock(cntlr, spi, from);
        if (ret != HDF_SUCCESS) {
            *failAddr = from;
            (void)OsalMutexUnlock(&cntlr->mutex);
            return ret;
        }
        from += mtdDevice->eraseSize;
        len -= mtdDevice->eraseSize;
    }
    // unlock cntlr
    (void)OsalMutexUnlock(&cntlr->mutex);

    return HDF_SUCCESS;
}
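
/* Program one DMA transfer of 'len' bytes at flash offset 'offset': maintain cache coherency for the buffer, configure the flash address, interface type, dummy cycles, length and DMA address, then start the transfer and wait for the completion interrupt (wr == 1: write, wr == 0: read). */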
static int32_t HifmcCntlrDmaTransfer(struct HifmcCntlr *cntlr, struct SpiFlash *spi,
    off_t offset, uint8_t *buf, size_t len, int wr)
{
    uint8_t ifType;
    uint8_t dummy;
    uint8_t wCmd;
    uint8_t rCmd;
    unsigned long reg;

#ifdef MTD_DEBUG
    if (wr == 1) {
        HDF_LOGD("%s: start dma transfer => [%jd], len[%zu], wr=%d, buf=%p", __func__, offset, len, wr, buf);
    }
#endif
    if (wr == 1) {
        MtdDmaCacheClean((void *)buf, len);
    } else {
        MtdDmaCacheInv((void *)buf, len);
    }

    reg = HIFMC_INT_CLR_ALL;
    HIFMC_REG_WRITE(cntlr, reg, HIFMC_INT_CLR_REG_OFF);

    reg = offset;
    HIFMC_REG_WRITE(cntlr, reg, HIFMC_ADDRL_REG_OFF);

    ifType = (wr == 1) ? spi->writeCfg.ifType : spi->readCfg.ifType;
    dummy = (wr == 1) ? spi->writeCfg.dummy : spi->readCfg.dummy;
    wCmd = (wr == 1) ? spi->writeCfg.cmd : 0;
    rCmd = (wr == 0) ? spi->readCfg.cmd : 0;

    reg = HIFMC_OP_CFG_FM_CS(spi->cs) |
          HIFMC_OP_CFG_MEM_IF_TYPE(ifType) |
          HIFMC_OP_CFG_ADDR_NUM(spi->addrCycle) |
          HIFMC_OP_CFG_DUMMY_NUM(dummy);
    HIFMC_REG_WRITE(cntlr, reg, HIFMC_OP_CFG_REG_OFF);

    reg = HIFMC_DMA_LEN_SET(len);
    HIFMC_REG_WRITE(cntlr, reg, HIFMC_DMA_LEN_REG_OFF);

    reg = (unsigned long)((uintptr_t)buf);
    HIFMC_REG_WRITE(cntlr, reg, HIFMC_DMA_SADDR_D0_REG_OFF);

    reg = HIFMC_OP_CTRL_RD_OPCODE(rCmd) |
          HIFMC_OP_CTRL_WR_OPCODE(wCmd) |
          HIFMC_OP_CTRL_RW_OP(wr) |
          HIFMC_OP_CTRL_DMA_OP_READY;
    HIFMC_REG_WRITE(cntlr, reg, HIFMC_OP_CTRL_REG_OFF);

    HIFMC_DMA_WAIT_INT_FINISH(cntlr);
    if (wr == 0) {
        MtdDmaCacheInv((void *)buf, len);
    }

#ifdef MTD_DEBUG
    HDF_LOGD("%s: end dma transfer", __func__);
#endif
    return HDF_SUCCESS;
}
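
/* Do one DMA chunk: chunks larger than HIFMC_DMA_ALIGN_MASK are transferred in place through their physical address (the buffer must be DMA-aligned); smaller chunks are bounced through cntlr->dmaBuffer with copy-in/copy-out. */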
static int32_t HifmcCntlrDmaWriteReadOnce(struct SpiFlash *spi, off_t offset, uint8_t *buf, size_t num, int wr)
{
    int32_t ret;
    struct HifmcCntlr *cntlr = (struct HifmcCntlr *)spi->mtd.cntlr;

    if (num == 0) {
        return HDF_SUCCESS;
    }

    if (num > HIFMC_DMA_ALIGN_MASK) {
        if (((uintptr_t)buf & HIFMC_DMA_ALIGN_MASK) != 0) {
            HDF_LOGE("%s: block buf not aligned by %u", __func__, HIFMC_DMA_ALIGN_MASK);
            return HDF_ERR_INVALID_PARAM;
        }
        ret = HifmcCntlrDmaTransfer(cntlr, spi, offset,
            (uint8_t *)(uintptr_t)LOS_PaddrQuery((void *)buf), num, wr);
        return ret;
    }

    if (wr == 1) { // write
        if (LOS_CopyToKernel((void *)cntlr->dmaBuffer, num, (void *)buf, num) != 0) {
            HDF_LOGE("%s: copy from user failed, num = %zu", __func__, num);
            return HDF_ERR_IO;
        }
    }

    ret = HifmcCntlrDmaTransfer(cntlr, spi, offset,
        (uint8_t *)(uintptr_t)LOS_PaddrQuery((void *)cntlr->dmaBuffer), num, wr);
    if (ret != HDF_SUCCESS) {
        HDF_LOGE("%s: dma transfer failed : %d(num = %zu)", __func__, ret, num);
        return ret;
    }

    if (wr == 0) { // read
        if (LOS_CopyFromKernel((void *)buf, num, (void *)cntlr->dmaBuffer, num) != 0) {
            HDF_LOGE("%s: copy to user failed, num = %zu", __func__, num);
            return HDF_ERR_IO;
        }
    }

    return ret;
}
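
/* Split a transfer into up to three DMA chunks so the bulk of it is DMA-aligned: sizeL covers the unaligned head of buf, sizeM the aligned middle, and sizeR the remaining tail. */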
static int32_t HifmcCntlrDmaWriteRead(struct MtdDevice *mtdDevice, off_t offset, size_t len, uint8_t *buf, int wr)
{
    unsigned int i;
    int32_t ret;
    size_t num;
    size_t sizeL;
    size_t sizeM;
    size_t sizeR;
    size_t *sizeArray[] = {&sizeL, &sizeM, &sizeR};
    struct HifmcCntlr *cntlr = (struct HifmcCntlr *)mtdDevice->cntlr;
    struct SpiFlash *spi = CONTAINER_OF(mtdDevice, struct SpiFlash, mtd);

    if ((ret = SpiFlashWaitReady(spi)) != HDF_SUCCESS) {
        return ret;
    }

    if (wr == 1 && (ret = SpiFlashWriteEnable(spi)) != HDF_SUCCESS) {
        return ret;
    }

    // set system clock for write or read
    ret = HifmcCntlrSetSysClock(cntlr, (wr == 1) ? spi->writeCfg.clock : spi->readCfg.clock, 1);
    if (ret != HDF_SUCCESS) {
        return ret;
    }

    sizeL = HIFMC_DMA_ALIGN_SIZE - ((uintptr_t)buf & HIFMC_DMA_ALIGN_MASK); // unaligned head of buf
    if (sizeL > len) {
        sizeL = len;
    }
    len -= sizeL;

    sizeM = len & (~HIFMC_DMA_ALIGN_MASK); // aligned middle part
    len -= sizeM;

    sizeR = len; // remaining tail

    for (i = 0; i < (sizeof(sizeArray) / sizeof(sizeArray[0])); i++) {
        num = *sizeArray[i];
        ret = HifmcCntlrDmaWriteReadOnce(spi, offset, buf, num, wr);
        if (ret != HDF_SUCCESS) {
            HDF_LOGE("%s: dma trans failed(num = %zu)", __func__, num);
            return HDF_ERR_IO;
        }
        offset += num;
        buf += num;
    }

    return HDF_SUCCESS;
}
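
/* MTD write hook: DMA-write 'len' bytes from 'buf' to flash offset 'to' while holding the controller mutex. */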
static int32_t HifmcSpinorWrite(struct MtdDevice *mtdDevice, off_t to, size_t len, const uint8_t *buf)
{
    int32_t ret;
    struct HifmcCntlr *cntlr = NULL;

    if (mtdDevice == NULL) {
        return HDF_ERR_INVALID_OBJECT;
    }
    cntlr = mtdDevice->cntlr;

#ifdef MTD_DEBUG
    HDF_LOGD("%s: start write buf(%p) to=%jd len=%zu", __func__, buf, to, len);
#endif
    // lock cntlr
    (void)OsalMutexLock(&cntlr->mutex);
    ret = HifmcCntlrDmaWriteRead(mtdDevice, to, len, (uint8_t *)buf, 1);
    // unlock cntlr
    (void)OsalMutexUnlock(&cntlr->mutex);

#ifdef MTD_DEBUG
    HDF_LOGD("%s: end write buf(%p) to=%jd len=%zu", __func__, buf, to, len);
#endif
    return ret;
}
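
/* MTD read hook: DMA-read 'len' bytes from flash offset 'from' into 'buf' while holding the controller mutex. */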
static int32_t HifmcSpinorRead(struct MtdDevice *mtdDevice, off_t from, size_t len, uint8_t *buf)
{
    int32_t ret;
    struct HifmcCntlr *cntlr = NULL;

    if (mtdDevice == NULL) {
        return HDF_ERR_INVALID_OBJECT;
    }
    cntlr = mtdDevice->cntlr;

#ifdef MTD_DEBUG
    HDF_LOGD("%s: start read buf:%p from=%jd len=%zu", __func__, buf, from, len);
#endif
    // lock cntlr
    (void)OsalMutexLock(&cntlr->mutex);
    ret = HifmcCntlrDmaWriteRead(mtdDevice, from, len, buf, 0);
    // unlock cntlr
    (void)OsalMutexUnlock(&cntlr->mutex);

    return ret;
}
struct MtdDeviceMethod g_hifmcMtdMethodSpinor = {
    .read = HifmcSpinorRead,
    .write = HifmcSpinorWrite,
    .erase = HifmcSpinorErase,
};
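
/* Fill in the MTD device fields for the detected SPI NOR chip, enable the quad-enable (QE) bit, and configure 4-byte address mode according to the controller boot mode. */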
int32_t HifmcCntlrInitSpinorDevice(struct HifmcCntlr *cntlr, struct SpiFlash *spi)
{
    int32_t ret;

    (void)cntlr;
    spi->mtd.index = 0;
    spi->mtd.name = "mtd0";
    spi->mtd.ops = &g_hifmcMtdMethodSpinor;
    spi->mtd.type = MTD_TYPE_SPI_NOR;
    spi->mtd.writeSize = 1;
    spi->mtd.readSize = 1;

    ret = SpiFlashQeEnable(spi);
    if (ret != HDF_SUCCESS) {
        return ret;
    }

    ret = SpiFlashEntry4Addr(spi, (GET_FMC_BOOT_MODE == MTD_SPI_ADDR_3BYTE));
    if (ret != HDF_SUCCESS) {
        return ret;
    }

    return HDF_SUCCESS;
}