/*
 * Copyright (c) 2021 HiSilicon (Shanghai) Technologies CO., LIMITED.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "hifmc100_spi_nand.h"

#include "asm/platform.h"
#include "hisoc/flash.h"
#include "los_vm_phys.h"
#include "securec.h"

#include "device_resource_if.h"
#include "hdf_device_desc.h"
#include "hdf_log.h"
#include "mtd_core.h"
#include "mtd_spi_nand.h"
#include "mtd_spi_nor.h"
#include "osal_io.h"
#include "osal_mem.h"
#include "osal_mutex.h"
#include "platform_core.h"

#define SPI_NAND_ADDR_REG_SHIFT 16

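// Parse one SPI NAND chip entry from an HCS device table node: name, id, chip/block/page/oob
// sizes and the erase/write/read op configs.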
static int32_t HifmcCntlrReadSpinandInfo(struct SpinandInfo *info, const struct DeviceResourceNode *node)
{
    int32_t ret;
    struct DeviceResourceIface *drsOps = NULL;

    drsOps = DeviceResourceGetIfaceInstance(HDF_CONFIG_SOURCE);
    if (drsOps == NULL) {
        HDF_LOGE("%s: invalid drs ops", __func__);
        return HDF_ERR_NOT_SUPPORT;
    }

    ret = drsOps->GetString(node, "name", &info->name, "unknown");
    if (ret != HDF_SUCCESS || info->name == NULL) {
        HDF_LOGE("%s: read name failed:%d", __func__, ret);
        return ret;
    }

    ret = drsOps->GetElemNum(node, "id");
    if (ret <= 0 || ret > MTD_FLASH_ID_LEN_MAX) {
        HDF_LOGE("%s: get id len failed:%d", __func__, ret);
        return ret;
    }
    info->idLen = ret;

    ret = drsOps->GetUint8Array(node, "id", info->id, info->idLen, 0);
    if (ret != HDF_SUCCESS) {
        HDF_LOGE("%s: read id failed:%d", __func__, ret);
        return ret;
    }

    ret = drsOps->GetUint32(node, "chip_size", &info->chipSize, 0);
    if (ret != HDF_SUCCESS) {
        HDF_LOGE("%s: read chip size failed:%d", __func__, ret);
        return ret;
    }

    ret = drsOps->GetUint32(node, "block_size", &info->blockSize, 0);
    if (ret != HDF_SUCCESS) {
        HDF_LOGE("%s: read block size failed:%d", __func__, ret);
        return ret;
    }

    ret = drsOps->GetUint32(node, "page_size", &info->pageSize, 0);
    if (ret != HDF_SUCCESS) {
        HDF_LOGE("%s: read page size failed:%d", __func__, ret);
        return ret;
    }

    ret = drsOps->GetUint32(node, "oob_size", &info->oobSize, 0);
    if (ret != HDF_SUCCESS) {
        HDF_LOGI("%s: no oob size found", __func__);
        info->oobSize = 0;
    }

    if ((ret = HifmcCntlrReadSpiOp(&info->eraseCfg, drsOps->GetChildNode(node, "erase_op"))) != HDF_SUCCESS ||
        (ret = HifmcCntlrReadSpiOp(&info->writeCfg, drsOps->GetChildNode(node, "write_op"))) != HDF_SUCCESS ||
        (ret = HifmcCntlrReadSpiOp(&info->readCfg, drsOps->GetChildNode(node, "read_op"))) != HDF_SUCCESS) {
        return ret;
    }

    return HDF_SUCCESS;
}

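// Fill the MTD device fields of the SpiFlash object from the matched chip info.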
static void HifmcCntlrFillSpiNand(struct SpiFlash *spi, struct SpinandInfo *spinandInfo)
{
    spi->mtd.chipName = spinandInfo->name;
    spi->mtd.idLen = spinandInfo->idLen;
    spi->mtd.capacity = spinandInfo->chipSize;
    spi->mtd.eraseSize = spinandInfo->blockSize;
    spi->mtd.writeSize = spinandInfo->pageSize;
    spi->mtd.writeSizeShift = MtdFfs(spinandInfo->pageSize) - 1;
    spi->mtd.readSize = spinandInfo->pageSize;
    spi->mtd.oobSize = spinandInfo->oobSize;
    spi->eraseCfg = spinandInfo->eraseCfg;
    spi->writeCfg = spinandInfo->writeCfg;
    spi->readCfg = spinandInfo->readCfg;
}

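// Match the probed flash ID against the HCS device table and fill in the chip parameters on a hit.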
int32_t HifmcCntlrSearchSpinandInfo(struct HifmcCntlr *cntlr, struct SpiFlash *spi)
{
    unsigned int i;
    int32_t ret;
    struct SpinandInfo spinandInfo;
    const struct DeviceResourceNode *childNode = NULL;
    const struct DeviceResourceNode *tableNode = NULL;

    tableNode = HifmcCntlrGetDevTableNode(cntlr);
    if (tableNode == NULL) {
        return HDF_ERR_NOT_SUPPORT;
    }

    DEV_RES_NODE_FOR_EACH_CHILD_NODE(tableNode, childNode) {
        ret = HifmcCntlrReadSpinandInfo(&spinandInfo, childNode);
        if (ret != HDF_SUCCESS) {
            return HDF_ERR_IO;
        }
        if (memcmp(spinandInfo.id, spi->mtd.id, spinandInfo.idLen) == 0) {
            HifmcCntlrFillSpiNand(spi, &spinandInfo);
            return HDF_SUCCESS;
        }
    }

    HDF_LOGE("%s: dev id not support", __func__);
    for (i = 0; i < sizeof(spi->mtd.id); i++) {
        HDF_LOGE("%s: mtd->id[%u] = 0x%x", __func__, i, spi->mtd.id[i]);
    }
    return HDF_ERR_NOT_SUPPORT;
}

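// Issue a one-byte register read to the device; RDSR goes through the controller's read-status path.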
uint8_t HifmcCntlrReadSpinandReg(struct HifmcCntlr *cntlr, struct SpiFlash *spi, uint8_t cmd)
{
    uint8_t status;
    unsigned int reg;

#ifdef MTD_DEBUG
    HDF_LOGD("%s: start read spi register:%#x", __func__, cmd);
#endif
    reg = HIFMC_OP_CFG_FM_CS(spi->cs);
    HIFMC_REG_WRITE(cntlr, reg, HIFMC_OP_CFG_REG_OFF);

    if (cmd == MTD_SPI_CMD_RDSR) {
        reg = HIFMC_OP_READ_STATUS_EN(1) | HIFMC_OP_REG_OP_START;
        goto __CMD_CFG_DONE;
    }

    reg = cmd;
    HIFMC_REG_WRITE(cntlr, reg, HIFMC_CMD_REG_OFF);

    reg = HIFMC_DATA_NUM_CNT(1);
    HIFMC_REG_WRITE(cntlr, reg, HIFMC_DATA_NUM_REG_OFF);

    reg = HIFMC_OP_CMD1_EN(1) |
          HIFMC_OP_READ_DATA_EN(1) |
          HIFMC_OP_REG_OP_START;

__CMD_CFG_DONE:
    HIFMC_REG_WRITE(cntlr, reg, HIFMC_OP_REG_OFF);

    HIFMC_CMD_WAIT_CPU_FINISH(cntlr);

    if (cmd == MTD_SPI_CMD_RDSR) {
        status = HIFMC_REG_READ(cntlr, HIFMC_FLASH_INFO_REG_OFF);
    } else {
        status = OSAL_READB(cntlr->memBase);
    }
#ifdef MTD_DEBUG
    HDF_LOGD("%s: end read spi register:%#x, val:%#x", __func__, cmd, status);
#endif

    return status;
}

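// Set the controller ECC type bits to 0 for raw accesses (enable != 0), or restore the saved config (enable == 0).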
static void HifmcCntlrEcc0Switch(struct HifmcCntlr *cntlr, int enable)
{
    unsigned int config;

    if (enable == 0) {
        config = cntlr->cfg;
    } else {
        config = cntlr->cfg & (~HIFMC_ECC_TYPE_MASK);
    }
    HIFMC_REG_WRITE(cntlr, config, HIFMC_CFG_REG_OFF);
}

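// Get or set a SPI NAND feature register; reading the status register uses the controller's read-status shortcut.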
int32_t HifmcCntlrDevFeatureOp(struct HifmcCntlr *cntlr, struct SpiFlash *spi,
    bool isGet, uint8_t addr, uint8_t *val)
{
    unsigned int reg;

    if (isGet && (addr == MTD_SPI_NAND_STATUS_ADDR)) {
#ifdef MTD_DEBUG
        HDF_LOGD("%s: start get status", __func__);
#endif
        reg = HIFMC_OP_CFG_FM_CS(spi->cs);
        HIFMC_REG_WRITE(cntlr, reg, HIFMC_OP_CFG_REG_OFF);

        reg = HIFMC_OP_READ_STATUS_EN(1) | HIFMC_OP_REG_OP_START;
        HIFMC_REG_WRITE(cntlr, reg, HIFMC_OP_REG_OFF);

        HIFMC_CMD_WAIT_CPU_FINISH(cntlr);

        *val = HIFMC_REG_READ(cntlr, HIFMC_FLASH_INFO_REG_OFF);
#ifdef MTD_DEBUG
        HDF_LOGD("%s: end get status:%#x", __func__, *val);
#endif
        return HDF_SUCCESS;
    }

#ifdef MTD_DEBUG
    HDF_LOGD("%s: start %s feature(addr:0x%x)", __func__, isGet ? "get" : "set", addr);
#endif
    HifmcCntlrEcc0Switch(cntlr, 1);

    reg = HIFMC_CMD_CMD1(isGet ? MTD_SPI_CMD_GET_FEATURE : MTD_SPI_CMD_SET_FEATURE);
    HIFMC_REG_WRITE(cntlr, reg, HIFMC_CMD_REG_OFF);

    HIFMC_REG_WRITE(cntlr, addr, HIFMC_ADDRL_REG_OFF);

    reg = HIFMC_OP_CFG_FM_CS(spi->cs) | HIFMC_OP_CFG_ADDR_NUM(1);
    HIFMC_REG_WRITE(cntlr, reg, HIFMC_OP_CFG_REG_OFF);

    reg = HIFMC_DATA_NUM_CNT(1);
    HIFMC_REG_WRITE(cntlr, reg, HIFMC_DATA_NUM_REG_OFF);

    reg = HIFMC_OP_CMD1_EN(1) | HIFMC_OP_ADDR_EN(1) | HIFMC_OP_REG_OP_START;

    if (!isGet) {
        reg |= HIFMC_OP_WRITE_DATA_EN(1);
        OSAL_WRITEB(*val, cntlr->memBase);
    } else {
        reg |= HIFMC_OP_READ_DATA_EN(1);
    }
    HIFMC_REG_WRITE(cntlr, reg, HIFMC_OP_REG_OFF);

    HIFMC_CMD_WAIT_CPU_FINISH(cntlr);

    if (isGet) {
        *val = OSAL_READB(cntlr->memBase);
    }
    HifmcCntlrEcc0Switch(cntlr, 0);
#ifdef MTD_DEBUG
    HDF_LOGD("%s: end %s feature:%#x(addr:0x%x)", __func__, isGet ? "get" : "set", *val, addr);
#endif
    return HDF_SUCCESS;
}

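// Read up to len ID bytes (len <= MTD_FLASH_ID_LEN_MAX) from the device on chip-select cs with ECC bypassed.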
int32_t HifmcCntlrReadIdSpiNand(struct HifmcCntlr *cntlr, uint8_t cs, uint8_t *id, size_t len)
{
    int32_t ret;
    unsigned int reg;

    if (len > MTD_FLASH_ID_LEN_MAX) {
        HDF_LOGE("%s: buf not enough(len: %zu, expected %u)", __func__, len, MTD_FLASH_ID_LEN_MAX);
        return HDF_ERR_INVALID_PARAM;
    }

    HifmcCntlrEcc0Switch(cntlr, 1);

    reg = HIFMC_CMD_CMD1(MTD_SPI_CMD_RDID);
    HIFMC_REG_WRITE(cntlr, reg, HIFMC_CMD_REG_OFF);

    reg = MTD_SPI_NAND_RDID_ADDR;
    HIFMC_REG_WRITE(cntlr, reg, HIFMC_ADDRL_REG_OFF);

    reg = HIFMC_OP_CFG_FM_CS(cs) |
          HIFMC_OP_CFG_ADDR_NUM(1);
    HIFMC_REG_WRITE(cntlr, reg, HIFMC_OP_CFG_REG_OFF);

    reg = HIFMC_DATA_NUM_CNT(MTD_FLASH_ID_LEN_MAX);
    HIFMC_REG_WRITE(cntlr, reg, HIFMC_DATA_NUM_REG_OFF);

    reg = HIFMC_OP_CMD1_EN(1) |
          HIFMC_OP_ADDR_EN(1) |
          HIFMC_OP_READ_DATA_EN(1) |
          HIFMC_OP_REG_OP_START;
    HIFMC_REG_WRITE(cntlr, reg, HIFMC_OP_REG_OFF);

    HIFMC_CMD_WAIT_CPU_FINISH(cntlr);

    ret = memcpy_s(id, len, (const void *)cntlr->memBase, len);
    if (ret != EOK) {
        HDF_LOGE("%s: copy id buf failed : %d", __func__, ret);
        return HDF_PLT_ERR_OS_API;
    }
    HifmcCntlrEcc0Switch(cntlr, 0);
    return HDF_SUCCESS;
}

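// Erase one block: wait ready, write enable, issue the configured erase command, then check the device status for an erase failure.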
static int32_t HifmcCntlrEraseOneBlock(struct HifmcCntlr *cntlr, struct SpiFlash *spi, off_t addr)
{
    int32_t ret;
    uint8_t status;
    unsigned int reg;

    ret = SpiFlashWaitReady(spi);
    if (ret != HDF_SUCCESS) {
        return ret;
    }

    ret = SpiFlashWriteEnable(spi);
    if (ret != HDF_SUCCESS) {
        return ret;
    }

    // set system clock for erase
    ret = HifmcCntlrSetSysClock(cntlr, spi->eraseCfg.clock, 1);
    if (ret != HDF_SUCCESS) {
        return ret;
    }

    reg = HIFMC_INT_CLR_ALL;
    HIFMC_REG_WRITE(cntlr, reg, HIFMC_INT_CLR_REG_OFF);

    reg = HIFMC_CMD_CMD1(spi->eraseCfg.cmd);
    HIFMC_REG_WRITE(cntlr, reg, HIFMC_CMD_REG_OFF);

    reg = addr >> spi->mtd.writeSizeShift;
    HIFMC_REG_WRITE(cntlr, reg, HIFMC_ADDRL_REG_OFF);

    reg = HIFMC_OP_CFG_FM_CS(spi->cs) |
          HIFMC_OP_CFG_MEM_IF_TYPE(spi->eraseCfg.ifType) |
          HIFMC_OP_CFG_ADDR_NUM(MTD_SPI_STD_OP_ADDR_NUM) |
          HIFMC_OP_CFG_DUMMY_NUM(spi->eraseCfg.dummy);
    HIFMC_REG_WRITE(cntlr, reg, HIFMC_OP_CFG_REG_OFF);

    reg = HIFMC_OP_CMD1_EN(1) |
          HIFMC_OP_ADDR_EN(1) |
          HIFMC_OP_REG_OP_START;
    HIFMC_REG_WRITE(cntlr, reg, HIFMC_OP_REG_OFF);

    HIFMC_CMD_WAIT_CPU_FINISH(cntlr);

    ret = HifmcCntlrDevFeatureOp(cntlr, spi, true, MTD_SPI_NAND_STATUS_ADDR, &status);
    if (ret != HDF_SUCCESS) {
        return ret;
    }
    if (status & MTD_SPI_NAND_ERASE_FAIL) {
        HDF_LOGE("%s: erase fail, status = 0x%0x", __func__, status);
        return HDF_ERR_IO;
    }

    return HDF_SUCCESS;
}

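// Copy len bytes out of the controller page buffer at the given offset into buf.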
static inline int32_t HifmcCntlrReadBuf(struct HifmcCntlr *cntlr, uint8_t *buf, size_t len, off_t offset)
{
    int32_t ret;

    if (len == 0) {
        return HDF_ERR_INVALID_PARAM;
    }
    ret = memcpy_s((void *)buf, len, (void *)(cntlr->buffer + offset), len);
    if (ret != 0) {
        HDF_LOGE("%s: memcpy failed, ret = %d", __func__, ret);
        return HDF_PLT_ERR_OS_API;
    }
    return HDF_SUCCESS;
}

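// ECC layout descriptor and the table of supported layouts (page size, ECC strength, OOB bytes used); terminated by an all-zero entry.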
struct HifmcEccInfo {
    unsigned int pageSize;
    unsigned int eccType;
    unsigned int oobSize;
};

static struct HifmcEccInfo g_hifmcEccInfoTable[] = {
    {MTD_NAND_PAGE_SIZE_4K, MTD_NAND_ECC_24BIT_1K,  200},
    {MTD_NAND_PAGE_SIZE_4K, MTD_NAND_ECC_16BIT_1K,  144},
    {MTD_NAND_PAGE_SIZE_4K, MTD_NAND_ECC_8BIT_1K,   88},
    {MTD_NAND_PAGE_SIZE_4K, MTD_NAND_ECC_0BIT,      32},
    {MTD_NAND_PAGE_SIZE_2K, MTD_NAND_ECC_24BIT_1K,  128},
    {MTD_NAND_PAGE_SIZE_2K, MTD_NAND_ECC_16BIT_1K,  88},
    {MTD_NAND_PAGE_SIZE_2K, MTD_NAND_ECC_8BIT_1K,   64},
    {MTD_NAND_PAGE_SIZE_2K, MTD_NAND_ECC_0BIT,      32},
    {0, 0, 0},
};

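// Pick the matching table entry with the highest ECC type whose page size equals the chip's and whose OOB requirement fits.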
static struct HifmcEccInfo *HifmcCntlrGetEccInfo(struct SpiFlash *spi)
{
    struct HifmcEccInfo *best = NULL;
    struct HifmcEccInfo *tab = g_hifmcEccInfoTable;

    for (; tab->pageSize != 0; tab++) {
        if (tab->pageSize != spi->mtd.writeSize) {
            continue;
        }
        if (tab->oobSize > spi->mtd.oobSize) {
            continue;
        }
        if (best == NULL || best->eccType < tab->eccType) {
            best = tab;
        }
    }
    if (best == NULL) {
        HDF_LOGW("%s: Not support pagesize:%zu, oobsize:%zu",
            __func__, spi->mtd.writeSize, spi->mtd.oobSize);
    }
    return best;
}

static int HifmcGetPageSizeConfig(size_t pageSize)
{
    switch (pageSize) {
        case MTD_NAND_PAGE_SIZE_2K:
            return HIFMC_PAGE_SIZE_2K;
        case MTD_NAND_PAGE_SIZE_4K:
            return HIFMC_PAGE_SIZE_4K;
        case MTD_NAND_PAGE_SIZE_8K:
            return HIFMC_PAGE_SIZE_8K;
        case MTD_NAND_PAGE_SIZE_16K:
            return HIFMC_PAGE_SIZE_16K;
        default:
            return HDF_ERR_NOT_SUPPORT;
    }
}

static int HifmcGetEccTypeConfig(unsigned int eccType)
{
    static unsigned int eccCfgTab[] = {
        [MTD_NAND_ECC_0BIT] = HIFMC_ECC_0BIT,
        [MTD_NAND_ECC_8BIT_1K] = HIFMC_ECC_8BIT,
        [MTD_NAND_ECC_16BIT_1K] = HIFMC_ECC_16BIT,
        [MTD_NAND_ECC_24BIT_1K] = HIFMC_ECC_24BIT,
        [MTD_NAND_ECC_28BIT_1K] = HIFMC_ECC_28BIT,
        [MTD_NAND_ECC_40BIT_1K] = HIFMC_ECC_40BIT,
        [MTD_NAND_ECC_64BIT_1K] = HIFMC_ECC_64BIT,
    };

    if ((size_t)eccType >= (sizeof(eccCfgTab) / sizeof(eccCfgTab[0]))) {
        HDF_LOGE("%s: ecc type:%u not support", __func__, eccType);
        return HDF_ERR_NOT_SUPPORT;
    }
    return eccCfgTab[eccType];
}

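// Pick an ECC layout for the chip, program the controller page-size/ECC-type config, and record the config and geometry in the controller.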
int32_t HifmcCntlrInitOob(struct HifmcCntlr *cntlr, struct SpiFlash *spi)
{
    unsigned int reg;
    uint8_t pageCfg;
    uint8_t eccCfg;
    struct HifmcEccInfo *ecc = NULL;

#ifdef MTD_DEBUG
    HDF_LOGD("%s: start oob resize", __func__);
#endif

    ecc = HifmcCntlrGetEccInfo(spi);
    if (ecc == NULL) {
        return HDF_ERR_NOT_SUPPORT;
    }

    pageCfg = HifmcGetPageSizeConfig(ecc->pageSize);
    reg = HIFMC_REG_READ(cntlr, HIFMC_CFG_REG_OFF);
    reg &= ~HIFMC_PAGE_SIZE_MASK;
    reg |= HIFMC_CFG_PAGE_SIZE(pageCfg);

    eccCfg = HifmcGetEccTypeConfig(ecc->eccType);
    reg &= ~HIFMC_ECC_TYPE_MASK;
    reg |= HIFMC_CFG_ECC_TYPE(eccCfg);
    HIFMC_REG_WRITE(cntlr, reg, HIFMC_CFG_REG_OFF);
    cntlr->cfg = reg;

#ifdef MTD_DEBUG
    HDF_LOGD("%s: config pagesize:%u ecctype:%u oobsize:%u",
        __func__, ecc->pageSize, ecc->eccType, ecc->oobSize);
#endif
    if (ecc->eccType != MTD_NAND_ECC_0BIT) {
        cntlr->oobSize = spi->mtd.oobSize = ecc->oobSize;
    }
    cntlr->pageSize = ecc->pageSize;
    cntlr->eccType = ecc->eccType;

#ifdef MTD_DEBUG
    HDF_LOGD("%s: end oob resize", __func__);
#endif

    return HDF_SUCCESS;
}

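// MTD erase hook: validate alignment and range, then erase block by block under the controller mutex, reporting a failing address via faddr.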
static int32_t HifmcMtdEraseSpinand(struct MtdDevice *mtdDevice, off_t addr, size_t len, off_t *faddr)
{
    int32_t ret;
    struct HifmcCntlr *cntlr = NULL;
    struct SpiFlash *spi = NULL;

    if (mtdDevice == NULL || mtdDevice->cntlr == NULL) {
        HDF_LOGE("%s: mtd or cntlr is null", __func__);
        return HDF_ERR_INVALID_OBJECT;
    }
    cntlr = mtdDevice->cntlr;
    spi = CONTAINER_OF(mtdDevice, struct SpiFlash, mtd);

    if (addr % mtdDevice->eraseSize != 0 || len % mtdDevice->eraseSize != 0) {
        HDF_LOGE("%s: not aligned by block size:%zu(addr:%jd, len:%zu)", __func__, mtdDevice->eraseSize, addr, len);
        return HDF_ERR_NOT_SUPPORT;
    }
    if ((len + addr) > mtdDevice->capacity) {
        HDF_LOGE("%s: out of range, addr:%jd, len:%zu", __func__, addr, len);
        return HDF_ERR_NOT_SUPPORT;
    }
    if (len < mtdDevice->eraseSize) {
        HDF_LOGE("%s: erase size:%zu < one block:%zu", __func__, len, mtdDevice->eraseSize);
        return HDF_ERR_NOT_SUPPORT;
    }

    // lock cntlr
    (void)OsalMutexLock(&cntlr->mutex);
    while (len) {
#ifdef MTD_DEBUG
        HDF_LOGD("%s: start erase one block, addr=[%jd]", __func__, addr);
#endif
        ret = HifmcCntlrEraseOneBlock(cntlr, spi, addr);
        if (ret != HDF_SUCCESS) {
            if (faddr != NULL) {
                *faddr = addr;
            }
            (void)OsalMutexUnlock(&cntlr->mutex);
            return ret;
        }
        addr += mtdDevice->eraseSize;
        len -= mtdDevice->eraseSize;
    }
    // unlock cntlr
    (void)OsalMutexUnlock(&cntlr->mutex);
    return HDF_SUCCESS;
}

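// Fill the OOB area of the controller page buffer with 0xff, then copy len bytes of buf into the buffer at offset (bounds-checked).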
static int32_t HifmcCntlrWriteBuf(struct HifmcCntlr *cntlr,
    struct SpiFlash *spi, const uint8_t *buf, size_t len, off_t offset)
{
    int32_t ret;

    ret = memset_s((void *)(cntlr->buffer + spi->mtd.writeSize), spi->mtd.oobSize, 0xff, spi->mtd.oobSize);
    if (ret != 0) {
        HDF_LOGE("%s: memset_s failed!", __func__);
        return HDF_PLT_ERR_OS_API;
    }

    if ((offset + len) > cntlr->bufferSize) {
        HDF_LOGE("%s: invalid params, offset:%jd, len:%zu(buffer size:%zu)",
            __func__, offset, len, cntlr->bufferSize);
        return HDF_ERR_INVALID_PARAM;
    }

    ret = memcpy_s((void *)(cntlr->buffer + offset), len, (void *)buf, len);
    if (ret != 0) {
        HDF_LOGE("%s: memcpy_s failed!", __func__);
        return HDF_PLT_ERR_OS_API;
    }

    return HDF_SUCCESS;
}

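// Program the staged controller buffer (data + OOB) into the given flash page via DMA.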
static int32_t HifmcCntlrPageProgram(struct HifmcCntlr *cntlr, struct SpiFlash *spi, uint32_t page)
{
    int32_t ret;
    unsigned int reg;

#ifdef MTD_DEBUG
    HDF_LOGD("%s: start program page @0x%x", __func__, page);
#endif

    ret = SpiFlashWaitReady(spi);
    if (ret != HDF_SUCCESS) {
        return ret;
    }

    ret = SpiFlashWriteEnable(spi);
    if (ret != HDF_SUCCESS) {
        return ret;
    }

    ret = HifmcCntlrSetSysClock(cntlr, spi->writeCfg.clock, 1);
    if (ret != HDF_SUCCESS) {
        return ret;
    }

    // enter normal mode
    reg = HIFMC_REG_READ(cntlr, HIFMC_CFG_REG_OFF);
    reg &= (~HIFMC_OP_MODE_MASK);
    reg |= HIFMC_CFG_OP_MODE(HIFMC_OP_MODE_NORMAL);
    HIFMC_REG_WRITE(cntlr, reg, HIFMC_CFG_REG_OFF);

    reg = HIFMC_INT_CLR_ALL;
    HIFMC_REG_WRITE(cntlr, reg, HIFMC_INT_CLR_REG_OFF);

    reg = (page >> SPI_NAND_ADDR_REG_SHIFT) & 0xff;   // higher bits
    HIFMC_REG_WRITE(cntlr, reg, HIFMC_ADDRH_REG_OFF);
    reg = (page & 0xffff) << SPI_NAND_ADDR_REG_SHIFT; // lower bits
    HIFMC_REG_WRITE(cntlr, reg, HIFMC_ADDRL_REG_OFF);

    MtdDmaCacheClean(cntlr->dmaBuffer, spi->mtd.writeSize + spi->mtd.oobSize);
    reg = (uintptr_t)LOS_PaddrQuery(cntlr->dmaBuffer);
    HIFMC_REG_WRITE(cntlr, reg, HIFMC_DMA_SADDR_D0_REG_OFF);
    reg = (uintptr_t)LOS_PaddrQuery(cntlr->dmaBuffer + spi->mtd.writeSize);
    HIFMC_REG_WRITE(cntlr, reg, HIFMC_DMA_SADDR_OOB_REG_OFF);

    reg = HIFMC_OP_CFG_FM_CS(spi->cs);
    reg |= HIFMC_OP_CFG_MEM_IF_TYPE(spi->writeCfg.ifType);
    HIFMC_REG_WRITE(cntlr, reg, HIFMC_OP_CFG_REG_OFF);

    reg = HIFMC_OP_CTRL_WR_OPCODE(spi->writeCfg.cmd);
    reg |= HIFMC_OP_CTRL_DMA_OP(HIFMC_OP_CTRL_TYPE_DMA);
    reg |= HIFMC_OP_CTRL_RW_OP(HIFMC_OP_CTRL_OP_WRITE);
    reg |= HIFMC_OP_CTRL_DMA_OP_READY;
    HIFMC_REG_WRITE(cntlr, reg, HIFMC_OP_CTRL_REG_OFF);

    HIFMC_DMA_WAIT_INT_FINISH(cntlr);
#ifdef MTD_DEBUG
    HDF_LOGD("%s: end program page @0x%x", __func__, page);
#endif
    return HDF_SUCCESS;
}

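// Read one flash page (data + OOB) into the controller buffer via DMA.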
static int32_t HifmcCntlrReadOnePageToBuf(struct HifmcCntlr *cntlr, struct SpiFlash *spi, size_t page)
{
    int32_t ret;
    unsigned int reg;

#ifdef MTD_DEBUG
    HDF_LOGD("%s: start read page:0x%x", __func__, page);
#endif

    ret = SpiFlashWaitReady(spi);
    if (ret != HDF_SUCCESS) {
        return ret;
    }

    ret = HifmcCntlrSetSysClock(cntlr, spi->readCfg.clock, 1);
    if (ret != HDF_SUCCESS) {
        return ret;
    }

    reg = HIFMC_INT_CLR_ALL;
    HIFMC_REG_WRITE(cntlr, reg, HIFMC_INT_CLR_REG_OFF);

    reg = HIFMC_OP_CFG_FM_CS(spi->cs);
    reg |= HIFMC_OP_CFG_MEM_IF_TYPE(spi->readCfg.ifType);
    reg |= HIFMC_OP_CFG_DUMMY_NUM(spi->readCfg.dummy);
    HIFMC_REG_WRITE(cntlr, reg, HIFMC_OP_CFG_REG_OFF);

    reg = (page >> SPI_NAND_ADDR_REG_SHIFT) & 0xff; // write higher bits
    HIFMC_REG_WRITE(cntlr, reg, HIFMC_ADDRH_REG_OFF);
    reg = (page & 0xffff) << SPI_NAND_ADDR_REG_SHIFT; // write lower bits
    HIFMC_REG_WRITE(cntlr, reg, HIFMC_ADDRL_REG_OFF);

    MtdDmaCacheInv(cntlr->dmaBuffer, spi->mtd.writeSize + spi->mtd.oobSize);

    reg = (uintptr_t)LOS_PaddrQuery(cntlr->dmaBuffer);
    HIFMC_REG_WRITE(cntlr, reg, HIFMC_DMA_SADDR_D0_REG_OFF);
    reg += spi->mtd.writeSize;
    HIFMC_REG_WRITE(cntlr, reg, HIFMC_DMA_SADDR_OOB_REG_OFF);

    reg = HIFMC_OP_CTRL_RD_OPCODE(spi->readCfg.cmd);
    reg |= HIFMC_OP_CTRL_DMA_OP(HIFMC_OP_CTRL_TYPE_DMA);
    reg |= HIFMC_OP_CTRL_RW_OP(HIFMC_OP_CTRL_OP_READ);
    reg |= HIFMC_OP_CTRL_DMA_OP_READY;
    HIFMC_REG_WRITE(cntlr, reg, HIFMC_OP_CTRL_REG_OFF);

    HIFMC_DMA_WAIT_INT_FINISH(cntlr);

    MtdDmaCacheInv(cntlr->dmaBuffer, spi->mtd.writeSize + spi->mtd.oobSize);

#ifdef MTD_DEBUG
    HDF_LOGD("%s: end read page:0x%x", __func__, page);
#endif
    return HDF_SUCCESS;
}

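// MTD hook: a block is reported bad when either of the first two OOB bytes of its first page is not 0xff.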
static bool HifmcMtdIsBadBlockSpinand(struct MtdDevice *mtdDevice, off_t addr)
{
    int32_t ret;
    uint8_t bb[MTD_NAND_BB_SIZE];
    size_t page;
    struct SpiFlash *spi = CONTAINER_OF(mtdDevice, struct SpiFlash, mtd);
    struct HifmcCntlr *cntlr = (struct HifmcCntlr *)spi->mtd.cntlr;

    page = (addr >> mtdDevice->writeSizeShift);
    ret = HifmcCntlrReadOnePageToBuf(cntlr, spi, page);
    if (ret != HDF_SUCCESS) {
        return false;
    }

    ret = HifmcCntlrReadBuf(cntlr, bb, MTD_NAND_BB_SIZE, spi->mtd.writeSize);
    if (ret != HDF_SUCCESS) {
        return false;
    }

#ifdef MTD_DEBUG
    HDF_LOGD("%s: bb[0] = 0x%x, bb[1] = 0x%x", __func__, bb[0], bb[1]);
#endif
    if (bb[0] != (uint8_t)0xff || bb[1] != (uint8_t)0xff) {
        return true;
    }
    return false;
}

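// MTD hook: mark a block bad by programming 0x00 into the bad-block bytes of its first page's OOB and checking the program status.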
static int32_t HifmcMtdMarkBadBlockSpinand(struct MtdDevice *mtdDevice, off_t addr)
{
    int32_t ret;
    uint8_t bb[MTD_NAND_BB_SIZE];
    uint8_t status;
    off_t page;
    struct HifmcCntlr *cntlr = NULL;
    struct SpiFlash *spi = CONTAINER_OF(mtdDevice, struct SpiFlash, mtd);

    page = addr >> mtdDevice->writeSizeShift;

    ret = memset_s(bb, MTD_NAND_BB_SIZE, 0x00, MTD_NAND_BB_SIZE);
    if (ret != 0) {
        HDF_LOGE("%s: memset_s failed!", __func__);
        return HDF_PLT_ERR_OS_API;
    }

    cntlr = (struct HifmcCntlr *)spi->mtd.cntlr;
    ret = HifmcCntlrWriteBuf(cntlr, spi, bb, MTD_NAND_BB_SIZE, spi->mtd.writeSize);
    if (ret != HDF_SUCCESS) {
        return ret;
    }

    ret = HifmcCntlrPageProgram(cntlr, spi, page);
    if (ret != HDF_SUCCESS) {
        return ret;
    }

    ret = HifmcCntlrDevFeatureOp(cntlr, spi, true, MTD_SPI_NAND_STATUS_ADDR, &status);
    if (ret != HDF_SUCCESS) {
        return ret;
    }
    if ((status & MTD_SPI_NAND_PROG_FAIL) != 0) {
        HDF_LOGE("%s: page[0x%jx] program failed, status:0x%x", __func__, page, status);
        return HDF_ERR_IO;
    }

    return HDF_SUCCESS;
}

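// Program one page: stage data plus OOB (or 0xff bad-block bytes if none supplied), start the page program and verify the device status.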
static int32_t HifmcCntlrWriteOnePage(struct HifmcCntlr *cntlr, struct SpiFlash *spi, struct MtdPage *mtdPage)
{
    int32_t ret;
    uint8_t status;
    uint8_t badFlag[MTD_NAND_BB_SIZE];
    uint8_t *oobBuf = NULL;
    size_t oobLen;
    size_t page;
    size_t to = mtdPage->addr;

    if ((to & (spi->mtd.writeSize - 1)) != 0) {
        HDF_LOGE("%s: addr:%zu not aligned by page size:%zu", __func__, to, spi->mtd.writeSize);
        return HDF_ERR_INVALID_PARAM;
    }
    page = (to >> spi->mtd.writeSizeShift);

    ret = HifmcCntlrWriteBuf(cntlr, spi, mtdPage->dataBuf, mtdPage->dataLen, 0);
    if (ret != HDF_SUCCESS) {
        return ret;
    }

    if (mtdPage->oobLen == 0 || mtdPage->oobBuf == NULL) {
        ret = memset_s(badFlag, MTD_NAND_BB_SIZE, 0xff, MTD_NAND_BB_SIZE);
        if (ret != 0) {
            HDF_LOGE("%s: memset_s failed!", __func__);
            return HDF_PLT_ERR_OS_API;
        }
        oobBuf = badFlag;
        oobLen = MTD_NAND_BB_SIZE;
    } else {
        oobBuf = mtdPage->oobBuf;
        oobLen = mtdPage->oobLen;
    }

    ret = HifmcCntlrWriteBuf(cntlr, spi, oobBuf, oobLen, spi->mtd.writeSize);
    if (ret != HDF_SUCCESS) {
        return ret;
    }

    ret = HifmcCntlrPageProgram(cntlr, spi, page);
    if (ret != HDF_SUCCESS) {
        return ret;
    }

    ret = HifmcCntlrDevFeatureOp(cntlr, spi, true, MTD_SPI_NAND_STATUS_ADDR, &status);
    if (ret != HDF_SUCCESS) {
        return ret;
    }
    if ((status & MTD_SPI_NAND_PROG_FAIL) != 0) {
        HDF_LOGE("%s: page[0x%zx] program failed, status:0x%x", __func__, page, status);
        return HDF_ERR_IO;
    }

    return HDF_SUCCESS;
}

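// Read one page into the caller's data buffer, and into the OOB buffer if one is provided.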
static int32_t HifmcCntlrReadOnePage(struct HifmcCntlr *cntlr, struct SpiFlash *spi, struct MtdPage *mtdPage)
{
    int32_t ret;
    size_t page;
    size_t offset;
    size_t from = mtdPage->addr;

    page = (from >> spi->mtd.writeSizeShift);
    offset = from & (spi->mtd.writeSize - 1);

    ret = HifmcCntlrReadOnePageToBuf(cntlr, spi, page);
    if (ret != HDF_SUCCESS) {
        return ret;
    }

    ret = HifmcCntlrReadBuf(cntlr, mtdPage->dataBuf, mtdPage->dataLen, offset);
    if (ret != HDF_SUCCESS) {
        return ret;
    }

    if (mtdPage->oobLen != 0 && mtdPage->oobBuf != NULL) {
        ret = HifmcCntlrReadBuf(cntlr, mtdPage->oobBuf, mtdPage->oobLen, spi->mtd.writeSize);
        if (ret != HDF_SUCCESS) {
            return ret;
        }
    }

    return HDF_SUCCESS;
}

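// MTD page-transfer hook: dispatch the page message to the read or write path.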
static int32_t HifmcMtdPageTransfer(struct MtdDevice *mtdDevice, struct MtdPage *mtdPage)
{
    struct SpiFlash *spi = NULL;
    struct HifmcCntlr *cntlr = NULL;

    spi = CONTAINER_OF(mtdDevice, struct SpiFlash, mtd);
    cntlr = (struct HifmcCntlr *)mtdDevice->cntlr;
    if (mtdPage->type == MTD_MSG_TYPE_READ) {
        return HifmcCntlrReadOnePage(cntlr, spi, mtdPage);
    } else if (mtdPage->type == MTD_MSG_TYPE_WRITE) {
        return HifmcCntlrWriteOnePage(cntlr, spi, mtdPage);
    }
    return HDF_ERR_NOT_SUPPORT;
}

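// MTD operation table exported for SPI NAND chips behind this controller.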
struct MtdDeviceMethod g_hifmcMtdMethodSpinand = {
    .erase = HifmcMtdEraseSpinand,
    .pageTransfer = HifmcMtdPageTransfer,
    .isBadBlock = HifmcMtdIsBadBlockSpinand,
    .markBadBlock = HifmcMtdMarkBadBlockSpinand,
};

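// Clear all block-protection bits in the protection feature register and verify the device accepted the change.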
static int32_t HifmcCntlrDevWpDisable(struct HifmcCntlr *cntlr, struct SpiFlash *spi)
{
    int32_t ret;
    uint8_t protect;

    ret = HifmcCntlrDevFeatureOp(cntlr, spi, true, MTD_SPI_NAND_PROTECT_ADDR, &protect);
    if (ret != HDF_SUCCESS) {
        return ret;
    }
    if (SPI_NAND_ANY_BP_ENABLE(protect)) {
        protect &= ~SPI_NAND_ALL_BP_MASK;
        ret = HifmcCntlrDevFeatureOp(cntlr, spi, false, MTD_SPI_NAND_PROTECT_ADDR, &protect);
        if (ret != HDF_SUCCESS) {
            return ret;
        }
        ret = SpiFlashWaitReady(spi);
        if (ret != HDF_SUCCESS) {
            return ret;
        }
        ret = HifmcCntlrDevFeatureOp(cntlr, spi, true, MTD_SPI_NAND_PROTECT_ADDR, &protect);
        if (ret != HDF_SUCCESS) {
            return ret;
        }
        if (SPI_NAND_ANY_BP_ENABLE(protect)) {
            HDF_LOGE("%s: disable write protection failed, protect=0x%x", __func__, protect);
            return HDF_ERR_IO;
        }
    }
    return HDF_SUCCESS;
}

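// Turn off the chip's on-die ECC via the feature register and verify the bit is cleared.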
static int32_t HifmcCntlrDevEccDisable(struct HifmcCntlr *cntlr, struct SpiFlash *spi)
{
    int32_t ret;
    uint8_t feature;

    ret = HifmcCntlrDevFeatureOp(cntlr, spi, true, MTD_SPI_NAND_FEATURE_ADDR, &feature);
    if (ret != HDF_SUCCESS) {
        return ret;
    }
    if ((feature & MTD_SPI_FEATURE_ECC_ENABLE) != 0) {
        feature &= ~MTD_SPI_FEATURE_ECC_ENABLE;
        ret = HifmcCntlrDevFeatureOp(cntlr, spi, false, MTD_SPI_NAND_FEATURE_ADDR, &feature);
        if (ret != HDF_SUCCESS) {
            return ret;
        }
        ret = SpiFlashWaitReady(spi);
        if (ret != HDF_SUCCESS) {
            return ret;
        }
        ret = HifmcCntlrDevFeatureOp(cntlr, spi, true, MTD_SPI_NAND_FEATURE_ADDR, &feature);
        if (ret != HDF_SUCCESS) {
            return ret;
        }
        if ((feature & MTD_SPI_FEATURE_ECC_ENABLE) != 0) {
            HDF_LOGE("%s: disable chip internal ecc failed, feature=0x%x", __func__, feature);
            return HDF_ERR_IO;
        }
    }
    return HDF_SUCCESS;
}

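// Initialize a probed SPI NAND device: set MTD identity and ops, resolve chip-specific SPI ops, enable QE, then disable write protection and on-die ECC.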
int32_t HifmcCntlrInitSpinandDevice(struct HifmcCntlr *cntlr, struct SpiFlash *spi)
{
    int32_t ret;

    spi->mtd.index = 0;
    spi->mtd.name = "spinand0";
    spi->mtd.ops = &g_hifmcMtdMethodSpinand;
    spi->mtd.type = MTD_TYPE_SPI_NAND;

    ret = SpinandGetSpiOps(spi);
    if (ret != HDF_SUCCESS) {
        return ret;
    }

    ret = SpiFlashQeEnable(spi);
    if (ret != HDF_SUCCESS) {
        return ret;
    }

    ret = HifmcCntlrDevWpDisable(cntlr, spi);
    if (ret != HDF_SUCCESS) {
        return ret;
    }

    ret = HifmcCntlrDevEccDisable(cntlr, spi);
    if (ret != HDF_SUCCESS) {
        return ret;
    }

    return HDF_SUCCESS;
}