/*
 * Copyright (c) 2020-2021 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
 * Simple virtio-mmio block driver emulating an MMC device (spec 4.3).
 */

#include "los_vm_iomap.h"
#include "mmc_block.h"
#include "dmac_core.h"
#include "osal.h"
#include "osal/osal_io.h"
#include "virtmmio.h"

/*
 * The kernel takes care of locking and caching (bcache); we only take
 * care of I/O. While an I/O request is in flight, we must wait for its
 * completion. Since only one I/O request arrives here at a time, we can
 * use the simplest possible virtqueue -- just 4 descriptors: one for the
 * "request header", one for the "I/O buffer", one for the "response",
 * and one left unused. That is, the driver and the device always run in
 * synchronous mode!
 */
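/*
 * Descriptor chain used for every request (desc[3] is left unused):
 *
 *   desc[0]: request header (device-readable VirtblkReq)
 *     -> desc[1]: data buffer (direction set per request in VirtblkIO)
 *       -> desc[2]: one-byte status (device-writable)
 */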
#define VIRTQ_REQUEST_QSZ       4

#define VIRTIO_BLK_F_RO         (1 << 5)
#define VIRTIO_BLK_F_BLK_SIZE   (1 << 6)
#define VIRTMMIO_BLK_NAME       "virtblock"
#define VIRTBLK_DRIVER          "/dev/mmcblk"
#define VIRTBLK_DEF_BLK_SIZE    8192

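/*
 * Leading fields of the virtio-blk configuration space; only 'capacity'
 * and 'blkSize' are actually consumed (see Feature0).
 */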
struct VirtblkConfig {
    uint64_t capacity;
    uint32_t sizeMax;   /* the virtio-blk config space places size_max before seg_max */
    uint32_t segMax;
    struct VirtblkGeometry {
        uint16_t cylinders;
        uint8_t heads;
        uint8_t sectors;
    } geometry;
    uint32_t blkSize;
    uint8_t otherFieldsOmitted[0];
};

/* request types: only IN and OUT are supported */
#define VIRTIO_BLK_T_IN             0
#define VIRTIO_BLK_T_OUT            1
#define VIRTIO_BLK_T_FLUSH          4
#define VIRTIO_BLK_T_DISCARD        11
#define VIRTIO_BLK_T_WRITE_ZEROES   13

/* response status */
#define VIRTIO_BLK_S_OK             0
#define VIRTIO_BLK_S_IOERR          1
#define VIRTIO_BLK_S_UNSUPP         2

struct VirtblkReq {
    uint32_t type;
    uint32_t reserved;
    uint64_t startSector;
};

struct Virtblk {
    struct VirtmmioDev dev;

    uint64_t capacity;      /* in 512-byte sectors */
    uint32_t blkSize;       /* block (cluster) size */
    struct VirtblkReq req;  /* static memory for request */
    uint8_t resp;           /*              and response */
    DmacEvent event;        /* for waiting I/O completion */
};

#define FAT32_MAX_CLUSTER_SECS  128

static bool Feature0(uint32_t features, uint32_t *supported, void *dev)
{
    struct Virtblk *blk = dev;
    struct VirtblkConfig *conf = (void *)(blk->dev.base + VIRTMMIO_REG_CONFIG);
    uint32_t bs;

    if (features & VIRTIO_BLK_F_RO) {
        HDF_LOGE("[%s]not support readonly device", __func__);
        return false;
    }

    blk->blkSize = VIRTBLK_DEF_BLK_SIZE;
    if (features & VIRTIO_BLK_F_BLK_SIZE) {
        bs = conf->blkSize;
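        /* accept only power-of-two block sizes from one sector up to a FAT32 cluster */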
        if ((bs >= MMC_SEC_SIZE) && (bs <= FAT32_MAX_CLUSTER_SECS * MMC_SEC_SIZE) &&
            ((bs & (bs - 1)) == 0)) {
            blk->blkSize = bs;
            *supported |= VIRTIO_BLK_F_BLK_SIZE;
        }
    }

    blk->capacity = conf->capacity;
    return true;
}

static bool Feature1(uint32_t features, uint32_t *supported, void *dev)
{
    (void)dev;
    if (features & VIRTIO_F_VERSION_1) {
        *supported |= VIRTIO_F_VERSION_1;
    } else {
        HDF_LOGE("[%s]virtio-mmio block has no VERSION_1 feature", __func__);
        return false;
    }

    return true;
}

static void PopulateRequestQ(const struct Virtblk *blk)
{
    const struct Virtq *q = &blk->dev.vq[0];
    int i = 0;

    q->desc[i].pAddr = VMM_TO_DMA_ADDR((VADDR_T)&blk->req);
    q->desc[i].len = sizeof(struct VirtblkReq);
    q->desc[i].flag = VIRTQ_DESC_F_NEXT;
    q->desc[i].next = i + 1;

    i++;
    q->desc[i].next = i + 1;
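    /* desc[1]'s pAddr, len and direction flag are filled per request in VirtblkIO */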

    i++;
    q->desc[i].pAddr = VMM_TO_DMA_ADDR((VADDR_T)&blk->resp);
    q->desc[i].len = sizeof(uint8_t);
    q->desc[i].flag = VIRTQ_DESC_F_WRITE;
}

static uint8_t VirtblkIO(struct Virtblk *blk, uint32_t cmd, uint64_t startSector,
                         uint8_t *buf, uint32_t sectors)
{
    uint32_t ret;
    struct Virtq *q = &blk->dev.vq[0];

    /* fill in and notify virt queue */
    blk->req.type = cmd;
    blk->req.startSector = startSector;
    q->desc[1].pAddr = VMM_TO_DMA_ADDR((VADDR_T)buf);
    q->desc[1].len = sectors * MMC_SEC_SIZE;
    if (cmd == VIRTIO_BLK_T_IN) {
        q->desc[1].flag = VIRTQ_DESC_F_NEXT | VIRTQ_DESC_F_WRITE;
    } else { /* must be VIRTIO_BLK_T_OUT */
        q->desc[1].flag = VIRTQ_DESC_F_NEXT;
    }
    q->avail->ring[q->avail->index % q->qsz] = 0;
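    /* make descriptor and avail-ring writes visible to the device before publishing the index */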
    DSB;
    q->avail->index++;
    OSAL_WRITEL(0, blk->dev.base + VIRTMMIO_REG_QUEUENOTIFY);

    /* wait for completion */
    if ((ret = DmaEventWait(&blk->event, 1, HDF_WAIT_FOREVER)) != 1) {
        HDF_LOGE("[%s]FATAL: wait event failed: %u", __func__, ret);
        return VIRTIO_BLK_S_IOERR;
    }

    return blk->resp;
}

static uint32_t VirtblkIRQhandle(uint32_t swIrq, void *dev)
{
    (void)swIrq;
    struct Virtblk *blk = dev;
    struct Virtq *q = &blk->dev.vq[0];

    if (!(OSAL_READL(blk->dev.base + VIRTMMIO_REG_INTERRUPTSTATUS) & VIRTMMIO_IRQ_NOTIFY_USED)) {
        return 1;
    }

    (void)DmaEventSignal(&blk->event, 1);
    q->last++;  /* consume the used-ring entry; only one request is in flight at a time */

    OSAL_WRITEL(VIRTMMIO_IRQ_NOTIFY_USED, blk->dev.base + VIRTMMIO_REG_INTERRUPTACK);
    return 0;
}

static void VirtblkDeInit(struct Virtblk *blk)
{
    if (blk->dev.irq & ~_IRQ_MASK) {
        OsalUnregisterIrq(blk->dev.irq & _IRQ_MASK, blk);
    }
    LOS_DmaMemFree(blk);
}

static struct Virtblk *VirtblkInitDev(void)
{
    struct Virtblk *blk = NULL;
    VADDR_T base;
    uint16_t qsz;
    int len, ret;

    len = sizeof(struct Virtblk) + VirtqSize(VIRTQ_REQUEST_QSZ);
    if ((blk = LOS_DmaMemAlloc(NULL, len, sizeof(void *), DMA_CACHE)) == NULL) {
        HDF_LOGE("[%s]alloc virtio-block memory failed", __func__);
        return NULL;
    }
    memset_s(blk, len, 0, len);

    if (!VirtmmioDiscover(VIRTMMIO_DEVICE_ID_BLK, &blk->dev)) {
        goto ERR_OUT;
    }

    VirtmmioInitBegin(&blk->dev);
    if (!VirtmmioNegotiate(&blk->dev, Feature0, Feature1, blk)) {
        goto ERR_OUT1;
    }
    base = ALIGN((VADDR_T)blk + sizeof(struct Virtblk), VIRTQ_ALIGN_DESC);
    qsz = VIRTQ_REQUEST_QSZ;
    if (VirtmmioConfigQueue(&blk->dev, base, &qsz, 1) == 0) {
        goto ERR_OUT1;
    }

    if ((ret = DmaEventInit(&blk->event)) != HDF_SUCCESS) {
        HDF_LOGE("[%s]initialize event control block failed: %#x", __func__, ret);
        goto ERR_OUT1;
    }
    ret = OsalRegisterIrq(blk->dev.irq, OSAL_IRQF_TRIGGER_NONE, (OsalIRQHandle)VirtblkIRQhandle,
                          VIRTMMIO_BLK_NAME, blk);
    if (ret != HDF_SUCCESS) {
        HDF_LOGE("[%s]register IRQ failed: %d", __func__, ret);
        goto ERR_OUT1;
    }
    blk->dev.irq |= ~_IRQ_MASK;     /* flag the IRQ as registered; tested in VirtblkDeInit */

    PopulateRequestQ(blk);
    VritmmioInitEnd(&blk->dev);  /* now virt queue can be used */
    return blk;

ERR_OUT1:
    VirtmmioInitFailed(&blk->dev);
ERR_OUT:
    VirtblkDeInit(blk);
    return NULL;
}


/*
 * MMC code
 *
 * The HDF MmcCntlr acts like an application adapter: it discovers the MMC
 * device, sends I/O requests, and receives response data.
 * We act like a card adapter: we receive requests, control the MMC bus,
 * drive 'card' execution, and report results. In other words, we are part
 * of MmcCntlr -- the MMC controller!
 * All hardware-internal information (state, CRC, RCA, ...) is in our scope,
 * so we can (and should) safely ignore it.
 */
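/* OCR: power-up complete bit set; OCR_GT_2G also sets bit 30 (sector addressing) for >2GB cards */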
259 
260 #define OCR_LE_2G       (0x00FF8080 | MMC_CARD_BUSY_STATUS)
261 #define OCR_GT_2G       (0x40FF8080 | MMC_CARD_BUSY_STATUS)
262 #define CAPACITY_2G     (0x80000000 / 512)
263 #define READ_BL_LEN     11
264 #define C_SIZE_MULT     7
265 #define U32_BITS        32
266 
/*
 * Example bits: start=62 bits=4 value=0b1011
 *
 *             bit127
 *  resp[0]
 *  resp[1]                         1   0
 *  resp[2]      1    1
 *  resp[3]
 *                                     bit0
 *
 * NOTE: no error check, related 'resp' bits must be zeroed and set only once.
 */
static void FillCidCsdBits(uint32_t *resp, int start, int bits, uint32_t value)
{
    uint32_t index, lsb;

    index = CID_LEN - 1 - start / U32_BITS; /* CSD has the same length */
    lsb = start % U32_BITS;
    resp[index] |= value << lsb;

    if (lsb + bits > U32_BITS) {
        resp[index - 1] |= value >> (U32_BITS - lsb);
    }
}

#define MMC_CID_CBX_SBIT    112
#define MMC_CID_CBX_WIDTH   2
#define MMC_CID_PNM_SBYTE   3
#define MMC_CID_PNM_BYTES   6
#define MMC_CID_PSN_SBYTE   10
static void VirtMmcFillRespCid(struct MmcCmd *cmd, const struct Virtblk *blk)
{
    uint8_t *b = (uint8_t *)cmd->resp;

    /* embedded card, so we can leverage the kernel's eMMC rootfs support */
    FillCidCsdBits(cmd->resp, MMC_CID_CBX_SBIT, MMC_CID_CBX_WIDTH, 1);

    (void)memcpy_s(&b[MMC_CID_PNM_SBYTE], MMC_CID_PNM_BYTES, VIRTMMIO_BLK_NAME, MMC_CID_PNM_BYTES);
    *(uint32_t *)&b[MMC_CID_PSN_SBYTE] = (uint32_t)blk; /* unique sn */
    /* leave other fields random */
}

#define MMC_CSD_STRUCT_SBIT     126
#define MMC_CSD_STRUCT_WIDTH    2

#define MMC_CSD_VERS_SBIT       122
#define MMC_CSD_VERS_WIDTH      4

#define MMC_CSD_CCC_SBIT        84
#define MMC_CSD_CCC_WIDTH       12

#define MMC_CSD_RBLEN_SBIT      80
#define MMC_CSD_RBLEN_WIDTH     4

#define MMC_CSD_RBPART_SBIT     79

#define MMC_CSD_WBMISALIGN_SBIT 78

#define MMC_CSD_RBMISALIGN_SBIT 77

#define MMC_CSD_DSRIMP_SBIT     76

#define MMC_CSD_CSIZE_SBIT      62
#define MMC_CSD_CSIZE_WIDTH     12
#define MMC_CSD_CSIZE_VAL       0xFFF

#define MMC_CSD_CSIZEMUL_SBIT   47
#define MMC_CSD_CSIZEMUL_WIDTH  3

#define MMC_CSD_EGRPSIZE_SBIT   42
#define MMC_CSD_EGRPSIZE_WIDTH  5
#define MMC_CSD_EGRPSIZE_VAL    31

#define MMC_CSD_EGRPMULT_SBIT   37
#define MMC_CSD_EGRPMULT_WIDTH  5
#define MMC_CSD_EGRPMULT_VAL    15

#define MMC_CSD_WBLEN_SBIT      22
#define MMC_CSD_WBLEN_WIDTH     4

#define MMC_CSD_WBPART_SBIT     21

#define MMC_CSD_FFORMGRP_SBIT   15

#define MMC_CSD_FFORMAT_SBIT    10
#define MMC_CSD_FFORMAT_WIDTH   2
static void VirtMmcFillRespCsd(struct MmcCmd *cmd, const struct Virtblk *blk)
{
    FillCidCsdBits(cmd->resp, MMC_CSD_STRUCT_SBIT, MMC_CSD_STRUCT_WIDTH, MMC_CSD_STRUCTURE_VER_1_2);
    FillCidCsdBits(cmd->resp, MMC_CSD_VERS_SBIT, MMC_CSD_VERS_WIDTH, MMC_CSD_SPEC_VER_4);
    FillCidCsdBits(cmd->resp, MMC_CSD_CCC_SBIT, MMC_CSD_CCC_WIDTH, MMC_CSD_CCC_BASIC |
                                                MMC_CSD_CCC_BLOCK_READ | MMC_CSD_CCC_BLOCK_WRITE);
    FillCidCsdBits(cmd->resp, MMC_CSD_RBPART_SBIT, 1, 0);       /* READ_BL_PARTIAL: no */
    FillCidCsdBits(cmd->resp, MMC_CSD_WBMISALIGN_SBIT, 1, 0);   /* WRITE_BLK_MISALIGN: no */
    FillCidCsdBits(cmd->resp, MMC_CSD_RBMISALIGN_SBIT, 1, 0);   /* READ_BLK_MISALIGN: no */
    FillCidCsdBits(cmd->resp, MMC_CSD_DSRIMP_SBIT, 1, 0);       /* DSR_IMP: no */
    if (blk->capacity > CAPACITY_2G) {
        uint32_t e = U32_BITS - __builtin_clz(blk->blkSize) - 1;
        FillCidCsdBits(cmd->resp, MMC_CSD_RBLEN_SBIT, MMC_CSD_RBLEN_WIDTH, e);  /* READ_BL_LEN */
        FillCidCsdBits(cmd->resp, MMC_CSD_WBLEN_SBIT, MMC_CSD_WBLEN_WIDTH, e);  /* WRITE_BL_LEN */
        FillCidCsdBits(cmd->resp, MMC_CSD_CSIZE_SBIT, MMC_CSD_CSIZE_WIDTH, MMC_CSD_CSIZE_VAL);
    } else {    /* with READ_BL_LEN = 11, C_SIZE can reach 2GB (a 512B block length cannot) */
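        /*
         * MMC spec: capacity = (C_SIZE + 1) * 2^(C_SIZE_MULT + 2) * 2^READ_BL_LEN bytes.
         * Worked example: a 1GiB device (capacity = 2097152 sectors) yields
         * size = 2^30 / 2^11 / 2^9 - 1 = 1023, and the host recovers
         * (1023 + 1) * 2^9 * 2^11 = 2^30 bytes.
         */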
        FillCidCsdBits(cmd->resp, MMC_CSD_RBLEN_SBIT, MMC_CSD_RBLEN_WIDTH, READ_BL_LEN);
        FillCidCsdBits(cmd->resp, MMC_CSD_WBLEN_SBIT, MMC_CSD_WBLEN_WIDTH, READ_BL_LEN);
        uint32_t size = blk->capacity * MMC_SEC_SIZE / (1 << READ_BL_LEN) / (1 << (C_SIZE_MULT + 2)) - 1;
        FillCidCsdBits(cmd->resp, MMC_CSD_CSIZE_SBIT, MMC_CSD_CSIZE_WIDTH, size);   /* C_SIZE */
    }
    FillCidCsdBits(cmd->resp, MMC_CSD_CSIZEMUL_SBIT, MMC_CSD_CSIZEMUL_WIDTH, C_SIZE_MULT);
    FillCidCsdBits(cmd->resp, MMC_CSD_EGRPSIZE_SBIT, MMC_CSD_EGRPSIZE_WIDTH, MMC_CSD_EGRPSIZE_VAL);
    FillCidCsdBits(cmd->resp, MMC_CSD_EGRPMULT_SBIT, MMC_CSD_EGRPMULT_WIDTH, MMC_CSD_EGRPMULT_VAL);
    FillCidCsdBits(cmd->resp, MMC_CSD_WBPART_SBIT, 1, 0);   /* WRITE_BL_PARTIAL: no */
    FillCidCsdBits(cmd->resp, MMC_CSD_FFORMGRP_SBIT, 1, 0); /* FILE_FORMAT_GRP */
    FillCidCsdBits(cmd->resp, MMC_CSD_FFORMAT_SBIT, MMC_CSD_FFORMAT_WIDTH, 0);  /* hard disk-like */
    /* leave other fields random */
}

#define EMMC_EXT_CSD_CMD_SET_REV    189
#define EMMC_EXT_CSD_CMD_SET        191
#define EMMC_EXT_CSD_ACC_SIZE       225
#define EMMC_EXT_CSD_S_CMD_SET      504
static void VirtMmcFillDataExtCsd(const struct MmcCmd *cmd, const struct Virtblk *blk)
{
    uint8_t *b = (uint8_t *)cmd->data->dataBuffer;

    b[EMMC_EXT_CSD_S_CMD_SET] = 0;      /* standard MMC */
    b[EMMC_EXT_CSD_ACC_SIZE] = blk->blkSize / MMC_SEC_SIZE;
    b[EMMC_EXT_CSD_REL_WR_SEC_C] = blk->blkSize / MMC_SEC_SIZE;
    *(uint32_t *)&b[EMMC_EXT_CSD_SEC_CNT] = blk->capacity;
    b[EMMC_EXT_CSD_CARD_TYPE] = EMMC_EXT_CSD_CARD_TYPE_26 | EMMC_EXT_CSD_CARD_TYPE_52;
    b[EMMC_EXT_CSD_STRUCTURE] = EMMC_EXT_CSD_STRUCTURE_VER_1_2;
    b[EMMC_EXT_CSD_REV] = EMMC_EXT_CSD_REV_1_3;
    b[EMMC_EXT_CSD_CMD_SET] = 0;        /* standard MMC */
    b[EMMC_EXT_CSD_CMD_SET_REV] = 0;    /* v4.0 */
    b[EMMC_EXT_CSD_BUS_WIDTH] = EMMC_EXT_CSD_BUS_WIDTH_1;
    /* leave other fields random */
}

#define MMC_RESP_STATE_BIT  9
static inline void VirtMmcFillRespR1(struct MmcCmd *cmd)
{
    cmd->resp[0] = READY_FOR_DATA | (STATE_READY << MMC_RESP_STATE_BIT);
    cmd->resp[1] = cmd->cmdCode;
}

static int32_t VirtMmcIO(const struct MmcCntlr *cntlr, const struct MmcCmd *cmd)
{
    struct Virtblk *blk = cntlr->priv;
    uint64_t startSector = (uint64_t)cmd->argument;
    uint32_t io, ret;

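    /* byte-addressed (standard-capacity, <=2GB) cards pass a byte offset in cmd->argument */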
    if (cntlr->curDev->state.bits.blockAddr == 0) {
        startSector >>= MMC_SEC_SHIFT;
    }

    if (cmd->data->dataFlags == DATA_READ) {
        io = VIRTIO_BLK_T_IN;
    } else {
        io = VIRTIO_BLK_T_OUT;
    }

    ret = VirtblkIO(blk, io, startSector, cmd->data->dataBuffer, cmd->data->blockNum);
    if (ret == VIRTIO_BLK_S_OK) {
        cmd->data->returnError = HDF_SUCCESS;
    } else {
        HDF_LOGE("[%s]QEMU backend I/O error", __func__);
        cmd->data->returnError = HDF_ERR_IO;
    }

    return HDF_SUCCESS;
}

static int32_t VirtMmcDoRequest(struct MmcCntlr *cntlr, struct MmcCmd *cmd)
{
    if ((cntlr == NULL) || (cntlr->priv == NULL) || (cmd == NULL)) {
        return HDF_ERR_INVALID_OBJECT;
    }
    struct Virtblk *blk = cntlr->priv;

    cmd->returnError = HDF_SUCCESS;
    switch (cmd->cmdCode) {
        case GO_IDLE_STATE:         // CMD0
            break;
        case SEND_OP_COND:          // CMD1
            if (blk->capacity > CAPACITY_2G) {
                cmd->resp[0] = OCR_GT_2G;
            } else {
                cmd->resp[0] = OCR_LE_2G;
            }
            break;
        case ALL_SEND_CID:          // CMD2, fall through
        case SEND_CID:              // CMD10
            VirtMmcFillRespCid(cmd, blk);
            break;
        case SEND_EXT_CSD:          // CMD8, fall through
            VirtMmcFillDataExtCsd(cmd, blk);
            cmd->data->returnError = HDF_SUCCESS;
        case SET_RELATIVE_ADDR:     // CMD3, fall through
        case SWITCH:                // CMD6, fall through
        case SELECT_CARD:           // CMD7, fall through
        case SEND_STATUS:           // CMD13
            VirtMmcFillRespR1(cmd);
            break;
        case SEND_CSD:              // CMD9
            VirtMmcFillRespCsd(cmd, blk);
            break;
        case READ_SINGLE_BLOCK:     // CMD17, fall through
        case READ_MULTIPLE_BLOCK:   // CMD18, fall through
        case WRITE_BLOCK:           // CMD24, fall through
        case WRITE_MULTIPLE_BLOCK:  // CMD25
            return VirtMmcIO(cntlr, cmd);
        default:
            HDF_LOGE("[%s]unsupported command: %u", __func__, cmd->cmdCode);
            cmd->returnError = HDF_ERR_NOT_SUPPORT;
    }
    return cmd->returnError;
}

static bool VirtMmcPlugged(struct MmcCntlr *cntlr)
{
    (void)cntlr;
    return true;
}

static bool VirtMmcBusy(struct MmcCntlr *cntlr)
{
    (void)cntlr;
    return false;
}

static struct MmcCntlrOps g_virtblkOps = {
    .request = VirtMmcDoRequest,
    .devPlugged = VirtMmcPlugged,
    .devBusy = VirtMmcBusy,
};


/*
 * HDF entry
 */

static void HdfVirtblkRelease(struct HdfDeviceObject *deviceObject)
{
    struct MmcCntlr *cntlr = deviceObject->priv;
    struct Virtblk *blk = cntlr->priv;

    if (blk) {
        VirtblkDeInit(blk);
    }
    if (cntlr->curDev != NULL) {
        MmcDeviceRemove(cntlr->curDev);
        OsalMemFree(cntlr->curDev);
        cntlr->curDev = NULL;
    }
    MmcCntlrRemove(cntlr);
    OsalMemFree(cntlr);
}

static int32_t HdfVirtblkBind(struct HdfDeviceObject *obj)
{
    struct MmcCntlr *cntlr = NULL;
    struct Virtblk *blk = NULL;
    int32_t ret;

    if (obj == NULL) {
        HDF_LOGE("[%s]HdfDeviceObject is NULL", __func__);
        return HDF_ERR_INVALID_OBJECT;
    }

    cntlr = OsalMemCalloc(sizeof(struct MmcCntlr));
    if (cntlr == NULL) {
        HDF_LOGE("[%s]alloc MmcCntlr memory failed", __func__);
        return HDF_ERR_MALLOC_FAIL;
    }

    if ((blk = VirtblkInitDev()) == NULL) {
        OsalMemFree(cntlr);
        return HDF_FAILURE;
    }

    obj->service = &cntlr->service;
    obj->priv = cntlr;
    cntlr->priv = blk;
    cntlr->ops = &g_virtblkOps;
    cntlr->hdfDevObj = obj;
    if ((ret = MmcCntlrParse(cntlr, obj)) != HDF_SUCCESS) {
        goto _ERR;
    }

    if ((ret = MmcCntlrAdd(cntlr, true)) != HDF_SUCCESS) {
        goto _ERR;
    }
    (void)MmcCntlrAddDetectMsgToQueue(cntlr);

    return HDF_SUCCESS;

_ERR:   /* Bind failure, so we must call release manually. */
    HdfVirtblkRelease(obj);
    return ret;
}

static int32_t HdfVirtblkInit(struct HdfDeviceObject *device)
{
    if (device == NULL) {
        HDF_LOGE("[%s]device is null", __func__);
        return HDF_ERR_INVALID_PARAM;
    }

    return HDF_SUCCESS;
}

struct HdfDriverEntry g_virtBlockEntry = {
    .moduleVersion = 1,
    .moduleName = "HDF_VIRTIO_BLOCK",
    .Bind = HdfVirtblkBind,
    .Init = HdfVirtblkInit,
    .Release = HdfVirtblkRelease,
};

HDF_INIT(g_virtBlockEntry);