1 /*
2 * Copyright (c) 2013-2019 Huawei Technologies Co., Ltd. All rights reserved.
3 * Copyright (c) 2020-2021 Huawei Device Co., Ltd. All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without modification,
6 * are permitted provided that the following conditions are met:
7 *
8 * 1. Redistributions of source code must retain the above copyright notice, this list of
9 * conditions and the following disclaimer.
10 *
11 * 2. Redistributions in binary form must reproduce the above copyright notice, this list
12 * of conditions and the following disclaimer in the documentation and/or other materials
13 * provided with the distribution.
14 *
15 * 3. Neither the name of the copyright holder nor the names of its contributors may be used
16 * to endorse or promote products derived from this software without specific prior written
17 * permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
20 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
21 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
23 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
24 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
25 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
26 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
27 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
28 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
29 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 */

#include "bcache.h"
#include "assert.h"
#include "stdlib.h"
#include "linux/delay.h"
#include "disk_pri.h"
#include "user_copy.h"

#undef HALARC_ALIGNMENT
#define DMA_ALLGN 64
#define HALARC_ALIGNMENT DMA_ALLGN
#define BCACHE_MAGIC_NUM 20132016
#define BCACHE_STATCK_SIZE 0x3000
#define ASYNC_EVENT_BIT 0x01

#ifdef DEBUG
#define D(args) printf args
#else
#define D(args)
#endif

#ifdef BCACHE_ANALYSE
UINT32 g_memSize;
volatile UINT32 g_blockNum;
volatile UINT32 g_dataSize;
volatile UINT8 *g_memStart;
volatile UINT32 g_switchTimes[CONFIG_FS_FAT_BLOCK_NUMS] = { 0 };
volatile UINT32 g_hitTimes[CONFIG_FS_FAT_BLOCK_NUMS] = { 0 };
#endif

VOID BcacheAnalyse(UINT32 level)
{
    (VOID)level;
#ifdef BCACHE_ANALYSE
    int i;

    PRINTK("Bcache information:\n");
    PRINTK(" mem: %u\n", g_memSize);
    PRINTK(" block number: %u\n", g_blockNum);
    PRINTK("index, switch, hit\n");
    for (i = 0; i < g_blockNum; i++) {
        PRINTK("%5d, %6d, %3d\n", i, g_switchTimes[i], g_hitTimes[i]);
    }
#else
    PRINTK("Bcache hasn't started\n");
#endif
}

#ifdef LOSCFG_FS_FAT_CACHE_SYNC_THREAD

UINT32 g_syncThreadPrio = CONFIG_FS_FAT_SYNC_THREAD_PRIO;
UINT32 g_dirtyRatio = CONFIG_FS_FAT_DIRTY_RATIO;
UINT32 g_syncInterval = CONFIG_FS_FAT_SYNC_INTERVAL;

VOID LOS_SetDirtyRatioThreshold(UINT32 dirtyRatio)
{
    if ((dirtyRatio != g_dirtyRatio) && (dirtyRatio <= 100)) { /* The ratio cannot exceed 100% */
        g_dirtyRatio = dirtyRatio;
    }
}

VOID LOS_SetSyncThreadInterval(UINT32 interval)
{
    g_syncInterval = interval;
}

INT32 LOS_SetSyncThreadPrio(UINT32 prio, const CHAR *name)
{
    INT32 ret = VFS_ERROR;
    INT32 diskID;
    los_disk *disk = NULL;
    if ((prio == 0) || (prio >= OS_TASK_PRIORITY_LOWEST)) { /* The priority cannot be zero or beyond the lowest priority */
        return ret;
    }

    g_syncThreadPrio = prio;

    /*
     * If the name is NULL, it only sets the value of a global variable,
     * and takes effect the next time the thread is created.
     */
    if (name == NULL) {
        return ENOERR;
    }

    /* If the name is not NULL, return an error when the disk corresponding to name cannot be found. */
    diskID = los_get_diskid_byname(name);
    disk = get_disk(diskID);
    if (disk == NULL) {
        return ret;
    }

    if (pthread_mutex_lock(&disk->disk_mutex) != ENOERR) {
        PRINT_ERR("%s %d, mutex lock fail!\n", __FUNCTION__, __LINE__);
        return ret;
    }
    if ((disk->disk_status == STAT_INUSED) && (disk->bcache != NULL)) {
        ret = LOS_TaskPriSet(disk->bcache->syncTaskId, prio);
    }
    if (pthread_mutex_unlock(&disk->disk_mutex) != ENOERR) {
        PRINT_ERR("%s %d, mutex unlock fail!\n", __FUNCTION__, __LINE__);
        return VFS_ERROR;
    }
    return ret;
}
#endif

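/*
 * Look up a cached block by block number in the red-black tree that indexes
 * all in-use cache blocks. The search descends left or right depending on the
 * comparison with block->num and returns NULL when the block is not cached.
 */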
static OsBcacheBlock *RbFindBlock(const OsBcache *bc, UINT64 num)
{
    OsBcacheBlock *block = NULL;
    struct rb_node *node = bc->rbRoot.rb_node;

    for (; node != NULL; node = (block->num < num) ? node->rb_right : node->rb_left) {
        block = rb_entry(node, OsBcacheBlock, rbNode);
        if (block->num == num) {
            return block;
        }
    }
    return NULL;
}

static VOID RbAddBlock(OsBcache *bc, OsBcacheBlock *block)
{
    struct rb_node *node = bc->rbRoot.rb_node;
    struct rb_node **link = NULL;
    OsBcacheBlock *b = NULL;

    if (node == NULL) {
        rb_link_node(&block->rbNode, NULL, &bc->rbRoot.rb_node);
    } else {
        for (; node != NULL; link = (b->num > block->num) ? &node->rb_left : &node->rb_right, node = *link) {
            b = rb_entry(node, OsBcacheBlock, rbNode);
            if (b->num == block->num) {
                PRINT_ERR("RbAddBlock fail, b->num = %llu, block->num = %llu\n", b->num, block->num);
                return;
            }
        }
        rb_link_node(&block->rbNode, &b->rbNode, link);
    }
    rb_insert_color(&block->rbNode, &bc->rbRoot);
}

static inline VOID RbDelBlock(OsBcache *bc, OsBcacheBlock *block)
{
    rb_erase(&block->rbNode, &bc->rbRoot);
}

static inline VOID ListMoveBlockToHead(OsBcache *bc, OsBcacheBlock *block)
{
    LOS_ListDelete(&block->listNode);
    LOS_ListAdd(&bc->listHead, &block->listNode);
}

static inline VOID FreeBlock(OsBcache *bc, OsBcacheBlock *block)
{
    block->used = FALSE;
    LOS_ListAdd(&bc->freeListHead, &block->listNode);
}

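/*
 * Return log2(val) when val is an exact power of two, otherwise 0.
 * For example, GetValLog2(64) yields 6, while GetValLog2(48) yields 0
 * because 48 is not a power of two. Note that val == 1 also yields 0,
 * so callers treat a return value of 0 as "invalid block size".
 */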
static UINT32 GetValLog2(UINT32 val)
{
    UINT32 i, log2;

    i = val;
    log2 = 0;
    while ((i & 1) == 0) { /* Loop while the lowest bit is 0 */
        i >>= 1;
        log2++;
    }
    if (i != 1) { /* Not a power of 2 */
        return 0;
    }

    return log2;
}

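/*
 * Scan the per-sector dirty bitmap (arr, 'len' words, most-significant bit
 * first) for a single contiguous run of set bits. On success *p1 receives the
 * index of the first dirty sector and *p2 the index just past the last dirty
 * sector; if the dirty sectors are not one contiguous run (or none are set)
 * the function returns VFS_ERROR and the caller falls back to writing the
 * whole block. Example, assuming 32-bit bitmap words: arr[0] = 0x0FF00000
 * gives *p1 = 4 and *p2 = 12.
 */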
static INT32 FindFlagPos(const UINT32 *arr, UINT32 len, UINT32 *p1, UINT32 *p2)
{
    UINT32 *start = p1;
    UINT32 *end = p2;
    UINT32 i, j, tmp;
    UINT32 val = 1;

    *start = BCACHE_MAGIC_NUM;
    *end = 0;
    for (i = 0; i < len; i++) {
        for (j = 0; j < UNSIGNED_INTEGER_BITS; j++) {
            tmp = arr[i] << j;
            tmp = tmp >> UNINT_MAX_SHIFT_BITS;
            if (tmp != val) {
                continue;
            }
            if (val && (*start == BCACHE_MAGIC_NUM)) {
                *start = (i << UNINT_LOG2_SHIFT) + j;
                val = 1 - val; /* Toggle between searching for set and clear bits */
            } else if (val && (*start != BCACHE_MAGIC_NUM)) {
                *start = 0;
                return VFS_ERROR;
            } else {
                *end = (i << UNINT_LOG2_SHIFT) + j;
                val = 1 - val; /* Toggle between searching for set and clear bits */
            }
        }
    }
    if (*start == BCACHE_MAGIC_NUM) {
        *start = 0;
        return VFS_ERROR;
    }
    if (*end == 0) {
        *end = len << UNINT_LOG2_SHIFT;
    }

    return ENOERR;
}

static INT32 BlockRead(OsBcache *bc, OsBcacheBlock *block, UINT8 *buf)
{
    INT32 ret = bc->breadFun(bc->priv, buf, bc->sectorPerBlock,
                             (block->num) << GetValLog2(bc->sectorPerBlock));
    if (ret) {
        PRINT_ERR("BlockRead, brread_fn error, ret = %d\n", ret);
        if (block->modified == FALSE) {
            if (block->listNode.pstNext != NULL) {
                LOS_ListDelete(&block->listNode); /* list del block */
                RbDelBlock(bc, block);
            }
            FreeBlock(bc, block);
        }
        return ret;
    }

    block->readFlag = TRUE;
    return ENOERR;
}

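/*
 * Make a partially written cache block consistent before it is read back or
 * flushed as a whole: the block is read from disk into bc->rwBuffer, and only
 * the sector ranges whose dirty bits are clear are copied into block->data,
 * so cached (dirty) sectors keep the newer in-memory contents.
 */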
static INT32 BcacheGetFlag(OsBcache *bc, OsBcacheBlock *block)
{
    UINT32 i, n, f, sectorPos, val, start, pos, currentSize;
    UINT32 flagUse = bc->sectorPerBlock >> UNINT_LOG2_SHIFT;
    UINT32 flag = UINT_MAX;
    INT32 ret, bits;

    if (block->readFlag == TRUE) {
        return ENOERR;
    }

    for (i = 0; i < flagUse; i++) {
        flag &= block->flag[i];
    }

    if (flag == UINT_MAX) {
        return ENOERR;
    }

    ret = BlockRead(bc, block, bc->rwBuffer);
    if (ret != ENOERR) {
        return ret;
    }

    for (i = 0, sectorPos = 0; i < flagUse; i++) {
        val = block->flag[i];
        /* use unsigned integer for bit map */
        for (f = 0, bits = UNSIGNED_INTEGER_BITS; bits > 0; val = ~(val << n), f++, bits = bits - (INT32)n) {
            if (val == 0) {
                n = UNSIGNED_INTEGER_BITS;
            } else {
                n = (UINT32)CLZ(val);
            }
            sectorPos += n;
            if (((f % EVEN_JUDGED) != 0) || (n == 0)) { /* Skip dirty (odd) runs and empty runs */
                goto LOOP;
            }
            if (sectorPos > ((i + 1) << UNINT_LOG2_SHIFT)) {
                start = sectorPos - n;
                currentSize = (((i + 1) << UNINT_LOG2_SHIFT) - start) * bc->sectorSize;
            } else {
                start = sectorPos - n;
                currentSize = n * bc->sectorSize;
            }
            pos = start * bc->sectorSize;
            if (memcpy_s(block->data + pos, bc->blockSize - pos, bc->rwBuffer + pos, currentSize) != EOK) {
                return VFS_ERROR;
            }
LOOP:
            if (sectorPos > ((i + 1) << UNINT_LOG2_SHIFT)) {
                sectorPos = (i + 1) << UNINT_LOG2_SHIFT;
            }
        }
    }

    return ENOERR;
}

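/*
 * Mark the sectors covered by [pos, pos + size) as dirty in the block's
 * bitmap. Bits are assigned most-significant first inside each 32-bit word.
 * Worked example (assuming a 512-byte sector and UNINT_MAX_SHIFT_BITS == 31,
 * as expected from bcache.h): pos = 1024 and size = 1024 mark sectors 2 and 3,
 * i.e. bits 29 and 28 of flag[0].
 */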
static VOID BcacheSetFlag(const OsBcache *bc, OsBcacheBlock *block, UINT32 pos, UINT32 size)
{
    UINT32 start, num, i, j, k;

    if (bc->sectorSize == 0) {
        PRINT_ERR("BcacheSetFlag sectorSize is equal to zero! \n");
        return;
    }

    start = pos / bc->sectorSize;
    num = size / bc->sectorSize;

    i = start / UNSIGNED_INTEGER_BITS;
    j = start % UNSIGNED_INTEGER_BITS;
    for (k = 0; k < num; k++) {
        block->flag[i] |= 1u << (UNINT_MAX_SHIFT_BITS - j);
        j++;
        if (j == UNSIGNED_INTEGER_BITS) {
            j = 0;
            i++;
        }
    }
}

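/*
 * Write one dirty block back to the device. If the dirty sectors form a
 * single contiguous run, only that run is written; otherwise the block is
 * first completed from disk via BcacheGetFlag() and then written in full.
 */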
static INT32 BcacheSyncBlock(OsBcache *bc, OsBcacheBlock *block)
{
    INT32 ret = ENOERR;
    UINT32 len, start, end;

    if (block->modified == TRUE) {
        D(("bcache writing block = %llu\n", block->num));

        ret = FindFlagPos(block->flag, bc->sectorPerBlock >> UNINT_LOG2_SHIFT, &start, &end);
        if (ret == ENOERR) {
            len = end - start;
        } else {
            ret = BcacheGetFlag(bc, block);
            if (ret != ENOERR) {
                return ret;
            }

            len = bc->sectorPerBlock;
        }

        ret = bc->bwriteFun(bc->priv, (const UINT8 *)(block->data + (start * bc->sectorSize)),
                            len, (block->num * bc->sectorPerBlock) + start);
        if (ret == ENOERR) {
            block->modified = FALSE;
            bc->modifiedBlock--;
        } else {
            PRINT_ERR("BcacheSyncBlock fail, ret = %d, len = %u, block->num = %llu, start = %u\n",
                      ret, len, block->num, start);
        }
    }
    return ret;
}

static void NumListAdd(OsBcache *bc, OsBcacheBlock *block)
{
    OsBcacheBlock *temp = NULL;

    LOS_DL_LIST_FOR_EACH_ENTRY(temp, &bc->numHead, OsBcacheBlock, numNode) {
        if (temp->num > block->num) {
            LOS_ListTailInsert(&temp->numNode, &block->numNode);
            return;
        }
    }

    LOS_ListTailInsert(&bc->numHead, &block->numNode);
}

static void AddBlock(OsBcache *bc, OsBcacheBlock *block)
{
    RbAddBlock(bc, block);
    NumListAdd(bc, block);
    bc->sumNum += block->num;
    bc->nBlock++;
    LOS_ListAdd(&bc->listHead, &block->listNode);
}

static void DelBlock(OsBcache *bc, OsBcacheBlock *block)
{
    LOS_ListDelete(&block->listNode); /* lru list del */
    LOS_ListDelete(&block->numNode); /* num list del */
    bc->sumNum -= block->num;
    bc->nBlock--;
    RbDelBlock(bc, block); /* rb tree del */
    FreeBlock(bc, block); /* free list add */
}

static BOOL BlockAllDirty(const OsBcache *bc, OsBcacheBlock *block)
{
    UINT32 start = 0;
    UINT32 end = 0;
    UINT32 len = bc->sectorPerBlock >> UNINT_LOG2_SHIFT;

    if (block->modified == TRUE) {
        if (block->allDirty) {
            return TRUE;
        }

        if (FindFlagPos(block->flag, len, &start, &end) == ENOERR) {
            if ((end - start) == bc->sectorPerBlock) {
                block->allDirty = TRUE;
                return TRUE;
            }
        }
    }

    return FALSE;
}

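/*
 * The block descriptor array is split into a read pool and a write pool:
 * bc->wStart points to the first write descriptor and bc->wEnd to the last
 * one (set up in BcacheInitCache). GetBaseBlock() grabs the first unused
 * descriptor from that write range.
 */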
static OsBcacheBlock *GetBaseBlock(OsBcache *bc)
{
    OsBcacheBlock *base = bc->wStart;
    OsBcacheBlock *end = bc->wEnd;
    while (base < end) {
        if (base->used == FALSE) {
            base->used = TRUE;
            LOS_ListDelete(&base->listNode);
            return base;
        }
        base++;
    }

    return NULL;
}

/* try to get a free block first; if that fails, reclaim a useless block */
static OsBcacheBlock *GetSlowBlock(OsBcache *bc, BOOL read)
{
    LOS_DL_LIST *node = NULL;
    OsBcacheBlock *block = NULL;

    LOS_DL_LIST_FOR_EACH_ENTRY(block, &bc->freeListHead, OsBcacheBlock, listNode) {
        if (block->readBuff == read) {
            block->used = TRUE;
            LOS_ListDelete(&block->listNode);
            return block; /* get free one */
        }
    }

    node = bc->listHead.pstPrev;
    while (node != &bc->listHead) {
        block = LOS_DL_LIST_ENTRY(node, OsBcacheBlock, listNode);
        node = block->listNode.pstPrev;

        if (block->readBuff == read) {
            if (block->modified == TRUE) {
                BcacheSyncBlock(bc, block);
            }

            DelBlock(bc, block);
            block->used = TRUE;
            LOS_ListDelete(&block->listNode);
            return block; /* get used one */
        }
    }

    return NULL;
}

/* flush combined blocks */
static VOID WriteMergedBlocks(OsBcache *bc, OsBcacheBlock *begin, int blocks)
{
    INT32 ret;
    OsBcacheBlock *cur = NULL;
    OsBcacheBlock *next = NULL;
    UINT32 len = blocks * bc->sectorPerBlock;
    UINT64 pos = begin->num * bc->sectorPerBlock;

    ret = bc->bwriteFun(bc->priv, (const UINT8 *)begin->data, len, pos);
    if (ret != ENOERR) {
        PRINT_ERR("WriteMergedBlocks bwriteFun failed ret %d\n", ret);
        return;
    }

    bc->modifiedBlock -= blocks;
    cur = begin;
    while (blocks > 0) {
        next = LOS_DL_LIST_ENTRY(cur->numNode.pstNext, OsBcacheBlock, numNode);
        DelBlock(bc, cur);
        cur->modified = FALSE;
        blocks--;
        cur = next;
    }
}

/* find contiguous blocks and flush them */
static VOID MergeSyncBlocks(OsBcache *bc, OsBcacheBlock *start)
{
    INT32 mergedBlock = 0;
    OsBcacheBlock *cur = start;
    OsBcacheBlock *last = NULL;

    while (cur <= bc->wEnd) {
        if (!cur->used || !BlockAllDirty(bc, cur)) {
            break;
        }

        if (last && (last->num + 1 != cur->num)) {
            break;
        }

        mergedBlock++;
        last = cur;
        cur++;
    }

    if (mergedBlock > 0) {
        WriteMergedBlocks(bc, start, mergedBlock);
    }
}

/* get the minimum block number among the write blocks in the cache */
static inline UINT64 GetMinWriteNum(OsBcache *bc)
{
    UINT64 ret = 0;
    OsBcacheBlock *block = NULL;

    LOS_DL_LIST_FOR_EACH_ENTRY(block, &bc->numHead, OsBcacheBlock, numNode) {
        if (!block->readBuff) {
            ret = block->num;
            break;
        }
    }

    return ret;
}

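/*
 * Pick a cache descriptor for block 'num'. Reads always come from the read
 * pool. Writes prefer the descriptor physically adjacent to the one caching
 * block num - 1, so sequentially written blocks end up contiguous in memory
 * and can later be flushed with a single merged device write.
 */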
static OsBcacheBlock *AllocNewBlock(OsBcache *bc, BOOL read, UINT64 num)
{
    OsBcacheBlock *last = NULL;
    OsBcacheBlock *prefer = NULL;

    if (read) { /* read */
        return GetSlowBlock(bc, TRUE);
    }

    /* fallback: this may happen when the block was previously flushed; use a read buffer */
    if (bc->nBlock && num < GetMinWriteNum(bc)) {
        return GetSlowBlock(bc, TRUE);
    }

    last = RbFindBlock(bc, num - 1); /* num=0 is ok */
    if (last == NULL || last->readBuff) {
        return GetBaseBlock(bc); /* new block */
    }

    prefer = last + 1;
    if (prefer > bc->wEnd) {
        prefer = bc->wStart;
    }

    /* this block was already synced by the sync thread */
    if (prefer->used && !prefer->modified) {
        prefer->used = FALSE;
        DelBlock(bc, prefer);
    }

    if (prefer->used) { /* do not combine with next check */
        MergeSyncBlocks(bc, prefer); /* prefer->used may be changed here */
    }

    if (prefer->used) {
        BcacheSyncBlock(bc, prefer);
        DelBlock(bc, prefer);
    }

    prefer->used = TRUE;
    LOS_ListDelete(&prefer->listNode); /* del from free list */

    return prefer;
}

static INT32 BcacheSync(OsBcache *bc)
{
    LOS_DL_LIST *node = NULL;
    OsBcacheBlock *block = NULL;
    INT32 ret = ENOERR;

    D(("bcache cache sync\n"));

    (VOID)pthread_mutex_lock(&bc->bcacheMutex);
    node = bc->listHead.pstPrev;
    while (&bc->listHead != node) {
        block = LOS_DL_LIST_ENTRY(node, OsBcacheBlock, listNode);
        ret = BcacheSyncBlock(bc, block);
        if (ret != ENOERR) {
            PRINT_ERR("BcacheSync error, ret = %d\n", ret);
            break;
        }
        node = node->pstPrev;
    }
    (VOID)pthread_mutex_unlock(&bc->bcacheMutex);

    return ret;
}

static VOID BlockInit(OsBcache *bc, OsBcacheBlock *block, UINT64 num)
{
    (VOID)memset_s(block->flag, sizeof(block->flag), 0, sizeof(block->flag));
    block->num = num;
    block->readFlag = FALSE;
    if (block->modified == TRUE) {
        block->modified = FALSE;
        bc->modifiedBlock--;
    }
    block->allDirty = FALSE;
}

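/*
 * Central cache lookup: return the cache block for block number 'num',
 * allocating a descriptor (and optionally reading the block from disk and
 * triggering read-ahead) on a miss. All callers hold bc->bcacheMutex.
 */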
static INT32 BcacheGetBlock(OsBcache *bc, UINT64 num, BOOL readData, OsBcacheBlock **dblock)
{
    INT32 ret;
    OsBcacheBlock *block = NULL;
    OsBcacheBlock *first = NULL;

    /*
     * First check if the most recently used block is the requested block,
     * this can improve performance when using byte access functions.
     */
    if (LOS_ListEmpty(&bc->listHead) == FALSE) {
        first = LOS_DL_LIST_ENTRY(bc->listHead.pstNext, OsBcacheBlock, listNode);
        block = (first->num == num) ? first : RbFindBlock(bc, num);
    }

    if (block != NULL) {
        D(("bcache block = %llu found in cache\n", num));
#ifdef BCACHE_ANALYSE
        UINT32 index = ((UINT32)(block->data - g_memStart)) / g_dataSize;
        PRINTK(", [HIT], %llu, %u\n", num, index);
        g_hitTimes[index]++;
#endif

        if (first != block) {
            ListMoveBlockToHead(bc, block);
        }
        *dblock = block;

        if ((bc->prereadFun != NULL) && (readData == TRUE) && (block->pgHit == 1)) {
            block->pgHit = 0;
            bc->prereadFun(bc, block);
        }

        return ENOERR;
    }

    D(("bcache block = %llu NOT found in cache\n", num));

    block = AllocNewBlock(bc, readData, num);
    if (block == NULL) {
        block = GetSlowBlock(bc, readData);
    }

    if (block == NULL) {
        return -ENOMEM;
    }
#ifdef BCACHE_ANALYSE
    UINT32 index = ((UINT32)(block->data - g_memStart)) / g_dataSize;
    PRINTK(", [MISS], %llu, %u\n", num, index);
    g_switchTimes[index]++;
#endif
    BlockInit(bc, block, num);

    if (readData == TRUE) {
        D(("bcache reading block = %llu\n", block->num));

        ret = BlockRead(bc, block, block->data);
        if (ret != ENOERR) {
            return ret;
        }
        if (bc->prereadFun != NULL) {
            bc->prereadFun(bc, block);
        }
    }

    AddBlock(bc, block);

    *dblock = block;
    return ENOERR;
}

INT32 BcacheClearCache(OsBcache *bc)
{
    OsBcacheBlock *block = NULL;
    OsBcacheBlock *next = NULL;
    LOS_DL_LIST_FOR_EACH_ENTRY_SAFE(block, next, &bc->listHead, OsBcacheBlock, listNode) {
        DelBlock(bc, block);
    }
    return 0;
}

static INT32 BcacheInitCache(OsBcache *bc,
                             UINT8 *memStart,
                             UINT32 memSize,
                             UINT32 blockSize)
{
    UINT8 *blockMem = NULL;
    UINT8 *dataMem = NULL;
    OsBcacheBlock *block = NULL;
    UINT32 blockNum, i;

    LOS_ListInit(&bc->listHead);
    LOS_ListInit(&bc->numHead);
    bc->sumNum = 0;
    bc->nBlock = 0;

    if (!GetValLog2(blockSize)) {
        PRINT_ERR("GetValLog2(%u) return 0.\n", blockSize);
        return -EINVAL;
    }

    bc->rbRoot.rb_node = NULL;
    bc->memStart = memStart;
    bc->blockSize = blockSize;
    bc->blockSizeLog2 = GetValLog2(blockSize);
    bc->modifiedBlock = 0;

    /* init block memory pool */
    LOS_ListInit(&bc->freeListHead);

    blockNum = (memSize - DMA_ALLGN) / (sizeof(OsBcacheBlock) + bc->blockSize);
    blockMem = bc->memStart;
    dataMem = blockMem + (sizeof(OsBcacheBlock) * blockNum);
    dataMem += ALIGN_DISP((UINTPTR)dataMem);

#ifdef BCACHE_ANALYSE
    g_memSize = memSize;
    g_blockNum = blockNum;
    g_dataSize = bc->blockSize;
    g_memStart = dataMem;
#endif

    for (i = 0; i < blockNum; i++) {
        block = (OsBcacheBlock *)(VOID *)blockMem;
        block->data = dataMem;
        block->readBuff = (i < CONFIG_FS_FAT_READ_NUMS) ? TRUE : FALSE;

        if (i == CONFIG_FS_FAT_READ_NUMS) {
            bc->wStart = block;
        }

        LOS_ListTailInsert(&bc->freeListHead, &block->listNode);

        blockMem += sizeof(OsBcacheBlock);
        dataMem += bc->blockSize;
    }

    bc->wEnd = block;

    return ENOERR;
}

static INT32 DrvBread(struct Vnode *priv, UINT8 *buf, UINT32 len, UINT64 pos)
{
    struct block_operations *bops = (struct block_operations *)((struct drv_data *)priv->data)->ops;

    INT32 ret = bops->read(priv, buf, pos, len);
    if (ret != (INT32)len) {
        PRINT_ERR("%s failure\n", __FUNCTION__);
        return ret;
    }
    return ENOERR;
}

static INT32 DrvBwrite(struct Vnode *priv, const UINT8 *buf, UINT32 len, UINT64 pos)
{
    struct block_operations *bops = (struct block_operations *)((struct drv_data *)priv->data)->ops;
    INT32 ret = bops->write(priv, buf, pos, len);
    if (ret != (INT32)len) {
        PRINT_ERR("%s failure\n", __FUNCTION__);
        return ret;
    }
    return ENOERR;
}

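/*
 * Bind a block cache to a block-device vnode: hook up the read/write driver
 * callbacks, carve the descriptor and data pools out of memStart/memSize and
 * create the cache mutex. The mutex type is forced to recursive afterwards,
 * presumably so that nested cache calls (such as read-ahead triggered from a
 * cache lookup) do not deadlock.
 */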
INT32 BlockCacheDrvCreate(VOID *handle,
                          UINT8 *memStart,
                          UINT32 memSize,
                          UINT32 blockSize,
                          OsBcache *bc)
{
    INT32 ret;
    bc->priv = handle;
    bc->breadFun = DrvBread;
    bc->bwriteFun = DrvBwrite;

    ret = BcacheInitCache(bc, memStart, memSize, blockSize);
    if (ret != ENOERR) {
        return ret;
    }

    if (pthread_mutex_init(&bc->bcacheMutex, NULL) != ENOERR) {
        return VFS_ERROR;
    }
    bc->bcacheMutex.attr.type = PTHREAD_MUTEX_RECURSIVE;

    return ENOERR;
}

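/*
 * Copy '*len' bytes starting at 'sector' out of the cache into 'buf', reading
 * missing blocks from the device on demand. The sector address is first
 * converted to a block number and an offset inside that block; e.g. with a
 * 512-byte sector and an 8 KiB block (blockSizeLog2 == 13), sector 20 maps to
 * block 1 at offset 2048.
 * Illustrative call (hypothetical caller, not part of this file):
 *     UINT32 len = 1024;
 *     INT32 ret = BlockCacheRead(bc, buf, &len, 20, TRUE);
 * On return, *len holds the number of bytes actually copied.
 */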
INT32 BlockCacheRead(OsBcache *bc, UINT8 *buf, UINT32 *len, UINT64 sector, BOOL useRead)
{
    OsBcacheBlock *block = NULL;
    UINT8 *tempBuf = buf;
    UINT32 size;
    UINT32 currentSize;
    INT32 ret = ENOERR;
    UINT64 pos;
    UINT64 num;
#ifdef BCACHE_ANALYSE
    PRINTK("bcache read:\n");
#endif

    if (bc == NULL || buf == NULL || len == NULL) {
        return -EPERM;
    }

    size = *len;
    pos = sector * bc->sectorSize;
    num = pos >> bc->blockSizeLog2;
    pos = pos & (bc->blockSize - 1);

    while (size > 0) {
        if ((size + pos) > bc->blockSize) {
            currentSize = bc->blockSize - (UINT32)pos;
        } else {
            currentSize = size;
        }

        (VOID)pthread_mutex_lock(&bc->bcacheMutex);

        /* useRead should be FALSE when reading large contiguous data */
        ret = BcacheGetBlock(bc, num, useRead, &block);
        if (ret != ENOERR) {
            (VOID)pthread_mutex_unlock(&bc->bcacheMutex);
            break;
        }

        if ((block->readFlag == FALSE) && (block->modified == TRUE)) {
            ret = BcacheGetFlag(bc, block);
            if (ret != ENOERR) {
                (VOID)pthread_mutex_unlock(&bc->bcacheMutex);
                return ret;
            }
        } else if ((block->readFlag == FALSE) && (block->modified == FALSE)) {
            ret = BlockRead(bc, block, block->data);
            if (ret != ENOERR) {
                (VOID)pthread_mutex_unlock(&bc->bcacheMutex);
                return ret;
            }
        }

        if (LOS_CopyFromKernel((VOID *)tempBuf, size, (VOID *)(block->data + pos), currentSize) != EOK) {
            (VOID)pthread_mutex_unlock(&bc->bcacheMutex);
            return VFS_ERROR;
        }

        (VOID)pthread_mutex_unlock(&bc->bcacheMutex);

        tempBuf += currentSize;
        size -= currentSize;
        pos = 0;
        num++;
    }
    *len -= size;
    return ret;
}

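/*
 * Copy '*len' bytes from 'buf' into the cache starting at 'sector'. Blocks
 * are fetched with readData == FALSE, so a full-block write never touches the
 * device; partially written blocks only mark the affected sectors dirty and
 * are completed from disk later, when they are synced or read.
 */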
INT32 BlockCacheWrite(OsBcache *bc, const UINT8 *buf, UINT32 *len, UINT64 sector)
{
    OsBcacheBlock *block = NULL;
    const UINT8 *tempBuf = buf;
    UINT32 size = *len;
    INT32 ret = ENOERR;
    UINT32 currentSize;
    UINT64 pos;
    UINT64 num;
#ifdef BCACHE_ANALYSE
    PRINTK("bcache write:\n");
#endif

    pos = sector * bc->sectorSize;
    num = pos >> bc->blockSizeLog2;
    pos = pos & (bc->blockSize - 1);

    D(("bcache write len = %u pos = %llu bnum = %llu\n", *len, pos, num));

    while (size > 0) {
        if ((size + pos) > bc->blockSize) {
            currentSize = bc->blockSize - (UINT32)pos;
        } else {
            currentSize = size;
        }

        (VOID)pthread_mutex_lock(&bc->bcacheMutex);
        ret = BcacheGetBlock(bc, num, FALSE, &block);
        if (ret != ENOERR) {
            (VOID)pthread_mutex_unlock(&bc->bcacheMutex);
            break;
        }

        if (LOS_CopyToKernel((VOID *)(block->data + pos), bc->blockSize - (UINT32)pos,
                             (VOID *)tempBuf, currentSize) != EOK) {
            (VOID)pthread_mutex_unlock(&bc->bcacheMutex);
            return VFS_ERROR;
        }
        if (block->modified == FALSE) {
            block->modified = TRUE;
            bc->modifiedBlock++;
        }
        if ((pos == 0) && (currentSize == bc->blockSize)) {
            (void)memset_s(block->flag, sizeof(block->flag), 0xFF, sizeof(block->flag));
            block->allDirty = TRUE;
        } else {
            BcacheSetFlag(bc, block, (UINT32)pos, currentSize);
        }
        (VOID)pthread_mutex_unlock(&bc->bcacheMutex);

        tempBuf += currentSize;
        size -= currentSize;
        pos = 0;
        num++;
    }
    *len -= size;
    return ret;
}

INT32 BlockCacheSync(OsBcache *bc)
{
    return BcacheSync(bc);
}

INT32 OsSdSync(INT32 id)
{
#ifdef LOSCFG_FS_FAT_CACHE
    INT32 ret;
    los_disk *disk = get_disk(id);
    if ((disk == NULL) || (disk->disk_status == STAT_UNUSED)) {
        return VFS_ERROR;
    }
    if (pthread_mutex_lock(&disk->disk_mutex) != ENOERR) {
        PRINT_ERR("%s %d, mutex lock fail!\n", __FUNCTION__, __LINE__);
        return VFS_ERROR;
    }
    if ((disk->disk_status == STAT_INUSED) && (disk->bcache != NULL)) {
        ret = BcacheSync(disk->bcache);
    } else {
        ret = VFS_ERROR;
    }
    if (pthread_mutex_unlock(&disk->disk_mutex) != ENOERR) {
        PRINT_ERR("%s %d, mutex unlock fail!\n", __FUNCTION__, __LINE__);
        return VFS_ERROR;
    }
    return ret;
#else
    return VFS_ERROR;
#endif
}

INT32 LOS_BcacheSyncByName(const CHAR *name)
{
    INT32 diskID = los_get_diskid_byname(name);
    return OsSdSync(diskID);
}

INT32 BcacheGetDirtyRatio(INT32 id)
{
#ifdef LOSCFG_FS_FAT_CACHE
    INT32 ret;
    los_disk *disk = get_disk(id);
    if (disk == NULL) {
        return VFS_ERROR;
    }

    if (pthread_mutex_lock(&disk->disk_mutex) != ENOERR) {
        PRINT_ERR("%s %d, mutex lock fail!\n", __FUNCTION__, __LINE__);
        return VFS_ERROR;
    }
    if ((disk->disk_status == STAT_INUSED) && (disk->bcache != NULL)) {
        ret = (INT32)((disk->bcache->modifiedBlock * PERCENTAGE) / GetFatBlockNums());
    } else {
        ret = VFS_ERROR;
    }
    if (pthread_mutex_unlock(&disk->disk_mutex) != ENOERR) {
        PRINT_ERR("%s %d, mutex unlock fail!\n", __FUNCTION__, __LINE__);
        return VFS_ERROR;
    }
    return ret;
#else
    return VFS_ERROR;
#endif
}

INT32 LOS_GetDirtyRatioByName(const CHAR *name)
{
    INT32 diskID = los_get_diskid_byname(name);
    return BcacheGetDirtyRatio(diskID);
}

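/*
 * Optional background writeback: a per-disk thread periodically checks the
 * cache dirty ratio and syncs the whole disk once it exceeds g_dirtyRatio,
 * sleeping g_syncInterval milliseconds between checks.
 */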
#ifdef LOSCFG_FS_FAT_CACHE_SYNC_THREAD
static VOID BcacheSyncThread(UINT32 id)
{
    INT32 diskID = (INT32)id;
    INT32 dirtyRatio;
    while (1) {
        dirtyRatio = BcacheGetDirtyRatio(diskID);
        if (dirtyRatio > (INT32)g_dirtyRatio) {
            (VOID)OsSdSync(diskID);
        }
        msleep(g_syncInterval);
    }
}

VOID BcacheSyncThreadInit(OsBcache *bc, INT32 id)
{
    UINT32 ret;
    TSK_INIT_PARAM_S appTask;

    (VOID)memset_s(&appTask, sizeof(TSK_INIT_PARAM_S), 0, sizeof(TSK_INIT_PARAM_S));
    appTask.pfnTaskEntry = (TSK_ENTRY_FUNC)BcacheSyncThread;
    appTask.uwStackSize = BCACHE_STATCK_SIZE;
    appTask.pcName = "bcache_sync_task";
    appTask.usTaskPrio = g_syncThreadPrio;
    appTask.auwArgs[0] = (UINTPTR)id;
    appTask.uwResved = LOS_TASK_STATUS_DETACHED;
    ret = LOS_TaskCreate(&bc->syncTaskId, &appTask);
    if (ret != ENOERR) {
        PRINT_ERR("Bcache sync task create failed in %s, %d\n", __FUNCTION__, __LINE__);
    }
}

VOID BcacheSyncThreadDeinit(const OsBcache *bc)
{
    if (bc != NULL) {
        if (LOS_TaskDelete(bc->syncTaskId) != ENOERR) {
            PRINT_ERR("Bcache sync task delete failed in %s, %d\n", __FUNCTION__, __LINE__);
        }
    }
}
#endif

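/*
 * Allocate and initialize a block cache for a block-device vnode. The cache
 * needs (sizeof(OsBcacheBlock) + blockSize) bytes per block plus DMA_ALLGN
 * bytes of alignment slack; for example, with sectorSize = 512 and
 * sectorPerBlock = 64 (32 KiB blocks), 28 blocks take roughly
 * 28 * (32 KiB + descriptor) + 64 bytes. These numbers are only an
 * illustration; the real values come from the caller's CONFIG_FS_FAT_*
 * settings.
 */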
OsBcache *BlockCacheInit(struct Vnode *devNode, UINT32 sectorSize, UINT32 sectorPerBlock,
                         UINT32 blockNum, UINT64 blockCount)
{
    OsBcache *bcache = NULL;
    struct Vnode *blkDriver = devNode;
    UINT8 *bcacheMem = NULL;
    UINT8 *rwBuffer = NULL;
    UINT32 blockSize, memSize;

    if ((blkDriver == NULL) || (sectorSize * sectorPerBlock * blockNum == 0) || (blockCount == 0)) {
        return NULL;
    }

    blockSize = sectorSize * sectorPerBlock;
    if ((((UINT64)(sizeof(OsBcacheBlock) + blockSize) * blockNum) + DMA_ALLGN) > UINT_MAX) {
        return NULL;
    }
    memSize = ((sizeof(OsBcacheBlock) + blockSize) * blockNum) + DMA_ALLGN;

    bcache = (OsBcache *)zalloc(sizeof(OsBcache));
    if (bcache == NULL) {
        PRINT_ERR("bcache_init : malloc %u Bytes failed!\n", sizeof(OsBcache));
        return NULL;
    }

    bcacheMem = (UINT8 *)zalloc(memSize);
    if (bcacheMem == NULL) {
        PRINT_ERR("bcache_init : malloc %u Bytes failed!\n", memSize);
        goto ERROR_OUT_WITH_BCACHE;
    }

    rwBuffer = (UINT8 *)memalign(DMA_ALLGN, blockSize);
    if (rwBuffer == NULL) {
        PRINT_ERR("bcache_init : malloc %u Bytes failed!\n", blockSize);
        goto ERROR_OUT_WITH_MEM;
    }

    bcache->rwBuffer = rwBuffer;
    bcache->sectorSize = sectorSize;
    bcache->sectorPerBlock = sectorPerBlock;
    bcache->blockCount = blockCount;

    if (BlockCacheDrvCreate(blkDriver, bcacheMem, memSize, blockSize, bcache) != ENOERR) {
        goto ERROR_OUT_WITH_BUFFER;
    }

    return bcache;

ERROR_OUT_WITH_BUFFER:
    free(rwBuffer);
ERROR_OUT_WITH_MEM:
    free(bcacheMem);
ERROR_OUT_WITH_BCACHE:
    free(bcache);
    return NULL;
}

VOID BlockCacheDeinit(OsBcache *bcache)
{
    if (bcache != NULL) {
        (VOID)pthread_mutex_destroy(&bcache->bcacheMutex);
        free(bcache->memStart);
        bcache->memStart = NULL;
        free(bcache->rwBuffer);
        bcache->rwBuffer = NULL;
        free(bcache);
    }
}

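/*
 * Asynchronous read-ahead: the thread below sleeps on bcacheEvent and, when
 * woken by ResumeAsyncPreread(), pulls the next PREREAD_BLOCK_NUM blocks
 * after bc->curBlockNum into the cache so that sequential readers find them
 * already resident.
 */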
static VOID BcacheAsyncPrereadThread(VOID *arg)
{
    OsBcache *bc = (OsBcache *)arg;
    OsBcacheBlock *block = NULL;
    INT32 ret;
    UINT32 i;

    for (;;) {
        ret = (INT32)LOS_EventRead(&bc->bcacheEvent, PREREAD_EVENT_MASK,
                                   LOS_WAITMODE_OR | LOS_WAITMODE_CLR, LOS_WAIT_FOREVER);
        if (ret != ASYNC_EVENT_BIT) {
            PRINT_ERR("The event read in %s, %d is error!!!\n", __FUNCTION__, __LINE__);
            continue;
        }

        for (i = 1; i <= PREREAD_BLOCK_NUM; i++) {
            if ((bc->curBlockNum + i) >= bc->blockCount) {
                break;
            }

            (VOID)pthread_mutex_lock(&bc->bcacheMutex);
            ret = BcacheGetBlock(bc, bc->curBlockNum + i, TRUE, &block);
            if (ret != ENOERR) {
                PRINT_ERR("read block %llu error : %d!\n", bc->curBlockNum, ret);
            }

            (VOID)pthread_mutex_unlock(&bc->bcacheMutex);
        }

        if (block != NULL) {
            block->pgHit = 1; /* preread complete */
        }
    }
}

VOID ResumeAsyncPreread(OsBcache *arg1, const OsBcacheBlock *arg2)
{
    UINT32 ret;
    OsBcache *bc = arg1;
    const OsBcacheBlock *block = arg2;

    if (OsCurrTaskGet()->taskID != bc->prereadTaskId) {
        bc->curBlockNum = block->num;
        ret = LOS_EventWrite(&bc->bcacheEvent, ASYNC_EVENT_BIT);
        if (ret != ENOERR) {
            PRINT_ERR("Write event failed in %s, %d\n", __FUNCTION__, __LINE__);
        }
    }
}

UINT32 BcacheAsyncPrereadInit(OsBcache *bc)
{
    UINT32 ret;
    TSK_INIT_PARAM_S appTask;

    ret = LOS_EventInit(&bc->bcacheEvent);
    if (ret != ENOERR) {
        PRINT_ERR("Async event init failed in %s, %d\n", __FUNCTION__, __LINE__);
        return ret;
    }

    (VOID)memset_s(&appTask, sizeof(TSK_INIT_PARAM_S), 0, sizeof(TSK_INIT_PARAM_S));
    appTask.pfnTaskEntry = (TSK_ENTRY_FUNC)BcacheAsyncPrereadThread;
    appTask.uwStackSize = BCACHE_STATCK_SIZE;
    appTask.pcName = "bcache_async_task";
    appTask.usTaskPrio = BCACHE_PREREAD_PRIO;
    appTask.auwArgs[0] = (UINTPTR)bc;
    appTask.uwResved = LOS_TASK_STATUS_DETACHED;
    ret = LOS_TaskCreate(&bc->prereadTaskId, &appTask);
    if (ret != ENOERR) {
        PRINT_ERR("Bcache async task create failed in %s, %d\n", __FUNCTION__, __LINE__);
    }

    return ret;
}

UINT32 BcacheAsyncPrereadDeinit(OsBcache *bc)
{
    UINT32 ret = LOS_NOK;

    if (bc != NULL) {
        ret = LOS_TaskDelete(bc->prereadTaskId);
        if (ret != ENOERR) {
            PRINT_ERR("Bcache async task delete failed in %s, %d\n", __FUNCTION__, __LINE__);
        }

        ret = LOS_EventDestroy(&bc->bcacheEvent);
        if (ret != ENOERR) {
            PRINT_ERR("Async event destroy failed in %s, %d\n", __FUNCTION__, __LINE__);
            return ret;
        }
    }

    return ret;
}