1 /*
2  * The little filesystem
3  *
4  * Copyright (c) 2022, The littlefs authors.
5  * Copyright (c) 2017, Arm Limited. All rights reserved.
6  * SPDX-License-Identifier: BSD-3-Clause
7  */
8 #include "lfs.h"
9 #include "lfs_util.h"
10 
11 
12 // some constants used throughout the code
13 #define LFS_BLOCK_NULL ((lfs_block_t)-1)
14 #define LFS_BLOCK_INLINE ((lfs_block_t)-2)
15 
16 enum {
17     LFS_OK_RELOCATED = 1,
18     LFS_OK_DROPPED   = 2,
19     LFS_OK_ORPHANED  = 3,
20 };
21 
22 enum {
23     LFS_CMP_EQ = 0,
24     LFS_CMP_LT = 1,
25     LFS_CMP_GT = 2,
26 };
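// a rough sketch of how these are used: the comparison helpers below
// (lfs_bd_cmp, lfs_dir_find_match) return an LFS_CMP_* code on success and a
// negative LFS_ERR_* code on failure, so callers look something like:
//
//     int res = lfs_bd_cmp(lfs, NULL, &lfs->rcache, size,
//             block, off, buffer, size);
//     if (res < 0) {
//         return res;              // a real error
//     }
//     if (res != LFS_CMP_EQ) {
//         return LFS_ERR_CORRUPT;  // contents differ
//     }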
27 
28 
29 /// Caching block device operations ///
30 
31 static inline void lfs_cache_drop(lfs_t *lfs, lfs_cache_t *rcache) {
32     // do not zero, cheaper if cache is readonly or only going to be
33     // written with identical data (during relocates)
34     (void)lfs;
35     rcache->block = LFS_BLOCK_NULL;
36 }
37 
38 static inline void lfs_cache_zero(lfs_t *lfs, lfs_cache_t *pcache) {
39     // zero to avoid information leak
40     memset(pcache->buffer, 0xff, lfs->cfg->cache_size);
41     pcache->block = LFS_BLOCK_NULL;
42 }
43 
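// a quick summary of the read path below: lfs_bd_read first serves what it
// can from pcache (which may hold data not yet flushed to disk, so it takes
// priority), then from rcache, bypasses the caches entirely for large
// read_size-aligned reads, and otherwise refills rcache with up to hint
// bytes rounded to read_size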
44 static int lfs_bd_read(lfs_t *lfs,
45         const lfs_cache_t *pcache, lfs_cache_t *rcache, lfs_size_t hint,
46         lfs_block_t block, lfs_off_t off,
47         void *buffer, lfs_size_t size) {
48     uint8_t *data = buffer;
49     if (off+size > lfs->cfg->block_size
50             || (lfs->block_count && block >= lfs->block_count)) {
51         return LFS_ERR_CORRUPT;
52     }
53 
54     while (size > 0) {
55         lfs_size_t diff = size;
56 
57         if (pcache && block == pcache->block &&
58                 off < pcache->off + pcache->size) {
59             if (off >= pcache->off) {
60                 // is already in pcache?
61                 diff = lfs_min(diff, pcache->size - (off-pcache->off));
62                 memcpy(data, &pcache->buffer[off-pcache->off], diff);
63 
64                 data += diff;
65                 off += diff;
66                 size -= diff;
67                 continue;
68             }
69 
70             // pcache takes priority
71             diff = lfs_min(diff, pcache->off-off);
72         }
73 
74         if (block == rcache->block &&
75                 off < rcache->off + rcache->size) {
76             if (off >= rcache->off) {
77                 // is already in rcache?
78                 diff = lfs_min(diff, rcache->size - (off-rcache->off));
79                 memcpy(data, &rcache->buffer[off-rcache->off], diff);
80 
81                 data += diff;
82                 off += diff;
83                 size -= diff;
84                 continue;
85             }
86 
87             // rcache takes priority
88             diff = lfs_min(diff, rcache->off-off);
89         }
90 
91         if (size >= hint && off % lfs->cfg->read_size == 0 &&
92                 size >= lfs->cfg->read_size) {
93             // bypass cache?
94             diff = lfs_aligndown(diff, lfs->cfg->read_size);
95             int err = lfs->cfg->read(lfs->cfg, block, off, data, diff);
96             if (err) {
97                 return err;
98             }
99 
100             data += diff;
101             off += diff;
102             size -= diff;
103             continue;
104         }
105 
106         // load to cache, first condition can no longer fail
107         LFS_ASSERT(!lfs->block_count || block < lfs->block_count);
108         rcache->block = block;
109         rcache->off = lfs_aligndown(off, lfs->cfg->read_size);
110         rcache->size = lfs_min(
111                 lfs_min(
112                     lfs_alignup(off+hint, lfs->cfg->read_size),
113                     lfs->cfg->block_size)
114                 - rcache->off,
115                 lfs->cfg->cache_size);
116         int err = lfs->cfg->read(lfs->cfg, rcache->block,
117                 rcache->off, rcache->buffer, rcache->size);
118         LFS_ASSERT(err <= 0);
119         if (err) {
120             return err;
121         }
122     }
123 
124     return 0;
125 }
126 
127 static int lfs_bd_cmp(lfs_t *lfs,
128         const lfs_cache_t *pcache, lfs_cache_t *rcache, lfs_size_t hint,
129         lfs_block_t block, lfs_off_t off,
130         const void *buffer, lfs_size_t size) {
131     const uint8_t *data = buffer;
132     lfs_size_t diff = 0;
133 
134     for (lfs_off_t i = 0; i < size; i += diff) {
135         uint8_t dat[8];
136 
137         diff = lfs_min(size-i, sizeof(dat));
138         int err = lfs_bd_read(lfs,
139                 pcache, rcache, hint-i,
140                 block, off+i, &dat, diff);
141         if (err) {
142             return err;
143         }
144 
145         int res = memcmp(dat, data + i, diff);
146         if (res) {
147             return res < 0 ? LFS_CMP_LT : LFS_CMP_GT;
148         }
149     }
150 
151     return LFS_CMP_EQ;
152 }
153 
154 static int lfs_bd_crc(lfs_t *lfs,
155         const lfs_cache_t *pcache, lfs_cache_t *rcache, lfs_size_t hint,
156         lfs_block_t block, lfs_off_t off, lfs_size_t size, uint32_t *crc) {
157     lfs_size_t diff = 0;
158 
159     for (lfs_off_t i = 0; i < size; i += diff) {
160         uint8_t dat[8];
161         diff = lfs_min(size-i, sizeof(dat));
162         int err = lfs_bd_read(lfs,
163                 pcache, rcache, hint-i,
164                 block, off+i, &dat, diff);
165         if (err) {
166             return err;
167         }
168 
169         *crc = lfs_crc(*crc, &dat, diff);
170     }
171 
172     return 0;
173 }
174 
175 #ifndef LFS_READONLY
176 static int lfs_bd_flush(lfs_t *lfs,
177         lfs_cache_t *pcache, lfs_cache_t *rcache, bool validate) {
178     if (pcache->block != LFS_BLOCK_NULL && pcache->block != LFS_BLOCK_INLINE) {
179         LFS_ASSERT(pcache->block < lfs->block_count);
180         lfs_size_t diff = lfs_alignup(pcache->size, lfs->cfg->prog_size);
181         int err = lfs->cfg->prog(lfs->cfg, pcache->block,
182                 pcache->off, pcache->buffer, diff);
183         LFS_ASSERT(err <= 0);
184         if (err) {
185             return err;
186         }
187 
188         if (validate) {
189             // check data on disk
190             lfs_cache_drop(lfs, rcache);
191             int res = lfs_bd_cmp(lfs,
192                     NULL, rcache, diff,
193                     pcache->block, pcache->off, pcache->buffer, diff);
194             if (res < 0) {
195                 return res;
196             }
197 
198             if (res != LFS_CMP_EQ) {
199                 return LFS_ERR_CORRUPT;
200             }
201         }
202 
203         lfs_cache_zero(lfs, pcache);
204     }
205 
206     return 0;
207 }
208 #endif
209 
210 #ifndef LFS_READONLY
211 static int lfs_bd_sync(lfs_t *lfs,
212         lfs_cache_t *pcache, lfs_cache_t *rcache, bool validate) {
213     lfs_cache_drop(lfs, rcache);
214 
215     int err = lfs_bd_flush(lfs, pcache, rcache, validate);
216     if (err) {
217         return err;
218     }
219 
220     err = lfs->cfg->sync(lfs->cfg);
221     LFS_ASSERT(err <= 0);
222     return err;
223 }
224 #endif
225 
226 #ifndef LFS_READONLY
227 static int lfs_bd_prog(lfs_t *lfs,
228         lfs_cache_t *pcache, lfs_cache_t *rcache, bool validate,
229         lfs_block_t block, lfs_off_t off,
230         const void *buffer, lfs_size_t size) {
231     const uint8_t *data = buffer;
232     LFS_ASSERT(block == LFS_BLOCK_INLINE || block < lfs->block_count);
233     LFS_ASSERT(off + size <= lfs->cfg->block_size);
234 
235     while (size > 0) {
236         if (block == pcache->block &&
237                 off >= pcache->off &&
238                 off < pcache->off + lfs->cfg->cache_size) {
239             // already fits in pcache?
240             lfs_size_t diff = lfs_min(size,
241                     lfs->cfg->cache_size - (off-pcache->off));
242             memcpy(&pcache->buffer[off-pcache->off], data, diff);
243 
244             data += diff;
245             off += diff;
246             size -= diff;
247 
248             pcache->size = lfs_max(pcache->size, off - pcache->off);
249             if (pcache->size == lfs->cfg->cache_size) {
250                 // eagerly flush out pcache if we fill up
251                 int err = lfs_bd_flush(lfs, pcache, rcache, validate);
252                 if (err) {
253                     return err;
254                 }
255             }
256 
257             continue;
258         }
259 
260         // pcache must have been flushed, either by programming an
261         // entire block or manually flushing the pcache
262         LFS_ASSERT(pcache->block == LFS_BLOCK_NULL);
263 
264         // prepare pcache, first condition can no longer fail
265         pcache->block = block;
266         pcache->off = lfs_aligndown(off, lfs->cfg->prog_size);
267         pcache->size = 0;
268     }
269 
270     return 0;
271 }
272 #endif
273 
274 #ifndef LFS_READONLY
275 static int lfs_bd_erase(lfs_t *lfs, lfs_block_t block) {
276     LFS_ASSERT(block < lfs->block_count);
277     int err = lfs->cfg->erase(lfs->cfg, block);
278     LFS_ASSERT(err <= 0);
279     return err;
280 }
281 #endif
282 
283 
284 /// Small type-level utilities ///
285 // operations on block pairs
286 static inline void lfs_pair_swap(lfs_block_t pair[2]) {
287     lfs_block_t t = pair[0];
288     pair[0] = pair[1];
289     pair[1] = t;
290 }
291 
292 static inline bool lfs_pair_isnull(const lfs_block_t pair[2]) {
293     return pair[0] == LFS_BLOCK_NULL || pair[1] == LFS_BLOCK_NULL;
294 }
295 
296 static inline int lfs_pair_cmp(
297         const lfs_block_t paira[2],
298         const lfs_block_t pairb[2]) {
299     return !(paira[0] == pairb[0] || paira[1] == pairb[1] ||
300              paira[0] == pairb[1] || paira[1] == pairb[0]);
301 }
302 
303 static inline bool lfs_pair_issync(
304         const lfs_block_t paira[2],
305         const lfs_block_t pairb[2]) {
306     return (paira[0] == pairb[0] && paira[1] == pairb[1]) ||
307            (paira[0] == pairb[1] && paira[1] == pairb[0]);
308 }
309 
310 static inline void lfs_pair_fromle32(lfs_block_t pair[2]) {
311     pair[0] = lfs_fromle32(pair[0]);
312     pair[1] = lfs_fromle32(pair[1]);
313 }
314 
315 #ifndef LFS_READONLY
316 static inline void lfs_pair_tole32(lfs_block_t pair[2]) {
317     pair[0] = lfs_tole32(pair[0]);
318     pair[1] = lfs_tole32(pair[1]);
319 }
320 #endif
321 
322 // operations on 32-bit entry tags
323 typedef uint32_t lfs_tag_t;
324 typedef int32_t lfs_stag_t;
325 
326 #define LFS_MKTAG(type, id, size) \
327     (((lfs_tag_t)(type) << 20) | ((lfs_tag_t)(id) << 10) | (lfs_tag_t)(size))
328 
329 #define LFS_MKTAG_IF(cond, type, id, size) \
330     ((cond) ? LFS_MKTAG(type, id, size) : LFS_MKTAG(LFS_FROM_NOOP, 0, 0))
331 
332 #define LFS_MKTAG_IF_ELSE(cond, type1, id1, size1, type2, id2, size2) \
333     ((cond) ? LFS_MKTAG(type1, id1, size1) : LFS_MKTAG(type2, id2, size2))
334 
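// a summary of the tag layout, derived from the accessors below:
//
//     [1|   11   |   10   |   10   ]
//      ^     ^        ^        ^- size  (0x3ff means deleted)
//      |     |        '---------- id    (0x3ff means no/any id)
//      |     '------------------- type
//      '------------------------- invalid bit (0 when valid)
//
// for example (illustrative values only):
//
//     lfs_tag_t tag = LFS_MKTAG(0x401, 1, 8);   // == 0x40100408
//     lfs_tag_type3(tag);                       // == 0x401
//     lfs_tag_id(tag);                          // == 1
//     lfs_tag_size(tag);                        // == 8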
335 static inline bool lfs_tag_isvalid(lfs_tag_t tag) {
336     return !(tag & 0x80000000);
337 }
338 
339 static inline bool lfs_tag_isdelete(lfs_tag_t tag) {
340     return ((int32_t)(tag << 22) >> 22) == -1;
341 }
342 
343 static inline uint16_t lfs_tag_type1(lfs_tag_t tag) {
344     return (tag & 0x70000000) >> 20;
345 }
346 
347 static inline uint16_t lfs_tag_type2(lfs_tag_t tag) {
348     return (tag & 0x78000000) >> 20;
349 }
350 
351 static inline uint16_t lfs_tag_type3(lfs_tag_t tag) {
352     return (tag & 0x7ff00000) >> 20;
353 }
354 
355 static inline uint8_t lfs_tag_chunk(lfs_tag_t tag) {
356     return (tag & 0x0ff00000) >> 20;
357 }
358 
359 static inline int8_t lfs_tag_splice(lfs_tag_t tag) {
360     return (int8_t)lfs_tag_chunk(tag);
361 }
362 
363 static inline uint16_t lfs_tag_id(lfs_tag_t tag) {
364     return (tag & 0x000ffc00) >> 10;
365 }
366 
367 static inline lfs_size_t lfs_tag_size(lfs_tag_t tag) {
368     return tag & 0x000003ff;
369 }
370 
371 static inline lfs_size_t lfs_tag_dsize(lfs_tag_t tag) {
372     return sizeof(tag) + lfs_tag_size(tag + lfs_tag_isdelete(tag));
373 }
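// note on lfs_tag_dsize above: the on-disk footprint of a tag is the 4-byte
// tag itself plus its payload; for deleted tags the size field is 0x3ff
// (-1), and the "+ lfs_tag_isdelete(tag)" nudges that field so lfs_tag_size
// sees zero and no payload is counted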
374 
375 // operations on attributes in attribute lists
376 struct lfs_mattr {
377     lfs_tag_t tag;
378     const void *buffer;
379 };
380 
381 struct lfs_diskoff {
382     lfs_block_t block;
383     lfs_off_t off;
384 };
385 
386 #define LFS_MKATTRS(...) \
387     (struct lfs_mattr[]){__VA_ARGS__}, \
388     sizeof((struct lfs_mattr[]){__VA_ARGS__}) / sizeof(struct lfs_mattr)
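// a sketch of how LFS_MKATTRS is consumed: it expands to a compound-literal
// array of lfs_mattr plus its element count, matching the (attrs, attrcount)
// parameter pair taken by lfs_dir_commit and lfs_dir_traverse; a commit of
// two attributes looks roughly like this (id, nlen, and name are
// illustrative, not an exact copy of any call site):
//
//     int err = lfs_dir_commit(lfs, &dir, LFS_MKATTRS(
//             {LFS_MKTAG(LFS_TYPE_CREATE, id, 0), NULL},
//             {LFS_MKTAG(LFS_TYPE_DIR, id, nlen), name}));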
389 
390 // operations on global state
391 static inline void lfs_gstate_xor(lfs_gstate_t *a, const lfs_gstate_t *b) {
392     for (int i = 0; i < 3; i++) {
393         ((uint32_t*)a)[i] ^= ((const uint32_t*)b)[i];
394     }
395 }
396 
397 static inline bool lfs_gstate_iszero(const lfs_gstate_t *a) {
398     for (int i = 0; i < 3; i++) {
399         if (((uint32_t*)a)[i] != 0) {
400             return false;
401         }
402     }
403     return true;
404 }
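// note: the global state ("gstate") is three 32-bit words (a tag plus a
// block pair) that littlefs stores as xor deltas spread across metadata
// commits; xoring every mdir's delta together (see lfs_dir_getgstate below)
// reconstructs the current state, and an all-zero result means there is no
// pending move/orphan/superblock work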
405 
406 #ifndef LFS_READONLY
407 static inline bool lfs_gstate_hasorphans(const lfs_gstate_t *a) {
408     return lfs_tag_size(a->tag);
409 }
410 
411 static inline uint8_t lfs_gstate_getorphans(const lfs_gstate_t *a) {
412     return lfs_tag_size(a->tag) & 0x1ff;
413 }
414 
415 static inline bool lfs_gstate_hasmove(const lfs_gstate_t *a) {
416     return lfs_tag_type1(a->tag);
417 }
418 #endif
419 
420 static inline bool lfs_gstate_needssuperblock(const lfs_gstate_t *a) {
421     return lfs_tag_size(a->tag) >> 9;
422 }
423 
424 static inline bool lfs_gstate_hasmovehere(const lfs_gstate_t *a,
425         const lfs_block_t *pair) {
426     return lfs_tag_type1(a->tag) && lfs_pair_cmp(a->pair, pair) == 0;
427 }
428 
429 static inline void lfs_gstate_fromle32(lfs_gstate_t *a) {
430     a->tag     = lfs_fromle32(a->tag);
431     a->pair[0] = lfs_fromle32(a->pair[0]);
432     a->pair[1] = lfs_fromle32(a->pair[1]);
433 }
434 
435 #ifndef LFS_READONLY
436 static inline void lfs_gstate_tole32(lfs_gstate_t *a) {
437     a->tag     = lfs_tole32(a->tag);
438     a->pair[0] = lfs_tole32(a->pair[0]);
439     a->pair[1] = lfs_tole32(a->pair[1]);
440 }
441 #endif
442 
443 // operations on forward-CRCs used to track erased state
444 struct lfs_fcrc {
445     lfs_size_t size;
446     uint32_t crc;
447 };
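// note: an fcrc records the size and crc of the erased region expected to
// follow the last commit in a metadata block; during fetch (see
// lfs_dir_fetchmatch) that region is re-crc'd, and a match proves the block
// is still erased and can be programmed without another erase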
448 
449 static void lfs_fcrc_fromle32(struct lfs_fcrc *fcrc) {
450     fcrc->size = lfs_fromle32(fcrc->size);
451     fcrc->crc = lfs_fromle32(fcrc->crc);
452 }
453 
454 #ifndef LFS_READONLY
455 static void lfs_fcrc_tole32(struct lfs_fcrc *fcrc) {
456     fcrc->size = lfs_tole32(fcrc->size);
457     fcrc->crc = lfs_tole32(fcrc->crc);
458 }
459 #endif
460 
461 // other endianness operations
462 static void lfs_ctz_fromle32(struct lfs_ctz *ctz) {
463     ctz->head = lfs_fromle32(ctz->head);
464     ctz->size = lfs_fromle32(ctz->size);
465 }
466 
467 #ifndef LFS_READONLY
468 static void lfs_ctz_tole32(struct lfs_ctz *ctz) {
469     ctz->head = lfs_tole32(ctz->head);
470     ctz->size = lfs_tole32(ctz->size);
471 }
472 #endif
473 
474 static inline void lfs_superblock_fromle32(lfs_superblock_t *superblock) {
475     superblock->version     = lfs_fromle32(superblock->version);
476     superblock->block_size  = lfs_fromle32(superblock->block_size);
477     superblock->block_count = lfs_fromle32(superblock->block_count);
478     superblock->name_max    = lfs_fromle32(superblock->name_max);
479     superblock->file_max    = lfs_fromle32(superblock->file_max);
480     superblock->attr_max    = lfs_fromle32(superblock->attr_max);
481 }
482 
483 #ifndef LFS_READONLY
484 static inline void lfs_superblock_tole32(lfs_superblock_t *superblock) {
485     superblock->version     = lfs_tole32(superblock->version);
486     superblock->block_size  = lfs_tole32(superblock->block_size);
487     superblock->block_count = lfs_tole32(superblock->block_count);
488     superblock->name_max    = lfs_tole32(superblock->name_max);
489     superblock->file_max    = lfs_tole32(superblock->file_max);
490     superblock->attr_max    = lfs_tole32(superblock->attr_max);
491 }
492 #endif
493 
494 #ifndef LFS_NO_ASSERT
495 static bool lfs_mlist_isopen(struct lfs_mlist *head,
496         struct lfs_mlist *node) {
497     for (struct lfs_mlist **p = &head; *p; p = &(*p)->next) {
498         if (*p == (struct lfs_mlist*)node) {
499             return true;
500         }
501     }
502 
503     return false;
504 }
505 #endif
506 
507 static void lfs_mlist_remove(lfs_t *lfs, struct lfs_mlist *mlist) {
508     for (struct lfs_mlist **p = &lfs->mlist; *p; p = &(*p)->next) {
509         if (*p == mlist) {
510             *p = (*p)->next;
511             break;
512         }
513     }
514 }
515 
516 static void lfs_mlist_append(lfs_t *lfs, struct lfs_mlist *mlist) {
517     mlist->next = lfs->mlist;
518     lfs->mlist = mlist;
519 }
520 
521 // some other filesystem operations
522 static uint32_t lfs_fs_disk_version(lfs_t *lfs) {
523     (void)lfs;
524 #ifdef LFS_MULTIVERSION
525     if (lfs->cfg->disk_version) {
526         return lfs->cfg->disk_version;
527     } else
528 #endif
529     {
530         return LFS_DISK_VERSION;
531     }
532 }
533 
534 static uint16_t lfs_fs_disk_version_major(lfs_t *lfs) {
535     return 0xffff & (lfs_fs_disk_version(lfs) >> 16);
536 
537 }
538 
539 static uint16_t lfs_fs_disk_version_minor(lfs_t *lfs) {
540     return 0xffff & (lfs_fs_disk_version(lfs) >> 0);
541 }
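// for example, a disk version of 0x00020001 (lfs2.1) splits into major = 2,
// minor = 1; with LFS_MULTIVERSION the cfg->disk_version field can pin an
// older on-disk format such as 0x00020000 (lfs2.0), which is why later code
// (see lfs_dir_fetchmatch) checks the minor version before relying on fcrc
// tags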
542 
543 
544 /// Internal operations predeclared here ///
545 #ifndef LFS_READONLY
546 static int lfs_dir_commit(lfs_t *lfs, lfs_mdir_t *dir,
547         const struct lfs_mattr *attrs, int attrcount);
548 static int lfs_dir_compact(lfs_t *lfs,
549         lfs_mdir_t *dir, const struct lfs_mattr *attrs, int attrcount,
550         lfs_mdir_t *source, uint16_t begin, uint16_t end);
551 static lfs_ssize_t lfs_file_flushedwrite(lfs_t *lfs, lfs_file_t *file,
552         const void *buffer, lfs_size_t size);
553 static lfs_ssize_t lfs_file_write_(lfs_t *lfs, lfs_file_t *file,
554         const void *buffer, lfs_size_t size);
555 static int lfs_file_sync_(lfs_t *lfs, lfs_file_t *file);
556 static int lfs_file_outline(lfs_t *lfs, lfs_file_t *file);
557 static int lfs_file_flush(lfs_t *lfs, lfs_file_t *file);
558 
559 static int lfs_fs_deorphan(lfs_t *lfs, bool powerloss);
560 static int lfs_fs_preporphans(lfs_t *lfs, int8_t orphans);
561 static void lfs_fs_prepmove(lfs_t *lfs,
562         uint16_t id, const lfs_block_t pair[2]);
563 static int lfs_fs_pred(lfs_t *lfs, const lfs_block_t dir[2],
564         lfs_mdir_t *pdir);
565 static lfs_stag_t lfs_fs_parent(lfs_t *lfs, const lfs_block_t dir[2],
566         lfs_mdir_t *parent);
567 static int lfs_fs_forceconsistency(lfs_t *lfs);
568 #endif
569 
570 static void lfs_fs_prepsuperblock(lfs_t *lfs, bool needssuperblock);
571 
572 #ifdef LFS_MIGRATE
573 static int lfs1_traverse(lfs_t *lfs,
574         int (*cb)(void*, lfs_block_t), void *data);
575 #endif
576 
577 static int lfs_dir_rewind_(lfs_t *lfs, lfs_dir_t *dir);
578 
579 static lfs_ssize_t lfs_file_flushedread(lfs_t *lfs, lfs_file_t *file,
580         void *buffer, lfs_size_t size);
581 static lfs_ssize_t lfs_file_read_(lfs_t *lfs, lfs_file_t *file,
582         void *buffer, lfs_size_t size);
583 static int lfs_file_close_(lfs_t *lfs, lfs_file_t *file);
584 static lfs_soff_t lfs_file_size_(lfs_t *lfs, lfs_file_t *file);
585 
586 static lfs_ssize_t lfs_fs_size_(lfs_t *lfs);
587 static int lfs_fs_traverse_(lfs_t *lfs,
588         int (*cb)(void *data, lfs_block_t block), void *data,
589         bool includeorphans);
590 
591 static int lfs_deinit(lfs_t *lfs);
592 static int lfs_unmount_(lfs_t *lfs);
593 
594 
595 /// Block allocator ///
596 
597 // allocations should call this when all allocated blocks are committed to
598 // the filesystem
599 //
600 // after a checkpoint, the block allocator may realloc any untracked blocks
601 static void lfs_alloc_ckpoint(lfs_t *lfs) {
602     lfs->lookahead.ckpoint = lfs->block_count;
603 }
604 
605 // drop the lookahead buffer, this is done during mounting and failed
606 // traversals in order to avoid invalid lookahead state
607 static void lfs_alloc_drop(lfs_t *lfs) {
608     lfs->lookahead.size = 0;
609     lfs->lookahead.next = 0;
610     lfs_alloc_ckpoint(lfs);
611 }
612 
613 #ifndef LFS_READONLY
614 static int lfs_alloc_lookahead(void *p, lfs_block_t block) {
615     lfs_t *lfs = (lfs_t*)p;
616     lfs_block_t off = ((block - lfs->lookahead.start)
617             + lfs->block_count) % lfs->block_count;
618 
619     if (off < lfs->lookahead.size) {
620         lfs->lookahead.buffer[off / 8] |= 1U << (off % 8);
621     }
622 
623     return 0;
624 }
625 #endif
626 
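// a summary of the allocator below: the lookahead buffer is a bitmap over a
// window of blocks starting at lookahead.start; lfs_alloc_scan traverses the
// filesystem and lfs_alloc_lookahead sets a bit for every block still in
// use, so clear bits are allocation candidates; lookahead.ckpoint counts how
// many blocks may still be examined before the filesystem must be declared
// full (LFS_ERR_NOSPC)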
627 #ifndef LFS_READONLY
628 static int lfs_alloc_scan(lfs_t *lfs) {
629     // move lookahead buffer to the first unused block
630     //
631     // note we limit the lookahead buffer to at most the number of blocks
632     // checkpointed, which prevents the math in lfs_alloc from underflowing
633     lfs->lookahead.start = (lfs->lookahead.start + lfs->lookahead.next)
634             % lfs->block_count;
635     lfs->lookahead.next = 0;
636     lfs->lookahead.size = lfs_min(
637             8*lfs->cfg->lookahead_size,
638             lfs->lookahead.ckpoint);
639 
640     // find mask of free blocks from tree
641     memset(lfs->lookahead.buffer, 0, lfs->cfg->lookahead_size);
642     int err = lfs_fs_traverse_(lfs, lfs_alloc_lookahead, lfs, true);
643     if (err) {
644         lfs_alloc_drop(lfs);
645         return err;
646     }
647 
648     return 0;
649 }
650 #endif
651 
652 #ifndef LFS_READONLY
653 static int lfs_alloc(lfs_t *lfs, lfs_block_t *block) {
654     while (true) {
655         // scan our lookahead buffer for free blocks
656         while (lfs->lookahead.next < lfs->lookahead.size) {
657             if (!(lfs->lookahead.buffer[lfs->lookahead.next / 8]
658                     & (1U << (lfs->lookahead.next % 8)))) {
659                 // found a free block
660                 *block = (lfs->lookahead.start + lfs->lookahead.next)
661                         % lfs->block_count;
662 
663                 // eagerly find next free block to maximize how many blocks
664                 // lfs_alloc_ckpoint makes available for scanning
665                 while (true) {
666                     lfs->lookahead.next += 1;
667                     lfs->lookahead.ckpoint -= 1;
668 
669                     if (lfs->lookahead.next >= lfs->lookahead.size
670                             || !(lfs->lookahead.buffer[lfs->lookahead.next / 8]
671                                 & (1U << (lfs->lookahead.next % 8)))) {
672                         return 0;
673                     }
674                 }
675             }
676 
677             lfs->lookahead.next += 1;
678             lfs->lookahead.ckpoint -= 1;
679         }
680 
681         // In order to keep our block allocator from spinning forever when our
682         // filesystem is full, we mark points where there are no in-flight
683         // allocations with a checkpoint before starting a set of allocations.
684         //
685         // If we've looked at all blocks since the last checkpoint, we report
686         // the filesystem as out of storage.
687         //
688         if (lfs->lookahead.ckpoint <= 0) {
689             LFS_ERROR("No more free space 0x%"PRIx32,
690                     (lfs->lookahead.start + lfs->lookahead.next)
691                         % lfs->cfg->block_count);
692             return LFS_ERR_NOSPC;
693         }
694 
695         // No blocks in our lookahead buffer, we need to scan the filesystem for
696         // unused blocks in the next lookahead window.
697         int err = lfs_alloc_scan(lfs);
698         if (err) {
699             return err;
700         }
701     }
702 }
703 #endif
704 
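// a sketch of the calling convention (names as used above): callers
// checkpoint once every previously allocated block is reachable from the
// tree, then allocate blocks one at a time:
//
//     lfs_alloc_ckpoint(lfs);
//     lfs_block_t block;
//     int err = lfs_alloc(lfs, &block);
//     if (err) {
//         return err;  // LFS_ERR_NOSPC once the whole window is exhausted
//     }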
705 /// Metadata pair and directory operations ///
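// a brief orientation for this section: a metadata pair is two blocks that
// back a small log; the block with the newer revision count holds the active
// log, each commit in it is a chain of tags xored with the previous tag and
// sealed by a CCRC tag, and lfs_dir_fetchmatch below scans both blocks to
// recover the most recent valid state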
706 static lfs_stag_t lfs_dir_getslice(lfs_t *lfs, const lfs_mdir_t *dir,
707         lfs_tag_t gmask, lfs_tag_t gtag,
708         lfs_off_t goff, void *gbuffer, lfs_size_t gsize) {
709     lfs_off_t off = dir->off;
710     lfs_tag_t ntag = dir->etag;
711     lfs_stag_t gdiff = 0;
712 
713     // synthetic moves
714     if (lfs_gstate_hasmovehere(&lfs->gdisk, dir->pair) &&
715             lfs_tag_id(gmask) != 0) {
716         if (lfs_tag_id(lfs->gdisk.tag) == lfs_tag_id(gtag)) {
717             return LFS_ERR_NOENT;
718         } else if (lfs_tag_id(lfs->gdisk.tag) < lfs_tag_id(gtag)) {
719             gdiff -= LFS_MKTAG(0, 1, 0);
720         }
721     }
722 
723     // iterate over dir block backwards (for faster lookups)
724     while (off >= sizeof(lfs_tag_t) + lfs_tag_dsize(ntag)) {
725         off -= lfs_tag_dsize(ntag);
726         lfs_tag_t tag = ntag;
727         int err = lfs_bd_read(lfs,
728                 NULL, &lfs->rcache, sizeof(ntag),
729                 dir->pair[0], off, &ntag, sizeof(ntag));
730         if (err) {
731             return err;
732         }
733 
734         ntag = (lfs_frombe32(ntag) ^ tag) & 0x7fffffff;
735 
736         if (lfs_tag_id(gmask) != 0 &&
737                 lfs_tag_type1(tag) == LFS_TYPE_SPLICE &&
738                 lfs_tag_id(tag) <= lfs_tag_id(gtag - gdiff)) {
739             if (tag == (LFS_MKTAG(LFS_TYPE_CREATE, 0, 0) |
740                     (LFS_MKTAG(0, 0x3ff, 0) & (gtag - gdiff)))) {
741                 // found where we were created
742                 return LFS_ERR_NOENT;
743             }
744 
745             // move around splices
746             gdiff += LFS_MKTAG(0, lfs_tag_splice(tag), 0);
747         }
748 
749         if ((gmask & tag) == (gmask & (gtag - gdiff))) {
750             if (lfs_tag_isdelete(tag)) {
751                 return LFS_ERR_NOENT;
752             }
753 
754             lfs_size_t diff = lfs_min(lfs_tag_size(tag), gsize);
755             err = lfs_bd_read(lfs,
756                     NULL, &lfs->rcache, diff,
757                     dir->pair[0], off+sizeof(tag)+goff, gbuffer, diff);
758             if (err) {
759                 return err;
760             }
761 
762             memset((uint8_t*)gbuffer + diff, 0, gsize - diff);
763 
764             return tag + gdiff;
765         }
766     }
767 
768     return LFS_ERR_NOENT;
769 }
770 
771 static lfs_stag_t lfs_dir_get(lfs_t *lfs, const lfs_mdir_t *dir,
772         lfs_tag_t gmask, lfs_tag_t gtag, void *buffer) {
773     return lfs_dir_getslice(lfs, dir,
774             gmask, gtag,
775             0, buffer, lfs_tag_size(gtag));
776 }
777 
778 static int lfs_dir_getread(lfs_t *lfs, const lfs_mdir_t *dir,
779         const lfs_cache_t *pcache, lfs_cache_t *rcache, lfs_size_t hint,
780         lfs_tag_t gmask, lfs_tag_t gtag,
781         lfs_off_t off, void *buffer, lfs_size_t size) {
782     uint8_t *data = buffer;
783     if (off+size > lfs->cfg->block_size) {
784         return LFS_ERR_CORRUPT;
785     }
786 
787     while (size > 0) {
788         lfs_size_t diff = size;
789 
790         if (pcache && pcache->block == LFS_BLOCK_INLINE &&
791                 off < pcache->off + pcache->size) {
792             if (off >= pcache->off) {
793                 // is already in pcache?
794                 diff = lfs_min(diff, pcache->size - (off-pcache->off));
795                 memcpy(data, &pcache->buffer[off-pcache->off], diff);
796 
797                 data += diff;
798                 off += diff;
799                 size -= diff;
800                 continue;
801             }
802 
803             // pcache takes priority
804             diff = lfs_min(diff, pcache->off-off);
805         }
806 
807         if (rcache->block == LFS_BLOCK_INLINE &&
808                 off < rcache->off + rcache->size) {
809             if (off >= rcache->off) {
810                 // is already in rcache?
811                 diff = lfs_min(diff, rcache->size - (off-rcache->off));
812                 memcpy(data, &rcache->buffer[off-rcache->off], diff);
813 
814                 data += diff;
815                 off += diff;
816                 size -= diff;
817                 continue;
818             }
819 
820             // rcache takes priority
821             diff = lfs_min(diff, rcache->off-off);
822         }
823 
824         // load to cache, first condition can no longer fail
825         rcache->block = LFS_BLOCK_INLINE;
826         rcache->off = lfs_aligndown(off, lfs->cfg->read_size);
827         rcache->size = lfs_min(lfs_alignup(off+hint, lfs->cfg->read_size),
828                 lfs->cfg->cache_size);
829         int err = lfs_dir_getslice(lfs, dir, gmask, gtag,
830                 rcache->off, rcache->buffer, rcache->size);
831         if (err < 0) {
832             return err;
833         }
834     }
835 
836     return 0;
837 }
838 
839 #ifndef LFS_READONLY
840 static int lfs_dir_traverse_filter(void *p,
841         lfs_tag_t tag, const void *buffer) {
842     lfs_tag_t *filtertag = p;
843     (void)buffer;
844 
845     // which mask depends on unique bit in tag structure
846     uint32_t mask = (tag & LFS_MKTAG(0x100, 0, 0))
847             ? LFS_MKTAG(0x7ff, 0x3ff, 0)
848             : LFS_MKTAG(0x700, 0x3ff, 0);
849 
850     // check for redundancy
851     if ((mask & tag) == (mask & *filtertag) ||
852             lfs_tag_isdelete(*filtertag) ||
853             (LFS_MKTAG(0x7ff, 0x3ff, 0) & tag) == (
854                 LFS_MKTAG(LFS_TYPE_DELETE, 0, 0) |
855                     (LFS_MKTAG(0, 0x3ff, 0) & *filtertag))) {
856         *filtertag = LFS_MKTAG(LFS_FROM_NOOP, 0, 0);
857         return true;
858     }
859 
860     // check if we need to adjust for created/deleted tags
861     if (lfs_tag_type1(tag) == LFS_TYPE_SPLICE &&
862             lfs_tag_id(tag) <= lfs_tag_id(*filtertag)) {
863         *filtertag += LFS_MKTAG(0, lfs_tag_splice(tag), 0);
864     }
865 
866     return false;
867 }
868 #endif
869 
870 #ifndef LFS_READONLY
871 // maximum recursive depth of lfs_dir_traverse, the deepest call:
872 //
873 // traverse with commit
874 // '-> traverse with move
875 //     '-> traverse with filter
876 //
877 #define LFS_DIR_TRAVERSE_DEPTH 3
878 
879 struct lfs_dir_traverse {
880     const lfs_mdir_t *dir;
881     lfs_off_t off;
882     lfs_tag_t ptag;
883     const struct lfs_mattr *attrs;
884     int attrcount;
885 
886     lfs_tag_t tmask;
887     lfs_tag_t ttag;
888     uint16_t begin;
889     uint16_t end;
890     int16_t diff;
891 
892     int (*cb)(void *data, lfs_tag_t tag, const void *buffer);
893     void *data;
894 
895     lfs_tag_t tag;
896     const void *buffer;
897     struct lfs_diskoff disk;
898 };
899 
900 static int lfs_dir_traverse(lfs_t *lfs,
901         const lfs_mdir_t *dir, lfs_off_t off, lfs_tag_t ptag,
902         const struct lfs_mattr *attrs, int attrcount,
903         lfs_tag_t tmask, lfs_tag_t ttag,
904         uint16_t begin, uint16_t end, int16_t diff,
905         int (*cb)(void *data, lfs_tag_t tag, const void *buffer), void *data) {
906     // This function is inherently recursive, but bounded. To allow tool-based
907     // analysis without unnecessary code-cost we use an explicit stack
908     struct lfs_dir_traverse stack[LFS_DIR_TRAVERSE_DEPTH-1];
909     unsigned sp = 0;
910     int res;
911 
912     // iterate over directory and attrs
913     lfs_tag_t tag;
914     const void *buffer;
915     struct lfs_diskoff disk = {0};
916     while (true) {
917         {
918             if (off+lfs_tag_dsize(ptag) < dir->off) {
919                 off += lfs_tag_dsize(ptag);
920                 int err = lfs_bd_read(lfs,
921                         NULL, &lfs->rcache, sizeof(tag),
922                         dir->pair[0], off, &tag, sizeof(tag));
923                 if (err) {
924                     return err;
925                 }
926 
927                 tag = (lfs_frombe32(tag) ^ ptag) | 0x80000000;
928                 disk.block = dir->pair[0];
929                 disk.off = off+sizeof(lfs_tag_t);
930                 buffer = &disk;
931                 ptag = tag;
932             } else if (attrcount > 0) {
933                 tag = attrs[0].tag;
934                 buffer = attrs[0].buffer;
935                 attrs += 1;
936                 attrcount -= 1;
937             } else {
938                 // finished traversal, pop from stack?
939                 res = 0;
940                 break;
941             }
942 
943             // do we need to filter?
944             lfs_tag_t mask = LFS_MKTAG(0x7ff, 0, 0);
945             if ((mask & tmask & tag) != (mask & tmask & ttag)) {
946                 continue;
947             }
948 
949             if (lfs_tag_id(tmask) != 0) {
950                 LFS_ASSERT(sp < LFS_DIR_TRAVERSE_DEPTH);
951                 // recurse, scan for duplicates, and update tag based on
952                 // creates/deletes
953                 stack[sp] = (struct lfs_dir_traverse){
954                     .dir        = dir,
955                     .off        = off,
956                     .ptag       = ptag,
957                     .attrs      = attrs,
958                     .attrcount  = attrcount,
959                     .tmask      = tmask,
960                     .ttag       = ttag,
961                     .begin      = begin,
962                     .end        = end,
963                     .diff       = diff,
964                     .cb         = cb,
965                     .data       = data,
966                     .tag        = tag,
967                     .buffer     = buffer,
968                     .disk       = disk,
969                 };
970                 sp += 1;
971 
972                 tmask = 0;
973                 ttag = 0;
974                 begin = 0;
975                 end = 0;
976                 diff = 0;
977                 cb = lfs_dir_traverse_filter;
978                 data = &stack[sp-1].tag;
979                 continue;
980             }
981         }
982 
983 popped:
984         // in filter range?
985         if (lfs_tag_id(tmask) != 0 &&
986                 !(lfs_tag_id(tag) >= begin && lfs_tag_id(tag) < end)) {
987             continue;
988         }
989 
990         // handle special cases for mcu-side operations
991         if (lfs_tag_type3(tag) == LFS_FROM_NOOP) {
992             // do nothing
993         } else if (lfs_tag_type3(tag) == LFS_FROM_MOVE) {
994             // Without this condition, lfs_dir_traverse can exhibit an
995             // extremely expensive O(n^3) cascade of nested loops when renaming.
996             // This happens because lfs_dir_traverse tries to filter tags by
997             // the tags in the source directory, triggering a second
998             // lfs_dir_traverse with its own filter operation.
999             //
1000             // traverse with commit
1001             // '-> traverse with filter
1002             //     '-> traverse with move
1003             //         '-> traverse with filter
1004             //
1005             // However we don't actually care about filtering the second set of
1006             // tags, since duplicate tags have no effect when filtering.
1007             //
1008             // This check skips this unnecessary recursive filtering explicitly,
1009             // reducing this runtime from O(n^3) to O(n^2).
1010             if (cb == lfs_dir_traverse_filter) {
1011                 continue;
1012             }
1013 
1014             // recurse into move
1015             stack[sp] = (struct lfs_dir_traverse){
1016                 .dir        = dir,
1017                 .off        = off,
1018                 .ptag       = ptag,
1019                 .attrs      = attrs,
1020                 .attrcount  = attrcount,
1021                 .tmask      = tmask,
1022                 .ttag       = ttag,
1023                 .begin      = begin,
1024                 .end        = end,
1025                 .diff       = diff,
1026                 .cb         = cb,
1027                 .data       = data,
1028                 .tag        = LFS_MKTAG(LFS_FROM_NOOP, 0, 0),
1029             };
1030             sp += 1;
1031 
1032             uint16_t fromid = lfs_tag_size(tag);
1033             uint16_t toid = lfs_tag_id(tag);
1034             dir = buffer;
1035             off = 0;
1036             ptag = 0xffffffff;
1037             attrs = NULL;
1038             attrcount = 0;
1039             tmask = LFS_MKTAG(0x600, 0x3ff, 0);
1040             ttag = LFS_MKTAG(LFS_TYPE_STRUCT, 0, 0);
1041             begin = fromid;
1042             end = fromid+1;
1043             diff = toid-fromid+diff;
1044         } else if (lfs_tag_type3(tag) == LFS_FROM_USERATTRS) {
1045             for (unsigned i = 0; i < lfs_tag_size(tag); i++) {
1046                 const struct lfs_attr *a = buffer;
1047                 res = cb(data, LFS_MKTAG(LFS_TYPE_USERATTR + a[i].type,
1048                         lfs_tag_id(tag) + diff, a[i].size), a[i].buffer);
1049                 if (res < 0) {
1050                     return res;
1051                 }
1052 
1053                 if (res) {
1054                     break;
1055                 }
1056             }
1057         } else {
1058             res = cb(data, tag + LFS_MKTAG(0, diff, 0), buffer);
1059             if (res < 0) {
1060                 return res;
1061             }
1062 
1063             if (res) {
1064                 break;
1065             }
1066         }
1067     }
1068 
1069     if (sp > 0) {
1070         // pop from the stack and return, fortunately all pops share
1071         // a destination
1072         dir         = stack[sp-1].dir;
1073         off         = stack[sp-1].off;
1074         ptag        = stack[sp-1].ptag;
1075         attrs       = stack[sp-1].attrs;
1076         attrcount   = stack[sp-1].attrcount;
1077         tmask       = stack[sp-1].tmask;
1078         ttag        = stack[sp-1].ttag;
1079         begin       = stack[sp-1].begin;
1080         end         = stack[sp-1].end;
1081         diff        = stack[sp-1].diff;
1082         cb          = stack[sp-1].cb;
1083         data        = stack[sp-1].data;
1084         tag         = stack[sp-1].tag;
1085         buffer      = stack[sp-1].buffer;
1086         disk        = stack[sp-1].disk;
1087         sp -= 1;
1088         goto popped;
1089     } else {
1090         return res;
1091     }
1092 }
1093 #endif
1094 
1095 static lfs_stag_t lfs_dir_fetchmatch(lfs_t *lfs,
1096         lfs_mdir_t *dir, const lfs_block_t pair[2],
1097         lfs_tag_t fmask, lfs_tag_t ftag, uint16_t *id,
1098         int (*cb)(void *data, lfs_tag_t tag, const void *buffer), void *data) {
1099     // we can find a tag very efficiently during a fetch, since we're already
1100     // scanning the entire directory
1101     lfs_stag_t besttag = -1;
1102 
1103     // if either block address is invalid we return LFS_ERR_CORRUPT here,
1104     // otherwise later writes to the pair could fail
1105     if (lfs->block_count
1106             && (pair[0] >= lfs->block_count || pair[1] >= lfs->block_count)) {
1107         return LFS_ERR_CORRUPT;
1108     }
1109 
1110     // find the block with the most recent revision
1111     uint32_t revs[2] = {0, 0};
1112     int r = 0;
1113     for (int i = 0; i < 2; i++) {
1114         int err = lfs_bd_read(lfs,
1115                 NULL, &lfs->rcache, sizeof(revs[i]),
1116                 pair[i], 0, &revs[i], sizeof(revs[i]));
1117         revs[i] = lfs_fromle32(revs[i]);
1118         if (err && err != LFS_ERR_CORRUPT) {
1119             return err;
1120         }
1121 
1122         if (err != LFS_ERR_CORRUPT &&
1123                 lfs_scmp(revs[i], revs[(i+1)%2]) > 0) {
1124             r = i;
1125         }
1126     }
1127 
1128     dir->pair[0] = pair[(r+0)%2];
1129     dir->pair[1] = pair[(r+1)%2];
1130     dir->rev = revs[(r+0)%2];
1131     dir->off = 0; // nonzero = found some commits
1132 
1133     // now scan tags to fetch the actual dir and find possible match
1134     for (int i = 0; i < 2; i++) {
1135         lfs_off_t off = 0;
1136         lfs_tag_t ptag = 0xffffffff;
1137 
1138         uint16_t tempcount = 0;
1139         lfs_block_t temptail[2] = {LFS_BLOCK_NULL, LFS_BLOCK_NULL};
1140         bool tempsplit = false;
1141         lfs_stag_t tempbesttag = besttag;
1142 
1143         // assume not erased until proven otherwise
1144         bool maybeerased = false;
1145         bool hasfcrc = false;
1146         struct lfs_fcrc fcrc;
1147 
1148         dir->rev = lfs_tole32(dir->rev);
1149         uint32_t crc = lfs_crc(0xffffffff, &dir->rev, sizeof(dir->rev));
1150         dir->rev = lfs_fromle32(dir->rev);
1151 
1152         while (true) {
1153             // extract next tag
1154             lfs_tag_t tag;
1155             off += lfs_tag_dsize(ptag);
1156             int err = lfs_bd_read(lfs,
1157                     NULL, &lfs->rcache, lfs->cfg->block_size,
1158                     dir->pair[0], off, &tag, sizeof(tag));
1159             if (err) {
1160                 if (err == LFS_ERR_CORRUPT) {
1161                     // can't continue?
1162                     break;
1163                 }
1164                 return err;
1165             }
1166 
1167             crc = lfs_crc(crc, &tag, sizeof(tag));
1168             tag = lfs_frombe32(tag) ^ ptag;
1169 
1170             // next commit not yet programmed?
1171             if (!lfs_tag_isvalid(tag)) {
1172                 // we only might be erased if the last tag was a crc
1173                 maybeerased = (lfs_tag_type2(ptag) == LFS_TYPE_CCRC);
1174                 break;
1175             // out of range?
1176             } else if (off + lfs_tag_dsize(tag) > lfs->cfg->block_size) {
1177                 break;
1178             }
1179 
1180             ptag = tag;
1181 
1182             if (lfs_tag_type2(tag) == LFS_TYPE_CCRC) {
1183                 // check the crc attr
1184                 uint32_t dcrc;
1185                 err = lfs_bd_read(lfs,
1186                         NULL, &lfs->rcache, lfs->cfg->block_size,
1187                         dir->pair[0], off+sizeof(tag), &dcrc, sizeof(dcrc));
1188                 if (err) {
1189                     if (err == LFS_ERR_CORRUPT) {
1190                         break;
1191                     }
1192                     return err;
1193                 }
1194                 dcrc = lfs_fromle32(dcrc);
1195 
1196                 if (crc != dcrc) {
1197                     break;
1198                 }
1199 
1200                 // reset the next bit if we need to
1201                 ptag ^= (lfs_tag_t)(lfs_tag_chunk(tag) & 1U) << 31;
1202 
1203                 // toss our crc into the filesystem seed for
1204                 // pseudorandom numbers, note we use another crc here
1205                 // as a collection function because it is sufficiently
1206                 // random and convenient
1207                 lfs->seed = lfs_crc(lfs->seed, &crc, sizeof(crc));
1208 
1209                 // update with what's found so far
1210                 besttag = tempbesttag;
1211                 dir->off = off + lfs_tag_dsize(tag);
1212                 dir->etag = ptag;
1213                 dir->count = tempcount;
1214                 dir->tail[0] = temptail[0];
1215                 dir->tail[1] = temptail[1];
1216                 dir->split = tempsplit;
1217 
1218                 // reset crc, hasfcrc
1219                 crc = 0xffffffff;
1220                 continue;
1221             }
1222 
1223             // crc the entry first, hopefully leaving it in the cache
1224             err = lfs_bd_crc(lfs,
1225                     NULL, &lfs->rcache, lfs->cfg->block_size,
1226                     dir->pair[0], off+sizeof(tag),
1227                     lfs_tag_dsize(tag)-sizeof(tag), &crc);
1228             if (err) {
1229                 if (err == LFS_ERR_CORRUPT) {
1230                     break;
1231                 }
1232                 return err;
1233             }
1234 
1235             // directory modification tags?
1236             if (lfs_tag_type1(tag) == LFS_TYPE_NAME) {
1237                 // increase count of files if necessary
1238                 if (lfs_tag_id(tag) >= tempcount) {
1239                     tempcount = lfs_tag_id(tag) + 1;
1240                 }
1241             } else if (lfs_tag_type1(tag) == LFS_TYPE_SPLICE) {
1242                 tempcount += lfs_tag_splice(tag);
1243 
1244                 if (tag == (LFS_MKTAG(LFS_TYPE_DELETE, 0, 0) |
1245                         (LFS_MKTAG(0, 0x3ff, 0) & tempbesttag))) {
1246                     tempbesttag |= 0x80000000;
1247                 } else if (tempbesttag != -1 &&
1248                         lfs_tag_id(tag) <= lfs_tag_id(tempbesttag)) {
1249                     tempbesttag += LFS_MKTAG(0, lfs_tag_splice(tag), 0);
1250                 }
1251             } else if (lfs_tag_type1(tag) == LFS_TYPE_TAIL) {
1252                 tempsplit = (lfs_tag_chunk(tag) & 1);
1253 
1254                 err = lfs_bd_read(lfs,
1255                         NULL, &lfs->rcache, lfs->cfg->block_size,
1256                         dir->pair[0], off+sizeof(tag), &temptail, 8);
1257                 if (err) {
1258                     if (err == LFS_ERR_CORRUPT) {
1259                         break;
1260                     }
1261                     return err;
1262                 }
1263                 lfs_pair_fromle32(temptail);
1264             } else if (lfs_tag_type3(tag) == LFS_TYPE_FCRC) {
1265                 err = lfs_bd_read(lfs,
1266                         NULL, &lfs->rcache, lfs->cfg->block_size,
1267                         dir->pair[0], off+sizeof(tag),
1268                         &fcrc, sizeof(fcrc));
1269                 if (err) {
1270                     if (err == LFS_ERR_CORRUPT) {
1271                         break;
1272                     }
1273                 }
1274 
1275                 lfs_fcrc_fromle32(&fcrc);
1276                 hasfcrc = true;
1277             }
1278 
1279             // found a match for our fetcher?
1280             if ((fmask & tag) == (fmask & ftag)) {
1281                 int res = cb(data, tag, &(struct lfs_diskoff){
1282                         dir->pair[0], off+sizeof(tag)});
1283                 if (res < 0) {
1284                     if (res == LFS_ERR_CORRUPT) {
1285                         break;
1286                     }
1287                     return res;
1288                 }
1289 
1290                 if (res == LFS_CMP_EQ) {
1291                     // found a match
1292                     tempbesttag = tag;
1293                 } else if ((LFS_MKTAG(0x7ff, 0x3ff, 0) & tag) ==
1294                         (LFS_MKTAG(0x7ff, 0x3ff, 0) & tempbesttag)) {
1295                     // found an identical tag, but contents didn't match
1296                     // this must mean that our besttag has been overwritten
1297                     tempbesttag = -1;
1298                 } else if (res == LFS_CMP_GT &&
1299                         lfs_tag_id(tag) <= lfs_tag_id(tempbesttag)) {
1300                     // found a greater match, keep track to keep things sorted
1301                     tempbesttag = tag | 0x80000000;
1302                 }
1303             }
1304         }
1305 
1306         // found no valid commits?
1307         if (dir->off == 0) {
1308             // try the other block?
1309             lfs_pair_swap(dir->pair);
1310             dir->rev = revs[(r+1)%2];
1311             continue;
1312         }
1313 
1314         // did we end on a valid commit? we may have an erased block
1315         dir->erased = false;
1316         if (maybeerased && dir->off % lfs->cfg->prog_size == 0) {
1317         #ifdef LFS_MULTIVERSION
1318             // note versions < lfs2.1 did not have fcrc tags, if
1319             // we're < lfs2.1 treat missing fcrc as erased data
1320             //
1321             // we don't strictly need to do this, but otherwise writing
1322             // to lfs2.0 disks becomes very inefficient
1323             if (lfs_fs_disk_version(lfs) < 0x00020001) {
1324                 dir->erased = true;
1325 
1326             } else
1327         #endif
1328             if (hasfcrc) {
1329                 // check for an fcrc matching the next prog's erased state, if
1330                 // this failed most likely a previous prog was interrupted, we
1331                 // need a new erase
1332                 uint32_t fcrc_ = 0xffffffff;
1333                 int err = lfs_bd_crc(lfs,
1334                         NULL, &lfs->rcache, lfs->cfg->block_size,
1335                         dir->pair[0], dir->off, fcrc.size, &fcrc_);
1336                 if (err && err != LFS_ERR_CORRUPT) {
1337                     return err;
1338                 }
1339 
1340                 // found beginning of erased part?
1341                 dir->erased = (fcrc_ == fcrc.crc);
1342             }
1343         }
1344 
1345         // synthetic move
1346         if (lfs_gstate_hasmovehere(&lfs->gdisk, dir->pair)) {
1347             if (lfs_tag_id(lfs->gdisk.tag) == lfs_tag_id(besttag)) {
1348                 besttag |= 0x80000000;
1349             } else if (besttag != -1 &&
1350                     lfs_tag_id(lfs->gdisk.tag) < lfs_tag_id(besttag)) {
1351                 besttag -= LFS_MKTAG(0, 1, 0);
1352             }
1353         }
1354 
1355         // found tag? or found best id?
1356         if (id) {
1357             *id = lfs_min(lfs_tag_id(besttag), dir->count);
1358         }
1359 
1360         if (lfs_tag_isvalid(besttag)) {
1361             return besttag;
1362         } else if (lfs_tag_id(besttag) < dir->count) {
1363             return LFS_ERR_NOENT;
1364         } else {
1365             return 0;
1366         }
1367     }
1368 
1369     LFS_ERROR("Corrupted dir pair at {0x%"PRIx32", 0x%"PRIx32"}",
1370             dir->pair[0], dir->pair[1]);
1371     return LFS_ERR_CORRUPT;
1372 }
1373 
1374 static int lfs_dir_fetch(lfs_t *lfs,
1375         lfs_mdir_t *dir, const lfs_block_t pair[2]) {
1376     // note, mask=-1, tag=-1 can never match a tag since this
1377     // pattern has the invalid bit set
1378     return (int)lfs_dir_fetchmatch(lfs, dir, pair,
1379             (lfs_tag_t)-1, (lfs_tag_t)-1, NULL, NULL, NULL);
1380 }
1381 
1382 static int lfs_dir_getgstate(lfs_t *lfs, const lfs_mdir_t *dir,
1383         lfs_gstate_t *gstate) {
1384     lfs_gstate_t temp;
1385     lfs_stag_t res = lfs_dir_get(lfs, dir, LFS_MKTAG(0x7ff, 0, 0),
1386             LFS_MKTAG(LFS_TYPE_MOVESTATE, 0, sizeof(temp)), &temp);
1387     if (res < 0 && res != LFS_ERR_NOENT) {
1388         return res;
1389     }
1390 
1391     if (res != LFS_ERR_NOENT) {
1392         // xor together to find resulting gstate
1393         lfs_gstate_fromle32(&temp);
1394         lfs_gstate_xor(gstate, &temp);
1395     }
1396 
1397     return 0;
1398 }
1399 
1400 static int lfs_dir_getinfo(lfs_t *lfs, lfs_mdir_t *dir,
1401         uint16_t id, struct lfs_info *info) {
1402     if (id == 0x3ff) {
1403         // special case for root
1404         strcpy(info->name, "/");
1405         info->type = LFS_TYPE_DIR;
1406         return 0;
1407     }
1408 
1409     lfs_stag_t tag = lfs_dir_get(lfs, dir, LFS_MKTAG(0x780, 0x3ff, 0),
1410             LFS_MKTAG(LFS_TYPE_NAME, id, lfs->name_max+1), info->name);
1411     if (tag < 0) {
1412         return (int)tag;
1413     }
1414 
1415     info->type = lfs_tag_type3(tag);
1416 
1417     struct lfs_ctz ctz;
1418     tag = lfs_dir_get(lfs, dir, LFS_MKTAG(0x700, 0x3ff, 0),
1419             LFS_MKTAG(LFS_TYPE_STRUCT, id, sizeof(ctz)), &ctz);
1420     if (tag < 0) {
1421         return (int)tag;
1422     }
1423     lfs_ctz_fromle32(&ctz);
1424 
1425     if (lfs_tag_type3(tag) == LFS_TYPE_CTZSTRUCT) {
1426         info->size = ctz.size;
1427     } else if (lfs_tag_type3(tag) == LFS_TYPE_INLINESTRUCT) {
1428         info->size = lfs_tag_size(tag);
1429     }
1430 
1431     return 0;
1432 }
1433 
1434 struct lfs_dir_find_match {
1435     lfs_t *lfs;
1436     const void *name;
1437     lfs_size_t size;
1438 };
1439 
1440 static int lfs_dir_find_match(void *data,
1441         lfs_tag_t tag, const void *buffer) {
1442     struct lfs_dir_find_match *name = data;
1443     lfs_t *lfs = name->lfs;
1444     const struct lfs_diskoff *disk = buffer;
1445 
1446     // compare with disk
1447     lfs_size_t diff = lfs_min(name->size, lfs_tag_size(tag));
1448     int res = lfs_bd_cmp(lfs,
1449             NULL, &lfs->rcache, diff,
1450             disk->block, disk->off, name->name, diff);
1451     if (res != LFS_CMP_EQ) {
1452         return res;
1453     }
1454 
1455     // only equal if our size is still the same
1456     if (name->size != lfs_tag_size(tag)) {
1457         return (name->size < lfs_tag_size(tag)) ? LFS_CMP_LT : LFS_CMP_GT;
1458     }
1459 
1460     // found a match!
1461     return LFS_CMP_EQ;
1462 }
1463 
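// a worked example for the path logic below: for the path "a/./b/../c",
// "." is skipped outright, "b" is cancelled by the ".." that follows it,
// and only "a" and "c" are actually looked up on disk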
1464 static lfs_stag_t lfs_dir_find(lfs_t *lfs, lfs_mdir_t *dir,
1465         const char **path, uint16_t *id) {
1466     // we reduce path to a single name if we can find it
1467     const char *name = *path;
1468     if (id) {
1469         *id = 0x3ff;
1470     }
1471 
1472     // default to root dir
1473     lfs_stag_t tag = LFS_MKTAG(LFS_TYPE_DIR, 0x3ff, 0);
1474     dir->tail[0] = lfs->root[0];
1475     dir->tail[1] = lfs->root[1];
1476 
1477     while (true) {
1478 nextname:
1479         // skip slashes
1480         name += strspn(name, "/");
1481         lfs_size_t namelen = strcspn(name, "/");
1482 
1483         // skip '.' and root '..'
1484         if ((namelen == 1 && memcmp(name, ".", 1) == 0) ||
1485             (namelen == 2 && memcmp(name, "..", 2) == 0)) {
1486             name += namelen;
1487             goto nextname;
1488         }
1489 
1490         // skip if matched by '..' in name
1491         const char *suffix = name + namelen;
1492         lfs_size_t sufflen;
1493         int depth = 1;
1494         while (true) {
1495             suffix += strspn(suffix, "/");
1496             sufflen = strcspn(suffix, "/");
1497             if (sufflen == 0) {
1498                 break;
1499             }
1500 
1501             if (sufflen == 2 && memcmp(suffix, "..", 2) == 0) {
1502                 depth -= 1;
1503                 if (depth == 0) {
1504                     name = suffix + sufflen;
1505                     goto nextname;
1506                 }
1507             } else {
1508                 depth += 1;
1509             }
1510 
1511             suffix += sufflen;
1512         }
1513 
1514         // found path
1515         if (name[0] == '\0') {
1516             return tag;
1517         }
1518 
1519         // update what we've found so far
1520         *path = name;
1521 
1522         // only continue if we hit a directory
1523         if (lfs_tag_type3(tag) != LFS_TYPE_DIR) {
1524             return LFS_ERR_NOTDIR;
1525         }
1526 
1527         // grab the entry data
1528         if (lfs_tag_id(tag) != 0x3ff) {
1529             lfs_stag_t res = lfs_dir_get(lfs, dir, LFS_MKTAG(0x700, 0x3ff, 0),
1530                     LFS_MKTAG(LFS_TYPE_STRUCT, lfs_tag_id(tag), 8), dir->tail);
1531             if (res < 0) {
1532                 return res;
1533             }
1534             lfs_pair_fromle32(dir->tail);
1535         }
1536 
1537         // find entry matching name
1538         while (true) {
1539             tag = lfs_dir_fetchmatch(lfs, dir, dir->tail,
1540                     LFS_MKTAG(0x780, 0, 0),
1541                     LFS_MKTAG(LFS_TYPE_NAME, 0, namelen),
1542                      // are we last name?
1543                     (strchr(name, '/') == NULL) ? id : NULL,
1544                     lfs_dir_find_match, &(struct lfs_dir_find_match){
1545                         lfs, name, namelen});
1546             if (tag < 0) {
1547                 return tag;
1548             }
1549 
1550             if (tag) {
1551                 break;
1552             }
1553 
1554             if (!dir->split) {
1555                 return LFS_ERR_NOENT;
1556             }
1557         }
1558 
1559         // to next name
1560         name += namelen;
1561     }
1562 }
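// As a worked example of the name scan above: resolving "a/./b/../c" only
// looks up two entries, since "." is skipped outright and "b" is skipped
// because the trailing ".." cancels it, leaving the walk to fetch "a" and
// then "c" inside it.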
1563 
1564 // commit logic
1565 struct lfs_commit {
1566     lfs_block_t block;
1567     lfs_off_t off;
1568     lfs_tag_t ptag;
1569     uint32_t crc;
1570 
1571     lfs_off_t begin;
1572     lfs_off_t end;
1573 };
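// A commit is built up incrementally against this struct: attributes are
// appended with lfs_dir_commitattr below and the commit is sealed with
// lfs_dir_commitcrc. A minimal sketch, assuming a commit struct already set
// up as in lfs_dir_compact further down and a hypothetical le32 block pair
// named pair:
//
//     int err = lfs_dir_commitattr(lfs, &commit,
//             LFS_MKTAG(LFS_TYPE_SOFTTAIL, 0x3ff, 8), pair);
//     if (!err) {
//         err = lfs_dir_commitcrc(lfs, &commit);
//     }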
1574 
1575 #ifndef LFS_READONLY
1576 static int lfs_dir_commitprog(lfs_t *lfs, struct lfs_commit *commit,
1577         const void *buffer, lfs_size_t size) {
1578     int err = lfs_bd_prog(lfs,
1579             &lfs->pcache, &lfs->rcache, false,
1580             commit->block, commit->off,
1581             (const uint8_t*)buffer, size);
1582     if (err) {
1583         return err;
1584     }
1585 
1586     commit->crc = lfs_crc(commit->crc, buffer, size);
1587     commit->off += size;
1588     return 0;
1589 }
1590 #endif
1591 
1592 #ifndef LFS_READONLY
1593 static int lfs_dir_commitattr(lfs_t *lfs, struct lfs_commit *commit,
1594         lfs_tag_t tag, const void *buffer) {
1595     // check if we fit
1596     lfs_size_t dsize = lfs_tag_dsize(tag);
1597     if (commit->off + dsize > commit->end) {
1598         return LFS_ERR_NOSPC;
1599     }
1600 
1601     // write out tag
1602     lfs_tag_t ntag = lfs_tobe32((tag & 0x7fffffff) ^ commit->ptag);
1603     int err = lfs_dir_commitprog(lfs, commit, &ntag, sizeof(ntag));
1604     if (err) {
1605         return err;
1606     }
1607 
1608     if (!(tag & 0x80000000)) {
1609         // from memory
1610         err = lfs_dir_commitprog(lfs, commit, buffer, dsize-sizeof(tag));
1611         if (err) {
1612             return err;
1613         }
1614     } else {
1615         // from disk
1616         const struct lfs_diskoff *disk = buffer;
1617         for (lfs_off_t i = 0; i < dsize-sizeof(tag); i++) {
1618             // rely on caching to make this efficient
1619             uint8_t dat;
1620             err = lfs_bd_read(lfs,
1621                     NULL, &lfs->rcache, dsize-sizeof(tag)-i,
1622                     disk->block, disk->off+i, &dat, 1);
1623             if (err) {
1624                 return err;
1625             }
1626 
1627             err = lfs_dir_commitprog(lfs, commit, &dat, 1);
1628             if (err) {
1629                 return err;
1630             }
1631         }
1632     }
1633 
1634     commit->ptag = tag & 0x7fffffff;
1635     return 0;
1636 }
1637 #endif
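// Note that each tag is written xored with the previous tag (ptag) and in
// big-endian order, so fetching walks the same chain in reverse: starting
// from an all-ones ptag for the first commit in a block, each on-disk tag
// word is xored with the tag that preceded it to recover the original tag.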
1638 
1639 #ifndef LFS_READONLY
1640 
1641 static int lfs_dir_commitcrc(lfs_t *lfs, struct lfs_commit *commit) {
1642     // align to program units
1643     //
1644     // this gets a bit complex as we have two types of crcs:
1645     // - 5-word crc with fcrc to check following prog (middle of block)
1646     // - 2-word crc with no following prog (end of block)
1647     const lfs_off_t end = lfs_alignup(
1648             lfs_min(commit->off + 5*sizeof(uint32_t), lfs->cfg->block_size),
1649             lfs->cfg->prog_size);
1650 
1651     lfs_off_t off1 = 0;
1652     uint32_t crc1 = 0;
1653 
1654     // create crc tags to fill up remainder of commit, note that
1655     // padding is not crced, which lets fetches skip padding but
1656     // makes committing a bit more complicated
1657     while (commit->off < end) {
1658         lfs_off_t noff = (
1659                 lfs_min(end - (commit->off+sizeof(lfs_tag_t)), 0x3fe)
1660                 + (commit->off+sizeof(lfs_tag_t)));
1661         // too large for crc tag? need padding commits
1662         if (noff < end) {
1663             noff = lfs_min(noff, end - 5*sizeof(uint32_t));
1664         }
1665 
1666         // space for fcrc?
1667         uint8_t eperturb = (uint8_t)-1;
1668         if (noff >= end && noff <= lfs->cfg->block_size - lfs->cfg->prog_size) {
1669             // first read the leading byte, this always contains a bit
1670             // we can perturb to avoid writes that don't change the fcrc
1671             int err = lfs_bd_read(lfs,
1672                     NULL, &lfs->rcache, lfs->cfg->prog_size,
1673                     commit->block, noff, &eperturb, 1);
1674             if (err && err != LFS_ERR_CORRUPT) {
1675                 return err;
1676             }
1677 
1678         #ifdef LFS_MULTIVERSION
1679             // unfortunately fcrcs break mdir fetching < lfs2.1, so only write
1680             // these if we're a >= lfs2.1 filesystem
1681             if (lfs_fs_disk_version(lfs) <= 0x00020000) {
1682                 // don't write fcrc
1683             } else
1684         #endif
1685             {
1686                 // find the expected fcrc, don't bother avoiding a reread
1687                 // of the eperturb, it should still be in our cache
1688                 struct lfs_fcrc fcrc = {
1689                     .size = lfs->cfg->prog_size,
1690                     .crc = 0xffffffff
1691                 };
1692                 err = lfs_bd_crc(lfs,
1693                         NULL, &lfs->rcache, lfs->cfg->prog_size,
1694                         commit->block, noff, fcrc.size, &fcrc.crc);
1695                 if (err && err != LFS_ERR_CORRUPT) {
1696                     return err;
1697                 }
1698 
1699                 lfs_fcrc_tole32(&fcrc);
1700                 err = lfs_dir_commitattr(lfs, commit,
1701                         LFS_MKTAG(LFS_TYPE_FCRC, 0x3ff, sizeof(struct lfs_fcrc)),
1702                         &fcrc);
1703                 if (err) {
1704                     return err;
1705                 }
1706             }
1707         }
1708 
1709         // build commit crc
1710         struct {
1711             lfs_tag_t tag;
1712             uint32_t crc;
1713         } ccrc;
1714         lfs_tag_t ntag = LFS_MKTAG(
1715                 LFS_TYPE_CCRC + (((uint8_t)~eperturb) >> 7), 0x3ff,
1716                 noff - (commit->off+sizeof(lfs_tag_t)));
1717         ccrc.tag = lfs_tobe32(ntag ^ commit->ptag);
1718         commit->crc = lfs_crc(commit->crc, &ccrc.tag, sizeof(lfs_tag_t));
1719         ccrc.crc = lfs_tole32(commit->crc);
1720 
1721         int err = lfs_bd_prog(lfs,
1722                 &lfs->pcache, &lfs->rcache, false,
1723                 commit->block, commit->off, &ccrc, sizeof(ccrc));
1724         if (err) {
1725             return err;
1726         }
1727 
1728         // keep track of non-padding checksum to verify
1729         if (off1 == 0) {
1730             off1 = commit->off + sizeof(lfs_tag_t);
1731             crc1 = commit->crc;
1732         }
1733 
1734         commit->off = noff;
1735         // perturb valid bit?
1736         commit->ptag = ntag ^ ((0x80UL & ~eperturb) << 24);
1737         // reset crc for next commit
1738         commit->crc = 0xffffffff;
1739 
1740         // manually flush here since we don't prog the padding, this confuses
1741         // the caching layer
1742         if (noff >= end || noff >= lfs->pcache.off + lfs->cfg->cache_size) {
1743             // flush buffers
1744             int err = lfs_bd_sync(lfs, &lfs->pcache, &lfs->rcache, false);
1745             if (err) {
1746                 return err;
1747             }
1748         }
1749     }
1750 
1751     // successful commit, check checksums to make sure
1752     //
1753     // note that we don't need to check padding commits, worst
1754     // case if they are corrupted we would have had to compact anyways
1755     lfs_off_t off = commit->begin;
1756     uint32_t crc = 0xffffffff;
1757     int err = lfs_bd_crc(lfs,
1758             NULL, &lfs->rcache, off1+sizeof(uint32_t),
1759             commit->block, off, off1-off, &crc);
1760     if (err) {
1761         return err;
1762     }
1763 
1764     // check non-padding commits against known crc
1765     if (crc != crc1) {
1766         return LFS_ERR_CORRUPT;
1767     }
1768 
1769     // make sure to check crc in case we happen to pick
1770     // up an unrelated crc (frozen block?)
1771     err = lfs_bd_crc(lfs,
1772             NULL, &lfs->rcache, sizeof(uint32_t),
1773             commit->block, off1, sizeof(uint32_t), &crc);
1774     if (err) {
1775         return err;
1776     }
1777 
1778     if (crc != 0) {
1779         return LFS_ERR_CORRUPT;
1780     }
1781 
1782     return 0;
1783 }
1784 #endif
1785 
1786 #ifndef LFS_READONLY
1787 static int lfs_dir_alloc(lfs_t *lfs, lfs_mdir_t *dir) {
1788     // allocate pair of dir blocks (backwards, so we write block 1 first)
1789     for (int i = 0; i < 2; i++) {
1790         int err = lfs_alloc(lfs, &dir->pair[(i+1)%2]);
1791         if (err) {
1792             return err;
1793         }
1794     }
1795 
1796     // zero for reproducibility in case initial block is unreadable
1797     dir->rev = 0;
1798 
1799     // rather than clobbering one of the blocks we just pretend
1800     // the revision may be valid
1801     int err = lfs_bd_read(lfs,
1802             NULL, &lfs->rcache, sizeof(dir->rev),
1803             dir->pair[0], 0, &dir->rev, sizeof(dir->rev));
1804     dir->rev = lfs_fromle32(dir->rev);
1805     if (err && err != LFS_ERR_CORRUPT) {
1806         return err;
1807     }
1808 
1809     // to make sure we don't immediately evict, align the new revision count
1810     // to our block_cycles modulus, see lfs_dir_compact for why our modulus
1811     // is tweaked this way
1812     if (lfs->cfg->block_cycles > 0) {
1813         dir->rev = lfs_alignup(dir->rev, ((lfs->cfg->block_cycles+1)|1));
1814     }
1815 
1816     // set defaults
1817     dir->off = sizeof(dir->rev);
1818     dir->etag = 0xffffffff;
1819     dir->count = 0;
1820     dir->tail[0] = LFS_BLOCK_NULL;
1821     dir->tail[1] = LFS_BLOCK_NULL;
1822     dir->erased = false;
1823     dir->split = false;
1824 
1825     // don't write out yet, let caller take care of that
1826     return 0;
1827 }
1828 #endif
1829 
1830 #ifndef LFS_READONLY
1831 static int lfs_dir_drop(lfs_t *lfs, lfs_mdir_t *dir, lfs_mdir_t *tail) {
1832     // steal state
1833     int err = lfs_dir_getgstate(lfs, tail, &lfs->gdelta);
1834     if (err) {
1835         return err;
1836     }
1837 
1838     // steal tail
1839     lfs_pair_tole32(tail->tail);
1840     err = lfs_dir_commit(lfs, dir, LFS_MKATTRS(
1841             {LFS_MKTAG(LFS_TYPE_TAIL + tail->split, 0x3ff, 8), tail->tail}));
1842     lfs_pair_fromle32(tail->tail);
1843     if (err) {
1844         return err;
1845     }
1846 
1847     return 0;
1848 }
1849 #endif
1850 
1851 #ifndef LFS_READONLY
1852 static int lfs_dir_split(lfs_t *lfs,
1853         lfs_mdir_t *dir, const struct lfs_mattr *attrs, int attrcount,
1854         lfs_mdir_t *source, uint16_t split, uint16_t end) {
1855     // create tail metadata pair
1856     lfs_mdir_t tail;
1857     int err = lfs_dir_alloc(lfs, &tail);
1858     if (err) {
1859         return err;
1860     }
1861 
1862     tail.split = dir->split;
1863     tail.tail[0] = dir->tail[0];
1864     tail.tail[1] = dir->tail[1];
1865 
1866     // note we don't care about LFS_OK_RELOCATED
1867     int res = lfs_dir_compact(lfs, &tail, attrs, attrcount, source, split, end);
1868     if (res < 0) {
1869         return res;
1870     }
1871 
1872     dir->tail[0] = tail.pair[0];
1873     dir->tail[1] = tail.pair[1];
1874     dir->split = true;
1875 
1876     // update root if needed
1877     if (lfs_pair_cmp(dir->pair, lfs->root) == 0 && split == 0) {
1878         lfs->root[0] = tail.pair[0];
1879         lfs->root[1] = tail.pair[1];
1880     }
1881 
1882     return 0;
1883 }
1884 #endif
1885 
1886 #ifndef LFS_READONLY
1887 static int lfs_dir_commit_size(void *p, lfs_tag_t tag, const void *buffer) {
1888     lfs_size_t *size = p;
1889     (void)buffer;
1890 
1891     *size += lfs_tag_dsize(tag);
1892     return 0;
1893 }
1894 #endif
1895 
1896 #ifndef LFS_READONLY
1897 struct lfs_dir_commit_commit {
1898     lfs_t *lfs;
1899     struct lfs_commit *commit;
1900 };
1901 #endif
1902 
1903 #ifndef LFS_READONLY
1904 static int lfs_dir_commit_commit(void *p, lfs_tag_t tag, const void *buffer) {
1905     struct lfs_dir_commit_commit *commit = p;
1906     return lfs_dir_commitattr(commit->lfs, commit->commit, tag, buffer);
1907 }
1908 #endif
1909 
1910 #ifndef LFS_READONLY
1911 static bool lfs_dir_needsrelocation(lfs_t *lfs, lfs_mdir_t *dir) {
1912     // If our revision count == n * block_cycles, we should force a relocation,
1913     // this is how littlefs wear-levels at the metadata-pair level. Note that we
1914     // actually use (block_cycles+1)|1, this is to avoid two corner cases:
1915     // 1. block_cycles = 1, which would prevent relocations from terminating
1916     // 2. block_cycles = 2n, which, due to aliasing, would only ever relocate
1917     //    one metadata block in the pair, effectively making this useless
1918     return (lfs->cfg->block_cycles > 0
1919             && ((dir->rev + 1) % ((lfs->cfg->block_cycles+1)|1) == 0));
1920 }
1921 #endif
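// Worked example: with block_cycles = 100 the modulus is (100+1)|1 = 101, so
// a pair is relocated when rev+1 reaches 101, 202, 303, ..., i.e. roughly
// once every block_cycles compactions, and the odd modulus ensures the two
// blocks of the pair take turns being rewritten.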
1922 
1923 #ifndef LFS_READONLY
1924 static int lfs_dir_compact(lfs_t *lfs,
1925         lfs_mdir_t *dir, const struct lfs_mattr *attrs, int attrcount,
1926         lfs_mdir_t *source, uint16_t begin, uint16_t end) {
1927     // save some state in case block is bad
1928     bool relocated = false;
1929     bool tired = lfs_dir_needsrelocation(lfs, dir);
1930 
1931     // increment revision count
1932     dir->rev += 1;
1933 
1934     // do not proactively relocate blocks during migrations, this
1935     // can cause a number of failure states, such as: clobbering the
1936     // v1 superblock if we relocate root, and invalidating directory
1937     // pointers if we relocate the head of a directory. On top of
1938     // this, relocations increase the overall complexity of
1939     // lfs_migration, which is already a delicate operation.
1940 #ifdef LFS_MIGRATE
1941     if (lfs->lfs1) {
1942         tired = false;
1943     }
1944 #endif
1945 
1946     if (tired && lfs_pair_cmp(dir->pair, (const lfs_block_t[2]){0, 1}) != 0) {
1947         // we're writing too much, time to relocate
1948         goto relocate;
1949     }
1950 
1951     // begin loop to commit compaction to blocks until a compact sticks
1952     while (true) {
1953         {
1954             // setup commit state
1955             struct lfs_commit commit = {
1956                 .block = dir->pair[1],
1957                 .off = 0,
1958                 .ptag = 0xffffffff,
1959                 .crc = 0xffffffff,
1960 
1961                 .begin = 0,
1962                 .end = (lfs->cfg->metadata_max ?
1963                     lfs->cfg->metadata_max : lfs->cfg->block_size) - 8,
1964             };
1965 
1966             // erase block to write to
1967             int err = lfs_bd_erase(lfs, dir->pair[1]);
1968             if (err) {
1969                 if (err == LFS_ERR_CORRUPT) {
1970                     goto relocate;
1971                 }
1972                 return err;
1973             }
1974 
1975             // write out header
1976             dir->rev = lfs_tole32(dir->rev);
1977             err = lfs_dir_commitprog(lfs, &commit,
1978                     &dir->rev, sizeof(dir->rev));
1979             dir->rev = lfs_fromle32(dir->rev);
1980             if (err) {
1981                 if (err == LFS_ERR_CORRUPT) {
1982                     goto relocate;
1983                 }
1984                 return err;
1985             }
1986 
1987             // traverse the directory, this time writing out all unique tags
1988             err = lfs_dir_traverse(lfs,
1989                     source, 0, 0xffffffff, attrs, attrcount,
1990                     LFS_MKTAG(0x400, 0x3ff, 0),
1991                     LFS_MKTAG(LFS_TYPE_NAME, 0, 0),
1992                     begin, end, -begin,
1993                     lfs_dir_commit_commit, &(struct lfs_dir_commit_commit){
1994                         lfs, &commit});
1995             if (err) {
1996                 if (err == LFS_ERR_CORRUPT) {
1997                     goto relocate;
1998                 }
1999                 return err;
2000             }
2001 
2002             // commit tail, which may be new after last size check
2003             if (!lfs_pair_isnull(dir->tail)) {
2004                 lfs_pair_tole32(dir->tail);
2005                 err = lfs_dir_commitattr(lfs, &commit,
2006                         LFS_MKTAG(LFS_TYPE_TAIL + dir->split, 0x3ff, 8),
2007                         dir->tail);
2008                 lfs_pair_fromle32(dir->tail);
2009                 if (err) {
2010                     if (err == LFS_ERR_CORRUPT) {
2011                         goto relocate;
2012                     }
2013                     return err;
2014                 }
2015             }
2016 
2017             // bring over gstate?
2018             lfs_gstate_t delta = {0};
2019             if (!relocated) {
2020                 lfs_gstate_xor(&delta, &lfs->gdisk);
2021                 lfs_gstate_xor(&delta, &lfs->gstate);
2022             }
2023             lfs_gstate_xor(&delta, &lfs->gdelta);
2024             delta.tag &= ~LFS_MKTAG(0, 0, 0x3ff);
2025 
2026             err = lfs_dir_getgstate(lfs, dir, &delta);
2027             if (err) {
2028                 return err;
2029             }
2030 
2031             if (!lfs_gstate_iszero(&delta)) {
2032                 lfs_gstate_tole32(&delta);
2033                 err = lfs_dir_commitattr(lfs, &commit,
2034                         LFS_MKTAG(LFS_TYPE_MOVESTATE, 0x3ff,
2035                             sizeof(delta)), &delta);
2036                 if (err) {
2037                     if (err == LFS_ERR_CORRUPT) {
2038                         goto relocate;
2039                     }
2040                     return err;
2041                 }
2042             }
2043 
2044             // complete commit with crc
2045             err = lfs_dir_commitcrc(lfs, &commit);
2046             if (err) {
2047                 if (err == LFS_ERR_CORRUPT) {
2048                     goto relocate;
2049                 }
2050                 return err;
2051             }
2052 
2053             // successful compaction, swap dir pair to indicate most recent
2054             LFS_ASSERT(commit.off % lfs->cfg->prog_size == 0);
2055             lfs_pair_swap(dir->pair);
2056             dir->count = end - begin;
2057             dir->off = commit.off;
2058             dir->etag = commit.ptag;
2059             // update gstate
2060             lfs->gdelta = (lfs_gstate_t){0};
2061             if (!relocated) {
2062                 lfs->gdisk = lfs->gstate;
2063             }
2064         }
2065         break;
2066 
2067 relocate:
2068         // commit was corrupted, drop caches and prepare to relocate block
2069         relocated = true;
2070         lfs_cache_drop(lfs, &lfs->pcache);
2071         if (!tired) {
2072             LFS_DEBUG("Bad block at 0x%"PRIx32, dir->pair[1]);
2073         }
2074 
2075         // can't relocate superblock, filesystem is now frozen
2076         if (lfs_pair_cmp(dir->pair, (const lfs_block_t[2]){0, 1}) == 0) {
2077             LFS_WARN("Superblock 0x%"PRIx32" has become unwritable",
2078                     dir->pair[1]);
2079             return LFS_ERR_NOSPC;
2080         }
2081 
2082         // relocate half of pair
2083         int err = lfs_alloc(lfs, &dir->pair[1]);
2084         if (err && (err != LFS_ERR_NOSPC || !tired)) {
2085             return err;
2086         }
2087 
2088         tired = false;
2089         continue;
2090     }
2091 
2092     return relocated ? LFS_OK_RELOCATED : 0;
2093 }
2094 #endif
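// After a successful compaction the destination block contains, in order:
// the 32-bit revision count, the surviving attributes, an optional tail tag,
// an optional gstate delta, and the commit crc(s) written by
// lfs_dir_commitcrc, padded out to prog_size.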
2095 
2096 #ifndef LFS_READONLY
2097 static int lfs_dir_splittingcompact(lfs_t *lfs, lfs_mdir_t *dir,
2098         const struct lfs_mattr *attrs, int attrcount,
2099         lfs_mdir_t *source, uint16_t begin, uint16_t end) {
2100     while (true) {
2101         // find size of first split, we do this by halving the split until
2102         // the metadata is guaranteed to fit
2103         //
2104         // Note that this isn't a true binary search, we never increase the
2105         // split size. This may result in poorly distributed metadata but isn't
2106         // worth the extra code size or performance hit to fix.
2107         lfs_size_t split = begin;
2108         while (end - split > 1) {
2109             lfs_size_t size = 0;
2110             int err = lfs_dir_traverse(lfs,
2111                     source, 0, 0xffffffff, attrs, attrcount,
2112                     LFS_MKTAG(0x400, 0x3ff, 0),
2113                     LFS_MKTAG(LFS_TYPE_NAME, 0, 0),
2114                     split, end, -split,
2115                     lfs_dir_commit_size, &size);
2116             if (err) {
2117                 return err;
2118             }
2119 
2120             // space is complicated, we need room for:
2121             //
2122             // - tail:         4+2*4 = 12 bytes
2123             // - gstate:       4+3*4 = 16 bytes
2124             // - move delete:  4     = 4 bytes
2125             // - crc:          4+4   = 8 bytes
2126             //                 total = 40 bytes
2127             //
2128             // And we cap at half a block to avoid degenerate cases with
2129             // nearly-full metadata blocks.
2130             //
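            // As a worked example (assuming block_size=4096, prog_size=16
            // and no metadata_max): the split is accepted once the estimated
            // size fits in min(4096-40, lfs_alignup(4096/2, 16)) = 2048
            // bytes, i.e. half a block, and end - split stays under 0xff.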
2131             if (end - split < 0xff
2132                     && size <= lfs_min(
2133                         lfs->cfg->block_size - 40,
2134                         lfs_alignup(
2135                             (lfs->cfg->metadata_max
2136                                 ? lfs->cfg->metadata_max
2137                                 : lfs->cfg->block_size)/2,
2138                             lfs->cfg->prog_size))) {
2139                 break;
2140             }
2141 
2142             split = split + ((end - split) / 2);
2143         }
2144 
2145         if (split == begin) {
2146             // no split needed
2147             break;
2148         }
2149 
2150         // split into two metadata pairs and continue
2151         int err = lfs_dir_split(lfs, dir, attrs, attrcount,
2152                 source, split, end);
2153         if (err && err != LFS_ERR_NOSPC) {
2154             return err;
2155         }
2156 
2157         if (err) {
2158             // we can't allocate a new block, try to compact with degraded
2159             // performance
2160             LFS_WARN("Unable to split {0x%"PRIx32", 0x%"PRIx32"}",
2161                     dir->pair[0], dir->pair[1]);
2162             break;
2163         } else {
2164             end = split;
2165         }
2166     }
2167 
2168     if (lfs_dir_needsrelocation(lfs, dir)
2169             && lfs_pair_cmp(dir->pair, (const lfs_block_t[2]){0, 1}) == 0) {
2170         // oh no! we're writing too much to the superblock,
2171         // should we expand?
2172         lfs_ssize_t size = lfs_fs_size_(lfs);
2173         if (size < 0) {
2174             return size;
2175         }
2176 
2177         // littlefs cannot reclaim expanded superblocks, so expand cautiously
2178         //
2179         // if our filesystem is more than ~88% full, don't expand, this is
2180         // somewhat arbitrary
2181         if (lfs->block_count - size > lfs->block_count/8) {
2182             LFS_DEBUG("Expanding superblock at rev %"PRIu32, dir->rev);
2183             int err = lfs_dir_split(lfs, dir, attrs, attrcount,
2184                     source, begin, end);
2185             if (err && err != LFS_ERR_NOSPC) {
2186                 return err;
2187             }
2188 
2189             if (err) {
2190                 // welp, we tried, if we ran out of space there's not much
2191                 // we can do, we'll error later if we've become frozen
2192                 LFS_WARN("Unable to expand superblock");
2193             } else {
2194                 end = begin;
2195             }
2196         }
2197     }
2198 
2199     return lfs_dir_compact(lfs, dir, attrs, attrcount, source, begin, end);
2200 }
2201 #endif
2202 
2203 #ifndef LFS_READONLY
2204 static int lfs_dir_relocatingcommit(lfs_t *lfs, lfs_mdir_t *dir,
2205         const lfs_block_t pair[2],
2206         const struct lfs_mattr *attrs, int attrcount,
2207         lfs_mdir_t *pdir) {
2208     int state = 0;
2209 
2210     // calculate changes to the directory
2211     bool hasdelete = false;
2212     for (int i = 0; i < attrcount; i++) {
2213         if (lfs_tag_type3(attrs[i].tag) == LFS_TYPE_CREATE) {
2214             dir->count += 1;
2215         } else if (lfs_tag_type3(attrs[i].tag) == LFS_TYPE_DELETE) {
2216             LFS_ASSERT(dir->count > 0);
2217             dir->count -= 1;
2218             hasdelete = true;
2219         } else if (lfs_tag_type1(attrs[i].tag) == LFS_TYPE_TAIL) {
2220             dir->tail[0] = ((lfs_block_t*)attrs[i].buffer)[0];
2221             dir->tail[1] = ((lfs_block_t*)attrs[i].buffer)[1];
2222             dir->split = (lfs_tag_chunk(attrs[i].tag) & 1);
2223             lfs_pair_fromle32(dir->tail);
2224         }
2225     }
2226 
2227     // should we actually drop the directory block?
2228     if (hasdelete && dir->count == 0) {
2229         LFS_ASSERT(pdir);
2230         int err = lfs_fs_pred(lfs, dir->pair, pdir);
2231         if (err && err != LFS_ERR_NOENT) {
2232             return err;
2233         }
2234 
2235         if (err != LFS_ERR_NOENT && pdir->split) {
2236             state = LFS_OK_DROPPED;
2237             goto fixmlist;
2238         }
2239     }
2240 
2241     if (dir->erased) {
2242         // try to commit
2243         struct lfs_commit commit = {
2244             .block = dir->pair[0],
2245             .off = dir->off,
2246             .ptag = dir->etag,
2247             .crc = 0xffffffff,
2248 
2249             .begin = dir->off,
2250             .end = (lfs->cfg->metadata_max ?
2251                 lfs->cfg->metadata_max : lfs->cfg->block_size) - 8,
2252         };
2253 
2254         // traverse attrs that need to be written out
2255         lfs_pair_tole32(dir->tail);
2256         int err = lfs_dir_traverse(lfs,
2257                 dir, dir->off, dir->etag, attrs, attrcount,
2258                 0, 0, 0, 0, 0,
2259                 lfs_dir_commit_commit, &(struct lfs_dir_commit_commit){
2260                     lfs, &commit});
2261         lfs_pair_fromle32(dir->tail);
2262         if (err) {
2263             if (err == LFS_ERR_NOSPC || err == LFS_ERR_CORRUPT) {
2264                 goto compact;
2265             }
2266             return err;
2267         }
2268 
2269         // commit any global diffs if we have any
2270         lfs_gstate_t delta = {0};
2271         lfs_gstate_xor(&delta, &lfs->gstate);
2272         lfs_gstate_xor(&delta, &lfs->gdisk);
2273         lfs_gstate_xor(&delta, &lfs->gdelta);
2274         delta.tag &= ~LFS_MKTAG(0, 0, 0x3ff);
2275         if (!lfs_gstate_iszero(&delta)) {
2276             err = lfs_dir_getgstate(lfs, dir, &delta);
2277             if (err) {
2278                 return err;
2279             }
2280 
2281             lfs_gstate_tole32(&delta);
2282             err = lfs_dir_commitattr(lfs, &commit,
2283                     LFS_MKTAG(LFS_TYPE_MOVESTATE, 0x3ff,
2284                         sizeof(delta)), &delta);
2285             if (err) {
2286                 if (err == LFS_ERR_NOSPC || err == LFS_ERR_CORRUPT) {
2287                     goto compact;
2288                 }
2289                 return err;
2290             }
2291         }
2292 
2293         // finalize commit with the crc
2294         err = lfs_dir_commitcrc(lfs, &commit);
2295         if (err) {
2296             if (err == LFS_ERR_NOSPC || err == LFS_ERR_CORRUPT) {
2297                 goto compact;
2298             }
2299             return err;
2300         }
2301 
2302         // successful commit, update dir
2303         LFS_ASSERT(commit.off % lfs->cfg->prog_size == 0);
2304         dir->off = commit.off;
2305         dir->etag = commit.ptag;
2306         // and update gstate
2307         lfs->gdisk = lfs->gstate;
2308         lfs->gdelta = (lfs_gstate_t){0};
2309 
2310         goto fixmlist;
2311     }
2312 
2313 compact:
2314     // fall back to compaction
2315     lfs_cache_drop(lfs, &lfs->pcache);
2316 
2317     state = lfs_dir_splittingcompact(lfs, dir, attrs, attrcount,
2318             dir, 0, dir->count);
2319     if (state < 0) {
2320         return state;
2321     }
2322 
2323     goto fixmlist;
2324 
2325 fixmlist:;
2326     // this complicated bit of logic is for fixing up any active
2327     // metadata-pairs that we may have affected
2328     //
2329     // note we have to make two passes since the mdir passed to
2330     // lfs_dir_commit could also be in this list, and even then
2331     // we need to copy the pair so they don't get clobbered if we refetch
2332     // our mdir.
2333     lfs_block_t oldpair[2] = {pair[0], pair[1]};
2334     for (struct lfs_mlist *d = lfs->mlist; d; d = d->next) {
2335         if (lfs_pair_cmp(d->m.pair, oldpair) == 0) {
2336             d->m = *dir;
2337             if (d->m.pair != pair) {
2338                 for (int i = 0; i < attrcount; i++) {
2339                     if (lfs_tag_type3(attrs[i].tag) == LFS_TYPE_DELETE &&
2340                             d->id == lfs_tag_id(attrs[i].tag)) {
2341                         d->m.pair[0] = LFS_BLOCK_NULL;
2342                         d->m.pair[1] = LFS_BLOCK_NULL;
2343                     } else if (lfs_tag_type3(attrs[i].tag) == LFS_TYPE_DELETE &&
2344                             d->id > lfs_tag_id(attrs[i].tag)) {
2345                         d->id -= 1;
2346                         if (d->type == LFS_TYPE_DIR) {
2347                             ((lfs_dir_t*)d)->pos -= 1;
2348                         }
2349                     } else if (lfs_tag_type3(attrs[i].tag) == LFS_TYPE_CREATE &&
2350                             d->id >= lfs_tag_id(attrs[i].tag)) {
2351                         d->id += 1;
2352                         if (d->type == LFS_TYPE_DIR) {
2353                             ((lfs_dir_t*)d)->pos += 1;
2354                         }
2355                     }
2356                 }
2357             }
2358 
2359             while (d->id >= d->m.count && d->m.split) {
2360                 // we split and id is on tail now
2361                 d->id -= d->m.count;
2362                 int err = lfs_dir_fetch(lfs, &d->m, d->m.tail);
2363                 if (err) {
2364                     return err;
2365                 }
2366             }
2367         }
2368     }
2369 
2370     return state;
2371 }
2372 #endif
2373 
2374 #ifndef LFS_READONLY
2375 static int lfs_dir_orphaningcommit(lfs_t *lfs, lfs_mdir_t *dir,
2376         const struct lfs_mattr *attrs, int attrcount) {
2377     // check for any inline files that aren't RAM backed and
2378     // forcefully evict them, needed for filesystem consistency
2379     for (lfs_file_t *f = (lfs_file_t*)lfs->mlist; f; f = f->next) {
2380         if (dir != &f->m && lfs_pair_cmp(f->m.pair, dir->pair) == 0 &&
2381                 f->type == LFS_TYPE_REG && (f->flags & LFS_F_INLINE) &&
2382                 f->ctz.size > lfs->cfg->cache_size) {
2383             int err = lfs_file_outline(lfs, f);
2384             if (err) {
2385                 return err;
2386             }
2387 
2388             err = lfs_file_flush(lfs, f);
2389             if (err) {
2390                 return err;
2391             }
2392         }
2393     }
2394 
2395     lfs_block_t lpair[2] = {dir->pair[0], dir->pair[1]};
2396     lfs_mdir_t ldir = *dir;
2397     lfs_mdir_t pdir;
2398     int state = lfs_dir_relocatingcommit(lfs, &ldir, dir->pair,
2399             attrs, attrcount, &pdir);
2400     if (state < 0) {
2401         return state;
2402     }
2403 
2404     // update if we're not in mlist, note we may have already been
2405     // updated if we are in mlist
2406     if (lfs_pair_cmp(dir->pair, lpair) == 0) {
2407         *dir = ldir;
2408     }
2409 
2410     // commit was successful, but may require other changes in the
2411     // filesystem, these would normally be tail recursive, but we have
2412     // flattened them here to avoid unbounded stack usage
2413 
2414     // need to drop?
2415     if (state == LFS_OK_DROPPED) {
2416         // steal state
2417         int err = lfs_dir_getgstate(lfs, dir, &lfs->gdelta);
2418         if (err) {
2419             return err;
2420         }
2421 
2422         // steal tail, note that this can't create a recursive drop
2423         lpair[0] = pdir.pair[0];
2424         lpair[1] = pdir.pair[1];
2425         lfs_pair_tole32(dir->tail);
2426         state = lfs_dir_relocatingcommit(lfs, &pdir, lpair, LFS_MKATTRS(
2427                     {LFS_MKTAG(LFS_TYPE_TAIL + dir->split, 0x3ff, 8),
2428                         dir->tail}),
2429                 NULL);
2430         lfs_pair_fromle32(dir->tail);
2431         if (state < 0) {
2432             return state;
2433         }
2434 
2435         ldir = pdir;
2436     }
2437 
2438     // need to relocate?
2439     bool orphans = false;
2440     while (state == LFS_OK_RELOCATED) {
2441         LFS_DEBUG("Relocating {0x%"PRIx32", 0x%"PRIx32"} "
2442                     "-> {0x%"PRIx32", 0x%"PRIx32"}",
2443                 lpair[0], lpair[1], ldir.pair[0], ldir.pair[1]);
2444         state = 0;
2445 
2446         // update internal root
2447         if (lfs_pair_cmp(lpair, lfs->root) == 0) {
2448             lfs->root[0] = ldir.pair[0];
2449             lfs->root[1] = ldir.pair[1];
2450         }
2451 
2452         // update internally tracked dirs
2453         for (struct lfs_mlist *d = lfs->mlist; d; d = d->next) {
2454             if (lfs_pair_cmp(lpair, d->m.pair) == 0) {
2455                 d->m.pair[0] = ldir.pair[0];
2456                 d->m.pair[1] = ldir.pair[1];
2457             }
2458 
2459             if (d->type == LFS_TYPE_DIR &&
2460                     lfs_pair_cmp(lpair, ((lfs_dir_t*)d)->head) == 0) {
2461                 ((lfs_dir_t*)d)->head[0] = ldir.pair[0];
2462                 ((lfs_dir_t*)d)->head[1] = ldir.pair[1];
2463             }
2464         }
2465 
2466         // find parent
2467         lfs_stag_t tag = lfs_fs_parent(lfs, lpair, &pdir);
2468         if (tag < 0 && tag != LFS_ERR_NOENT) {
2469             return tag;
2470         }
2471 
2472         bool hasparent = (tag != LFS_ERR_NOENT);
2473         if (tag != LFS_ERR_NOENT) {
2474             // note that if we have a parent, we must have a pred, so this will
2475             // always create an orphan
2476             int err = lfs_fs_preporphans(lfs, +1);
2477             if (err) {
2478                 return err;
2479             }
2480 
2481             // fix pending move in this pair? this looks like an optimization but
2482             // is in fact _required_ since relocating may outdate the move.
2483             uint16_t moveid = 0x3ff;
2484             if (lfs_gstate_hasmovehere(&lfs->gstate, pdir.pair)) {
2485                 moveid = lfs_tag_id(lfs->gstate.tag);
2486                 LFS_DEBUG("Fixing move while relocating "
2487                         "{0x%"PRIx32", 0x%"PRIx32"} 0x%"PRIx16"\n",
2488                         pdir.pair[0], pdir.pair[1], moveid);
2489                 lfs_fs_prepmove(lfs, 0x3ff, NULL);
2490                 if (moveid < lfs_tag_id(tag)) {
2491                     tag -= LFS_MKTAG(0, 1, 0);
2492                 }
2493             }
2494 
2495             lfs_block_t ppair[2] = {pdir.pair[0], pdir.pair[1]};
2496             lfs_pair_tole32(ldir.pair);
2497             state = lfs_dir_relocatingcommit(lfs, &pdir, ppair, LFS_MKATTRS(
2498                         {LFS_MKTAG_IF(moveid != 0x3ff,
2499                             LFS_TYPE_DELETE, moveid, 0), NULL},
2500                         {tag, ldir.pair}),
2501                     NULL);
2502             lfs_pair_fromle32(ldir.pair);
2503             if (state < 0) {
2504                 return state;
2505             }
2506 
2507             if (state == LFS_OK_RELOCATED) {
2508                 lpair[0] = ppair[0];
2509                 lpair[1] = ppair[1];
2510                 ldir = pdir;
2511                 orphans = true;
2512                 continue;
2513             }
2514         }
2515 
2516         // find pred
2517         int err = lfs_fs_pred(lfs, lpair, &pdir);
2518         if (err && err != LFS_ERR_NOENT) {
2519             return err;
2520         }
2521         LFS_ASSERT(!(hasparent && err == LFS_ERR_NOENT));
2522 
2523         // if we can't find dir, it must be new
2524         if (err != LFS_ERR_NOENT) {
2525             if (lfs_gstate_hasorphans(&lfs->gstate)) {
2526                 // next step, clean up orphans
2527                 err = lfs_fs_preporphans(lfs, -hasparent);
2528                 if (err) {
2529                     return err;
2530                 }
2531             }
2532 
2533             // fix pending move in this pair? this looks like an optimization
2534             // but is in fact _required_ since relocating may outdate the move.
2535             uint16_t moveid = 0x3ff;
2536             if (lfs_gstate_hasmovehere(&lfs->gstate, pdir.pair)) {
2537                 moveid = lfs_tag_id(lfs->gstate.tag);
2538                 LFS_DEBUG("Fixing move while relocating "
2539                         "{0x%"PRIx32", 0x%"PRIx32"} 0x%"PRIx16"\n",
2540                         pdir.pair[0], pdir.pair[1], moveid);
2541                 lfs_fs_prepmove(lfs, 0x3ff, NULL);
2542             }
2543 
2544             // replace bad pair, either we clean up desync, or no desync occurred
2545             lpair[0] = pdir.pair[0];
2546             lpair[1] = pdir.pair[1];
2547             lfs_pair_tole32(ldir.pair);
2548             state = lfs_dir_relocatingcommit(lfs, &pdir, lpair, LFS_MKATTRS(
2549                         {LFS_MKTAG_IF(moveid != 0x3ff,
2550                             LFS_TYPE_DELETE, moveid, 0), NULL},
2551                         {LFS_MKTAG(LFS_TYPE_TAIL + pdir.split, 0x3ff, 8),
2552                             ldir.pair}),
2553                     NULL);
2554             lfs_pair_fromle32(ldir.pair);
2555             if (state < 0) {
2556                 return state;
2557             }
2558 
2559             ldir = pdir;
2560         }
2561     }
2562 
2563     return orphans ? LFS_OK_ORPHANED : 0;
2564 }
2565 #endif
2566 
2567 #ifndef LFS_READONLY
2568 static int lfs_dir_commit(lfs_t *lfs, lfs_mdir_t *dir,
2569         const struct lfs_mattr *attrs, int attrcount) {
2570     int orphans = lfs_dir_orphaningcommit(lfs, dir, attrs, attrcount);
2571     if (orphans < 0) {
2572         return orphans;
2573     }
2574 
2575     if (orphans) {
2576         // make sure we've removed all orphans, this is a noop if there
2577         // are none, but if we had nested blocks failures we may have
2578         // created some
2579         int err = lfs_fs_deorphan(lfs, false);
2580         if (err) {
2581             return err;
2582         }
2583     }
2584 
2585     return 0;
2586 }
2587 #endif
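// A minimal sketch of how callers in this file drive commits: LFS_MKATTRS
// (defined earlier) expands to a compound-literal array of struct lfs_mattr
// plus its count, so creating and naming an entry for a hypothetical id,
// name length nlen, and name path looks like:
//
//     err = lfs_dir_commit(lfs, &cwd.m, LFS_MKATTRS(
//             {LFS_MKTAG(LFS_TYPE_CREATE, id, 0), NULL},
//             {LFS_MKTAG(LFS_TYPE_REG, id, nlen), path}));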
2588 
2589 
2590 /// Top level directory operations ///
2591 #ifndef LFS_READONLY
2592 static int lfs_mkdir_(lfs_t *lfs, const char *path) {
2593     // deorphan if we haven't yet, needed at most once after poweron
2594     int err = lfs_fs_forceconsistency(lfs);
2595     if (err) {
2596         return err;
2597     }
2598 
2599     struct lfs_mlist cwd;
2600     cwd.next = lfs->mlist;
2601     uint16_t id;
2602     err = lfs_dir_find(lfs, &cwd.m, &path, &id);
2603     if (!(err == LFS_ERR_NOENT && id != 0x3ff)) {
2604         return (err < 0) ? err : LFS_ERR_EXIST;
2605     }
2606 
2607     // check that name fits
2608     lfs_size_t nlen = strlen(path);
2609     if (nlen > lfs->name_max) {
2610         return LFS_ERR_NAMETOOLONG;
2611     }
2612 
2613     // build up new directory
2614     lfs_alloc_ckpoint(lfs);
2615     lfs_mdir_t dir;
2616     err = lfs_dir_alloc(lfs, &dir);
2617     if (err) {
2618         return err;
2619     }
2620 
2621     // find end of list
2622     lfs_mdir_t pred = cwd.m;
2623     while (pred.split) {
2624         err = lfs_dir_fetch(lfs, &pred, pred.tail);
2625         if (err) {
2626             return err;
2627         }
2628     }
2629 
2630     // setup dir
2631     lfs_pair_tole32(pred.tail);
2632     err = lfs_dir_commit(lfs, &dir, LFS_MKATTRS(
2633             {LFS_MKTAG(LFS_TYPE_SOFTTAIL, 0x3ff, 8), pred.tail}));
2634     lfs_pair_fromle32(pred.tail);
2635     if (err) {
2636         return err;
2637     }
2638 
2639     // current block not end of list?
2640     if (cwd.m.split) {
2641         // update tails, this creates a desync
2642         err = lfs_fs_preporphans(lfs, +1);
2643         if (err) {
2644             return err;
2645         }
2646 
2647         // it's possible our predecessor has to be relocated, and if
2648         // our parent is our predecessor's predecessor, this could have
2649         // caused our parent to go out of date, fortunately we can hook
2650         // ourselves into littlefs to catch this
2651         cwd.type = 0;
2652         cwd.id = 0;
2653         lfs->mlist = &cwd;
2654 
2655         lfs_pair_tole32(dir.pair);
2656         err = lfs_dir_commit(lfs, &pred, LFS_MKATTRS(
2657                 {LFS_MKTAG(LFS_TYPE_SOFTTAIL, 0x3ff, 8), dir.pair}));
2658         lfs_pair_fromle32(dir.pair);
2659         if (err) {
2660             lfs->mlist = cwd.next;
2661             return err;
2662         }
2663 
2664         lfs->mlist = cwd.next;
2665         err = lfs_fs_preporphans(lfs, -1);
2666         if (err) {
2667             return err;
2668         }
2669     }
2670 
2671     // now insert into our parent block
2672     lfs_pair_tole32(dir.pair);
2673     err = lfs_dir_commit(lfs, &cwd.m, LFS_MKATTRS(
2674             {LFS_MKTAG(LFS_TYPE_CREATE, id, 0), NULL},
2675             {LFS_MKTAG(LFS_TYPE_DIR, id, nlen), path},
2676             {LFS_MKTAG(LFS_TYPE_DIRSTRUCT, id, 8), dir.pair},
2677             {LFS_MKTAG_IF(!cwd.m.split,
2678                 LFS_TYPE_SOFTTAIL, 0x3ff, 8), dir.pair}));
2679     lfs_pair_fromle32(dir.pair);
2680     if (err) {
2681         return err;
2682     }
2683 
2684     return 0;
2685 }
2686 #endif
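// A minimal usage sketch from the API side (the public lfs_mkdir wrapper
// defined later in this file forwards here), assuming a mounted lfs_t named
// lfs:
//
//     int err = lfs_mkdir(&lfs, "logs");
//     if (err && err != LFS_ERR_EXIST) {
//         // handle error
//     }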
2687 
2688 static int lfs_dir_open_(lfs_t *lfs, lfs_dir_t *dir, const char *path) {
2689     lfs_stag_t tag = lfs_dir_find(lfs, &dir->m, &path, NULL);
2690     if (tag < 0) {
2691         return tag;
2692     }
2693 
2694     if (lfs_tag_type3(tag) != LFS_TYPE_DIR) {
2695         return LFS_ERR_NOTDIR;
2696     }
2697 
2698     lfs_block_t pair[2];
2699     if (lfs_tag_id(tag) == 0x3ff) {
2700         // handle root dir separately
2701         pair[0] = lfs->root[0];
2702         pair[1] = lfs->root[1];
2703     } else {
2704         // get dir pair from parent
2705         lfs_stag_t res = lfs_dir_get(lfs, &dir->m, LFS_MKTAG(0x700, 0x3ff, 0),
2706                 LFS_MKTAG(LFS_TYPE_STRUCT, lfs_tag_id(tag), 8), pair);
2707         if (res < 0) {
2708             return res;
2709         }
2710         lfs_pair_fromle32(pair);
2711     }
2712 
2713     // fetch first pair
2714     int err = lfs_dir_fetch(lfs, &dir->m, pair);
2715     if (err) {
2716         return err;
2717     }
2718 
2719     // setup entry
2720     dir->head[0] = dir->m.pair[0];
2721     dir->head[1] = dir->m.pair[1];
2722     dir->id = 0;
2723     dir->pos = 0;
2724 
2725     // add to list of mdirs
2726     dir->type = LFS_TYPE_DIR;
2727     lfs_mlist_append(lfs, (struct lfs_mlist *)dir);
2728 
2729     return 0;
2730 }
2731 
2732 static int lfs_dir_close_(lfs_t *lfs, lfs_dir_t *dir) {
2733     // remove from list of mdirs
2734     lfs_mlist_remove(lfs, (struct lfs_mlist *)dir);
2735 
2736     return 0;
2737 }
2738 
2739 static int lfs_dir_read_(lfs_t *lfs, lfs_dir_t *dir, struct lfs_info *info) {
2740     memset(info, 0, sizeof(*info));
2741 
2742     // special offset for '.' and '..'
2743     if (dir->pos == 0) {
2744         info->type = LFS_TYPE_DIR;
2745         strcpy(info->name, ".");
2746         dir->pos += 1;
2747         return true;
2748     } else if (dir->pos == 1) {
2749         info->type = LFS_TYPE_DIR;
2750         strcpy(info->name, "..");
2751         dir->pos += 1;
2752         return true;
2753     }
2754 
2755     while (true) {
2756         if (dir->id == dir->m.count) {
2757             if (!dir->m.split) {
2758                 return false;
2759             }
2760 
2761             int err = lfs_dir_fetch(lfs, &dir->m, dir->m.tail);
2762             if (err) {
2763                 return err;
2764             }
2765 
2766             dir->id = 0;
2767         }
2768 
2769         int err = lfs_dir_getinfo(lfs, &dir->m, dir->id, info);
2770         if (err && err != LFS_ERR_NOENT) {
2771             return err;
2772         }
2773 
2774         dir->id += 1;
2775         if (err != LFS_ERR_NOENT) {
2776             break;
2777         }
2778     }
2779 
2780     dir->pos += 1;
2781     return true;
2782 }
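// A minimal iteration sketch via the public wrappers (lfs_dir_open,
// lfs_dir_read, lfs_dir_close), assuming a mounted lfs_t named lfs; the
// true/false result above surfaces as positive/zero from lfs_dir_read:
//
//     lfs_dir_t dir;
//     struct lfs_info info;
//     int err = lfs_dir_open(&lfs, &dir, "/");
//     if (err) {
//         return err;
//     }
//     while ((err = lfs_dir_read(&lfs, &dir, &info)) > 0) {
//         // info.name, info.type, info.size describe each entry,
//         // starting with "." and ".."
//     }
//     lfs_dir_close(&lfs, &dir);
//     return err;  // 0 at end of directory, negative on error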
2783 
2784 static int lfs_dir_seek_(lfs_t *lfs, lfs_dir_t *dir, lfs_off_t off) {
2785     // simply walk from head dir
2786     int err = lfs_dir_rewind_(lfs, dir);
2787     if (err) {
2788         return err;
2789     }
2790 
2791     // first two for ./..
2792     dir->pos = lfs_min(2, off);
2793     off -= dir->pos;
2794 
2795     // skip superblock entry
2796     dir->id = (off > 0 && lfs_pair_cmp(dir->head, lfs->root) == 0);
2797 
2798     while (off > 0) {
2799         if (dir->id == dir->m.count) {
2800             if (!dir->m.split) {
2801                 return LFS_ERR_INVAL;
2802             }
2803 
2804             err = lfs_dir_fetch(lfs, &dir->m, dir->m.tail);
2805             if (err) {
2806                 return err;
2807             }
2808 
2809             dir->id = 0;
2810         }
2811 
2812         int diff = lfs_min(dir->m.count - dir->id, off);
2813         dir->id += diff;
2814         dir->pos += diff;
2815         off -= diff;
2816     }
2817 
2818     return 0;
2819 }
2820 
2821 static lfs_soff_t lfs_dir_tell_(lfs_t *lfs, lfs_dir_t *dir) {
2822     (void)lfs;
2823     return dir->pos;
2824 }
2825 
2826 static int lfs_dir_rewind_(lfs_t *lfs, lfs_dir_t *dir) {
2827     // reload the head dir
2828     int err = lfs_dir_fetch(lfs, &dir->m, dir->head);
2829     if (err) {
2830         return err;
2831     }
2832 
2833     dir->id = 0;
2834     dir->pos = 0;
2835     return 0;
2836 }
2837 
2838 
2839 /// File index list operations ///
2840 static int lfs_ctz_index(lfs_t *lfs, lfs_off_t *off) {
2841     lfs_off_t size = *off;
2842     lfs_off_t b = lfs->cfg->block_size - 2*4;
2843     lfs_off_t i = size / b;
2844     if (i == 0) {
2845         return 0;
2846     }
2847 
2848     i = (size - 4*(lfs_popc(i-1)+2)) / b;
2849     *off = size - b*i - 4*lfs_popc(i);
2850     return i;
2851 }
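// Worked examples of the index math above, assuming block_size = 4096
// (so b = 4088):
//
//     off 100  -> index 0, offset 100   (block 0 holds a full 4096 bytes)
//     off 4096 -> index 1, offset 4     (past block 1's single back-pointer)
//     off 8188 -> index 2, offset 8     (past block 2's two back-pointers)
//
// in general, block n >= 1 stores ctz(n)+1 4-byte pointers before its data.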
2852 
2853 static int lfs_ctz_find(lfs_t *lfs,
2854         const lfs_cache_t *pcache, lfs_cache_t *rcache,
2855         lfs_block_t head, lfs_size_t size,
2856         lfs_size_t pos, lfs_block_t *block, lfs_off_t *off) {
2857     if (size == 0) {
2858         *block = LFS_BLOCK_NULL;
2859         *off = 0;
2860         return 0;
2861     }
2862 
2863     lfs_off_t current = lfs_ctz_index(lfs, &(lfs_off_t){size-1});
2864     lfs_off_t target = lfs_ctz_index(lfs, &pos);
2865 
2866     while (current > target) {
2867         lfs_size_t skip = lfs_min(
2868                 lfs_npw2(current-target+1) - 1,
2869                 lfs_ctz(current));
2870 
2871         int err = lfs_bd_read(lfs,
2872                 pcache, rcache, sizeof(head),
2873                 head, 4*skip, &head, sizeof(head));
2874         head = lfs_fromle32(head);
2875         if (err) {
2876             return err;
2877         }
2878 
2879         current -= 1 << skip;
2880     }
2881 
2882     *block = head;
2883     *off = pos;
2884     return 0;
2885 }
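// Worked example of the skip math above: seeking from index 5 (binary 101)
// back to index 0 takes two hops, since ctz(5) = 0 only allows a single step
// back to index 4, after which skip = min(lfs_npw2(4-0+1)-1, ctz(4)) =
// min(2, 2) = 2 jumps 2^2 = 4 blocks straight to index 0.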
2886 
2887 #ifndef LFS_READONLY
2888 static int lfs_ctz_extend(lfs_t *lfs,
2889         lfs_cache_t *pcache, lfs_cache_t *rcache,
2890         lfs_block_t head, lfs_size_t size,
2891         lfs_block_t *block, lfs_off_t *off) {
2892     while (true) {
2893         // go ahead and grab a block
2894         lfs_block_t nblock;
2895         int err = lfs_alloc(lfs, &nblock);
2896         if (err) {
2897             return err;
2898         }
2899 
2900         {
2901             err = lfs_bd_erase(lfs, nblock);
2902             if (err) {
2903                 if (err == LFS_ERR_CORRUPT) {
2904                     goto relocate;
2905                 }
2906                 return err;
2907             }
2908 
2909             if (size == 0) {
2910                 *block = nblock;
2911                 *off = 0;
2912                 return 0;
2913             }
2914 
2915             lfs_size_t noff = size - 1;
2916             lfs_off_t index = lfs_ctz_index(lfs, &noff);
2917             noff = noff + 1;
2918 
2919             // just copy out the last block if it is incomplete
2920             if (noff != lfs->cfg->block_size) {
2921                 for (lfs_off_t i = 0; i < noff; i++) {
2922                     uint8_t data;
2923                     err = lfs_bd_read(lfs,
2924                             NULL, rcache, noff-i,
2925                             head, i, &data, 1);
2926                     if (err) {
2927                         return err;
2928                     }
2929 
2930                     err = lfs_bd_prog(lfs,
2931                             pcache, rcache, true,
2932                             nblock, i, &data, 1);
2933                     if (err) {
2934                         if (err == LFS_ERR_CORRUPT) {
2935                             goto relocate;
2936                         }
2937                         return err;
2938                     }
2939                 }
2940 
2941                 *block = nblock;
2942                 *off = noff;
2943                 return 0;
2944             }
2945 
2946             // append block
2947             index += 1;
2948             lfs_size_t skips = lfs_ctz(index) + 1;
2949             lfs_block_t nhead = head;
2950             for (lfs_off_t i = 0; i < skips; i++) {
2951                 nhead = lfs_tole32(nhead);
2952                 err = lfs_bd_prog(lfs, pcache, rcache, true,
2953                         nblock, 4*i, &nhead, 4);
2954                 nhead = lfs_fromle32(nhead);
2955                 if (err) {
2956                     if (err == LFS_ERR_CORRUPT) {
2957                         goto relocate;
2958                     }
2959                     return err;
2960                 }
2961 
2962                 if (i != skips-1) {
2963                     err = lfs_bd_read(lfs,
2964                             NULL, rcache, sizeof(nhead),
2965                             nhead, 4*i, &nhead, sizeof(nhead));
2966                     nhead = lfs_fromle32(nhead);
2967                     if (err) {
2968                         return err;
2969                     }
2970                 }
2971             }
2972 
2973             *block = nblock;
2974             *off = 4*skips;
2975             return 0;
2976         }
2977 
2978 relocate:
2979         LFS_DEBUG("Bad block at 0x%"PRIx32, nblock);
2980 
2981         // just clear cache and try a new block
2982         lfs_cache_drop(lfs, pcache);
2983     }
2984 }
2985 #endif
2986 
2987 static int lfs_ctz_traverse(lfs_t *lfs,
2988         const lfs_cache_t *pcache, lfs_cache_t *rcache,
2989         lfs_block_t head, lfs_size_t size,
2990         int (*cb)(void*, lfs_block_t), void *data) {
2991     if (size == 0) {
2992         return 0;
2993     }
2994 
2995     lfs_off_t index = lfs_ctz_index(lfs, &(lfs_off_t){size-1});
2996 
2997     while (true) {
2998         int err = cb(data, head);
2999         if (err) {
3000             return err;
3001         }
3002 
3003         if (index == 0) {
3004             return 0;
3005         }
3006 
3007         lfs_block_t heads[2];
3008         int count = 2 - (index & 1);
3009         err = lfs_bd_read(lfs,
3010                 pcache, rcache, count*sizeof(head),
3011                 head, 0, &heads, count*sizeof(head));
3012         heads[0] = lfs_fromle32(heads[0]);
3013         heads[1] = lfs_fromle32(heads[1]);
3014         if (err) {
3015             return err;
3016         }
3017 
3018         for (int i = 0; i < count-1; i++) {
3019             err = cb(data, heads[i]);
3020             if (err) {
3021                 return err;
3022             }
3023         }
3024 
3025         head = heads[count-1];
3026         index -= count;
3027     }
3028 }
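
// Note on the CTZ skip-list walked above (explanatory comment, not upstream
// documentation): the block at index i of a file's skip-list begins with
// ctz(i)+1 little-endian back-pointers, where pointer k points 2^k blocks
// backwards. lfs_ctz_extend programs exactly lfs_ctz(index)+1 pointers when
// appending a block, and the traversal here walks them in reverse, reading
// one or two heads per step (count = 2 - (index & 1)) so every block is
// visited with only small reads of the pointer area. A worked example of the
// per-block pointer overhead:
//
//     index 1 -> ctz(1)+1 = 1 pointer  ->  4 bytes
//     index 2 -> ctz(2)+1 = 2 pointers ->  8 bytes
//     index 4 -> ctz(4)+1 = 3 pointers -> 12 bytes
//     index 8 -> ctz(8)+1 = 4 pointers -> 16 bytes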
3029 
3030 
3031 /// Top level file operations ///
3032 static int lfs_file_opencfg_(lfs_t *lfs, lfs_file_t *file,
3033         const char *path, int flags,
3034         const struct lfs_file_config *cfg) {
3035 #ifndef LFS_READONLY
3036     // deorphan if we haven't yet, needed at most once after poweron
3037     if ((flags & LFS_O_WRONLY) == LFS_O_WRONLY) {
3038         int err = lfs_fs_forceconsistency(lfs);
3039         if (err) {
3040             return err;
3041         }
3042     }
3043 #else
3044     LFS_ASSERT((flags & LFS_O_RDONLY) == LFS_O_RDONLY);
3045 #endif
3046 
3047     // setup simple file details
3048     int err;
3049     file->cfg = cfg;
3050     file->flags = flags;
3051     file->pos = 0;
3052     file->off = 0;
3053     file->cache.buffer = NULL;
3054 
3055     // allocate entry for file if it doesn't exist
3056     lfs_stag_t tag = lfs_dir_find(lfs, &file->m, &path, &file->id);
3057     if (tag < 0 && !(tag == LFS_ERR_NOENT && file->id != 0x3ff)) {
3058         err = tag;
3059         goto cleanup;
3060     }
3061 
3062     // get id, add to list of mdirs to catch update changes
3063     file->type = LFS_TYPE_REG;
3064     lfs_mlist_append(lfs, (struct lfs_mlist *)file);
3065 
3066 #ifdef LFS_READONLY
3067     if (tag == LFS_ERR_NOENT) {
3068         err = LFS_ERR_NOENT;
3069         goto cleanup;
3070 #else
3071     if (tag == LFS_ERR_NOENT) {
3072         if (!(flags & LFS_O_CREAT)) {
3073             err = LFS_ERR_NOENT;
3074             goto cleanup;
3075         }
3076 
3077         // check that name fits
3078         lfs_size_t nlen = strlen(path);
3079         if (nlen > lfs->name_max) {
3080             err = LFS_ERR_NAMETOOLONG;
3081             goto cleanup;
3082         }
3083 
3084         // get next slot and create entry to remember name
3085         err = lfs_dir_commit(lfs, &file->m, LFS_MKATTRS(
3086                 {LFS_MKTAG(LFS_TYPE_CREATE, file->id, 0), NULL},
3087                 {LFS_MKTAG(LFS_TYPE_REG, file->id, nlen), path},
3088                 {LFS_MKTAG(LFS_TYPE_INLINESTRUCT, file->id, 0), NULL}));
3089 
3090         // it may happen that the file name doesn't fit in the metadata
3091         // blocks, e.g., a 256 byte file name will not fit in a 128 byte block.
3092         err = (err == LFS_ERR_NOSPC) ? LFS_ERR_NAMETOOLONG : err;
3093         if (err) {
3094             goto cleanup;
3095         }
3096 
3097         tag = LFS_MKTAG(LFS_TYPE_INLINESTRUCT, 0, 0);
3098     } else if (flags & LFS_O_EXCL) {
3099         err = LFS_ERR_EXIST;
3100         goto cleanup;
3101 #endif
3102     } else if (lfs_tag_type3(tag) != LFS_TYPE_REG) {
3103         err = LFS_ERR_ISDIR;
3104         goto cleanup;
3105 #ifndef LFS_READONLY
3106     } else if (flags & LFS_O_TRUNC) {
3107         // truncate if requested
3108         tag = LFS_MKTAG(LFS_TYPE_INLINESTRUCT, file->id, 0);
3109         file->flags |= LFS_F_DIRTY;
3110 #endif
3111     } else {
3112         // try to load what's on disk, if it's inlined we'll fix it later
3113         tag = lfs_dir_get(lfs, &file->m, LFS_MKTAG(0x700, 0x3ff, 0),
3114                 LFS_MKTAG(LFS_TYPE_STRUCT, file->id, 8), &file->ctz);
3115         if (tag < 0) {
3116             err = tag;
3117             goto cleanup;
3118         }
3119         lfs_ctz_fromle32(&file->ctz);
3120     }
3121 
3122     // fetch attrs
3123     for (unsigned i = 0; i < file->cfg->attr_count; i++) {
3124         // if opened for read / read-write operations
3125         if ((file->flags & LFS_O_RDONLY) == LFS_O_RDONLY) {
3126             lfs_stag_t res = lfs_dir_get(lfs, &file->m,
3127                     LFS_MKTAG(0x7ff, 0x3ff, 0),
3128                     LFS_MKTAG(LFS_TYPE_USERATTR + file->cfg->attrs[i].type,
3129                         file->id, file->cfg->attrs[i].size),
3130                         file->cfg->attrs[i].buffer);
3131             if (res < 0 && res != LFS_ERR_NOENT) {
3132                 err = res;
3133                 goto cleanup;
3134             }
3135         }
3136 
3137 #ifndef LFS_READONLY
3138         // if opened for write / read-write operations
3139         if ((file->flags & LFS_O_WRONLY) == LFS_O_WRONLY) {
3140             if (file->cfg->attrs[i].size > lfs->attr_max) {
3141                 err = LFS_ERR_NOSPC;
3142                 goto cleanup;
3143             }
3144 
3145             file->flags |= LFS_F_DIRTY;
3146         }
3147 #endif
3148     }
3149 
3150     // allocate buffer if needed
3151     if (file->cfg->buffer) {
3152         file->cache.buffer = file->cfg->buffer;
3153     } else {
3154         file->cache.buffer = lfs_malloc(lfs->cfg->cache_size);
3155         if (!file->cache.buffer) {
3156             err = LFS_ERR_NOMEM;
3157             goto cleanup;
3158         }
3159     }
3160 
3161     // zero to avoid information leak
3162     lfs_cache_zero(lfs, &file->cache);
3163 
3164     if (lfs_tag_type3(tag) == LFS_TYPE_INLINESTRUCT) {
3165         // load inline files
3166         file->ctz.head = LFS_BLOCK_INLINE;
3167         file->ctz.size = lfs_tag_size(tag);
3168         file->flags |= LFS_F_INLINE;
3169         file->cache.block = file->ctz.head;
3170         file->cache.off = 0;
3171         file->cache.size = lfs->cfg->cache_size;
3172 
3173         // don't always read (may be new/trunc file)
3174         if (file->ctz.size > 0) {
3175             lfs_stag_t res = lfs_dir_get(lfs, &file->m,
3176                     LFS_MKTAG(0x700, 0x3ff, 0),
3177                     LFS_MKTAG(LFS_TYPE_STRUCT, file->id,
3178                         lfs_min(file->cache.size, 0x3fe)),
3179                     file->cache.buffer);
3180             if (res < 0) {
3181                 err = res;
3182                 goto cleanup;
3183             }
3184         }
3185     }
3186 
3187     return 0;
3188 
3189 cleanup:
3190     // clean up lingering resources
3191 #ifndef LFS_READONLY
3192     file->flags |= LFS_F_ERRED;
3193 #endif
3194     lfs_file_close_(lfs, file);
3195     return err;
3196 }
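
// Caller-side sketch of the public lfs_file_opencfg wrapper over the function
// above (illustrative only; assumes a mounted lfs_t named lfs, a cache_size of
// 256, and user attribute type 0x74, none of which are requirements):
//
//     static uint8_t file_buffer[256];             // must match cfg->cache_size
//     uint32_t timestamp = 0;
//     struct lfs_attr attrs[] = {
//         {.type = 0x74, .buffer = &timestamp, .size = sizeof(timestamp)},
//     };
//     const struct lfs_file_config file_cfg = {
//         .buffer = file_buffer,
//         .attrs = attrs,
//         .attr_count = 1,
//     };
//
//     lfs_file_t file;
//     int err = lfs_file_opencfg(&lfs, &file, "log.bin",
//             LFS_O_RDWR | LFS_O_CREAT, &file_cfg);
//
// Attributes listed in file_cfg are read on open (when readable) and written
// back as part of sync/close, as seen in the attr handling above.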
3197 
3198 #ifndef LFS_NO_MALLOC
3199 static int lfs_file_open_(lfs_t *lfs, lfs_file_t *file,
3200         const char *path, int flags) {
3201     static const struct lfs_file_config defaults = {0};
3202     int err = lfs_file_opencfg_(lfs, file, path, flags, &defaults);
3203     return err;
3204 }
3205 #endif
3206 
3207 static int lfs_file_close_(lfs_t *lfs, lfs_file_t *file) {
3208 #ifndef LFS_READONLY
3209     int err = lfs_file_sync_(lfs, file);
3210 #else
3211     int err = 0;
3212 #endif
3213 
3214     // remove from list of mdirs
3215     lfs_mlist_remove(lfs, (struct lfs_mlist*)file);
3216 
3217     // clean up memory
3218     if (!file->cfg->buffer) {
3219         lfs_free(file->cache.buffer);
3220     }
3221 
3222     return err;
3223 }
3224 
3225 
3226 #ifndef LFS_READONLY
3227 static int lfs_file_relocate(lfs_t *lfs, lfs_file_t *file) {
3228     while (true) {
3229         // just relocate what exists into new block
3230         lfs_block_t nblock;
3231         int err = lfs_alloc(lfs, &nblock);
3232         if (err) {
3233             return err;
3234         }
3235 
3236         err = lfs_bd_erase(lfs, nblock);
3237         if (err) {
3238             if (err == LFS_ERR_CORRUPT) {
3239                 goto relocate;
3240             }
3241             return err;
3242         }
3243 
3244         // either read from dirty cache or disk
3245         for (lfs_off_t i = 0; i < file->off; i++) {
3246             uint8_t data;
3247             if (file->flags & LFS_F_INLINE) {
3248                 err = lfs_dir_getread(lfs, &file->m,
3249                         // note we evict inline files before they can be dirty
3250                         NULL, &file->cache, file->off-i,
3251                         LFS_MKTAG(0xfff, 0x1ff, 0),
3252                         LFS_MKTAG(LFS_TYPE_INLINESTRUCT, file->id, 0),
3253                         i, &data, 1);
3254                 if (err) {
3255                     return err;
3256                 }
3257             } else {
3258                 err = lfs_bd_read(lfs,
3259                         &file->cache, &lfs->rcache, file->off-i,
3260                         file->block, i, &data, 1);
3261                 if (err) {
3262                     return err;
3263                 }
3264             }
3265 
3266             err = lfs_bd_prog(lfs,
3267                     &lfs->pcache, &lfs->rcache, true,
3268                     nblock, i, &data, 1);
3269             if (err) {
3270                 if (err == LFS_ERR_CORRUPT) {
3271                     goto relocate;
3272                 }
3273                 return err;
3274             }
3275         }
3276 
3277         // copy over new state of file
3278         memcpy(file->cache.buffer, lfs->pcache.buffer, lfs->cfg->cache_size);
3279         file->cache.block = lfs->pcache.block;
3280         file->cache.off = lfs->pcache.off;
3281         file->cache.size = lfs->pcache.size;
3282         lfs_cache_zero(lfs, &lfs->pcache);
3283 
3284         file->block = nblock;
3285         file->flags |= LFS_F_WRITING;
3286         return 0;
3287 
3288 relocate:
3289         LFS_DEBUG("Bad block at 0x%"PRIx32, nblock);
3290 
3291         // just clear cache and try a new block
3292         lfs_cache_drop(lfs, &lfs->pcache);
3293     }
3294 }
3295 #endif
3296 
3297 #ifndef LFS_READONLY
3298 static int lfs_file_outline(lfs_t *lfs, lfs_file_t *file) {
3299     file->off = file->pos;
3300     lfs_alloc_ckpoint(lfs);
3301     int err = lfs_file_relocate(lfs, file);
3302     if (err) {
3303         return err;
3304     }
3305 
3306     file->flags &= ~LFS_F_INLINE;
3307     return 0;
3308 }
3309 #endif
3310 
3311 static int lfs_file_flush(lfs_t *lfs, lfs_file_t *file) {
3312     if (file->flags & LFS_F_READING) {
3313         if (!(file->flags & LFS_F_INLINE)) {
3314             lfs_cache_drop(lfs, &file->cache);
3315         }
3316         file->flags &= ~LFS_F_READING;
3317     }
3318 
3319 #ifndef LFS_READONLY
3320     if (file->flags & LFS_F_WRITING) {
3321         lfs_off_t pos = file->pos;
3322 
3323         if (!(file->flags & LFS_F_INLINE)) {
3324             // copy over anything after current branch
3325             lfs_file_t orig = {
3326                 .ctz.head = file->ctz.head,
3327                 .ctz.size = file->ctz.size,
3328                 .flags = LFS_O_RDONLY,
3329                 .pos = file->pos,
3330                 .cache = lfs->rcache,
3331             };
3332             lfs_cache_drop(lfs, &lfs->rcache);
3333 
3334             while (file->pos < file->ctz.size) {
3335                 // copy over a byte at a time, leave it up to caching
3336                 // to make this efficient
3337                 uint8_t data;
3338                 lfs_ssize_t res = lfs_file_flushedread(lfs, &orig, &data, 1);
3339                 if (res < 0) {
3340                     return res;
3341                 }
3342 
3343                 res = lfs_file_flushedwrite(lfs, file, &data, 1);
3344                 if (res < 0) {
3345                     return res;
3346                 }
3347 
3348                 // keep our reference to the rcache in sync
3349                 if (lfs->rcache.block != LFS_BLOCK_NULL) {
3350                     lfs_cache_drop(lfs, &orig.cache);
3351                     lfs_cache_drop(lfs, &lfs->rcache);
3352                 }
3353             }
3354 
3355             // write out what we have
3356             while (true) {
3357                 int err = lfs_bd_flush(lfs, &file->cache, &lfs->rcache, true);
3358                 if (err) {
3359                     if (err == LFS_ERR_CORRUPT) {
3360                         goto relocate;
3361                     }
3362                     return err;
3363                 }
3364 
3365                 break;
3366 
3367 relocate:
3368                 LFS_DEBUG("Bad block at 0x%"PRIx32, file->block);
3369                 err = lfs_file_relocate(lfs, file);
3370                 if (err) {
3371                     return err;
3372                 }
3373             }
3374         } else {
3375             file->pos = lfs_max(file->pos, file->ctz.size);
3376         }
3377 
3378         // actual file updates
3379         file->ctz.head = file->block;
3380         file->ctz.size = file->pos;
3381         file->flags &= ~LFS_F_WRITING;
3382         file->flags |= LFS_F_DIRTY;
3383 
3384         file->pos = pos;
3385     }
3386 #endif
3387 
3388     return 0;
3389 }
3390 
3391 #ifndef LFS_READONLY
3392 static int lfs_file_sync_(lfs_t *lfs, lfs_file_t *file) {
3393     if (file->flags & LFS_F_ERRED) {
3394         // it's not safe to do anything if our file errored
3395         return 0;
3396     }
3397 
3398     int err = lfs_file_flush(lfs, file);
3399     if (err) {
3400         file->flags |= LFS_F_ERRED;
3401         return err;
3402     }
3403 
3404 
3405     if ((file->flags & LFS_F_DIRTY) &&
3406             !lfs_pair_isnull(file->m.pair)) {
3407         // before we commit metadata, we need to sync the disk to make sure
3408         // data writes don't complete after metadata writes
3409         if (!(file->flags & LFS_F_INLINE)) {
3410             err = lfs_bd_sync(lfs, &lfs->pcache, &lfs->rcache, false);
3411             if (err) {
3412                 return err;
3413             }
3414         }
3415 
3416         // update dir entry
3417         uint16_t type;
3418         const void *buffer;
3419         lfs_size_t size;
3420         struct lfs_ctz ctz;
3421         if (file->flags & LFS_F_INLINE) {
3422             // inline the whole file
3423             type = LFS_TYPE_INLINESTRUCT;
3424             buffer = file->cache.buffer;
3425             size = file->ctz.size;
3426         } else {
3427             // update the ctz reference
3428             type = LFS_TYPE_CTZSTRUCT;
3429             // copy ctz so alloc will work during a relocate
3430             ctz = file->ctz;
3431             lfs_ctz_tole32(&ctz);
3432             buffer = &ctz;
3433             size = sizeof(ctz);
3434         }
3435 
3436         // commit file data and attributes
3437         err = lfs_dir_commit(lfs, &file->m, LFS_MKATTRS(
3438                 {LFS_MKTAG(type, file->id, size), buffer},
3439                 {LFS_MKTAG(LFS_FROM_USERATTRS, file->id,
3440                     file->cfg->attr_count), file->cfg->attrs}));
3441         if (err) {
3442             file->flags |= LFS_F_ERRED;
3443             return err;
3444         }
3445 
3446         file->flags &= ~LFS_F_DIRTY;
3447     }
3448 
3449     return 0;
3450 }
3451 #endif
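
// Caller-side sketch of the sync contract implemented above (illustrative
// only; buf and len are placeholders): written data is only guaranteed to be
// on disk once lfs_file_sync or lfs_file_close returns successfully, at which
// point the data blocks are synced before the metadata commit that references
// them.
//
//     lfs_ssize_t res = lfs_file_write(&lfs, &file, buf, len);
//     if (res < 0) { /* handle error */ }
//
//     int err = lfs_file_sync(&lfs, &file);    // flush caches + commit metadata
//     if (err) { /* handle error */ }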
3452 
3453 static lfs_ssize_t lfs_file_flushedread(lfs_t *lfs, lfs_file_t *file,
3454         void *buffer, lfs_size_t size) {
3455     uint8_t *data = buffer;
3456     lfs_size_t nsize = size;
3457 
3458     if (file->pos >= file->ctz.size) {
3459         // eof if past end
3460         return 0;
3461     }
3462 
3463     size = lfs_min(size, file->ctz.size - file->pos);
3464     nsize = size;
3465 
3466     while (nsize > 0) {
3467         // check if we need a new block
3468         if (!(file->flags & LFS_F_READING) ||
3469                 file->off == lfs->cfg->block_size) {
3470             if (!(file->flags & LFS_F_INLINE)) {
3471                 int err = lfs_ctz_find(lfs, NULL, &file->cache,
3472                         file->ctz.head, file->ctz.size,
3473                         file->pos, &file->block, &file->off);
3474                 if (err) {
3475                     return err;
3476                 }
3477             } else {
3478                 file->block = LFS_BLOCK_INLINE;
3479                 file->off = file->pos;
3480             }
3481 
3482             file->flags |= LFS_F_READING;
3483         }
3484 
3485         // read as much as we can in current block
3486         lfs_size_t diff = lfs_min(nsize, lfs->cfg->block_size - file->off);
3487         if (file->flags & LFS_F_INLINE) {
3488             int err = lfs_dir_getread(lfs, &file->m,
3489                     NULL, &file->cache, lfs->cfg->block_size,
3490                     LFS_MKTAG(0xfff, 0x1ff, 0),
3491                     LFS_MKTAG(LFS_TYPE_INLINESTRUCT, file->id, 0),
3492                     file->off, data, diff);
3493             if (err) {
3494                 return err;
3495             }
3496         } else {
3497             int err = lfs_bd_read(lfs,
3498                     NULL, &file->cache, lfs->cfg->block_size,
3499                     file->block, file->off, data, diff);
3500             if (err) {
3501                 return err;
3502             }
3503         }
3504 
3505         file->pos += diff;
3506         file->off += diff;
3507         data += diff;
3508         nsize -= diff;
3509     }
3510 
3511     return size;
3512 }
3513 
3514 static lfs_ssize_t lfs_file_read_(lfs_t *lfs, lfs_file_t *file,
3515         void *buffer, lfs_size_t size) {
3516     LFS_ASSERT((file->flags & LFS_O_RDONLY) == LFS_O_RDONLY);
3517 
3518 #ifndef LFS_READONLY
3519     if (file->flags & LFS_F_WRITING) {
3520         // flush out any writes
3521         int err = lfs_file_flush(lfs, file);
3522         if (err) {
3523             return err;
3524         }
3525     }
3526 #endif
3527 
3528     return lfs_file_flushedread(lfs, file, buffer, size);
3529 }
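
// Caller-side sketch of a read loop over the function above (illustrative
// only; consume() is a placeholder): lfs_file_read returns the number of
// bytes read, 0 at end-of-file, or a negative error code.
//
//     uint8_t chunk[64];
//     while (true) {
//         lfs_ssize_t res = lfs_file_read(&lfs, &file, chunk, sizeof(chunk));
//         if (res < 0) { /* handle error */ break; }
//         if (res == 0) { break; }              // end-of-file
//         consume(chunk, (size_t)res);          // placeholder consumer
//     }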
3530 
3531 
3532 #ifndef LFS_READONLY
3533 static lfs_ssize_t lfs_file_flushedwrite(lfs_t *lfs, lfs_file_t *file,
3534         const void *buffer, lfs_size_t size) {
3535     const uint8_t *data = buffer;
3536     lfs_size_t nsize = size;
3537 
3538     if ((file->flags & LFS_F_INLINE) &&
3539             lfs_max(file->pos+nsize, file->ctz.size) > lfs->inline_max) {
3540         // inline file doesn't fit anymore
3541         int err = lfs_file_outline(lfs, file);
3542         if (err) {
3543             file->flags |= LFS_F_ERRED;
3544             return err;
3545         }
3546     }
3547 
3548     while (nsize > 0) {
3549         // check if we need a new block
3550         if (!(file->flags & LFS_F_WRITING) ||
3551                 file->off == lfs->cfg->block_size) {
3552             if (!(file->flags & LFS_F_INLINE)) {
3553                 if (!(file->flags & LFS_F_WRITING) && file->pos > 0) {
3554                     // find out which block we're extending from
3555                     int err = lfs_ctz_find(lfs, NULL, &file->cache,
3556                             file->ctz.head, file->ctz.size,
3557                             file->pos-1, &file->block, &(lfs_off_t){0});
3558                     if (err) {
3559                         file->flags |= LFS_F_ERRED;
3560                         return err;
3561                     }
3562 
3563                     // mark cache as dirty since we may have read data into it
3564                     lfs_cache_zero(lfs, &file->cache);
3565                 }
3566 
3567                 // extend file with new blocks
3568                 lfs_alloc_ckpoint(lfs);
3569                 int err = lfs_ctz_extend(lfs, &file->cache, &lfs->rcache,
3570                         file->block, file->pos,
3571                         &file->block, &file->off);
3572                 if (err) {
3573                     file->flags |= LFS_F_ERRED;
3574                     return err;
3575                 }
3576             } else {
3577                 file->block = LFS_BLOCK_INLINE;
3578                 file->off = file->pos;
3579             }
3580 
3581             file->flags |= LFS_F_WRITING;
3582         }
3583 
3584         // program as much as we can in current block
3585         lfs_size_t diff = lfs_min(nsize, lfs->cfg->block_size - file->off);
3586         while (true) {
3587             int err = lfs_bd_prog(lfs, &file->cache, &lfs->rcache, true,
3588                     file->block, file->off, data, diff);
3589             if (err) {
3590                 if (err == LFS_ERR_CORRUPT) {
3591                     goto relocate;
3592                 }
3593                 file->flags |= LFS_F_ERRED;
3594                 return err;
3595             }
3596 
3597             break;
3598 relocate:
3599             err = lfs_file_relocate(lfs, file);
3600             if (err) {
3601                 file->flags |= LFS_F_ERRED;
3602                 return err;
3603             }
3604         }
3605 
3606         file->pos += diff;
3607         file->off += diff;
3608         data += diff;
3609         nsize -= diff;
3610 
3611         lfs_alloc_ckpoint(lfs);
3612     }
3613 
3614     return size;
3615 }
3616 
3617 static lfs_ssize_t lfs_file_write_(lfs_t *lfs, lfs_file_t *file,
3618         const void *buffer, lfs_size_t size) {
3619     LFS_ASSERT((file->flags & LFS_O_WRONLY) == LFS_O_WRONLY);
3620 
3621     if (file->flags & LFS_F_READING) {
3622         // drop any reads
3623         int err = lfs_file_flush(lfs, file);
3624         if (err) {
3625             return err;
3626         }
3627     }
3628 
3629     if ((file->flags & LFS_O_APPEND) && file->pos < file->ctz.size) {
3630         file->pos = file->ctz.size;
3631     }
3632 
3633     if (file->pos + size > lfs->file_max) {
3634         // Larger than file limit?
3635         return LFS_ERR_FBIG;
3636     }
3637 
3638     if (!(file->flags & LFS_F_WRITING) && file->pos > file->ctz.size) {
3639         // fill with zeros
3640         lfs_off_t pos = file->pos;
3641         file->pos = file->ctz.size;
3642 
3643         while (file->pos < pos) {
3644             lfs_ssize_t res = lfs_file_flushedwrite(lfs, file, &(uint8_t){0}, 1);
3645             if (res < 0) {
3646                 return res;
3647             }
3648         }
3649     }
3650 
3651     lfs_ssize_t nsize = lfs_file_flushedwrite(lfs, file, buffer, size);
3652     if (nsize < 0) {
3653         return nsize;
3654     }
3655 
3656     file->flags &= ~LFS_F_ERRED;
3657     return nsize;
3658 }
3659 #endif
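
// Caller-side sketch of the write behavior implemented above (illustrative
// only; assumes malloc is available so lfs_file_open can be used): with
// LFS_O_APPEND the position snaps to the end of the file before each write,
// and writing after seeking past the end implicitly fills the gap with zeros.
//
//     lfs_file_t file;
//     int err = lfs_file_open(&lfs, &file, "log.txt",
//             LFS_O_WRONLY | LFS_O_CREAT | LFS_O_APPEND);
//     if (err) { /* handle error */ }
//
//     lfs_ssize_t res = lfs_file_write(&lfs, &file, "entry\n", 6);
//     if (res < 0) { /* handle error */ }
//
//     err = lfs_file_close(&lfs, &file);        // data reaches disk here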
3660 
3661 static lfs_soff_t lfs_file_seek_(lfs_t *lfs, lfs_file_t *file,
3662         lfs_soff_t off, int whence) {
3663     // find new pos
3664     lfs_off_t npos = file->pos;
3665     if (whence == LFS_SEEK_SET) {
3666         npos = off;
3667     } else if (whence == LFS_SEEK_CUR) {
3668         if ((lfs_soff_t)file->pos + off < 0) {
3669             return LFS_ERR_INVAL;
3670         } else {
3671             npos = file->pos + off;
3672         }
3673     } else if (whence == LFS_SEEK_END) {
3674         lfs_soff_t res = lfs_file_size_(lfs, file) + off;
3675         if (res < 0) {
3676             return LFS_ERR_INVAL;
3677         } else {
3678             npos = res;
3679         }
3680     }
3681 
3682     if (npos > lfs->file_max) {
3683         // file position out of range
3684         return LFS_ERR_INVAL;
3685     }
3686 
3687     if (file->pos == npos) {
3688         // noop - position has not changed
3689         return npos;
3690     }
3691 
3692     // if we're only reading and our new offset is still in the file's cache
3693     // we can avoid flushing and needing to reread the data
3694     if (
3695 #ifndef LFS_READONLY
3696         !(file->flags & LFS_F_WRITING)
3697 #else
3698         true
3699 #endif
3700             ) {
3701         int oindex = lfs_ctz_index(lfs, &(lfs_off_t){file->pos});
3702         lfs_off_t noff = npos;
3703         int nindex = lfs_ctz_index(lfs, &noff);
3704         if (oindex == nindex
3705                 && noff >= file->cache.off
3706                 && noff < file->cache.off + file->cache.size) {
3707             file->pos = npos;
3708             file->off = noff;
3709             return npos;
3710         }
3711     }
3712 
3713     // write out everything beforehand, may be noop if rdonly
3714     int err = lfs_file_flush(lfs, file);
3715     if (err) {
3716         return err;
3717     }
3718 
3719     // update pos
3720     file->pos = npos;
3721     return npos;
3722 }
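
// Caller-side sketch of seek/tell over the function above (illustrative
// only): when no write is in flight and the new position maps to the same
// block and cached region, the seek avoids flushing and rereading.
//
//     lfs_soff_t pos = lfs_file_seek(&lfs, &file, 16, LFS_SEEK_SET);
//     if (pos < 0) { /* handle error */ }
//
//     pos = lfs_file_tell(&lfs, &file);          // 16 on success
//     lfs_soff_t end = lfs_file_seek(&lfs, &file, 0, LFS_SEEK_END);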
3723 
3724 #ifndef LFS_READONLY
3725 static int lfs_file_truncate_(lfs_t *lfs, lfs_file_t *file, lfs_off_t size) {
3726     LFS_ASSERT((file->flags & LFS_O_WRONLY) == LFS_O_WRONLY);
3727 
3728     if (size > LFS_FILE_MAX) {
3729         return LFS_ERR_INVAL;
3730     }
3731 
3732     lfs_off_t pos = file->pos;
3733     lfs_off_t oldsize = lfs_file_size_(lfs, file);
3734     if (size < oldsize) {
3735         // revert to inline file?
3736         if (size <= lfs->inline_max) {
3737             // flush+seek to head
3738             lfs_soff_t res = lfs_file_seek_(lfs, file, 0, LFS_SEEK_SET);
3739             if (res < 0) {
3740                 return (int)res;
3741             }
3742 
3743             // read our data into rcache temporarily
3744             lfs_cache_drop(lfs, &lfs->rcache);
3745             res = lfs_file_flushedread(lfs, file,
3746                     lfs->rcache.buffer, size);
3747             if (res < 0) {
3748                 return (int)res;
3749             }
3750 
3751             file->ctz.head = LFS_BLOCK_INLINE;
3752             file->ctz.size = size;
3753             file->flags |= LFS_F_DIRTY | LFS_F_READING | LFS_F_INLINE;
3754             file->cache.block = file->ctz.head;
3755             file->cache.off = 0;
3756             file->cache.size = lfs->cfg->cache_size;
3757             memcpy(file->cache.buffer, lfs->rcache.buffer, size);
3758 
3759         } else {
3760             // need to flush since directly changing metadata
3761             int err = lfs_file_flush(lfs, file);
3762             if (err) {
3763                 return err;
3764             }
3765 
3766             // lookup new head in ctz skip list
3767             err = lfs_ctz_find(lfs, NULL, &file->cache,
3768                     file->ctz.head, file->ctz.size,
3769                     size-1, &file->block, &(lfs_off_t){0});
3770             if (err) {
3771                 return err;
3772             }
3773 
3774             // need to set pos/block/off consistently so seeking back to
3775             // the old position does not get confused
3776             file->pos = size;
3777             file->ctz.head = file->block;
3778             file->ctz.size = size;
3779             file->flags |= LFS_F_DIRTY | LFS_F_READING;
3780         }
3781     } else if (size > oldsize) {
3782         // flush+seek if not already at end
3783         lfs_soff_t res = lfs_file_seek_(lfs, file, 0, LFS_SEEK_END);
3784         if (res < 0) {
3785             return (int)res;
3786         }
3787 
3788         // fill with zeros
3789         while (file->pos < size) {
3790             res = lfs_file_write_(lfs, file, &(uint8_t){0}, 1);
3791             if (res < 0) {
3792                 return (int)res;
3793             }
3794         }
3795     }
3796 
3797     // restore pos
3798     lfs_soff_t res = lfs_file_seek_(lfs, file, pos, LFS_SEEK_SET);
3799     if (res < 0) {
3800       return (int)res;
3801     }
3802 
3803     return 0;
3804 }
3805 #endif
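
// Caller-side sketch of truncate over the function above (illustrative only):
// shrinking may convert the file back to an inline file when the new size
// fits within inline_max, growing fills the new region with zeros, and the
// current file position is preserved across the call.
//
//     int err = lfs_file_truncate(&lfs, &file, 128);
//     if (err) { /* handle error */ }
//     lfs_soff_t size = lfs_file_size(&lfs, &file);   // 128 on success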
3806 
3807 static lfs_soff_t lfs_file_tell_(lfs_t *lfs, lfs_file_t *file) {
3808     (void)lfs;
3809     return file->pos;
3810 }
3811 
3812 static int lfs_file_rewind_(lfs_t *lfs, lfs_file_t *file) {
3813     lfs_soff_t res = lfs_file_seek_(lfs, file, 0, LFS_SEEK_SET);
3814     if (res < 0) {
3815         return (int)res;
3816     }
3817 
3818     return 0;
3819 }
3820 
3821 static lfs_soff_t lfs_file_size_(lfs_t *lfs, lfs_file_t *file) {
3822     (void)lfs;
3823 
3824 #ifndef LFS_READONLY
3825     if (file->flags & LFS_F_WRITING) {
3826         return lfs_max(file->pos, file->ctz.size);
3827     }
3828 #endif
3829 
3830     return file->ctz.size;
3831 }
3832 
3833 
3834 /// General fs operations ///
3835 static int lfs_stat_(lfs_t *lfs, const char *path, struct lfs_info *info) {
3836     lfs_mdir_t cwd;
3837     lfs_stag_t tag = lfs_dir_find(lfs, &cwd, &path, NULL);
3838     if (tag < 0) {
3839         return (int)tag;
3840     }
3841 
3842     return lfs_dir_getinfo(lfs, &cwd, lfs_tag_id(tag), info);
3843 }
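
// Caller-side sketch of the public lfs_stat wrapper over the function above
// (illustrative only; the path is a placeholder):
//
//     struct lfs_info info;
//     int err = lfs_stat(&lfs, "log.txt", &info);
//     if (err == 0) {
//         // info.type is LFS_TYPE_REG or LFS_TYPE_DIR,
//         // info.size is the size in bytes (meaningful for regular files),
//         // info.name holds the entry name
//     } else if (err == LFS_ERR_NOENT) {
//         // no such file or directory
//     }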
3844 
3845 #ifndef LFS_READONLY
3846 static int lfs_remove_(lfs_t *lfs, const char *path) {
3847     // deorphan if we haven't yet, needed at most once after poweron
3848     int err = lfs_fs_forceconsistency(lfs);
3849     if (err) {
3850         return err;
3851     }
3852 
3853     lfs_mdir_t cwd;
3854     lfs_stag_t tag = lfs_dir_find(lfs, &cwd, &path, NULL);
3855     if (tag < 0 || lfs_tag_id(tag) == 0x3ff) {
3856         return (tag < 0) ? (int)tag : LFS_ERR_INVAL;
3857     }
3858 
3859     struct lfs_mlist dir;
3860     dir.next = lfs->mlist;
3861     if (lfs_tag_type3(tag) == LFS_TYPE_DIR) {
3862         // must be empty before removal
3863         lfs_block_t pair[2];
3864         lfs_stag_t res = lfs_dir_get(lfs, &cwd, LFS_MKTAG(0x700, 0x3ff, 0),
3865                 LFS_MKTAG(LFS_TYPE_STRUCT, lfs_tag_id(tag), 8), pair);
3866         if (res < 0) {
3867             return (int)res;
3868         }
3869         lfs_pair_fromle32(pair);
3870 
3871         err = lfs_dir_fetch(lfs, &dir.m, pair);
3872         if (err) {
3873             return err;
3874         }
3875 
3876         if (dir.m.count > 0 || dir.m.split) {
3877             return LFS_ERR_NOTEMPTY;
3878         }
3879 
3880         // mark fs as orphaned
3881         err = lfs_fs_preporphans(lfs, +1);
3882         if (err) {
3883             return err;
3884         }
3885 
3886         // I know it's crazy but yes, dir can be changed by our parent's
3887         // commit (if predecessor is child)
3888         dir.type = 0;
3889         dir.id = 0;
3890         lfs->mlist = &dir;
3891     }
3892 
3893     // delete the entry
3894     err = lfs_dir_commit(lfs, &cwd, LFS_MKATTRS(
3895             {LFS_MKTAG(LFS_TYPE_DELETE, lfs_tag_id(tag), 0), NULL}));
3896     if (err) {
3897         lfs->mlist = dir.next;
3898         return err;
3899     }
3900 
3901     lfs->mlist = dir.next;
3902     if (lfs_tag_type3(tag) == LFS_TYPE_DIR) {
3903         // fix orphan
3904         err = lfs_fs_preporphans(lfs, -1);
3905         if (err) {
3906             return err;
3907         }
3908 
3909         err = lfs_fs_pred(lfs, dir.m.pair, &cwd);
3910         if (err) {
3911             return err;
3912         }
3913 
3914         err = lfs_dir_drop(lfs, &cwd, &dir.m);
3915         if (err) {
3916             return err;
3917         }
3918     }
3919 
3920     return 0;
3921 }
3922 #endif
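
// Caller-side sketch of remove over the function above (illustrative only):
// directories must be empty before they can be removed.
//
//     int err = lfs_remove(&lfs, "stale_dir");
//     if (err == LFS_ERR_NOTEMPTY) {
//         // directory still has entries, remove its contents first
//     } else if (err == LFS_ERR_NOENT) {
//         // nothing to remove
//     } else if (err) {
//         // other error
//     }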
3923 
3924 #ifndef LFS_READONLY
3925 static int lfs_rename_(lfs_t *lfs, const char *oldpath, const char *newpath) {
3926     // deorphan if we haven't yet, needed at most once after poweron
3927     int err = lfs_fs_forceconsistency(lfs);
3928     if (err) {
3929         return err;
3930     }
3931 
3932     // find old entry
3933     lfs_mdir_t oldcwd;
3934     lfs_stag_t oldtag = lfs_dir_find(lfs, &oldcwd, &oldpath, NULL);
3935     if (oldtag < 0 || lfs_tag_id(oldtag) == 0x3ff) {
3936         return (oldtag < 0) ? (int)oldtag : LFS_ERR_INVAL;
3937     }
3938 
3939     // find new entry
3940     lfs_mdir_t newcwd;
3941     uint16_t newid;
3942     lfs_stag_t prevtag = lfs_dir_find(lfs, &newcwd, &newpath, &newid);
3943     if ((prevtag < 0 || lfs_tag_id(prevtag) == 0x3ff) &&
3944             !(prevtag == LFS_ERR_NOENT && newid != 0x3ff)) {
3945         return (prevtag < 0) ? (int)prevtag : LFS_ERR_INVAL;
3946     }
3947 
3948     // if we're in the same pair there are a few special cases...
3949     bool samepair = (lfs_pair_cmp(oldcwd.pair, newcwd.pair) == 0);
3950     uint16_t newoldid = lfs_tag_id(oldtag);
3951 
3952     struct lfs_mlist prevdir;
3953     prevdir.next = lfs->mlist;
3954     if (prevtag == LFS_ERR_NOENT) {
3955         // check that name fits
3956         lfs_size_t nlen = strlen(newpath);
3957         if (nlen > lfs->name_max) {
3958             return LFS_ERR_NAMETOOLONG;
3959         }
3960 
3961         // there is a small chance we are being renamed in the same
3962         // directory to an id less than our old id, the global update
3963         // to handle this is a bit messy
3964         if (samepair && newid <= newoldid) {
3965             newoldid += 1;
3966         }
3967     } else if (lfs_tag_type3(prevtag) != lfs_tag_type3(oldtag)) {
3968         return (lfs_tag_type3(prevtag) == LFS_TYPE_DIR)
3969                 ? LFS_ERR_ISDIR
3970                 : LFS_ERR_NOTDIR;
3971     } else if (samepair && newid == newoldid) {
3972         // we're renaming to ourselves??
3973         return 0;
3974     } else if (lfs_tag_type3(prevtag) == LFS_TYPE_DIR) {
3975         // must be empty before removal
3976         lfs_block_t prevpair[2];
3977         lfs_stag_t res = lfs_dir_get(lfs, &newcwd, LFS_MKTAG(0x700, 0x3ff, 0),
3978                 LFS_MKTAG(LFS_TYPE_STRUCT, newid, 8), prevpair);
3979         if (res < 0) {
3980             return (int)res;
3981         }
3982         lfs_pair_fromle32(prevpair);
3983 
3984         // must be empty before removal
3985         err = lfs_dir_fetch(lfs, &prevdir.m, prevpair);
3986         if (err) {
3987             return err;
3988         }
3989 
3990         if (prevdir.m.count > 0 || prevdir.m.split) {
3991             return LFS_ERR_NOTEMPTY;
3992         }
3993 
3994         // mark fs as orphaned
3995         err = lfs_fs_preporphans(lfs, +1);
3996         if (err) {
3997             return err;
3998         }
3999 
4000         // I know it's crazy but yes, dir can be changed by our parent's
4001         // commit (if predecessor is child)
4002         prevdir.type = 0;
4003         prevdir.id = 0;
4004         lfs->mlist = &prevdir;
4005     }
4006 
4007     if (!samepair) {
4008         lfs_fs_prepmove(lfs, newoldid, oldcwd.pair);
4009     }
4010 
4011     // move over all attributes
4012     err = lfs_dir_commit(lfs, &newcwd, LFS_MKATTRS(
4013             {LFS_MKTAG_IF(prevtag != LFS_ERR_NOENT,
4014                 LFS_TYPE_DELETE, newid, 0), NULL},
4015             {LFS_MKTAG(LFS_TYPE_CREATE, newid, 0), NULL},
4016             {LFS_MKTAG(lfs_tag_type3(oldtag), newid, strlen(newpath)), newpath},
4017             {LFS_MKTAG(LFS_FROM_MOVE, newid, lfs_tag_id(oldtag)), &oldcwd},
4018             {LFS_MKTAG_IF(samepair,
4019                 LFS_TYPE_DELETE, newoldid, 0), NULL}));
4020     if (err) {
4021         lfs->mlist = prevdir.next;
4022         return err;
4023     }
4024 
4025     // let commit clean up after move (if we're different! otherwise move
4026     // logic already fixed it for us)
4027     if (!samepair && lfs_gstate_hasmove(&lfs->gstate)) {
4028         // prep gstate and delete move id
4029         lfs_fs_prepmove(lfs, 0x3ff, NULL);
4030         err = lfs_dir_commit(lfs, &oldcwd, LFS_MKATTRS(
4031                 {LFS_MKTAG(LFS_TYPE_DELETE, lfs_tag_id(oldtag), 0), NULL}));
4032         if (err) {
4033             lfs->mlist = prevdir.next;
4034             return err;
4035         }
4036     }
4037 
4038     lfs->mlist = prevdir.next;
4039     if (prevtag != LFS_ERR_NOENT
4040             && lfs_tag_type3(prevtag) == LFS_TYPE_DIR) {
4041         // fix orphan
4042         err = lfs_fs_preporphans(lfs, -1);
4043         if (err) {
4044             return err;
4045         }
4046 
4047         err = lfs_fs_pred(lfs, prevdir.m.pair, &newcwd);
4048         if (err) {
4049             return err;
4050         }
4051 
4052         err = lfs_dir_drop(lfs, &newcwd, &prevdir.m);
4053         if (err) {
4054             return err;
4055         }
4056     }
4057 
4058     return 0;
4059 }
4060 #endif
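
// Caller-side sketch of rename over the function above (illustrative only):
// renaming over an existing entry replaces it when the types match, and a
// replaced directory must be empty.
//
//     int err = lfs_rename(&lfs, "log.txt", "log.old");
//     if (err == LFS_ERR_ISDIR || err == LFS_ERR_NOTDIR) {
//         // source and destination types don't match
//     } else if (err == LFS_ERR_NOTEMPTY) {
//         // destination directory is not empty
//     } else if (err) {
//         // other error
//     }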
4061 
4062 static lfs_ssize_t lfs_getattr_(lfs_t *lfs, const char *path,
4063         uint8_t type, void *buffer, lfs_size_t size) {
4064     lfs_mdir_t cwd;
4065     lfs_stag_t tag = lfs_dir_find(lfs, &cwd, &path, NULL);
4066     if (tag < 0) {
4067         return tag;
4068     }
4069 
4070     uint16_t id = lfs_tag_id(tag);
4071     if (id == 0x3ff) {
4072         // special case for root
4073         id = 0;
4074         int err = lfs_dir_fetch(lfs, &cwd, lfs->root);
4075         if (err) {
4076             return err;
4077         }
4078     }
4079 
4080     tag = lfs_dir_get(lfs, &cwd, LFS_MKTAG(0x7ff, 0x3ff, 0),
4081             LFS_MKTAG(LFS_TYPE_USERATTR + type,
4082                 id, lfs_min(size, lfs->attr_max)),
4083             buffer);
4084     if (tag < 0) {
4085         if (tag == LFS_ERR_NOENT) {
4086             return LFS_ERR_NOATTR;
4087         }
4088 
4089         return tag;
4090     }
4091 
4092     return lfs_tag_size(tag);
4093 }
4094 
4095 #ifndef LFS_READONLY
4096 static int lfs_commitattr(lfs_t *lfs, const char *path,
4097         uint8_t type, const void *buffer, lfs_size_t size) {
4098     lfs_mdir_t cwd;
4099     lfs_stag_t tag = lfs_dir_find(lfs, &cwd, &path, NULL);
4100     if (tag < 0) {
4101         return tag;
4102     }
4103 
4104     uint16_t id = lfs_tag_id(tag);
4105     if (id == 0x3ff) {
4106         // special case for root
4107         id = 0;
4108         int err = lfs_dir_fetch(lfs, &cwd, lfs->root);
4109         if (err) {
4110             return err;
4111         }
4112     }
4113 
4114     return lfs_dir_commit(lfs, &cwd, LFS_MKATTRS(
4115             {LFS_MKTAG(LFS_TYPE_USERATTR + type, id, size), buffer}));
4116 }
4117 #endif
4118 
4119 #ifndef LFS_READONLY
4120 static int lfs_setattr_(lfs_t *lfs, const char *path,
4121         uint8_t type, const void *buffer, lfs_size_t size) {
4122     if (size > lfs->attr_max) {
4123         return LFS_ERR_NOSPC;
4124     }
4125 
4126     return lfs_commitattr(lfs, path, type, buffer, size);
4127 }
4128 #endif
4129 
4130 #ifndef LFS_READONLY
4131 static int lfs_removeattr_(lfs_t *lfs, const char *path, uint8_t type) {
4132     return lfs_commitattr(lfs, path, type, NULL, 0x3ff);
4133 }
4134 #endif
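
// Caller-side sketch of the custom attribute API wrapped by the functions
// above (illustrative only; attribute type 0x74 and its payload are
// assumptions): attributes larger than attr_max are rejected with
// LFS_ERR_NOSPC, and reading a missing attribute returns LFS_ERR_NOATTR.
//
//     uint32_t timestamp = 1234;
//     int err = lfs_setattr(&lfs, "log.txt", 0x74,
//             &timestamp, sizeof(timestamp));
//     if (err) { /* handle error */ }
//
//     uint32_t readback = 0;
//     lfs_ssize_t res = lfs_getattr(&lfs, "log.txt", 0x74,
//             &readback, sizeof(readback));
//     if (res == LFS_ERR_NOATTR) { /* attribute not set */ }
//
//     err = lfs_removeattr(&lfs, "log.txt", 0x74);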
4135 
4136 
4137 /// Filesystem operations ///
4138 
4139 // compile time checks, see lfs.h for why these limits exist
4140 #if LFS_NAME_MAX > 1022
4141 #error "Invalid LFS_NAME_MAX, must be <= 1022"
4142 #endif
4143 
4144 #if LFS_FILE_MAX > 2147483647
4145 #error "Invalid LFS_FILE_MAX, must be <= 2147483647"
4146 #endif
4147 
4148 #if LFS_ATTR_MAX > 1022
4149 #error "Invalid LFS_ATTR_MAX, must be <= 1022"
4150 #endif
4151 
4152 // common filesystem initialization
4153 static int lfs_init(lfs_t *lfs, const struct lfs_config *cfg) {
4154     lfs->cfg = cfg;
4155     lfs->block_count = cfg->block_count;  // May be 0
4156     int err = 0;
4157 
4158 #ifdef LFS_MULTIVERSION
4159     // this driver only supports minor version <= current minor version
4160     LFS_ASSERT(!lfs->cfg->disk_version || (
4161             (0xffff & (lfs->cfg->disk_version >> 16))
4162                     == LFS_DISK_VERSION_MAJOR
4163                 && (0xffff & (lfs->cfg->disk_version >> 0))
4164                     <= LFS_DISK_VERSION_MINOR));
4165 #endif
4166 
4167     // check that bool is a truthy-preserving type
4168     //
4169     // note the most common reason for this failure is a before-c99 compiler,
4170     // which littlefs currently does not support
4171     LFS_ASSERT((bool)0x80000000);
4172 
4173     // validate that the lfs-cfg sizes were initialized properly before
4174     // performing any arithmetic with them
4175     LFS_ASSERT(lfs->cfg->read_size != 0);
4176     LFS_ASSERT(lfs->cfg->prog_size != 0);
4177     LFS_ASSERT(lfs->cfg->cache_size != 0);
4178 
4179     // check that the block size is a multiple of the cache size, which in
4180     // turn must be a multiple of the prog and read sizes
4181     LFS_ASSERT(lfs->cfg->cache_size % lfs->cfg->read_size == 0);
4182     LFS_ASSERT(lfs->cfg->cache_size % lfs->cfg->prog_size == 0);
4183     LFS_ASSERT(lfs->cfg->block_size % lfs->cfg->cache_size == 0);
4184 
4185     // check that the block size is large enough to fit all ctz pointers
4186     LFS_ASSERT(lfs->cfg->block_size >= 128);
4187     // this is the exact calculation for all ctz pointers, if this fails
4188     // and the simpler assert above does not, math must be broken
4189     LFS_ASSERT(4*lfs_npw2(0xffffffff / (lfs->cfg->block_size-2*4))
4190             <= lfs->cfg->block_size);
4191 
4192     // block_cycles = 0 is no longer supported.
4193     //
4194     // block_cycles is the number of erase cycles before littlefs evicts
4195     // metadata logs as a part of wear leveling. Suggested values are in the
4196     // range of 100-1000, or set block_cycles to -1 to disable block-level
4197     // wear-leveling.
4198     LFS_ASSERT(lfs->cfg->block_cycles != 0);
4199 
4200     // check that compact_thresh makes sense
4201     //
4202     // metadata can't be compacted below block_size/2, and metadata can't
4203     // exceed a block_size
4204     LFS_ASSERT(lfs->cfg->compact_thresh == 0
4205             || lfs->cfg->compact_thresh >= lfs->cfg->block_size/2);
4206     LFS_ASSERT(lfs->cfg->compact_thresh == (lfs_size_t)-1
4207             || lfs->cfg->compact_thresh <= lfs->cfg->block_size);
4208 
4209     // setup read cache
4210     if (lfs->cfg->read_buffer) {
4211         lfs->rcache.buffer = lfs->cfg->read_buffer;
4212     } else {
4213         lfs->rcache.buffer = lfs_malloc(lfs->cfg->cache_size);
4214         if (!lfs->rcache.buffer) {
4215             err = LFS_ERR_NOMEM;
4216             goto cleanup;
4217         }
4218     }
4219 
4220     // setup program cache
4221     if (lfs->cfg->prog_buffer) {
4222         lfs->pcache.buffer = lfs->cfg->prog_buffer;
4223     } else {
4224         lfs->pcache.buffer = lfs_malloc(lfs->cfg->cache_size);
4225         if (!lfs->pcache.buffer) {
4226             err = LFS_ERR_NOMEM;
4227             goto cleanup;
4228         }
4229     }
4230 
4231     // zero to avoid information leaks
4232     lfs_cache_zero(lfs, &lfs->rcache);
4233     lfs_cache_zero(lfs, &lfs->pcache);
4234 
4235     // setup lookahead buffer, note mount finishes initializing this after
4236     // we establish a decent pseudo-random seed
4237     LFS_ASSERT(lfs->cfg->lookahead_size > 0);
4238     if (lfs->cfg->lookahead_buffer) {
4239         lfs->lookahead.buffer = lfs->cfg->lookahead_buffer;
4240     } else {
4241         lfs->lookahead.buffer = lfs_malloc(lfs->cfg->lookahead_size);
4242         if (!lfs->lookahead.buffer) {
4243             err = LFS_ERR_NOMEM;
4244             goto cleanup;
4245         }
4246     }
4247 
4248     // check that the size limits are sane
4249     LFS_ASSERT(lfs->cfg->name_max <= LFS_NAME_MAX);
4250     lfs->name_max = lfs->cfg->name_max;
4251     if (!lfs->name_max) {
4252         lfs->name_max = LFS_NAME_MAX;
4253     }
4254 
4255     LFS_ASSERT(lfs->cfg->file_max <= LFS_FILE_MAX);
4256     lfs->file_max = lfs->cfg->file_max;
4257     if (!lfs->file_max) {
4258         lfs->file_max = LFS_FILE_MAX;
4259     }
4260 
4261     LFS_ASSERT(lfs->cfg->attr_max <= LFS_ATTR_MAX);
4262     lfs->attr_max = lfs->cfg->attr_max;
4263     if (!lfs->attr_max) {
4264         lfs->attr_max = LFS_ATTR_MAX;
4265     }
4266 
4267     LFS_ASSERT(lfs->cfg->metadata_max <= lfs->cfg->block_size);
4268 
4269     LFS_ASSERT(lfs->cfg->inline_max == (lfs_size_t)-1
4270             || lfs->cfg->inline_max <= lfs->cfg->cache_size);
4271     LFS_ASSERT(lfs->cfg->inline_max == (lfs_size_t)-1
4272             || lfs->cfg->inline_max <= lfs->attr_max);
4273     LFS_ASSERT(lfs->cfg->inline_max == (lfs_size_t)-1
4274             || lfs->cfg->inline_max <= ((lfs->cfg->metadata_max)
4275                 ? lfs->cfg->metadata_max
4276                 : lfs->cfg->block_size)/8);
4277     lfs->inline_max = lfs->cfg->inline_max;
4278     if (lfs->inline_max == (lfs_size_t)-1) {
4279         lfs->inline_max = 0;
4280     } else if (lfs->inline_max == 0) {
4281         lfs->inline_max = lfs_min(
4282                 lfs->cfg->cache_size,
4283                 lfs_min(
4284                     lfs->attr_max,
4285                     ((lfs->cfg->metadata_max)
4286                         ? lfs->cfg->metadata_max
4287                         : lfs->cfg->block_size)/8));
4288     }
4289 
4290     // setup default state
4291     lfs->root[0] = LFS_BLOCK_NULL;
4292     lfs->root[1] = LFS_BLOCK_NULL;
4293     lfs->mlist = NULL;
4294     lfs->seed = 0;
4295     lfs->gdisk = (lfs_gstate_t){0};
4296     lfs->gstate = (lfs_gstate_t){0};
4297     lfs->gdelta = (lfs_gstate_t){0};
4298 #ifdef LFS_MIGRATE
4299     lfs->lfs1 = NULL;
4300 #endif
4301 
4302     return 0;
4303 
4304 cleanup:
4305     lfs_deinit(lfs);
4306     return err;
4307 }
4308 
4309 static int lfs_deinit(lfs_t *lfs) {
4310     // free allocated memory
4311     if (!lfs->cfg->read_buffer) {
4312         lfs_free(lfs->rcache.buffer);
4313     }
4314 
4315     if (!lfs->cfg->prog_buffer) {
4316         lfs_free(lfs->pcache.buffer);
4317     }
4318 
4319     if (!lfs->cfg->lookahead_buffer) {
4320         lfs_free(lfs->lookahead.buffer);
4321     }
4322 
4323     return 0;
4324 }
4325 
4326 
4327 
4328 #ifndef LFS_READONLY
4329 static int lfs_format_(lfs_t *lfs, const struct lfs_config *cfg) {
4330     int err = 0;
4331     {
4332         err = lfs_init(lfs, cfg);
4333         if (err) {
4334             return err;
4335         }
4336 
4337         LFS_ASSERT(cfg->block_count != 0);
4338 
4339         // create free lookahead
4340         memset(lfs->lookahead.buffer, 0, lfs->cfg->lookahead_size);
4341         lfs->lookahead.start = 0;
4342         lfs->lookahead.size = lfs_min(8*lfs->cfg->lookahead_size,
4343                 lfs->block_count);
4344         lfs->lookahead.next = 0;
4345         lfs_alloc_ckpoint(lfs);
4346 
4347         // create root dir
4348         lfs_mdir_t root;
4349         err = lfs_dir_alloc(lfs, &root);
4350         if (err) {
4351             goto cleanup;
4352         }
4353 
4354         // write one superblock
4355         lfs_superblock_t superblock = {
4356             .version     = lfs_fs_disk_version(lfs),
4357             .block_size  = lfs->cfg->block_size,
4358             .block_count = lfs->block_count,
4359             .name_max    = lfs->name_max,
4360             .file_max    = lfs->file_max,
4361             .attr_max    = lfs->attr_max,
4362         };
4363 
4364         lfs_superblock_tole32(&superblock);
4365         err = lfs_dir_commit(lfs, &root, LFS_MKATTRS(
4366                 {LFS_MKTAG(LFS_TYPE_CREATE, 0, 0), NULL},
4367                 {LFS_MKTAG(LFS_TYPE_SUPERBLOCK, 0, 8), "littlefs"},
4368                 {LFS_MKTAG(LFS_TYPE_INLINESTRUCT, 0, sizeof(superblock)),
4369                     &superblock}));
4370         if (err) {
4371             goto cleanup;
4372         }
4373 
4374         // force compaction to prevent accidentally mounting any
4375         // older version of littlefs that may live on disk
4376         root.erased = false;
4377         err = lfs_dir_commit(lfs, &root, NULL, 0);
4378         if (err) {
4379             goto cleanup;
4380         }
4381 
4382         // sanity check that fetch works
4383         err = lfs_dir_fetch(lfs, &root, (const lfs_block_t[2]){0, 1});
4384         if (err) {
4385             goto cleanup;
4386         }
4387     }
4388 
4389 cleanup:
4390     lfs_deinit(lfs);
4391     return err;
4392 
4393 }
4394 #endif
4395 
4396 static int lfs_mount_(lfs_t *lfs, const struct lfs_config *cfg) {
4397     int err = lfs_init(lfs, cfg);
4398     if (err) {
4399         return err;
4400     }
4401 
4402     // scan directory blocks for superblock and any global updates
4403     lfs_mdir_t dir = {.tail = {0, 1}};
4404     lfs_block_t tortoise[2] = {LFS_BLOCK_NULL, LFS_BLOCK_NULL};
4405     lfs_size_t tortoise_i = 1;
4406     lfs_size_t tortoise_period = 1;
4407     while (!lfs_pair_isnull(dir.tail)) {
4408         // detect cycles with Brent's algorithm
4409         if (lfs_pair_issync(dir.tail, tortoise)) {
4410             LFS_WARN("Cycle detected in tail list");
4411             err = LFS_ERR_CORRUPT;
4412             goto cleanup;
4413         }
4414         if (tortoise_i == tortoise_period) {
4415             tortoise[0] = dir.tail[0];
4416             tortoise[1] = dir.tail[1];
4417             tortoise_i = 0;
4418             tortoise_period *= 2;
4419         }
4420         tortoise_i += 1;
4421 
4422         // fetch next block in tail list
4423         lfs_stag_t tag = lfs_dir_fetchmatch(lfs, &dir, dir.tail,
4424                 LFS_MKTAG(0x7ff, 0x3ff, 0),
4425                 LFS_MKTAG(LFS_TYPE_SUPERBLOCK, 0, 8),
4426                 NULL,
4427                 lfs_dir_find_match, &(struct lfs_dir_find_match){
4428                     lfs, "littlefs", 8});
4429         if (tag < 0) {
4430             err = tag;
4431             goto cleanup;
4432         }
4433 
4434         // has superblock?
4435         if (tag && !lfs_tag_isdelete(tag)) {
4436             // update root
4437             lfs->root[0] = dir.pair[0];
4438             lfs->root[1] = dir.pair[1];
4439 
4440             // grab superblock
4441             lfs_superblock_t superblock;
4442             tag = lfs_dir_get(lfs, &dir, LFS_MKTAG(0x7ff, 0x3ff, 0),
4443                     LFS_MKTAG(LFS_TYPE_INLINESTRUCT, 0, sizeof(superblock)),
4444                     &superblock);
4445             if (tag < 0) {
4446                 err = tag;
4447                 goto cleanup;
4448             }
4449             lfs_superblock_fromle32(&superblock);
4450 
4451             // check version
4452             uint16_t major_version = (0xffff & (superblock.version >> 16));
4453             uint16_t minor_version = (0xffff & (superblock.version >>  0));
4454             if (major_version != lfs_fs_disk_version_major(lfs)
4455                     || minor_version > lfs_fs_disk_version_minor(lfs)) {
4456                 LFS_ERROR("Invalid version "
4457                         "v%"PRIu16".%"PRIu16" != v%"PRIu16".%"PRIu16,
4458                         major_version,
4459                         minor_version,
4460                         lfs_fs_disk_version_major(lfs),
4461                         lfs_fs_disk_version_minor(lfs));
4462                 err = LFS_ERR_INVAL;
4463                 goto cleanup;
4464             }
4465 
4466             // found older minor version? set an in-device only bit in the
4467             // gstate so we know we need to rewrite the superblock before
4468             // the first write
4469             if (minor_version < lfs_fs_disk_version_minor(lfs)) {
4470                 LFS_DEBUG("Found older minor version "
4471                         "v%"PRIu16".%"PRIu16" < v%"PRIu16".%"PRIu16,
4472                         major_version,
4473                         minor_version,
4474                         lfs_fs_disk_version_major(lfs),
4475                         lfs_fs_disk_version_minor(lfs));
4476                 // note this bit is reserved on disk, so fetching more gstate
4477                 // will not interfere here
4478                 lfs_fs_prepsuperblock(lfs, true);
4479             }
4480 
4481             // check superblock configuration
4482             if (superblock.name_max) {
4483                 if (superblock.name_max > lfs->name_max) {
4484                     LFS_ERROR("Unsupported name_max (%"PRIu32" > %"PRIu32")",
4485                             superblock.name_max, lfs->name_max);
4486                     err = LFS_ERR_INVAL;
4487                     goto cleanup;
4488                 }
4489 
4490                 lfs->name_max = superblock.name_max;
4491             }
4492 
4493             if (superblock.file_max) {
4494                 if (superblock.file_max > lfs->file_max) {
4495                     LFS_ERROR("Unsupported file_max (%"PRIu32" > %"PRIu32")",
4496                             superblock.file_max, lfs->file_max);
4497                     err = LFS_ERR_INVAL;
4498                     goto cleanup;
4499                 }
4500 
4501                 lfs->file_max = superblock.file_max;
4502             }
4503 
4504             if (superblock.attr_max) {
4505                 if (superblock.attr_max > lfs->attr_max) {
4506                     LFS_ERROR("Unsupported attr_max (%"PRIu32" > %"PRIu32")",
4507                             superblock.attr_max, lfs->attr_max);
4508                     err = LFS_ERR_INVAL;
4509                     goto cleanup;
4510                 }
4511 
4512                 lfs->attr_max = superblock.attr_max;
4513 
4514                 // we also need to update inline_max in case attr_max changed
4515                 lfs->inline_max = lfs_min(lfs->inline_max, lfs->attr_max);
4516             }
4517 
4518             // this is where we get the block_count from disk if block_count=0
4519             if (lfs->cfg->block_count
4520                     && superblock.block_count != lfs->cfg->block_count) {
4521                 LFS_ERROR("Invalid block count (%"PRIu32" != %"PRIu32")",
4522                         superblock.block_count, lfs->cfg->block_count);
4523                 err = LFS_ERR_INVAL;
4524                 goto cleanup;
4525             }
4526 
4527             lfs->block_count = superblock.block_count;
4528 
4529             if (superblock.block_size != lfs->cfg->block_size) {
4530                 LFS_ERROR("Invalid block size (%"PRIu32" != %"PRIu32")",
4531                         superblock.block_size, lfs->cfg->block_size);
4532                 err = LFS_ERR_INVAL;
4533                 goto cleanup;
4534             }
4535         }
4536 
4537         // has gstate?
4538         err = lfs_dir_getgstate(lfs, &dir, &lfs->gstate);
4539         if (err) {
4540             goto cleanup;
4541         }
4542     }
4543 
4544     // update littlefs with gstate
4545     if (!lfs_gstate_iszero(&lfs->gstate)) {
4546         LFS_DEBUG("Found pending gstate 0x%08"PRIx32"%08"PRIx32"%08"PRIx32,
4547                 lfs->gstate.tag,
4548                 lfs->gstate.pair[0],
4549                 lfs->gstate.pair[1]);
4550     }
4551     lfs->gstate.tag += !lfs_tag_isvalid(lfs->gstate.tag);
4552     lfs->gdisk = lfs->gstate;
4553 
4554     // setup free lookahead; to distribute allocations uniformly across
4555     // boots, we start the allocator at a random location
4556     lfs->lookahead.start = lfs->seed % lfs->block_count;
4557     lfs_alloc_drop(lfs);
4558 
4559     return 0;
4560 
4561 cleanup:
4562     lfs_unmount_(lfs);
4563     return err;
4564 }
4565 
4566 static int lfs_unmount_(lfs_t *lfs) {
4567     return lfs_deinit(lfs);
4568 }
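
// Caller-side sketch of bringing up a filesystem with the functions above
// (illustrative only; the geometry values and block device callbacks are
// placeholders for a real port): formatting is only needed when mount fails,
// typically on first boot.
//
//     const struct lfs_config cfg = {
//         .read  = bd_read,  .prog = bd_prog,    // hypothetical callbacks
//         .erase = bd_erase, .sync = bd_sync,
//         .read_size      = 16,
//         .prog_size      = 16,
//         .block_size     = 4096,
//         .block_count    = 128,
//         .block_cycles   = 500,
//         .cache_size     = 16,
//         .lookahead_size = 16,
//     };
//
//     lfs_t lfs;
//     int err = lfs_mount(&lfs, &cfg);
//     if (err) {                        // first boot or unrecognized superblock
//         err = lfs_format(&lfs, &cfg);
//         if (err) { /* handle error */ }
//         err = lfs_mount(&lfs, &cfg);
//         if (err) { /* handle error */ }
//     }
//     // ... use the filesystem ...
//     lfs_unmount(&lfs);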
4569 
4570 
4571 /// Filesystem-level operations (lfs_fs_*) ///
4572 static int lfs_fs_stat_(lfs_t *lfs, struct lfs_fsinfo *fsinfo) {
4573     // if the superblock is up-to-date, we must be on the most recent
4574     // minor version of littlefs
4575     if (!lfs_gstate_needssuperblock(&lfs->gstate)) {
4576         fsinfo->disk_version = lfs_fs_disk_version(lfs);
4577 
4578     // otherwise we need to read the minor version on disk
4579     } else {
4580         // fetch the superblock
4581         lfs_mdir_t dir;
4582         int err = lfs_dir_fetch(lfs, &dir, lfs->root);
4583         if (err) {
4584             return err;
4585         }
4586 
4587         lfs_superblock_t superblock;
4588         lfs_stag_t tag = lfs_dir_get(lfs, &dir, LFS_MKTAG(0x7ff, 0x3ff, 0),
4589                 LFS_MKTAG(LFS_TYPE_INLINESTRUCT, 0, sizeof(superblock)),
4590                 &superblock);
4591         if (tag < 0) {
4592             return tag;
4593         }
4594         lfs_superblock_fromle32(&superblock);
4595 
4596         // read the on-disk version
4597         fsinfo->disk_version = superblock.version;
4598     }
4599 
4600     // filesystem geometry
4601     fsinfo->block_size = lfs->cfg->block_size;
4602     fsinfo->block_count = lfs->block_count;
4603 
4604     // other on-disk configuration, we cache all of these for internal use
4605     fsinfo->name_max = lfs->name_max;
4606     fsinfo->file_max = lfs->file_max;
4607     fsinfo->attr_max = lfs->attr_max;
4608 
4609     return 0;
4610 }
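
// Caller-side sketch of the public lfs_fs_stat wrapper over the function
// above (illustrative only):
//
//     struct lfs_fsinfo fsinfo;
//     int err = lfs_fs_stat(&lfs, &fsinfo);
//     if (err == 0) {
//         // fsinfo.disk_version, fsinfo.block_size, fsinfo.block_count,
//         // fsinfo.name_max, fsinfo.file_max, fsinfo.attr_max
//     }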
4611 
4612 int lfs_fs_traverse_(lfs_t *lfs,
4613         int (*cb)(void *data, lfs_block_t block), void *data,
4614         bool includeorphans) {
4615     // iterate over metadata pairs
4616     lfs_mdir_t dir = {.tail = {0, 1}};
4617 
4618 #ifdef LFS_MIGRATE
4619     // also consider v1 blocks during migration
4620     if (lfs->lfs1) {
4621         int err = lfs1_traverse(lfs, cb, data);
4622         if (err) {
4623             return err;
4624         }
4625 
4626         dir.tail[0] = lfs->root[0];
4627         dir.tail[1] = lfs->root[1];
4628     }
4629 #endif
4630 
4631     lfs_block_t tortoise[2] = {LFS_BLOCK_NULL, LFS_BLOCK_NULL};
4632     lfs_size_t tortoise_i = 1;
4633     lfs_size_t tortoise_period = 1;
4634     while (!lfs_pair_isnull(dir.tail)) {
4635         // detect cycles with Brent's algorithm
4636         if (lfs_pair_issync(dir.tail, tortoise)) {
4637             LFS_WARN("Cycle detected in tail list");
4638             return LFS_ERR_CORRUPT;
4639         }
4640         if (tortoise_i == tortoise_period) {
4641             tortoise[0] = dir.tail[0];
4642             tortoise[1] = dir.tail[1];
4643             tortoise_i = 0;
4644             tortoise_period *= 2;
4645         }
4646         tortoise_i += 1;
4647 
4648         for (int i = 0; i < 2; i++) {
4649             int err = cb(data, dir.tail[i]);
4650             if (err) {
4651                 return err;
4652             }
4653         }
4654 
4655         // iterate through ids in directory
4656         int err = lfs_dir_fetch(lfs, &dir, dir.tail);
4657         if (err) {
4658             return err;
4659         }
4660 
4661         for (uint16_t id = 0; id < dir.count; id++) {
4662             struct lfs_ctz ctz;
4663             lfs_stag_t tag = lfs_dir_get(lfs, &dir, LFS_MKTAG(0x700, 0x3ff, 0),
4664                     LFS_MKTAG(LFS_TYPE_STRUCT, id, sizeof(ctz)), &ctz);
4665             if (tag < 0) {
4666                 if (tag == LFS_ERR_NOENT) {
4667                     continue;
4668                 }
4669                 return tag;
4670             }
4671             lfs_ctz_fromle32(&ctz);
4672 
4673             if (lfs_tag_type3(tag) == LFS_TYPE_CTZSTRUCT) {
4674                 err = lfs_ctz_traverse(lfs, NULL, &lfs->rcache,
4675                         ctz.head, ctz.size, cb, data);
4676                 if (err) {
4677                     return err;
4678                 }
4679             } else if (includeorphans &&
4680                     lfs_tag_type3(tag) == LFS_TYPE_DIRSTRUCT) {
4681                 for (int i = 0; i < 2; i++) {
4682                     err = cb(data, (&ctz.head)[i]);
4683                     if (err) {
4684                         return err;
4685                     }
4686                 }
4687             }
4688         }
4689     }
4690 
4691 #ifndef LFS_READONLY
4692     // iterate over any open files
4693     for (lfs_file_t *f = (lfs_file_t*)lfs->mlist; f; f = f->next) {
4694         if (f->type != LFS_TYPE_REG) {
4695             continue;
4696         }
4697 
4698         if ((f->flags & LFS_F_DIRTY) && !(f->flags & LFS_F_INLINE)) {
4699             int err = lfs_ctz_traverse(lfs, &f->cache, &lfs->rcache,
4700                     f->ctz.head, f->ctz.size, cb, data);
4701             if (err) {
4702                 return err;
4703             }
4704         }
4705 
4706         if ((f->flags & LFS_F_WRITING) && !(f->flags & LFS_F_INLINE)) {
4707             int err = lfs_ctz_traverse(lfs, &f->cache, &lfs->rcache,
4708                     f->block, f->pos, cb, data);
4709             if (err) {
4710                 return err;
4711             }
4712         }
4713     }
4714 #endif
4715 
4716     return 0;
4717 }
4718 
4719 #ifndef LFS_READONLY
4720 static int lfs_fs_pred(lfs_t *lfs,
4721         const lfs_block_t pair[2], lfs_mdir_t *pdir) {
4722     // iterate over all directories
4723     pdir->tail[0] = 0;
4724     pdir->tail[1] = 1;
4725     lfs_block_t tortoise[2] = {LFS_BLOCK_NULL, LFS_BLOCK_NULL};
4726     lfs_size_t tortoise_i = 1;
4727     lfs_size_t tortoise_period = 1;
4728     while (!lfs_pair_isnull(pdir->tail)) {
4729         // detect cycles with Brent's algorithm
4730         if (lfs_pair_issync(pdir->tail, tortoise)) {
4731             LFS_WARN("Cycle detected in tail list");
4732             return LFS_ERR_CORRUPT;
4733         }
4734         if (tortoise_i == tortoise_period) {
4735             tortoise[0] = pdir->tail[0];
4736             tortoise[1] = pdir->tail[1];
4737             tortoise_i = 0;
4738             tortoise_period *= 2;
4739         }
4740         tortoise_i += 1;
4741 
4742         if (lfs_pair_cmp(pdir->tail, pair) == 0) {
4743             return 0;
4744         }
4745 
4746         int err = lfs_dir_fetch(lfs, pdir, pdir->tail);
4747         if (err) {
4748             return err;
4749         }
4750     }
4751 
4752     return LFS_ERR_NOENT;
4753 }
4754 #endif
4755 
4756 #ifndef LFS_READONLY
4757 struct lfs_fs_parent_match {
4758     lfs_t *lfs;
4759     const lfs_block_t pair[2];
4760 };
4761 #endif
4762 
4763 #ifndef LFS_READONLY
4764 static int lfs_fs_parent_match(void *data,
4765         lfs_tag_t tag, const void *buffer) {
4766     struct lfs_fs_parent_match *find = data;
4767     lfs_t *lfs = find->lfs;
4768     const struct lfs_diskoff *disk = buffer;
4769     (void)tag;
4770 
4771     lfs_block_t child[2];
4772     int err = lfs_bd_read(lfs,
4773             &lfs->pcache, &lfs->rcache, lfs->cfg->block_size,
4774             disk->block, disk->off, &child, sizeof(child));
4775     if (err) {
4776         return err;
4777     }
4778 
4779     lfs_pair_fromle32(child);
4780     return (lfs_pair_cmp(child, find->pair) == 0) ? LFS_CMP_EQ : LFS_CMP_LT;
4781 }
4782 #endif
4783 
4784 #ifndef LFS_READONLY
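// Find the metadata pair whose directory entry points at the given pair by
// scanning the tail list with lfs_dir_fetchmatch and the parent-match
// callback above. Returns the matching tag on success, or LFS_ERR_NOENT if
// nothing references the pair (i.e. the pair is orphaned).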
4785 static lfs_stag_t lfs_fs_parent(lfs_t *lfs, const lfs_block_t pair[2],
4786         lfs_mdir_t *parent) {
4787     // use fetchmatch with callback to find pairs
4788     parent->tail[0] = 0;
4789     parent->tail[1] = 1;
4790     lfs_block_t tortoise[2] = {LFS_BLOCK_NULL, LFS_BLOCK_NULL};
4791     lfs_size_t tortoise_i = 1;
4792     lfs_size_t tortoise_period = 1;
4793     while (!lfs_pair_isnull(parent->tail)) {
4794         // detect cycles with Brent's algorithm
4795         if (lfs_pair_issync(parent->tail, tortoise)) {
4796             LFS_WARN("Cycle detected in tail list");
4797             return LFS_ERR_CORRUPT;
4798         }
4799         if (tortoise_i == tortoise_period) {
4800             tortoise[0] = parent->tail[0];
4801             tortoise[1] = parent->tail[1];
4802             tortoise_i = 0;
4803             tortoise_period *= 2;
4804         }
4805         tortoise_i += 1;
4806 
4807         lfs_stag_t tag = lfs_dir_fetchmatch(lfs, parent, parent->tail,
4808                 LFS_MKTAG(0x7ff, 0, 0x3ff),
4809                 LFS_MKTAG(LFS_TYPE_DIRSTRUCT, 0, 8),
4810                 NULL,
4811                 lfs_fs_parent_match, &(struct lfs_fs_parent_match){
4812                     lfs, {pair[0], pair[1]}});
4813         if (tag && tag != LFS_ERR_NOENT) {
4814             return tag;
4815         }
4816     }
4817 
4818     return LFS_ERR_NOENT;
4819 }
4820 #endif
4821 
4822 static void lfs_fs_prepsuperblock(lfs_t *lfs, bool needssuperblock) {
4823     lfs->gstate.tag = (lfs->gstate.tag & ~LFS_MKTAG(0, 0, 0x200))
4824             | (uint32_t)needssuperblock << 9;
4825 }
4826 
4827 #ifndef LFS_READONLY
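// Adjust the pending orphan count by the given delta. The count lives in the
// size field of the gstate tag, and the tag's top bit is kept in sync with
// whether any orphans remain outstanding.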
4828 static int lfs_fs_preporphans(lfs_t *lfs, int8_t orphans) {
4829     LFS_ASSERT(lfs_tag_size(lfs->gstate.tag) > 0x000 || orphans >= 0);
4830     LFS_ASSERT(lfs_tag_size(lfs->gstate.tag) < 0x1ff || orphans <= 0);
4831     lfs->gstate.tag += orphans;
4832     lfs->gstate.tag = ((lfs->gstate.tag & ~LFS_MKTAG(0x800, 0, 0)) |
4833             ((uint32_t)lfs_gstate_hasorphans(&lfs->gstate) << 31));
4834 
4835     return 0;
4836 }
4837 #endif
4838 
4839 #ifndef LFS_READONLY
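// Record a pending move in the gstate, or clear it when id is the reserved
// "no id" value 0x3ff (clearing also zeroes the recorded pair).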
4840 static void lfs_fs_prepmove(lfs_t *lfs,
4841         uint16_t id, const lfs_block_t pair[2]) {
4842     lfs->gstate.tag = ((lfs->gstate.tag & ~LFS_MKTAG(0x7ff, 0x3ff, 0)) |
4843             ((id != 0x3ff) ? LFS_MKTAG(LFS_TYPE_DELETE, id, 0) : 0));
4844     lfs->gstate.pair[0] = (id != 0x3ff) ? pair[0] : 0;
4845     lfs->gstate.pair[1] = (id != 0x3ff) ? pair[1] : 0;
4846 }
4847 #endif
4848 
4849 #ifndef LFS_READONLY
4850 static int lfs_fs_desuperblock(lfs_t *lfs) {
4851     if (!lfs_gstate_needssuperblock(&lfs->gstate)) {
4852         return 0;
4853     }
4854 
4855     LFS_DEBUG("Rewriting superblock {0x%"PRIx32", 0x%"PRIx32"}",
4856             lfs->root[0],
4857             lfs->root[1]);
4858 
4859     lfs_mdir_t root;
4860     int err = lfs_dir_fetch(lfs, &root, lfs->root);
4861     if (err) {
4862         return err;
4863     }
4864 
4865     // write a new superblock
4866     lfs_superblock_t superblock = {
4867         .version     = lfs_fs_disk_version(lfs),
4868         .block_size  = lfs->cfg->block_size,
4869         .block_count = lfs->block_count,
4870         .name_max    = lfs->name_max,
4871         .file_max    = lfs->file_max,
4872         .attr_max    = lfs->attr_max,
4873     };
4874 
4875     lfs_superblock_tole32(&superblock);
4876     err = lfs_dir_commit(lfs, &root, LFS_MKATTRS(
4877             {LFS_MKTAG(LFS_TYPE_INLINESTRUCT, 0, sizeof(superblock)),
4878                 &superblock}));
4879     if (err) {
4880         return err;
4881     }
4882 
4883     lfs_fs_prepsuperblock(lfs, false);
4884     return 0;
4885 }
4886 #endif
4887 
4888 #ifndef LFS_READONLY
4889 static int lfs_fs_demove(lfs_t *lfs) {
4890     if (!lfs_gstate_hasmove(&lfs->gdisk)) {
4891         return 0;
4892     }
4893 
4894     // Fix bad moves
4895     LFS_DEBUG("Fixing move {0x%"PRIx32", 0x%"PRIx32"} 0x%"PRIx16,
4896             lfs->gdisk.pair[0],
4897             lfs->gdisk.pair[1],
4898             lfs_tag_id(lfs->gdisk.tag));
4899 
4900     // no other gstate is supported at this time, so if we find anything
4901     // else, something most likely went wrong in the gstate calculation
4902     LFS_ASSERT(lfs_tag_type3(lfs->gdisk.tag) == LFS_TYPE_DELETE);
4903 
4904     // fetch and delete the moved entry
4905     lfs_mdir_t movedir;
4906     int err = lfs_dir_fetch(lfs, &movedir, lfs->gdisk.pair);
4907     if (err) {
4908         return err;
4909     }
4910 
4911     // prep gstate and delete move id
4912     uint16_t moveid = lfs_tag_id(lfs->gdisk.tag);
4913     lfs_fs_prepmove(lfs, 0x3ff, NULL);
4914     err = lfs_dir_commit(lfs, &movedir, LFS_MKATTRS(
4915             {LFS_MKTAG(LFS_TYPE_DELETE, moveid, 0), NULL}));
4916     if (err) {
4917         return err;
4918     }
4919 
4920     return 0;
4921 }
4922 #endif
4923 
4924 #ifndef LFS_READONLY
4925 static int lfs_fs_deorphan(lfs_t *lfs, bool powerloss) {
4926     if (!lfs_gstate_hasorphans(&lfs->gstate)) {
4927         return 0;
4928     }
4929 
4930     // Check for orphans in two separate passes:
4931     // - pass 0 for half-orphans (relocations)
4932     // - pass 1 for full-orphans (removes/renames)
4933     //
4934     // Two separate passes are needed as half-orphans can contain outdated
4935     // references to full-orphans, effectively hiding them from the deorphan
4936     // search.
4937     //
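    // (here a half-orphan is a pair whose parent entry still points at its
    // pre-relocation blocks, while a full-orphan has no parent entry at all)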
4938     int pass = 0;
4939     while (pass < 2) {
4940         // Fix any orphans
4941         lfs_mdir_t pdir = {.split = true, .tail = {0, 1}};
4942         lfs_mdir_t dir;
4943         bool moreorphans = false;
4944 
4945         // iterate over all directories
4946         while (!lfs_pair_isnull(pdir.tail)) {
4947             int err = lfs_dir_fetch(lfs, &dir, pdir.tail);
4948             if (err) {
4949                 return err;
4950             }
4951 
4952             // check head blocks for orphans
4953             if (!pdir.split) {
4954                 // check if we have a parent
4955                 lfs_mdir_t parent;
4956                 lfs_stag_t tag = lfs_fs_parent(lfs, pdir.tail, &parent);
4957                 if (tag < 0 && tag != LFS_ERR_NOENT) {
4958                     return tag;
4959                 }
4960 
4961                 if (pass == 0 && tag != LFS_ERR_NOENT) {
4962                     lfs_block_t pair[2];
4963                     lfs_stag_t state = lfs_dir_get(lfs, &parent,
4964                             LFS_MKTAG(0x7ff, 0x3ff, 0), tag, pair);
4965                     if (state < 0) {
4966                         return state;
4967                     }
4968                     lfs_pair_fromle32(pair);
4969 
4970                     if (!lfs_pair_issync(pair, pdir.tail)) {
4971                         // we have desynced
4972                         LFS_DEBUG("Fixing half-orphan "
4973                                 "{0x%"PRIx32", 0x%"PRIx32"} "
4974                                 "-> {0x%"PRIx32", 0x%"PRIx32"}",
4975                                 pdir.tail[0], pdir.tail[1], pair[0], pair[1]);
4976 
4977                         // fix pending move in this pair? this looks like an
4978                         // optimization but is in fact _required_ since
4979                         // relocating may outdate the move.
4980                         uint16_t moveid = 0x3ff;
4981                         if (lfs_gstate_hasmovehere(&lfs->gstate, pdir.pair)) {
4982                             moveid = lfs_tag_id(lfs->gstate.tag);
4983                             LFS_DEBUG("Fixing move while fixing orphans "
4984                                     "{0x%"PRIx32", 0x%"PRIx32"} 0x%"PRIx16"\n",
4985                                     pdir.pair[0], pdir.pair[1], moveid);
4986                             lfs_fs_prepmove(lfs, 0x3ff, NULL);
4987                         }
4988 
4989                         lfs_pair_tole32(pair);
4990                         state = lfs_dir_orphaningcommit(lfs, &pdir, LFS_MKATTRS(
4991                                 {LFS_MKTAG_IF(moveid != 0x3ff,
4992                                     LFS_TYPE_DELETE, moveid, 0), NULL},
4993                                 {LFS_MKTAG(LFS_TYPE_SOFTTAIL, 0x3ff, 8),
4994                                     pair}));
4995                         lfs_pair_fromle32(pair);
4996                         if (state < 0) {
4997                             return state;
4998                         }
4999 
5000                         // did our commit create more orphans?
5001                         if (state == LFS_OK_ORPHANED) {
5002                             moreorphans = true;
5003                         }
5004 
5005                         // refetch tail
5006                         continue;
5007                     }
5008                 }
5009 
5010                 // note we only check for full orphans if we may have had a
5011                 // power-loss, otherwise orphans are created intentionally
5012                 // during operations such as lfs_mkdir
5013                 if (pass == 1 && tag == LFS_ERR_NOENT && powerloss) {
5014                     // we are an orphan
5015                     LFS_DEBUG("Fixing orphan {0x%"PRIx32", 0x%"PRIx32"}",
5016                             pdir.tail[0], pdir.tail[1]);
5017 
5018                     // steal state
5019                     err = lfs_dir_getgstate(lfs, &dir, &lfs->gdelta);
5020                     if (err) {
5021                         return err;
5022                     }
5023 
5024                     // steal tail
5025                     lfs_pair_tole32(dir.tail);
5026                     int state = lfs_dir_orphaningcommit(lfs, &pdir, LFS_MKATTRS(
5027                             {LFS_MKTAG(LFS_TYPE_TAIL + dir.split, 0x3ff, 8),
5028                                 dir.tail}));
5029                     lfs_pair_fromle32(dir.tail);
5030                     if (state < 0) {
5031                         return state;
5032                     }
5033 
5034                     // did our commit create more orphans?
5035                     if (state == LFS_OK_ORPHANED) {
5036                         moreorphans = true;
5037                     }
5038 
5039                     // refetch tail
5040                     continue;
5041                 }
5042             }
5043 
5044             pdir = dir;
5045         }
5046 
5047         pass = moreorphans ? 0 : pass+1;
5048     }
5049 
5050     // mark orphans as fixed
5051     return lfs_fs_preporphans(lfs, -lfs_gstate_getorphans(&lfs->gstate));
5052 }
5053 #endif
5054 
5055 #ifndef LFS_READONLY
5056 static int lfs_fs_forceconsistency(lfs_t *lfs) {
5057     int err = lfs_fs_desuperblock(lfs);
5058     if (err) {
5059         return err;
5060     }
5061 
5062     err = lfs_fs_demove(lfs);
5063     if (err) {
5064         return err;
5065     }
5066 
5067     err = lfs_fs_deorphan(lfs, true);
5068     if (err) {
5069         return err;
5070     }
5071 
5072     return 0;
5073 }
5074 #endif
5075 
5076 #ifndef LFS_READONLY
5077 static int lfs_fs_mkconsistent_(lfs_t *lfs) {
5078     // lfs_fs_forceconsistency does most of the work here
5079     int err = lfs_fs_forceconsistency(lfs);
5080     if (err) {
5081         return err;
5082     }
5083 
5084     // do we have any pending gstate?
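    // (xoring the on-disk gstate with the in-RAM gstate leaves exactly the
    // delta that has not been committed yet)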
5085     lfs_gstate_t delta = {0};
5086     lfs_gstate_xor(&delta, &lfs->gdisk);
5087     lfs_gstate_xor(&delta, &lfs->gstate);
5088     if (!lfs_gstate_iszero(&delta)) {
5089         // lfs_dir_commit will implicitly write out any pending gstate
5090         lfs_mdir_t root;
5091         err = lfs_dir_fetch(lfs, &root, lfs->root);
5092         if (err) {
5093             return err;
5094         }
5095 
5096         err = lfs_dir_commit(lfs, &root, NULL, 0);
5097         if (err) {
5098             return err;
5099         }
5100     }
5101 
5102     return 0;
5103 }
5104 #endif
5105 
5106 static int lfs_fs_size_count(void *p, lfs_block_t block) {
5107     (void)block;
5108     lfs_size_t *size = p;
5109     *size += 1;
5110     return 0;
5111 }
5112 
5113 static lfs_ssize_t lfs_fs_size_(lfs_t *lfs) {
5114     lfs_size_t size = 0;
5115     int err = lfs_fs_traverse_(lfs, lfs_fs_size_count, &size, false);
5116     if (err) {
5117         return err;
5118     }
5119 
5120     return size;
5121 }
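
// For reference, a typical caller converts the block count returned by the
// public lfs_fs_size wrapper below into bytes. A minimal sketch, assuming a
// mounted lfs_t named lfs and its lfs_config named cfg:
//
//     lfs_ssize_t blocks = lfs_fs_size(&lfs);
//     if (blocks >= 0) {
//         uint64_t used_bytes = (uint64_t)blocks * cfg.block_size;
//     }
//
// The result is best effort; shared or duplicated block references may be
// counted more than once.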
5122 
5123 // explicit garbage collection
5124 #ifndef LFS_READONLY
5125 static int lfs_fs_gc_(lfs_t *lfs) {
5126     // force consistency, even if we're not necessarily going to write,
5127     // since this function is expected to take care of general
5128     // janitorial work
5129     int err = lfs_fs_forceconsistency(lfs);
5130     if (err) {
5131         return err;
5132     }
5133 
5134     // try to compact metadata pairs, note we can't really accomplish
5135     // anything if compact_thresh doesn't at least leave a prog_size
5136     // available
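    // (a compact_thresh of 0 falls back to the default threshold of
    // block_size - block_size/8, i.e. ~88% of the block, as checked below)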
5137     if (lfs->cfg->compact_thresh
5138             < lfs->cfg->block_size - lfs->cfg->prog_size) {
5139         // iterate over all mdirs
5140         lfs_mdir_t mdir = {.tail = {0, 1}};
5141         while (!lfs_pair_isnull(mdir.tail)) {
5142             err = lfs_dir_fetch(lfs, &mdir, mdir.tail);
5143             if (err) {
5144                 return err;
5145             }
5146 
5147             // not erased? exceeds our compaction threshold?
5148             if (!mdir.erased || ((lfs->cfg->compact_thresh == 0)
5149                     ? mdir.off > lfs->cfg->block_size - lfs->cfg->block_size/8
5150                     : mdir.off > lfs->cfg->compact_thresh)) {
5151                 // the easiest way to trigger a compaction is to mark
5152                 // the mdir as unerased and add an empty commit
5153                 mdir.erased = false;
5154                 err = lfs_dir_commit(lfs, &mdir, NULL, 0);
5155                 if (err) {
5156                     return err;
5157                 }
5158             }
5159         }
5160     }
5161 
5162     // try to populate the lookahead buffer, unless it's already full
5163     if (lfs->lookahead.size < 8*lfs->cfg->lookahead_size) {
5164         err = lfs_alloc_scan(lfs);
5165         if (err) {
5166             return err;
5167         }
5168     }
5169 
5170     return 0;
5171 }
5172 #endif
5173 
5174 #ifndef LFS_READONLY
5175 static int lfs_fs_grow_(lfs_t *lfs, lfs_size_t block_count) {
5176     // shrinking is not supported
5177     LFS_ASSERT(block_count >= lfs->block_count);
5178 
5179     if (block_count > lfs->block_count) {
5180         lfs->block_count = block_count;
5181 
5182         // fetch the root
5183         lfs_mdir_t root;
5184         int err = lfs_dir_fetch(lfs, &root, lfs->root);
5185         if (err) {
5186             return err;
5187         }
5188 
5189         // update the superblock
5190         lfs_superblock_t superblock;
5191         lfs_stag_t tag = lfs_dir_get(lfs, &root, LFS_MKTAG(0x7ff, 0x3ff, 0),
5192                 LFS_MKTAG(LFS_TYPE_INLINESTRUCT, 0, sizeof(superblock)),
5193                 &superblock);
5194         if (tag < 0) {
5195             return tag;
5196         }
5197         lfs_superblock_fromle32(&superblock);
5198 
5199         superblock.block_count = lfs->block_count;
5200 
5201         lfs_superblock_tole32(&superblock);
5202         err = lfs_dir_commit(lfs, &root, LFS_MKATTRS(
5203                 {tag, &superblock}));
5204         if (err) {
5205             return err;
5206         }
5207     }
5208 
5209     return 0;
5210 }
5211 #endif
5212 
5213 #ifdef LFS_MIGRATE
5214 ////// Migration from littlefs v1 below this point //////
5215 
5216 /// Version info ///
5217 
5218 // Software library version
5219 // Major (top 16 bits), incremented on backwards-incompatible changes
5220 // Minor (bottom 16 bits), incremented on feature additions
5221 #define LFS1_VERSION 0x00010007
5222 #define LFS1_VERSION_MAJOR (0xffff & (LFS1_VERSION >> 16))
5223 #define LFS1_VERSION_MINOR (0xffff & (LFS1_VERSION >>  0))
5224 
5225 // Version of on-disk data structures
5226 // Major (top 16 bits), incremented on backwards-incompatible changes
5227 // Minor (bottom 16 bits), incremented on feature additions
5228 #define LFS1_DISK_VERSION 0x00010001
5229 #define LFS1_DISK_VERSION_MAJOR (0xffff & (LFS1_DISK_VERSION >> 16))
5230 #define LFS1_DISK_VERSION_MINOR (0xffff & (LFS1_DISK_VERSION >>  0))
5231 
5232 
5233 /// v1 Definitions ///
5234 
5235 // File types
5236 enum lfs1_type {
5237     LFS1_TYPE_REG        = 0x11,
5238     LFS1_TYPE_DIR        = 0x22,
5239     LFS1_TYPE_SUPERBLOCK = 0x2e,
5240 };
5241 
5242 typedef struct lfs1 {
5243     lfs_block_t root[2];
5244 } lfs1_t;
5245 
5246 typedef struct lfs1_entry {
5247     lfs_off_t off;
5248 
5249     struct lfs1_disk_entry {
5250         uint8_t type;
5251         uint8_t elen;
5252         uint8_t alen;
5253         uint8_t nlen;
5254         union {
5255             struct {
5256                 lfs_block_t head;
5257                 lfs_size_t size;
5258             } file;
5259             lfs_block_t dir[2];
5260         } u;
5261     } d;
5262 } lfs1_entry_t;
5263 
5264 typedef struct lfs1_dir {
5265     struct lfs1_dir *next;
5266     lfs_block_t pair[2];
5267     lfs_off_t off;
5268 
5269     lfs_block_t head[2];
5270     lfs_off_t pos;
5271 
5272     struct lfs1_disk_dir {
5273         uint32_t rev;
5274         lfs_size_t size;
5275         lfs_block_t tail[2];
5276     } d;
5277 } lfs1_dir_t;
5278 
5279 typedef struct lfs1_superblock {
5280     lfs_off_t off;
5281 
5282     struct lfs1_disk_superblock {
5283         uint8_t type;
5284         uint8_t elen;
5285         uint8_t alen;
5286         uint8_t nlen;
5287         lfs_block_t root[2];
5288         uint32_t block_size;
5289         uint32_t block_count;
5290         uint32_t version;
5291         char magic[8];
5292     } d;
5293 } lfs1_superblock_t;
5294 
5295 
5296 /// Low-level wrappers v1->v2 ///
5297 static void lfs1_crc(uint32_t *crc, const void *buffer, size_t size) {
5298     *crc = lfs_crc(*crc, buffer, size);
5299 }
5300 
5301 static int lfs1_bd_read(lfs_t *lfs, lfs_block_t block,
5302         lfs_off_t off, void *buffer, lfs_size_t size) {
5303     // if we ever do more than write to alternating pairs,
5304     // this may need to consider the pcache
5305     return lfs_bd_read(lfs, &lfs->pcache, &lfs->rcache, size,
5306             block, off, buffer, size);
5307 }
5308 
5309 static int lfs1_bd_crc(lfs_t *lfs, lfs_block_t block,
5310         lfs_off_t off, lfs_size_t size, uint32_t *crc) {
5311     for (lfs_off_t i = 0; i < size; i++) {
5312         uint8_t c;
5313         int err = lfs1_bd_read(lfs, block, off+i, &c, 1);
5314         if (err) {
5315             return err;
5316         }
5317 
5318         lfs1_crc(crc, &c, 1);
5319     }
5320 
5321     return 0;
5322 }
5323 
5324 
5325 /// Endian swapping functions ///
5326 static void lfs1_dir_fromle32(struct lfs1_disk_dir *d) {
5327     d->rev     = lfs_fromle32(d->rev);
5328     d->size    = lfs_fromle32(d->size);
5329     d->tail[0] = lfs_fromle32(d->tail[0]);
5330     d->tail[1] = lfs_fromle32(d->tail[1]);
5331 }
5332 
5333 static void lfs1_dir_tole32(struct lfs1_disk_dir *d) {
5334     d->rev     = lfs_tole32(d->rev);
5335     d->size    = lfs_tole32(d->size);
5336     d->tail[0] = lfs_tole32(d->tail[0]);
5337     d->tail[1] = lfs_tole32(d->tail[1]);
5338 }
5339 
5340 static void lfs1_entry_fromle32(struct lfs1_disk_entry *d) {
5341     d->u.dir[0] = lfs_fromle32(d->u.dir[0]);
5342     d->u.dir[1] = lfs_fromle32(d->u.dir[1]);
5343 }
5344 
5345 static void lfs1_entry_tole32(struct lfs1_disk_entry *d) {
5346     d->u.dir[0] = lfs_tole32(d->u.dir[0]);
5347     d->u.dir[1] = lfs_tole32(d->u.dir[1]);
5348 }
5349 
5350 static void lfs1_superblock_fromle32(struct lfs1_disk_superblock *d) {
5351     d->root[0]     = lfs_fromle32(d->root[0]);
5352     d->root[1]     = lfs_fromle32(d->root[1]);
5353     d->block_size  = lfs_fromle32(d->block_size);
5354     d->block_count = lfs_fromle32(d->block_count);
5355     d->version     = lfs_fromle32(d->version);
5356 }
5357 
5358 
5359 /// Metadata pair and directory operations ///
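// A v1 entry on disk is a 4-byte header (type, elen, alen, nlen) followed by
// elen bytes of entry struct, alen bytes of attributes, and nlen bytes of
// name, hence the size computed below.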
5360 static inline lfs_size_t lfs1_entry_size(const lfs1_entry_t *entry) {
5361     return 4 + entry->d.elen + entry->d.alen + entry->d.nlen;
5362 }
5363 
5364 static int lfs1_dir_fetch(lfs_t *lfs,
5365         lfs1_dir_t *dir, const lfs_block_t pair[2]) {
5366     // copy out pair, otherwise it may alias dir
5367     const lfs_block_t tpair[2] = {pair[0], pair[1]};
5368     bool valid = false;
5369 
5370     // check both blocks for the most recent revision
5371     for (int i = 0; i < 2; i++) {
5372         struct lfs1_disk_dir test;
5373         int err = lfs1_bd_read(lfs, tpair[i], 0, &test, sizeof(test));
5374         lfs1_dir_fromle32(&test);
5375         if (err) {
5376             if (err == LFS_ERR_CORRUPT) {
5377                 continue;
5378             }
5379             return err;
5380         }
5381 
5382         if (valid && lfs_scmp(test.rev, dir->d.rev) < 0) {
5383             continue;
5384         }
5385 
5386         if ((0x7fffffff & test.size) < sizeof(test)+4 ||
5387             (0x7fffffff & test.size) > lfs->cfg->block_size) {
5388             continue;
5389         }
5390 
5391         uint32_t crc = 0xffffffff;
5392         lfs1_dir_tole32(&test);
5393         lfs1_crc(&crc, &test, sizeof(test));
5394         lfs1_dir_fromle32(&test);
5395         err = lfs1_bd_crc(lfs, tpair[i], sizeof(test),
5396                 (0x7fffffff & test.size) - sizeof(test), &crc);
5397         if (err) {
5398             if (err == LFS_ERR_CORRUPT) {
5399                 continue;
5400             }
5401             return err;
5402         }
5403 
5404         if (crc != 0) {
5405             continue;
5406         }
5407 
5408         valid = true;
5409 
5410         // setup dir in case it's valid
5411         dir->pair[0] = tpair[(i+0) % 2];
5412         dir->pair[1] = tpair[(i+1) % 2];
5413         dir->off = sizeof(dir->d);
5414         dir->d = test;
5415     }
5416 
5417     if (!valid) {
5418         LFS_ERROR("Corrupted dir pair at {0x%"PRIx32", 0x%"PRIx32"}",
5419                 tpair[0], tpair[1]);
5420         return LFS_ERR_CORRUPT;
5421     }
5422 
5423     return 0;
5424 }
5425 
5426 static int lfs1_dir_next(lfs_t *lfs, lfs1_dir_t *dir, lfs1_entry_t *entry) {
5427     while (dir->off + sizeof(entry->d) > (0x7fffffff & dir->d.size)-4) {
5428         if (!(0x80000000 & dir->d.size)) {
5429             entry->off = dir->off;
5430             return LFS_ERR_NOENT;
5431         }
5432 
5433         int err = lfs1_dir_fetch(lfs, dir, dir->d.tail);
5434         if (err) {
5435             return err;
5436         }
5437 
5438         dir->off = sizeof(dir->d);
5439         dir->pos += sizeof(dir->d) + 4;
5440     }
5441 
5442     int err = lfs1_bd_read(lfs, dir->pair[0], dir->off,
5443             &entry->d, sizeof(entry->d));
5444     lfs1_entry_fromle32(&entry->d);
5445     if (err) {
5446         return err;
5447     }
5448 
5449     entry->off = dir->off;
5450     dir->off += lfs1_entry_size(entry);
5451     dir->pos += lfs1_entry_size(entry);
5452     return 0;
5453 }
5454 
5455 /// littlefs v1 specific operations ///
5456 int lfs1_traverse(lfs_t *lfs, int (*cb)(void*, lfs_block_t), void *data) {
5457     if (lfs_pair_isnull(lfs->lfs1->root)) {
5458         return 0;
5459     }
5460 
5461     // iterate over metadata pairs
5462     lfs1_dir_t dir;
5463     lfs1_entry_t entry;
5464     lfs_block_t cwd[2] = {0, 1};
5465 
5466     while (true) {
5467         for (int i = 0; i < 2; i++) {
5468             int err = cb(data, cwd[i]);
5469             if (err) {
5470                 return err;
5471             }
5472         }
5473 
5474         int err = lfs1_dir_fetch(lfs, &dir, cwd);
5475         if (err) {
5476             return err;
5477         }
5478 
5479         // iterate over contents
5480         while (dir.off + sizeof(entry.d) <= (0x7fffffff & dir.d.size)-4) {
5481             err = lfs1_bd_read(lfs, dir.pair[0], dir.off,
5482                     &entry.d, sizeof(entry.d));
5483             lfs1_entry_fromle32(&entry.d);
5484             if (err) {
5485                 return err;
5486             }
5487 
5488             dir.off += lfs1_entry_size(&entry);
5489             if ((0x70 & entry.d.type) == (0x70 & LFS1_TYPE_REG)) {
5490                 err = lfs_ctz_traverse(lfs, NULL, &lfs->rcache,
5491                         entry.d.u.file.head, entry.d.u.file.size, cb, data);
5492                 if (err) {
5493                     return err;
5494                 }
5495             }
5496         }
5497 
5498         // we also need to check if we contain a threaded v2 directory
5499         lfs_mdir_t dir2 = {.split=true, .tail={cwd[0], cwd[1]}};
5500         while (dir2.split) {
5501             err = lfs_dir_fetch(lfs, &dir2, dir2.tail);
5502             if (err) {
5503                 break;
5504             }
5505 
5506             for (int i = 0; i < 2; i++) {
5507                 err = cb(data, dir2.pair[i]);
5508                 if (err) {
5509                     return err;
5510                 }
5511             }
5512         }
5513 
5514         cwd[0] = dir.d.tail[0];
5515         cwd[1] = dir.d.tail[1];
5516 
5517         if (lfs_pair_isnull(cwd)) {
5518             break;
5519         }
5520     }
5521 
5522     return 0;
5523 }
5524 
5525 static int lfs1_moved(lfs_t *lfs, const void *e) {
5526     if (lfs_pair_isnull(lfs->lfs1->root)) {
5527         return 0;
5528     }
5529 
5530     // skip superblock
5531     lfs1_dir_t cwd;
5532     int err = lfs1_dir_fetch(lfs, &cwd, (const lfs_block_t[2]){0, 1});
5533     if (err) {
5534         return err;
5535     }
5536 
5537     // iterate over all directory entries
5538     lfs1_entry_t entry;
5539     while (!lfs_pair_isnull(cwd.d.tail)) {
5540         err = lfs1_dir_fetch(lfs, &cwd, cwd.d.tail);
5541         if (err) {
5542             return err;
5543         }
5544 
5545         while (true) {
5546             err = lfs1_dir_next(lfs, &cwd, &entry);
5547             if (err && err != LFS_ERR_NOENT) {
5548                 return err;
5549             }
5550 
5551             if (err == LFS_ERR_NOENT) {
5552                 break;
5553             }
5554 
5555             if (!(0x80 & entry.d.type) &&
5556                  memcmp(&entry.d.u, e, sizeof(entry.d.u)) == 0) {
5557                 return true;
5558             }
5559         }
5560     }
5561 
5562     return false;
5563 }
5564 
5565 /// Filesystem operations ///
5566 static int lfs1_mount(lfs_t *lfs, struct lfs1 *lfs1,
5567         const struct lfs_config *cfg) {
5568     int err = 0;
5569     {
5570         err = lfs_init(lfs, cfg);
5571         if (err) {
5572             return err;
5573         }
5574 
5575         lfs->lfs1 = lfs1;
5576         lfs->lfs1->root[0] = LFS_BLOCK_NULL;
5577         lfs->lfs1->root[1] = LFS_BLOCK_NULL;
5578 
5579         // setup free lookahead
5580         lfs->lookahead.start = 0;
5581         lfs->lookahead.size = 0;
5582         lfs->lookahead.next = 0;
5583         lfs_alloc_ckpoint(lfs);
5584 
5585         // load superblock
5586         lfs1_dir_t dir;
5587         lfs1_superblock_t superblock;
5588         err = lfs1_dir_fetch(lfs, &dir, (const lfs_block_t[2]){0, 1});
5589         if (err && err != LFS_ERR_CORRUPT) {
5590             goto cleanup;
5591         }
5592 
5593         if (!err) {
5594             err = lfs1_bd_read(lfs, dir.pair[0], sizeof(dir.d),
5595                     &superblock.d, sizeof(superblock.d));
5596             lfs1_superblock_fromle32(&superblock.d);
5597             if (err) {
5598                 goto cleanup;
5599             }
5600 
5601             lfs->lfs1->root[0] = superblock.d.root[0];
5602             lfs->lfs1->root[1] = superblock.d.root[1];
5603         }
5604 
5605         if (err || memcmp(superblock.d.magic, "littlefs", 8) != 0) {
5606             LFS_ERROR("Invalid superblock at {0x%"PRIx32", 0x%"PRIx32"}",
5607                     0, 1);
5608             err = LFS_ERR_CORRUPT;
5609             goto cleanup;
5610         }
5611 
5612         uint16_t major_version = (0xffff & (superblock.d.version >> 16));
5613         uint16_t minor_version = (0xffff & (superblock.d.version >>  0));
5614         if ((major_version != LFS1_DISK_VERSION_MAJOR ||
5615              minor_version > LFS1_DISK_VERSION_MINOR)) {
5616             LFS_ERROR("Invalid version v%d.%d", major_version, minor_version);
5617             err = LFS_ERR_INVAL;
5618             goto cleanup;
5619         }
5620 
5621         return 0;
5622     }
5623 
5624 cleanup:
5625     lfs_deinit(lfs);
5626     return err;
5627 }
5628 
5629 static int lfs1_unmount(lfs_t *lfs) {
5630     return lfs_deinit(lfs);
5631 }
5632 
5633 /// v1 migration ///
5634 static int lfs_migrate_(lfs_t *lfs, const struct lfs_config *cfg) {
5635     struct lfs1 lfs1;
5636 
5637     // Indeterminate filesystem size not allowed for migration.
5638     LFS_ASSERT(cfg->block_count != 0);
5639 
5640     int err = lfs1_mount(lfs, &lfs1, cfg);
5641     if (err) {
5642         return err;
5643     }
5644 
5645     {
5646         // iterate through each directory, copying over entries
5647         // into new directory
5648         lfs1_dir_t dir1;
5649         lfs_mdir_t dir2;
5650         dir1.d.tail[0] = lfs->lfs1->root[0];
5651         dir1.d.tail[1] = lfs->lfs1->root[1];
5652         while (!lfs_pair_isnull(dir1.d.tail)) {
5653             // iterate old dir
5654             err = lfs1_dir_fetch(lfs, &dir1, dir1.d.tail);
5655             if (err) {
5656                 goto cleanup;
5657             }
5658 
5659             // create new dir and bind as temporary pretend root
5660             err = lfs_dir_alloc(lfs, &dir2);
5661             if (err) {
5662                 goto cleanup;
5663             }
5664 
5665             dir2.rev = dir1.d.rev;
5666             dir1.head[0] = dir1.pair[0];
5667             dir1.head[1] = dir1.pair[1];
5668             lfs->root[0] = dir2.pair[0];
5669             lfs->root[1] = dir2.pair[1];
5670 
5671             err = lfs_dir_commit(lfs, &dir2, NULL, 0);
5672             if (err) {
5673                 goto cleanup;
5674             }
5675 
5676             while (true) {
5677                 lfs1_entry_t entry1;
5678                 err = lfs1_dir_next(lfs, &dir1, &entry1);
5679                 if (err && err != LFS_ERR_NOENT) {
5680                     goto cleanup;
5681                 }
5682 
5683                 if (err == LFS_ERR_NOENT) {
5684                     break;
5685                 }
5686 
5687                 // check that entry has not been moved
5688                 if (entry1.d.type & 0x80) {
5689                     int moved = lfs1_moved(lfs, &entry1.d.u);
5690                     if (moved < 0) {
5691                         err = moved;
5692                         goto cleanup;
5693                     }
5694 
5695                     if (moved) {
5696                         continue;
5697                     }
5698 
5699                     entry1.d.type &= ~0x80;
5700                 }
5701 
5702                 // also fetch name
5703                 char name[LFS_NAME_MAX+1];
5704                 memset(name, 0, sizeof(name));
5705                 err = lfs1_bd_read(lfs, dir1.pair[0],
5706                         entry1.off + 4+entry1.d.elen+entry1.d.alen,
5707                         name, entry1.d.nlen);
5708                 if (err) {
5709                     goto cleanup;
5710                 }
5711 
5712                 bool isdir = (entry1.d.type == LFS1_TYPE_DIR);
5713 
5714                 // create entry in new dir
5715                 err = lfs_dir_fetch(lfs, &dir2, lfs->root);
5716                 if (err) {
5717                     goto cleanup;
5718                 }
5719 
5720                 uint16_t id;
5721                 err = lfs_dir_find(lfs, &dir2, &(const char*){name}, &id);
5722                 if (!(err == LFS_ERR_NOENT && id != 0x3ff)) {
5723                     err = (err < 0) ? err : LFS_ERR_EXIST;
5724                     goto cleanup;
5725                 }
5726 
5727                 lfs1_entry_tole32(&entry1.d);
5728                 err = lfs_dir_commit(lfs, &dir2, LFS_MKATTRS(
5729                         {LFS_MKTAG(LFS_TYPE_CREATE, id, 0), NULL},
5730                         {LFS_MKTAG_IF_ELSE(isdir,
5731                             LFS_TYPE_DIR, id, entry1.d.nlen,
5732                             LFS_TYPE_REG, id, entry1.d.nlen),
5733                                 name},
5734                         {LFS_MKTAG_IF_ELSE(isdir,
5735                             LFS_TYPE_DIRSTRUCT, id, sizeof(entry1.d.u),
5736                             LFS_TYPE_CTZSTRUCT, id, sizeof(entry1.d.u)),
5737                                 &entry1.d.u}));
5738                 lfs1_entry_fromle32(&entry1.d);
5739                 if (err) {
5740                     goto cleanup;
5741                 }
5742             }
5743 
5744             if (!lfs_pair_isnull(dir1.d.tail)) {
5745                 // find last block and update tail to thread into fs
5746                 err = lfs_dir_fetch(lfs, &dir2, lfs->root);
5747                 if (err) {
5748                     goto cleanup;
5749                 }
5750 
5751                 while (dir2.split) {
5752                     err = lfs_dir_fetch(lfs, &dir2, dir2.tail);
5753                     if (err) {
5754                         goto cleanup;
5755                     }
5756                 }
5757 
5758                 lfs_pair_tole32(dir2.pair);
5759                 err = lfs_dir_commit(lfs, &dir2, LFS_MKATTRS(
5760                         {LFS_MKTAG(LFS_TYPE_SOFTTAIL, 0x3ff, 8), dir1.d.tail}));
5761                 lfs_pair_fromle32(dir2.pair);
5762                 if (err) {
5763                     goto cleanup;
5764                 }
5765             }
5766 
5767             // Copy over first block to thread into fs. Unfortunately
5768             // if this fails there is not much we can do.
5769             LFS_DEBUG("Migrating {0x%"PRIx32", 0x%"PRIx32"} "
5770                         "-> {0x%"PRIx32", 0x%"PRIx32"}",
5771                     lfs->root[0], lfs->root[1], dir1.head[0], dir1.head[1]);
5772 
5773             err = lfs_bd_erase(lfs, dir1.head[1]);
5774             if (err) {
5775                 goto cleanup;
5776             }
5777 
5778             err = lfs_dir_fetch(lfs, &dir2, lfs->root);
5779             if (err) {
5780                 goto cleanup;
5781             }
5782 
5783             for (lfs_off_t i = 0; i < dir2.off; i++) {
5784                 uint8_t dat;
5785                 err = lfs_bd_read(lfs,
5786                         NULL, &lfs->rcache, dir2.off,
5787                         dir2.pair[0], i, &dat, 1);
5788                 if (err) {
5789                     goto cleanup;
5790                 }
5791 
5792                 err = lfs_bd_prog(lfs,
5793                         &lfs->pcache, &lfs->rcache, true,
5794                         dir1.head[1], i, &dat, 1);
5795                 if (err) {
5796                     goto cleanup;
5797                 }
5798             }
5799 
5800             err = lfs_bd_flush(lfs, &lfs->pcache, &lfs->rcache, true);
5801             if (err) {
5802                 goto cleanup;
5803             }
5804         }
5805 
5806         // Create new superblock. This marks a successful migration!
5807         err = lfs1_dir_fetch(lfs, &dir1, (const lfs_block_t[2]){0, 1});
5808         if (err) {
5809             goto cleanup;
5810         }
5811 
5812         dir2.pair[0] = dir1.pair[0];
5813         dir2.pair[1] = dir1.pair[1];
5814         dir2.rev = dir1.d.rev;
5815         dir2.off = sizeof(dir2.rev);
5816         dir2.etag = 0xffffffff;
5817         dir2.count = 0;
5818         dir2.tail[0] = lfs->lfs1->root[0];
5819         dir2.tail[1] = lfs->lfs1->root[1];
5820         dir2.erased = false;
5821         dir2.split = true;
5822 
5823         lfs_superblock_t superblock = {
5824             .version     = LFS_DISK_VERSION,
5825             .block_size  = lfs->cfg->block_size,
5826             .block_count = lfs->cfg->block_count,
5827             .name_max    = lfs->name_max,
5828             .file_max    = lfs->file_max,
5829             .attr_max    = lfs->attr_max,
5830         };
5831 
5832         lfs_superblock_tole32(&superblock);
5833         err = lfs_dir_commit(lfs, &dir2, LFS_MKATTRS(
5834                 {LFS_MKTAG(LFS_TYPE_CREATE, 0, 0), NULL},
5835                 {LFS_MKTAG(LFS_TYPE_SUPERBLOCK, 0, 8), "littlefs"},
5836                 {LFS_MKTAG(LFS_TYPE_INLINESTRUCT, 0, sizeof(superblock)),
5837                     &superblock}));
5838         if (err) {
5839             goto cleanup;
5840         }
5841 
5842         // sanity check that fetch works
5843         err = lfs_dir_fetch(lfs, &dir2, (const lfs_block_t[2]){0, 1});
5844         if (err) {
5845             goto cleanup;
5846         }
5847 
5848         // force compaction to prevent accidentally mounting v1
5849         dir2.erased = false;
5850         err = lfs_dir_commit(lfs, &dir2, NULL, 0);
5851         if (err) {
5852             goto cleanup;
5853         }
5854     }
5855 
5856 cleanup:
5857     lfs1_unmount(lfs);
5858     return err;
5859 }
5860 
5861 #endif
5862 
5863 
5864 /// Public API wrappers ///
5865 
5866 // Here we can add tracing/thread safety easily
5867 
5868 // Thread-safe wrappers if enabled
5869 #ifdef LFS_THREADSAFE
5870 #define LFS_LOCK(cfg)   cfg->lock(cfg)
5871 #define LFS_UNLOCK(cfg) cfg->unlock(cfg)
5872 #else
5873 #define LFS_LOCK(cfg)   ((void)cfg, 0)
5874 #define LFS_UNLOCK(cfg) ((void)cfg)
5875 #endif
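
// As a sketch of how the lock hooks are wired up: a port built with
// LFS_THREADSAFE supplies lock/unlock callbacks in its lfs_config
// (my_mutex_lock/my_mutex_unlock here are hypothetical, platform-provided
// helpers with the int (*)(const struct lfs_config*) signature):
//
//     const struct lfs_config cfg = {
//         // ... block device and geometry fields ...
//         .lock   = my_mutex_lock,
//         .unlock = my_mutex_unlock,
//     };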
5876 
5877 // Public API
5878 #ifndef LFS_READONLY
5879 int lfs_format(lfs_t *lfs, const struct lfs_config *cfg) {
5880     int err = LFS_LOCK(cfg);
5881     if (err) {
5882         return err;
5883     }
5884     LFS_TRACE("lfs_format(%p, %p {.context=%p, "
5885                 ".read=%p, .prog=%p, .erase=%p, .sync=%p, "
5886                 ".read_size=%"PRIu32", .prog_size=%"PRIu32", "
5887                 ".block_size=%"PRIu32", .block_count=%"PRIu32", "
5888                 ".block_cycles=%"PRIu32", .cache_size=%"PRIu32", "
5889                 ".lookahead_size=%"PRIu32", .read_buffer=%p, "
5890                 ".prog_buffer=%p, .lookahead_buffer=%p, "
5891                 ".name_max=%"PRIu32", .file_max=%"PRIu32", "
5892                 ".attr_max=%"PRIu32"})",
5893             (void*)lfs, (void*)cfg, cfg->context,
5894             (void*)(uintptr_t)cfg->read, (void*)(uintptr_t)cfg->prog,
5895             (void*)(uintptr_t)cfg->erase, (void*)(uintptr_t)cfg->sync,
5896             cfg->read_size, cfg->prog_size, cfg->block_size, cfg->block_count,
5897             cfg->block_cycles, cfg->cache_size, cfg->lookahead_size,
5898             cfg->read_buffer, cfg->prog_buffer, cfg->lookahead_buffer,
5899             cfg->name_max, cfg->file_max, cfg->attr_max);
5900 
5901     err = lfs_format_(lfs, cfg);
5902 
5903     LFS_TRACE("lfs_format -> %d", err);
5904     LFS_UNLOCK(cfg);
5905     return err;
5906 }
5907 #endif
5908 
5909 int lfs_mount(lfs_t *lfs, const struct lfs_config *cfg) {
5910     int err = LFS_LOCK(cfg);
5911     if (err) {
5912         return err;
5913     }
5914     LFS_TRACE("lfs_mount(%p, %p {.context=%p, "
5915                 ".read=%p, .prog=%p, .erase=%p, .sync=%p, "
5916                 ".read_size=%"PRIu32", .prog_size=%"PRIu32", "
5917                 ".block_size=%"PRIu32", .block_count=%"PRIu32", "
5918                 ".block_cycles=%"PRIu32", .cache_size=%"PRIu32", "
5919                 ".lookahead_size=%"PRIu32", .read_buffer=%p, "
5920                 ".prog_buffer=%p, .lookahead_buffer=%p, "
5921                 ".name_max=%"PRIu32", .file_max=%"PRIu32", "
5922                 ".attr_max=%"PRIu32"})",
5923             (void*)lfs, (void*)cfg, cfg->context,
5924             (void*)(uintptr_t)cfg->read, (void*)(uintptr_t)cfg->prog,
5925             (void*)(uintptr_t)cfg->erase, (void*)(uintptr_t)cfg->sync,
5926             cfg->read_size, cfg->prog_size, cfg->block_size, cfg->block_count,
5927             cfg->block_cycles, cfg->cache_size, cfg->lookahead_size,
5928             cfg->read_buffer, cfg->prog_buffer, cfg->lookahead_buffer,
5929             cfg->name_max, cfg->file_max, cfg->attr_max);
5930 
5931     err = lfs_mount_(lfs, cfg);
5932 
5933     LFS_TRACE("lfs_mount -> %d", err);
5934     LFS_UNLOCK(cfg);
5935     return err;
5936 }
5937 
5938 int lfs_unmount(lfs_t *lfs) {
5939     int err = LFS_LOCK(lfs->cfg);
5940     if (err) {
5941         return err;
5942     }
5943     LFS_TRACE("lfs_unmount(%p)", (void*)lfs);
5944 
5945     err = lfs_unmount_(lfs);
5946 
5947     LFS_TRACE("lfs_unmount -> %d", err);
5948     LFS_UNLOCK(lfs->cfg);
5949     return err;
5950 }
5951 
5952 #ifndef LFS_READONLY
5953 int lfs_remove(lfs_t *lfs, const char *path) {
5954     int err = LFS_LOCK(lfs->cfg);
5955     if (err) {
5956         return err;
5957     }
5958     LFS_TRACE("lfs_remove(%p, \"%s\")", (void*)lfs, path);
5959 
5960     err = lfs_remove_(lfs, path);
5961 
5962     LFS_TRACE("lfs_remove -> %d", err);
5963     LFS_UNLOCK(lfs->cfg);
5964     return err;
5965 }
5966 #endif
5967 
5968 #ifndef LFS_READONLY
5969 int lfs_rename(lfs_t *lfs, const char *oldpath, const char *newpath) {
5970     int err = LFS_LOCK(lfs->cfg);
5971     if (err) {
5972         return err;
5973     }
5974     LFS_TRACE("lfs_rename(%p, \"%s\", \"%s\")", (void*)lfs, oldpath, newpath);
5975 
5976     err = lfs_rename_(lfs, oldpath, newpath);
5977 
5978     LFS_TRACE("lfs_rename -> %d", err);
5979     LFS_UNLOCK(lfs->cfg);
5980     return err;
5981 }
5982 #endif
5983 
5984 int lfs_stat(lfs_t *lfs, const char *path, struct lfs_info *info) {
5985     int err = LFS_LOCK(lfs->cfg);
5986     if (err) {
5987         return err;
5988     }
5989     LFS_TRACE("lfs_stat(%p, \"%s\", %p)", (void*)lfs, path, (void*)info);
5990 
5991     err = lfs_stat_(lfs, path, info);
5992 
5993     LFS_TRACE("lfs_stat -> %d", err);
5994     LFS_UNLOCK(lfs->cfg);
5995     return err;
5996 }
5997 
5998 lfs_ssize_t lfs_getattr(lfs_t *lfs, const char *path,
5999         uint8_t type, void *buffer, lfs_size_t size) {
6000     int err = LFS_LOCK(lfs->cfg);
6001     if (err) {
6002         return err;
6003     }
6004     LFS_TRACE("lfs_getattr(%p, \"%s\", %"PRIu8", %p, %"PRIu32")",
6005             (void*)lfs, path, type, buffer, size);
6006 
6007     lfs_ssize_t res = lfs_getattr_(lfs, path, type, buffer, size);
6008 
6009     LFS_TRACE("lfs_getattr -> %"PRId32, res);
6010     LFS_UNLOCK(lfs->cfg);
6011     return res;
6012 }
6013 
6014 #ifndef LFS_READONLY
6015 int lfs_setattr(lfs_t *lfs, const char *path,
6016         uint8_t type, const void *buffer, lfs_size_t size) {
6017     int err = LFS_LOCK(lfs->cfg);
6018     if (err) {
6019         return err;
6020     }
6021     LFS_TRACE("lfs_setattr(%p, \"%s\", %"PRIu8", %p, %"PRIu32")",
6022             (void*)lfs, path, type, buffer, size);
6023 
6024     err = lfs_setattr_(lfs, path, type, buffer, size);
6025 
6026     LFS_TRACE("lfs_setattr -> %d", err);
6027     LFS_UNLOCK(lfs->cfg);
6028     return err;
6029 }
6030 #endif
6031 
6032 #ifndef LFS_READONLY
6033 int lfs_removeattr(lfs_t *lfs, const char *path, uint8_t type) {
6034     int err = LFS_LOCK(lfs->cfg);
6035     if (err) {
6036         return err;
6037     }
6038     LFS_TRACE("lfs_removeattr(%p, \"%s\", %"PRIu8")", (void*)lfs, path, type);
6039 
6040     err = lfs_removeattr_(lfs, path, type);
6041 
6042     LFS_TRACE("lfs_removeattr -> %d", err);
6043     LFS_UNLOCK(lfs->cfg);
6044     return err;
6045 }
6046 #endif
6047 
6048 #ifndef LFS_NO_MALLOC
6049 int lfs_file_open(lfs_t *lfs, lfs_file_t *file, const char *path, int flags) {
6050     int err = LFS_LOCK(lfs->cfg);
6051     if (err) {
6052         return err;
6053     }
6054     LFS_TRACE("lfs_file_open(%p, %p, \"%s\", %x)",
6055             (void*)lfs, (void*)file, path, flags);
6056     LFS_ASSERT(!lfs_mlist_isopen(lfs->mlist, (struct lfs_mlist*)file));
6057 
6058     err = lfs_file_open_(lfs, file, path, flags);
6059 
6060     LFS_TRACE("lfs_file_open -> %d", err);
6061     LFS_UNLOCK(lfs->cfg);
6062     return err;
6063 }
6064 #endif
6065 
6066 int lfs_file_opencfg(lfs_t *lfs, lfs_file_t *file,
6067         const char *path, int flags,
6068         const struct lfs_file_config *cfg) {
6069     int err = LFS_LOCK(lfs->cfg);
6070     if (err) {
6071         return err;
6072     }
6073     LFS_TRACE("lfs_file_opencfg(%p, %p, \"%s\", %x, %p {"
6074                  ".buffer=%p, .attrs=%p, .attr_count=%"PRIu32"})",
6075             (void*)lfs, (void*)file, path, flags,
6076             (void*)cfg, cfg->buffer, (void*)cfg->attrs, cfg->attr_count);
6077     LFS_ASSERT(!lfs_mlist_isopen(lfs->mlist, (struct lfs_mlist*)file));
6078 
6079     err = lfs_file_opencfg_(lfs, file, path, flags, cfg);
6080 
6081     LFS_TRACE("lfs_file_opencfg -> %d", err);
6082     LFS_UNLOCK(lfs->cfg);
6083     return err;
6084 }
6085 
6086 int lfs_file_close(lfs_t *lfs, lfs_file_t *file) {
6087     int err = LFS_LOCK(lfs->cfg);
6088     if (err) {
6089         return err;
6090     }
6091     LFS_TRACE("lfs_file_close(%p, %p)", (void*)lfs, (void*)file);
6092     LFS_ASSERT(lfs_mlist_isopen(lfs->mlist, (struct lfs_mlist*)file));
6093 
6094     err = lfs_file_close_(lfs, file);
6095 
6096     LFS_TRACE("lfs_file_close -> %d", err);
6097     LFS_UNLOCK(lfs->cfg);
6098     return err;
6099 }
6100 
6101 #ifndef LFS_READONLY
6102 int lfs_file_sync(lfs_t *lfs, lfs_file_t *file) {
6103     int err = LFS_LOCK(lfs->cfg);
6104     if (err) {
6105         return err;
6106     }
6107     LFS_TRACE("lfs_file_sync(%p, %p)", (void*)lfs, (void*)file);
6108     LFS_ASSERT(lfs_mlist_isopen(lfs->mlist, (struct lfs_mlist*)file));
6109 
6110     err = lfs_file_sync_(lfs, file);
6111 
6112     LFS_TRACE("lfs_file_sync -> %d", err);
6113     LFS_UNLOCK(lfs->cfg);
6114     return err;
6115 }
6116 #endif
6117 
6118 lfs_ssize_t lfs_file_read(lfs_t *lfs, lfs_file_t *file,
6119         void *buffer, lfs_size_t size) {
6120     int err = LFS_LOCK(lfs->cfg);
6121     if (err) {
6122         return err;
6123     }
6124     LFS_TRACE("lfs_file_read(%p, %p, %p, %"PRIu32")",
6125             (void*)lfs, (void*)file, buffer, size);
6126     LFS_ASSERT(lfs_mlist_isopen(lfs->mlist, (struct lfs_mlist*)file));
6127 
6128     lfs_ssize_t res = lfs_file_read_(lfs, file, buffer, size);
6129 
6130     LFS_TRACE("lfs_file_read -> %"PRId32, res);
6131     LFS_UNLOCK(lfs->cfg);
6132     return res;
6133 }
6134 
6135 #ifndef LFS_READONLY
6136 lfs_ssize_t lfs_file_write(lfs_t *lfs, lfs_file_t *file,
6137         const void *buffer, lfs_size_t size) {
6138     int err = LFS_LOCK(lfs->cfg);
6139     if (err) {
6140         return err;
6141     }
6142     LFS_TRACE("lfs_file_write(%p, %p, %p, %"PRIu32")",
6143             (void*)lfs, (void*)file, buffer, size);
6144     LFS_ASSERT(lfs_mlist_isopen(lfs->mlist, (struct lfs_mlist*)file));
6145 
6146     lfs_ssize_t res = lfs_file_write_(lfs, file, buffer, size);
6147 
6148     LFS_TRACE("lfs_file_write -> %"PRId32, res);
6149     LFS_UNLOCK(lfs->cfg);
6150     return res;
6151 }
6152 #endif
6153 
6154 lfs_soff_t lfs_file_seek(lfs_t *lfs, lfs_file_t *file,
6155         lfs_soff_t off, int whence) {
6156     int err = LFS_LOCK(lfs->cfg);
6157     if (err) {
6158         return err;
6159     }
6160     LFS_TRACE("lfs_file_seek(%p, %p, %"PRId32", %d)",
6161             (void*)lfs, (void*)file, off, whence);
6162     LFS_ASSERT(lfs_mlist_isopen(lfs->mlist, (struct lfs_mlist*)file));
6163 
6164     lfs_soff_t res = lfs_file_seek_(lfs, file, off, whence);
6165 
6166     LFS_TRACE("lfs_file_seek -> %"PRId32, res);
6167     LFS_UNLOCK(lfs->cfg);
6168     return res;
6169 }
6170 
6171 #ifndef LFS_READONLY
6172 int lfs_file_truncate(lfs_t *lfs, lfs_file_t *file, lfs_off_t size) {
6173     int err = LFS_LOCK(lfs->cfg);
6174     if (err) {
6175         return err;
6176     }
6177     LFS_TRACE("lfs_file_truncate(%p, %p, %"PRIu32")",
6178             (void*)lfs, (void*)file, size);
6179     LFS_ASSERT(lfs_mlist_isopen(lfs->mlist, (struct lfs_mlist*)file));
6180 
6181     err = lfs_file_truncate_(lfs, file, size);
6182 
6183     LFS_TRACE("lfs_file_truncate -> %d", err);
6184     LFS_UNLOCK(lfs->cfg);
6185     return err;
6186 }
6187 #endif
6188 
6189 lfs_soff_t lfs_file_tell(lfs_t *lfs, lfs_file_t *file) {
6190     int err = LFS_LOCK(lfs->cfg);
6191     if (err) {
6192         return err;
6193     }
6194     LFS_TRACE("lfs_file_tell(%p, %p)", (void*)lfs, (void*)file);
6195     LFS_ASSERT(lfs_mlist_isopen(lfs->mlist, (struct lfs_mlist*)file));
6196 
6197     lfs_soff_t res = lfs_file_tell_(lfs, file);
6198 
6199     LFS_TRACE("lfs_file_tell -> %"PRId32, res);
6200     LFS_UNLOCK(lfs->cfg);
6201     return res;
6202 }
6203 
6204 int lfs_file_rewind(lfs_t *lfs, lfs_file_t *file) {
6205     int err = LFS_LOCK(lfs->cfg);
6206     if (err) {
6207         return err;
6208     }
6209     LFS_TRACE("lfs_file_rewind(%p, %p)", (void*)lfs, (void*)file);
6210 
6211     err = lfs_file_rewind_(lfs, file);
6212 
6213     LFS_TRACE("lfs_file_rewind -> %d", err);
6214     LFS_UNLOCK(lfs->cfg);
6215     return err;
6216 }
6217 
6218 lfs_soff_t lfs_file_size(lfs_t *lfs, lfs_file_t *file) {
6219     int err = LFS_LOCK(lfs->cfg);
6220     if (err) {
6221         return err;
6222     }
6223     LFS_TRACE("lfs_file_size(%p, %p)", (void*)lfs, (void*)file);
6224     LFS_ASSERT(lfs_mlist_isopen(lfs->mlist, (struct lfs_mlist*)file));
6225 
6226     lfs_soff_t res = lfs_file_size_(lfs, file);
6227 
6228     LFS_TRACE("lfs_file_size -> %"PRId32, res);
6229     LFS_UNLOCK(lfs->cfg);
6230     return res;
6231 }
6232 
#ifndef LFS_READONLY
int lfs_mkdir(lfs_t *lfs, const char *path) {
    int err = LFS_LOCK(lfs->cfg);
    if (err) {
        return err;
    }
    LFS_TRACE("lfs_mkdir(%p, \"%s\")", (void*)lfs, path);

    err = lfs_mkdir_(lfs, path);

    LFS_TRACE("lfs_mkdir -> %d", err);
    LFS_UNLOCK(lfs->cfg);
    return err;
}
#endif

int lfs_dir_open(lfs_t *lfs, lfs_dir_t *dir, const char *path) {
    int err = LFS_LOCK(lfs->cfg);
    if (err) {
        return err;
    }
    LFS_TRACE("lfs_dir_open(%p, %p, \"%s\")", (void*)lfs, (void*)dir, path);
    LFS_ASSERT(!lfs_mlist_isopen(lfs->mlist, (struct lfs_mlist*)dir));

    err = lfs_dir_open_(lfs, dir, path);

    LFS_TRACE("lfs_dir_open -> %d", err);
    LFS_UNLOCK(lfs->cfg);
    return err;
}

int lfs_dir_close(lfs_t *lfs, lfs_dir_t *dir) {
    int err = LFS_LOCK(lfs->cfg);
    if (err) {
        return err;
    }
    LFS_TRACE("lfs_dir_close(%p, %p)", (void*)lfs, (void*)dir);

    err = lfs_dir_close_(lfs, dir);

    LFS_TRACE("lfs_dir_close -> %d", err);
    LFS_UNLOCK(lfs->cfg);
    return err;
}

int lfs_dir_read(lfs_t *lfs, lfs_dir_t *dir, struct lfs_info *info) {
    int err = LFS_LOCK(lfs->cfg);
    if (err) {
        return err;
    }
    LFS_TRACE("lfs_dir_read(%p, %p, %p)",
            (void*)lfs, (void*)dir, (void*)info);

    err = lfs_dir_read_(lfs, dir, info);

    LFS_TRACE("lfs_dir_read -> %d", err);
    LFS_UNLOCK(lfs->cfg);
    return err;
}

int lfs_dir_seek(lfs_t *lfs, lfs_dir_t *dir, lfs_off_t off) {
    int err = LFS_LOCK(lfs->cfg);
    if (err) {
        return err;
    }
    LFS_TRACE("lfs_dir_seek(%p, %p, %"PRIu32")",
            (void*)lfs, (void*)dir, off);

    err = lfs_dir_seek_(lfs, dir, off);

    LFS_TRACE("lfs_dir_seek -> %d", err);
    LFS_UNLOCK(lfs->cfg);
    return err;
}

lfs_soff_t lfs_dir_tell(lfs_t *lfs, lfs_dir_t *dir) {
    int err = LFS_LOCK(lfs->cfg);
    if (err) {
        return err;
    }
    LFS_TRACE("lfs_dir_tell(%p, %p)", (void*)lfs, (void*)dir);

    lfs_soff_t res = lfs_dir_tell_(lfs, dir);

    LFS_TRACE("lfs_dir_tell -> %"PRId32, res);
    LFS_UNLOCK(lfs->cfg);
    return res;
}

int lfs_dir_rewind(lfs_t *lfs, lfs_dir_t *dir) {
    int err = LFS_LOCK(lfs->cfg);
    if (err) {
        return err;
    }
    LFS_TRACE("lfs_dir_rewind(%p, %p)", (void*)lfs, (void*)dir);

    err = lfs_dir_rewind_(lfs, dir);

    LFS_TRACE("lfs_dir_rewind -> %d", err);
    LFS_UNLOCK(lfs->cfg);
    return err;
}

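// A minimal usage sketch of the directory wrappers above, not part of the
// littlefs sources: LFS_USAGE_EXAMPLES is a hypothetical guard, so this is
// never compiled into the library. lfs_dir_read returns a positive value
// per entry, 0 at the end of the directory, and a negative error code on
// failure.
#ifdef LFS_USAGE_EXAMPLES
static int example_list_dir(lfs_t *lfs, const char *path) {
    lfs_dir_t dir;
    int err = lfs_dir_open(lfs, &dir, path);
    if (err) {
        return err;
    }

    struct lfs_info info;
    while (true) {
        int res = lfs_dir_read(lfs, &dir, &info);
        if (res < 0) {
            // propagate the error, but still release the handle
            lfs_dir_close(lfs, &dir);
            return res;
        }
        if (res == 0) {
            // end of directory
            break;
        }

        // info.name holds the entry's name, info.type is LFS_TYPE_REG or
        // LFS_TYPE_DIR, and info.size is only meaningful for regular files
        (void)info;
    }

    return lfs_dir_close(lfs, &dir);
}
#endif
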
int lfs_fs_stat(lfs_t *lfs, struct lfs_fsinfo *fsinfo) {
    int err = LFS_LOCK(lfs->cfg);
    if (err) {
        return err;
    }
    LFS_TRACE("lfs_fs_stat(%p, %p)", (void*)lfs, (void*)fsinfo);

    err = lfs_fs_stat_(lfs, fsinfo);

    LFS_TRACE("lfs_fs_stat -> %d", err);
    LFS_UNLOCK(lfs->cfg);
    return err;
}

lfs_ssize_t lfs_fs_size(lfs_t *lfs) {
    int err = LFS_LOCK(lfs->cfg);
    if (err) {
        return err;
    }
    LFS_TRACE("lfs_fs_size(%p)", (void*)lfs);

    lfs_ssize_t res = lfs_fs_size_(lfs);

    LFS_TRACE("lfs_fs_size -> %"PRId32, res);
    LFS_UNLOCK(lfs->cfg);
    return res;
}

int lfs_fs_traverse(lfs_t *lfs, int (*cb)(void *, lfs_block_t), void *data) {
    int err = LFS_LOCK(lfs->cfg);
    if (err) {
        return err;
    }
    LFS_TRACE("lfs_fs_traverse(%p, %p, %p)",
            (void*)lfs, (void*)(uintptr_t)cb, data);

    err = lfs_fs_traverse_(lfs, cb, data, true);

    LFS_TRACE("lfs_fs_traverse -> %d", err);
    LFS_UNLOCK(lfs->cfg);
    return err;
}

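// A hedged sketch of an lfs_fs_traverse callback, not part of the littlefs
// sources (LFS_USAGE_EXAMPLES is a hypothetical guard): the callback is
// invoked for each block currently in use, so counting the calls gives a
// best-effort allocated block count, essentially what lfs_fs_size reports.
#ifdef LFS_USAGE_EXAMPLES
static int example_count_block(void *data, lfs_block_t block) {
    (void)block;
    lfs_size_t *count = data;
    *count += 1;
    return 0;
}

static lfs_ssize_t example_blocks_in_use(lfs_t *lfs) {
    lfs_size_t count = 0;
    int err = lfs_fs_traverse(lfs, example_count_block, &count);
    if (err) {
        return err;
    }

    // multiply by the configured block_size to estimate usage in bytes
    return count;
}
#endif
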
#ifndef LFS_READONLY
int lfs_fs_mkconsistent(lfs_t *lfs) {
    int err = LFS_LOCK(lfs->cfg);
    if (err) {
        return err;
    }
    LFS_TRACE("lfs_fs_mkconsistent(%p)", (void*)lfs);

    err = lfs_fs_mkconsistent_(lfs);

    LFS_TRACE("lfs_fs_mkconsistent -> %d", err);
    LFS_UNLOCK(lfs->cfg);
    return err;
}
#endif

#ifndef LFS_READONLY
int lfs_fs_gc(lfs_t *lfs) {
    int err = LFS_LOCK(lfs->cfg);
    if (err) {
        return err;
    }
    LFS_TRACE("lfs_fs_gc(%p)", (void*)lfs);

    err = lfs_fs_gc_(lfs);

    LFS_TRACE("lfs_fs_gc -> %d", err);
    LFS_UNLOCK(lfs->cfg);
    return err;
}
#endif

#ifndef LFS_READONLY
int lfs_fs_grow(lfs_t *lfs, lfs_size_t block_count) {
    int err = LFS_LOCK(lfs->cfg);
    if (err) {
        return err;
    }
    LFS_TRACE("lfs_fs_grow(%p, %"PRIu32")", (void*)lfs, block_count);

    err = lfs_fs_grow_(lfs, block_count);

    LFS_TRACE("lfs_fs_grow -> %d", err);
    LFS_UNLOCK(lfs->cfg);
    return err;
}
#endif

#ifdef LFS_MIGRATE
int lfs_migrate(lfs_t *lfs, const struct lfs_config *cfg) {
    int err = LFS_LOCK(cfg);
    if (err) {
        return err;
    }
    LFS_TRACE("lfs_migrate(%p, %p {.context=%p, "
                ".read=%p, .prog=%p, .erase=%p, .sync=%p, "
                ".read_size=%"PRIu32", .prog_size=%"PRIu32", "
                ".block_size=%"PRIu32", .block_count=%"PRIu32", "
                ".block_cycles=%"PRIu32", .cache_size=%"PRIu32", "
                ".lookahead_size=%"PRIu32", .read_buffer=%p, "
                ".prog_buffer=%p, .lookahead_buffer=%p, "
                ".name_max=%"PRIu32", .file_max=%"PRIu32", "
                ".attr_max=%"PRIu32"})",
            (void*)lfs, (void*)cfg, cfg->context,
            (void*)(uintptr_t)cfg->read, (void*)(uintptr_t)cfg->prog,
            (void*)(uintptr_t)cfg->erase, (void*)(uintptr_t)cfg->sync,
            cfg->read_size, cfg->prog_size, cfg->block_size, cfg->block_count,
            cfg->block_cycles, cfg->cache_size, cfg->lookahead_size,
            cfg->read_buffer, cfg->prog_buffer, cfg->lookahead_buffer,
            cfg->name_max, cfg->file_max, cfg->attr_max);

    err = lfs_migrate_(lfs, cfg);

    LFS_TRACE("lfs_migrate -> %d", err);
    LFS_UNLOCK(cfg);
    return err;
}
#endif

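// A hedged sketch of how the LFS_LOCK/LFS_UNLOCK hooks used by the wrappers
// above are typically wired up, not part of the littlefs sources: when the
// library is built with LFS_THREADSAFE, struct lfs_config gains .lock and
// .unlock callbacks, which these wrappers invoke around every public call.
// LFS_USAGE_EXAMPLES is a hypothetical guard, the example_mutex_* functions
// are hypothetical placeholders for a real RTOS or pthread mutex, and the
// block device fields of the config are omitted for brevity.
#if defined(LFS_USAGE_EXAMPLES) && defined(LFS_THREADSAFE)
// hypothetical, externally-provided mutex hooks returning 0 on success or a
// negative error code, which the wrappers propagate to the caller
extern int example_mutex_lock(const struct lfs_config *cfg);
extern int example_mutex_unlock(const struct lfs_config *cfg);

static const struct lfs_config example_threadsafe_cfg = {
    // .read, .prog, .erase, .sync and the geometry fields go here as usual
    .lock = example_mutex_lock,
    .unlock = example_mutex_unlock,
};
#endif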