1 /*
2 * The little filesystem
3 *
4 * Copyright (c) 2022, The littlefs authors.
5 * Copyright (c) 2017, Arm Limited. All rights reserved.
6 * SPDX-License-Identifier: BSD-3-Clause
7 */
8 #define LFS_NO_ASSERT
9 #include "lfs.h"
10 #include "lfs_util.h"
11
// some constants used throughout the code
// sentinel block addresses: ~0 marks "no block" (also used to mark a
// dropped cache), -2 marks data staged/stored inline in metadata
#define LFS_BLOCK_NULL ((lfs_block_t)-1)
#define LFS_BLOCK_INLINE ((lfs_block_t)-2)

// internal non-error result codes; positive so they never collide with
// the negative lfs_error codes
enum {
    LFS_OK_RELOCATED = 1,
    LFS_OK_DROPPED = 2,
    LFS_OK_ORPHANED = 3,
};

// three-way comparison results returned by lfs_bd_cmp
enum {
    LFS_CMP_EQ = 0,
    LFS_CMP_LT = 1,
    LFS_CMP_GT = 2,
};
27
28
29 /// Caching block device operations ///
30
lfs_cache_drop(lfs_t * lfs,lfs_cache_t * rcache)31 static inline void lfs_cache_drop(lfs_t *lfs, lfs_cache_t *rcache) {
32 // do not zero, cheaper if cache is readonly or only going to be
33 // written with identical data (during relocates)
34 (void)lfs;
35 rcache->block = LFS_BLOCK_NULL;
36 }
37
// Invalidate a program cache, scrubbing its buffer.
//
// The buffer is filled with 0xff (the erased-flash state) so stale data
// cannot leak into a later program operation.
static inline void lfs_cache_zero(lfs_t *lfs, lfs_cache_t *pcache) {
    memset(pcache->buffer, 0xff, lfs->cfg->cache_size);
    pcache->block = LFS_BLOCK_NULL;
}
43
// Read size bytes from (block, off) on the block device into buffer.
//
// Reads are serviced from the pcache (may be NULL) and rcache when the
// requested range overlaps them; otherwise the rcache is filled from the
// device, or bypassed entirely for large aligned reads. hint is how many
// bytes the caller expects to read from this region soon and sizes the
// rcache fill.
//
// Returns 0 on success, LFS_ERR_CORRUPT if the request lies outside the
// device geometry, or an error from the device read callback.
static int lfs_bd_read(lfs_t *lfs,
        const lfs_cache_t *pcache, lfs_cache_t *rcache, lfs_size_t hint,
        lfs_block_t block, lfs_off_t off,
        void *buffer, lfs_size_t size) {
    uint8_t *data = buffer;
    if (block >= lfs->cfg->block_count ||
            off+size > lfs->cfg->block_size) {
        return LFS_ERR_CORRUPT;
    }

    while (size > 0) {
        // diff = how many bytes we can satisfy in this iteration
        lfs_size_t diff = size;

        if (pcache && block == pcache->block &&
                off < pcache->off + pcache->size) {
            if (off >= pcache->off) {
                // is already in pcache?
                diff = lfs_min(diff, pcache->size - (off-pcache->off));
                memcpy(data, &pcache->buffer[off-pcache->off], diff);

                data += diff;
                off += diff;
                size -= diff;
                continue;
            }

            // pcache takes priority
            diff = lfs_min(diff, pcache->off-off);
        }

        if (block == rcache->block &&
                off < rcache->off + rcache->size) {
            if (off >= rcache->off) {
                // is already in rcache?
                diff = lfs_min(diff, rcache->size - (off-rcache->off));
                memcpy(data, &rcache->buffer[off-rcache->off], diff);

                data += diff;
                off += diff;
                size -= diff;
                continue;
            }

            // rcache takes priority
            diff = lfs_min(diff, rcache->off-off);
        }

        if (size >= hint && off % lfs->cfg->read_size == 0 &&
                size >= lfs->cfg->read_size) {
            // bypass cache? read directly into the caller's buffer when
            // the request is aligned and at least as large as the hint
            diff = lfs_aligndown(diff, lfs->cfg->read_size);
            int err = lfs->cfg->read(lfs->cfg, block, off, data, diff);
            if (err) {
                return err;
            }

            data += diff;
            off += diff;
            size -= diff;
            continue;
        }

        // load to cache, first condition can no longer fail
        LFS_ASSERT(block < lfs->cfg->block_count);
        rcache->block = block;
        rcache->off = lfs_aligndown(off, lfs->cfg->read_size);
        // fill up to hint bytes, clamped to the block end and cache size
        rcache->size = lfs_min(
                lfs_min(
                    lfs_alignup(off+hint, lfs->cfg->read_size),
                    lfs->cfg->block_size)
                - rcache->off,
                lfs->cfg->cache_size);
        int err = lfs->cfg->read(lfs->cfg, rcache->block,
                rcache->off, rcache->buffer, rcache->size);
        LFS_ASSERT(err <= 0);
        if (err) {
            return err;
        }
    }

    return 0;
}
126
// Compare size bytes at (block, off) on disk against buffer.
//
// Returns LFS_CMP_EQ/LT/GT depending on how the on-disk data orders
// against the in-memory data, or a negative error from lfs_bd_read.
static int lfs_bd_cmp(lfs_t *lfs,
        const lfs_cache_t *pcache, lfs_cache_t *rcache, lfs_size_t hint,
        lfs_block_t block, lfs_off_t off,
        const void *buffer, lfs_size_t size) {
    const uint8_t *data = buffer;
    lfs_off_t i = 0;

    // walk the on-disk region in small chunks, comparing as we go
    while (i < size) {
        uint8_t chunk[8];
        lfs_size_t diff = lfs_min(size-i, sizeof(chunk));

        int res = lfs_bd_read(lfs,
                pcache, rcache, hint-i,
                block, off+i, chunk, diff);
        if (res) {
            return res;
        }

        res = memcmp(chunk, data + i, diff);
        if (res < 0) {
            return LFS_CMP_LT;
        } else if (res > 0) {
            return LFS_CMP_GT;
        }

        i += diff;
    }

    return LFS_CMP_EQ;
}
153
#ifndef LFS_READONLY
// Flush any pending data in the pcache out to the block device.
//
// If validate is true the programmed data is read back and compared
// against the cache; a mismatch reports LFS_ERR_CORRUPT so the caller
// can relocate to another block. Inline "blocks" are never flushed.
static int lfs_bd_flush(lfs_t *lfs,
        lfs_cache_t *pcache, lfs_cache_t *rcache, bool validate) {
    if (pcache->block != LFS_BLOCK_NULL && pcache->block != LFS_BLOCK_INLINE) {
        LFS_ASSERT(pcache->block < lfs->cfg->block_count);
        // pad the program up to the device's prog alignment
        lfs_size_t diff = lfs_alignup(pcache->size, lfs->cfg->prog_size);
        int err = lfs->cfg->prog(lfs->cfg, pcache->block,
                pcache->off, pcache->buffer, diff);
        LFS_ASSERT(err <= 0);
        if (err) {
            return err;
        }

        if (validate) {
            // check data on disk
            lfs_cache_drop(lfs, rcache);
            int res = lfs_bd_cmp(lfs,
                    NULL, rcache, diff,
                    pcache->block, pcache->off, pcache->buffer, diff);
            if (res < 0) {
                return res;
            }

            if (res != LFS_CMP_EQ) {
                return LFS_ERR_CORRUPT;
            }
        }

        lfs_cache_zero(lfs, pcache);
    }

    return 0;
}
#endif
188
189 #ifndef LFS_READONLY
lfs_bd_sync(lfs_t * lfs,lfs_cache_t * pcache,lfs_cache_t * rcache,bool validate)190 static int lfs_bd_sync(lfs_t *lfs,
191 lfs_cache_t *pcache, lfs_cache_t *rcache, bool validate) {
192 lfs_cache_drop(lfs, rcache);
193
194 int err = lfs_bd_flush(lfs, pcache, rcache, validate);
195 if (err) {
196 return err;
197 }
198
199 err = lfs->cfg->sync(lfs->cfg);
200 LFS_ASSERT(err <= 0);
201 return err;
202 }
203 #endif
204
#ifndef LFS_READONLY
// Program size bytes at (block, off) through the pcache.
//
// Data is staged in the pcache and flushed (with optional read-back
// validation) whenever the cache fills. block may be LFS_BLOCK_INLINE
// to stage inline data that never reaches the device directly.
// Returns 0 on success or a negative error code.
static int lfs_bd_prog(lfs_t *lfs,
        lfs_cache_t *pcache, lfs_cache_t *rcache, bool validate,
        lfs_block_t block, lfs_off_t off,
        const void *buffer, lfs_size_t size) {
    const uint8_t *data = buffer;
    LFS_ASSERT(block == LFS_BLOCK_INLINE || block < lfs->cfg->block_count);
    LFS_ASSERT(off + size <= lfs->cfg->block_size);

    while (size > 0) {
        if (block == pcache->block &&
                off >= pcache->off &&
                off < pcache->off + lfs->cfg->cache_size) {
            // already fits in pcache?
            lfs_size_t diff = lfs_min(size,
                    lfs->cfg->cache_size - (off-pcache->off));
            memcpy(&pcache->buffer[off-pcache->off], data, diff);

            data += diff;
            off += diff;
            size -= diff;

            pcache->size = lfs_max(pcache->size, off - pcache->off);
            if (pcache->size == lfs->cfg->cache_size) {
                // eagerly flush out pcache if we fill up
                int err = lfs_bd_flush(lfs, pcache, rcache, validate);
                if (err) {
                    return err;
                }
            }

            continue;
        }

        // pcache must have been flushed, either by programming an
        // entire block or manually flushing the pcache
        LFS_ASSERT(pcache->block == LFS_BLOCK_NULL);

        // prepare pcache, first condition can no longer fail
        pcache->block = block;
        pcache->off = lfs_aligndown(off, lfs->cfg->prog_size);
        pcache->size = 0;
    }

    return 0;
}
#endif
252
253 #ifndef LFS_READONLY
lfs_bd_erase(lfs_t * lfs,lfs_block_t block)254 static int lfs_bd_erase(lfs_t *lfs, lfs_block_t block) {
255 LFS_ASSERT(block < lfs->cfg->block_count);
256 int err = lfs->cfg->erase(lfs->cfg, block);
257 LFS_ASSERT(err <= 0);
258 return err;
259 }
260 #endif
261
262
/// Small type-level utilities ///
// operations on block pairs

// swap the two blocks of a metadata pair in place
static inline void lfs_pair_swap(lfs_block_t pair[2]) {
    lfs_block_t t = pair[0];
    pair[0] = pair[1];
    pair[1] = t;
}

// true if either block of the pair is the null block address
static inline bool lfs_pair_isnull(const lfs_block_t pair[2]) {
    return pair[0] == LFS_BLOCK_NULL || pair[1] == LFS_BLOCK_NULL;
}

// compare two pairs; note this returns 0 ("equal") when the pairs share
// at least one block in any slot, nonzero only when fully disjoint
static inline int lfs_pair_cmp(
        const lfs_block_t paira[2],
        const lfs_block_t pairb[2]) {
    return !(paira[0] == pairb[0] || paira[1] == pairb[1] ||
            paira[0] == pairb[1] || paira[1] == pairb[0]);
}

#ifndef LFS_READONLY
// true if both pairs reference the same two blocks, in either order
static inline bool lfs_pair_sync(
        const lfs_block_t paira[2],
        const lfs_block_t pairb[2]) {
    return (paira[0] == pairb[0] && paira[1] == pairb[1]) ||
        (paira[0] == pairb[1] && paira[1] == pairb[0]);
}
#endif

// convert a pair from little-endian (on-disk) to host byte order
static inline void lfs_pair_fromle32(lfs_block_t pair[2]) {
    pair[0] = lfs_fromle32(pair[0]);
    pair[1] = lfs_fromle32(pair[1]);
}

#ifndef LFS_READONLY
// convert a pair from host to little-endian (on-disk) byte order
static inline void lfs_pair_tole32(lfs_block_t pair[2]) {
    pair[0] = lfs_tole32(pair[0]);
    pair[1] = lfs_tole32(pair[1]);
}
#endif
302
// operations on 32-bit entry tags
//
// tag layout (msb first, per the masks below):
//   [1] invalid bit  [11] type  [10] id  [10] size
typedef uint32_t lfs_tag_t;
typedef int32_t lfs_stag_t;

// construct a tag from its type/id/size fields
#define LFS_MKTAG(type, id, size) \
    (((lfs_tag_t)(type) << 20) | ((lfs_tag_t)(id) << 10) | (lfs_tag_t)(size))

// conditionally construct a tag, falling back to a noop tag
#define LFS_MKTAG_IF(cond, type, id, size) \
    ((cond) ? LFS_MKTAG(type, id, size) : LFS_MKTAG(LFS_FROM_NOOP, 0, 0))

// construct one of two tags based on a condition
#define LFS_MKTAG_IF_ELSE(cond, type1, id1, size1, type2, id2, size2) \
    ((cond) ? LFS_MKTAG(type1, id1, size1) : LFS_MKTAG(type2, id2, size2))

// a tag is valid when its top (invalid) bit is clear
static inline bool lfs_tag_isvalid(lfs_tag_t tag) {
    return !(tag & 0x80000000);
}

// deleted tags encode the all-ones size 0x3ff; the shift pair
// sign-extends the low 10 bits and compares against -1
static inline bool lfs_tag_isdelete(lfs_tag_t tag) {
    return ((int32_t)(tag << 22) >> 22) == -1;
}

// top 3 bits of the 11-bit type field (result keeps the low 8 bits zero)
static inline uint16_t lfs_tag_type1(lfs_tag_t tag) {
    return (tag & 0x70000000) >> 20;
}

// the full 11-bit type field
static inline uint16_t lfs_tag_type3(lfs_tag_t tag) {
    return (tag & 0x7ff00000) >> 20;
}

// low 8 bits of the type field
static inline uint8_t lfs_tag_chunk(lfs_tag_t tag) {
    return (tag & 0x0ff00000) >> 20;
}

// chunk reinterpreted as a signed id delta (used by splice tags)
static inline int8_t lfs_tag_splice(lfs_tag_t tag) {
    return (int8_t)lfs_tag_chunk(tag);
}

// the 10-bit id field
static inline uint16_t lfs_tag_id(lfs_tag_t tag) {
    return (tag & 0x000ffc00) >> 10;
}

// the 10-bit size field
static inline lfs_size_t lfs_tag_size(lfs_tag_t tag) {
    return tag & 0x000003ff;
}

// on-disk size of a tag plus its attached data; delete tags carry no
// data (adding isdelete's 1 carries the 0x3ff size field over to zero)
static inline lfs_size_t lfs_tag_dsize(lfs_tag_t tag) {
    return sizeof(tag) + lfs_tag_size(tag + lfs_tag_isdelete(tag));
}
351
// operations on attributes in attribute lists

// a single in-flight metadata attribute: tag plus the data to commit
struct lfs_mattr {
    lfs_tag_t tag;
    const void *buffer;
};

// location of attribute data that still lives on disk
struct lfs_diskoff {
    lfs_block_t block;
    lfs_off_t off;
};

// expand a literal attribute list into an (attrs, attrcount) pair
#define LFS_MKATTRS(...) \
    (struct lfs_mattr[]){__VA_ARGS__}, \
    sizeof((struct lfs_mattr[]){__VA_ARGS__}) / sizeof(struct lfs_mattr)
366
367 // operations on global state
lfs_gstate_xor(lfs_gstate_t * a,const lfs_gstate_t * b)368 static inline void lfs_gstate_xor(lfs_gstate_t *a, const lfs_gstate_t *b) {
369 for (int i = 0; i < 3; i++) {
370 ((uint32_t*)a)[i] ^= ((const uint32_t*)b)[i];
371 }
372 }
373
// True if the gstate is all zeros, i.e. no pending global state.
//
// Fix: the original cast discarded the const qualifier from the
// parameter ((uint32_t*)a); use a const-correct cast so builds with
// -Wcast-qual stay clean and the read-only contract is honored.
static inline bool lfs_gstate_iszero(const lfs_gstate_t *a) {
    for (int i = 0; i < 3; i++) {
        if (((const uint32_t*)a)[i] != 0) {
            return false;
        }
    }
    return true;
}
382
#ifndef LFS_READONLY
// true if the gstate records pending orphans (the count lives in the
// gstate tag's size field)
static inline bool lfs_gstate_hasorphans(const lfs_gstate_t *a) {
    return lfs_tag_size(a->tag);
}

// number of pending orphans recorded in the gstate tag
static inline uint8_t lfs_gstate_getorphans(const lfs_gstate_t *a) {
    return lfs_tag_size(a->tag);
}

// true if the gstate records a pending move (nonzero type field)
static inline bool lfs_gstate_hasmove(const lfs_gstate_t *a) {
    return lfs_tag_type1(a->tag);
}
#endif
396
// true if the gstate records a pending move located on this metadata
// pair (lfs_pair_cmp returns 0 when the pairs share a block)
static inline bool lfs_gstate_hasmovehere(const lfs_gstate_t *a,
        const lfs_block_t *pair) {
    return lfs_tag_type1(a->tag) && lfs_pair_cmp(a->pair, pair) == 0;
}
401
// convert a gstate from little-endian (on-disk) to host byte order
static inline void lfs_gstate_fromle32(lfs_gstate_t *a) {
    a->tag = lfs_fromle32(a->tag);
    a->pair[0] = lfs_fromle32(a->pair[0]);
    a->pair[1] = lfs_fromle32(a->pair[1]);
}

#ifndef LFS_READONLY
// convert a gstate from host to little-endian (on-disk) byte order
static inline void lfs_gstate_tole32(lfs_gstate_t *a) {
    a->tag = lfs_tole32(a->tag);
    a->pair[0] = lfs_tole32(a->pair[0]);
    a->pair[1] = lfs_tole32(a->pair[1]);
}
#endif
415
// other endianness operations

// convert a ctz structure from little-endian (on-disk) to host order
static void lfs_ctz_fromle32(struct lfs_ctz *ctz) {
    ctz->head = lfs_fromle32(ctz->head);
    ctz->size = lfs_fromle32(ctz->size);
}

#ifndef LFS_READONLY
// convert a ctz structure from host to little-endian (on-disk) order
static void lfs_ctz_tole32(struct lfs_ctz *ctz) {
    ctz->head = lfs_tole32(ctz->head);
    ctz->size = lfs_tole32(ctz->size);
}
#endif

// convert an on-disk superblock to host byte order
static inline void lfs_superblock_fromle32(lfs_superblock_t *superblock) {
    superblock->version = lfs_fromle32(superblock->version);
    superblock->block_size = lfs_fromle32(superblock->block_size);
    superblock->block_count = lfs_fromle32(superblock->block_count);
    superblock->name_max = lfs_fromle32(superblock->name_max);
    superblock->file_max = lfs_fromle32(superblock->file_max);
    superblock->attr_max = lfs_fromle32(superblock->attr_max);
}

#ifndef LFS_READONLY
// convert a superblock from host to on-disk (little-endian) byte order
static inline void lfs_superblock_tole32(lfs_superblock_t *superblock) {
    superblock->version = lfs_tole32(superblock->version);
    superblock->block_size = lfs_tole32(superblock->block_size);
    superblock->block_count = lfs_tole32(superblock->block_count);
    superblock->name_max = lfs_tole32(superblock->name_max);
    superblock->file_max = lfs_tole32(superblock->file_max);
    superblock->attr_max = lfs_tole32(superblock->attr_max);
}
#endif
448
#ifndef LFS_NO_ASSERT
// true if node is present in the open-handle list starting at head;
// only used by assertions, so compiled out under LFS_NO_ASSERT
static bool lfs_mlist_isopen(struct lfs_mlist *head,
        struct lfs_mlist *node) {
    for (struct lfs_mlist **p = &head; *p; p = &(*p)->next) {
        if (*p == (struct lfs_mlist*)node) {
            return true;
        }
    }

    return false;
}
#endif

// unlink mlist from the filesystem's list of open metadata handles
// (no-op if the node is not in the list)
static void lfs_mlist_remove(lfs_t *lfs, struct lfs_mlist *mlist) {
    for (struct lfs_mlist **p = &lfs->mlist; *p; p = &(*p)->next) {
        if (*p == mlist) {
            *p = (*p)->next;
            break;
        }
    }
}

// push mlist onto the front of the filesystem's open-handle list
static void lfs_mlist_append(lfs_t *lfs, struct lfs_mlist *mlist) {
    mlist->next = lfs->mlist;
    lfs->mlist = mlist;
}
475
476
/// Internal operations predeclared here ///
#ifndef LFS_READONLY
// write-path operations, compiled out of read-only builds
static int lfs_dir_commit(lfs_t *lfs, lfs_mdir_t *dir,
        const struct lfs_mattr *attrs, int attrcount);
static int lfs_dir_compact(lfs_t *lfs,
        lfs_mdir_t *dir, const struct lfs_mattr *attrs, int attrcount,
        lfs_mdir_t *source, uint16_t begin, uint16_t end);
static lfs_ssize_t lfs_file_flushedwrite(lfs_t *lfs, lfs_file_t *file,
        const void *buffer, lfs_size_t size);
static lfs_ssize_t lfs_file_rawwrite(lfs_t *lfs, lfs_file_t *file,
        const void *buffer, lfs_size_t size);
static int lfs_file_rawsync(lfs_t *lfs, lfs_file_t *file);
static int lfs_file_outline(lfs_t *lfs, lfs_file_t *file);
static int lfs_file_flush(lfs_t *lfs, lfs_file_t *file);

// filesystem-level consistency/repair operations
static int lfs_fs_deorphan(lfs_t *lfs, bool powerloss);
static int lfs_fs_preporphans(lfs_t *lfs, int8_t orphans);
static void lfs_fs_prepmove(lfs_t *lfs,
        uint16_t id, const lfs_block_t pair[2]);
static int lfs_fs_pred(lfs_t *lfs, const lfs_block_t dir[2],
        lfs_mdir_t *pdir);
static lfs_stag_t lfs_fs_parent(lfs_t *lfs, const lfs_block_t dir[2],
        lfs_mdir_t *parent);
static int lfs_fs_forceconsistency(lfs_t *lfs);
#endif

#ifdef LFS_MIGRATE
// migration support for the littlefs v1 on-disk format
static int lfs1_traverse(lfs_t *lfs,
        int (*cb)(void*, lfs_block_t), void *data);
#endif

static int lfs_dir_rawrewind(lfs_t *lfs, lfs_dir_t *dir);

// read-path operations, always available
static lfs_ssize_t lfs_file_flushedread(lfs_t *lfs, lfs_file_t *file,
        void *buffer, lfs_size_t size);
static lfs_ssize_t lfs_file_rawread(lfs_t *lfs, lfs_file_t *file,
        void *buffer, lfs_size_t size);
static int lfs_file_rawclose(lfs_t *lfs, lfs_file_t *file);
static lfs_soff_t lfs_file_rawsize(lfs_t *lfs, lfs_file_t *file);

static lfs_ssize_t lfs_fs_rawsize(lfs_t *lfs);
static int lfs_fs_rawtraverse(lfs_t *lfs,
        int (*cb)(void *data, lfs_block_t block), void *data,
        bool includeorphans);

static int lfs_deinit(lfs_t *lfs);
static int lfs_rawunmount(lfs_t *lfs);
525
/// Block allocator ///
#ifndef LFS_READONLY
// Traversal callback: mark a block as in-use in the lookahead bitmap.
//
// Block addresses are tracked relative to free.off, modulo the device
// size; blocks outside the current lookahead window are ignored.
static int lfs_alloc_lookahead(void *p, lfs_block_t block) {
    lfs_t *lfs = (lfs_t*)p;
    lfs_block_t off = ((block - lfs->free.off)
            + lfs->cfg->block_count) % lfs->cfg->block_count;

    if (off < lfs->free.size) {
        lfs->free.buffer[off / 32] |= 1U << (off % 32);
    }

    return 0;
}
#endif
540
// indicate allocated blocks have been committed into the filesystem, this
// is to prevent blocks from being garbage collected in the middle of a
// commit operation (resets the "blocks scanned since ack" budget)
static void lfs_alloc_ack(lfs_t *lfs) {
    lfs->free.ack = lfs->cfg->block_count;
}

// drop the lookahead buffer, this is done during mounting and failed
// traversals in order to avoid invalid lookahead state
static void lfs_alloc_drop(lfs_t *lfs) {
    lfs->free.size = 0;
    lfs->free.i = 0;
    lfs_alloc_ack(lfs);
}
555
#ifndef LFS_READONLY
// Allocate an unused block, storing its address in *block.
//
// Scans the current lookahead bitmap for a clear bit; when the window
// is exhausted, advances it and rebuilds the bitmap by traversing the
// filesystem. Returns LFS_ERR_NOSPC once every block has been examined
// since the last lfs_alloc_ack without finding a free one.
static int lfs_alloc(lfs_t *lfs, lfs_block_t *block) {
    while (true) {
        while (lfs->free.i != lfs->free.size) {
            lfs_block_t off = lfs->free.i;
            lfs->free.i += 1;
            lfs->free.ack -= 1;

            if (!(lfs->free.buffer[off / 32] & (1U << (off % 32)))) {
                // found a free block
                *block = (lfs->free.off + off) % lfs->cfg->block_count;

                // eagerly find next off so an alloc ack can
                // discredit old lookahead blocks
                while (lfs->free.i != lfs->free.size &&
                        (lfs->free.buffer[lfs->free.i / 32]
                            & (1U << (lfs->free.i % 32)))) {
                    lfs->free.i += 1;
                    lfs->free.ack -= 1;
                }

                return 0;
            }
        }

        // check if we have looked at all blocks since last ack
        if (lfs->free.ack == 0) {
            LFS_ERROR("No more free space %"PRIu32,
                    lfs->free.i + lfs->free.off);
            return LFS_ERR_NOSPC;
        }

        // advance the lookahead window to the next chunk of blocks
        lfs->free.off = (lfs->free.off + lfs->free.size)
                % lfs->cfg->block_count;
        lfs->free.size = lfs_min(8*lfs->cfg->lookahead_size, lfs->free.ack);
        lfs->free.i = 0;

        // find mask of free blocks from tree
        memset(lfs->free.buffer, 0, lfs->cfg->lookahead_size);
        int err = lfs_fs_rawtraverse(lfs, lfs_alloc_lookahead, lfs, true);
        if (err) {
            lfs_alloc_drop(lfs);
            return err;
        }
    }
}
#endif
603
/// Metadata pair and directory operations ///

// Find the most recent tag matching gmask/gtag in a fetched mdir and
// copy up to gsize bytes of its data (starting at offset goff within
// the attr) into gbuffer.
//
// Scans the commit log backwards from the last fetched tag, adjusting
// the searched-for id (gdiff) for any create/delete splices and any
// pending move recorded in the on-disk global state. Returns the
// matching tag (id-adjusted) on success, LFS_ERR_NOENT if no match, or
// a negative error code.
static lfs_stag_t lfs_dir_getslice(lfs_t *lfs, const lfs_mdir_t *dir,
        lfs_tag_t gmask, lfs_tag_t gtag,
        lfs_off_t goff, void *gbuffer, lfs_size_t gsize) {
    lfs_off_t off = dir->off;
    lfs_tag_t ntag = dir->etag;
    lfs_stag_t gdiff = 0;

    if (lfs_gstate_hasmovehere(&lfs->gdisk, dir->pair) &&
            lfs_tag_id(gmask) != 0 &&
            lfs_tag_id(lfs->gdisk.tag) <= lfs_tag_id(gtag)) {
        // synthetic moves
        gdiff -= LFS_MKTAG(0, 1, 0);
    }

    // iterate over dir block backwards (for faster lookups)
    while (off >= sizeof(lfs_tag_t) + lfs_tag_dsize(ntag)) {
        off -= lfs_tag_dsize(ntag);
        lfs_tag_t tag = ntag;
        int err = lfs_bd_read(lfs,
                NULL, &lfs->rcache, sizeof(ntag),
                dir->pair[0], off, &ntag, sizeof(ntag));
        if (err) {
            return err;
        }

        // on-disk tags are xored against the following tag
        ntag = (lfs_frombe32(ntag) ^ tag) & 0x7fffffff;

        if (lfs_tag_id(gmask) != 0 &&
                lfs_tag_type1(tag) == LFS_TYPE_SPLICE &&
                lfs_tag_id(tag) <= lfs_tag_id(gtag - gdiff)) {
            if (tag == (LFS_MKTAG(LFS_TYPE_CREATE, 0, 0) |
                    (LFS_MKTAG(0, 0x3ff, 0) & (gtag - gdiff)))) {
                // found where we were created
                return LFS_ERR_NOENT;
            }

            // move around splices
            gdiff += LFS_MKTAG(0, lfs_tag_splice(tag), 0);
        }

        if ((gmask & tag) == (gmask & (gtag - gdiff))) {
            if (lfs_tag_isdelete(tag)) {
                return LFS_ERR_NOENT;
            }

            lfs_size_t diff = lfs_min(lfs_tag_size(tag), gsize);
            err = lfs_bd_read(lfs,
                    NULL, &lfs->rcache, diff,
                    dir->pair[0], off+sizeof(tag)+goff, gbuffer, diff);
            if (err) {
                return err;
            }

            // zero-fill whatever the on-disk attr didn't cover
            memset((uint8_t*)gbuffer + diff, 0, gsize - diff);

            return tag + gdiff;
        }
    }

    return LFS_ERR_NOENT;
}
666
lfs_dir_get(lfs_t * lfs,const lfs_mdir_t * dir,lfs_tag_t gmask,lfs_tag_t gtag,void * buffer)667 static lfs_stag_t lfs_dir_get(lfs_t *lfs, const lfs_mdir_t *dir,
668 lfs_tag_t gmask, lfs_tag_t gtag, void *buffer) {
669 return lfs_dir_getslice(lfs, dir,
670 gmask, gtag,
671 0, buffer, lfs_tag_size(gtag));
672 }
673
// Read from a metadata attribute's data region through the caches,
// fetching misses with lfs_dir_getslice.
//
// Cached attribute data is keyed by the special LFS_BLOCK_INLINE block
// address, so it never collides with real device blocks. Mirrors the
// structure of lfs_bd_read. Returns 0 on success or a negative error.
static int lfs_dir_getread(lfs_t *lfs, const lfs_mdir_t *dir,
        const lfs_cache_t *pcache, lfs_cache_t *rcache, lfs_size_t hint,
        lfs_tag_t gmask, lfs_tag_t gtag,
        lfs_off_t off, void *buffer, lfs_size_t size) {
    uint8_t *data = buffer;
    if (off+size > lfs->cfg->block_size) {
        return LFS_ERR_CORRUPT;
    }

    while (size > 0) {
        lfs_size_t diff = size;

        if (pcache && pcache->block == LFS_BLOCK_INLINE &&
                off < pcache->off + pcache->size) {
            if (off >= pcache->off) {
                // is already in pcache?
                diff = lfs_min(diff, pcache->size - (off-pcache->off));
                memcpy(data, &pcache->buffer[off-pcache->off], diff);

                data += diff;
                off += diff;
                size -= diff;
                continue;
            }

            // pcache takes priority
            diff = lfs_min(diff, pcache->off-off);
        }

        if (rcache->block == LFS_BLOCK_INLINE &&
                off < rcache->off + rcache->size) {
            if (off >= rcache->off) {
                // is already in rcache?
                diff = lfs_min(diff, rcache->size - (off-rcache->off));
                memcpy(data, &rcache->buffer[off-rcache->off], diff);

                data += diff;
                off += diff;
                size -= diff;
                continue;
            }

            // rcache takes priority
            diff = lfs_min(diff, rcache->off-off);
        }

        // load to cache, first condition can no longer fail
        rcache->block = LFS_BLOCK_INLINE;
        rcache->off = lfs_aligndown(off, lfs->cfg->read_size);
        rcache->size = lfs_min(lfs_alignup(off+hint, lfs->cfg->read_size),
                lfs->cfg->cache_size);
        int err = lfs_dir_getslice(lfs, dir, gmask, gtag,
                rcache->off, rcache->buffer, rcache->size);
        if (err < 0) {
            return err;
        }
    }

    return 0;
}
734
#ifndef LFS_READONLY
// Traversal callback used to detect outdated tags during compaction.
//
// p points at the tag being tested (the "filter tag"). Each tag seen by
// the traversal may mark the filter tag redundant (returns true, and the
// filter tag is replaced with a noop) or shift its id to account for
// create/delete splices. Returns false when the filter tag survives.
static int lfs_dir_traverse_filter(void *p,
        lfs_tag_t tag, const void *buffer) {
    lfs_tag_t *filtertag = p;
    (void)buffer;

    // which mask depends on unique bit in tag structure
    uint32_t mask = (tag & LFS_MKTAG(0x100, 0, 0))
            ? LFS_MKTAG(0x7ff, 0x3ff, 0)
            : LFS_MKTAG(0x700, 0x3ff, 0);

    // check for redundancy
    if ((mask & tag) == (mask & *filtertag) ||
            lfs_tag_isdelete(*filtertag) ||
            (LFS_MKTAG(0x7ff, 0x3ff, 0) & tag) == (
                LFS_MKTAG(LFS_TYPE_DELETE, 0, 0) |
                    (LFS_MKTAG(0, 0x3ff, 0) & *filtertag))) {
        *filtertag = LFS_MKTAG(LFS_FROM_NOOP, 0, 0);
        return true;
    }

    // check if we need to adjust for created/deleted tags
    if (lfs_tag_type1(tag) == LFS_TYPE_SPLICE &&
            lfs_tag_id(tag) <= lfs_tag_id(*filtertag)) {
        *filtertag += LFS_MKTAG(0, lfs_tag_splice(tag), 0);
    }

    return false;
}
#endif
765
#ifndef LFS_READONLY
// maximum recursive depth of lfs_dir_traverse, the deepest call:
//
// traverse with commit
// '-> traverse with move
//     '-> traverse with filter
//
#define LFS_DIR_TRAVERSE_DEPTH 3

// one suspended "call frame" of lfs_dir_traverse's explicit stack
struct lfs_dir_traverse {
    // iteration position in the source mdir and pending attribute list
    const lfs_mdir_t *dir;
    lfs_off_t off;
    lfs_tag_t ptag;
    const struct lfs_mattr *attrs;
    int attrcount;

    // tag filter and id-remapping parameters
    lfs_tag_t tmask;
    lfs_tag_t ttag;
    uint16_t begin;
    uint16_t end;
    int16_t diff;

    // callback applied to each surviving tag
    int (*cb)(void *data, lfs_tag_t tag, const void *buffer);
    void *data;

    // tag/buffer being processed when the frame was suspended
    lfs_tag_t tag;
    const void *buffer;
    struct lfs_diskoff disk;
};
795
// Traverse all tags in a metadata pair plus a pending attribute list,
// invoking cb on each tag that passes tmask/ttag filtering, with ids
// remapped by diff and restricted to [begin, end).
//
// Handles FROM_MOVE tags by re-traversing the source directory, and
// FROM_USERATTRS by expanding the user attribute list.
static int lfs_dir_traverse(lfs_t *lfs,
        const lfs_mdir_t *dir, lfs_off_t off, lfs_tag_t ptag,
        const struct lfs_mattr *attrs, int attrcount,
        lfs_tag_t tmask, lfs_tag_t ttag,
        uint16_t begin, uint16_t end, int16_t diff,
        int (*cb)(void *data, lfs_tag_t tag, const void *buffer), void *data) {
    // This function in inherently recursive, but bounded. To allow tool-based
    // analysis without unnecessary code-cost we use an explicit stack
    struct lfs_dir_traverse stack[LFS_DIR_TRAVERSE_DEPTH-1];
    unsigned sp = 0;
    int res;

    // iterate over directory and attrs
    lfs_tag_t tag;
    const void *buffer;
    struct lfs_diskoff disk;
    while (true) {
        {
            if (off+lfs_tag_dsize(ptag) < dir->off) {
                // next tag comes from the on-disk commit log
                off += lfs_tag_dsize(ptag);
                int err = lfs_bd_read(lfs,
                        NULL, &lfs->rcache, sizeof(tag),
                        dir->pair[0], off, &tag, sizeof(tag));
                if (err) {
                    return err;
                }

                tag = (lfs_frombe32(tag) ^ ptag) | 0x80000000;
                disk.block = dir->pair[0];
                disk.off = off+sizeof(lfs_tag_t);
                buffer = &disk;
                ptag = tag;
            } else if (attrcount > 0) {
                // then tags come from the pending attribute list
                tag = attrs[0].tag;
                buffer = attrs[0].buffer;
                attrs += 1;
                attrcount -= 1;
            } else {
                // finished traversal, pop from stack?
                res = 0;
                break;
            }

            // do we need to filter?
            lfs_tag_t mask = LFS_MKTAG(0x7ff, 0, 0);
            if ((mask & tmask & tag) != (mask & tmask & ttag)) {
                continue;
            }

            if (lfs_tag_id(tmask) != 0) {
                LFS_ASSERT(sp < LFS_DIR_TRAVERSE_DEPTH);
                // recurse, scan for duplicates, and update tag based on
                // creates/deletes
                stack[sp] = (struct lfs_dir_traverse){
                    .dir = dir,
                    .off = off,
                    .ptag = ptag,
                    .attrs = attrs,
                    .attrcount = attrcount,
                    .tmask = tmask,
                    .ttag = ttag,
                    .begin = begin,
                    .end = end,
                    .diff = diff,
                    .cb = cb,
                    .data = data,
                    .tag = tag,
                    .buffer = buffer,
                    .disk = disk,
                };
                sp += 1;

                // restart as a filter pass over the same position; the
                // self-assignments document the state that is unchanged
                dir = dir;
                off = off;
                ptag = ptag;
                attrs = attrs;
                attrcount = attrcount;
                tmask = 0;
                ttag = 0;
                begin = 0;
                end = 0;
                diff = 0;
                cb = lfs_dir_traverse_filter;
                data = &stack[sp-1].tag;
                continue;
            }
        }

popped:
        // in filter range?
        if (lfs_tag_id(tmask) != 0 &&
                !(lfs_tag_id(tag) >= begin && lfs_tag_id(tag) < end)) {
            continue;
        }

        // handle special cases for mcu-side operations
        if (lfs_tag_type3(tag) == LFS_FROM_NOOP) {
            // do nothing
        } else if (lfs_tag_type3(tag) == LFS_FROM_MOVE) {
            // Without this condition, lfs_dir_traverse can exhibit an
            // extremely expensive O(n^3) of nested loops when renaming.
            // This happens because lfs_dir_traverse tries to filter tags by
            // the tags in the source directory, triggering a second
            // lfs_dir_traverse with its own filter operation.
            //
            // traverse with commit
            // '-> traverse with filter
            //     '-> traverse with move
            //         '-> traverse with filter
            //
            // However we don't actually care about filtering the second set of
            // tags, since duplicate tags have no effect when filtering.
            //
            // This check skips this unnecessary recursive filtering explicitly,
            // reducing this runtime from O(n^3) to O(n^2).
            if (cb == lfs_dir_traverse_filter) {
                continue;
            }

            // recurse into move
            stack[sp] = (struct lfs_dir_traverse){
                .dir = dir,
                .off = off,
                .ptag = ptag,
                .attrs = attrs,
                .attrcount = attrcount,
                .tmask = tmask,
                .ttag = ttag,
                .begin = begin,
                .end = end,
                .diff = diff,
                .cb = cb,
                .data = data,
                // resume the move tag as a noop once the recursion pops
                .tag = LFS_MKTAG(LFS_FROM_NOOP, 0, 0),
            };
            sp += 1;

            // traverse the move's source dir, remapping fromid -> toid
            uint16_t fromid = lfs_tag_size(tag);
            uint16_t toid = lfs_tag_id(tag);
            dir = buffer;
            off = 0;
            ptag = 0xffffffff;
            attrs = NULL;
            attrcount = 0;
            tmask = LFS_MKTAG(0x600, 0x3ff, 0);
            ttag = LFS_MKTAG(LFS_TYPE_STRUCT, 0, 0);
            begin = fromid;
            end = fromid+1;
            diff = toid-fromid+diff;
        } else if (lfs_tag_type3(tag) == LFS_FROM_USERATTRS) {
            // expand a user attribute list into individual callbacks
            for (unsigned i = 0; i < lfs_tag_size(tag); i++) {
                const struct lfs_attr *a = buffer;
                res = cb(data, LFS_MKTAG(LFS_TYPE_USERATTR + a[i].type,
                        lfs_tag_id(tag) + diff, a[i].size), a[i].buffer);
                if (res < 0) {
                    return res;
                }

                if (res) {
                    break;
                }
            }
        } else {
            res = cb(data, tag + LFS_MKTAG(0, diff, 0), buffer);
            if (res < 0) {
                return res;
            }

            if (res) {
                break;
            }
        }
    }

    if (sp > 0) {
        // pop from the stack and return, fortunately all pops share
        // a destination
        dir = stack[sp-1].dir;
        off = stack[sp-1].off;
        ptag = stack[sp-1].ptag;
        attrs = stack[sp-1].attrs;
        attrcount = stack[sp-1].attrcount;
        tmask = stack[sp-1].tmask;
        ttag = stack[sp-1].ttag;
        begin = stack[sp-1].begin;
        end = stack[sp-1].end;
        diff = stack[sp-1].diff;
        cb = stack[sp-1].cb;
        data = stack[sp-1].data;
        tag = stack[sp-1].tag;
        buffer = stack[sp-1].buffer;
        disk = stack[sp-1].disk;
        sp -= 1;
        goto popped;
    } else {
        return res;
    }
}
#endif
995
lfs_dir_fetchmatch(lfs_t * lfs,lfs_mdir_t * dir,const lfs_block_t pair[2],lfs_tag_t fmask,lfs_tag_t ftag,uint16_t * id,int (* cb)(void * data,lfs_tag_t tag,const void * buffer),void * data)996 static lfs_stag_t lfs_dir_fetchmatch(lfs_t *lfs,
997 lfs_mdir_t *dir, const lfs_block_t pair[2],
998 lfs_tag_t fmask, lfs_tag_t ftag, uint16_t *id,
999 int (*cb)(void *data, lfs_tag_t tag, const void *buffer), void *data) {
1000 // we can find tag very efficiently during a fetch, since we're already
1001 // scanning the entire directory
1002 lfs_stag_t besttag = -1;
1003
1004 // if either block address is invalid we return LFS_ERR_CORRUPT here,
1005 // otherwise later writes to the pair could fail
1006 if (pair[0] >= lfs->cfg->block_count || pair[1] >= lfs->cfg->block_count) {
1007 return LFS_ERR_CORRUPT;
1008 }
1009
1010 // find the block with the most recent revision
1011 uint32_t revs[2] = {0, 0};
1012 int r = 0;
1013 for (int i = 0; i < 2; i++) {
1014 int err = lfs_bd_read(lfs,
1015 NULL, &lfs->rcache, sizeof(revs[i]),
1016 pair[i], 0, &revs[i], sizeof(revs[i]));
1017 revs[i] = lfs_fromle32(revs[i]);
1018 if (err && err != LFS_ERR_CORRUPT) {
1019 return err;
1020 }
1021
1022 if (err != LFS_ERR_CORRUPT &&
1023 lfs_scmp(revs[i], revs[(i+1)%2]) > 0) {
1024 r = i;
1025 }
1026 }
1027
1028 dir->pair[0] = pair[(r+0)%2];
1029 dir->pair[1] = pair[(r+1)%2];
1030 dir->rev = revs[(r+0)%2];
1031 dir->off = 0; // nonzero = found some commits
1032
1033 // now scan tags to fetch the actual dir and find possible match
1034 for (int i = 0; i < 2; i++) {
1035 lfs_off_t off = 0;
1036 lfs_tag_t ptag = 0xffffffff;
1037
1038 uint16_t tempcount = 0;
1039 lfs_block_t temptail[2] = {LFS_BLOCK_NULL, LFS_BLOCK_NULL};
1040 bool tempsplit = false;
1041 lfs_stag_t tempbesttag = besttag;
1042
1043 dir->rev = lfs_tole32(dir->rev);
1044 uint32_t crc = lfs_crc(0xffffffff, &dir->rev, sizeof(dir->rev));
1045 dir->rev = lfs_fromle32(dir->rev);
1046
1047 while (true) {
1048 // extract next tag
1049 lfs_tag_t tag;
1050 off += lfs_tag_dsize(ptag);
1051 int err = lfs_bd_read(lfs,
1052 NULL, &lfs->rcache, lfs->cfg->block_size,
1053 dir->pair[0], off, &tag, sizeof(tag));
1054 if (err) {
1055 if (err == LFS_ERR_CORRUPT) {
1056 // can't continue?
1057 dir->erased = false;
1058 break;
1059 }
1060 return err;
1061 }
1062
1063 crc = lfs_crc(crc, &tag, sizeof(tag));
1064 tag = lfs_frombe32(tag) ^ ptag;
1065
1066 // next commit not yet programmed or we're not in valid range
1067 if (!lfs_tag_isvalid(tag)) {
1068 dir->erased = (lfs_tag_type1(ptag) == LFS_TYPE_CRC &&
1069 dir->off % lfs->cfg->prog_size == 0);
1070 break;
1071 } else if (off + lfs_tag_dsize(tag) > lfs->cfg->block_size) {
1072 dir->erased = false;
1073 break;
1074 }
1075
1076 ptag = tag;
1077
1078 if (lfs_tag_type1(tag) == LFS_TYPE_CRC) {
1079 // check the crc attr
1080 uint32_t dcrc;
1081 err = lfs_bd_read(lfs,
1082 NULL, &lfs->rcache, lfs->cfg->block_size,
1083 dir->pair[0], off+sizeof(tag), &dcrc, sizeof(dcrc));
1084 if (err) {
1085 if (err == LFS_ERR_CORRUPT) {
1086 dir->erased = false;
1087 break;
1088 }
1089 return err;
1090 }
1091 dcrc = lfs_fromle32(dcrc);
1092
1093 if (crc != dcrc) {
1094 dir->erased = false;
1095 break;
1096 }
1097
1098 // reset the next bit if we need to
1099 ptag ^= (lfs_tag_t)(lfs_tag_chunk(tag) & 1U) << 31;
1100
1101 // toss our crc into the filesystem seed for
1102 // pseudorandom numbers, note we use another crc here
1103 // as a collection function because it is sufficiently
1104 // random and convenient
1105 lfs->seed = lfs_crc(lfs->seed, &crc, sizeof(crc));
1106
1107 // update with what's found so far
1108 besttag = tempbesttag;
1109 dir->off = off + lfs_tag_dsize(tag);
1110 dir->etag = ptag;
1111 dir->count = tempcount;
1112 dir->tail[0] = temptail[0];
1113 dir->tail[1] = temptail[1];
1114 dir->split = tempsplit;
1115
1116 // reset crc
1117 crc = 0xffffffff;
1118 continue;
1119 }
1120
1121 // crc the entry first, hopefully leaving it in the cache
1122 for (lfs_off_t j = sizeof(tag); j < lfs_tag_dsize(tag); j++) {
1123 uint8_t dat;
1124 err = lfs_bd_read(lfs,
1125 NULL, &lfs->rcache, lfs->cfg->block_size,
1126 dir->pair[0], off+j, &dat, 1);
1127 if (err) {
1128 if (err == LFS_ERR_CORRUPT) {
1129 dir->erased = false;
1130 break;
1131 }
1132 return err;
1133 }
1134
1135 crc = lfs_crc(crc, &dat, 1);
1136 }
1137
1138 // directory modification tags?
1139 if (lfs_tag_type1(tag) == LFS_TYPE_NAME) {
1140 // increase count of files if necessary
1141 if (lfs_tag_id(tag) >= tempcount) {
1142 tempcount = lfs_tag_id(tag) + 1;
1143 }
1144 } else if (lfs_tag_type1(tag) == LFS_TYPE_SPLICE) {
1145 tempcount += lfs_tag_splice(tag);
1146
1147 if (tag == (LFS_MKTAG(LFS_TYPE_DELETE, 0, 0) |
1148 (LFS_MKTAG(0, 0x3ff, 0) & tempbesttag))) {
1149 tempbesttag |= 0x80000000;
1150 } else if (tempbesttag != -1 &&
1151 lfs_tag_id(tag) <= lfs_tag_id(tempbesttag)) {
1152 tempbesttag += LFS_MKTAG(0, lfs_tag_splice(tag), 0);
1153 }
1154 } else if (lfs_tag_type1(tag) == LFS_TYPE_TAIL) {
1155 tempsplit = (lfs_tag_chunk(tag) & 1);
1156
1157 err = lfs_bd_read(lfs,
1158 NULL, &lfs->rcache, lfs->cfg->block_size,
1159 dir->pair[0], off+sizeof(tag), &temptail, 8);
1160 if (err) {
1161 if (err == LFS_ERR_CORRUPT) {
1162 dir->erased = false;
1163 break;
1164 }
1165 }
1166 lfs_pair_fromle32(temptail);
1167 }
1168
1169 // found a match for our fetcher?
1170 if ((fmask & tag) == (fmask & ftag)) {
1171 int res = cb(data, tag, &(struct lfs_diskoff){
1172 dir->pair[0], off+sizeof(tag)});
1173 if (res < 0) {
1174 if (res == LFS_ERR_CORRUPT) {
1175 dir->erased = false;
1176 break;
1177 }
1178 return res;
1179 }
1180
1181 if (res == LFS_CMP_EQ) {
1182 // found a match
1183 tempbesttag = tag;
1184 } else if ((LFS_MKTAG(0x7ff, 0x3ff, 0) & tag) ==
1185 (LFS_MKTAG(0x7ff, 0x3ff, 0) & tempbesttag)) {
1186 // found an identical tag, but contents didn't match
1187 // this must mean that our besttag has been overwritten
1188 tempbesttag = -1;
1189 } else if (res == LFS_CMP_GT &&
1190 lfs_tag_id(tag) <= lfs_tag_id(tempbesttag)) {
1191 // found a greater match, keep track to keep things sorted
1192 tempbesttag = tag | 0x80000000;
1193 }
1194 }
1195 }
1196
1197 // consider what we have good enough
1198 if (dir->off > 0) {
1199 // synthetic move
1200 if (lfs_gstate_hasmovehere(&lfs->gdisk, dir->pair)) {
1201 if (lfs_tag_id(lfs->gdisk.tag) == lfs_tag_id(besttag)) {
1202 besttag |= 0x80000000;
1203 } else if (besttag != -1 &&
1204 lfs_tag_id(lfs->gdisk.tag) < lfs_tag_id(besttag)) {
1205 besttag -= LFS_MKTAG(0, 1, 0);
1206 }
1207 }
1208
1209 // found tag? or found best id?
1210 if (id) {
1211 *id = lfs_min(lfs_tag_id(besttag), dir->count);
1212 }
1213
1214 if (lfs_tag_isvalid(besttag)) {
1215 return besttag;
1216 } else if (lfs_tag_id(besttag) < dir->count) {
1217 return LFS_ERR_NOENT;
1218 } else {
1219 return 0;
1220 }
1221 }
1222
1223 // failed, try the other block?
1224 lfs_pair_swap(dir->pair);
1225 dir->rev = revs[(r+1)%2];
1226 }
1227
1228 LFS_ERROR("Corrupted dir pair at {0x%"PRIx32", 0x%"PRIx32"}",
1229 dir->pair[0], dir->pair[1]);
1230 return LFS_ERR_CORRUPT;
1231 }
1232
lfs_dir_fetch(lfs_t * lfs,lfs_mdir_t * dir,const lfs_block_t pair[2])1233 static int lfs_dir_fetch(lfs_t *lfs,
1234 lfs_mdir_t *dir, const lfs_block_t pair[2]) {
1235 // note, mask=-1, tag=-1 can never match a tag since this
1236 // pattern has the invalid bit set
1237 return (int)lfs_dir_fetchmatch(lfs, dir, pair,
1238 (lfs_tag_t)-1, (lfs_tag_t)-1, NULL, NULL, NULL);
1239 }
1240
lfs_dir_getgstate(lfs_t * lfs,const lfs_mdir_t * dir,lfs_gstate_t * gstate)1241 static int lfs_dir_getgstate(lfs_t *lfs, const lfs_mdir_t *dir,
1242 lfs_gstate_t *gstate) {
1243 lfs_gstate_t temp;
1244 lfs_stag_t res = lfs_dir_get(lfs, dir, LFS_MKTAG(0x7ff, 0, 0),
1245 LFS_MKTAG(LFS_TYPE_MOVESTATE, 0, sizeof(temp)), &temp);
1246 if (res < 0 && res != LFS_ERR_NOENT) {
1247 return res;
1248 }
1249
1250 if (res != LFS_ERR_NOENT) {
1251 // xor together to find resulting gstate
1252 lfs_gstate_fromle32(&temp);
1253 lfs_gstate_xor(gstate, &temp);
1254 }
1255
1256 return 0;
1257 }
1258
lfs_dir_getinfo(lfs_t * lfs,lfs_mdir_t * dir,uint16_t id,struct lfs_info * info)1259 static int lfs_dir_getinfo(lfs_t *lfs, lfs_mdir_t *dir,
1260 uint16_t id, struct lfs_info *info) {
1261 if (id == 0x3ff) {
1262 // special case for root
1263 strcpy(info->name, "/");
1264 info->type = LFS_TYPE_DIR;
1265 return 0;
1266 }
1267
1268 lfs_stag_t tag = lfs_dir_get(lfs, dir, LFS_MKTAG(0x780, 0x3ff, 0),
1269 LFS_MKTAG(LFS_TYPE_NAME, id, lfs->name_max+1), info->name);
1270 if (tag < 0) {
1271 return (int)tag;
1272 }
1273
1274 info->type = lfs_tag_type3(tag);
1275
1276 struct lfs_ctz ctz;
1277 tag = lfs_dir_get(lfs, dir, LFS_MKTAG(0x700, 0x3ff, 0),
1278 LFS_MKTAG(LFS_TYPE_STRUCT, id, sizeof(ctz)), &ctz);
1279 if (tag < 0) {
1280 return (int)tag;
1281 }
1282 lfs_ctz_fromle32(&ctz);
1283
1284 if (lfs_tag_type3(tag) == LFS_TYPE_CTZSTRUCT) {
1285 info->size = ctz.size;
1286 } else if (lfs_tag_type3(tag) == LFS_TYPE_INLINESTRUCT) {
1287 info->size = lfs_tag_size(tag);
1288 }
1289
1290 return 0;
1291 }
1292
// context passed to the lfs_dir_find_match fetch callback
struct lfs_dir_find_match {
    lfs_t *lfs;         // filesystem, needed for block-device compares
    const void *name;   // name being searched for (not NUL-terminated)
    lfs_size_t size;    // length of name in bytes
};
1298
lfs_dir_find_match(void * data,lfs_tag_t tag,const void * buffer)1299 static int lfs_dir_find_match(void *data,
1300 lfs_tag_t tag, const void *buffer) {
1301 struct lfs_dir_find_match *name = data;
1302 lfs_t *lfs = name->lfs;
1303 const struct lfs_diskoff *disk = buffer;
1304
1305 // compare with disk
1306 lfs_size_t diff = lfs_min(name->size, lfs_tag_size(tag));
1307 int res = lfs_bd_cmp(lfs,
1308 NULL, &lfs->rcache, diff,
1309 disk->block, disk->off, name->name, diff);
1310 if (res != LFS_CMP_EQ) {
1311 return res;
1312 }
1313
1314 // only equal if our size is still the same
1315 if (name->size != lfs_tag_size(tag)) {
1316 return (name->size < lfs_tag_size(tag)) ? LFS_CMP_LT : LFS_CMP_GT;
1317 }
1318
1319 // found a match!
1320 return LFS_CMP_EQ;
1321 }
1322
lfs_dir_find(lfs_t * lfs,lfs_mdir_t * dir,const char ** path,uint16_t * id)1323 static lfs_stag_t lfs_dir_find(lfs_t *lfs, lfs_mdir_t *dir,
1324 const char **path, uint16_t *id) {
1325 // we reduce path to a single name if we can find it
1326 const char *name = *path;
1327 if (id) {
1328 *id = 0x3ff;
1329 }
1330
1331 // default to root dir
1332 lfs_stag_t tag = LFS_MKTAG(LFS_TYPE_DIR, 0x3ff, 0);
1333 dir->tail[0] = lfs->root[0];
1334 dir->tail[1] = lfs->root[1];
1335
1336 while (true) {
1337 nextname:
1338 // skip slashes
1339 name += strspn(name, "/");
1340 lfs_size_t namelen = strcspn(name, "/");
1341
1342 // skip '.' and root '..'
1343 if ((namelen == 1 && memcmp(name, ".", 1) == 0) ||
1344 (namelen == 2 && memcmp(name, "..", 2) == 0)) {
1345 name += namelen;
1346 goto nextname;
1347 }
1348
1349 // skip if matched by '..' in name
1350 const char *suffix = name + namelen;
1351 lfs_size_t sufflen;
1352 int depth = 1;
1353 while (true) {
1354 suffix += strspn(suffix, "/");
1355 sufflen = strcspn(suffix, "/");
1356 if (sufflen == 0) {
1357 break;
1358 }
1359
1360 if (sufflen == 2 && memcmp(suffix, "..", 2) == 0) {
1361 depth -= 1;
1362 if (depth == 0) {
1363 name = suffix + sufflen;
1364 goto nextname;
1365 }
1366 } else {
1367 depth += 1;
1368 }
1369
1370 suffix += sufflen;
1371 }
1372
1373 // found path
1374 if (name[0] == '\0') {
1375 return tag;
1376 }
1377
1378 // update what we've found so far
1379 *path = name;
1380
1381 // only continue if we hit a directory
1382 if (lfs_tag_type3(tag) != LFS_TYPE_DIR) {
1383 return LFS_ERR_NOTDIR;
1384 }
1385
1386 // grab the entry data
1387 if (lfs_tag_id(tag) != 0x3ff) {
1388 lfs_stag_t res = lfs_dir_get(lfs, dir, LFS_MKTAG(0x700, 0x3ff, 0),
1389 LFS_MKTAG(LFS_TYPE_STRUCT, lfs_tag_id(tag), 8), dir->tail);
1390 if (res < 0) {
1391 return res;
1392 }
1393 lfs_pair_fromle32(dir->tail);
1394 }
1395
1396 // find entry matching name
1397 while (true) {
1398 tag = lfs_dir_fetchmatch(lfs, dir, dir->tail,
1399 LFS_MKTAG(0x780, 0, 0),
1400 LFS_MKTAG(LFS_TYPE_NAME, 0, namelen),
1401 // are we last name?
1402 (strchr(name, '/') == NULL) ? id : NULL,
1403 lfs_dir_find_match, &(struct lfs_dir_find_match){
1404 lfs, name, namelen});
1405 if (tag < 0) {
1406 return tag;
1407 }
1408
1409 if (tag) {
1410 break;
1411 }
1412
1413 if (!dir->split) {
1414 return LFS_ERR_NOENT;
1415 }
1416 }
1417
1418 // to next name
1419 name += namelen;
1420 }
1421 }
1422
// commit logic
// in-progress commit to a single metadata block
struct lfs_commit {
    lfs_block_t block;  // block being committed to
    lfs_off_t off;      // current program offset in block
    lfs_tag_t ptag;     // previous tag, for tag xor-chaining
    uint32_t crc;       // running crc of the current commit

    lfs_off_t begin;    // start offset of this commit
    lfs_off_t end;      // offset the commit must not grow past
};
1433
1434 #ifndef LFS_READONLY
lfs_dir_commitprog(lfs_t * lfs,struct lfs_commit * commit,const void * buffer,lfs_size_t size)1435 static int lfs_dir_commitprog(lfs_t *lfs, struct lfs_commit *commit,
1436 const void *buffer, lfs_size_t size) {
1437 int err = lfs_bd_prog(lfs,
1438 &lfs->pcache, &lfs->rcache, false,
1439 commit->block, commit->off ,
1440 (const uint8_t*)buffer, size);
1441 if (err) {
1442 return err;
1443 }
1444
1445 commit->crc = lfs_crc(commit->crc, buffer, size);
1446 commit->off += size;
1447 return 0;
1448 }
1449 #endif
1450
1451 #ifndef LFS_READONLY
lfs_dir_commitattr(lfs_t * lfs,struct lfs_commit * commit,lfs_tag_t tag,const void * buffer)1452 static int lfs_dir_commitattr(lfs_t *lfs, struct lfs_commit *commit,
1453 lfs_tag_t tag, const void *buffer) {
1454 // check if we fit
1455 lfs_size_t dsize = lfs_tag_dsize(tag);
1456 if (commit->off + dsize > commit->end) {
1457 return LFS_ERR_NOSPC;
1458 }
1459
1460 // write out tag
1461 lfs_tag_t ntag = lfs_tobe32((tag & 0x7fffffff) ^ commit->ptag);
1462 int err = lfs_dir_commitprog(lfs, commit, &ntag, sizeof(ntag));
1463 if (err) {
1464 return err;
1465 }
1466
1467 if (!(tag & 0x80000000)) {
1468 // from memory
1469 err = lfs_dir_commitprog(lfs, commit, buffer, dsize-sizeof(tag));
1470 if (err) {
1471 return err;
1472 }
1473 } else {
1474 // from disk
1475 const struct lfs_diskoff *disk = buffer;
1476 for (lfs_off_t i = 0; i < dsize-sizeof(tag); i++) {
1477 // rely on caching to make this efficient
1478 uint8_t dat;
1479 err = lfs_bd_read(lfs,
1480 NULL, &lfs->rcache, dsize-sizeof(tag)-i,
1481 disk->block, disk->off+i, &dat, 1);
1482 if (err) {
1483 return err;
1484 }
1485
1486 err = lfs_dir_commitprog(lfs, commit, &dat, 1);
1487 if (err) {
1488 return err;
1489 }
1490 }
1491 }
1492
1493 commit->ptag = tag & 0x7fffffff;
1494 return 0;
1495 }
1496 #endif
1497
#ifndef LFS_READONLY
// Finalize an in-progress commit: pad it out to a program-unit boundary with
// crc tags, flush the caches, then read everything back to verify the crcs
// actually landed on disk. Returns LFS_ERR_CORRUPT on verification failure.
static int lfs_dir_commitcrc(lfs_t *lfs, struct lfs_commit *commit) {
    // align to program units
    const lfs_off_t end = lfs_alignup(commit->off + 2*sizeof(uint32_t),
            lfs->cfg->prog_size);

    // off1/crc1 remember the first (non-padding) crc for verification
    lfs_off_t off1 = 0;
    uint32_t crc1 = 0;

    // create crc tags to fill up remainder of commit, note that
    // padding is not crced, which lets fetches skip padding but
    // makes committing a bit more complicated
    while (commit->off < end) {
        lfs_off_t off = commit->off + sizeof(lfs_tag_t);
        // crc tag size field is 10 bits, so each padding tag covers
        // at most 0x3fe bytes
        lfs_off_t noff = lfs_min(end - off, 0x3fe) + off;
        if (noff < end) {
            noff = lfs_min(noff, end - 2*sizeof(uint32_t));
        }

        // read erased state from next program unit
        lfs_tag_t tag = 0xffffffff;
        int err = lfs_bd_read(lfs,
                NULL, &lfs->rcache, sizeof(tag),
                commit->block, noff, &tag, sizeof(tag));
        if (err && err != LFS_ERR_CORRUPT) {
            return err;
        }

        // build crc tag
        // reset set => next program unit doesn't look erased, so the next
        // commit must restart the tag xor-chain
        bool reset = ~lfs_frombe32(tag) >> 31;
        tag = LFS_MKTAG(LFS_TYPE_CRC + reset, 0x3ff, noff - off);

        // write out crc
        uint32_t footer[2];
        footer[0] = lfs_tobe32(tag ^ commit->ptag);
        commit->crc = lfs_crc(commit->crc, &footer[0], sizeof(footer[0]));
        footer[1] = lfs_tole32(commit->crc);
        err = lfs_bd_prog(lfs,
                &lfs->pcache, &lfs->rcache, false,
                commit->block, commit->off, &footer, sizeof(footer));
        if (err) {
            return err;
        }

        // keep track of non-padding checksum to verify
        if (off1 == 0) {
            off1 = commit->off + sizeof(uint32_t);
            crc1 = commit->crc;
        }

        commit->off += sizeof(tag)+lfs_tag_size(tag);
        commit->ptag = tag ^ ((lfs_tag_t)reset << 31);
        commit->crc = 0xffffffff; // reset crc for next "commit"
    }

    // flush buffers
    int err = lfs_bd_sync(lfs, &lfs->pcache, &lfs->rcache, false);
    if (err) {
        return err;
    }

    // successful commit, check checksums to make sure
    lfs_off_t off = commit->begin;
    lfs_off_t noff = off1;
    while (off < end) {
        uint32_t crc = 0xffffffff;
        for (lfs_off_t i = off; i < noff+sizeof(uint32_t); i++) {
            // check against written crc, may catch blocks that
            // become readonly and match our commit size exactly
            if (i == off1 && crc != crc1) {
                return LFS_ERR_CORRUPT;
            }

            // leave it up to caching to make this efficient
            uint8_t dat;
            err = lfs_bd_read(lfs,
                    NULL, &lfs->rcache, noff+sizeof(uint32_t)-i,
                    commit->block, i, &dat, 1);
            if (err) {
                return err;
            }

            crc = lfs_crc(crc, &dat, 1);
        }

        // detected write error?
        // (crc of data + stored crc must come out to zero)
        if (crc != 0) {
            return LFS_ERR_CORRUPT;
        }

        // skip padding
        off = lfs_min(end - noff, 0x3fe) + noff;
        if (off < end) {
            off = lfs_min(off, end - 2*sizeof(uint32_t));
        }
        noff = off + sizeof(uint32_t);
    }

    return 0;
}
#endif
1599
1600 #ifndef LFS_READONLY
lfs_dir_alloc(lfs_t * lfs,lfs_mdir_t * dir)1601 static int lfs_dir_alloc(lfs_t *lfs, lfs_mdir_t *dir) {
1602 // allocate pair of dir blocks (backwards, so we write block 1 first)
1603 for (int i = 0; i < 2; i++) {
1604 int err = lfs_alloc(lfs, &dir->pair[(i+1)%2]);
1605 if (err) {
1606 return err;
1607 }
1608 }
1609
1610 // zero for reproducibility in case initial block is unreadable
1611 dir->rev = 0;
1612
1613 // rather than clobbering one of the blocks we just pretend
1614 // the revision may be valid
1615 int err = lfs_bd_read(lfs,
1616 NULL, &lfs->rcache, sizeof(dir->rev),
1617 dir->pair[0], 0, &dir->rev, sizeof(dir->rev));
1618 dir->rev = lfs_fromle32(dir->rev);
1619 if (err && err != LFS_ERR_CORRUPT) {
1620 return err;
1621 }
1622
1623 // to make sure we don't immediately evict, align the new revision count
1624 // to our block_cycles modulus, see lfs_dir_compact for why our modulus
1625 // is tweaked this way
1626 if (lfs->cfg->block_cycles > 0) {
1627 dir->rev = lfs_alignup(dir->rev, ((lfs->cfg->block_cycles+1)|1));
1628 }
1629
1630 // set defaults
1631 dir->off = sizeof(dir->rev);
1632 dir->etag = 0xffffffff;
1633 dir->count = 0;
1634 dir->tail[0] = LFS_BLOCK_NULL;
1635 dir->tail[1] = LFS_BLOCK_NULL;
1636 dir->erased = false;
1637 dir->split = false;
1638
1639 // don't write out yet, let caller take care of that
1640 return 0;
1641 }
1642 #endif
1643
1644 #ifndef LFS_READONLY
lfs_dir_drop(lfs_t * lfs,lfs_mdir_t * dir,lfs_mdir_t * tail)1645 static int lfs_dir_drop(lfs_t *lfs, lfs_mdir_t *dir, lfs_mdir_t *tail) {
1646 // steal state
1647 int err = lfs_dir_getgstate(lfs, tail, &lfs->gdelta);
1648 if (err) {
1649 return err;
1650 }
1651
1652 // steal tail
1653 lfs_pair_tole32(tail->tail);
1654 err = lfs_dir_commit(lfs, dir, LFS_MKATTRS(
1655 {LFS_MKTAG(LFS_TYPE_TAIL + tail->split, 0x3ff, 8), tail->tail}));
1656 lfs_pair_fromle32(tail->tail);
1657 if (err) {
1658 return err;
1659 }
1660
1661 return 0;
1662 }
1663 #endif
1664
1665 #ifndef LFS_READONLY
lfs_dir_split(lfs_t * lfs,lfs_mdir_t * dir,const struct lfs_mattr * attrs,int attrcount,lfs_mdir_t * source,uint16_t split,uint16_t end)1666 static int lfs_dir_split(lfs_t *lfs,
1667 lfs_mdir_t *dir, const struct lfs_mattr *attrs, int attrcount,
1668 lfs_mdir_t *source, uint16_t split, uint16_t end) {
1669 // create tail metadata pair
1670 lfs_mdir_t tail;
1671 int err = lfs_dir_alloc(lfs, &tail);
1672 if (err) {
1673 return err;
1674 }
1675
1676 tail.split = dir->split;
1677 tail.tail[0] = dir->tail[0];
1678 tail.tail[1] = dir->tail[1];
1679
1680 // note we don't care about LFS_OK_RELOCATED
1681 int res = lfs_dir_compact(lfs, &tail, attrs, attrcount, source, split, end);
1682 if (res < 0) {
1683 return res;
1684 }
1685
1686 dir->tail[0] = tail.pair[0];
1687 dir->tail[1] = tail.pair[1];
1688 dir->split = true;
1689
1690 // update root if needed
1691 if (lfs_pair_cmp(dir->pair, lfs->root) == 0 && split == 0) {
1692 lfs->root[0] = tail.pair[0];
1693 lfs->root[1] = tail.pair[1];
1694 }
1695
1696 return 0;
1697 }
1698 #endif
1699
1700 #ifndef LFS_READONLY
lfs_dir_commit_size(void * p,lfs_tag_t tag,const void * buffer)1701 static int lfs_dir_commit_size(void *p, lfs_tag_t tag, const void *buffer) {
1702 lfs_size_t *size = p;
1703 (void)buffer;
1704
1705 *size += lfs_tag_dsize(tag);
1706 return 0;
1707 }
1708 #endif
1709
#ifndef LFS_READONLY
// context passed to the lfs_dir_commit_commit traverse callback
struct lfs_dir_commit_commit {
    lfs_t *lfs;                 // filesystem
    struct lfs_commit *commit;  // in-progress commit to append tags to
};
#endif
1716
1717 #ifndef LFS_READONLY
lfs_dir_commit_commit(void * p,lfs_tag_t tag,const void * buffer)1718 static int lfs_dir_commit_commit(void *p, lfs_tag_t tag, const void *buffer) {
1719 struct lfs_dir_commit_commit *commit = p;
1720 return lfs_dir_commitattr(commit->lfs, commit->commit, tag, buffer);
1721 }
1722 #endif
1723
1724 #ifndef LFS_READONLY
lfs_dir_needsrelocation(lfs_t * lfs,lfs_mdir_t * dir)1725 static bool lfs_dir_needsrelocation(lfs_t *lfs, lfs_mdir_t *dir) {
1726 // If our revision count == n * block_cycles, we should force a relocation,
1727 // this is how littlefs wear-levels at the metadata-pair level. Note that we
1728 // actually use (block_cycles+1)|1, this is to avoid two corner cases:
1729 // 1. block_cycles = 1, which would prevent relocations from terminating
1730 // 2. block_cycles = 2n, which, due to aliasing, would only ever relocate
1731 // one metadata block in the pair, effectively making this useless
1732 return (lfs->cfg->block_cycles > 0
1733 && ((dir->rev + 1) % ((lfs->cfg->block_cycles+1)|1) == 0));
1734 }
1735 #endif
1736
#ifndef LFS_READONLY
// Compact entries [begin, end) of source (plus pending attrs) into dir,
// rewriting the pair from scratch. Relocates to fresh blocks on prog/erase
// failure or when wear-leveling demands it. Returns 0, LFS_OK_RELOCATED,
// or a negative error code.
static int lfs_dir_compact(lfs_t *lfs,
        lfs_mdir_t *dir, const struct lfs_mattr *attrs, int attrcount,
        lfs_mdir_t *source, uint16_t begin, uint16_t end) {
    // save some state in case block is bad
    bool relocated = false;
    bool tired = lfs_dir_needsrelocation(lfs, dir);

    // increment revision count
    dir->rev += 1;

    // do not proactively relocate blocks during migrations, this
    // can cause a number of failure states such: clobbering the
    // v1 superblock if we relocate root, and invalidating directory
    // pointers if we relocate the head of a directory. On top of
    // this, relocations increase the overall complexity of
    // lfs_migration, which is already a delicate operation.
#ifdef LFS_MIGRATE
    if (lfs->lfs1) {
        tired = false;
    }
#endif

    if (tired && lfs_pair_cmp(dir->pair, (const lfs_block_t[2]){0, 1}) != 0) {
        // we're writing too much, time to relocate
        goto relocate;
    }

    // begin loop to commit compaction to blocks until a compact sticks
    while (true) {
        {
            // setup commit state
            struct lfs_commit commit = {
                .block = dir->pair[1],
                .off = 0,
                .ptag = 0xffffffff,
                .crc = 0xffffffff,

                .begin = 0,
                // leave room (8 bytes) for the commit's crc footer
                .end = (lfs->cfg->metadata_max ?
                    lfs->cfg->metadata_max : lfs->cfg->block_size) - 8,
            };

            // erase block to write to
            int err = lfs_bd_erase(lfs, dir->pair[1]);
            if (err) {
                if (err == LFS_ERR_CORRUPT) {
                    goto relocate;
                }
                return err;
            }

            // write out header
            dir->rev = lfs_tole32(dir->rev);
            err = lfs_dir_commitprog(lfs, &commit,
                    &dir->rev, sizeof(dir->rev));
            dir->rev = lfs_fromle32(dir->rev);
            if (err) {
                if (err == LFS_ERR_CORRUPT) {
                    goto relocate;
                }
                return err;
            }

            // traverse the directory, this time writing out all unique tags
            err = lfs_dir_traverse(lfs,
                    source, 0, 0xffffffff, attrs, attrcount,
                    LFS_MKTAG(0x400, 0x3ff, 0),
                    LFS_MKTAG(LFS_TYPE_NAME, 0, 0),
                    begin, end, -begin,
                    lfs_dir_commit_commit, &(struct lfs_dir_commit_commit){
                        lfs, &commit});
            if (err) {
                if (err == LFS_ERR_CORRUPT) {
                    goto relocate;
                }
                return err;
            }

            // commit tail, which may be new after last size check
            if (!lfs_pair_isnull(dir->tail)) {
                lfs_pair_tole32(dir->tail);
                err = lfs_dir_commitattr(lfs, &commit,
                        LFS_MKTAG(LFS_TYPE_TAIL + dir->split, 0x3ff, 8),
                        dir->tail);
                lfs_pair_fromle32(dir->tail);
                if (err) {
                    if (err == LFS_ERR_CORRUPT) {
                        goto relocate;
                    }
                    return err;
                }
            }

            // bring over gstate?
            lfs_gstate_t delta = {0};
            if (!relocated) {
                lfs_gstate_xor(&delta, &lfs->gdisk);
                lfs_gstate_xor(&delta, &lfs->gstate);
            }
            lfs_gstate_xor(&delta, &lfs->gdelta);
            delta.tag &= ~LFS_MKTAG(0, 0, 0x3ff);

            err = lfs_dir_getgstate(lfs, dir, &delta);
            if (err) {
                return err;
            }

            if (!lfs_gstate_iszero(&delta)) {
                lfs_gstate_tole32(&delta);
                err = lfs_dir_commitattr(lfs, &commit,
                        LFS_MKTAG(LFS_TYPE_MOVESTATE, 0x3ff,
                            sizeof(delta)), &delta);
                if (err) {
                    if (err == LFS_ERR_CORRUPT) {
                        goto relocate;
                    }
                    return err;
                }
            }

            // complete commit with crc
            err = lfs_dir_commitcrc(lfs, &commit);
            if (err) {
                if (err == LFS_ERR_CORRUPT) {
                    goto relocate;
                }
                return err;
            }

            // successful compaction, swap dir pair to indicate most recent
            LFS_ASSERT(commit.off % lfs->cfg->prog_size == 0);
            lfs_pair_swap(dir->pair);
            dir->count = end - begin;
            dir->off = commit.off;
            dir->etag = commit.ptag;
            // update gstate
            lfs->gdelta = (lfs_gstate_t){0};
            if (!relocated) {
                lfs->gdisk = lfs->gstate;
            }
        }
        break;

relocate:
        // commit was corrupted, drop caches and prepare to relocate block
        relocated = true;
        lfs_cache_drop(lfs, &lfs->pcache);
        if (!tired) {
            LFS_DEBUG("Bad block at 0x%"PRIx32, dir->pair[1]);
        }

        // can't relocate superblock, filesystem is now frozen
        if (lfs_pair_cmp(dir->pair, (const lfs_block_t[2]){0, 1}) == 0) {
            LFS_WARN("Superblock 0x%"PRIx32" has become unwritable",
                    dir->pair[1]);
            return LFS_ERR_NOSPC;
        }

        // relocate half of pair
        int err = lfs_alloc(lfs, &dir->pair[1]);
        if (err && (err != LFS_ERR_NOSPC || !tired)) {
            return err;
        }

        tired = false;
        continue;
    }

    return relocated ? LFS_OK_RELOCATED : 0;
}
#endif
1909
#ifndef LFS_READONLY
// Compact dir, splitting it into additional metadata pairs as needed so the
// result fits in a block. Also proactively expands the superblock when it is
// being worn out and free space allows. Forwards lfs_dir_compact's return.
static int lfs_dir_splittingcompact(lfs_t *lfs, lfs_mdir_t *dir,
        const struct lfs_mattr *attrs, int attrcount,
        lfs_mdir_t *source, uint16_t begin, uint16_t end) {
    while (true) {
        // find size of first split, we do this by halving the split until
        // the metadata is guaranteed to fit
        //
        // Note that this isn't a true binary search, we never increase the
        // split size. This may result in poorly distributed metadata but isn't
        // worth the extra code size or performance hit to fix.
        lfs_size_t split = begin;
        while (end - split > 1) {
            // measure the on-disk size of entries [split, end)
            lfs_size_t size = 0;
            int err = lfs_dir_traverse(lfs,
                    source, 0, 0xffffffff, attrs, attrcount,
                    LFS_MKTAG(0x400, 0x3ff, 0),
                    LFS_MKTAG(LFS_TYPE_NAME, 0, 0),
                    split, end, -split,
                    lfs_dir_commit_size, &size);
            if (err) {
                return err;
            }

            // space is complicated, we need room for tail, crc, gstate,
            // cleanup delete, and we cap at half a block to give room
            // for metadata updates.
            if (end - split < 0xff
                    && size <= lfs_min(lfs->cfg->block_size - 36,
                        lfs_alignup(
                            (lfs->cfg->metadata_max
                                ? lfs->cfg->metadata_max
                                : lfs->cfg->block_size)/2,
                            lfs->cfg->prog_size))) {
                break;
            }

            split = split + ((end - split) / 2);
        }

        if (split == begin) {
            // no split needed
            break;
        }

        // split into two metadata pairs and continue
        int err = lfs_dir_split(lfs, dir, attrs, attrcount,
                source, split, end);
        if (err && err != LFS_ERR_NOSPC) {
            return err;
        }

        if (err) {
            // we can't allocate a new block, try to compact with degraded
            // performance
            LFS_WARN("Unable to split {0x%"PRIx32", 0x%"PRIx32"}",
                    dir->pair[0], dir->pair[1]);
            break;
        } else {
            end = split;
        }
    }

    if (lfs_dir_needsrelocation(lfs, dir)
            && lfs_pair_cmp(dir->pair, (const lfs_block_t[2]){0, 1}) == 0) {
        // oh no! we're writing too much to the superblock,
        // should we expand?
        lfs_ssize_t size = lfs_fs_rawsize(lfs);
        if (size < 0) {
            return size;
        }

        // do we have extra space? littlefs can't reclaim this space
        // by itself, so expand cautiously
        if ((lfs_size_t)size < lfs->cfg->block_count/2) {
            LFS_DEBUG("Expanding superblock at rev %"PRIu32, dir->rev);
            int err = lfs_dir_split(lfs, dir, attrs, attrcount,
                    source, begin, end);
            if (err && err != LFS_ERR_NOSPC) {
                return err;
            }

            if (err) {
                // welp, we tried, if we ran out of space there's not much
                // we can do, we'll error later if we've become frozen
                LFS_WARN("Unable to expand superblock");
            } else {
                end = begin;
            }
        }
    }

    // finally compact whatever remains into dir itself
    return lfs_dir_compact(lfs, dir, attrs, attrcount, source, begin, end);
}
#endif
2005
#ifndef LFS_READONLY
// Commit a set of attrs to the mdir dir (originally fetched from pair),
// falling back to compaction if the commit doesn't fit or the block is bad.
// Also fixes up any open handles in lfs->mlist affected by the commit.
// Returns 0, LFS_OK_DROPPED (dir became empty and should be dropped via
// pdir), or a negative error code.
static int lfs_dir_relocatingcommit(lfs_t *lfs, lfs_mdir_t *dir,
        const lfs_block_t pair[2],
        const struct lfs_mattr *attrs, int attrcount,
        lfs_mdir_t *pdir) {
    int state = 0;

    // calculate changes to the directory
    bool hasdelete = false;
    for (int i = 0; i < attrcount; i++) {
        if (lfs_tag_type3(attrs[i].tag) == LFS_TYPE_CREATE) {
            dir->count += 1;
        } else if (lfs_tag_type3(attrs[i].tag) == LFS_TYPE_DELETE) {
            LFS_ASSERT(dir->count > 0);
            dir->count -= 1;
            hasdelete = true;
        } else if (lfs_tag_type1(attrs[i].tag) == LFS_TYPE_TAIL) {
            dir->tail[0] = ((lfs_block_t*)attrs[i].buffer)[0];
            dir->tail[1] = ((lfs_block_t*)attrs[i].buffer)[1];
            dir->split = (lfs_tag_chunk(attrs[i].tag) & 1);
            lfs_pair_fromle32(dir->tail);
        }
    }

    // should we actually drop the directory block?
    if (hasdelete && dir->count == 0) {
        LFS_ASSERT(pdir);
        int err = lfs_fs_pred(lfs, dir->pair, pdir);
        if (err && err != LFS_ERR_NOENT) {
            return err;
        }

        // only droppable if a predecessor points at us with a hard tail
        if (err != LFS_ERR_NOENT && pdir->split) {
            state = LFS_OK_DROPPED;
            goto fixmlist;
        }
    }

    if (dir->erased) {
        // try to commit
        struct lfs_commit commit = {
            .block = dir->pair[0],
            .off = dir->off,
            .ptag = dir->etag,
            .crc = 0xffffffff,

            .begin = dir->off,
            // leave room (8 bytes) for the commit's crc footer
            .end = (lfs->cfg->metadata_max ?
                lfs->cfg->metadata_max : lfs->cfg->block_size) - 8,
        };

        // traverse attrs that need to be written out
        lfs_pair_tole32(dir->tail);
        int err = lfs_dir_traverse(lfs,
                dir, dir->off, dir->etag, attrs, attrcount,
                0, 0, 0, 0, 0,
                lfs_dir_commit_commit, &(struct lfs_dir_commit_commit){
                    lfs, &commit});
        lfs_pair_fromle32(dir->tail);
        if (err) {
            if (err == LFS_ERR_NOSPC || err == LFS_ERR_CORRUPT) {
                goto compact;
            }
            return err;
        }

        // commit any global diffs if we have any
        lfs_gstate_t delta = {0};
        lfs_gstate_xor(&delta, &lfs->gstate);
        lfs_gstate_xor(&delta, &lfs->gdisk);
        lfs_gstate_xor(&delta, &lfs->gdelta);
        delta.tag &= ~LFS_MKTAG(0, 0, 0x3ff);
        if (!lfs_gstate_iszero(&delta)) {
            err = lfs_dir_getgstate(lfs, dir, &delta);
            if (err) {
                return err;
            }

            lfs_gstate_tole32(&delta);
            err = lfs_dir_commitattr(lfs, &commit,
                    LFS_MKTAG(LFS_TYPE_MOVESTATE, 0x3ff,
                        sizeof(delta)), &delta);
            if (err) {
                if (err == LFS_ERR_NOSPC || err == LFS_ERR_CORRUPT) {
                    goto compact;
                }
                return err;
            }
        }

        // finalize commit with the crc
        err = lfs_dir_commitcrc(lfs, &commit);
        if (err) {
            if (err == LFS_ERR_NOSPC || err == LFS_ERR_CORRUPT) {
                goto compact;
            }
            return err;
        }

        // successful commit, update dir
        LFS_ASSERT(commit.off % lfs->cfg->prog_size == 0);
        dir->off = commit.off;
        dir->etag = commit.ptag;
        // and update gstate
        lfs->gdisk = lfs->gstate;
        lfs->gdelta = (lfs_gstate_t){0};

        goto fixmlist;
    }

compact:
    // fall back to compaction
    lfs_cache_drop(lfs, &lfs->pcache);

    state = lfs_dir_splittingcompact(lfs, dir, attrs, attrcount,
            dir, 0, dir->count);
    if (state < 0) {
        return state;
    }

    goto fixmlist;

fixmlist:;
    // this complicated bit of logic is for fixing up any active
    // metadata-pairs that we may have affected
    //
    // note we have to make two passes since the mdir passed to
    // lfs_dir_commit could also be in this list, and even then
    // we need to copy the pair so they don't get clobbered if we refetch
    // our mdir.
    lfs_block_t oldpair[2] = {pair[0], pair[1]};
    for (struct lfs_mlist *d = lfs->mlist; d; d = d->next) {
        if (lfs_pair_cmp(d->m.pair, oldpair) == 0) {
            d->m = *dir;
            if (d->m.pair != pair) {
                // adjust open ids for any creates/deletes we committed
                for (int i = 0; i < attrcount; i++) {
                    if (lfs_tag_type3(attrs[i].tag) == LFS_TYPE_DELETE &&
                            d->id == lfs_tag_id(attrs[i].tag)) {
                        // the open entry itself was deleted
                        d->m.pair[0] = LFS_BLOCK_NULL;
                        d->m.pair[1] = LFS_BLOCK_NULL;
                    } else if (lfs_tag_type3(attrs[i].tag) == LFS_TYPE_DELETE &&
                            d->id > lfs_tag_id(attrs[i].tag)) {
                        d->id -= 1;
                        if (d->type == LFS_TYPE_DIR) {
                            ((lfs_dir_t*)d)->pos -= 1;
                        }
                    } else if (lfs_tag_type3(attrs[i].tag) == LFS_TYPE_CREATE &&
                            d->id >= lfs_tag_id(attrs[i].tag)) {
                        d->id += 1;
                        if (d->type == LFS_TYPE_DIR) {
                            ((lfs_dir_t*)d)->pos += 1;
                        }
                    }
                }
            }

            while (d->id >= d->m.count && d->m.split) {
                // we split and id is on tail now
                d->id -= d->m.count;
                int err = lfs_dir_fetch(lfs, &d->m, d->m.tail);
                if (err) {
                    return err;
                }
            }
        }
    }

    return state;
}
#endif
2176
#ifndef LFS_READONLY
// Commit attrs to dir while handling relocations and drops reported by
// lfs_dir_relocatingcommit. The parent/predecessor fixups that would
// normally be tail recursive are flattened into a loop here to bound
// stack usage.
//
// Returns LFS_OK_ORPHANED if the fixups may have left orphaned
// metadata-pairs behind (caller must deorphan), 0 on a clean commit,
// or a negative error code.
static int lfs_dir_orphaningcommit(lfs_t *lfs, lfs_mdir_t *dir,
        const struct lfs_mattr *attrs, int attrcount) {
    // check for any inline files that aren't RAM backed and
    // forcefully evict them, needed for filesystem consistency
    for (lfs_file_t *f = (lfs_file_t*)lfs->mlist; f; f = f->next) {
        if (dir != &f->m && lfs_pair_cmp(f->m.pair, dir->pair) == 0 &&
                f->type == LFS_TYPE_REG && (f->flags & LFS_F_INLINE) &&
                f->ctz.size > lfs->cfg->cache_size) {
            int err = lfs_file_outline(lfs, f);
            if (err) {
                return err;
            }

            err = lfs_file_flush(lfs, f);
            if (err) {
                return err;
            }
        }
    }

    // attempt the commit on a copy so our mdir isn't clobbered if the
    // pair ends up relocated
    lfs_block_t lpair[2] = {dir->pair[0], dir->pair[1]};
    lfs_mdir_t ldir = *dir;
    lfs_mdir_t pdir;
    int state = lfs_dir_relocatingcommit(lfs, &ldir, dir->pair,
            attrs, attrcount, &pdir);
    if (state < 0) {
        return state;
    }

    // update if we're not in mlist, note we may have already been
    // updated if we are in mlist
    if (lfs_pair_cmp(dir->pair, lpair) == 0) {
        *dir = ldir;
    }

    // commit was successful, but may require other changes in the
    // filesystem, these would normally be tail recursive, but we have
    // flattened them here avoid unbounded stack usage

    // need to drop?
    if (state == LFS_OK_DROPPED) {
        // steal state
        int err = lfs_dir_getgstate(lfs, dir, &lfs->gdelta);
        if (err) {
            return err;
        }

        // steal tail, note that this can't create a recursive drop
        lpair[0] = pdir.pair[0];
        lpair[1] = pdir.pair[1];
        lfs_pair_tole32(dir->tail);
        state = lfs_dir_relocatingcommit(lfs, &pdir, lpair, LFS_MKATTRS(
                    {LFS_MKTAG(LFS_TYPE_TAIL + dir->split, 0x3ff, 8),
                        dir->tail}),
                NULL);
        lfs_pair_fromle32(dir->tail);
        if (state < 0) {
            return state;
        }

        ldir = pdir;
    }

    // need to relocate?
    bool orphans = false;
    while (state == LFS_OK_RELOCATED) {
        LFS_DEBUG("Relocating {0x%"PRIx32", 0x%"PRIx32"} "
                    "-> {0x%"PRIx32", 0x%"PRIx32"}",
                lpair[0], lpair[1], ldir.pair[0], ldir.pair[1]);
        state = 0;

        // update internal root
        if (lfs_pair_cmp(lpair, lfs->root) == 0) {
            lfs->root[0] = ldir.pair[0];
            lfs->root[1] = ldir.pair[1];
        }

        // update internally tracked dirs
        for (struct lfs_mlist *d = lfs->mlist; d; d = d->next) {
            if (lfs_pair_cmp(lpair, d->m.pair) == 0) {
                d->m.pair[0] = ldir.pair[0];
                d->m.pair[1] = ldir.pair[1];
            }

            if (d->type == LFS_TYPE_DIR &&
                    lfs_pair_cmp(lpair, ((lfs_dir_t*)d)->head) == 0) {
                ((lfs_dir_t*)d)->head[0] = ldir.pair[0];
                ((lfs_dir_t*)d)->head[1] = ldir.pair[1];
            }
        }

        // find parent
        lfs_stag_t tag = lfs_fs_parent(lfs, lpair, &pdir);
        if (tag < 0 && tag != LFS_ERR_NOENT) {
            return tag;
        }

        bool hasparent = (tag != LFS_ERR_NOENT);
        if (tag != LFS_ERR_NOENT) {
            // note that if we have a parent, we must have a pred, so this will
            // always create an orphan
            int err = lfs_fs_preporphans(lfs, +1);
            if (err) {
                return err;
            }

            // fix pending move in this pair? this looks like an optimization but
            // is in fact _required_ since relocating may outdate the move.
            uint16_t moveid = 0x3ff;
            if (lfs_gstate_hasmovehere(&lfs->gstate, pdir.pair)) {
                moveid = lfs_tag_id(lfs->gstate.tag);
                LFS_DEBUG("Fixing move while relocating "
                        "{0x%"PRIx32", 0x%"PRIx32"} 0x%"PRIx16"\n",
                        pdir.pair[0], pdir.pair[1], moveid);
                lfs_fs_prepmove(lfs, 0x3ff, NULL);
                if (moveid < lfs_tag_id(tag)) {
                    // moved entry sat before ours, our id shifts down by one
                    tag -= LFS_MKTAG(0, 1, 0);
                }
            }

            // update the parent's pointer to our relocated pair
            lfs_block_t ppair[2] = {pdir.pair[0], pdir.pair[1]};
            lfs_pair_tole32(ldir.pair);
            state = lfs_dir_relocatingcommit(lfs, &pdir, ppair, LFS_MKATTRS(
                        {LFS_MKTAG_IF(moveid != 0x3ff,
                            LFS_TYPE_DELETE, moveid, 0), NULL},
                        {tag, ldir.pair}),
                    NULL);
            lfs_pair_fromle32(ldir.pair);
            if (state < 0) {
                return state;
            }

            if (state == LFS_OK_RELOCATED) {
                // parent itself relocated, loop again to fix its parent
                lpair[0] = ppair[0];
                lpair[1] = ppair[1];
                ldir = pdir;
                orphans = true;
                continue;
            }
        }

        // find pred
        int err = lfs_fs_pred(lfs, lpair, &pdir);
        if (err && err != LFS_ERR_NOENT) {
            return err;
        }
        LFS_ASSERT(!(hasparent && err == LFS_ERR_NOENT));

        // if we can't find dir, it must be new
        if (err != LFS_ERR_NOENT) {
            if (lfs_gstate_hasorphans(&lfs->gstate)) {
                // next step, clean up orphans
                err = lfs_fs_preporphans(lfs, -hasparent);
                if (err) {
                    return err;
                }
            }

            // fix pending move in this pair? this looks like an optimization
            // but is in fact _required_ since relocating may outdate the move.
            uint16_t moveid = 0x3ff;
            if (lfs_gstate_hasmovehere(&lfs->gstate, pdir.pair)) {
                moveid = lfs_tag_id(lfs->gstate.tag);
                LFS_DEBUG("Fixing move while relocating "
                        "{0x%"PRIx32", 0x%"PRIx32"} 0x%"PRIx16"\n",
                        pdir.pair[0], pdir.pair[1], moveid);
                lfs_fs_prepmove(lfs, 0x3ff, NULL);
            }

            // replace bad pair, either we clean up desync, or no desync occured
            lpair[0] = pdir.pair[0];
            lpair[1] = pdir.pair[1];
            lfs_pair_tole32(ldir.pair);
            state = lfs_dir_relocatingcommit(lfs, &pdir, lpair, LFS_MKATTRS(
                        {LFS_MKTAG_IF(moveid != 0x3ff,
                            LFS_TYPE_DELETE, moveid, 0), NULL},
                        {LFS_MKTAG(LFS_TYPE_TAIL + pdir.split, 0x3ff, 8),
                            ldir.pair}),
                    NULL);
            lfs_pair_fromle32(ldir.pair);
            if (state < 0) {
                return state;
            }

            ldir = pdir;
        }
    }

    return orphans ? LFS_OK_ORPHANED : 0;
}
#endif
2369
2370 #ifndef LFS_READONLY
lfs_dir_commit(lfs_t * lfs,lfs_mdir_t * dir,const struct lfs_mattr * attrs,int attrcount)2371 static int lfs_dir_commit(lfs_t *lfs, lfs_mdir_t *dir,
2372 const struct lfs_mattr *attrs, int attrcount) {
2373 int orphans = lfs_dir_orphaningcommit(lfs, dir, attrs, attrcount);
2374 if (orphans < 0) {
2375 return orphans;
2376 }
2377
2378 if (orphans) {
2379 // make sure we've removed all orphans, this is a noop if there
2380 // are none, but if we had nested blocks failures we may have
2381 // created some
2382 int err = lfs_fs_deorphan(lfs, false);
2383 if (err) {
2384 return err;
2385 }
2386 }
2387
2388 return 0;
2389 }
2390 #endif
2391
2392
2393 /// Top level directory operations ///
#ifndef LFS_READONLY
// Create the directory at path.
//
// Allocates a fresh metadata pair, links it into the end of the
// metadata-pair list, then inserts a name+dirstruct entry into the
// parent. Returns 0, or a negative error code (LFS_ERR_EXIST,
// LFS_ERR_NAMETOOLONG, ...).
static int lfs_rawmkdir(lfs_t *lfs, const char *path) {
    // deorphan if we haven't yet, needed at most once after poweron
    int err = lfs_fs_forceconsistency(lfs);
    if (err) {
        return err;
    }

    // cwd doubles as a temporary mlist entry so relocations during our
    // commits below keep it up to date
    struct lfs_mlist cwd;
    cwd.next = lfs->mlist;
    uint16_t id;
    err = lfs_dir_find(lfs, &cwd.m, &path, &id);
    if (!(err == LFS_ERR_NOENT && id != 0x3ff)) {
        // entry already exists, or the lookup failed outright
        return (err < 0) ? err : LFS_ERR_EXIST;
    }

    // check that name fits
    lfs_size_t nlen = strlen(path);
    if (nlen > lfs->name_max) {
        return LFS_ERR_NAMETOOLONG;
    }

    // build up new directory
    lfs_alloc_ack(lfs);
    lfs_mdir_t dir;
    err = lfs_dir_alloc(lfs, &dir);
    if (err) {
        return err;
    }

    // find end of list
    lfs_mdir_t pred = cwd.m;
    while (pred.split) {
        err = lfs_dir_fetch(lfs, &pred, pred.tail);
        if (err) {
            return err;
        }
    }

    // setup dir, the new dir's tail takes over pred's current tail
    lfs_pair_tole32(pred.tail);
    err = lfs_dir_commit(lfs, &dir, LFS_MKATTRS(
            {LFS_MKTAG(LFS_TYPE_SOFTTAIL, 0x3ff, 8), pred.tail}));
    lfs_pair_fromle32(pred.tail);
    if (err) {
        return err;
    }

    // current block not end of list?
    if (cwd.m.split) {
        // update tails, this creates a desync
        err = lfs_fs_preporphans(lfs, +1);
        if (err) {
            return err;
        }

        // it's possible our predecessor has to be relocated, and if
        // our parent is our predecessor's predecessor, this could have
        // caused our parent to go out of date, fortunately we can hook
        // ourselves into littlefs to catch this
        cwd.type = 0;
        cwd.id = 0;
        lfs->mlist = &cwd;

        lfs_pair_tole32(dir.pair);
        err = lfs_dir_commit(lfs, &pred, LFS_MKATTRS(
                {LFS_MKTAG(LFS_TYPE_SOFTTAIL, 0x3ff, 8), dir.pair}));
        lfs_pair_fromle32(dir.pair);
        if (err) {
            lfs->mlist = cwd.next;
            return err;
        }

        lfs->mlist = cwd.next;
        err = lfs_fs_preporphans(lfs, -1);
        if (err) {
            return err;
        }
    }

    // now insert into our parent block
    lfs_pair_tole32(dir.pair);
    err = lfs_dir_commit(lfs, &cwd.m, LFS_MKATTRS(
            {LFS_MKTAG(LFS_TYPE_CREATE, id, 0), NULL},
            {LFS_MKTAG(LFS_TYPE_DIR, id, nlen), path},
            {LFS_MKTAG(LFS_TYPE_DIRSTRUCT, id, 8), dir.pair},
            {LFS_MKTAG_IF(!cwd.m.split,
                LFS_TYPE_SOFTTAIL, 0x3ff, 8), dir.pair}));
    lfs_pair_fromle32(dir.pair);
    if (err) {
        return err;
    }

    return 0;
}
#endif
2490
// Open the directory at path, fetching its first metadata pair and
// registering it in the mdir list so commits keep it in sync.
// Returns 0, LFS_ERR_NOTDIR, or another negative error code.
static int lfs_dir_rawopen(lfs_t *lfs, lfs_dir_t *dir, const char *path) {
    lfs_stag_t tag = lfs_dir_find(lfs, &dir->m, &path, NULL);
    if (tag < 0) {
        return tag;
    }

    if (lfs_tag_type3(tag) != LFS_TYPE_DIR) {
        return LFS_ERR_NOTDIR;
    }

    lfs_block_t pair[2];
    if (lfs_tag_id(tag) == 0x3ff) {
        // handle root dir separately
        pair[0] = lfs->root[0];
        pair[1] = lfs->root[1];
    } else {
        // get dir pair from parent
        lfs_stag_t res = lfs_dir_get(lfs, &dir->m, LFS_MKTAG(0x700, 0x3ff, 0),
                LFS_MKTAG(LFS_TYPE_STRUCT, lfs_tag_id(tag), 8), pair);
        if (res < 0) {
            return res;
        }
        lfs_pair_fromle32(pair);
    }

    // fetch first pair
    int err = lfs_dir_fetch(lfs, &dir->m, pair);
    if (err) {
        return err;
    }

    // setup entry
    dir->head[0] = dir->m.pair[0];
    dir->head[1] = dir->m.pair[1];
    dir->id = 0;
    dir->pos = 0;

    // add to list of mdirs
    dir->type = LFS_TYPE_DIR;
    lfs_mlist_append(lfs, (struct lfs_mlist *)dir);

    return 0;
}
2534
// closing a dir only requires dropping it from the mdir list
static int lfs_dir_rawclose(lfs_t *lfs, lfs_dir_t *dir) {
    lfs_mlist_remove(lfs, (struct lfs_mlist *)dir);
    return 0;
}
2541
// Read the next directory entry into info.
//
// Returns true if an entry was read, false at the end of the dir, or a
// negative error code.
static int lfs_dir_rawread(lfs_t *lfs, lfs_dir_t *dir, struct lfs_info *info) {
    memset(info, 0, sizeof(*info));

    // positions 0 and 1 are synthesized "." and ".." entries
    if (dir->pos < 2) {
        info->type = LFS_TYPE_DIR;
        strcpy(info->name, (dir->pos == 0) ? "." : "..");
        dir->pos += 1;
        return true;
    }

    for (;;) {
        if (dir->id == dir->m.count) {
            if (!dir->m.split) {
                // no more metadata pairs, end of dir
                return false;
            }

            // move on to the next pair in the list
            int err = lfs_dir_fetch(lfs, &dir->m, dir->m.tail);
            if (err) {
                return err;
            }

            dir->id = 0;
        }

        // deleted ids report LFS_ERR_NOENT, skip those
        int err = lfs_dir_getinfo(lfs, &dir->m, dir->id, info);
        if (err && err != LFS_ERR_NOENT) {
            return err;
        }

        dir->id += 1;
        if (err != LFS_ERR_NOENT) {
            break;
        }
    }

    dir->pos += 1;
    return true;
}
2586
// Seek dir to the entry at absolute offset off (an offset previously
// returned by lfs_dir_rawtell), rewinding to the head pair and walking
// forward. Returns 0, LFS_ERR_INVAL for an out-of-range offset, or
// another negative error code.
static int lfs_dir_rawseek(lfs_t *lfs, lfs_dir_t *dir, lfs_off_t off) {
    // simply walk from head dir
    int err = lfs_dir_rawrewind(lfs, dir);
    if (err) {
        return err;
    }

    // first two for ./..
    dir->pos = lfs_min(2, off);
    off -= dir->pos;

    // skip superblock entry
    dir->id = (off > 0 && lfs_pair_cmp(dir->head, lfs->root) == 0);

    while (off > 0) {
        // skip at most to the end of the current pair
        int diff = lfs_min(dir->m.count - dir->id, off);
        dir->id += diff;
        dir->pos += diff;
        off -= diff;

        if (dir->id == dir->m.count) {
            if (!dir->m.split) {
                // ran off the end of the dir, bad offset
                return LFS_ERR_INVAL;
            }

            err = lfs_dir_fetch(lfs, &dir->m, dir->m.tail);
            if (err) {
                return err;
            }

            dir->id = 0;
        }
    }

    return 0;
}
2623
lfs_dir_rawtell(lfs_t * lfs,lfs_dir_t * dir)2624 static lfs_soff_t lfs_dir_rawtell(lfs_t *lfs, lfs_dir_t *dir) {
2625 (void)lfs;
2626 return dir->pos;
2627 }
2628
// rewind dir by refetching the head pair and resetting our position
static int lfs_dir_rawrewind(lfs_t *lfs, lfs_dir_t *dir) {
    int err = lfs_dir_fetch(lfs, &dir->m, dir->head);
    if (err == 0) {
        dir->id = 0;
        dir->pos = 0;
    }
    return err;
}
2640
2641
2642 /// File index list operations ///
// Convert a file byte position into the index of the CTZ skip-list
// block that holds it, updating *off in place to the offset within
// that block.
//
// b is the number of pure-data bytes per block; blocks also hold
// 4-byte skip pointers (see lfs_ctz_find/extend), which this
// closed-form accounts for via popcount. See littlefs DESIGN.md for
// the derivation.
static int lfs_ctz_index(lfs_t *lfs, lfs_off_t *off) {
    lfs_off_t size = *off;
    lfs_off_t b = lfs->cfg->block_size - 2*4;
    lfs_off_t i = size / b;
    if (i == 0) {
        // position falls in the first block, no pointer overhead
        return 0;
    }

    // correct for the skip pointers stored in the preceding blocks
    i = (size - 4*(lfs_popc(i-1)+2)) / b;
    *off = size - b*i - 4*lfs_popc(i);
    return i;
}
2655
// Find the block and in-block offset backing file position pos in a
// CTZ skip-list with the given head block and total file size.
static int lfs_ctz_find(lfs_t *lfs,
        const lfs_cache_t *pcache, lfs_cache_t *rcache,
        lfs_block_t head, lfs_size_t size,
        lfs_size_t pos, lfs_block_t *block, lfs_off_t *off) {
    if (size == 0) {
        // empty file, nothing to find
        *block = LFS_BLOCK_NULL;
        *off = 0;
        return 0;
    }

    // index of the last block (where head points) and of the target
    lfs_off_t current = lfs_ctz_index(lfs, &(lfs_off_t){size-1});
    lfs_off_t target = lfs_ctz_index(lfs, &pos);

    // hop backwards through the skip-list, taking the largest skip
    // pointer that doesn't overshoot the target index
    while (current > target) {
        lfs_size_t skip = lfs_min(
                lfs_npw2(current-target+1) - 1,
                lfs_ctz(current));

        // skip pointers are 4-byte little-endian block addresses at
        // the start of each block
        int err = lfs_bd_read(lfs,
                pcache, rcache, sizeof(head),
                head, 4*skip, &head, sizeof(head));
        head = lfs_fromle32(head);
        if (err) {
            return err;
        }

        current -= 1 << skip;
    }

    *block = head;
    *off = pos;
    return 0;
}
2689
#ifndef LFS_READONLY
// Extend the CTZ skip-list (head, size) into a newly allocated block,
// returning the new block and the offset where writing should resume.
//
// If the last block is only partially filled its contents are copied
// into the new block instead of appending (blocks are never updated in
// place). Bad blocks are handled by retrying with a fresh allocation.
static int lfs_ctz_extend(lfs_t *lfs,
        lfs_cache_t *pcache, lfs_cache_t *rcache,
        lfs_block_t head, lfs_size_t size,
        lfs_block_t *block, lfs_off_t *off) {
    while (true) {
        // go ahead and grab a block
        lfs_block_t nblock;
        int err = lfs_alloc(lfs, &nblock);
        if (err) {
            return err;
        }

        {
            err = lfs_bd_erase(lfs, nblock);
            if (err) {
                if (err == LFS_ERR_CORRUPT) {
                    goto relocate;
                }
                return err;
            }

            if (size == 0) {
                // first block of the file, no pointers needed
                *block = nblock;
                *off = 0;
                return 0;
            }

            // index of the current last block; noff becomes the number
            // of bytes used within it
            lfs_size_t noff = size - 1;
            lfs_off_t index = lfs_ctz_index(lfs, &noff);
            noff = noff + 1;

            // just copy out the last block if it is incomplete
            if (noff != lfs->cfg->block_size) {
                for (lfs_off_t i = 0; i < noff; i++) {
                    uint8_t data;
                    err = lfs_bd_read(lfs,
                            NULL, rcache, noff-i,
                            head, i, &data, 1);
                    if (err) {
                        return err;
                    }

                    err = lfs_bd_prog(lfs,
                            pcache, rcache, true,
                            nblock, i, &data, 1);
                    if (err) {
                        if (err == LFS_ERR_CORRUPT) {
                            goto relocate;
                        }
                        return err;
                    }
                }

                *block = nblock;
                *off = noff;
                return 0;
            }

            // append block
            index += 1;
            // one pointer per trailing-zero bit of the new index, plus
            // the pointer to the previous block
            lfs_size_t skips = lfs_ctz(index) + 1;
            lfs_block_t nhead = head;
            for (lfs_off_t i = 0; i < skips; i++) {
                // pointers are stored little-endian at the block start
                nhead = lfs_tole32(nhead);
                err = lfs_bd_prog(lfs, pcache, rcache, true,
                        nblock, 4*i, &nhead, 4);
                nhead = lfs_fromle32(nhead);
                if (err) {
                    if (err == LFS_ERR_CORRUPT) {
                        goto relocate;
                    }
                    return err;
                }

                if (i != skips-1) {
                    // follow the previous block's same-level pointer
                    err = lfs_bd_read(lfs,
                            NULL, rcache, sizeof(nhead),
                            nhead, 4*i, &nhead, sizeof(nhead));
                    nhead = lfs_fromle32(nhead);
                    if (err) {
                        return err;
                    }
                }
            }

            *block = nblock;
            *off = 4*skips;
            return 0;
        }

relocate:
        LFS_DEBUG("Bad block at 0x%"PRIx32, nblock);

        // just clear cache and try a new block
        lfs_cache_drop(lfs, pcache);
    }
}
#endif
2789
// Call cb(data, block) for every block in the CTZ skip-list
// (head, size), stopping early if cb returns nonzero.
//
// Reads the first one or two pointers of each block so that two blocks
// can be visited per read where the index parity allows it.
static int lfs_ctz_traverse(lfs_t *lfs,
        const lfs_cache_t *pcache, lfs_cache_t *rcache,
        lfs_block_t head, lfs_size_t size,
        int (*cb)(void*, lfs_block_t), void *data) {
    if (size == 0) {
        // empty file, no blocks to visit
        return 0;
    }

    lfs_off_t index = lfs_ctz_index(lfs, &(lfs_off_t){size-1});

    while (true) {
        int err = cb(data, head);
        if (err) {
            return err;
        }

        if (index == 0) {
            // reached the first block, done
            return 0;
        }

        // read one or two pointers depending on index parity
        lfs_block_t heads[2];
        int count = 2 - (index & 1);
        err = lfs_bd_read(lfs,
                pcache, rcache, count*sizeof(head),
                head, 0, &heads, count*sizeof(head));
        heads[0] = lfs_fromle32(heads[0]);
        heads[1] = lfs_fromle32(heads[1]);
        if (err) {
            return err;
        }

        for (int i = 0; i < count-1; i++) {
            err = cb(data, heads[i]);
            if (err) {
                return err;
            }
        }

        head = heads[count-1];
        index -= count;
    }
}
2832
2833
2834 /// Top level file operations ///
// Open (and optionally create) the file at path with the given flags
// and per-file config.
//
// On success the file is registered in the mdir list, its cache buffer
// is set up, and inline file contents are loaded into the cache. On
// failure any partially-acquired resources are released via
// lfs_file_rawclose.
static int lfs_file_rawopencfg(lfs_t *lfs, lfs_file_t *file,
        const char *path, int flags,
        const struct lfs_file_config *cfg) {
#ifndef LFS_READONLY
    // deorphan if we haven't yet, needed at most once after poweron
    if ((flags & LFS_O_WRONLY) == LFS_O_WRONLY) {
        int err = lfs_fs_forceconsistency(lfs);
        if (err) {
            return err;
        }
    }
#else
    LFS_ASSERT((flags & LFS_O_RDONLY) == LFS_O_RDONLY);
#endif

    // setup simple file details
    int err;
    file->cfg = cfg;
    file->flags = flags;
    file->pos = 0;
    file->off = 0;
    file->cache.buffer = NULL;

    // allocate entry for file if it doesn't exist
    lfs_stag_t tag = lfs_dir_find(lfs, &file->m, &path, &file->id);
    if (tag < 0 && !(tag == LFS_ERR_NOENT && file->id != 0x3ff)) {
        err = tag;
        goto cleanup;
    }

    // get id, add to list of mdirs to catch update changes
    file->type = LFS_TYPE_REG;
    lfs_mlist_append(lfs, (struct lfs_mlist *)file);

#ifdef LFS_READONLY
    if (tag == LFS_ERR_NOENT) {
        err = LFS_ERR_NOENT;
        goto cleanup;
#else
    if (tag == LFS_ERR_NOENT) {
        if (!(flags & LFS_O_CREAT)) {
            err = LFS_ERR_NOENT;
            goto cleanup;
        }

        // check that name fits
        lfs_size_t nlen = strlen(path);
        if (nlen > lfs->name_max) {
            err = LFS_ERR_NAMETOOLONG;
            goto cleanup;
        }

        // get next slot and create entry to remember name
        err = lfs_dir_commit(lfs, &file->m, LFS_MKATTRS(
                {LFS_MKTAG(LFS_TYPE_CREATE, file->id, 0), NULL},
                {LFS_MKTAG(LFS_TYPE_REG, file->id, nlen), path},
                {LFS_MKTAG(LFS_TYPE_INLINESTRUCT, file->id, 0), NULL}));

        // it may happen that the file name doesn't fit in the metadata
        // blocks, e.g., a 256 byte file name will not fit in a 128 byte
        // block.
        err = (err == LFS_ERR_NOSPC) ? LFS_ERR_NAMETOOLONG : err;
        if (err) {
            goto cleanup;
        }

        tag = LFS_MKTAG(LFS_TYPE_INLINESTRUCT, 0, 0);
    } else if (flags & LFS_O_EXCL) {
        err = LFS_ERR_EXIST;
        goto cleanup;
#endif
    } else if (lfs_tag_type3(tag) != LFS_TYPE_REG) {
        err = LFS_ERR_ISDIR;
        goto cleanup;
#ifndef LFS_READONLY
    } else if (flags & LFS_O_TRUNC) {
        // truncate if requested
        tag = LFS_MKTAG(LFS_TYPE_INLINESTRUCT, file->id, 0);
        file->flags |= LFS_F_DIRTY;
#endif
    } else {
        // try to load what's on disk, if it's inlined we'll fix it later
        tag = lfs_dir_get(lfs, &file->m, LFS_MKTAG(0x700, 0x3ff, 0),
                LFS_MKTAG(LFS_TYPE_STRUCT, file->id, 8), &file->ctz);
        if (tag < 0) {
            err = tag;
            goto cleanup;
        }
        lfs_ctz_fromle32(&file->ctz);
    }

    // fetch attrs
    for (unsigned i = 0; i < file->cfg->attr_count; i++) {
        // if opened for read / read-write operations
        if ((file->flags & LFS_O_RDONLY) == LFS_O_RDONLY) {
            lfs_stag_t res = lfs_dir_get(lfs, &file->m,
                    LFS_MKTAG(0x7ff, 0x3ff, 0),
                    LFS_MKTAG(LFS_TYPE_USERATTR + file->cfg->attrs[i].type,
                        file->id, file->cfg->attrs[i].size),
                    file->cfg->attrs[i].buffer);
            if (res < 0 && res != LFS_ERR_NOENT) {
                err = res;
                goto cleanup;
            }
        }

#ifndef LFS_READONLY
        // if opened for write / read-write operations
        if ((file->flags & LFS_O_WRONLY) == LFS_O_WRONLY) {
            if (file->cfg->attrs[i].size > lfs->attr_max) {
                err = LFS_ERR_NOSPC;
                goto cleanup;
            }

            file->flags |= LFS_F_DIRTY;
        }
#endif
    }

    // allocate buffer if needed
    if (file->cfg->buffer) {
        file->cache.buffer = file->cfg->buffer;
    } else {
        file->cache.buffer = lfs_malloc(lfs->cfg->cache_size);
        if (!file->cache.buffer) {
            err = LFS_ERR_NOMEM;
            goto cleanup;
        }
    }

    // zero to avoid information leak
    lfs_cache_zero(lfs, &file->cache);

    if (lfs_tag_type3(tag) == LFS_TYPE_INLINESTRUCT) {
        // load inline files
        file->ctz.head = LFS_BLOCK_INLINE;
        file->ctz.size = lfs_tag_size(tag);
        file->flags |= LFS_F_INLINE;
        file->cache.block = file->ctz.head;
        file->cache.off = 0;
        file->cache.size = lfs->cfg->cache_size;

        // don't always read (may be new/trunc file)
        if (file->ctz.size > 0) {
            lfs_stag_t res = lfs_dir_get(lfs, &file->m,
                    LFS_MKTAG(0x700, 0x3ff, 0),
                    LFS_MKTAG(LFS_TYPE_STRUCT, file->id,
                        lfs_min(file->cache.size, 0x3fe)),
                    file->cache.buffer);
            if (res < 0) {
                err = res;
                goto cleanup;
            }
        }
    }

    return 0;

cleanup:
    // clean up lingering resources
#ifndef LFS_READONLY
    file->flags |= LFS_F_ERRED;
#endif
    lfs_file_rawclose(lfs, file);
    return err;
}
3000
3001 static int lfs_file_rawopen(lfs_t *lfs, lfs_file_t *file,
3002 const char *path, int flags) {
3003 static const struct lfs_file_config defaults = {0};
3004 int err = lfs_file_rawopencfg(lfs, file, path, flags, &defaults);
3005 return err;
3006 }
3007
// Close file: sync any pending writes (unless built read-only), remove
// the file from the tracked mdir list, and free the cache buffer if it
// was allocated internally. Returns the sync result.
static int lfs_file_rawclose(lfs_t *lfs, lfs_file_t *file) {
#ifndef LFS_READONLY
    int err = lfs_file_rawsync(lfs, file);
#else
    int err = 0;
#endif

    // remove from list of mdirs
    lfs_mlist_remove(lfs, (struct lfs_mlist*)file);

    // clean up memory, only if the cache buffer wasn't user-provided
    if (!file->cfg->buffer) {
        lfs_free(file->cache.buffer);
    }

    return err;
}
3025
3026
#ifndef LFS_READONLY
// Move the contents of file's currently-open block (the first
// file->off bytes) into a freshly allocated block, retrying on bad
// blocks. Used both for bad-block recovery and, via lfs_file_outline,
// to evict inline files.
static int lfs_file_relocate(lfs_t *lfs, lfs_file_t *file) {
    while (true) {
        // just relocate what exists into new block
        lfs_block_t nblock;
        int err = lfs_alloc(lfs, &nblock);
        if (err) {
            return err;
        }

        err = lfs_bd_erase(lfs, nblock);
        if (err) {
            if (err == LFS_ERR_CORRUPT) {
                goto relocate;
            }
            return err;
        }

        // either read from dirty cache or disk
        for (lfs_off_t i = 0; i < file->off; i++) {
            uint8_t data;
            if (file->flags & LFS_F_INLINE) {
                err = lfs_dir_getread(lfs, &file->m,
                        // note we evict inline files before they can be dirty
                        NULL, &file->cache, file->off-i,
                        LFS_MKTAG(0xfff, 0x1ff, 0),
                        LFS_MKTAG(LFS_TYPE_INLINESTRUCT, file->id, 0),
                        i, &data, 1);
                if (err) {
                    return err;
                }
            } else {
                err = lfs_bd_read(lfs,
                        &file->cache, &lfs->rcache, file->off-i,
                        file->block, i, &data, 1);
                if (err) {
                    return err;
                }
            }

            err = lfs_bd_prog(lfs,
                    &lfs->pcache, &lfs->rcache, true,
                    nblock, i, &data, 1);
            if (err) {
                if (err == LFS_ERR_CORRUPT) {
                    goto relocate;
                }
                return err;
            }
        }

        // copy over new state of file, taking over the pcache contents
        memcpy(file->cache.buffer, lfs->pcache.buffer, lfs->cfg->cache_size);
        file->cache.block = lfs->pcache.block;
        file->cache.off = lfs->pcache.off;
        file->cache.size = lfs->pcache.size;
        lfs_cache_zero(lfs, &lfs->pcache);

        file->block = nblock;
        file->flags |= LFS_F_WRITING;
        return 0;

relocate:
        LFS_DEBUG("Bad block at 0x%"PRIx32, nblock);

        // just clear cache and try a new block
        lfs_cache_drop(lfs, &lfs->pcache);
    }
}
#endif
3097
3098 #ifndef LFS_READONLY
3099 static int lfs_file_outline(lfs_t *lfs, lfs_file_t *file) {
3100 file->off = file->pos;
3101 lfs_alloc_ack(lfs);
3102 int err = lfs_file_relocate(lfs, file);
3103 if (err) {
3104 return err;
3105 }
3106
3107 file->flags &= ~LFS_F_INLINE;
3108 return 0;
3109 }
3110 #endif
3111
// Flush file's cached read/write state to disk.
//
// For a file mid-write, bytes after the current position are copied
// over from the old CTZ list, the cache is written out (relocating on
// bad blocks), and the in-RAM ctz head/size are updated. The file's
// metadata entry is not committed here (see lfs_file_rawsync).
static int lfs_file_flush(lfs_t *lfs, lfs_file_t *file) {
    if (file->flags & LFS_F_READING) {
        if (!(file->flags & LFS_F_INLINE)) {
            lfs_cache_drop(lfs, &file->cache);
        }
        file->flags &= ~LFS_F_READING;
    }

#ifndef LFS_READONLY
    if (file->flags & LFS_F_WRITING) {
        lfs_off_t pos = file->pos;

        if (!(file->flags & LFS_F_INLINE)) {
            // copy over anything after current branch
            lfs_file_t orig = {
                .ctz.head = file->ctz.head,
                .ctz.size = file->ctz.size,
                .flags = LFS_O_RDONLY,
                .pos = file->pos,
                .cache = lfs->rcache,
            };
            lfs_cache_drop(lfs, &lfs->rcache);

            while (file->pos < file->ctz.size) {
                // copy over a byte at a time, leave it up to caching
                // to make this efficient
                uint8_t data;
                lfs_ssize_t res = lfs_file_flushedread(lfs, &orig, &data, 1);
                if (res < 0) {
                    return res;
                }

                res = lfs_file_flushedwrite(lfs, file, &data, 1);
                if (res < 0) {
                    return res;
                }

                // keep our reference to the rcache in sync
                if (lfs->rcache.block != LFS_BLOCK_NULL) {
                    lfs_cache_drop(lfs, &orig.cache);
                    lfs_cache_drop(lfs, &lfs->rcache);
                }
            }

            // write out what we have
            while (true) {
                int err = lfs_bd_flush(lfs, &file->cache, &lfs->rcache, true);
                if (err) {
                    if (err == LFS_ERR_CORRUPT) {
                        goto relocate;
                    }
                    return err;
                }

                break;

relocate:
                LFS_DEBUG("Bad block at 0x%"PRIx32, file->block);
                err = lfs_file_relocate(lfs, file);
                if (err) {
                    return err;
                }
            }
        } else {
            // inline file, size is simply the furthest position written
            file->pos = lfs_max(file->pos, file->ctz.size);
        }

        // actual file updates
        file->ctz.head = file->block;
        file->ctz.size = file->pos;
        file->flags &= ~LFS_F_WRITING;
        file->flags |= LFS_F_DIRTY;

        file->pos = pos;
    }
#endif

    return 0;
}
3191
#ifndef LFS_READONLY
// Sync file to disk: flush cached data, then commit the file's
// metadata entry (inline data or ctz reference, plus user attrs) if
// dirty. A file that previously errored is deliberately left alone.
static int lfs_file_rawsync(lfs_t *lfs, lfs_file_t *file) {
    if (file->flags & LFS_F_ERRED) {
        // it's not safe to do anything if our file errored
        return 0;
    }

    int err = lfs_file_flush(lfs, file);
    if (err) {
        file->flags |= LFS_F_ERRED;
        return err;
    }


    if ((file->flags & LFS_F_DIRTY) &&
            !lfs_pair_isnull(file->m.pair)) {
        // update dir entry
        uint16_t type;
        const void *buffer;
        lfs_size_t size;
        struct lfs_ctz ctz;
        if (file->flags & LFS_F_INLINE) {
            // inline the whole file
            type = LFS_TYPE_INLINESTRUCT;
            buffer = file->cache.buffer;
            size = file->ctz.size;
        } else {
            // update the ctz reference
            type = LFS_TYPE_CTZSTRUCT;
            // copy ctz so alloc will work during a relocate
            ctz = file->ctz;
            lfs_ctz_tole32(&ctz);
            buffer = &ctz;
            size = sizeof(ctz);
        }

        // commit file data and attributes
        err = lfs_dir_commit(lfs, &file->m, LFS_MKATTRS(
                {LFS_MKTAG(type, file->id, size), buffer},
                {LFS_MKTAG(LFS_FROM_USERATTRS, file->id,
                    file->cfg->attr_count), file->cfg->attrs}));
        if (err) {
            file->flags |= LFS_F_ERRED;
            return err;
        }

        file->flags &= ~LFS_F_DIRTY;
    }

    return 0;
}
#endif
3244
// Read up to size bytes at the current position, assuming any pending
// writes have already been flushed. Returns the number of bytes read
// (0 at end-of-file) or a negative error code.
static lfs_ssize_t lfs_file_flushedread(lfs_t *lfs, lfs_file_t *file,
        void *buffer, lfs_size_t size) {
    uint8_t *data = buffer;
    lfs_size_t nsize = size;

    if (file->pos >= file->ctz.size) {
        // eof if past end
        return 0;
    }

    // clamp to the bytes remaining in the file
    size = lfs_min(size, file->ctz.size - file->pos);
    nsize = size;

    while (nsize > 0) {
        // check if we need a new block
        if (!(file->flags & LFS_F_READING) ||
                file->off == lfs->cfg->block_size) {
            if (!(file->flags & LFS_F_INLINE)) {
                int err = lfs_ctz_find(lfs, NULL, &file->cache,
                        file->ctz.head, file->ctz.size,
                        file->pos, &file->block, &file->off);
                if (err) {
                    return err;
                }
            } else {
                file->block = LFS_BLOCK_INLINE;
                file->off = file->pos;
            }

            file->flags |= LFS_F_READING;
        }

        // read as much as we can in current block
        lfs_size_t diff = lfs_min(nsize, lfs->cfg->block_size - file->off);
        if (file->flags & LFS_F_INLINE) {
            // inline files live in the metadata pair, not a data block
            int err = lfs_dir_getread(lfs, &file->m,
                    NULL, &file->cache, lfs->cfg->block_size,
                    LFS_MKTAG(0xfff, 0x1ff, 0),
                    LFS_MKTAG(LFS_TYPE_INLINESTRUCT, file->id, 0),
                    file->off, data, diff);
            if (err) {
                return err;
            }
        } else {
            int err = lfs_bd_read(lfs,
                    NULL, &file->cache, lfs->cfg->block_size,
                    file->block, file->off, data, diff);
            if (err) {
                return err;
            }
        }

        file->pos += diff;
        file->off += diff;
        data += diff;
        nsize -= diff;
    }

    return size;
}
3305
3306 static lfs_ssize_t lfs_file_rawread(lfs_t *lfs, lfs_file_t *file,
3307 void *buffer, lfs_size_t size) {
3308 LFS_ASSERT((file->flags & LFS_O_RDONLY) == LFS_O_RDONLY);
3309
3310 #ifndef LFS_READONLY
3311 if (file->flags & LFS_F_WRITING) {
3312 // flush out any writes
3313 int err = lfs_file_flush(lfs, file);
3314 if (err) {
3315 return err;
3316 }
3317 }
3318 #endif
3319
3320 return lfs_file_flushedread(lfs, file, buffer, size);
3321 }
3322
3323
3324 #ifndef LFS_READONLY
// Write size bytes from buffer at the file's current position.
//
// Assumes any buffered reads have been dropped and append/zero-fill
// handling was already done by the caller. Returns size on success or a
// negative error code, setting LFS_F_ERRED on failure so sync knows the
// file state may be inconsistent.
static lfs_ssize_t lfs_file_flushedwrite(lfs_t *lfs, lfs_file_t *file,
        const void *buffer, lfs_size_t size) {
    const uint8_t *data = buffer;
    lfs_size_t nsize = size;

    // inline files are limited by the smallest of 0x3fe (the most the
    // tag's size field can encode), the cache size, and 1/8 of the
    // metadata capacity; past that the file is moved to its own blocks
    if ((file->flags & LFS_F_INLINE) &&
            lfs_max(file->pos+nsize, file->ctz.size) >
            lfs_min(0x3fe, lfs_min(
                lfs->cfg->cache_size,
                (lfs->cfg->metadata_max ?
                    lfs->cfg->metadata_max : lfs->cfg->block_size) / 8))) {
        // inline file doesn't fit anymore
        int err = lfs_file_outline(lfs, file);
        if (err) {
            file->flags |= LFS_F_ERRED;
            return err;
        }
    }

    while (nsize > 0) {
        // check if we need a new block
        if (!(file->flags & LFS_F_WRITING) ||
                file->off == lfs->cfg->block_size) {
            if (!(file->flags & LFS_F_INLINE)) {
                if (!(file->flags & LFS_F_WRITING) && file->pos > 0) {
                    // find out which block we're extending from
                    int err = lfs_ctz_find(lfs, NULL, &file->cache,
                            file->ctz.head, file->ctz.size,
                            file->pos-1, &file->block, &file->off);
                    if (err) {
                        file->flags |= LFS_F_ERRED;
                        return err;
                    }

                    // mark cache as dirty since we may have read data into it
                    lfs_cache_zero(lfs, &file->cache);
                }

                // extend file with new blocks
                lfs_alloc_ack(lfs);
                int err = lfs_ctz_extend(lfs, &file->cache, &lfs->rcache,
                        file->block, file->pos,
                        &file->block, &file->off);
                if (err) {
                    file->flags |= LFS_F_ERRED;
                    return err;
                }
            } else {
                file->block = LFS_BLOCK_INLINE;
                file->off = file->pos;
            }

            file->flags |= LFS_F_WRITING;
        }

        // program as much as we can in current block
        lfs_size_t diff = lfs_min(nsize, lfs->cfg->block_size - file->off);
        while (true) {
            int err = lfs_bd_prog(lfs, &file->cache, &lfs->rcache, true,
                    file->block, file->off, data, diff);
            if (err) {
                if (err == LFS_ERR_CORRUPT) {
                    goto relocate;
                }
                file->flags |= LFS_F_ERRED;
                return err;
            }

            break;
relocate:
            // bad block, move our data elsewhere and retry the prog
            err = lfs_file_relocate(lfs, file);
            if (err) {
                file->flags |= LFS_F_ERRED;
                return err;
            }
        }

        file->pos += diff;
        file->off += diff;
        data += diff;
        nsize -= diff;

        lfs_alloc_ack(lfs);
    }

    return size;
}
3412
// Write to an open file, handling append mode, the file size limit, and
// zero-filling any gap created by seeking past the end of the file.
static lfs_ssize_t lfs_file_rawwrite(lfs_t *lfs, lfs_file_t *file,
        const void *buffer, lfs_size_t size) {
    LFS_ASSERT((file->flags & LFS_O_WRONLY) == LFS_O_WRONLY);

    if (file->flags & LFS_F_READING) {
        // drop any reads
        int err = lfs_file_flush(lfs, file);
        if (err) {
            return err;
        }
    }

    // in append mode writes always go to the end of the file
    if ((file->flags & LFS_O_APPEND) && file->pos < file->ctz.size) {
        file->pos = file->ctz.size;
    }

    if (file->pos + size > lfs->file_max) {
        // Larger than file limit?
        return LFS_ERR_FBIG;
    }

    if (!(file->flags & LFS_F_WRITING) && file->pos > file->ctz.size) {
        // fill with zeros
        lfs_off_t pos = file->pos;
        file->pos = file->ctz.size;

        while (file->pos < pos) {
            // one byte at a time; flushedwrite advances file->pos
            lfs_ssize_t res = lfs_file_flushedwrite(lfs, file, &(uint8_t){0}, 1);
            if (res < 0) {
                return res;
            }
        }
    }

    lfs_ssize_t nsize = lfs_file_flushedwrite(lfs, file, buffer, size);
    if (nsize < 0) {
        return nsize;
    }

    // successful write clears any sticky error state
    file->flags &= ~LFS_F_ERRED;
    return nsize;
}
3455 #endif
3456
// Change the file position; whence is LFS_SEEK_SET/CUR/END.
//
// Returns the new position or a negative error code. Skips the flush
// entirely when the new position still falls inside the currently
// cached block of a read-only stream.
static lfs_soff_t lfs_file_rawseek(lfs_t *lfs, lfs_file_t *file,
        lfs_soff_t off, int whence) {
    // find new pos
    lfs_off_t npos = file->pos;
    if (whence == LFS_SEEK_SET) {
        npos = off;
    } else if (whence == LFS_SEEK_CUR) {
        if ((lfs_soff_t)file->pos + off < 0) {
            // seeking before the start of the file is invalid
            return LFS_ERR_INVAL;
        } else {
            npos = file->pos + off;
        }
    } else if (whence == LFS_SEEK_END) {
        lfs_soff_t res = lfs_file_rawsize(lfs, file) + off;
        if (res < 0) {
            return LFS_ERR_INVAL;
        } else {
            npos = res;
        }
    }

    if (npos > lfs->file_max) {
        // file position out of range
        return LFS_ERR_INVAL;
    }

    if (file->pos == npos) {
        // noop - position has not changed
        return npos;
    }

    // if we're only reading and our new offset is still in the file's cache
    // we can avoid flushing and needing to reread the data
    if (
#ifndef LFS_READONLY
        !(file->flags & LFS_F_WRITING)
#else
        true
#endif
            ) {
        // same ctz block and inside the cached window?
        int oindex = lfs_ctz_index(lfs, &(lfs_off_t){file->pos});
        lfs_off_t noff = npos;
        int nindex = lfs_ctz_index(lfs, &noff);
        if (oindex == nindex
                && noff >= file->cache.off
                && noff < file->cache.off + file->cache.size) {
            file->pos = npos;
            file->off = noff;
            return npos;
        }
    }

    // write out everything beforehand, may be noop if rdonly
    int err = lfs_file_flush(lfs, file);
    if (err) {
        return err;
    }

    // update pos
    file->pos = npos;
    return npos;
}
3519
3520 #ifndef LFS_READONLY
// Truncate or extend the file to exactly size bytes.
//
// Shrinking rewrites the ctz head to an earlier block in the skip-list;
// growing seeks to the end and writes zeros. The original file position
// is restored afterwards. Returns 0 or a negative error code.
static int lfs_file_rawtruncate(lfs_t *lfs, lfs_file_t *file, lfs_off_t size) {
    LFS_ASSERT((file->flags & LFS_O_WRONLY) == LFS_O_WRONLY);

    if (size > LFS_FILE_MAX) {
        return LFS_ERR_INVAL;
    }

    lfs_off_t pos = file->pos;
    lfs_off_t oldsize = lfs_file_rawsize(lfs, file);
    if (size < oldsize) {
        // need to flush since directly changing metadata
        int err = lfs_file_flush(lfs, file);
        if (err) {
            return err;
        }

        // lookup new head in ctz skip list
        err = lfs_ctz_find(lfs, NULL, &file->cache,
                file->ctz.head, file->ctz.size,
                size, &file->block, &file->off);
        if (err) {
            return err;
        }

        // need to set pos/block/off consistently so seeking back to
        // the old position does not get confused
        file->pos = size;
        file->ctz.head = file->block;
        file->ctz.size = size;
        file->flags |= LFS_F_DIRTY | LFS_F_READING;
    } else if (size > oldsize) {
        // flush+seek if not already at end
        lfs_soff_t res = lfs_file_rawseek(lfs, file, 0, LFS_SEEK_END);
        if (res < 0) {
            return (int)res;
        }

        // fill with zeros
        while (file->pos < size) {
            res = lfs_file_rawwrite(lfs, file, &(uint8_t){0}, 1);
            if (res < 0) {
                return (int)res;
            }
        }
    }

    // restore pos
    lfs_soff_t res = lfs_file_rawseek(lfs, file, pos, LFS_SEEK_SET);
    if (res < 0) {
        return (int)res;
    }

    return 0;
}
3575 #endif
3576
3577 static lfs_soff_t lfs_file_rawtell(lfs_t *lfs, lfs_file_t *file) {
3578 (void)lfs;
3579 return file->pos;
3580 }
3581
3582 static int lfs_file_rawrewind(lfs_t *lfs, lfs_file_t *file) {
3583 lfs_soff_t res = lfs_file_rawseek(lfs, file, 0, LFS_SEEK_SET);
3584 if (res < 0) {
3585 return (int)res;
3586 }
3587
3588 return 0;
3589 }
3590
3591 static lfs_soff_t lfs_file_rawsize(lfs_t *lfs, lfs_file_t *file) {
3592 (void)lfs;
3593
3594 #ifndef LFS_READONLY
3595 if (file->flags & LFS_F_WRITING) {
3596 return lfs_max(file->pos, file->ctz.size);
3597 }
3598 #endif
3599
3600 return file->ctz.size;
3601 }
3602
3603
3604 /// General fs operations ///
3605 static int lfs_rawstat(lfs_t *lfs, const char *path, struct lfs_info *info) {
3606 lfs_mdir_t cwd;
3607 lfs_stag_t tag = lfs_dir_find(lfs, &cwd, &path, NULL);
3608 if (tag < 0) {
3609 return (int)tag;
3610 }
3611
3612 return lfs_dir_getinfo(lfs, &cwd, lfs_tag_id(tag), info);
3613 }
3614
3615 #ifndef LFS_READONLY
// Remove the file or directory at path.
//
// Directories must be empty. While a directory is being removed the
// filesystem is temporarily marked as orphaned in gstate until the
// directory is dropped from the metadata tail-list. Returns 0 or a
// negative error code.
static int lfs_rawremove(lfs_t *lfs, const char *path) {
    // deorphan if we haven't yet, needed at most once after poweron
    int err = lfs_fs_forceconsistency(lfs);
    if (err) {
        return err;
    }

    lfs_mdir_t cwd;
    lfs_stag_t tag = lfs_dir_find(lfs, &cwd, &path, NULL);
    if (tag < 0 || lfs_tag_id(tag) == 0x3ff) {
        // not found, or path resolves to the root (id 0x3ff)
        return (tag < 0) ? (int)tag : LFS_ERR_INVAL;
    }

    struct lfs_mlist dir;
    dir.next = lfs->mlist;
    if (lfs_tag_type3(tag) == LFS_TYPE_DIR) {
        // must be empty before removal
        lfs_block_t pair[2];
        lfs_stag_t res = lfs_dir_get(lfs, &cwd, LFS_MKTAG(0x700, 0x3ff, 0),
                LFS_MKTAG(LFS_TYPE_STRUCT, lfs_tag_id(tag), 8), pair);
        if (res < 0) {
            return (int)res;
        }
        lfs_pair_fromle32(pair);

        err = lfs_dir_fetch(lfs, &dir.m, pair);
        if (err) {
            return err;
        }

        if (dir.m.count > 0 || dir.m.split) {
            return LFS_ERR_NOTEMPTY;
        }

        // mark fs as orphaned
        err = lfs_fs_preporphans(lfs, +1);
        if (err) {
            return err;
        }

        // I know it's crazy but yes, dir can be changed by our parent's
        // commit (if predecessor is child)
        dir.type = 0;
        dir.id = 0;
        lfs->mlist = &dir;
    }

    // delete the entry
    err = lfs_dir_commit(lfs, &cwd, LFS_MKATTRS(
            {LFS_MKTAG(LFS_TYPE_DELETE, lfs_tag_id(tag), 0), NULL}));
    if (err) {
        lfs->mlist = dir.next;
        return err;
    }

    lfs->mlist = dir.next;
    if (lfs_tag_type3(tag) == LFS_TYPE_DIR) {
        // fix orphan
        err = lfs_fs_preporphans(lfs, -1);
        if (err) {
            return err;
        }

        // find the directory's predecessor and drop the now-unlinked
        // directory from the metadata tail-list
        err = lfs_fs_pred(lfs, dir.m.pair, &cwd);
        if (err) {
            return err;
        }

        err = lfs_dir_drop(lfs, &cwd, &dir.m);
        if (err) {
            return err;
        }
    }

    return 0;
}
3692 #endif
3693
3694 #ifndef LFS_READONLY
// Rename/move oldpath to newpath.
//
// Handles replacing an existing file, replacing an empty directory,
// renames inside the same metadata pair, and cross-pair moves which are
// made power-loss safe by staging the move in gstate. Returns 0 or a
// negative error code.
static int lfs_rawrename(lfs_t *lfs, const char *oldpath, const char *newpath) {
    // deorphan if we haven't yet, needed at most once after poweron
    int err = lfs_fs_forceconsistency(lfs);
    if (err) {
        return err;
    }

    // find old entry
    lfs_mdir_t oldcwd;
    lfs_stag_t oldtag = lfs_dir_find(lfs, &oldcwd, &oldpath, NULL);
    if (oldtag < 0 || lfs_tag_id(oldtag) == 0x3ff) {
        return (oldtag < 0) ? (int)oldtag : LFS_ERR_INVAL;
    }

    // find new entry
    lfs_mdir_t newcwd;
    uint16_t newid;
    lfs_stag_t prevtag = lfs_dir_find(lfs, &newcwd, &newpath, &newid);
    if ((prevtag < 0 || lfs_tag_id(prevtag) == 0x3ff) &&
            !(prevtag == LFS_ERR_NOENT && newid != 0x3ff)) {
        return (prevtag < 0) ? (int)prevtag : LFS_ERR_INVAL;
    }

    // if we're in the same pair there's a few special cases...
    bool samepair = (lfs_pair_cmp(oldcwd.pair, newcwd.pair) == 0);
    uint16_t newoldid = lfs_tag_id(oldtag);

    struct lfs_mlist prevdir;
    prevdir.next = lfs->mlist;
    if (prevtag == LFS_ERR_NOENT) {
        // check that name fits
        lfs_size_t nlen = strlen(newpath);
        if (nlen > lfs->name_max) {
            return LFS_ERR_NAMETOOLONG;
        }

        // there is a small chance we are being renamed in the same
        // directory/ to an id less than our old id, the global update
        // to handle this is a bit messy
        if (samepair && newid <= newoldid) {
            newoldid += 1;
        }
    } else if (lfs_tag_type3(prevtag) != lfs_tag_type3(oldtag)) {
        // renaming a file over a dir (or vice versa) is not allowed
        return LFS_ERR_ISDIR;
    } else if (samepair && newid == newoldid) {
        // we're renaming to ourselves??
        return 0;
    } else if (lfs_tag_type3(prevtag) == LFS_TYPE_DIR) {
        // must be empty before removal
        lfs_block_t prevpair[2];
        lfs_stag_t res = lfs_dir_get(lfs, &newcwd, LFS_MKTAG(0x700, 0x3ff, 0),
                LFS_MKTAG(LFS_TYPE_STRUCT, newid, 8), prevpair);
        if (res < 0) {
            return (int)res;
        }
        lfs_pair_fromle32(prevpair);

        // must be empty before removal
        err = lfs_dir_fetch(lfs, &prevdir.m, prevpair);
        if (err) {
            return err;
        }

        if (prevdir.m.count > 0 || prevdir.m.split) {
            return LFS_ERR_NOTEMPTY;
        }

        // mark fs as orphaned
        err = lfs_fs_preporphans(lfs, +1);
        if (err) {
            return err;
        }

        // I know it's crazy but yes, dir can be changed by our parent's
        // commit (if predecessor is child)
        prevdir.type = 0;
        prevdir.id = 0;
        lfs->mlist = &prevdir;
    }

    if (!samepair) {
        // stage the move in gstate so a power loss between the two
        // commits can be recovered by lfs_fs_demove
        lfs_fs_prepmove(lfs, newoldid, oldcwd.pair);
    }

    // move over all attributes
    err = lfs_dir_commit(lfs, &newcwd, LFS_MKATTRS(
            {LFS_MKTAG_IF(prevtag != LFS_ERR_NOENT,
                LFS_TYPE_DELETE, newid, 0), NULL},
            {LFS_MKTAG(LFS_TYPE_CREATE, newid, 0), NULL},
            {LFS_MKTAG(lfs_tag_type3(oldtag), newid, strlen(newpath)), newpath},
            {LFS_MKTAG(LFS_FROM_MOVE, newid, lfs_tag_id(oldtag)), &oldcwd},
            {LFS_MKTAG_IF(samepair,
                LFS_TYPE_DELETE, newoldid, 0), NULL}));
    if (err) {
        lfs->mlist = prevdir.next;
        return err;
    }

    // let commit clean up after move (if we're different! otherwise move
    // logic already fixed it for us)
    if (!samepair && lfs_gstate_hasmove(&lfs->gstate)) {
        // prep gstate and delete move id
        lfs_fs_prepmove(lfs, 0x3ff, NULL);
        err = lfs_dir_commit(lfs, &oldcwd, LFS_MKATTRS(
                {LFS_MKTAG(LFS_TYPE_DELETE, lfs_tag_id(oldtag), 0), NULL}));
        if (err) {
            lfs->mlist = prevdir.next;
            return err;
        }
    }

    lfs->mlist = prevdir.next;
    if (prevtag != LFS_ERR_NOENT
            && lfs_tag_type3(prevtag) == LFS_TYPE_DIR) {
        // fix orphan
        err = lfs_fs_preporphans(lfs, -1);
        if (err) {
            return err;
        }

        err = lfs_fs_pred(lfs, prevdir.m.pair, &newcwd);
        if (err) {
            return err;
        }

        err = lfs_dir_drop(lfs, &newcwd, &prevdir.m);
        if (err) {
            return err;
        }
    }

    return 0;
}
3828 #endif
3829
// Read the custom attribute of the given type attached to path.
//
// Returns the attribute's size on disk (which may exceed the bytes
// copied into buffer), LFS_ERR_NOATTR if the attribute does not exist,
// or another negative error code.
static lfs_ssize_t lfs_rawgetattr(lfs_t *lfs, const char *path,
        uint8_t type, void *buffer, lfs_size_t size) {
    lfs_mdir_t cwd;
    lfs_stag_t tag = lfs_dir_find(lfs, &cwd, &path, NULL);
    if (tag < 0) {
        return tag;
    }

    uint16_t id = lfs_tag_id(tag);
    if (id == 0x3ff) {
        // special case for root
        id = 0;
        int err = lfs_dir_fetch(lfs, &cwd, lfs->root);
        if (err) {
            return err;
        }
    }

    // reads are capped at attr_max
    tag = lfs_dir_get(lfs, &cwd, LFS_MKTAG(0x7ff, 0x3ff, 0),
            LFS_MKTAG(LFS_TYPE_USERATTR + type,
                id, lfs_min(size, lfs->attr_max)),
            buffer);
    if (tag < 0) {
        if (tag == LFS_ERR_NOENT) {
            return LFS_ERR_NOATTR;
        }

        return tag;
    }

    return lfs_tag_size(tag);
}
3862
3863 #ifndef LFS_READONLY
// Commit a user-attribute tag for path to its metadata pair.
//
// Shared helper for set/remove; lfs_rawremoveattr passes buffer == NULL
// with size 0x3ff to mark the attribute as deleted. Returns 0 or a
// negative error code.
static int lfs_commitattr(lfs_t *lfs, const char *path,
        uint8_t type, const void *buffer, lfs_size_t size) {
    lfs_mdir_t cwd;
    lfs_stag_t tag = lfs_dir_find(lfs, &cwd, &path, NULL);
    if (tag < 0) {
        return tag;
    }

    uint16_t id = lfs_tag_id(tag);
    if (id == 0x3ff) {
        // special case for root
        id = 0;
        int err = lfs_dir_fetch(lfs, &cwd, lfs->root);
        if (err) {
            return err;
        }
    }

    return lfs_dir_commit(lfs, &cwd, LFS_MKATTRS(
            {LFS_MKTAG(LFS_TYPE_USERATTR + type, id, size), buffer}));
}
3885 #endif
3886
3887 #ifndef LFS_READONLY
3888 static int lfs_rawsetattr(lfs_t *lfs, const char *path,
3889 uint8_t type, const void *buffer, lfs_size_t size) {
3890 if (size > lfs->attr_max) {
3891 return LFS_ERR_NOSPC;
3892 }
3893
3894 return lfs_commitattr(lfs, path, type, buffer, size);
3895 }
3896 #endif
3897
3898 #ifndef LFS_READONLY
// Remove a custom attribute from the file or directory at path; the
// special size 0x3ff with a NULL buffer marks the attribute as deleted.
static int lfs_rawremoveattr(lfs_t *lfs, const char *path, uint8_t type) {
    return lfs_commitattr(lfs, path, type, NULL, 0x3ff);
}
3902 #endif
3903
3904
3905 /// Filesystem operations ///
// Initialize the in-RAM lfs state from the user configuration.
//
// Validates the configured geometry, allocates (or adopts the
// user-provided) read/prog caches and lookahead buffer, and resolves
// the name/file/attr limits. Does not touch the disk. On failure any
// buffers allocated here are freed via lfs_deinit.
static int lfs_init(lfs_t *lfs, const struct lfs_config *cfg) {
    lfs->cfg = cfg;
    int err = 0;

    // validate that the lfs-cfg sizes were initiated properly before
    // performing any arithmetic logics with them
    LFS_ASSERT(lfs->cfg->read_size != 0);
    LFS_ASSERT(lfs->cfg->prog_size != 0);
    LFS_ASSERT(lfs->cfg->cache_size != 0);

    // check that block size is a multiple of cache size is a multiple
    // of prog and read sizes
    LFS_ASSERT(lfs->cfg->cache_size % lfs->cfg->read_size == 0);
    LFS_ASSERT(lfs->cfg->cache_size % lfs->cfg->prog_size == 0);
    LFS_ASSERT(lfs->cfg->block_size % lfs->cfg->cache_size == 0);

    // check that the block size is large enough to fit ctz pointers
    LFS_ASSERT(4*lfs_npw2(0xffffffff / (lfs->cfg->block_size-2*4))
            <= lfs->cfg->block_size);

    // block_cycles = 0 is no longer supported.
    //
    // block_cycles is the number of erase cycles before littlefs evicts
    // metadata logs as a part of wear leveling. Suggested values are in the
    // range of 100-1000, or set block_cycles to -1 to disable block-level
    // wear-leveling.
    LFS_ASSERT(lfs->cfg->block_cycles != 0);


    // setup read cache
    if (lfs->cfg->read_buffer) {
        lfs->rcache.buffer = lfs->cfg->read_buffer;
    } else {
        lfs->rcache.buffer = lfs_malloc(lfs->cfg->cache_size);
        if (!lfs->rcache.buffer) {
            err = LFS_ERR_NOMEM;
            goto cleanup;
        }
    }

    // setup program cache
    if (lfs->cfg->prog_buffer) {
        lfs->pcache.buffer = lfs->cfg->prog_buffer;
    } else {
        lfs->pcache.buffer = lfs_malloc(lfs->cfg->cache_size);
        if (!lfs->pcache.buffer) {
            err = LFS_ERR_NOMEM;
            goto cleanup;
        }
    }

    // zero to avoid information leaks
    lfs_cache_zero(lfs, &lfs->rcache);
    lfs_cache_zero(lfs, &lfs->pcache);

    // setup lookahead, must be multiple of 64-bits, 32-bit aligned
    LFS_ASSERT(lfs->cfg->lookahead_size > 0);
    LFS_ASSERT(lfs->cfg->lookahead_size % 8 == 0 &&
            (uintptr_t)lfs->cfg->lookahead_buffer % 4 == 0);
    if (lfs->cfg->lookahead_buffer) {
        lfs->free.buffer = lfs->cfg->lookahead_buffer;
    } else {
        lfs->free.buffer = lfs_malloc(lfs->cfg->lookahead_size);
        if (!lfs->free.buffer) {
            err = LFS_ERR_NOMEM;
            goto cleanup;
        }
    }

    // check that the size limits are sane
    // (a configured limit of 0 means "use the compile-time default")
    LFS_ASSERT(lfs->cfg->name_max <= LFS_NAME_MAX);
    lfs->name_max = lfs->cfg->name_max;
    if (!lfs->name_max) {
        lfs->name_max = LFS_NAME_MAX;
    }

    LFS_ASSERT(lfs->cfg->file_max <= LFS_FILE_MAX);
    lfs->file_max = lfs->cfg->file_max;
    if (!lfs->file_max) {
        lfs->file_max = LFS_FILE_MAX;
    }

    LFS_ASSERT(lfs->cfg->attr_max <= LFS_ATTR_MAX);
    lfs->attr_max = lfs->cfg->attr_max;
    if (!lfs->attr_max) {
        lfs->attr_max = LFS_ATTR_MAX;
    }

    LFS_ASSERT(lfs->cfg->metadata_max <= lfs->cfg->block_size);

    // setup default state
    lfs->root[0] = LFS_BLOCK_NULL;
    lfs->root[1] = LFS_BLOCK_NULL;
    lfs->mlist = NULL;
    lfs->seed = 0;
    lfs->gdisk = (lfs_gstate_t){0};
    lfs->gstate = (lfs_gstate_t){0};
    lfs->gdelta = (lfs_gstate_t){0};
#ifdef LFS_MIGRATE
    lfs->lfs1 = NULL;
#endif

    return 0;

cleanup:
    lfs_deinit(lfs);
    return err;
}
4014
4015 static int lfs_deinit(lfs_t *lfs) {
4016 // free allocated memory
4017 if (!lfs->cfg->read_buffer) {
4018 lfs_free(lfs->rcache.buffer);
4019 }
4020
4021 if (!lfs->cfg->prog_buffer) {
4022 lfs_free(lfs->pcache.buffer);
4023 }
4024
4025 if (!lfs->cfg->lookahead_buffer) {
4026 lfs_free(lfs->free.buffer);
4027 }
4028
4029 return 0;
4030 }
4031
4032 #ifndef LFS_READONLY
// Format the block device with a fresh littlefs filesystem.
//
// Seeds the free lookahead, allocates a root metadata pair, commits the
// superblock entry, then force-compacts so an older littlefs image on
// the same blocks can't be accidentally mounted. Always deinits before
// returning; format does not leave the filesystem mounted.
static int lfs_rawformat(lfs_t *lfs, const struct lfs_config *cfg) {
    int err = 0;
    {
        err = lfs_init(lfs, cfg);
        if (err) {
            return err;
        }

        // create free lookahead
        memset(lfs->free.buffer, 0, lfs->cfg->lookahead_size);
        lfs->free.off = 0;
        lfs->free.size = lfs_min(8*lfs->cfg->lookahead_size,
                lfs->cfg->block_count);
        lfs->free.i = 0;
        lfs_alloc_ack(lfs);

        // create root dir
        lfs_mdir_t root;
        err = lfs_dir_alloc(lfs, &root);
        if (err) {
            goto cleanup;
        }

        // write one superblock
        lfs_superblock_t superblock = {
            .version     = LFS_DISK_VERSION,
            .block_size  = lfs->cfg->block_size,
            .block_count = lfs->cfg->block_count,
            .name_max    = lfs->name_max,
            .file_max    = lfs->file_max,
            .attr_max    = lfs->attr_max,
        };

        lfs_superblock_tole32(&superblock);
        err = lfs_dir_commit(lfs, &root, LFS_MKATTRS(
                {LFS_MKTAG(LFS_TYPE_CREATE, 0, 0), NULL},
                {LFS_MKTAG(LFS_TYPE_SUPERBLOCK, 0, 8), "littlefs"},
                {LFS_MKTAG(LFS_TYPE_INLINESTRUCT, 0, sizeof(superblock)),
                    &superblock}));
        if (err) {
            goto cleanup;
        }

        // force compaction to prevent accidentally mounting any
        // older version of littlefs that may live on disk
        root.erased = false;
        err = lfs_dir_commit(lfs, &root, NULL, 0);
        if (err) {
            goto cleanup;
        }

        // sanity check that fetch works
        err = lfs_dir_fetch(lfs, &root, (const lfs_block_t[2]){0, 1});
        if (err) {
            goto cleanup;
        }
    }

cleanup:
    lfs_deinit(lfs);
    return err;

}
4096 #endif
4097
4098 static int lfs_rawmount(lfs_t *lfs, const struct lfs_config *cfg) {
4099 int err = lfs_init(lfs, cfg);
4100 if (err) {
4101 return err;
4102 }
4103
4104 // scan directory blocks for superblock and any global updates
4105 lfs_mdir_t dir = {.tail = {0, 1}};
4106 lfs_block_t cycle = 0;
4107 while (!lfs_pair_isnull(dir.tail)) {
4108 if (cycle >= lfs->cfg->block_count/2) {
4109 // loop detected
4110 err = LFS_ERR_CORRUPT;
4111 goto cleanup;
4112 }
4113 cycle += 1;
4114
4115 // fetch next block in tail list
4116 lfs_stag_t tag = lfs_dir_fetchmatch(lfs, &dir, dir.tail,
4117 LFS_MKTAG(0x7ff, 0x3ff, 0),
4118 LFS_MKTAG(LFS_TYPE_SUPERBLOCK, 0, 8),
4119 NULL,
4120 lfs_dir_find_match, &(struct lfs_dir_find_match){
4121 lfs, "littlefs", 8});
4122 if (tag < 0) {
4123 err = tag;
4124 goto cleanup;
4125 }
4126
4127 // has superblock?
4128 if (tag && !lfs_tag_isdelete(tag)) {
4129 // update root
4130 lfs->root[0] = dir.pair[0];
4131 lfs->root[1] = dir.pair[1];
4132
4133 // grab superblock
4134 lfs_superblock_t superblock;
4135 tag = lfs_dir_get(lfs, &dir, LFS_MKTAG(0x7ff, 0x3ff, 0),
4136 LFS_MKTAG(LFS_TYPE_INLINESTRUCT, 0, sizeof(superblock)),
4137 &superblock);
4138 if (tag < 0) {
4139 err = tag;
4140 goto cleanup;
4141 }
4142 lfs_superblock_fromle32(&superblock);
4143
4144 // check version
4145 uint16_t major_version = (0xffff & (superblock.version >> 16));
4146 uint16_t minor_version = (0xffff & (superblock.version >> 0));
4147 if ((major_version != LFS_DISK_VERSION_MAJOR ||
4148 minor_version > LFS_DISK_VERSION_MINOR)) {
4149 LFS_ERROR("Invalid version v%"PRIu16".%"PRIu16,
4150 major_version, minor_version);
4151 err = LFS_ERR_INVAL;
4152 goto cleanup;
4153 }
4154
4155 // check superblock configuration
4156 if (superblock.name_max) {
4157 if (superblock.name_max > lfs->name_max) {
4158 LFS_ERROR("Unsupported name_max (%"PRIu32" > %"PRIu32")",
4159 superblock.name_max, lfs->name_max);
4160 err = LFS_ERR_INVAL;
4161 goto cleanup;
4162 }
4163
4164 lfs->name_max = superblock.name_max;
4165 }
4166
4167 if (superblock.file_max) {
4168 if (superblock.file_max > lfs->file_max) {
4169 LFS_ERROR("Unsupported file_max (%"PRIu32" > %"PRIu32")",
4170 superblock.file_max, lfs->file_max);
4171 err = LFS_ERR_INVAL;
4172 goto cleanup;
4173 }
4174
4175 lfs->file_max = superblock.file_max;
4176 }
4177
4178 if (superblock.attr_max) {
4179 if (superblock.attr_max > lfs->attr_max) {
4180 LFS_ERROR("Unsupported attr_max (%"PRIu32" > %"PRIu32")",
4181 superblock.attr_max, lfs->attr_max);
4182 err = LFS_ERR_INVAL;
4183 goto cleanup;
4184 }
4185
4186 lfs->attr_max = superblock.attr_max;
4187 }
4188
4189 if (superblock.block_count != lfs->cfg->block_count) {
4190 LFS_ERROR("Invalid block count (%"PRIu32" != %"PRIu32")",
4191 superblock.block_count, lfs->cfg->block_count);
4192 err = LFS_ERR_INVAL;
4193 goto cleanup;
4194 }
4195
4196 if (superblock.block_size != lfs->cfg->block_size) {
4197 LFS_ERROR("Invalid block size (%"PRIu32" != %"PRIu32")",
4198 superblock.block_count, lfs->cfg->block_count);
4199 err = LFS_ERR_INVAL;
4200 goto cleanup;
4201 }
4202 }
4203
4204 // has gstate?
4205 err = lfs_dir_getgstate(lfs, &dir, &lfs->gstate);
4206 if (err) {
4207 goto cleanup;
4208 }
4209 }
4210
4211 // found superblock?
4212 if (lfs_pair_isnull(lfs->root)) {
4213 err = LFS_ERR_INVAL;
4214 goto cleanup;
4215 }
4216
4217 // update littlefs with gstate
4218 if (!lfs_gstate_iszero(&lfs->gstate)) {
4219 LFS_DEBUG("Found pending gstate 0x%08"PRIx32"%08"PRIx32"%08"PRIx32,
4220 lfs->gstate.tag,
4221 lfs->gstate.pair[0],
4222 lfs->gstate.pair[1]);
4223 }
4224 lfs->gstate.tag += !lfs_tag_isvalid(lfs->gstate.tag);
4225 lfs->gdisk = lfs->gstate;
4226
4227 // setup free lookahead, to distribute allocations uniformly across
4228 // boots, we start the allocator at a random location
4229 lfs->free.off = lfs->seed % lfs->cfg->block_count;
4230 lfs_alloc_drop(lfs);
4231
4232 return 0;
4233
4234 cleanup:
4235 lfs_rawunmount(lfs);
4236 return err;
4237 }
4238
// Unmount the filesystem, releasing any buffers allocated at init/mount.
static int lfs_rawunmount(lfs_t *lfs) {
    return lfs_deinit(lfs);
}
4242
4243
4244 /// Filesystem filesystem operations ///
// Traverse every block currently in use by the filesystem, invoking cb
// with each block address (blocks may be reported more than once).
//
// Covers metadata pairs, ctz-file blocks, blocks held by open files
// with unflushed state, and (when includeorphans is set) dir-struct
// pairs reachable only from directory entries. Returns 0 or the first
// nonzero value returned by cb / a negative error code.
int lfs_fs_rawtraverse(lfs_t *lfs,
        int (*cb)(void *data, lfs_block_t block), void *data,
        bool includeorphans) {
    // iterate over metadata pairs
    lfs_mdir_t dir = {.tail = {0, 1}};

#ifdef LFS_MIGRATE
    // also consider v1 blocks during migration
    if (lfs->lfs1) {
        int err = lfs1_traverse(lfs, cb, data);
        if (err) {
            return err;
        }

        dir.tail[0] = lfs->root[0];
        dir.tail[1] = lfs->root[1];
    }
#endif

    lfs_block_t cycle = 0;
    while (!lfs_pair_isnull(dir.tail)) {
        // more than block_count/2 metadata pairs means the tail-list loops
        if (cycle >= lfs->cfg->block_count/2) {
            // loop detected
            return LFS_ERR_CORRUPT;
        }
        cycle += 1;

        // report both blocks of the metadata pair
        for (int i = 0; i < 2; i++) {
            int err = cb(data, dir.tail[i]);
            if (err) {
                return err;
            }
        }

        // iterate through ids in directory
        int err = lfs_dir_fetch(lfs, &dir, dir.tail);
        if (err) {
            return err;
        }

        for (uint16_t id = 0; id < dir.count; id++) {
            struct lfs_ctz ctz;
            lfs_stag_t tag = lfs_dir_get(lfs, &dir, LFS_MKTAG(0x700, 0x3ff, 0),
                    LFS_MKTAG(LFS_TYPE_STRUCT, id, sizeof(ctz)), &ctz);
            if (tag < 0) {
                if (tag == LFS_ERR_NOENT) {
                    continue;
                }
                return tag;
            }
            lfs_ctz_fromle32(&ctz);

            if (lfs_tag_type3(tag) == LFS_TYPE_CTZSTRUCT) {
                err = lfs_ctz_traverse(lfs, NULL, &lfs->rcache,
                        ctz.head, ctz.size, cb, data);
                if (err) {
                    return err;
                }
            } else if (includeorphans &&
                    lfs_tag_type3(tag) == LFS_TYPE_DIRSTRUCT) {
                // dirstructs store a block pair; the struct fields
                // overlay the two block addresses
                for (int i = 0; i < 2; i++) {
                    err = cb(data, (&ctz.head)[i]);
                    if (err) {
                        return err;
                    }
                }
            }
        }
    }

#ifndef LFS_READONLY
    // iterate over any open files
    for (lfs_file_t *f = (lfs_file_t*)lfs->mlist; f; f = f->next) {
        if (f->type != LFS_TYPE_REG) {
            continue;
        }

        // unsynced ctz list still references its on-disk blocks
        if ((f->flags & LFS_F_DIRTY) && !(f->flags & LFS_F_INLINE)) {
            int err = lfs_ctz_traverse(lfs, &f->cache, &lfs->rcache,
                    f->ctz.head, f->ctz.size, cb, data);
            if (err) {
                return err;
            }
        }

        // blocks written but not yet flushed are also in use
        if ((f->flags & LFS_F_WRITING) && !(f->flags & LFS_F_INLINE)) {
            int err = lfs_ctz_traverse(lfs, &f->cache, &lfs->rcache,
                    f->block, f->pos, cb, data);
            if (err) {
                return err;
            }
        }
    }
#endif

    return 0;
}
4342
4343 #ifndef LFS_READONLY
4344 static int lfs_fs_pred(lfs_t *lfs,
4345 const lfs_block_t pair[2], lfs_mdir_t *pdir) {
4346 // iterate over all directory directory entries
4347 pdir->tail[0] = 0;
4348 pdir->tail[1] = 1;
4349 lfs_block_t cycle = 0;
4350 while (!lfs_pair_isnull(pdir->tail)) {
4351 if (cycle >= lfs->cfg->block_count/2) {
4352 // loop detected
4353 return LFS_ERR_CORRUPT;
4354 }
4355 cycle += 1;
4356
4357 if (lfs_pair_cmp(pdir->tail, pair) == 0) {
4358 return 0;
4359 }
4360
4361 int err = lfs_dir_fetch(lfs, pdir, pdir->tail);
4362 if (err) {
4363 return err;
4364 }
4365 }
4366
4367 return LFS_ERR_NOENT;
4368 }
4369 #endif
4370
4371 #ifndef LFS_READONLY
// Context passed to the lfs_fs_parent_match callback: the pair of
// blocks whose parent directory entry we are searching for.
struct lfs_fs_parent_match {
    lfs_t *lfs;
    const lfs_block_t pair[2];
};
4376 #endif
4377
4378 #ifndef LFS_READONLY
// Fetchmatch callback comparing an on-disk dirstruct pair against the
// pair we're searching for.
//
// buffer is an lfs_diskoff locating the candidate pair on disk. Returns
// LFS_CMP_EQ on a match, LFS_CMP_LT otherwise, or a negative error code
// if the disk read fails.
static int lfs_fs_parent_match(void *data,
        lfs_tag_t tag, const void *buffer) {
    struct lfs_fs_parent_match *find = data;
    lfs_t *lfs = find->lfs;
    const struct lfs_diskoff *disk = buffer;
    (void)tag;

    // read the candidate pair off disk
    lfs_block_t child[2];
    int err = lfs_bd_read(lfs,
            &lfs->pcache, &lfs->rcache, lfs->cfg->block_size,
            disk->block, disk->off, &child, sizeof(child));
    if (err) {
        return err;
    }

    lfs_pair_fromle32(child);
    return (lfs_pair_cmp(child, find->pair) == 0) ? LFS_CMP_EQ : LFS_CMP_LT;
}
4397 #endif
4398
4399 #ifndef LFS_READONLY
// Find the metadata pair containing the directory entry that points at
// pair, scanning every metadata pair with fetchmatch and the
// lfs_fs_parent_match callback.
//
// Returns the matching dirstruct tag (with *parent fetched),
// LFS_ERR_NOENT if pair has no parent, LFS_ERR_CORRUPT on a tail-list
// loop, or another negative error code.
static lfs_stag_t lfs_fs_parent(lfs_t *lfs, const lfs_block_t pair[2],
        lfs_mdir_t *parent) {
    // use fetchmatch with callback to find pairs
    parent->tail[0] = 0;
    parent->tail[1] = 1;
    lfs_block_t cycle = 0;
    while (!lfs_pair_isnull(parent->tail)) {
        // more than block_count/2 metadata pairs means the tail-list loops
        if (cycle >= lfs->cfg->block_count/2) {
            // loop detected
            return LFS_ERR_CORRUPT;
        }
        cycle += 1;

        lfs_stag_t tag = lfs_dir_fetchmatch(lfs, parent, parent->tail,
                LFS_MKTAG(0x7ff, 0, 0x3ff),
                LFS_MKTAG(LFS_TYPE_DIRSTRUCT, 0, 8),
                NULL,
                lfs_fs_parent_match, &(struct lfs_fs_parent_match){
                    lfs, {pair[0], pair[1]}});
        // nonzero tag is a match; negative non-NOENT is a real error
        if (tag && tag != LFS_ERR_NOENT) {
            return tag;
        }
    }

    return LFS_ERR_NOENT;
}
4426 #endif
4427
4428 #ifndef LFS_READONLY
// Adjust the orphan count tracked in the in-RAM gstate tag by orphans
// (+1/-1).
//
// The count is stored in the tag's size field; bit 31 is kept in sync
// as a "has orphans" flag. Only modifies in-RAM gstate.
static int lfs_fs_preporphans(lfs_t *lfs, int8_t orphans) {
    // never decrement below zero orphans
    LFS_ASSERT(lfs_tag_size(lfs->gstate.tag) > 0 || orphans >= 0);
    lfs->gstate.tag += orphans;
    lfs->gstate.tag = ((lfs->gstate.tag & ~LFS_MKTAG(0x800, 0, 0)) |
            ((uint32_t)lfs_gstate_hasorphans(&lfs->gstate) << 31));

    return 0;
}
4437 #endif
4438
4439 #ifndef LFS_READONLY
// Record (or clear) a pending move in the in-RAM gstate.
//
// id == 0x3ff clears the pending move; otherwise a delete-tag for id on
// the given metadata pair is staged, allowing an interrupted move to be
// completed later by lfs_fs_demove.
static void lfs_fs_prepmove(lfs_t *lfs,
        uint16_t id, const lfs_block_t pair[2]) {
    lfs->gstate.tag = ((lfs->gstate.tag & ~LFS_MKTAG(0x7ff, 0x3ff, 0)) |
            ((id != 0x3ff) ? LFS_MKTAG(LFS_TYPE_DELETE, id, 0) : 0));
    lfs->gstate.pair[0] = (id != 0x3ff) ? pair[0] : 0;
    lfs->gstate.pair[1] = (id != 0x3ff) ? pair[1] : 0;
}
4447 #endif
4448
4449 #ifndef LFS_READONLY
// Complete a move that was interrupted before its second commit.
//
// If the on-disk gstate records a pending move, delete the moved-from
// entry and clear the move from gstate in a single commit. No-op when
// no move is pending. Returns 0 or a negative error code.
static int lfs_fs_demove(lfs_t *lfs) {
    if (!lfs_gstate_hasmove(&lfs->gdisk)) {
        return 0;
    }

    // Fix bad moves
    LFS_DEBUG("Fixing move {0x%"PRIx32", 0x%"PRIx32"} 0x%"PRIx16,
            lfs->gdisk.pair[0],
            lfs->gdisk.pair[1],
            lfs_tag_id(lfs->gdisk.tag));

    // fetch and delete the moved entry
    lfs_mdir_t movedir;
    int err = lfs_dir_fetch(lfs, &movedir, lfs->gdisk.pair);
    if (err) {
        return err;
    }

    // prep gstate and delete move id
    uint16_t moveid = lfs_tag_id(lfs->gdisk.tag);
    lfs_fs_prepmove(lfs, 0x3ff, NULL);
    err = lfs_dir_commit(lfs, &movedir, LFS_MKATTRS(
            {LFS_MKTAG(LFS_TYPE_DELETE, moveid, 0), NULL}));
    if (err) {
        return err;
    }

    return 0;
}
4479 #endif
4480
4481 #ifndef LFS_READONLY
// Fix orphaned metadata pairs left behind by interrupted operations.
//
// Two kinds of orphans are handled while walking the metadata thread:
// - full orphans: a pair with no parent entry at all; only removed
//   when `powerloss` is set, since normal operations (e.g. lfs_mkdir)
//   create these intentionally mid-operation
// - half-orphans: a parent entry pointing at an outdated pair after a
//   relocation; the parent's tail is resynced
//
// The scan restarts whenever a fixing commit may itself have created
// new orphans (LFS_OK_ORPHANED). Returns 0 on success or a negative
// error code.
static int lfs_fs_deorphan(lfs_t *lfs, bool powerloss) {
    if (!lfs_gstate_hasorphans(&lfs->gstate)) {
        return 0;
    }

    // count of orphans actually fixed, subtracted from gstate at the end
    int8_t found = 0;
restart:
    {
        // Fix any orphans
        // start from a pseudo-parent whose tail is the superblock pair
        lfs_mdir_t pdir = {.split = true, .tail = {0, 1}};
        lfs_mdir_t dir;

        // iterate over all directory directory entries
        while (!lfs_pair_isnull(pdir.tail)) {
            int err = lfs_dir_fetch(lfs, &dir, pdir.tail);
            if (err) {
                return err;
            }

            // check head blocks for orphans
            if (!pdir.split) {
                // check if we have a parent
                lfs_mdir_t parent;
                lfs_stag_t tag = lfs_fs_parent(lfs, pdir.tail, &parent);
                if (tag < 0 && tag != LFS_ERR_NOENT) {
                    return tag;
                }

                // note we only check for full orphans if we may have had a
                // power-loss, otherwise orphans are created intentionally
                // during operations such as lfs_mkdir
                if (tag == LFS_ERR_NOENT && powerloss) {
                    // we are an orphan
                    LFS_DEBUG("Fixing orphan {0x%"PRIx32", 0x%"PRIx32"}",
                            pdir.tail[0], pdir.tail[1]);

                    // steal state
                    err = lfs_dir_getgstate(lfs, &dir, &lfs->gdelta);
                    if (err) {
                        return err;
                    }

                    // steal tail
                    lfs_pair_tole32(dir.tail);
                    int state = lfs_dir_orphaningcommit(lfs, &pdir, LFS_MKATTRS(
                            {LFS_MKTAG(LFS_TYPE_TAIL + dir.split, 0x3ff, 8),
                                dir.tail}));
                    lfs_pair_fromle32(dir.tail);
                    if (state < 0) {
                        return state;
                    }

                    found += 1;

                    // did our commit create more orphans?
                    if (state == LFS_OK_ORPHANED) {
                        goto restart;
                    }

                    // refetch tail
                    continue;
                }

                if (tag != LFS_ERR_NOENT) {
                    lfs_block_t pair[2];
                    lfs_stag_t state = lfs_dir_get(lfs, &parent,
                            LFS_MKTAG(0x7ff, 0x3ff, 0), tag, pair);
                    if (state < 0) {
                        return state;
                    }
                    lfs_pair_fromle32(pair);

                    if (!lfs_pair_sync(pair, pdir.tail)) {
                        // we have desynced
                        LFS_DEBUG("Fixing half-orphan "
                                "{0x%"PRIx32", 0x%"PRIx32"} "
                                "-> {0x%"PRIx32", 0x%"PRIx32"}",
                                pdir.tail[0], pdir.tail[1], pair[0], pair[1]);

                        // fix pending move in this pair? this looks like an
                        // optimization but is in fact _required_ since
                        // relocating may outdate the move.
                        uint16_t moveid = 0x3ff;
                        if (lfs_gstate_hasmovehere(&lfs->gstate, pdir.pair)) {
                            moveid = lfs_tag_id(lfs->gstate.tag);
                            LFS_DEBUG("Fixing move while fixing orphans "
                                    "{0x%"PRIx32", 0x%"PRIx32"} 0x%"PRIx16"\n",
                                    pdir.pair[0], pdir.pair[1], moveid);
                            lfs_fs_prepmove(lfs, 0x3ff, NULL);
                        }

                        lfs_pair_tole32(pair);
                        state = lfs_dir_orphaningcommit(lfs, &pdir, LFS_MKATTRS(
                                {LFS_MKTAG_IF(moveid != 0x3ff,
                                    LFS_TYPE_DELETE, moveid, 0), NULL},
                                {LFS_MKTAG(LFS_TYPE_SOFTTAIL, 0x3ff, 8),
                                    pair}));
                        lfs_pair_fromle32(pair);
                        if (state < 0) {
                            return state;
                        }

                        found += 1;

                        // did our commit create more orphans?
                        if (state == LFS_OK_ORPHANED) {
                            goto restart;
                        }

                        // refetch tail
                        continue;
                    }
                }
            }

            pdir = dir;
        }
    }

    // mark orphans as fixed
    // gstate may claim more orphans than were found; only subtract
    // the number actually fixed
    return lfs_fs_preporphans(lfs, -lfs_min(
            lfs_gstate_getorphans(&lfs->gstate),
            found));
}
4606 #endif
4607
4608 #ifndef LFS_READONLY
4609 static int lfs_fs_forceconsistency(lfs_t *lfs) {
4610 int err = lfs_fs_demove(lfs);
4611 if (err) {
4612 return err;
4613 }
4614
4615 err = lfs_fs_deorphan(lfs, true);
4616 if (err) {
4617 return err;
4618 }
4619
4620 return 0;
4621 }
4622 #endif
4623
4624 static int lfs_fs_size_count(void *p, lfs_block_t block) {
4625 (void)block;
4626 lfs_size_t *size = p;
4627 *size += 1;
4628 return 0;
4629 }
4630
4631 static lfs_ssize_t lfs_fs_rawsize(lfs_t *lfs) {
4632 lfs_size_t size = 0;
4633 int err = lfs_fs_rawtraverse(lfs, lfs_fs_size_count, &size, false);
4634 if (err) {
4635 return err;
4636 }
4637
4638 return size;
4639 }
4640
4641 #ifdef LFS_MIGRATE
4642 ////// Migration from littelfs v1 below this //////
4643
4644 /// Version info ///
4645
4646 // Software library version
4647 // Major (top-nibble), incremented on backwards incompatible changes
4648 // Minor (bottom-nibble), incremented on feature additions
4649 #define LFS1_VERSION 0x00010007
4650 #define LFS1_VERSION_MAJOR (0xffff & (LFS1_VERSION >> 16))
4651 #define LFS1_VERSION_MINOR (0xffff & (LFS1_VERSION >> 0))
4652
4653 // Version of On-disk data structures
4654 // Major (top-nibble), incremented on backwards incompatible changes
4655 // Minor (bottom-nibble), incremented on feature additions
4656 #define LFS1_DISK_VERSION 0x00010001
4657 #define LFS1_DISK_VERSION_MAJOR (0xffff & (LFS1_DISK_VERSION >> 16))
4658 #define LFS1_DISK_VERSION_MINOR (0xffff & (LFS1_DISK_VERSION >> 0))
4659
4660
4661 /// v1 Definitions ///
4662
4663 // File types
// On-disk entry types used by littlefs v1.
enum lfs1_type {
    LFS1_TYPE_REG = 0x11,        // regular file (data in a ctz list)
    LFS1_TYPE_DIR = 0x22,        // directory (points at a metadata pair)
    LFS1_TYPE_SUPERBLOCK = 0x2e, // superblock entry
};
4669
// Minimal v1 filesystem state carried around during migration:
// just the v1 root directory pair.
typedef struct lfs1 {
    lfs_block_t root[2];
} lfs1_t;
4673
// An entry in a v1 directory.
typedef struct lfs1_entry {
    lfs_off_t off;  // byte offset of this entry within its metadata block

    // on-disk layout: 4-byte header, then elen+alen bytes of
    // entry/attribute data, then the nlen-byte name
    struct lfs1_disk_entry {
        uint8_t type;  // one of enum lfs1_type (0x80 flags an in-flight move)
        uint8_t elen;  // length of entry-specific data (the union below)
        uint8_t alen;  // length of attribute data
        uint8_t nlen;  // length of the name that follows
        union {
            struct {
                lfs_block_t head;  // head block of the file's ctz list
                lfs_size_t size;   // file size in bytes
            } file;
            lfs_block_t dir[2];    // metadata pair of a directory
        } u;
    } d;
} lfs1_entry_t;
4691
// An open v1 directory.
typedef struct lfs1_dir {
    struct lfs1_dir *next;  // NOTE(review): unused in the migration path
                            // shown here; presumably a v1 open-dir list
    lfs_block_t pair[2];    // metadata pair currently fetched
    lfs_off_t off;          // byte offset of the next entry in pair[0]

    lfs_block_t head[2];    // first metadata pair of the directory
    lfs_off_t pos;          // logical position across chained blocks

    // on-disk dir header
    struct lfs1_disk_dir {
        uint32_t rev;        // revision count; larger (signed-compare) wins
        lfs_size_t size;     // size in bytes; top bit = "has tail" flag
        lfs_block_t tail[2]; // next metadata pair in the directory chain
    } d;
} lfs1_dir_t;
4706
// The v1 superblock entry, stored in metadata pair {0, 1}.
typedef struct lfs1_superblock {
    lfs_off_t off;  // byte offset of the entry within its block

    struct lfs1_disk_superblock {
        uint8_t type;          // entry header, same layout as lfs1_disk_entry
        uint8_t elen;
        uint8_t alen;
        uint8_t nlen;
        lfs_block_t root[2];   // metadata pair of the v1 root directory
        uint32_t block_size;   // filesystem geometry at format time
        uint32_t block_count;
        uint32_t version;      // on-disk version (major << 16 | minor)
        char magic[8];         // "littlefs"
    } d;
} lfs1_superblock_t;
4722
4723
4724 /// Low-level wrappers v1->v2 ///
// Fold `size` bytes of `buffer` into the running crc, in place.
static void lfs1_crc(uint32_t *crc, const void *buffer, size_t size) {
    uint32_t next = lfs_crc(*crc, buffer, size);
    *crc = next;
}
4728
4729 static int lfs1_bd_read(lfs_t *lfs, lfs_block_t block,
4730 lfs_off_t off, void *buffer, lfs_size_t size) {
4731 // if we ever do more than writes to alternating pairs,
4732 // this may need to consider pcache
4733 return lfs_bd_read(lfs, &lfs->pcache, &lfs->rcache, size,
4734 block, off, buffer, size);
4735 }
4736
4737 static int lfs1_bd_crc(lfs_t *lfs, lfs_block_t block,
4738 lfs_off_t off, lfs_size_t size, uint32_t *crc) {
4739 for (lfs_off_t i = 0; i < size; i++) {
4740 uint8_t c;
4741 int err = lfs1_bd_read(lfs, block, off+i, &c, 1);
4742 if (err) {
4743 return err;
4744 }
4745
4746 lfs1_crc(crc, &c, 1);
4747 }
4748
4749 return 0;
4750 }
4751
4752
4753 /// Endian swapping functions ///
4754 static void lfs1_dir_fromle32(struct lfs1_disk_dir *d) {
4755 d->rev = lfs_fromle32(d->rev);
4756 d->size = lfs_fromle32(d->size);
4757 d->tail[0] = lfs_fromle32(d->tail[0]);
4758 d->tail[1] = lfs_fromle32(d->tail[1]);
4759 }
4760
4761 static void lfs1_dir_tole32(struct lfs1_disk_dir *d) {
4762 d->rev = lfs_tole32(d->rev);
4763 d->size = lfs_tole32(d->size);
4764 d->tail[0] = lfs_tole32(d->tail[0]);
4765 d->tail[1] = lfs_tole32(d->tail[1]);
4766 }
4767
4768 static void lfs1_entry_fromle32(struct lfs1_disk_entry *d) {
4769 d->u.dir[0] = lfs_fromle32(d->u.dir[0]);
4770 d->u.dir[1] = lfs_fromle32(d->u.dir[1]);
4771 }
4772
4773 static void lfs1_entry_tole32(struct lfs1_disk_entry *d) {
4774 d->u.dir[0] = lfs_tole32(d->u.dir[0]);
4775 d->u.dir[1] = lfs_tole32(d->u.dir[1]);
4776 }
4777
4778 static void lfs1_superblock_fromle32(struct lfs1_disk_superblock *d) {
4779 d->root[0] = lfs_fromle32(d->root[0]);
4780 d->root[1] = lfs_fromle32(d->root[1]);
4781 d->block_size = lfs_fromle32(d->block_size);
4782 d->block_count = lfs_fromle32(d->block_count);
4783 d->version = lfs_fromle32(d->version);
4784 }
4785
4786
4787 ///// Metadata pair and directory operations ///
4788 static inline lfs_size_t lfs1_entry_size(const lfs1_entry_t *entry) {
4789 return 4 + entry->d.elen + entry->d.alen + entry->d.nlen;
4790 }
4791
// Fetch the most recent valid revision of a v1 metadata pair.
//
// Both blocks of the pair are read, size-sanity-checked, and
// crc-validated; the block with the newest revision wins. On success
// dir->pair[0] is the block the valid dir was read from and dir->off
// points just past the header.
//
// Returns 0 on success, LFS_ERR_CORRUPT if neither block is valid, or
// another negative error code on read failure.
static int lfs1_dir_fetch(lfs_t *lfs,
        lfs1_dir_t *dir, const lfs_block_t pair[2]) {
    // copy out pair, otherwise may be aliasing dir
    const lfs_block_t tpair[2] = {pair[0], pair[1]};
    bool valid = false;

    // check both blocks for the most recent revision
    for (int i = 0; i < 2; i++) {
        struct lfs1_disk_dir test;
        int err = lfs1_bd_read(lfs, tpair[i], 0, &test, sizeof(test));
        lfs1_dir_fromle32(&test);
        if (err) {
            if (err == LFS_ERR_CORRUPT) {
                continue;
            }
            return err;
        }

        // skip if we already hold a newer revision
        if (valid && lfs_scmp(test.rev, dir->d.rev) < 0) {
            continue;
        }

        // top bit of size is the "has tail" flag; the remaining size
        // must leave room for the header + crc and fit in a block
        if ((0x7fffffff & test.size) < sizeof(test)+4 ||
                (0x7fffffff & test.size) > lfs->cfg->block_size) {
            continue;
        }

        // crc covers the little-endian header plus the dir contents
        uint32_t crc = 0xffffffff;
        lfs1_dir_tole32(&test);
        lfs1_crc(&crc, &test, sizeof(test));
        lfs1_dir_fromle32(&test);
        err = lfs1_bd_crc(lfs, tpair[i], sizeof(test),
                (0x7fffffff & test.size) - sizeof(test), &crc);
        if (err) {
            if (err == LFS_ERR_CORRUPT) {
                continue;
            }
            return err;
        }

        if (crc != 0) {
            continue;
        }

        valid = true;

        // setup dir in case it's valid
        dir->pair[0] = tpair[(i+0) % 2];
        dir->pair[1] = tpair[(i+1) % 2];
        dir->off = sizeof(dir->d);
        dir->d = test;
    }

    if (!valid) {
        LFS_ERROR("Corrupted dir pair at {0x%"PRIx32", 0x%"PRIx32"}",
                tpair[0], tpair[1]);
        return LFS_ERR_CORRUPT;
    }

    return 0;
}
4853
// Read the next entry from a v1 directory, following tail pointers
// across chained metadata blocks as needed.
//
// On success fills entry->d and entry->off (offset of the entry in
// dir->pair[0]) and advances dir->off/dir->pos. Returns LFS_ERR_NOENT
// at the end of the directory, or a negative error code.
static int lfs1_dir_next(lfs_t *lfs, lfs1_dir_t *dir, lfs1_entry_t *entry) {
    // 0x7fffffff masks off the "has tail" flag; the last 4 bytes of
    // the dir are its crc, not an entry
    while (dir->off + sizeof(entry->d) > (0x7fffffff & dir->d.size)-4) {
        if (!(0x80000000 & dir->d.size)) {
            // no tail: end of directory
            entry->off = dir->off;
            return LFS_ERR_NOENT;
        }

        // continue into the next block in the chain
        int err = lfs1_dir_fetch(lfs, dir, dir->d.tail);
        if (err) {
            return err;
        }

        dir->off = sizeof(dir->d);
        dir->pos += sizeof(dir->d) + 4;
    }

    int err = lfs1_bd_read(lfs, dir->pair[0], dir->off,
            &entry->d, sizeof(entry->d));
    lfs1_entry_fromle32(&entry->d);
    if (err) {
        return err;
    }

    entry->off = dir->off;
    dir->off += lfs1_entry_size(entry);
    dir->pos += lfs1_entry_size(entry);
    return 0;
}
4882
4883 /// littlefs v1 specific operations ///
// Traverse all blocks in use by the v1 filesystem, plus any v2
// directories already threaded into the metadata chain during
// migration, invoking cb with each block address. Blocks may be
// reported more than once.
//
// Returns 0 on success, the callback's error, or a negative error
// code from the traversal itself.
int lfs1_traverse(lfs_t *lfs, int (*cb)(void*, lfs_block_t), void *data) {
    if (lfs_pair_isnull(lfs->lfs1->root)) {
        return 0;
    }

    // iterate over metadata pairs
    lfs1_dir_t dir;
    lfs1_entry_t entry;
    lfs_block_t cwd[2] = {0, 1};

    while (true) {
        // report both blocks of the current metadata pair
        for (int i = 0; i < 2; i++) {
            int err = cb(data, cwd[i]);
            if (err) {
                return err;
            }
        }

        int err = lfs1_dir_fetch(lfs, &dir, cwd);
        if (err) {
            return err;
        }

        // iterate over contents
        while (dir.off + sizeof(entry.d) <= (0x7fffffff & dir.d.size)-4) {
            err = lfs1_bd_read(lfs, dir.pair[0], dir.off,
                    &entry.d, sizeof(entry.d));
            lfs1_entry_fromle32(&entry.d);
            if (err) {
                return err;
            }

            dir.off += lfs1_entry_size(&entry);
            // regular files carry their data blocks in a ctz list
            if ((0x70 & entry.d.type) == (0x70 & LFS1_TYPE_REG)) {
                err = lfs_ctz_traverse(lfs, NULL, &lfs->rcache,
                        entry.d.u.file.head, entry.d.u.file.size, cb, data);
                if (err) {
                    return err;
                }
            }
        }

        // we also need to check if we contain a threaded v2 directory
        lfs_mdir_t dir2 = {.split=true, .tail={cwd[0], cwd[1]}};
        while (dir2.split) {
            err = lfs_dir_fetch(lfs, &dir2, dir2.tail);
            if (err) {
                // not a v2 dir; fall back to the v1 chain below
                break;
            }

            for (int i = 0; i < 2; i++) {
                err = cb(data, dir2.pair[i]);
                if (err) {
                    return err;
                }
            }
        }

        cwd[0] = dir.d.tail[0];
        cwd[1] = dir.d.tail[1];

        if (lfs_pair_isnull(cwd)) {
            break;
        }
    }

    return 0;
}
4952
// Check whether the entry data `e` (a struct lfs1_disk_entry union)
// also appears under a non-moving entry elsewhere in the v1
// filesystem, which means the entry being examined is the stale
// source of a completed move and should be skipped.
//
// Returns true/false, or a negative error code.
static int lfs1_moved(lfs_t *lfs, const void *e) {
    if (lfs_pair_isnull(lfs->lfs1->root)) {
        return 0;
    }

    // skip superblock
    lfs1_dir_t cwd;
    int err = lfs1_dir_fetch(lfs, &cwd, (const lfs_block_t[2]){0, 1});
    if (err) {
        return err;
    }

    // iterate over all directory directory entries
    lfs1_entry_t entry;
    while (!lfs_pair_isnull(cwd.d.tail)) {
        err = lfs1_dir_fetch(lfs, &cwd, cwd.d.tail);
        if (err) {
            return err;
        }

        while (true) {
            err = lfs1_dir_next(lfs, &cwd, &entry);
            if (err && err != LFS_ERR_NOENT) {
                return err;
            }

            if (err == LFS_ERR_NOENT) {
                break;
            }

            // 0x80 flags an in-flight move source; match the entry's
            // union payload (file head/size or dir pair)
            if (!(0x80 & entry.d.type) &&
                 memcmp(&entry.d.u, e, sizeof(entry.d.u)) == 0) {
                return true;
            }
        }
    }

    return false;
}
4992
4993 /// Filesystem operations ///
// Mount a v1 filesystem for migration: initialize the v2 lfs state,
// load the v1 superblock from metadata pair {0, 1}, validate its
// magic and version, and record the v1 root pair in lfs->lfs1.
//
// On any failure the partially-initialized state is torn down via
// lfs_deinit. Returns 0 on success or a negative error code.
static int lfs1_mount(lfs_t *lfs, struct lfs1 *lfs1,
        const struct lfs_config *cfg) {
    int err = 0;
    {
        err = lfs_init(lfs, cfg);
        if (err) {
            return err;
        }

        lfs->lfs1 = lfs1;
        lfs->lfs1->root[0] = LFS_BLOCK_NULL;
        lfs->lfs1->root[1] = LFS_BLOCK_NULL;

        // setup free lookahead
        lfs->free.off = 0;
        lfs->free.size = 0;
        lfs->free.i = 0;
        lfs_alloc_ack(lfs);

        // load superblock
        lfs1_dir_t dir;
        lfs1_superblock_t superblock;
        err = lfs1_dir_fetch(lfs, &dir, (const lfs_block_t[2]){0, 1});
        if (err && err != LFS_ERR_CORRUPT) {
            goto cleanup;
        }

        if (!err) {
            // superblock entry immediately follows the dir header
            err = lfs1_bd_read(lfs, dir.pair[0], sizeof(dir.d),
                    &superblock.d, sizeof(superblock.d));
            lfs1_superblock_fromle32(&superblock.d);
            if (err) {
                goto cleanup;
            }

            lfs->lfs1->root[0] = superblock.d.root[0];
            lfs->lfs1->root[1] = superblock.d.root[1];
        }

        if (err || memcmp(superblock.d.magic, "littlefs", 8) != 0) {
            LFS_ERROR("Invalid superblock at {0x%"PRIx32", 0x%"PRIx32"}",
                    0, 1);
            err = LFS_ERR_CORRUPT;
            goto cleanup;
        }

        // major version must match exactly; a newer minor version on
        // disk may use features this code does not understand
        uint16_t major_version = (0xffff & (superblock.d.version >> 16));
        uint16_t minor_version = (0xffff & (superblock.d.version >> 0));
        if ((major_version != LFS1_DISK_VERSION_MAJOR ||
             minor_version > LFS1_DISK_VERSION_MINOR)) {
            LFS_ERROR("Invalid version v%d.%d", major_version, minor_version);
            err = LFS_ERR_INVAL;
            goto cleanup;
        }

        return 0;
    }

cleanup:
    lfs_deinit(lfs);
    return err;
}
5056
5057 static int lfs1_unmount(lfs_t *lfs) {
5058 return lfs_deinit(lfs);
5059 }
5060
5061 /// v1 migration ///
// Migrate a littlefs v1 filesystem to v2 in-place on the same block
// device.
//
// Each v1 directory is copied entry-by-entry into a freshly allocated
// v2 metadata pair, new pairs are threaded together via soft-tails,
// and finally a v2 superblock is committed over pair {0, 1} — that
// last commit is what marks the migration as successful.
//
// Returns 0 on success or a negative error code; on failure the v1
// filesystem remains mountable (until the superblock commit).
static int lfs_rawmigrate(lfs_t *lfs, const struct lfs_config *cfg) {
    struct lfs1 lfs1;
    int err = lfs1_mount(lfs, &lfs1, cfg);
    if (err) {
        return err;
    }

    {
        // iterate through each directory, copying over entries
        // into new directory
        lfs1_dir_t dir1;
        lfs_mdir_t dir2;
        dir1.d.tail[0] = lfs->lfs1->root[0];
        dir1.d.tail[1] = lfs->lfs1->root[1];
        while (!lfs_pair_isnull(dir1.d.tail)) {
            // iterate old dir
            err = lfs1_dir_fetch(lfs, &dir1, dir1.d.tail);
            if (err) {
                goto cleanup;
            }

            // create new dir and bind as temporary pretend root
            err = lfs_dir_alloc(lfs, &dir2);
            if (err) {
                goto cleanup;
            }

            // remember where the v1 dir started so the finished v2
            // dir can be copied back over it below
            dir2.rev = dir1.d.rev;
            dir1.head[0] = dir1.pair[0];
            dir1.head[1] = dir1.pair[1];
            lfs->root[0] = dir2.pair[0];
            lfs->root[1] = dir2.pair[1];

            err = lfs_dir_commit(lfs, &dir2, NULL, 0);
            if (err) {
                goto cleanup;
            }

            while (true) {
                lfs1_entry_t entry1;
                err = lfs1_dir_next(lfs, &dir1, &entry1);
                if (err && err != LFS_ERR_NOENT) {
                    goto cleanup;
                }

                if (err == LFS_ERR_NOENT) {
                    break;
                }

                // check that entry has not been moved
                if (entry1.d.type & 0x80) {
                    int moved = lfs1_moved(lfs, &entry1.d.u);
                    if (moved < 0) {
                        err = moved;
                        goto cleanup;
                    }

                    if (moved) {
                        continue;
                    }

                    entry1.d.type &= ~0x80;
                }

                // also fetch name
                char name[LFS_NAME_MAX+1];
                memset(name, 0, sizeof(name));
                err = lfs1_bd_read(lfs, dir1.pair[0],
                        entry1.off + 4+entry1.d.elen+entry1.d.alen,
                        name, entry1.d.nlen);
                if (err) {
                    goto cleanup;
                }

                bool isdir = (entry1.d.type == LFS1_TYPE_DIR);

                // create entry in new dir
                err = lfs_dir_fetch(lfs, &dir2, lfs->root);
                if (err) {
                    goto cleanup;
                }

                uint16_t id;
                err = lfs_dir_find(lfs, &dir2, &(const char*){name}, &id);
                if (!(err == LFS_ERR_NOENT && id != 0x3ff)) {
                    err = (err < 0) ? err : LFS_ERR_EXIST;
                    goto cleanup;
                }

                // the v1 entry union is reused verbatim as the v2
                // dirstruct/ctzstruct payload
                lfs1_entry_tole32(&entry1.d);
                err = lfs_dir_commit(lfs, &dir2, LFS_MKATTRS(
                        {LFS_MKTAG(LFS_TYPE_CREATE, id, 0), NULL},
                        {LFS_MKTAG_IF_ELSE(isdir,
                            LFS_TYPE_DIR, id, entry1.d.nlen,
                            LFS_TYPE_REG, id, entry1.d.nlen),
                                name},
                        {LFS_MKTAG_IF_ELSE(isdir,
                            LFS_TYPE_DIRSTRUCT, id, sizeof(entry1.d.u),
                            LFS_TYPE_CTZSTRUCT, id, sizeof(entry1.d.u)),
                                &entry1.d.u}));
                lfs1_entry_fromle32(&entry1.d);
                if (err) {
                    goto cleanup;
                }
            }

            if (!lfs_pair_isnull(dir1.d.tail)) {
                // find last block and update tail to thread into fs
                err = lfs_dir_fetch(lfs, &dir2, lfs->root);
                if (err) {
                    goto cleanup;
                }

                while (dir2.split) {
                    err = lfs_dir_fetch(lfs, &dir2, dir2.tail);
                    if (err) {
                        goto cleanup;
                    }
                }

                lfs_pair_tole32(dir2.pair);
                err = lfs_dir_commit(lfs, &dir2, LFS_MKATTRS(
                        {LFS_MKTAG(LFS_TYPE_SOFTTAIL, 0x3ff, 8), dir1.d.tail}));
                lfs_pair_fromle32(dir2.pair);
                if (err) {
                    goto cleanup;
                }
            }

            // Copy over first block to thread into fs. Unfortunately
            // if this fails there is not much we can do.
            LFS_DEBUG("Migrating {0x%"PRIx32", 0x%"PRIx32"} "
                        "-> {0x%"PRIx32", 0x%"PRIx32"}",
                    lfs->root[0], lfs->root[1], dir1.head[0], dir1.head[1]);

            // byte-copy the committed v2 dir into head[1] of the old
            // v1 pair so existing references find the new dir
            err = lfs_bd_erase(lfs, dir1.head[1]);
            if (err) {
                goto cleanup;
            }

            err = lfs_dir_fetch(lfs, &dir2, lfs->root);
            if (err) {
                goto cleanup;
            }

            for (lfs_off_t i = 0; i < dir2.off; i++) {
                uint8_t dat;
                err = lfs_bd_read(lfs,
                        NULL, &lfs->rcache, dir2.off,
                        dir2.pair[0], i, &dat, 1);
                if (err) {
                    goto cleanup;
                }

                err = lfs_bd_prog(lfs,
                        &lfs->pcache, &lfs->rcache, true,
                        dir1.head[1], i, &dat, 1);
                if (err) {
                    goto cleanup;
                }
            }

            err = lfs_bd_flush(lfs, &lfs->pcache, &lfs->rcache, true);
            if (err) {
                goto cleanup;
            }
        }

        // Create new superblock. This marks a successful migration!
        err = lfs1_dir_fetch(lfs, &dir1, (const lfs_block_t[2]){0, 1});
        if (err) {
            goto cleanup;
        }

        dir2.pair[0] = dir1.pair[0];
        dir2.pair[1] = dir1.pair[1];
        dir2.rev = dir1.d.rev;
        dir2.off = sizeof(dir2.rev);
        dir2.etag = 0xffffffff;
        dir2.count = 0;
        dir2.tail[0] = lfs->lfs1->root[0];
        dir2.tail[1] = lfs->lfs1->root[1];
        dir2.erased = false;
        dir2.split = true;

        lfs_superblock_t superblock = {
            .version = LFS_DISK_VERSION,
            .block_size = lfs->cfg->block_size,
            .block_count = lfs->cfg->block_count,
            .name_max = lfs->name_max,
            .file_max = lfs->file_max,
            .attr_max = lfs->attr_max,
        };

        lfs_superblock_tole32(&superblock);
        err = lfs_dir_commit(lfs, &dir2, LFS_MKATTRS(
                {LFS_MKTAG(LFS_TYPE_CREATE, 0, 0), NULL},
                {LFS_MKTAG(LFS_TYPE_SUPERBLOCK, 0, 8), "littlefs"},
                {LFS_MKTAG(LFS_TYPE_INLINESTRUCT, 0, sizeof(superblock)),
                    &superblock}));
        if (err) {
            goto cleanup;
        }

        // sanity check that fetch works
        err = lfs_dir_fetch(lfs, &dir2, (const lfs_block_t[2]){0, 1});
        if (err) {
            goto cleanup;
        }

        // force compaction to prevent accidentally mounting v1
        dir2.erased = false;
        err = lfs_dir_commit(lfs, &dir2, NULL, 0);
        if (err) {
            goto cleanup;
        }
    }

cleanup:
    lfs1_unmount(lfs);
    return err;
}
5284
5285 #endif
5286
5287
5288 /// Public API wrappers ///
5289
5290 // Here we can add tracing/thread safety easily
5291
5292 // Thread-safe wrappers if enabled
5293 #ifdef LFS_THREADSAFE
5294 #define LFS_LOCK(cfg) cfg->lock(cfg)
5295 #define LFS_UNLOCK(cfg) cfg->unlock(cfg)
5296 #else
5297 #define LFS_LOCK(cfg) ((void)cfg, 0)
5298 #define LFS_UNLOCK(cfg) ((void)cfg)
5299 #endif
5300
5301 // Public API
5302 #ifndef LFS_READONLY
5303 int lfs_format(lfs_t *lfs, const struct lfs_config *cfg) {
5304 int err = LFS_LOCK(cfg);
5305 if (err) {
5306 return err;
5307 }
5308 LFS_TRACE("lfs_format(%p, %p {.context=%p, "
5309 ".read=%p, .prog=%p, .erase=%p, .sync=%p, "
5310 ".read_size=%"PRIu32", .prog_size=%"PRIu32", "
5311 ".block_size=%"PRIu32", .block_count=%"PRIu32", "
5312 ".block_cycles=%"PRIu32", .cache_size=%"PRIu32", "
5313 ".lookahead_size=%"PRIu32", .read_buffer=%p, "
5314 ".prog_buffer=%p, .lookahead_buffer=%p, "
5315 ".name_max=%"PRIu32", .file_max=%"PRIu32", "
5316 ".attr_max=%"PRIu32"})",
5317 (void*)lfs, (void*)cfg, cfg->context,
5318 (void*)(uintptr_t)cfg->read, (void*)(uintptr_t)cfg->prog,
5319 (void*)(uintptr_t)cfg->erase, (void*)(uintptr_t)cfg->sync,
5320 cfg->read_size, cfg->prog_size, cfg->block_size, cfg->block_count,
5321 cfg->block_cycles, cfg->cache_size, cfg->lookahead_size,
5322 cfg->read_buffer, cfg->prog_buffer, cfg->lookahead_buffer,
5323 cfg->name_max, cfg->file_max, cfg->attr_max);
5324
5325 err = lfs_rawformat(lfs, cfg);
5326
5327 LFS_TRACE("lfs_format -> %d", err);
5328 LFS_UNLOCK(cfg);
5329 return err;
5330 }
5331 #endif
5332
5333 int lfs_mount(lfs_t *lfs, const struct lfs_config *cfg) {
5334 int err = LFS_LOCK(cfg);
5335 if (err) {
5336 return err;
5337 }
5338 LFS_TRACE("lfs_mount(%p, %p {.context=%p, "
5339 ".read=%p, .prog=%p, .erase=%p, .sync=%p, "
5340 ".read_size=%"PRIu32", .prog_size=%"PRIu32", "
5341 ".block_size=%"PRIu32", .block_count=%"PRIu32", "
5342 ".block_cycles=%"PRIu32", .cache_size=%"PRIu32", "
5343 ".lookahead_size=%"PRIu32", .read_buffer=%p, "
5344 ".prog_buffer=%p, .lookahead_buffer=%p, "
5345 ".name_max=%"PRIu32", .file_max=%"PRIu32", "
5346 ".attr_max=%"PRIu32"})",
5347 (void*)lfs, (void*)cfg, cfg->context,
5348 (void*)(uintptr_t)cfg->read, (void*)(uintptr_t)cfg->prog,
5349 (void*)(uintptr_t)cfg->erase, (void*)(uintptr_t)cfg->sync,
5350 cfg->read_size, cfg->prog_size, cfg->block_size, cfg->block_count,
5351 cfg->block_cycles, cfg->cache_size, cfg->lookahead_size,
5352 cfg->read_buffer, cfg->prog_buffer, cfg->lookahead_buffer,
5353 cfg->name_max, cfg->file_max, cfg->attr_max);
5354
5355 err = lfs_rawmount(lfs, cfg);
5356
5357 LFS_TRACE("lfs_mount -> %d", err);
5358 LFS_UNLOCK(cfg);
5359 return err;
5360 }
5361
5362 int lfs_unmount(lfs_t *lfs) {
5363 int err = LFS_LOCK(lfs->cfg);
5364 if (err) {
5365 return err;
5366 }
5367 LFS_TRACE("lfs_unmount(%p)", (void*)lfs);
5368
5369 err = lfs_rawunmount(lfs);
5370
5371 LFS_TRACE("lfs_unmount -> %d", err);
5372 LFS_UNLOCK(lfs->cfg);
5373 return err;
5374 }
5375
5376 #ifndef LFS_READONLY
5377 int lfs_remove(lfs_t *lfs, const char *path) {
5378 int err = LFS_LOCK(lfs->cfg);
5379 if (err) {
5380 return err;
5381 }
5382 LFS_TRACE("lfs_remove(%p, \"%s\")", (void*)lfs, path);
5383
5384 err = lfs_rawremove(lfs, path);
5385
5386 LFS_TRACE("lfs_remove -> %d", err);
5387 LFS_UNLOCK(lfs->cfg);
5388 return err;
5389 }
5390 #endif
5391
5392 #ifndef LFS_READONLY
5393 int lfs_rename(lfs_t *lfs, const char *oldpath, const char *newpath) {
5394 int err = LFS_LOCK(lfs->cfg);
5395 if (err) {
5396 return err;
5397 }
5398 LFS_TRACE("lfs_rename(%p, \"%s\", \"%s\")", (void*)lfs, oldpath, newpath);
5399
5400 err = lfs_rawrename(lfs, oldpath, newpath);
5401
5402 LFS_TRACE("lfs_rename -> %d", err);
5403 LFS_UNLOCK(lfs->cfg);
5404 return err;
5405 }
5406 #endif
5407
5408 int lfs_stat(lfs_t *lfs, const char *path, struct lfs_info *info) {
5409 int err = LFS_LOCK(lfs->cfg);
5410 if (err) {
5411 return err;
5412 }
5413 LFS_TRACE("lfs_stat(%p, \"%s\", %p)", (void*)lfs, path, (void*)info);
5414
5415 err = lfs_rawstat(lfs, path, info);
5416
5417 LFS_TRACE("lfs_stat -> %d", err);
5418 LFS_UNLOCK(lfs->cfg);
5419 return err;
5420 }
5421
5422 lfs_ssize_t lfs_getattr(lfs_t *lfs, const char *path,
5423 uint8_t type, void *buffer, lfs_size_t size) {
5424 int err = LFS_LOCK(lfs->cfg);
5425 if (err) {
5426 return err;
5427 }
5428 LFS_TRACE("lfs_getattr(%p, \"%s\", %"PRIu8", %p, %"PRIu32")",
5429 (void*)lfs, path, type, buffer, size);
5430
5431 lfs_ssize_t res = lfs_rawgetattr(lfs, path, type, buffer, size);
5432
5433 LFS_TRACE("lfs_getattr -> %"PRId32, res);
5434 LFS_UNLOCK(lfs->cfg);
5435 return res;
5436 }
5437
5438 #ifndef LFS_READONLY
5439 int lfs_setattr(lfs_t *lfs, const char *path,
5440 uint8_t type, const void *buffer, lfs_size_t size) {
5441 int err = LFS_LOCK(lfs->cfg);
5442 if (err) {
5443 return err;
5444 }
5445 LFS_TRACE("lfs_setattr(%p, \"%s\", %"PRIu8", %p, %"PRIu32")",
5446 (void*)lfs, path, type, buffer, size);
5447
5448 err = lfs_rawsetattr(lfs, path, type, buffer, size);
5449
5450 LFS_TRACE("lfs_setattr -> %d", err);
5451 LFS_UNLOCK(lfs->cfg);
5452 return err;
5453 }
5454 #endif
5455
5456 #ifndef LFS_READONLY
5457 int lfs_removeattr(lfs_t *lfs, const char *path, uint8_t type) {
5458 int err = LFS_LOCK(lfs->cfg);
5459 if (err) {
5460 return err;
5461 }
5462 LFS_TRACE("lfs_removeattr(%p, \"%s\", %"PRIu8")", (void*)lfs, path, type);
5463
5464 err = lfs_rawremoveattr(lfs, path, type);
5465
5466 LFS_TRACE("lfs_removeattr -> %d", err);
5467 LFS_UNLOCK(lfs->cfg);
5468 return err;
5469 }
5470 #endif
5471
5472 #ifndef LFS_NO_MALLOC
5473 int lfs_file_open(lfs_t *lfs, lfs_file_t *file, const char *path, int flags) {
5474 int err = LFS_LOCK(lfs->cfg);
5475 if (err) {
5476 return err;
5477 }
5478 LFS_TRACE("lfs_file_open(%p, %p, \"%s\", %x)",
5479 (void*)lfs, (void*)file, path, flags);
5480 LFS_ASSERT(!lfs_mlist_isopen(lfs->mlist, (struct lfs_mlist*)file));
5481
5482 err = lfs_file_rawopen(lfs, file, path, flags);
5483
5484 LFS_TRACE("lfs_file_open -> %d", err);
5485 LFS_UNLOCK(lfs->cfg);
5486 return err;
5487 }
5488 #endif
5489
5490 int lfs_file_opencfg(lfs_t *lfs, lfs_file_t *file,
5491 const char *path, int flags,
5492 const struct lfs_file_config *cfg) {
5493 int err = LFS_LOCK(lfs->cfg);
5494 if (err) {
5495 return err;
5496 }
5497 LFS_TRACE("lfs_file_opencfg(%p, %p, \"%s\", %x, %p {"
5498 ".buffer=%p, .attrs=%p, .attr_count=%"PRIu32"})",
5499 (void*)lfs, (void*)file, path, flags,
5500 (void*)cfg, cfg->buffer, (void*)cfg->attrs, cfg->attr_count);
5501 LFS_ASSERT(!lfs_mlist_isopen(lfs->mlist, (struct lfs_mlist*)file));
5502
5503 err = lfs_file_rawopencfg(lfs, file, path, flags, cfg);
5504
5505 LFS_TRACE("lfs_file_opencfg -> %d", err);
5506 LFS_UNLOCK(lfs->cfg);
5507 return err;
5508 }
5509
5510 int lfs_file_close(lfs_t *lfs, lfs_file_t *file) {
5511 int err = LFS_LOCK(lfs->cfg);
5512 if (err) {
5513 return err;
5514 }
5515 LFS_TRACE("lfs_file_close(%p, %p)", (void*)lfs, (void*)file);
5516 LFS_ASSERT(lfs_mlist_isopen(lfs->mlist, (struct lfs_mlist*)file));
5517
5518 err = lfs_file_rawclose(lfs, file);
5519
5520 LFS_TRACE("lfs_file_close -> %d", err);
5521 LFS_UNLOCK(lfs->cfg);
5522 return err;
5523 }
5524
5525 #ifndef LFS_READONLY
5526 int lfs_file_sync(lfs_t *lfs, lfs_file_t *file) {
5527 int err = LFS_LOCK(lfs->cfg);
5528 if (err) {
5529 return err;
5530 }
5531 LFS_TRACE("lfs_file_sync(%p, %p)", (void*)lfs, (void*)file);
5532 LFS_ASSERT(lfs_mlist_isopen(lfs->mlist, (struct lfs_mlist*)file));
5533
5534 err = lfs_file_rawsync(lfs, file);
5535
5536 LFS_TRACE("lfs_file_sync -> %d", err);
5537 LFS_UNLOCK(lfs->cfg);
5538 return err;
5539 }
5540 #endif
5541
5542 lfs_ssize_t lfs_file_read(lfs_t *lfs, lfs_file_t *file,
5543 void *buffer, lfs_size_t size) {
5544 int err = LFS_LOCK(lfs->cfg);
5545 if (err) {
5546 return err;
5547 }
5548 LFS_TRACE("lfs_file_read(%p, %p, %p, %"PRIu32")",
5549 (void*)lfs, (void*)file, buffer, size);
5550 LFS_ASSERT(lfs_mlist_isopen(lfs->mlist, (struct lfs_mlist*)file));
5551
5552 lfs_ssize_t res = lfs_file_rawread(lfs, file, buffer, size);
5553
5554 LFS_TRACE("lfs_file_read -> %"PRId32, res);
5555 LFS_UNLOCK(lfs->cfg);
5556 return res;
5557 }
5558
5559 #ifndef LFS_READONLY
5560 lfs_ssize_t lfs_file_write(lfs_t *lfs, lfs_file_t *file,
5561 const void *buffer, lfs_size_t size) {
5562 int err = LFS_LOCK(lfs->cfg);
5563 if (err) {
5564 return err;
5565 }
5566 LFS_TRACE("lfs_file_write(%p, %p, %p, %"PRIu32")",
5567 (void*)lfs, (void*)file, buffer, size);
5568 LFS_ASSERT(lfs_mlist_isopen(lfs->mlist, (struct lfs_mlist*)file));
5569
5570 lfs_ssize_t res = lfs_file_rawwrite(lfs, file, buffer, size);
5571
5572 LFS_TRACE("lfs_file_write -> %"PRId32, res);
5573 LFS_UNLOCK(lfs->cfg);
5574 return res;
5575 }
5576 #endif
5577
5578 lfs_soff_t lfs_file_seek(lfs_t *lfs, lfs_file_t *file,
5579 lfs_soff_t off, int whence) {
5580 int err = LFS_LOCK(lfs->cfg);
5581 if (err) {
5582 return err;
5583 }
5584 LFS_TRACE("lfs_file_seek(%p, %p, %"PRId32", %d)",
5585 (void*)lfs, (void*)file, off, whence);
5586 LFS_ASSERT(lfs_mlist_isopen(lfs->mlist, (struct lfs_mlist*)file));
5587
5588 lfs_soff_t res = lfs_file_rawseek(lfs, file, off, whence);
5589
5590 LFS_TRACE("lfs_file_seek -> %"PRId32, res);
5591 LFS_UNLOCK(lfs->cfg);
5592 return res;
5593 }
5594
5595 #ifndef LFS_READONLY
5596 int lfs_file_truncate(lfs_t *lfs, lfs_file_t *file, lfs_off_t size) {
5597 int err = LFS_LOCK(lfs->cfg);
5598 if (err) {
5599 return err;
5600 }
5601 LFS_TRACE("lfs_file_truncate(%p, %p, %"PRIu32")",
5602 (void*)lfs, (void*)file, size);
5603 LFS_ASSERT(lfs_mlist_isopen(lfs->mlist, (struct lfs_mlist*)file));
5604
5605 err = lfs_file_rawtruncate(lfs, file, size);
5606
5607 LFS_TRACE("lfs_file_truncate -> %d", err);
5608 LFS_UNLOCK(lfs->cfg);
5609 return err;
5610 }
5611 #endif
5612
5613 lfs_soff_t lfs_file_tell(lfs_t *lfs, lfs_file_t *file) {
5614 int err = LFS_LOCK(lfs->cfg);
5615 if (err) {
5616 return err;
5617 }
5618 LFS_TRACE("lfs_file_tell(%p, %p)", (void*)lfs, (void*)file);
5619 LFS_ASSERT(lfs_mlist_isopen(lfs->mlist, (struct lfs_mlist*)file));
5620
5621 lfs_soff_t res = lfs_file_rawtell(lfs, file);
5622
5623 LFS_TRACE("lfs_file_tell -> %"PRId32, res);
5624 LFS_UNLOCK(lfs->cfg);
5625 return res;
5626 }
5627
5628 int lfs_file_rewind(lfs_t *lfs, lfs_file_t *file) {
5629 int err = LFS_LOCK(lfs->cfg);
5630 if (err) {
5631 return err;
5632 }
5633 LFS_TRACE("lfs_file_rewind(%p, %p)", (void*)lfs, (void*)file);
5634
5635 err = lfs_file_rawrewind(lfs, file);
5636
5637 LFS_TRACE("lfs_file_rewind -> %d", err);
5638 LFS_UNLOCK(lfs->cfg);
5639 return err;
5640 }
5641
5642 lfs_soff_t lfs_file_size(lfs_t *lfs, lfs_file_t *file) {
5643 int err = LFS_LOCK(lfs->cfg);
5644 if (err) {
5645 return err;
5646 }
5647 LFS_TRACE("lfs_file_size(%p, %p)", (void*)lfs, (void*)file);
5648 LFS_ASSERT(lfs_mlist_isopen(lfs->mlist, (struct lfs_mlist*)file));
5649
5650 lfs_soff_t res = lfs_file_rawsize(lfs, file);
5651
5652 LFS_TRACE("lfs_file_size -> %"PRId32, res);
5653 LFS_UNLOCK(lfs->cfg);
5654 return res;
5655 }
5656
5657 #ifndef LFS_READONLY
5658 int lfs_mkdir(lfs_t *lfs, const char *path) {
5659 int err = LFS_LOCK(lfs->cfg);
5660 if (err) {
5661 return err;
5662 }
5663 LFS_TRACE("lfs_mkdir(%p, \"%s\")", (void*)lfs, path);
5664
5665 err = lfs_rawmkdir(lfs, path);
5666
5667 LFS_TRACE("lfs_mkdir -> %d", err);
5668 LFS_UNLOCK(lfs->cfg);
5669 return err;
5670 }
5671 #endif
5672
5673 int lfs_dir_open(lfs_t *lfs, lfs_dir_t *dir, const char *path) {
5674 int err = LFS_LOCK(lfs->cfg);
5675 if (err) {
5676 return err;
5677 }
5678 LFS_TRACE("lfs_dir_open(%p, %p, \"%s\")", (void*)lfs, (void*)dir, path);
5679 LFS_ASSERT(!lfs_mlist_isopen(lfs->mlist, (struct lfs_mlist*)dir));
5680
5681 err = lfs_dir_rawopen(lfs, dir, path);
5682
5683 LFS_TRACE("lfs_dir_open -> %d", err);
5684 LFS_UNLOCK(lfs->cfg);
5685 return err;
5686 }
5687
5688 int lfs_dir_close(lfs_t *lfs, lfs_dir_t *dir) {
5689 int err = LFS_LOCK(lfs->cfg);
5690 if (err) {
5691 return err;
5692 }
5693 LFS_TRACE("lfs_dir_close(%p, %p)", (void*)lfs, (void*)dir);
5694
5695 err = lfs_dir_rawclose(lfs, dir);
5696
5697 LFS_TRACE("lfs_dir_close -> %d", err);
5698 LFS_UNLOCK(lfs->cfg);
5699 return err;
5700 }
5701
5702 int lfs_dir_read(lfs_t *lfs, lfs_dir_t *dir, struct lfs_info *info) {
5703 int err = LFS_LOCK(lfs->cfg);
5704 if (err) {
5705 return err;
5706 }
5707 LFS_TRACE("lfs_dir_read(%p, %p, %p)",
5708 (void*)lfs, (void*)dir, (void*)info);
5709
5710 err = lfs_dir_rawread(lfs, dir, info);
5711
5712 LFS_TRACE("lfs_dir_read -> %d", err);
5713 LFS_UNLOCK(lfs->cfg);
5714 return err;
5715 }
5716
5717 int lfs_dir_seek(lfs_t *lfs, lfs_dir_t *dir, lfs_off_t off) {
5718 int err = LFS_LOCK(lfs->cfg);
5719 if (err) {
5720 return err;
5721 }
5722 LFS_TRACE("lfs_dir_seek(%p, %p, %"PRIu32")",
5723 (void*)lfs, (void*)dir, off);
5724
5725 err = lfs_dir_rawseek(lfs, dir, off);
5726
5727 LFS_TRACE("lfs_dir_seek -> %d", err);
5728 LFS_UNLOCK(lfs->cfg);
5729 return err;
5730 }
5731
5732 lfs_soff_t lfs_dir_tell(lfs_t *lfs, lfs_dir_t *dir) {
5733 int err = LFS_LOCK(lfs->cfg);
5734 if (err) {
5735 return err;
5736 }
5737 LFS_TRACE("lfs_dir_tell(%p, %p)", (void*)lfs, (void*)dir);
5738
5739 lfs_soff_t res = lfs_dir_rawtell(lfs, dir);
5740
5741 LFS_TRACE("lfs_dir_tell -> %"PRId32, res);
5742 LFS_UNLOCK(lfs->cfg);
5743 return res;
5744 }
5745
5746 int lfs_dir_rewind(lfs_t *lfs, lfs_dir_t *dir) {
5747 int err = LFS_LOCK(lfs->cfg);
5748 if (err) {
5749 return err;
5750 }
5751 LFS_TRACE("lfs_dir_rewind(%p, %p)", (void*)lfs, (void*)dir);
5752
5753 err = lfs_dir_rawrewind(lfs, dir);
5754
5755 LFS_TRACE("lfs_dir_rewind -> %d", err);
5756 LFS_UNLOCK(lfs->cfg);
5757 return err;
5758 }
5759
5760 lfs_ssize_t lfs_fs_size(lfs_t *lfs) {
5761 int err = LFS_LOCK(lfs->cfg);
5762 if (err) {
5763 return err;
5764 }
5765 LFS_TRACE("lfs_fs_size(%p)", (void*)lfs);
5766
5767 lfs_ssize_t res = lfs_fs_rawsize(lfs);
5768
5769 LFS_TRACE("lfs_fs_size -> %"PRId32, res);
5770 LFS_UNLOCK(lfs->cfg);
5771 return res;
5772 }
5773
5774 int lfs_fs_traverse(lfs_t *lfs, int (*cb)(void *, lfs_block_t), void *data) {
5775 int err = LFS_LOCK(lfs->cfg);
5776 if (err) {
5777 return err;
5778 }
5779 LFS_TRACE("lfs_fs_traverse(%p, %p, %p)",
5780 (void*)lfs, (void*)(uintptr_t)cb, data);
5781
5782 err = lfs_fs_rawtraverse(lfs, cb, data, true);
5783
5784 LFS_TRACE("lfs_fs_traverse -> %d", err);
5785 LFS_UNLOCK(lfs->cfg);
5786 return err;
5787 }
5788
#ifdef LFS_MIGRATE
// Public, thread-safe wrapper around lfs_rawmigrate; only compiled when
// LFS_MIGRATE is defined. Traces the full target configuration before
// delegating, and returns whatever the raw operation returns.
//
// NOTE(review): unlike every other wrapper in this file, the lock is
// taken/released on the caller-supplied `cfg` rather than `lfs->cfg` --
// presumably both describe the same lock context; confirm against
// lfs_rawmigrate's contract.
int lfs_migrate(lfs_t *lfs, const struct lfs_config *cfg) {
    int err = LFS_LOCK(cfg);
    if (err) {
        return err;
    }
    LFS_TRACE("lfs_migrate(%p, %p {.context=%p, "
                ".read=%p, .prog=%p, .erase=%p, .sync=%p, "
                ".read_size=%"PRIu32", .prog_size=%"PRIu32", "
                ".block_size=%"PRIu32", .block_count=%"PRIu32", "
                ".block_cycles=%"PRIu32", .cache_size=%"PRIu32", "
                ".lookahead_size=%"PRIu32", .read_buffer=%p, "
                ".prog_buffer=%p, .lookahead_buffer=%p, "
                ".name_max=%"PRIu32", .file_max=%"PRIu32", "
                ".attr_max=%"PRIu32"})",
            (void*)lfs, (void*)cfg, cfg->context,
            (void*)(uintptr_t)cfg->read, (void*)(uintptr_t)cfg->prog,
            (void*)(uintptr_t)cfg->erase, (void*)(uintptr_t)cfg->sync,
            cfg->read_size, cfg->prog_size, cfg->block_size, cfg->block_count,
            cfg->block_cycles, cfg->cache_size, cfg->lookahead_size,
            cfg->read_buffer, cfg->prog_buffer, cfg->lookahead_buffer,
            cfg->name_max, cfg->file_max, cfg->attr_max);

    err = lfs_rawmigrate(lfs, cfg);

    LFS_TRACE("lfs_migrate -> %d", err);
    LFS_UNLOCK(cfg);
    return err;
}
#endif
5819
5820