1 /*
2 * The little filesystem
3 *
4 * Copyright (c) 2022, The littlefs authors.
5 * Copyright (c) 2017, Arm Limited. All rights reserved.
6 * SPDX-License-Identifier: BSD-3-Clause
7 */
8 #include "lfs.h"
9 #include "lfs_util.h"
10
11
12 // some constants used throughout the code
13 #define LFS_BLOCK_NULL ((lfs_block_t)-1)
14 #define LFS_BLOCK_INLINE ((lfs_block_t)-2)
15
16 enum {
17 LFS_OK_RELOCATED = 1,
18 LFS_OK_DROPPED = 2,
19 LFS_OK_ORPHANED = 3,
20 };
21
22 enum {
23 LFS_CMP_EQ = 0,
24 LFS_CMP_LT = 1,
25 LFS_CMP_GT = 2,
26 };
27
28
29 /// Caching block device operations ///
30
// Invalidate the read cache without clearing its contents.
//
// Skipping the memset is cheaper, and safe when the cache is read-only
// or is about to be rewritten with identical data (during relocates).
static inline void lfs_cache_drop(lfs_t *lfs, lfs_cache_t *rcache) {
    (void)lfs;
    rcache->block = LFS_BLOCK_NULL;
}
37
// Invalidate the program cache and scrub its contents.
//
// The buffer is overwritten with 0xff (the erased state) so stale data
// cannot leak into later programs.
static inline void lfs_cache_zero(lfs_t *lfs, lfs_cache_t *pcache) {
    memset(pcache->buffer, 0xff, lfs->cfg->cache_size);
    pcache->block = LFS_BLOCK_NULL;
}
43
// Read size bytes from (block, off) into buffer, consulting the program
// cache, then the read cache, and finally the underlying block device.
//
// hint is the expected total read size; it allows prefetching into the
// read cache, or bypassing the cache entirely for large aligned reads.
//
// Returns 0 on success, LFS_ERR_CORRUPT if the request is out of range,
// or a negative error code from the block device.
static int lfs_bd_read(lfs_t *lfs,
        const lfs_cache_t *pcache, lfs_cache_t *rcache, lfs_size_t hint,
        lfs_block_t block, lfs_off_t off,
        void *buffer, lfs_size_t size) {
    uint8_t *data = buffer;
    // reject reads past the end of the block or to a nonexistent block
    // (block_count may still be zero before the superblock is parsed)
    if (off+size > lfs->cfg->block_size
            || (lfs->block_count && block >= lfs->block_count)) {
        return LFS_ERR_CORRUPT;
    }

    while (size > 0) {
        lfs_size_t diff = size;

        if (pcache && block == pcache->block &&
                off < pcache->off + pcache->size) {
            if (off >= pcache->off) {
                // is already in pcache?
                diff = lfs_min(diff, pcache->size - (off-pcache->off));
                memcpy(data, &pcache->buffer[off-pcache->off], diff);

                data += diff;
                off += diff;
                size -= diff;
                continue;
            }

            // pcache takes priority
            diff = lfs_min(diff, pcache->off-off);
        }

        if (block == rcache->block &&
                off < rcache->off + rcache->size) {
            if (off >= rcache->off) {
                // is already in rcache?
                diff = lfs_min(diff, rcache->size - (off-rcache->off));
                memcpy(data, &rcache->buffer[off-rcache->off], diff);

                data += diff;
                off += diff;
                size -= diff;
                continue;
            }

            // rcache takes priority
            diff = lfs_min(diff, rcache->off-off);
        }

        if (size >= hint && off % lfs->cfg->read_size == 0 &&
                size >= lfs->cfg->read_size) {
            // bypass cache?
            diff = lfs_aligndown(diff, lfs->cfg->read_size);
            int err = lfs->cfg->read(lfs->cfg, block, off, data, diff);
            if (err) {
                return err;
            }

            data += diff;
            off += diff;
            size -= diff;
            continue;
        }

        // load to cache, first condition can no longer fail
        LFS_ASSERT(!lfs->block_count || block < lfs->block_count);
        rcache->block = block;
        rcache->off = lfs_aligndown(off, lfs->cfg->read_size);
        // fill as much of the hinted region as fits in the cache,
        // clamped to the end of the block
        rcache->size = lfs_min(
                lfs_min(
                    lfs_alignup(off+hint, lfs->cfg->read_size),
                    lfs->cfg->block_size)
                - rcache->off,
                lfs->cfg->cache_size);
        int err = lfs->cfg->read(lfs->cfg, rcache->block,
                rcache->off, rcache->buffer, rcache->size);
        LFS_ASSERT(err <= 0);
        if (err) {
            return err;
        }
    }

    return 0;
}
126
// Compare on-disk data at (block, off) against buffer.
//
// Returns LFS_CMP_LT/LFS_CMP_EQ/LFS_CMP_GT as an unordered three-way
// result, or a negative error code if the read fails.
static int lfs_bd_cmp(lfs_t *lfs,
        const lfs_cache_t *pcache, lfs_cache_t *rcache, lfs_size_t hint,
        lfs_block_t block, lfs_off_t off,
        const void *buffer, lfs_size_t size) {
    const uint8_t *bytes = buffer;
    lfs_size_t chunk = 0;

    // read through the caches in small bounded chunks
    for (lfs_off_t pos = 0; pos < size; pos += chunk) {
        uint8_t scratch[8];
        chunk = lfs_min(size-pos, sizeof(scratch));

        int err = lfs_bd_read(lfs,
                pcache, rcache, hint-pos,
                block, off+pos, &scratch, chunk);
        if (err) {
            return err;
        }

        int res = memcmp(scratch, bytes + pos, chunk);
        if (res != 0) {
            return (res < 0) ? LFS_CMP_LT : LFS_CMP_GT;
        }
    }

    return LFS_CMP_EQ;
}
153
// Fold on-disk data at (block, off) into the running crc, reading
// through the caches in small bounded chunks.
static int lfs_bd_crc(lfs_t *lfs,
        const lfs_cache_t *pcache, lfs_cache_t *rcache, lfs_size_t hint,
        lfs_block_t block, lfs_off_t off, lfs_size_t size, uint32_t *crc) {
    lfs_size_t chunk = 0;

    for (lfs_off_t pos = 0; pos < size; pos += chunk) {
        uint8_t scratch[8];
        chunk = lfs_min(size-pos, sizeof(scratch));
        int err = lfs_bd_read(lfs,
                pcache, rcache, hint-pos,
                block, off+pos, &scratch, chunk);
        if (err) {
            return err;
        }

        *crc = lfs_crc(*crc, &scratch, chunk);
    }

    return 0;
}
174
#ifndef LFS_READONLY
// Write out any data pending in the program cache and, if validate is
// set, read it back through rcache to verify it landed on disk intact.
//
// Inline pcaches (LFS_BLOCK_INLINE) live only in RAM and are never
// flushed to the block device.
static int lfs_bd_flush(lfs_t *lfs,
        lfs_cache_t *pcache, lfs_cache_t *rcache, bool validate) {
    if (pcache->block != LFS_BLOCK_NULL && pcache->block != LFS_BLOCK_INLINE) {
        LFS_ASSERT(pcache->block < lfs->block_count);
        // pad the program up to the device's minimum program unit
        lfs_size_t diff = lfs_alignup(pcache->size, lfs->cfg->prog_size);
        int err = lfs->cfg->prog(lfs->cfg, pcache->block,
                pcache->off, pcache->buffer, diff);
        LFS_ASSERT(err <= 0);
        if (err) {
            return err;
        }

        if (validate) {
            // check data on disk
            lfs_cache_drop(lfs, rcache);
            int res = lfs_bd_cmp(lfs,
                    NULL, rcache, diff,
                    pcache->block, pcache->off, pcache->buffer, diff);
            if (res < 0) {
                return res;
            }

            if (res != LFS_CMP_EQ) {
                return LFS_ERR_CORRUPT;
            }
        }

        lfs_cache_zero(lfs, pcache);
    }

    return 0;
}
#endif
209
210 #ifndef LFS_READONLY
lfs_bd_sync(lfs_t * lfs,lfs_cache_t * pcache,lfs_cache_t * rcache,bool validate)211 static int lfs_bd_sync(lfs_t *lfs,
212 lfs_cache_t *pcache, lfs_cache_t *rcache, bool validate) {
213 lfs_cache_drop(lfs, rcache);
214
215 int err = lfs_bd_flush(lfs, pcache, rcache, validate);
216 if (err) {
217 return err;
218 }
219
220 err = lfs->cfg->sync(lfs->cfg);
221 LFS_ASSERT(err <= 0);
222 return err;
223 }
224 #endif
225
#ifndef LFS_READONLY
// Program size bytes to (block, off) through the program cache.
//
// Data is accumulated in pcache and flushed when the cache fills;
// callers must eventually flush/sync to guarantee the data reaches disk.
static int lfs_bd_prog(lfs_t *lfs,
        lfs_cache_t *pcache, lfs_cache_t *rcache, bool validate,
        lfs_block_t block, lfs_off_t off,
        const void *buffer, lfs_size_t size) {
    const uint8_t *data = buffer;
    LFS_ASSERT(block == LFS_BLOCK_INLINE || block < lfs->block_count);
    LFS_ASSERT(off + size <= lfs->cfg->block_size);

    while (size > 0) {
        if (block == pcache->block &&
                off >= pcache->off &&
                off < pcache->off + lfs->cfg->cache_size) {
            // already fits in pcache?
            lfs_size_t diff = lfs_min(size,
                    lfs->cfg->cache_size - (off-pcache->off));
            memcpy(&pcache->buffer[off-pcache->off], data, diff);

            data += diff;
            off += diff;
            size -= diff;

            pcache->size = lfs_max(pcache->size, off - pcache->off);
            if (pcache->size == lfs->cfg->cache_size) {
                // eagerly flush out pcache if we fill up
                int err = lfs_bd_flush(lfs, pcache, rcache, validate);
                if (err) {
                    return err;
                }
            }

            continue;
        }

        // pcache must have been flushed, either by programming and
        // entire block or manually flushing the pcache
        LFS_ASSERT(pcache->block == LFS_BLOCK_NULL);

        // prepare pcache, first condition can no longer fail
        pcache->block = block;
        pcache->off = lfs_aligndown(off, lfs->cfg->prog_size);
        pcache->size = 0;
    }

    return 0;
}
#endif
273
274 #ifndef LFS_READONLY
lfs_bd_erase(lfs_t * lfs,lfs_block_t block)275 static int lfs_bd_erase(lfs_t *lfs, lfs_block_t block) {
276 LFS_ASSERT(block < lfs->block_count);
277 int err = lfs->cfg->erase(lfs->cfg, block);
278 LFS_ASSERT(err <= 0);
279 return err;
280 }
281 #endif
282
283
284 /// Small type-level utilities ///
285 // operations on block pairs
lfs_pair_swap(lfs_block_t pair[2])286 static inline void lfs_pair_swap(lfs_block_t pair[2]) {
287 lfs_block_t t = pair[0];
288 pair[0] = pair[1];
289 pair[1] = t;
290 }
291
lfs_pair_isnull(const lfs_block_t pair[2])292 static inline bool lfs_pair_isnull(const lfs_block_t pair[2]) {
293 return pair[0] == LFS_BLOCK_NULL || pair[1] == LFS_BLOCK_NULL;
294 }
295
lfs_pair_cmp(const lfs_block_t paira[2],const lfs_block_t pairb[2])296 static inline int lfs_pair_cmp(
297 const lfs_block_t paira[2],
298 const lfs_block_t pairb[2]) {
299 return !(paira[0] == pairb[0] || paira[1] == pairb[1] ||
300 paira[0] == pairb[1] || paira[1] == pairb[0]);
301 }
302
lfs_pair_issync(const lfs_block_t paira[2],const lfs_block_t pairb[2])303 static inline bool lfs_pair_issync(
304 const lfs_block_t paira[2],
305 const lfs_block_t pairb[2]) {
306 return (paira[0] == pairb[0] && paira[1] == pairb[1]) ||
307 (paira[0] == pairb[1] && paira[1] == pairb[0]);
308 }
309
lfs_pair_fromle32(lfs_block_t pair[2])310 static inline void lfs_pair_fromle32(lfs_block_t pair[2]) {
311 pair[0] = lfs_fromle32(pair[0]);
312 pair[1] = lfs_fromle32(pair[1]);
313 }
314
315 #ifndef LFS_READONLY
lfs_pair_tole32(lfs_block_t pair[2])316 static inline void lfs_pair_tole32(lfs_block_t pair[2]) {
317 pair[0] = lfs_tole32(pair[0]);
318 pair[1] = lfs_tole32(pair[1]);
319 }
320 #endif
321
322 // operations on 32-bit entry tags
323 typedef uint32_t lfs_tag_t;
324 typedef int32_t lfs_stag_t;
325
326 #define LFS_MKTAG(type, id, size) \
327 (((lfs_tag_t)(type) << 20) | ((lfs_tag_t)(id) << 10) | (lfs_tag_t)(size))
328
329 #define LFS_MKTAG_IF(cond, type, id, size) \
330 ((cond) ? LFS_MKTAG(type, id, size) : LFS_MKTAG(LFS_FROM_NOOP, 0, 0))
331
332 #define LFS_MKTAG_IF_ELSE(cond, type1, id1, size1, type2, id2, size2) \
333 ((cond) ? LFS_MKTAG(type1, id1, size1) : LFS_MKTAG(type2, id2, size2))
334
lfs_tag_isvalid(lfs_tag_t tag)335 static inline bool lfs_tag_isvalid(lfs_tag_t tag) {
336 return !(tag & 0x80000000);
337 }
338
lfs_tag_isdelete(lfs_tag_t tag)339 static inline bool lfs_tag_isdelete(lfs_tag_t tag) {
340 return ((int32_t)(tag << 22) >> 22) == -1;
341 }
342
lfs_tag_type1(lfs_tag_t tag)343 static inline uint16_t lfs_tag_type1(lfs_tag_t tag) {
344 return (tag & 0x70000000) >> 20;
345 }
346
lfs_tag_type2(lfs_tag_t tag)347 static inline uint16_t lfs_tag_type2(lfs_tag_t tag) {
348 return (tag & 0x78000000) >> 20;
349 }
350
lfs_tag_type3(lfs_tag_t tag)351 static inline uint16_t lfs_tag_type3(lfs_tag_t tag) {
352 return (tag & 0x7ff00000) >> 20;
353 }
354
lfs_tag_chunk(lfs_tag_t tag)355 static inline uint8_t lfs_tag_chunk(lfs_tag_t tag) {
356 return (tag & 0x0ff00000) >> 20;
357 }
358
lfs_tag_splice(lfs_tag_t tag)359 static inline int8_t lfs_tag_splice(lfs_tag_t tag) {
360 return (int8_t)lfs_tag_chunk(tag);
361 }
362
lfs_tag_id(lfs_tag_t tag)363 static inline uint16_t lfs_tag_id(lfs_tag_t tag) {
364 return (tag & 0x000ffc00) >> 10;
365 }
366
lfs_tag_size(lfs_tag_t tag)367 static inline lfs_size_t lfs_tag_size(lfs_tag_t tag) {
368 return tag & 0x000003ff;
369 }
370
lfs_tag_dsize(lfs_tag_t tag)371 static inline lfs_size_t lfs_tag_dsize(lfs_tag_t tag) {
372 return sizeof(tag) + lfs_tag_size(tag + lfs_tag_isdelete(tag));
373 }
374
// operations on attributes in attribute lists
struct lfs_mattr {
    lfs_tag_t tag;          // tag describing the attribute
    const void *buffer;     // payload, interpretation depends on the tag
};

struct lfs_diskoff {
    lfs_block_t block;      // block containing the data
    lfs_off_t off;          // byte offset within that block
};

// build an on-stack attribute array + its count from a list of initializers
#define LFS_MKATTRS(...) \
    (struct lfs_mattr[]){__VA_ARGS__}, \
    sizeof((struct lfs_mattr[]){__VA_ARGS__}) / sizeof(struct lfs_mattr)
389
390 // operations on global state
lfs_gstate_xor(lfs_gstate_t * a,const lfs_gstate_t * b)391 static inline void lfs_gstate_xor(lfs_gstate_t *a, const lfs_gstate_t *b) {
392 for (int i = 0; i < 3; i++) {
393 ((uint32_t*)a)[i] ^= ((const uint32_t*)b)[i];
394 }
395 }
396
lfs_gstate_iszero(const lfs_gstate_t * a)397 static inline bool lfs_gstate_iszero(const lfs_gstate_t *a) {
398 for (int i = 0; i < 3; i++) {
399 if (((uint32_t*)a)[i] != 0) {
400 return false;
401 }
402 }
403 return true;
404 }
405
406 #ifndef LFS_READONLY
lfs_gstate_hasorphans(const lfs_gstate_t * a)407 static inline bool lfs_gstate_hasorphans(const lfs_gstate_t *a) {
408 return lfs_tag_size(a->tag);
409 }
410
lfs_gstate_getorphans(const lfs_gstate_t * a)411 static inline uint8_t lfs_gstate_getorphans(const lfs_gstate_t *a) {
412 return lfs_tag_size(a->tag) & 0x1ff;
413 }
414
lfs_gstate_hasmove(const lfs_gstate_t * a)415 static inline bool lfs_gstate_hasmove(const lfs_gstate_t *a) {
416 return lfs_tag_type1(a->tag);
417 }
418 #endif
419
lfs_gstate_needssuperblock(const lfs_gstate_t * a)420 static inline bool lfs_gstate_needssuperblock(const lfs_gstate_t *a) {
421 return lfs_tag_size(a->tag) >> 9;
422 }
423
lfs_gstate_hasmovehere(const lfs_gstate_t * a,const lfs_block_t * pair)424 static inline bool lfs_gstate_hasmovehere(const lfs_gstate_t *a,
425 const lfs_block_t *pair) {
426 return lfs_tag_type1(a->tag) && lfs_pair_cmp(a->pair, pair) == 0;
427 }
428
// endian conversion of gstate as it crosses the disk boundary
static inline void lfs_gstate_fromle32(lfs_gstate_t *a) {
    a->tag = lfs_fromle32(a->tag);
    a->pair[0] = lfs_fromle32(a->pair[0]);
    a->pair[1] = lfs_fromle32(a->pair[1]);
}

#ifndef LFS_READONLY
static inline void lfs_gstate_tole32(lfs_gstate_t *a) {
    a->tag = lfs_tole32(a->tag);
    a->pair[0] = lfs_tole32(a->pair[0]);
    a->pair[1] = lfs_tole32(a->pair[1]);
}
#endif

// operations on forward-CRCs used to track erased state
struct lfs_fcrc {
    lfs_size_t size;    // size of the crc'd region
    uint32_t crc;       // expected crc of that region
};

static void lfs_fcrc_fromle32(struct lfs_fcrc *fcrc) {
    fcrc->size = lfs_fromle32(fcrc->size);
    fcrc->crc = lfs_fromle32(fcrc->crc);
}

#ifndef LFS_READONLY
static void lfs_fcrc_tole32(struct lfs_fcrc *fcrc) {
    fcrc->size = lfs_tole32(fcrc->size);
    fcrc->crc = lfs_tole32(fcrc->crc);
}
#endif

// other endianness operations
static void lfs_ctz_fromle32(struct lfs_ctz *ctz) {
    ctz->head = lfs_fromle32(ctz->head);
    ctz->size = lfs_fromle32(ctz->size);
}

#ifndef LFS_READONLY
static void lfs_ctz_tole32(struct lfs_ctz *ctz) {
    ctz->head = lfs_tole32(ctz->head);
    ctz->size = lfs_tole32(ctz->size);
}
#endif

// endian conversion of the on-disk superblock fields
static inline void lfs_superblock_fromle32(lfs_superblock_t *superblock) {
    superblock->version = lfs_fromle32(superblock->version);
    superblock->block_size = lfs_fromle32(superblock->block_size);
    superblock->block_count = lfs_fromle32(superblock->block_count);
    superblock->name_max = lfs_fromle32(superblock->name_max);
    superblock->file_max = lfs_fromle32(superblock->file_max);
    superblock->attr_max = lfs_fromle32(superblock->attr_max);
}

#ifndef LFS_READONLY
static inline void lfs_superblock_tole32(lfs_superblock_t *superblock) {
    superblock->version = lfs_tole32(superblock->version);
    superblock->block_size = lfs_tole32(superblock->block_size);
    superblock->block_count = lfs_tole32(superblock->block_count);
    superblock->name_max = lfs_tole32(superblock->name_max);
    superblock->file_max = lfs_tole32(superblock->file_max);
    superblock->attr_max = lfs_tole32(superblock->attr_max);
}
#endif
493
494 #ifndef LFS_NO_ASSERT
lfs_mlist_isopen(struct lfs_mlist * head,struct lfs_mlist * node)495 static bool lfs_mlist_isopen(struct lfs_mlist *head,
496 struct lfs_mlist *node) {
497 for (struct lfs_mlist **p = &head; *p; p = &(*p)->next) {
498 if (*p == (struct lfs_mlist*)node) {
499 return true;
500 }
501 }
502
503 return false;
504 }
505 #endif
506
// Unlink a node from the open metadata list, if present.
static void lfs_mlist_remove(lfs_t *lfs, struct lfs_mlist *mlist) {
    struct lfs_mlist **p = &lfs->mlist;
    while (*p && *p != mlist) {
        p = &(*p)->next;
    }
    if (*p) {
        *p = (*p)->next;
    }
}
515
// Push a node onto the front of the open metadata list.
static void lfs_mlist_append(lfs_t *lfs, struct lfs_mlist *mlist) {
    mlist->next = lfs->mlist;
    lfs->mlist = mlist;
}
520
521 // some other filesystem operations
lfs_fs_disk_version(lfs_t * lfs)522 static uint32_t lfs_fs_disk_version(lfs_t *lfs) {
523 (void)lfs;
524 #ifdef LFS_MULTIVERSION
525 if (lfs->cfg->disk_version) {
526 return lfs->cfg->disk_version;
527 } else
528 #endif
529 {
530 return LFS_DISK_VERSION;
531 }
532 }
533
lfs_fs_disk_version_major(lfs_t * lfs)534 static uint16_t lfs_fs_disk_version_major(lfs_t *lfs) {
535 return 0xffff & (lfs_fs_disk_version(lfs) >> 16);
536
537 }
538
lfs_fs_disk_version_minor(lfs_t * lfs)539 static uint16_t lfs_fs_disk_version_minor(lfs_t *lfs) {
540 return 0xffff & (lfs_fs_disk_version(lfs) >> 0);
541 }
542
543
544 /// Internal operations predeclared here ///
545 #ifndef LFS_READONLY
546 static int lfs_dir_commit(lfs_t *lfs, lfs_mdir_t *dir,
547 const struct lfs_mattr *attrs, int attrcount);
548 static int lfs_dir_compact(lfs_t *lfs,
549 lfs_mdir_t *dir, const struct lfs_mattr *attrs, int attrcount,
550 lfs_mdir_t *source, uint16_t begin, uint16_t end);
551 static lfs_ssize_t lfs_file_flushedwrite(lfs_t *lfs, lfs_file_t *file,
552 const void *buffer, lfs_size_t size);
553 static lfs_ssize_t lfs_file_rawwrite(lfs_t *lfs, lfs_file_t *file,
554 const void *buffer, lfs_size_t size);
555 static int lfs_file_rawsync(lfs_t *lfs, lfs_file_t *file);
556 static int lfs_file_outline(lfs_t *lfs, lfs_file_t *file);
557 static int lfs_file_flush(lfs_t *lfs, lfs_file_t *file);
558
559 static int lfs_fs_deorphan(lfs_t *lfs, bool powerloss);
560 static int lfs_fs_preporphans(lfs_t *lfs, int8_t orphans);
561 static void lfs_fs_prepmove(lfs_t *lfs,
562 uint16_t id, const lfs_block_t pair[2]);
563 static int lfs_fs_pred(lfs_t *lfs, const lfs_block_t dir[2],
564 lfs_mdir_t *pdir);
565 static lfs_stag_t lfs_fs_parent(lfs_t *lfs, const lfs_block_t dir[2],
566 lfs_mdir_t *parent);
567 static int lfs_fs_forceconsistency(lfs_t *lfs);
568 #endif
569
570 static void lfs_fs_prepsuperblock(lfs_t *lfs, bool needssuperblock);
571
572 #ifdef LFS_MIGRATE
573 static int lfs1_traverse(lfs_t *lfs,
574 int (*cb)(void*, lfs_block_t), void *data);
575 #endif
576
577 static int lfs_dir_rawrewind(lfs_t *lfs, lfs_dir_t *dir);
578
579 static lfs_ssize_t lfs_file_flushedread(lfs_t *lfs, lfs_file_t *file,
580 void *buffer, lfs_size_t size);
581 static lfs_ssize_t lfs_file_rawread(lfs_t *lfs, lfs_file_t *file,
582 void *buffer, lfs_size_t size);
583 static int lfs_file_rawclose(lfs_t *lfs, lfs_file_t *file);
584 static lfs_soff_t lfs_file_rawsize(lfs_t *lfs, lfs_file_t *file);
585
586 static lfs_ssize_t lfs_fs_rawsize(lfs_t *lfs);
587 static int lfs_fs_rawtraverse(lfs_t *lfs,
588 int (*cb)(void *data, lfs_block_t block), void *data,
589 bool includeorphans);
590
591 static int lfs_deinit(lfs_t *lfs);
592 static int lfs_rawunmount(lfs_t *lfs);
593
594
595 /// Block allocator ///
596 #ifndef LFS_READONLY
lfs_alloc_lookahead(void * p,lfs_block_t block)597 static int lfs_alloc_lookahead(void *p, lfs_block_t block) {
598 lfs_t *lfs = (lfs_t*)p;
599 lfs_block_t off = ((block - lfs->free.off)
600 + lfs->block_count) % lfs->block_count;
601
602 if (off < lfs->free.size) {
603 lfs->free.buffer[off / 32] |= 1U << (off % 32);
604 }
605
606 return 0;
607 }
608 #endif
609
// indicate allocated blocks have been committed into the filesystem, this
// is to prevent blocks from being garbage collected in the middle of a
// commit operation
static void lfs_alloc_ack(lfs_t *lfs) {
    // every block may be considered again before the next ack
    lfs->free.ack = lfs->block_count;
}

// drop the lookahead buffer, this is done during mounting and failed
// traversals in order to avoid invalid lookahead state
static void lfs_alloc_drop(lfs_t *lfs) {
    lfs->free.size = 0;
    lfs->free.i = 0;
    lfs_alloc_ack(lfs);
}
624
#ifndef LFS_READONLY
// Advance the lookahead window and rebuild the free-block bitmap by
// traversing the filesystem and marking every in-use block.
static int lfs_fs_rawgc(lfs_t *lfs) {
    // Move free offset at the first unused block (lfs->free.i)
    // lfs->free.i is equal lfs->free.size when all blocks are used
    lfs->free.off = (lfs->free.off + lfs->free.i) % lfs->block_count;
    // the window is bounded by both the lookahead buffer (8 bits/byte)
    // and the number of blocks still allocatable before an ack
    lfs->free.size = lfs_min(8*lfs->cfg->lookahead_size, lfs->free.ack);
    lfs->free.i = 0;

    // find mask of free blocks from tree
    memset(lfs->free.buffer, 0, lfs->cfg->lookahead_size);
    int err = lfs_fs_rawtraverse(lfs, lfs_alloc_lookahead, lfs, true);
    if (err) {
        lfs_alloc_drop(lfs);
        return err;
    }

    return 0;
}
#endif
644
#ifndef LFS_READONLY
// Allocate an unused block, scanning the lookahead bitmap and
// regenerating it via lfs_fs_rawgc when the window is exhausted.
//
// Returns LFS_ERR_NOSPC once every block since the last lfs_alloc_ack
// has been considered and found in-use.
static int lfs_alloc(lfs_t *lfs, lfs_block_t *block) {
    while (true) {
        while (lfs->free.i != lfs->free.size) {
            lfs_block_t off = lfs->free.i;
            lfs->free.i += 1;
            lfs->free.ack -= 1;

            if (!(lfs->free.buffer[off / 32] & (1U << (off % 32)))) {
                // found a free block
                *block = (lfs->free.off + off) % lfs->block_count;

                // eagerly find next off so an alloc ack can
                // discredit old lookahead blocks
                while (lfs->free.i != lfs->free.size &&
                        (lfs->free.buffer[lfs->free.i / 32]
                            & (1U << (lfs->free.i % 32)))) {
                    lfs->free.i += 1;
                    lfs->free.ack -= 1;
                }

                return 0;
            }
        }

        // check if we have looked at all blocks since last ack
        if (lfs->free.ack == 0) {
            LFS_ERROR("No more free space %"PRIu32,
                    lfs->free.i + lfs->free.off);
            return LFS_ERR_NOSPC;
        }

        int err = lfs_fs_rawgc(lfs);
        if(err) {
            return err;
        }
    }
}
#endif
684
/// Metadata pair and directory operations ///
// Find the most recent attribute matching gmask/gtag in a fetched mdir,
// copying up to gsize bytes of it (starting at goff) into gbuffer.
//
// Returns the found tag (adjusted for pending creates/deletes/moves),
// LFS_ERR_NOENT if the attribute doesn't exist or was deleted, or a
// negative error code on read failure.
static lfs_stag_t lfs_dir_getslice(lfs_t *lfs, const lfs_mdir_t *dir,
        lfs_tag_t gmask, lfs_tag_t gtag,
        lfs_off_t goff, void *gbuffer, lfs_size_t gsize) {
    lfs_off_t off = dir->off;
    lfs_tag_t ntag = dir->etag;
    lfs_stag_t gdiff = 0;

    // account for a pending global-state move targeting this pair
    if (lfs_gstate_hasmovehere(&lfs->gdisk, dir->pair) &&
            lfs_tag_id(gmask) != 0 &&
            lfs_tag_id(lfs->gdisk.tag) <= lfs_tag_id(gtag)) {
        // synthetic moves
        gdiff -= LFS_MKTAG(0, 1, 0);
    }

    // iterate over dir block backwards (for faster lookups)
    while (off >= sizeof(lfs_tag_t) + lfs_tag_dsize(ntag)) {
        off -= lfs_tag_dsize(ntag);
        lfs_tag_t tag = ntag;
        int err = lfs_bd_read(lfs,
                NULL, &lfs->rcache, sizeof(ntag),
                dir->pair[0], off, &ntag, sizeof(ntag));
        if (err) {
            return err;
        }

        // tags are stored xored with the following tag
        ntag = (lfs_frombe32(ntag) ^ tag) & 0x7fffffff;

        if (lfs_tag_id(gmask) != 0 &&
                lfs_tag_type1(tag) == LFS_TYPE_SPLICE &&
                lfs_tag_id(tag) <= lfs_tag_id(gtag - gdiff)) {
            if (tag == (LFS_MKTAG(LFS_TYPE_CREATE, 0, 0) |
                    (LFS_MKTAG(0, 0x3ff, 0) & (gtag - gdiff)))) {
                // found where we were created
                return LFS_ERR_NOENT;
            }

            // move around splices
            gdiff += LFS_MKTAG(0, lfs_tag_splice(tag), 0);
        }

        if ((gmask & tag) == (gmask & (gtag - gdiff))) {
            if (lfs_tag_isdelete(tag)) {
                return LFS_ERR_NOENT;
            }

            // found a match, copy out the requested slice
            lfs_size_t diff = lfs_min(lfs_tag_size(tag), gsize);
            err = lfs_bd_read(lfs,
                    NULL, &lfs->rcache, diff,
                    dir->pair[0], off+sizeof(tag)+goff, gbuffer, diff);
            if (err) {
                return err;
            }

            // zero-pad if the stored attribute is shorter than requested
            memset((uint8_t*)gbuffer + diff, 0, gsize - diff);

            return tag + gdiff;
        }
    }

    return LFS_ERR_NOENT;
}
747
// Convenience wrapper around lfs_dir_getslice that reads a whole
// attribute, sizing the copy from the requested tag.
static lfs_stag_t lfs_dir_get(lfs_t *lfs, const lfs_mdir_t *dir,
        lfs_tag_t gmask, lfs_tag_t gtag, void *buffer) {
    return lfs_dir_getslice(lfs, dir,
            gmask, gtag,
            0, buffer, lfs_tag_size(gtag));
}
754
// Read from an inline (in-metadata) attribute as if it were a block,
// caching slices of the attribute in rcache/pcache under the synthetic
// block id LFS_BLOCK_INLINE.
//
// Mirrors lfs_bd_read, but cache misses are filled via lfs_dir_getslice
// instead of the block device.
static int lfs_dir_getread(lfs_t *lfs, const lfs_mdir_t *dir,
        const lfs_cache_t *pcache, lfs_cache_t *rcache, lfs_size_t hint,
        lfs_tag_t gmask, lfs_tag_t gtag,
        lfs_off_t off, void *buffer, lfs_size_t size) {
    uint8_t *data = buffer;
    if (off+size > lfs->cfg->block_size) {
        return LFS_ERR_CORRUPT;
    }

    while (size > 0) {
        lfs_size_t diff = size;

        if (pcache && pcache->block == LFS_BLOCK_INLINE &&
                off < pcache->off + pcache->size) {
            if (off >= pcache->off) {
                // is already in pcache?
                diff = lfs_min(diff, pcache->size - (off-pcache->off));
                memcpy(data, &pcache->buffer[off-pcache->off], diff);

                data += diff;
                off += diff;
                size -= diff;
                continue;
            }

            // pcache takes priority
            diff = lfs_min(diff, pcache->off-off);
        }

        if (rcache->block == LFS_BLOCK_INLINE &&
                off < rcache->off + rcache->size) {
            if (off >= rcache->off) {
                // is already in rcache?
                diff = lfs_min(diff, rcache->size - (off-rcache->off));
                memcpy(data, &rcache->buffer[off-rcache->off], diff);

                data += diff;
                off += diff;
                size -= diff;
                continue;
            }

            // rcache takes priority
            diff = lfs_min(diff, rcache->off-off);
        }

        // load to cache, first condition can no longer fail
        rcache->block = LFS_BLOCK_INLINE;
        rcache->off = lfs_aligndown(off, lfs->cfg->read_size);
        rcache->size = lfs_min(lfs_alignup(off+hint, lfs->cfg->read_size),
                lfs->cfg->cache_size);
        // the "read" is really a slice of the matching attribute
        int err = lfs_dir_getslice(lfs, dir, gmask, gtag,
                rcache->off, rcache->buffer, rcache->size);
        if (err < 0) {
            return err;
        }
    }

    return 0;
}
815
#ifndef LFS_READONLY
// Traversal callback that detects when the tag pointed at by p has been
// superseded by a later tag (same unique fields, or an explicit delete).
//
// Returns true when the filtered tag is redundant, replacing it with a
// noop; also adjusts its id for intervening creates/deletes.
static int lfs_dir_traverse_filter(void *p,
        lfs_tag_t tag, const void *buffer) {
    lfs_tag_t *filtertag = p;
    (void)buffer;

    // which mask depends on unique bit in tag structure
    uint32_t mask = (tag & LFS_MKTAG(0x100, 0, 0))
            ? LFS_MKTAG(0x7ff, 0x3ff, 0)
            : LFS_MKTAG(0x700, 0x3ff, 0);

    // check for redundancy
    if ((mask & tag) == (mask & *filtertag) ||
            lfs_tag_isdelete(*filtertag) ||
            (LFS_MKTAG(0x7ff, 0x3ff, 0) & tag) == (
                LFS_MKTAG(LFS_TYPE_DELETE, 0, 0) |
                    (LFS_MKTAG(0, 0x3ff, 0) & *filtertag))) {
        *filtertag = LFS_MKTAG(LFS_FROM_NOOP, 0, 0);
        return true;
    }

    // check if we need to adjust for created/deleted tags
    if (lfs_tag_type1(tag) == LFS_TYPE_SPLICE &&
            lfs_tag_id(tag) <= lfs_tag_id(*filtertag)) {
        *filtertag += LFS_MKTAG(0, lfs_tag_splice(tag), 0);
    }

    return false;
}
#endif
846
#ifndef LFS_READONLY
// maximum recursive depth of lfs_dir_traverse, the deepest call:
//
// traverse with commit
//   '-> traverse with move
//         '-> traverse with filter
//
#define LFS_DIR_TRAVERSE_DEPTH 3

// explicit stack frame for lfs_dir_traverse; mirrors its parameters plus
// the tag/buffer/disk location in flight when the recursion occurred
struct lfs_dir_traverse {
    const lfs_mdir_t *dir;          // mdir being traversed
    lfs_off_t off;                  // current offset in the mdir block
    lfs_tag_t ptag;                 // previous tag (tags are stored xored)
    const struct lfs_mattr *attrs;  // pending in-RAM attributes
    int attrcount;

    lfs_tag_t tmask;                // mask/tag pair to match against
    lfs_tag_t ttag;
    uint16_t begin;                 // id range filter [begin, end)
    uint16_t end;
    int16_t diff;                   // id adjustment from splices

    int (*cb)(void *data, lfs_tag_t tag, const void *buffer);
    void *data;

    lfs_tag_t tag;                  // tag being processed at recursion
    const void *buffer;
    struct lfs_diskoff disk;
};
876
// Traverse the tags in a metadata block plus a pending attribute list,
// invoking cb on each tag that survives mask/range filtering, with ids
// adjusted for creates/deletes and moves.
//
// Returns 0 on completion, a nonzero cb result if the callback stops the
// traversal, or a negative error code.
static int lfs_dir_traverse(lfs_t *lfs,
        const lfs_mdir_t *dir, lfs_off_t off, lfs_tag_t ptag,
        const struct lfs_mattr *attrs, int attrcount,
        lfs_tag_t tmask, lfs_tag_t ttag,
        uint16_t begin, uint16_t end, int16_t diff,
        int (*cb)(void *data, lfs_tag_t tag, const void *buffer), void *data) {
    // This function in inherently recursive, but bounded. To allow tool-based
    // analysis without unnecessary code-cost we use an explicit stack
    struct lfs_dir_traverse stack[LFS_DIR_TRAVERSE_DEPTH-1];
    unsigned sp = 0;
    int res;

    // iterate over directory and attrs
    lfs_tag_t tag;
    const void *buffer;
    struct lfs_diskoff disk = {0};
    while (true) {
        {
            if (off+lfs_tag_dsize(ptag) < dir->off) {
                off += lfs_tag_dsize(ptag);
                int err = lfs_bd_read(lfs,
                        NULL, &lfs->rcache, sizeof(tag),
                        dir->pair[0], off, &tag, sizeof(tag));
                if (err) {
                    return err;
                }

                // tags are stored xored against the previous tag
                tag = (lfs_frombe32(tag) ^ ptag) | 0x80000000;
                disk.block = dir->pair[0];
                disk.off = off+sizeof(lfs_tag_t);
                buffer = &disk;
                ptag = tag;
            } else if (attrcount > 0) {
                // exhausted on-disk tags, continue with pending attrs
                tag = attrs[0].tag;
                buffer = attrs[0].buffer;
                attrs += 1;
                attrcount -= 1;
            } else {
                // finished traversal, pop from stack?
                res = 0;
                break;
            }

            // do we need to filter?
            lfs_tag_t mask = LFS_MKTAG(0x7ff, 0, 0);
            if ((mask & tmask & tag) != (mask & tmask & ttag)) {
                continue;
            }

            if (lfs_tag_id(tmask) != 0) {
                LFS_ASSERT(sp < LFS_DIR_TRAVERSE_DEPTH);
                // recurse, scan for duplicates, and update tag based on
                // creates/deletes
                stack[sp] = (struct lfs_dir_traverse){
                    .dir = dir,
                    .off = off,
                    .ptag = ptag,
                    .attrs = attrs,
                    .attrcount = attrcount,
                    .tmask = tmask,
                    .ttag = ttag,
                    .begin = begin,
                    .end = end,
                    .diff = diff,
                    .cb = cb,
                    .data = data,
                    .tag = tag,
                    .buffer = buffer,
                    .disk = disk,
                };
                sp += 1;

                tmask = 0;
                ttag = 0;
                begin = 0;
                end = 0;
                diff = 0;
                cb = lfs_dir_traverse_filter;
                data = &stack[sp-1].tag;
                continue;
            }
        }

popped:
        // in filter range?
        if (lfs_tag_id(tmask) != 0 &&
                !(lfs_tag_id(tag) >= begin && lfs_tag_id(tag) < end)) {
            continue;
        }

        // handle special cases for mcu-side operations
        if (lfs_tag_type3(tag) == LFS_FROM_NOOP) {
            // do nothing
        } else if (lfs_tag_type3(tag) == LFS_FROM_MOVE) {
            // Without this condition, lfs_dir_traverse can exhibit an
            // extremely expensive O(n^3) of nested loops when renaming.
            // This happens because lfs_dir_traverse tries to filter tags by
            // the tags in the source directory, triggering a second
            // lfs_dir_traverse with its own filter operation.
            //
            // traverse with commit
            //   '-> traverse with filter
            //         '-> traverse with move
            //               '-> traverse with filter
            //
            // However we don't actually care about filtering the second set of
            // tags, since duplicate tags have no effect when filtering.
            //
            // This check skips this unnecessary recursive filtering explicitly,
            // reducing this runtime from O(n^3) to O(n^2).
            if (cb == lfs_dir_traverse_filter) {
                continue;
            }

            // recurse into move
            stack[sp] = (struct lfs_dir_traverse){
                .dir = dir,
                .off = off,
                .ptag = ptag,
                .attrs = attrs,
                .attrcount = attrcount,
                .tmask = tmask,
                .ttag = ttag,
                .begin = begin,
                .end = end,
                .diff = diff,
                .cb = cb,
                .data = data,
                .tag = LFS_MKTAG(LFS_FROM_NOOP, 0, 0),
            };
            sp += 1;

            // restart traversal over the move's source mdir, visiting
            // only the moved id's struct tags, shifted to the new id
            uint16_t fromid = lfs_tag_size(tag);
            uint16_t toid = lfs_tag_id(tag);
            dir = buffer;
            off = 0;
            ptag = 0xffffffff;
            attrs = NULL;
            attrcount = 0;
            tmask = LFS_MKTAG(0x600, 0x3ff, 0);
            ttag = LFS_MKTAG(LFS_TYPE_STRUCT, 0, 0);
            begin = fromid;
            end = fromid+1;
            diff = toid-fromid+diff;
        } else if (lfs_tag_type3(tag) == LFS_FROM_USERATTRS) {
            // expand a user-attribute list into individual callbacks
            for (unsigned i = 0; i < lfs_tag_size(tag); i++) {
                const struct lfs_attr *a = buffer;
                res = cb(data, LFS_MKTAG(LFS_TYPE_USERATTR + a[i].type,
                        lfs_tag_id(tag) + diff, a[i].size), a[i].buffer);
                if (res < 0) {
                    return res;
                }

                if (res) {
                    break;
                }
            }
        } else {
            res = cb(data, tag + LFS_MKTAG(0, diff, 0), buffer);
            if (res < 0) {
                return res;
            }

            if (res) {
                break;
            }
        }
    }

    if (sp > 0) {
        // pop from the stack and return, fortunately all pops share
        // a destination
        dir = stack[sp-1].dir;
        off = stack[sp-1].off;
        ptag = stack[sp-1].ptag;
        attrs = stack[sp-1].attrs;
        attrcount = stack[sp-1].attrcount;
        tmask = stack[sp-1].tmask;
        ttag = stack[sp-1].ttag;
        begin = stack[sp-1].begin;
        end = stack[sp-1].end;
        diff = stack[sp-1].diff;
        cb = stack[sp-1].cb;
        data = stack[sp-1].data;
        tag = stack[sp-1].tag;
        buffer = stack[sp-1].buffer;
        disk = stack[sp-1].disk;
        sp -= 1;
        goto popped;
    } else {
        return res;
    }
}
1070 #endif
1071
lfs_dir_fetchmatch(lfs_t * lfs,lfs_mdir_t * dir,const lfs_block_t pair[2],lfs_tag_t fmask,lfs_tag_t ftag,uint16_t * id,int (* cb)(void * data,lfs_tag_t tag,const void * buffer),void * data)1072 static lfs_stag_t lfs_dir_fetchmatch(lfs_t *lfs,
1073 lfs_mdir_t *dir, const lfs_block_t pair[2],
1074 lfs_tag_t fmask, lfs_tag_t ftag, uint16_t *id,
1075 int (*cb)(void *data, lfs_tag_t tag, const void *buffer), void *data) {
1076 // we can find tag very efficiently during a fetch, since we're already
1077 // scanning the entire directory
1078 lfs_stag_t besttag = -1;
1079
1080 // if either block address is invalid we return LFS_ERR_CORRUPT here,
1081 // otherwise later writes to the pair could fail
1082 if (lfs->block_count
1083 && (pair[0] >= lfs->block_count || pair[1] >= lfs->block_count)) {
1084 return LFS_ERR_CORRUPT;
1085 }
1086
1087 // find the block with the most recent revision
1088 uint32_t revs[2] = {0, 0};
1089 int r = 0;
1090 for (int i = 0; i < 2; i++) {
1091 int err = lfs_bd_read(lfs,
1092 NULL, &lfs->rcache, sizeof(revs[i]),
1093 pair[i], 0, &revs[i], sizeof(revs[i]));
1094 revs[i] = lfs_fromle32(revs[i]);
1095 if (err && err != LFS_ERR_CORRUPT) {
1096 return err;
1097 }
1098
1099 if (err != LFS_ERR_CORRUPT &&
1100 lfs_scmp(revs[i], revs[(i+1)%2]) > 0) {
1101 r = i;
1102 }
1103 }
1104
1105 dir->pair[0] = pair[(r+0)%2];
1106 dir->pair[1] = pair[(r+1)%2];
1107 dir->rev = revs[(r+0)%2];
1108 dir->off = 0; // nonzero = found some commits
1109
1110 // now scan tags to fetch the actual dir and find possible match
1111 for (int i = 0; i < 2; i++) {
1112 lfs_off_t off = 0;
1113 lfs_tag_t ptag = 0xffffffff;
1114
1115 uint16_t tempcount = 0;
1116 lfs_block_t temptail[2] = {LFS_BLOCK_NULL, LFS_BLOCK_NULL};
1117 bool tempsplit = false;
1118 lfs_stag_t tempbesttag = besttag;
1119
1120 // assume not erased until proven otherwise
1121 bool maybeerased = false;
1122 bool hasfcrc = false;
1123 struct lfs_fcrc fcrc;
1124
1125 dir->rev = lfs_tole32(dir->rev);
1126 uint32_t crc = lfs_crc(0xffffffff, &dir->rev, sizeof(dir->rev));
1127 dir->rev = lfs_fromle32(dir->rev);
1128
1129 while (true) {
1130 // extract next tag
1131 lfs_tag_t tag;
1132 off += lfs_tag_dsize(ptag);
1133 int err = lfs_bd_read(lfs,
1134 NULL, &lfs->rcache, lfs->cfg->block_size,
1135 dir->pair[0], off, &tag, sizeof(tag));
1136 if (err) {
1137 if (err == LFS_ERR_CORRUPT) {
1138 // can't continue?
1139 break;
1140 }
1141 return err;
1142 }
1143
1144 crc = lfs_crc(crc, &tag, sizeof(tag));
1145 tag = lfs_frombe32(tag) ^ ptag;
1146
1147 // next commit not yet programmed?
1148 if (!lfs_tag_isvalid(tag)) {
1149 // we only might be erased if the last tag was a crc
1150 maybeerased = (lfs_tag_type2(ptag) == LFS_TYPE_CCRC);
1151 break;
1152 // out of range?
1153 } else if (off + lfs_tag_dsize(tag) > lfs->cfg->block_size) {
1154 break;
1155 }
1156
1157 ptag = tag;
1158
1159 if (lfs_tag_type2(tag) == LFS_TYPE_CCRC) {
1160 // check the crc attr
1161 uint32_t dcrc;
1162 err = lfs_bd_read(lfs,
1163 NULL, &lfs->rcache, lfs->cfg->block_size,
1164 dir->pair[0], off+sizeof(tag), &dcrc, sizeof(dcrc));
1165 if (err) {
1166 if (err == LFS_ERR_CORRUPT) {
1167 break;
1168 }
1169 return err;
1170 }
1171 dcrc = lfs_fromle32(dcrc);
1172
1173 if (crc != dcrc) {
1174 break;
1175 }
1176
1177 // reset the next bit if we need to
1178 ptag ^= (lfs_tag_t)(lfs_tag_chunk(tag) & 1U) << 31;
1179
1180 // toss our crc into the filesystem seed for
1181 // pseudorandom numbers, note we use another crc here
1182 // as a collection function because it is sufficiently
1183 // random and convenient
1184 lfs->seed = lfs_crc(lfs->seed, &crc, sizeof(crc));
1185
1186 // update with what's found so far
1187 besttag = tempbesttag;
1188 dir->off = off + lfs_tag_dsize(tag);
1189 dir->etag = ptag;
1190 dir->count = tempcount;
1191 dir->tail[0] = temptail[0];
1192 dir->tail[1] = temptail[1];
1193 dir->split = tempsplit;
1194
1195 // reset crc, hasfcrc
1196 crc = 0xffffffff;
1197 continue;
1198 }
1199
1200 // crc the entry first, hopefully leaving it in the cache
1201 err = lfs_bd_crc(lfs,
1202 NULL, &lfs->rcache, lfs->cfg->block_size,
1203 dir->pair[0], off+sizeof(tag),
1204 lfs_tag_dsize(tag)-sizeof(tag), &crc);
1205 if (err) {
1206 if (err == LFS_ERR_CORRUPT) {
1207 break;
1208 }
1209 return err;
1210 }
1211
1212 // directory modification tags?
1213 if (lfs_tag_type1(tag) == LFS_TYPE_NAME) {
1214 // increase count of files if necessary
1215 if (lfs_tag_id(tag) >= tempcount) {
1216 tempcount = lfs_tag_id(tag) + 1;
1217 }
1218 } else if (lfs_tag_type1(tag) == LFS_TYPE_SPLICE) {
1219 tempcount += lfs_tag_splice(tag);
1220
1221 if (tag == (LFS_MKTAG(LFS_TYPE_DELETE, 0, 0) |
1222 (LFS_MKTAG(0, 0x3ff, 0) & tempbesttag))) {
1223 tempbesttag |= 0x80000000;
1224 } else if (tempbesttag != -1 &&
1225 lfs_tag_id(tag) <= lfs_tag_id(tempbesttag)) {
1226 tempbesttag += LFS_MKTAG(0, lfs_tag_splice(tag), 0);
1227 }
1228 } else if (lfs_tag_type1(tag) == LFS_TYPE_TAIL) {
1229 tempsplit = (lfs_tag_chunk(tag) & 1);
1230
1231 err = lfs_bd_read(lfs,
1232 NULL, &lfs->rcache, lfs->cfg->block_size,
1233 dir->pair[0], off+sizeof(tag), &temptail, 8);
1234 if (err) {
1235 if (err == LFS_ERR_CORRUPT) {
1236 break;
1237 }
1238 return err;
1239 }
1240 lfs_pair_fromle32(temptail);
1241 } else if (lfs_tag_type3(tag) == LFS_TYPE_FCRC) {
1242 err = lfs_bd_read(lfs,
1243 NULL, &lfs->rcache, lfs->cfg->block_size,
1244 dir->pair[0], off+sizeof(tag),
1245 &fcrc, sizeof(fcrc));
1246 if (err) {
1247 if (err == LFS_ERR_CORRUPT) {
1248 break;
1249 }
1250 }
1251
1252 lfs_fcrc_fromle32(&fcrc);
1253 hasfcrc = true;
1254 }
1255
1256 // found a match for our fetcher?
1257 if ((fmask & tag) == (fmask & ftag)) {
1258 int res = cb(data, tag, &(struct lfs_diskoff){
1259 dir->pair[0], off+sizeof(tag)});
1260 if (res < 0) {
1261 if (res == LFS_ERR_CORRUPT) {
1262 break;
1263 }
1264 return res;
1265 }
1266
1267 if (res == LFS_CMP_EQ) {
1268 // found a match
1269 tempbesttag = tag;
1270 } else if ((LFS_MKTAG(0x7ff, 0x3ff, 0) & tag) ==
1271 (LFS_MKTAG(0x7ff, 0x3ff, 0) & tempbesttag)) {
1272 // found an identical tag, but contents didn't match
1273 // this must mean that our besttag has been overwritten
1274 tempbesttag = -1;
1275 } else if (res == LFS_CMP_GT &&
1276 lfs_tag_id(tag) <= lfs_tag_id(tempbesttag)) {
1277 // found a greater match, keep track to keep things sorted
1278 tempbesttag = tag | 0x80000000;
1279 }
1280 }
1281 }
1282
1283 // found no valid commits?
1284 if (dir->off == 0) {
1285 // try the other block?
1286 lfs_pair_swap(dir->pair);
1287 dir->rev = revs[(r+1)%2];
1288 continue;
1289 }
1290
1291 // did we end on a valid commit? we may have an erased block
1292 dir->erased = false;
1293 if (maybeerased && dir->off % lfs->cfg->prog_size == 0) {
1294 #ifdef LFS_MULTIVERSION
1295 // note versions < lfs2.1 did not have fcrc tags, if
1296 // we're < lfs2.1 treat missing fcrc as erased data
1297 //
1298 // we don't strictly need to do this, but otherwise writing
1299 // to lfs2.0 disks becomes very inefficient
1300 if (lfs_fs_disk_version(lfs) < 0x00020001) {
1301 dir->erased = true;
1302
1303 } else
1304 #endif
1305 if (hasfcrc) {
1306 // check for an fcrc matching the next prog's erased state, if
1307 // this failed most likely a previous prog was interrupted, we
1308 // need a new erase
1309 uint32_t fcrc_ = 0xffffffff;
1310 int err = lfs_bd_crc(lfs,
1311 NULL, &lfs->rcache, lfs->cfg->block_size,
1312 dir->pair[0], dir->off, fcrc.size, &fcrc_);
1313 if (err && err != LFS_ERR_CORRUPT) {
1314 return err;
1315 }
1316
1317 // found beginning of erased part?
1318 dir->erased = (fcrc_ == fcrc.crc);
1319 }
1320 }
1321
1322 // synthetic move
1323 if (lfs_gstate_hasmovehere(&lfs->gdisk, dir->pair)) {
1324 if (lfs_tag_id(lfs->gdisk.tag) == lfs_tag_id(besttag)) {
1325 besttag |= 0x80000000;
1326 } else if (besttag != -1 &&
1327 lfs_tag_id(lfs->gdisk.tag) < lfs_tag_id(besttag)) {
1328 besttag -= LFS_MKTAG(0, 1, 0);
1329 }
1330 }
1331
1332 // found tag? or found best id?
1333 if (id) {
1334 *id = lfs_min(lfs_tag_id(besttag), dir->count);
1335 }
1336
1337 if (lfs_tag_isvalid(besttag)) {
1338 return besttag;
1339 } else if (lfs_tag_id(besttag) < dir->count) {
1340 return LFS_ERR_NOENT;
1341 } else {
1342 return 0;
1343 }
1344 }
1345
1346 LFS_ERROR("Corrupted dir pair at {0x%"PRIx32", 0x%"PRIx32"}",
1347 dir->pair[0], dir->pair[1]);
1348 return LFS_ERR_CORRUPT;
1349 }
1350
lfs_dir_fetch(lfs_t * lfs,lfs_mdir_t * dir,const lfs_block_t pair[2])1351 static int lfs_dir_fetch(lfs_t *lfs,
1352 lfs_mdir_t *dir, const lfs_block_t pair[2]) {
1353 // note, mask=-1, tag=-1 can never match a tag since this
1354 // pattern has the invalid bit set
1355 return (int)lfs_dir_fetchmatch(lfs, dir, pair,
1356 (lfs_tag_t)-1, (lfs_tag_t)-1, NULL, NULL, NULL);
1357 }
1358
lfs_dir_getgstate(lfs_t * lfs,const lfs_mdir_t * dir,lfs_gstate_t * gstate)1359 static int lfs_dir_getgstate(lfs_t *lfs, const lfs_mdir_t *dir,
1360 lfs_gstate_t *gstate) {
1361 lfs_gstate_t temp;
1362 lfs_stag_t res = lfs_dir_get(lfs, dir, LFS_MKTAG(0x7ff, 0, 0),
1363 LFS_MKTAG(LFS_TYPE_MOVESTATE, 0, sizeof(temp)), &temp);
1364 if (res < 0 && res != LFS_ERR_NOENT) {
1365 return res;
1366 }
1367
1368 if (res != LFS_ERR_NOENT) {
1369 // xor together to find resulting gstate
1370 lfs_gstate_fromle32(&temp);
1371 lfs_gstate_xor(gstate, &temp);
1372 }
1373
1374 return 0;
1375 }
1376
lfs_dir_getinfo(lfs_t * lfs,lfs_mdir_t * dir,uint16_t id,struct lfs_info * info)1377 static int lfs_dir_getinfo(lfs_t *lfs, lfs_mdir_t *dir,
1378 uint16_t id, struct lfs_info *info) {
1379 if (id == 0x3ff) {
1380 // special case for root
1381 strcpy(info->name, "/");
1382 info->type = LFS_TYPE_DIR;
1383 return 0;
1384 }
1385
1386 lfs_stag_t tag = lfs_dir_get(lfs, dir, LFS_MKTAG(0x780, 0x3ff, 0),
1387 LFS_MKTAG(LFS_TYPE_NAME, id, lfs->name_max+1), info->name);
1388 if (tag < 0) {
1389 return (int)tag;
1390 }
1391
1392 info->type = lfs_tag_type3(tag);
1393
1394 struct lfs_ctz ctz;
1395 tag = lfs_dir_get(lfs, dir, LFS_MKTAG(0x700, 0x3ff, 0),
1396 LFS_MKTAG(LFS_TYPE_STRUCT, id, sizeof(ctz)), &ctz);
1397 if (tag < 0) {
1398 return (int)tag;
1399 }
1400 lfs_ctz_fromle32(&ctz);
1401
1402 if (lfs_tag_type3(tag) == LFS_TYPE_CTZSTRUCT) {
1403 info->size = ctz.size;
1404 } else if (lfs_tag_type3(tag) == LFS_TYPE_INLINESTRUCT) {
1405 info->size = lfs_tag_size(tag);
1406 }
1407
1408 return 0;
1409 }
1410
// Context passed through lfs_dir_fetchmatch to lfs_dir_find_match.
struct lfs_dir_find_match {
    lfs_t *lfs;        // filesystem, needed for block-device reads
    const void *name;  // name bytes to compare against on-disk names
    lfs_size_t size;   // length of name in bytes
};
1416
lfs_dir_find_match(void * data,lfs_tag_t tag,const void * buffer)1417 static int lfs_dir_find_match(void *data,
1418 lfs_tag_t tag, const void *buffer) {
1419 struct lfs_dir_find_match *name = data;
1420 lfs_t *lfs = name->lfs;
1421 const struct lfs_diskoff *disk = buffer;
1422
1423 // compare with disk
1424 lfs_size_t diff = lfs_min(name->size, lfs_tag_size(tag));
1425 int res = lfs_bd_cmp(lfs,
1426 NULL, &lfs->rcache, diff,
1427 disk->block, disk->off, name->name, diff);
1428 if (res != LFS_CMP_EQ) {
1429 return res;
1430 }
1431
1432 // only equal if our size is still the same
1433 if (name->size != lfs_tag_size(tag)) {
1434 return (name->size < lfs_tag_size(tag)) ? LFS_CMP_LT : LFS_CMP_GT;
1435 }
1436
1437 // found a match!
1438 return LFS_CMP_EQ;
1439 }
1440
lfs_dir_find(lfs_t * lfs,lfs_mdir_t * dir,const char ** path,uint16_t * id)1441 static lfs_stag_t lfs_dir_find(lfs_t *lfs, lfs_mdir_t *dir,
1442 const char **path, uint16_t *id) {
1443 // we reduce path to a single name if we can find it
1444 const char *name = *path;
1445 if (id) {
1446 *id = 0x3ff;
1447 }
1448
1449 // default to root dir
1450 lfs_stag_t tag = LFS_MKTAG(LFS_TYPE_DIR, 0x3ff, 0);
1451 dir->tail[0] = lfs->root[0];
1452 dir->tail[1] = lfs->root[1];
1453
1454 while (true) {
1455 nextname:
1456 // skip slashes
1457 name += strspn(name, "/");
1458 lfs_size_t namelen = strcspn(name, "/");
1459
1460 // skip '.' and root '..'
1461 if ((namelen == 1 && memcmp(name, ".", 1) == 0) ||
1462 (namelen == 2 && memcmp(name, "..", 2) == 0)) {
1463 name += namelen;
1464 goto nextname;
1465 }
1466
1467 // skip if matched by '..' in name
1468 const char *suffix = name + namelen;
1469 lfs_size_t sufflen;
1470 int depth = 1;
1471 while (true) {
1472 suffix += strspn(suffix, "/");
1473 sufflen = strcspn(suffix, "/");
1474 if (sufflen == 0) {
1475 break;
1476 }
1477
1478 if (sufflen == 2 && memcmp(suffix, "..", 2) == 0) {
1479 depth -= 1;
1480 if (depth == 0) {
1481 name = suffix + sufflen;
1482 goto nextname;
1483 }
1484 } else {
1485 depth += 1;
1486 }
1487
1488 suffix += sufflen;
1489 }
1490
1491 // found path
1492 if (name[0] == '\0') {
1493 return tag;
1494 }
1495
1496 // update what we've found so far
1497 *path = name;
1498
1499 // only continue if we hit a directory
1500 if (lfs_tag_type3(tag) != LFS_TYPE_DIR) {
1501 return LFS_ERR_NOTDIR;
1502 }
1503
1504 // grab the entry data
1505 if (lfs_tag_id(tag) != 0x3ff) {
1506 lfs_stag_t res = lfs_dir_get(lfs, dir, LFS_MKTAG(0x700, 0x3ff, 0),
1507 LFS_MKTAG(LFS_TYPE_STRUCT, lfs_tag_id(tag), 8), dir->tail);
1508 if (res < 0) {
1509 return res;
1510 }
1511 lfs_pair_fromle32(dir->tail);
1512 }
1513
1514 // find entry matching name
1515 while (true) {
1516 tag = lfs_dir_fetchmatch(lfs, dir, dir->tail,
1517 LFS_MKTAG(0x780, 0, 0),
1518 LFS_MKTAG(LFS_TYPE_NAME, 0, namelen),
1519 // are we last name?
1520 (strchr(name, '/') == NULL) ? id : NULL,
1521 lfs_dir_find_match, &(struct lfs_dir_find_match){
1522 lfs, name, namelen});
1523 if (tag < 0) {
1524 return tag;
1525 }
1526
1527 if (tag) {
1528 break;
1529 }
1530
1531 if (!dir->split) {
1532 return LFS_ERR_NOENT;
1533 }
1534 }
1535
1536 // to next name
1537 name += namelen;
1538 }
1539 }
1540
// commit logic
//
// Tracks an in-progress append to a metadata block; built up by
// lfs_dir_commitprog/lfs_dir_commitattr and finalized by
// lfs_dir_commitcrc.
struct lfs_commit {
    lfs_block_t block;  // block being written
    lfs_off_t off;      // current program offset in block
    lfs_tag_t ptag;     // previous tag, used to xor-encode the next tag
    uint32_t crc;       // running crc of the current commit

    lfs_off_t begin;    // bounds of the committable region in the block
    lfs_off_t end;
};
1551
1552 #ifndef LFS_READONLY
lfs_dir_commitprog(lfs_t * lfs,struct lfs_commit * commit,const void * buffer,lfs_size_t size)1553 static int lfs_dir_commitprog(lfs_t *lfs, struct lfs_commit *commit,
1554 const void *buffer, lfs_size_t size) {
1555 int err = lfs_bd_prog(lfs,
1556 &lfs->pcache, &lfs->rcache, false,
1557 commit->block, commit->off ,
1558 (const uint8_t*)buffer, size);
1559 if (err) {
1560 return err;
1561 }
1562
1563 commit->crc = lfs_crc(commit->crc, buffer, size);
1564 commit->off += size;
1565 return 0;
1566 }
1567 #endif
1568
#ifndef LFS_READONLY
// Append a single attribute (tag + payload) to an in-progress commit.
//
// The tag is xor-encoded against the previous tag and written big-endian.
// If the tag's sign bit is clear, buffer points at the payload in memory;
// if set, buffer points at a struct lfs_diskoff and the payload is copied
// byte-by-byte from disk. Returns LFS_ERR_NOSPC if the attr would overrun
// commit->end, or another negative error code.
static int lfs_dir_commitattr(lfs_t *lfs, struct lfs_commit *commit,
        lfs_tag_t tag, const void *buffer) {
    // check if we fit
    lfs_size_t dsize = lfs_tag_dsize(tag);
    if (commit->off + dsize > commit->end) {
        return LFS_ERR_NOSPC;
    }

    // write out tag
    lfs_tag_t ntag = lfs_tobe32((tag & 0x7fffffff) ^ commit->ptag);
    int err = lfs_dir_commitprog(lfs, commit, &ntag, sizeof(ntag));
    if (err) {
        return err;
    }

    if (!(tag & 0x80000000)) {
        // from memory
        err = lfs_dir_commitprog(lfs, commit, buffer, dsize-sizeof(tag));
        if (err) {
            return err;
        }
    } else {
        // from disk
        const struct lfs_diskoff *disk = buffer;
        for (lfs_off_t i = 0; i < dsize-sizeof(tag); i++) {
            // rely on caching to make this efficient
            uint8_t dat;
            err = lfs_bd_read(lfs,
                    NULL, &lfs->rcache, dsize-sizeof(tag)-i,
                    disk->block, disk->off+i, &dat, 1);
            if (err) {
                return err;
            }

            err = lfs_dir_commitprog(lfs, commit, &dat, 1);
            if (err) {
                return err;
            }
        }
    }

    // remember this tag (without the disk bit) for the next xor-encode
    commit->ptag = tag & 0x7fffffff;
    return 0;
}
#endif
1615
#ifndef LFS_READONLY

// Finalize an in-progress commit by writing CCRC (and possibly FCRC)
// tags, padding to the program-unit boundary, flushing, and then reading
// back the commit to verify its checksum. Returns 0, LFS_ERR_CORRUPT if
// the readback fails, or another negative error code.
static int lfs_dir_commitcrc(lfs_t *lfs, struct lfs_commit *commit) {
    // align to program units
    //
    // this gets a bit complex as we have two types of crcs:
    // - 5-word crc with fcrc to check following prog (middle of block)
    // - 2-word crc with no following prog (end of block)
    const lfs_off_t end = lfs_alignup(
            lfs_min(commit->off + 5*sizeof(uint32_t), lfs->cfg->block_size),
            lfs->cfg->prog_size);

    // off1/crc1 remember the first (non-padding) crc for verification
    lfs_off_t off1 = 0;
    uint32_t crc1 = 0;

    // create crc tags to fill up remainder of commit, note that
    // padding is not crced, which lets fetches skip padding but
    // makes committing a bit more complicated
    while (commit->off < end) {
        // next crc tag covers at most 0x3fe bytes (tag size field limit)
        lfs_off_t noff = (
                lfs_min(end - (commit->off+sizeof(lfs_tag_t)), 0x3fe)
                + (commit->off+sizeof(lfs_tag_t)));
        // too large for crc tag? need padding commits
        if (noff < end) {
            noff = lfs_min(noff, end - 5*sizeof(uint32_t));
        }

        // space for fcrc?
        uint8_t eperturb = (uint8_t)-1;
        if (noff >= end && noff <= lfs->cfg->block_size - lfs->cfg->prog_size) {
            // first read the leading byte, this always contains a bit
            // we can perturb to avoid writes that don't change the fcrc
            int err = lfs_bd_read(lfs,
                    NULL, &lfs->rcache, lfs->cfg->prog_size,
                    commit->block, noff, &eperturb, 1);
            if (err && err != LFS_ERR_CORRUPT) {
                return err;
            }

#ifdef LFS_MULTIVERSION
            // unfortunately fcrcs break mdir fetching < lfs2.1, so only write
            // these if we're a >= lfs2.1 filesystem
            if (lfs_fs_disk_version(lfs) <= 0x00020000) {
                // don't write fcrc
            } else
#endif
            {
                // find the expected fcrc, don't bother avoiding a reread
                // of the eperturb, it should still be in our cache
                struct lfs_fcrc fcrc = {
                    .size = lfs->cfg->prog_size,
                    .crc = 0xffffffff
                };
                err = lfs_bd_crc(lfs,
                        NULL, &lfs->rcache, lfs->cfg->prog_size,
                        commit->block, noff, fcrc.size, &fcrc.crc);
                if (err && err != LFS_ERR_CORRUPT) {
                    return err;
                }

                lfs_fcrc_tole32(&fcrc);
                err = lfs_dir_commitattr(lfs, commit,
                        LFS_MKTAG(LFS_TYPE_FCRC, 0x3ff, sizeof(struct lfs_fcrc)),
                        &fcrc);
                if (err) {
                    return err;
                }
            }
        }

        // build commit crc
        struct {
            lfs_tag_t tag;
            uint32_t crc;
        } ccrc;
        // the CCRC chunk's low bit carries the expected state of the next
        // tag's valid bit, derived from the perturb byte
        lfs_tag_t ntag = LFS_MKTAG(
                LFS_TYPE_CCRC + (((uint8_t)~eperturb) >> 7), 0x3ff,
                noff - (commit->off+sizeof(lfs_tag_t)));
        ccrc.tag = lfs_tobe32(ntag ^ commit->ptag);
        commit->crc = lfs_crc(commit->crc, &ccrc.tag, sizeof(lfs_tag_t));
        ccrc.crc = lfs_tole32(commit->crc);

        int err = lfs_bd_prog(lfs,
                &lfs->pcache, &lfs->rcache, false,
                commit->block, commit->off, &ccrc, sizeof(ccrc));
        if (err) {
            return err;
        }

        // keep track of non-padding checksum to verify
        if (off1 == 0) {
            off1 = commit->off + sizeof(lfs_tag_t);
            crc1 = commit->crc;
        }

        commit->off = noff;
        // perturb valid bit?
        commit->ptag = ntag ^ ((0x80UL & ~eperturb) << 24);
        // reset crc for next commit
        commit->crc = 0xffffffff;

        // manually flush here since we don't prog the padding, this confuses
        // the caching layer
        if (noff >= end || noff >= lfs->pcache.off + lfs->cfg->cache_size) {
            // flush buffers
            int err = lfs_bd_sync(lfs, &lfs->pcache, &lfs->rcache, false);
            if (err) {
                return err;
            }
        }
    }

    // successful commit, check checksums to make sure
    //
    // note that we don't need to check padding commits, worst
    // case if they are corrupted we would have had to compact anyways
    lfs_off_t off = commit->begin;
    uint32_t crc = 0xffffffff;
    int err = lfs_bd_crc(lfs,
            NULL, &lfs->rcache, off1+sizeof(uint32_t),
            commit->block, off, off1-off, &crc);
    if (err) {
        return err;
    }

    // check non-padding commits against known crc
    if (crc != crc1) {
        return LFS_ERR_CORRUPT;
    }

    // make sure to check crc in case we happen to pick
    // up an unrelated crc (frozen block?)
    err = lfs_bd_crc(lfs,
            NULL, &lfs->rcache, sizeof(uint32_t),
            commit->block, off1, sizeof(uint32_t), &crc);
    if (err) {
        return err;
    }

    // crc over data+stored-crc of a matching commit must come out zero
    if (crc != 0) {
        return LFS_ERR_CORRUPT;
    }

    return 0;
}
#endif
1762
1763 #ifndef LFS_READONLY
lfs_dir_alloc(lfs_t * lfs,lfs_mdir_t * dir)1764 static int lfs_dir_alloc(lfs_t *lfs, lfs_mdir_t *dir) {
1765 // allocate pair of dir blocks (backwards, so we write block 1 first)
1766 for (int i = 0; i < 2; i++) {
1767 int err = lfs_alloc(lfs, &dir->pair[(i+1)%2]);
1768 if (err) {
1769 return err;
1770 }
1771 }
1772
1773 // zero for reproducibility in case initial block is unreadable
1774 dir->rev = 0;
1775
1776 // rather than clobbering one of the blocks we just pretend
1777 // the revision may be valid
1778 int err = lfs_bd_read(lfs,
1779 NULL, &lfs->rcache, sizeof(dir->rev),
1780 dir->pair[0], 0, &dir->rev, sizeof(dir->rev));
1781 dir->rev = lfs_fromle32(dir->rev);
1782 if (err && err != LFS_ERR_CORRUPT) {
1783 return err;
1784 }
1785
1786 // to make sure we don't immediately evict, align the new revision count
1787 // to our block_cycles modulus, see lfs_dir_compact for why our modulus
1788 // is tweaked this way
1789 if (lfs->cfg->block_cycles > 0) {
1790 dir->rev = lfs_alignup(dir->rev, ((lfs->cfg->block_cycles+1)|1));
1791 }
1792
1793 // set defaults
1794 dir->off = sizeof(dir->rev);
1795 dir->etag = 0xffffffff;
1796 dir->count = 0;
1797 dir->tail[0] = LFS_BLOCK_NULL;
1798 dir->tail[1] = LFS_BLOCK_NULL;
1799 dir->erased = false;
1800 dir->split = false;
1801
1802 // don't write out yet, let caller take care of that
1803 return 0;
1804 }
1805 #endif
1806
1807 #ifndef LFS_READONLY
lfs_dir_drop(lfs_t * lfs,lfs_mdir_t * dir,lfs_mdir_t * tail)1808 static int lfs_dir_drop(lfs_t *lfs, lfs_mdir_t *dir, lfs_mdir_t *tail) {
1809 // steal state
1810 int err = lfs_dir_getgstate(lfs, tail, &lfs->gdelta);
1811 if (err) {
1812 return err;
1813 }
1814
1815 // steal tail
1816 lfs_pair_tole32(tail->tail);
1817 err = lfs_dir_commit(lfs, dir, LFS_MKATTRS(
1818 {LFS_MKTAG(LFS_TYPE_TAIL + tail->split, 0x3ff, 8), tail->tail}));
1819 lfs_pair_fromle32(tail->tail);
1820 if (err) {
1821 return err;
1822 }
1823
1824 return 0;
1825 }
1826 #endif
1827
1828 #ifndef LFS_READONLY
lfs_dir_split(lfs_t * lfs,lfs_mdir_t * dir,const struct lfs_mattr * attrs,int attrcount,lfs_mdir_t * source,uint16_t split,uint16_t end)1829 static int lfs_dir_split(lfs_t *lfs,
1830 lfs_mdir_t *dir, const struct lfs_mattr *attrs, int attrcount,
1831 lfs_mdir_t *source, uint16_t split, uint16_t end) {
1832 // create tail metadata pair
1833 lfs_mdir_t tail;
1834 int err = lfs_dir_alloc(lfs, &tail);
1835 if (err) {
1836 return err;
1837 }
1838
1839 tail.split = dir->split;
1840 tail.tail[0] = dir->tail[0];
1841 tail.tail[1] = dir->tail[1];
1842
1843 // note we don't care about LFS_OK_RELOCATED
1844 int res = lfs_dir_compact(lfs, &tail, attrs, attrcount, source, split, end);
1845 if (res < 0) {
1846 return res;
1847 }
1848
1849 dir->tail[0] = tail.pair[0];
1850 dir->tail[1] = tail.pair[1];
1851 dir->split = true;
1852
1853 // update root if needed
1854 if (lfs_pair_cmp(dir->pair, lfs->root) == 0 && split == 0) {
1855 lfs->root[0] = tail.pair[0];
1856 lfs->root[1] = tail.pair[1];
1857 }
1858
1859 return 0;
1860 }
1861 #endif
1862
1863 #ifndef LFS_READONLY
lfs_dir_commit_size(void * p,lfs_tag_t tag,const void * buffer)1864 static int lfs_dir_commit_size(void *p, lfs_tag_t tag, const void *buffer) {
1865 lfs_size_t *size = p;
1866 (void)buffer;
1867
1868 *size += lfs_tag_dsize(tag);
1869 return 0;
1870 }
1871 #endif
1872
#ifndef LFS_READONLY
// Context binding a filesystem and an in-progress commit for the
// lfs_dir_commit_commit traverse callback.
struct lfs_dir_commit_commit {
    lfs_t *lfs;
    struct lfs_commit *commit;
};
#endif
1879
1880 #ifndef LFS_READONLY
lfs_dir_commit_commit(void * p,lfs_tag_t tag,const void * buffer)1881 static int lfs_dir_commit_commit(void *p, lfs_tag_t tag, const void *buffer) {
1882 struct lfs_dir_commit_commit *commit = p;
1883 return lfs_dir_commitattr(commit->lfs, commit->commit, tag, buffer);
1884 }
1885 #endif
1886
1887 #ifndef LFS_READONLY
lfs_dir_needsrelocation(lfs_t * lfs,lfs_mdir_t * dir)1888 static bool lfs_dir_needsrelocation(lfs_t *lfs, lfs_mdir_t *dir) {
1889 // If our revision count == n * block_cycles, we should force a relocation,
1890 // this is how littlefs wear-levels at the metadata-pair level. Note that we
1891 // actually use (block_cycles+1)|1, this is to avoid two corner cases:
1892 // 1. block_cycles = 1, which would prevent relocations from terminating
1893 // 2. block_cycles = 2n, which, due to aliasing, would only ever relocate
1894 // one metadata block in the pair, effectively making this useless
1895 return (lfs->cfg->block_cycles > 0
1896 && ((dir->rev + 1) % ((lfs->cfg->block_cycles+1)|1) == 0));
1897 }
1898 #endif
1899
#ifndef LFS_READONLY
// Compact entries [begin, end) of source, plus pending attrs, into dir,
// rewriting the mdir from scratch on its unused block.
//
// Relocates to freshly allocated blocks if a write fails or the pair is
// due for wear-leveling. Returns 0, LFS_OK_RELOCATED if the pair moved,
// or a negative error code.
static int lfs_dir_compact(lfs_t *lfs,
        lfs_mdir_t *dir, const struct lfs_mattr *attrs, int attrcount,
        lfs_mdir_t *source, uint16_t begin, uint16_t end) {
    // save some state in case block is bad
    bool relocated = false;
    bool tired = lfs_dir_needsrelocation(lfs, dir);

    // increment revision count
    dir->rev += 1;

    // do not proactively relocate blocks during migrations, this
    // can cause a number of failure states such: clobbering the
    // v1 superblock if we relocate root, and invalidating directory
    // pointers if we relocate the head of a directory. On top of
    // this, relocations increase the overall complexity of
    // lfs_migration, which is already a delicate operation.
#ifdef LFS_MIGRATE
    if (lfs->lfs1) {
        tired = false;
    }
#endif

    if (tired && lfs_pair_cmp(dir->pair, (const lfs_block_t[2]){0, 1}) != 0) {
        // we're writing too much, time to relocate
        goto relocate;
    }

    // begin loop to commit compaction to blocks until a compact sticks
    while (true) {
        {
            // setup commit state
            struct lfs_commit commit = {
                .block = dir->pair[1],
                .off = 0,
                .ptag = 0xffffffff,
                .crc = 0xffffffff,

                .begin = 0,
                // leave room for the tail pointer's 8 bytes
                .end = (lfs->cfg->metadata_max ?
                    lfs->cfg->metadata_max : lfs->cfg->block_size) - 8,
            };

            // erase block to write to
            int err = lfs_bd_erase(lfs, dir->pair[1]);
            if (err) {
                if (err == LFS_ERR_CORRUPT) {
                    goto relocate;
                }
                return err;
            }

            // write out header
            dir->rev = lfs_tole32(dir->rev);
            err = lfs_dir_commitprog(lfs, &commit,
                    &dir->rev, sizeof(dir->rev));
            dir->rev = lfs_fromle32(dir->rev);
            if (err) {
                if (err == LFS_ERR_CORRUPT) {
                    goto relocate;
                }
                return err;
            }

            // traverse the directory, this time writing out all unique tags
            err = lfs_dir_traverse(lfs,
                    source, 0, 0xffffffff, attrs, attrcount,
                    LFS_MKTAG(0x400, 0x3ff, 0),
                    LFS_MKTAG(LFS_TYPE_NAME, 0, 0),
                    begin, end, -begin,
                    lfs_dir_commit_commit, &(struct lfs_dir_commit_commit){
                        lfs, &commit});
            if (err) {
                if (err == LFS_ERR_CORRUPT) {
                    goto relocate;
                }
                return err;
            }

            // commit tail, which may be new after last size check
            if (!lfs_pair_isnull(dir->tail)) {
                lfs_pair_tole32(dir->tail);
                err = lfs_dir_commitattr(lfs, &commit,
                        LFS_MKTAG(LFS_TYPE_TAIL + dir->split, 0x3ff, 8),
                        dir->tail);
                lfs_pair_fromle32(dir->tail);
                if (err) {
                    if (err == LFS_ERR_CORRUPT) {
                        goto relocate;
                    }
                    return err;
                }
            }

            // bring over gstate?
            lfs_gstate_t delta = {0};
            if (!relocated) {
                lfs_gstate_xor(&delta, &lfs->gdisk);
                lfs_gstate_xor(&delta, &lfs->gstate);
            }
            lfs_gstate_xor(&delta, &lfs->gdelta);
            delta.tag &= ~LFS_MKTAG(0, 0, 0x3ff);

            err = lfs_dir_getgstate(lfs, dir, &delta);
            if (err) {
                return err;
            }

            if (!lfs_gstate_iszero(&delta)) {
                lfs_gstate_tole32(&delta);
                err = lfs_dir_commitattr(lfs, &commit,
                        LFS_MKTAG(LFS_TYPE_MOVESTATE, 0x3ff,
                            sizeof(delta)), &delta);
                if (err) {
                    if (err == LFS_ERR_CORRUPT) {
                        goto relocate;
                    }
                    return err;
                }
            }

            // complete commit with crc
            err = lfs_dir_commitcrc(lfs, &commit);
            if (err) {
                if (err == LFS_ERR_CORRUPT) {
                    goto relocate;
                }
                return err;
            }

            // successful compaction, swap dir pair to indicate most recent
            LFS_ASSERT(commit.off % lfs->cfg->prog_size == 0);
            lfs_pair_swap(dir->pair);
            dir->count = end - begin;
            dir->off = commit.off;
            dir->etag = commit.ptag;
            // update gstate
            lfs->gdelta = (lfs_gstate_t){0};
            if (!relocated) {
                lfs->gdisk = lfs->gstate;
            }
        }
        break;

relocate:
        // commit was corrupted, drop caches and prepare to relocate block
        relocated = true;
        lfs_cache_drop(lfs, &lfs->pcache);
        if (!tired) {
            LFS_DEBUG("Bad block at 0x%"PRIx32, dir->pair[1]);
        }

        // can't relocate superblock, filesystem is now frozen
        if (lfs_pair_cmp(dir->pair, (const lfs_block_t[2]){0, 1}) == 0) {
            LFS_WARN("Superblock 0x%"PRIx32" has become unwritable",
                    dir->pair[1]);
            return LFS_ERR_NOSPC;
        }

        // relocate half of pair
        int err = lfs_alloc(lfs, &dir->pair[1]);
        if (err && (err != LFS_ERR_NOSPC || !tired)) {
            return err;
        }

        tired = false;
        continue;
    }

    return relocated ? LFS_OK_RELOCATED : 0;
}
#endif
2072
#ifndef LFS_READONLY
// Compact an mdir, first splitting it into multiple metadata pairs if the
// entries won't fit in one block, and expanding the superblock if it is
// seeing too much write traffic. Wraps lfs_dir_compact.
static int lfs_dir_splittingcompact(lfs_t *lfs, lfs_mdir_t *dir,
        const struct lfs_mattr *attrs, int attrcount,
        lfs_mdir_t *source, uint16_t begin, uint16_t end) {
    while (true) {
        // find size of first split, we do this by halving the split until
        // the metadata is guaranteed to fit
        //
        // Note that this isn't a true binary search, we never increase the
        // split size. This may result in poorly distributed metadata but isn't
        // worth the extra code size or performance hit to fix.
        lfs_size_t split = begin;
        while (end - split > 1) {
            // measure how much metadata entries [split, end) would need
            lfs_size_t size = 0;
            int err = lfs_dir_traverse(lfs,
                    source, 0, 0xffffffff, attrs, attrcount,
                    LFS_MKTAG(0x400, 0x3ff, 0),
                    LFS_MKTAG(LFS_TYPE_NAME, 0, 0),
                    split, end, -split,
                    lfs_dir_commit_size, &size);
            if (err) {
                return err;
            }

            // space is complicated, we need room for:
            //
            // - tail:         4+2*4 = 12 bytes
            // - gstate:       4+3*4 = 16 bytes
            // - move delete:  4     = 4 bytes
            // - crc:          4+4   = 8 bytes
            //                 total = 40 bytes
            //
            // And we cap at half a block to avoid degenerate cases with
            // nearly-full metadata blocks.
            //
            if (end - split < 0xff
                    && size <= lfs_min(
                        lfs->cfg->block_size - 40,
                        lfs_alignup(
                            (lfs->cfg->metadata_max
                                ? lfs->cfg->metadata_max
                                : lfs->cfg->block_size)/2,
                            lfs->cfg->prog_size))) {
                break;
            }

            split = split + ((end - split) / 2);
        }

        if (split == begin) {
            // no split needed
            break;
        }

        // split into two metadata pairs and continue
        int err = lfs_dir_split(lfs, dir, attrs, attrcount,
                source, split, end);
        if (err && err != LFS_ERR_NOSPC) {
            return err;
        }

        if (err) {
            // we can't allocate a new block, try to compact with degraded
            // performance
            LFS_WARN("Unable to split {0x%"PRIx32", 0x%"PRIx32"}",
                    dir->pair[0], dir->pair[1]);
            break;
        } else {
            end = split;
        }
    }

    if (lfs_dir_needsrelocation(lfs, dir)
            && lfs_pair_cmp(dir->pair, (const lfs_block_t[2]){0, 1}) == 0) {
        // oh no! we're writing too much to the superblock,
        // should we expand?
        lfs_ssize_t size = lfs_fs_rawsize(lfs);
        if (size < 0) {
            return size;
        }

        // do we have extra space? littlefs can't reclaim this space
        // by itself, so expand cautiously
        if ((lfs_size_t)size < lfs->block_count/2) {
            LFS_DEBUG("Expanding superblock at rev %"PRIu32, dir->rev);
            int err = lfs_dir_split(lfs, dir, attrs, attrcount,
                    source, begin, end);
            if (err && err != LFS_ERR_NOSPC) {
                return err;
            }

            if (err) {
                // welp, we tried, if we ran out of space there's not much
                // we can do, we'll error later if we've become frozen
                LFS_WARN("Unable to expand superblock");
            } else {
                // expanded, the old superblock pair now only holds the
                // superblock entry itself
                end = begin;
            }
        }
    }

    return lfs_dir_compact(lfs, dir, attrs, attrcount, source, begin, end);
}
#endif
2177
2178 #ifndef LFS_READONLY
// Commit attrs to the metadata pair dir, falling back to compaction
// (and possibly relocation to fresh blocks) if an in-place commit
// doesn't fit.
//
// pair is the original pair the caller knows dir by, used to patch up
// any open handles tracked in lfs->mlist. pdir receives dir's
// predecessor when the directory ends up dropped.
//
// Returns 0 on a plain commit, LFS_OK_RELOCATED if dir moved to new
// blocks, LFS_OK_DROPPED if dir became empty and should be unlinked,
// or a negative error code.
static int lfs_dir_relocatingcommit(lfs_t *lfs, lfs_mdir_t *dir,
        const lfs_block_t pair[2],
        const struct lfs_mattr *attrs, int attrcount,
        lfs_mdir_t *pdir) {
    int state = 0;

    // calculate changes to the directory
    bool hasdelete = false;
    for (int i = 0; i < attrcount; i++) {
        if (lfs_tag_type3(attrs[i].tag) == LFS_TYPE_CREATE) {
            dir->count += 1;
        } else if (lfs_tag_type3(attrs[i].tag) == LFS_TYPE_DELETE) {
            LFS_ASSERT(dir->count > 0);
            dir->count -= 1;
            hasdelete = true;
        } else if (lfs_tag_type1(attrs[i].tag) == LFS_TYPE_TAIL) {
            dir->tail[0] = ((lfs_block_t*)attrs[i].buffer)[0];
            dir->tail[1] = ((lfs_block_t*)attrs[i].buffer)[1];
            dir->split = (lfs_tag_chunk(attrs[i].tag) & 1);
            lfs_pair_fromle32(dir->tail);
        }
    }

    // should we actually drop the directory block?
    if (hasdelete && dir->count == 0) {
        LFS_ASSERT(pdir);
        int err = lfs_fs_pred(lfs, dir->pair, pdir);
        if (err && err != LFS_ERR_NOENT) {
            return err;
        }

        // only droppable if a predecessor exists to take over our tail
        if (err != LFS_ERR_NOENT && pdir->split) {
            state = LFS_OK_DROPPED;
            goto fixmlist;
        }
    }

    if (dir->erased) {
        // try to commit
        struct lfs_commit commit = {
            .block = dir->pair[0],
            .off = dir->off,
            .ptag = dir->etag,
            .crc = 0xffffffff,

            .begin = dir->off,
            .end = (lfs->cfg->metadata_max ?
                lfs->cfg->metadata_max : lfs->cfg->block_size) - 8,
        };

        // traverse attrs that need to be written out
        lfs_pair_tole32(dir->tail);
        int err = lfs_dir_traverse(lfs,
                dir, dir->off, dir->etag, attrs, attrcount,
                0, 0, 0, 0, 0,
                lfs_dir_commit_commit, &(struct lfs_dir_commit_commit){
                    lfs, &commit});
        lfs_pair_fromle32(dir->tail);
        if (err) {
            if (err == LFS_ERR_NOSPC || err == LFS_ERR_CORRUPT) {
                goto compact;
            }
            return err;
        }

        // commit any global diffs if we have any
        lfs_gstate_t delta = {0};
        lfs_gstate_xor(&delta, &lfs->gstate);
        lfs_gstate_xor(&delta, &lfs->gdisk);
        lfs_gstate_xor(&delta, &lfs->gdelta);
        delta.tag &= ~LFS_MKTAG(0, 0, 0x3ff);
        if (!lfs_gstate_iszero(&delta)) {
            err = lfs_dir_getgstate(lfs, dir, &delta);
            if (err) {
                return err;
            }

            lfs_gstate_tole32(&delta);
            err = lfs_dir_commitattr(lfs, &commit,
                    LFS_MKTAG(LFS_TYPE_MOVESTATE, 0x3ff,
                        sizeof(delta)), &delta);
            if (err) {
                if (err == LFS_ERR_NOSPC || err == LFS_ERR_CORRUPT) {
                    goto compact;
                }
                return err;
            }
        }

        // finalize commit with the crc
        err = lfs_dir_commitcrc(lfs, &commit);
        if (err) {
            if (err == LFS_ERR_NOSPC || err == LFS_ERR_CORRUPT) {
                goto compact;
            }
            return err;
        }

        // successful commit, update dir
        LFS_ASSERT(commit.off % lfs->cfg->prog_size == 0);
        dir->off = commit.off;
        dir->etag = commit.ptag;
        // and update gstate
        lfs->gdisk = lfs->gstate;
        lfs->gdelta = (lfs_gstate_t){0};

        goto fixmlist;
    }

compact:
    // fall back to compaction
    lfs_cache_drop(lfs, &lfs->pcache);

    state = lfs_dir_splittingcompact(lfs, dir, attrs, attrcount,
            dir, 0, dir->count);
    if (state < 0) {
        return state;
    }

    goto fixmlist;

fixmlist:;
    // this complicated bit of logic is for fixing up any active
    // metadata-pairs that we may have affected
    //
    // note we have to make two passes since the mdir passed to
    // lfs_dir_commit could also be in this list, and even then
    // we need to copy the pair so they don't get clobbered if we refetch
    // our mdir.
    lfs_block_t oldpair[2] = {pair[0], pair[1]};
    for (struct lfs_mlist *d = lfs->mlist; d; d = d->next) {
        if (lfs_pair_cmp(d->m.pair, oldpair) == 0) {
            d->m = *dir;
            if (d->m.pair != pair) {
                for (int i = 0; i < attrcount; i++) {
                    if (lfs_tag_type3(attrs[i].tag) == LFS_TYPE_DELETE &&
                            d->id == lfs_tag_id(attrs[i].tag)) {
                        // this handle's own entry was deleted
                        d->m.pair[0] = LFS_BLOCK_NULL;
                        d->m.pair[1] = LFS_BLOCK_NULL;
                    } else if (lfs_tag_type3(attrs[i].tag) == LFS_TYPE_DELETE &&
                            d->id > lfs_tag_id(attrs[i].tag)) {
                        // ids above a delete shift down by one
                        d->id -= 1;
                        if (d->type == LFS_TYPE_DIR) {
                            ((lfs_dir_t*)d)->pos -= 1;
                        }
                    } else if (lfs_tag_type3(attrs[i].tag) == LFS_TYPE_CREATE &&
                            d->id >= lfs_tag_id(attrs[i].tag)) {
                        // ids at/above a create shift up by one
                        d->id += 1;
                        if (d->type == LFS_TYPE_DIR) {
                            ((lfs_dir_t*)d)->pos += 1;
                        }
                    }
                }
            }

            while (d->id >= d->m.count && d->m.split) {
                // we split and id is on tail now
                d->id -= d->m.count;
                int err = lfs_dir_fetch(lfs, &d->m, d->m.tail);
                if (err) {
                    return err;
                }
            }
        }
    }

    return state;
}
2347 #endif
2348
2349 #ifndef LFS_READONLY
// Commit attrs to dir, handling drops, relocations, and the resulting
// parent/predecessor updates that a relocation causes.
//
// Relocation is logically tail recursive (relocating a pair may require
// committing to its parent, which may itself relocate, ...), but is
// flattened into a loop here to bound stack usage.
//
// Returns 0, LFS_OK_ORPHANED if orphans were created and need cleanup
// by the caller, or a negative error code.
static int lfs_dir_orphaningcommit(lfs_t *lfs, lfs_mdir_t *dir,
        const struct lfs_mattr *attrs, int attrcount) {
    // check for any inline files that aren't RAM backed and
    // forcefully evict them, needed for filesystem consistency
    for (lfs_file_t *f = (lfs_file_t*)lfs->mlist; f; f = f->next) {
        if (dir != &f->m && lfs_pair_cmp(f->m.pair, dir->pair) == 0 &&
                f->type == LFS_TYPE_REG && (f->flags & LFS_F_INLINE) &&
                f->ctz.size > lfs->cfg->cache_size) {
            int err = lfs_file_outline(lfs, f);
            if (err) {
                return err;
            }

            err = lfs_file_flush(lfs, f);
            if (err) {
                return err;
            }
        }
    }

    // commit into a local copy so open handles aren't clobbered
    lfs_block_t lpair[2] = {dir->pair[0], dir->pair[1]};
    lfs_mdir_t ldir = *dir;
    lfs_mdir_t pdir;
    int state = lfs_dir_relocatingcommit(lfs, &ldir, dir->pair,
            attrs, attrcount, &pdir);
    if (state < 0) {
        return state;
    }

    // update if we're not in mlist, note we may have already been
    // updated if we are in mlist
    if (lfs_pair_cmp(dir->pair, lpair) == 0) {
        *dir = ldir;
    }

    // commit was successful, but may require other changes in the
    // filesystem, these would normally be tail recursive, but we have
    // flattened them here avoid unbounded stack usage

    // need to drop?
    if (state == LFS_OK_DROPPED) {
        // steal state
        int err = lfs_dir_getgstate(lfs, dir, &lfs->gdelta);
        if (err) {
            return err;
        }

        // steal tail, note that this can't create a recursive drop
        lpair[0] = pdir.pair[0];
        lpair[1] = pdir.pair[1];
        lfs_pair_tole32(dir->tail);
        state = lfs_dir_relocatingcommit(lfs, &pdir, lpair, LFS_MKATTRS(
                    {LFS_MKTAG(LFS_TYPE_TAIL + dir->split, 0x3ff, 8),
                        dir->tail}),
                NULL);
        lfs_pair_fromle32(dir->tail);
        if (state < 0) {
            return state;
        }

        ldir = pdir;
    }

    // need to relocate?
    bool orphans = false;
    while (state == LFS_OK_RELOCATED) {
        LFS_DEBUG("Relocating {0x%"PRIx32", 0x%"PRIx32"} "
                    "-> {0x%"PRIx32", 0x%"PRIx32"}",
                lpair[0], lpair[1], ldir.pair[0], ldir.pair[1]);
        state = 0;

        // update internal root
        if (lfs_pair_cmp(lpair, lfs->root) == 0) {
            lfs->root[0] = ldir.pair[0];
            lfs->root[1] = ldir.pair[1];
        }

        // update internally tracked dirs
        for (struct lfs_mlist *d = lfs->mlist; d; d = d->next) {
            if (lfs_pair_cmp(lpair, d->m.pair) == 0) {
                d->m.pair[0] = ldir.pair[0];
                d->m.pair[1] = ldir.pair[1];
            }

            if (d->type == LFS_TYPE_DIR &&
                    lfs_pair_cmp(lpair, ((lfs_dir_t*)d)->head) == 0) {
                ((lfs_dir_t*)d)->head[0] = ldir.pair[0];
                ((lfs_dir_t*)d)->head[1] = ldir.pair[1];
            }
        }

        // find parent
        lfs_stag_t tag = lfs_fs_parent(lfs, lpair, &pdir);
        if (tag < 0 && tag != LFS_ERR_NOENT) {
            return tag;
        }

        bool hasparent = (tag != LFS_ERR_NOENT);
        if (tag != LFS_ERR_NOENT) {
            // note that if we have a parent, we must have a pred, so this will
            // always create an orphan
            int err = lfs_fs_preporphans(lfs, +1);
            if (err) {
                return err;
            }

            // fix pending move in this pair? this looks like an optimization but
            // is in fact _required_ since relocating may outdate the move.
            uint16_t moveid = 0x3ff;
            if (lfs_gstate_hasmovehere(&lfs->gstate, pdir.pair)) {
                moveid = lfs_tag_id(lfs->gstate.tag);
                LFS_DEBUG("Fixing move while relocating "
                        "{0x%"PRIx32", 0x%"PRIx32"} 0x%"PRIx16"\n",
                        pdir.pair[0], pdir.pair[1], moveid);
                lfs_fs_prepmove(lfs, 0x3ff, NULL);
                if (moveid < lfs_tag_id(tag)) {
                    tag -= LFS_MKTAG(0, 1, 0);
                }
            }

            lfs_block_t ppair[2] = {pdir.pair[0], pdir.pair[1]};
            lfs_pair_tole32(ldir.pair);
            state = lfs_dir_relocatingcommit(lfs, &pdir, ppair, LFS_MKATTRS(
                        {LFS_MKTAG_IF(moveid != 0x3ff,
                            LFS_TYPE_DELETE, moveid, 0), NULL},
                        {tag, ldir.pair}),
                    NULL);
            lfs_pair_fromle32(ldir.pair);
            if (state < 0) {
                return state;
            }

            if (state == LFS_OK_RELOCATED) {
                // parent itself relocated, loop again with the parent
                lpair[0] = ppair[0];
                lpair[1] = ppair[1];
                ldir = pdir;
                orphans = true;
                continue;
            }
        }

        // find pred
        int err = lfs_fs_pred(lfs, lpair, &pdir);
        if (err && err != LFS_ERR_NOENT) {
            return err;
        }
        LFS_ASSERT(!(hasparent && err == LFS_ERR_NOENT));

        // if we can't find dir, it must be new
        if (err != LFS_ERR_NOENT) {
            if (lfs_gstate_hasorphans(&lfs->gstate)) {
                // next step, clean up orphans
                err = lfs_fs_preporphans(lfs, -hasparent);
                if (err) {
                    return err;
                }
            }

            // fix pending move in this pair? this looks like an optimization
            // but is in fact _required_ since relocating may outdate the move.
            uint16_t moveid = 0x3ff;
            if (lfs_gstate_hasmovehere(&lfs->gstate, pdir.pair)) {
                moveid = lfs_tag_id(lfs->gstate.tag);
                LFS_DEBUG("Fixing move while relocating "
                        "{0x%"PRIx32", 0x%"PRIx32"} 0x%"PRIx16"\n",
                        pdir.pair[0], pdir.pair[1], moveid);
                lfs_fs_prepmove(lfs, 0x3ff, NULL);
            }

            // replace bad pair, either we clean up desync, or no desync occured
            lpair[0] = pdir.pair[0];
            lpair[1] = pdir.pair[1];
            lfs_pair_tole32(ldir.pair);
            state = lfs_dir_relocatingcommit(lfs, &pdir, lpair, LFS_MKATTRS(
                        {LFS_MKTAG_IF(moveid != 0x3ff,
                            LFS_TYPE_DELETE, moveid, 0), NULL},
                        {LFS_MKTAG(LFS_TYPE_TAIL + pdir.split, 0x3ff, 8),
                            ldir.pair}),
                    NULL);
            lfs_pair_fromle32(ldir.pair);
            if (state < 0) {
                return state;
            }

            ldir = pdir;
        }
    }

    return orphans ? LFS_OK_ORPHANED : 0;
}
2540 #endif
2541
2542 #ifndef LFS_READONLY
// Commit attrs to dir, then clean up any orphans created by nested
// block failures during the commit. Returns 0 or a negative error code.
static int lfs_dir_commit(lfs_t *lfs, lfs_mdir_t *dir,
        const struct lfs_mattr *attrs, int attrcount) {
    int state = lfs_dir_orphaningcommit(lfs, dir, attrs, attrcount);
    if (state < 0) {
        return state;
    }

    if (state == 0) {
        // no orphans created, nothing left to do
        return 0;
    }

    // make sure we've removed all orphans, this is a noop if there
    // are none, but nested block failures may have created some
    return lfs_fs_deorphan(lfs, false);
}
2562 #endif
2563
2564
2565 /// Top level directory operations ///
2566 #ifndef LFS_READONLY
// Create a directory at path.
//
// Allocates a fresh metadata pair for the new dir, links it into the
// global metadata-pair list (guarded by an orphan count so a power loss
// mid-update is recoverable), then inserts the name into the parent
// block. Returns 0, LFS_ERR_EXIST, LFS_ERR_NAMETOOLONG, or another
// negative error code.
static int lfs_rawmkdir(lfs_t *lfs, const char *path) {
    // deorphan if we haven't yet, needed at most once after poweron
    int err = lfs_fs_forceconsistency(lfs);
    if (err) {
        return err;
    }

    struct lfs_mlist cwd;
    cwd.next = lfs->mlist;
    uint16_t id;
    err = lfs_dir_find(lfs, &cwd.m, &path, &id);
    // we expect "not found, but parent exists" (NOENT with a valid id);
    // anything else is either an error or an already-existing entry
    if (!(err == LFS_ERR_NOENT && id != 0x3ff)) {
        return (err < 0) ? err : LFS_ERR_EXIST;
    }

    // check that name fits
    lfs_size_t nlen = strlen(path);
    if (nlen > lfs->name_max) {
        return LFS_ERR_NAMETOOLONG;
    }

    // build up new directory
    lfs_alloc_ack(lfs);
    lfs_mdir_t dir;
    err = lfs_dir_alloc(lfs, &dir);
    if (err) {
        return err;
    }

    // find end of list
    lfs_mdir_t pred = cwd.m;
    while (pred.split) {
        err = lfs_dir_fetch(lfs, &pred, pred.tail);
        if (err) {
            return err;
        }
    }

    // setup dir
    lfs_pair_tole32(pred.tail);
    err = lfs_dir_commit(lfs, &dir, LFS_MKATTRS(
            {LFS_MKTAG(LFS_TYPE_SOFTTAIL, 0x3ff, 8), pred.tail}));
    lfs_pair_fromle32(pred.tail);
    if (err) {
        return err;
    }

    // current block not end of list?
    if (cwd.m.split) {
        // update tails, this creates a desync
        err = lfs_fs_preporphans(lfs, +1);
        if (err) {
            return err;
        }

        // it's possible our predecessor has to be relocated, and if
        // our parent is our predecessor's predecessor, this could have
        // caused our parent to go out of date, fortunately we can hook
        // ourselves into littlefs to catch this
        cwd.type = 0;
        cwd.id = 0;
        lfs->mlist = &cwd;

        lfs_pair_tole32(dir.pair);
        err = lfs_dir_commit(lfs, &pred, LFS_MKATTRS(
                {LFS_MKTAG(LFS_TYPE_SOFTTAIL, 0x3ff, 8), dir.pair}));
        lfs_pair_fromle32(dir.pair);
        if (err) {
            lfs->mlist = cwd.next;
            return err;
        }

        lfs->mlist = cwd.next;
        err = lfs_fs_preporphans(lfs, -1);
        if (err) {
            return err;
        }
    }

    // now insert into our parent block
    lfs_pair_tole32(dir.pair);
    err = lfs_dir_commit(lfs, &cwd.m, LFS_MKATTRS(
            {LFS_MKTAG(LFS_TYPE_CREATE, id, 0), NULL},
            {LFS_MKTAG(LFS_TYPE_DIR, id, nlen), path},
            {LFS_MKTAG(LFS_TYPE_DIRSTRUCT, id, 8), dir.pair},
            {LFS_MKTAG_IF(!cwd.m.split,
                LFS_TYPE_SOFTTAIL, 0x3ff, 8), dir.pair}));
    lfs_pair_fromle32(dir.pair);
    if (err) {
        return err;
    }

    return 0;
}
2661 #endif
2662
// Open the directory at path.
//
// Resolves path to a dir entry, fetches the dir's first metadata pair,
// and registers the handle in lfs->mlist so it tracks metadata updates.
// Returns 0, LFS_ERR_NOTDIR, or another negative error code.
static int lfs_dir_rawopen(lfs_t *lfs, lfs_dir_t *dir, const char *path) {
    lfs_stag_t tag = lfs_dir_find(lfs, &dir->m, &path, NULL);
    if (tag < 0) {
        return tag;
    }

    if (lfs_tag_type3(tag) != LFS_TYPE_DIR) {
        return LFS_ERR_NOTDIR;
    }

    lfs_block_t pair[2];
    if (lfs_tag_id(tag) == 0x3ff) {
        // handle root dir separately
        pair[0] = lfs->root[0];
        pair[1] = lfs->root[1];
    } else {
        // get dir pair from parent
        lfs_stag_t res = lfs_dir_get(lfs, &dir->m, LFS_MKTAG(0x700, 0x3ff, 0),
                LFS_MKTAG(LFS_TYPE_STRUCT, lfs_tag_id(tag), 8), pair);
        if (res < 0) {
            return res;
        }
        lfs_pair_fromle32(pair);
    }

    // fetch first pair
    int err = lfs_dir_fetch(lfs, &dir->m, pair);
    if (err) {
        return err;
    }

    // setup entry
    dir->head[0] = dir->m.pair[0];
    dir->head[1] = dir->m.pair[1];
    dir->id = 0;
    dir->pos = 0;

    // add to list of mdirs
    dir->type = LFS_TYPE_DIR;
    lfs_mlist_append(lfs, (struct lfs_mlist *)dir);

    return 0;
}
2706
// Close an open directory. Closing only requires dropping the handle
// from the tracked mdir list; there is no on-disk state to flush.
static int lfs_dir_rawclose(lfs_t *lfs, lfs_dir_t *dir) {
    lfs_mlist_remove(lfs, (struct lfs_mlist *)dir);
    return 0;
}
2713
// Read the next entry from an open directory into info.
//
// Returns true if an entry was read, false at the end of the directory,
// or a negative error code. The first two reads synthesize the "."
// and ".." entries.
static int lfs_dir_rawread(lfs_t *lfs, lfs_dir_t *dir, struct lfs_info *info) {
    memset(info, 0, sizeof(*info));

    // special offset for '.' and '..'
    if (dir->pos == 0) {
        info->type = LFS_TYPE_DIR;
        strcpy(info->name, ".");
        dir->pos += 1;
        return true;
    } else if (dir->pos == 1) {
        info->type = LFS_TYPE_DIR;
        strcpy(info->name, "..");
        dir->pos += 1;
        return true;
    }

    while (true) {
        if (dir->id == dir->m.count) {
            if (!dir->m.split) {
                // ran out of metadata pairs, end of directory
                return false;
            }

            // continue into the next pair in the directory chain
            int err = lfs_dir_fetch(lfs, &dir->m, dir->m.tail);
            if (err) {
                return err;
            }

            dir->id = 0;
        }

        int err = lfs_dir_getinfo(lfs, &dir->m, dir->id, info);
        if (err && err != LFS_ERR_NOENT) {
            return err;
        }

        dir->id += 1;
        // NOENT means this id has no readable entry, keep scanning
        if (err != LFS_ERR_NOENT) {
            break;
        }
    }

    dir->pos += 1;
    return true;
}
2758
// Seek an open directory to logical offset off, as previously returned
// by lfs_dir_rawtell. Walks forward from the head block; returns
// LFS_ERR_INVAL if off is past the end of the directory.
static int lfs_dir_rawseek(lfs_t *lfs, lfs_dir_t *dir, lfs_off_t off) {
    // simply walk from head dir
    int err = lfs_dir_rawrewind(lfs, dir);
    if (err) {
        return err;
    }

    // first two for ./..
    dir->pos = lfs_min(2, off);
    off -= dir->pos;

    // skip superblock entry
    dir->id = (off > 0 && lfs_pair_cmp(dir->head, lfs->root) == 0);

    while (off > 0) {
        if (dir->id == dir->m.count) {
            if (!dir->m.split) {
                // seeking past the end of the directory
                return LFS_ERR_INVAL;
            }

            err = lfs_dir_fetch(lfs, &dir->m, dir->m.tail);
            if (err) {
                return err;
            }

            dir->id = 0;
        }

        // jump over as many ids as fit in the current pair
        int diff = lfs_min(dir->m.count - dir->id, off);
        dir->id += diff;
        dir->pos += diff;
        off -= diff;
    }

    return 0;
}
2795
lfs_dir_rawtell(lfs_t * lfs,lfs_dir_t * dir)2796 static lfs_soff_t lfs_dir_rawtell(lfs_t *lfs, lfs_dir_t *dir) {
2797 (void)lfs;
2798 return dir->pos;
2799 }
2800
// Rewind an open directory to its first entry by refetching the head
// metadata pair and resetting the tracked position.
static int lfs_dir_rawrewind(lfs_t *lfs, lfs_dir_t *dir) {
    int err = lfs_dir_fetch(lfs, &dir->m, dir->head);
    if (!err) {
        dir->id = 0;
        dir->pos = 0;
    }
    return err;
}
2812
2813
2814 /// File index list operations ///
// Convert a file offset into a block index in the file's CTZ skip-list,
// updating *off in-place to the offset within that block.
//
// Each block except block 0 begins with skip-list pointers (4 bytes
// each, see lfs_ctz_extend), so blocks hold slightly less than
// block_size bytes of file data. The closed-form popcount expression
// below accounts for those pointers; it comes from littlefs's DESIGN
// documentation of CTZ skip-lists.
static int lfs_ctz_index(lfs_t *lfs, lfs_off_t *off) {
    lfs_off_t size = *off;
    // b = usable denominator after adjusting for pointer overhead
    lfs_off_t b = lfs->cfg->block_size - 2*4;
    lfs_off_t i = size / b;
    if (i == 0) {
        // offset lands in the first block, which stores no pointers
        return 0;
    }

    // correct the estimate for the pointers stored in earlier blocks
    i = (size - 4*(lfs_popc(i-1)+2)) / b;
    // remaining offset within block i, past its lfs_popc(i) words of overhead
    *off = size - b*i - 4*lfs_popc(i);
    return i;
}
2827
// Find the block and in-block offset backing file position pos in a
// CTZ skip-list with the given head block and total file size.
static int lfs_ctz_find(lfs_t *lfs,
        const lfs_cache_t *pcache, lfs_cache_t *rcache,
        lfs_block_t head, lfs_size_t size,
        lfs_size_t pos, lfs_block_t *block, lfs_off_t *off) {
    if (size == 0) {
        // empty file, no blocks to find
        *block = LFS_BLOCK_NULL;
        *off = 0;
        return 0;
    }

    // index of the last block (where head points) and of the target
    lfs_off_t current = lfs_ctz_index(lfs, &(lfs_off_t){size-1});
    lfs_off_t target = lfs_ctz_index(lfs, &pos);

    while (current > target) {
        // take the largest backwards jump (a power of two, bounded by
        // the pointers the current block holds) that doesn't overshoot
        lfs_size_t skip = lfs_min(
                lfs_npw2(current-target+1) - 1,
                lfs_ctz(current));

        int err = lfs_bd_read(lfs,
                pcache, rcache, sizeof(head),
                head, 4*skip, &head, sizeof(head));
        head = lfs_fromle32(head);
        if (err) {
            return err;
        }

        current -= 1 << skip;
    }

    *block = head;
    *off = pos;
    return 0;
}
2861
2862 #ifndef LFS_READONLY
// Extend a CTZ skip-list with the given head/size by one block,
// returning the new block and the offset where writing may continue.
//
// If the last block is only partially full its contents are copied into
// the new block instead of appending. Bad blocks (erase/prog reporting
// CORRUPT) are handled by allocating a replacement and retrying.
static int lfs_ctz_extend(lfs_t *lfs,
        lfs_cache_t *pcache, lfs_cache_t *rcache,
        lfs_block_t head, lfs_size_t size,
        lfs_block_t *block, lfs_off_t *off) {
    while (true) {
        // go ahead and grab a block
        lfs_block_t nblock;
        int err = lfs_alloc(lfs, &nblock);
        if (err) {
            return err;
        }

        {
            err = lfs_bd_erase(lfs, nblock);
            if (err) {
                if (err == LFS_ERR_CORRUPT) {
                    goto relocate;
                }
                return err;
            }

            if (size == 0) {
                // first block of the file, nothing to link
                *block = nblock;
                *off = 0;
                return 0;
            }

            // find the index and end offset of the current last block
            lfs_size_t noff = size - 1;
            lfs_off_t index = lfs_ctz_index(lfs, &noff);
            noff = noff + 1;

            // just copy out the last block if it is incomplete
            if (noff != lfs->cfg->block_size) {
                for (lfs_off_t i = 0; i < noff; i++) {
                    uint8_t data;
                    err = lfs_bd_read(lfs,
                            NULL, rcache, noff-i,
                            head, i, &data, 1);
                    if (err) {
                        return err;
                    }

                    err = lfs_bd_prog(lfs,
                            pcache, rcache, true,
                            nblock, i, &data, 1);
                    if (err) {
                        if (err == LFS_ERR_CORRUPT) {
                            goto relocate;
                        }
                        return err;
                    }
                }

                *block = nblock;
                *off = noff;
                return 0;
            }

            // append block
            index += 1;
            // block at index stores ctz(index)+1 skip-list pointers
            lfs_size_t skips = lfs_ctz(index) + 1;
            lfs_block_t nhead = head;
            for (lfs_off_t i = 0; i < skips; i++) {
                nhead = lfs_tole32(nhead);
                err = lfs_bd_prog(lfs, pcache, rcache, true,
                        nblock, 4*i, &nhead, 4);
                nhead = lfs_fromle32(nhead);
                if (err) {
                    if (err == LFS_ERR_CORRUPT) {
                        goto relocate;
                    }
                    return err;
                }

                if (i != skips-1) {
                    // follow the chain to find the next, more distant,
                    // pointer target
                    err = lfs_bd_read(lfs,
                            NULL, rcache, sizeof(nhead),
                            nhead, 4*i, &nhead, sizeof(nhead));
                    nhead = lfs_fromle32(nhead);
                    if (err) {
                        return err;
                    }
                }
            }

            *block = nblock;
            *off = 4*skips;
            return 0;
        }

relocate:
        LFS_DEBUG("Bad block at 0x%"PRIx32, nblock);

        // just clear cache and try a new block
        lfs_cache_drop(lfs, pcache);
    }
}
2960 #endif
2961
// Call cb for every block in a CTZ skip-list, walking backwards from
// the head block. Stops early and returns cb's result if it is nonzero.
static int lfs_ctz_traverse(lfs_t *lfs,
        const lfs_cache_t *pcache, lfs_cache_t *rcache,
        lfs_block_t head, lfs_size_t size,
        int (*cb)(void*, lfs_block_t), void *data) {
    if (size == 0) {
        // empty file, no blocks to visit
        return 0;
    }

    // index of the last block in the list (where head points)
    lfs_off_t index = lfs_ctz_index(lfs, &(lfs_off_t){size-1});

    while (true) {
        int err = cb(data, head);
        if (err) {
            return err;
        }

        if (index == 0) {
            return 0;
        }

        // read one or two pointers at once; even indices let us report
        // two blocks per read
        lfs_block_t heads[2];
        int count = 2 - (index & 1);
        err = lfs_bd_read(lfs,
                pcache, rcache, count*sizeof(head),
                head, 0, &heads, count*sizeof(head));
        heads[0] = lfs_fromle32(heads[0]);
        heads[1] = lfs_fromle32(heads[1]);
        if (err) {
            return err;
        }

        for (int i = 0; i < count-1; i++) {
            err = cb(data, heads[i]);
            if (err) {
                return err;
            }
        }

        head = heads[count-1];
        index -= count;
    }
}
3004
3005
3006 /// Top level file operations ///
// Open a file at path with the given flags and file config.
//
// Creates the file's metadata entry if LFS_O_CREAT is set, loads its
// on-disk struct, fetches requested user attrs, and sets up the file
// cache (loading inline file contents into it). On failure cleans up
// any partially-acquired state via lfs_file_rawclose.
static int lfs_file_rawopencfg(lfs_t *lfs, lfs_file_t *file,
        const char *path, int flags,
        const struct lfs_file_config *cfg) {
#ifndef LFS_READONLY
    // deorphan if we haven't yet, needed at most once after poweron
    if ((flags & LFS_O_WRONLY) == LFS_O_WRONLY) {
        int err = lfs_fs_forceconsistency(lfs);
        if (err) {
            return err;
        }
    }
#else
    LFS_ASSERT((flags & LFS_O_RDONLY) == LFS_O_RDONLY);
#endif

    // setup simple file details
    int err;
    file->cfg = cfg;
    file->flags = flags;
    file->pos = 0;
    file->off = 0;
    file->cache.buffer = NULL;

    // allocate entry for file if it doesn't exist
    lfs_stag_t tag = lfs_dir_find(lfs, &file->m, &path, &file->id);
    if (tag < 0 && !(tag == LFS_ERR_NOENT && file->id != 0x3ff)) {
        err = tag;
        goto cleanup;
    }

    // get id, add to list of mdirs to catch update changes
    file->type = LFS_TYPE_REG;
    lfs_mlist_append(lfs, (struct lfs_mlist *)file);

    // note: the braces below balance across the #ifdef branches
#ifdef LFS_READONLY
    if (tag == LFS_ERR_NOENT) {
        err = LFS_ERR_NOENT;
        goto cleanup;
#else
    if (tag == LFS_ERR_NOENT) {
        if (!(flags & LFS_O_CREAT)) {
            err = LFS_ERR_NOENT;
            goto cleanup;
        }

        // check that name fits
        lfs_size_t nlen = strlen(path);
        if (nlen > lfs->name_max) {
            err = LFS_ERR_NAMETOOLONG;
            goto cleanup;
        }

        // get next slot and create entry to remember name
        err = lfs_dir_commit(lfs, &file->m, LFS_MKATTRS(
                {LFS_MKTAG(LFS_TYPE_CREATE, file->id, 0), NULL},
                {LFS_MKTAG(LFS_TYPE_REG, file->id, nlen), path},
                {LFS_MKTAG(LFS_TYPE_INLINESTRUCT, file->id, 0), NULL}));

        // it may happen that the file name doesn't fit in the metadata
        // blocks, e.g., a 256 byte file name will not fit in a 128 byte
        // block.
        err = (err == LFS_ERR_NOSPC) ? LFS_ERR_NAMETOOLONG : err;
        if (err) {
            goto cleanup;
        }

        tag = LFS_MKTAG(LFS_TYPE_INLINESTRUCT, 0, 0);
    } else if (flags & LFS_O_EXCL) {
        err = LFS_ERR_EXIST;
        goto cleanup;
#endif
    } else if (lfs_tag_type3(tag) != LFS_TYPE_REG) {
        err = LFS_ERR_ISDIR;
        goto cleanup;
#ifndef LFS_READONLY
    } else if (flags & LFS_O_TRUNC) {
        // truncate if requested
        tag = LFS_MKTAG(LFS_TYPE_INLINESTRUCT, file->id, 0);
        file->flags |= LFS_F_DIRTY;
#endif
    } else {
        // try to load what's on disk, if it's inlined we'll fix it later
        tag = lfs_dir_get(lfs, &file->m, LFS_MKTAG(0x700, 0x3ff, 0),
                LFS_MKTAG(LFS_TYPE_STRUCT, file->id, 8), &file->ctz);
        if (tag < 0) {
            err = tag;
            goto cleanup;
        }
        lfs_ctz_fromle32(&file->ctz);
    }

    // fetch attrs
    for (unsigned i = 0; i < file->cfg->attr_count; i++) {
        // if opened for read / read-write operations
        if ((file->flags & LFS_O_RDONLY) == LFS_O_RDONLY) {
            lfs_stag_t res = lfs_dir_get(lfs, &file->m,
                    LFS_MKTAG(0x7ff, 0x3ff, 0),
                    LFS_MKTAG(LFS_TYPE_USERATTR + file->cfg->attrs[i].type,
                        file->id, file->cfg->attrs[i].size),
                    file->cfg->attrs[i].buffer);
            if (res < 0 && res != LFS_ERR_NOENT) {
                err = res;
                goto cleanup;
            }
        }

#ifndef LFS_READONLY
        // if opened for write / read-write operations
        if ((file->flags & LFS_O_WRONLY) == LFS_O_WRONLY) {
            if (file->cfg->attrs[i].size > lfs->attr_max) {
                err = LFS_ERR_NOSPC;
                goto cleanup;
            }

            file->flags |= LFS_F_DIRTY;
        }
#endif
    }

    // allocate buffer if needed
    if (file->cfg->buffer) {
        file->cache.buffer = file->cfg->buffer;
    } else {
        file->cache.buffer = lfs_malloc(lfs->cfg->cache_size);
        if (!file->cache.buffer) {
            err = LFS_ERR_NOMEM;
            goto cleanup;
        }
    }

    // zero to avoid information leak
    lfs_cache_zero(lfs, &file->cache);

    if (lfs_tag_type3(tag) == LFS_TYPE_INLINESTRUCT) {
        // load inline files
        file->ctz.head = LFS_BLOCK_INLINE;
        file->ctz.size = lfs_tag_size(tag);
        file->flags |= LFS_F_INLINE;
        file->cache.block = file->ctz.head;
        file->cache.off = 0;
        file->cache.size = lfs->cfg->cache_size;

        // don't always read (may be new/trunc file)
        if (file->ctz.size > 0) {
            lfs_stag_t res = lfs_dir_get(lfs, &file->m,
                    LFS_MKTAG(0x700, 0x3ff, 0),
                    LFS_MKTAG(LFS_TYPE_STRUCT, file->id,
                        lfs_min(file->cache.size, 0x3fe)),
                    file->cache.buffer);
            if (res < 0) {
                err = res;
                goto cleanup;
            }
        }
    }

    return 0;

cleanup:
    // clean up lingering resources
#ifndef LFS_READONLY
    file->flags |= LFS_F_ERRED;
#endif
    lfs_file_rawclose(lfs, file);
    return err;
}
3172
3173 #ifndef LFS_NO_MALLOC
3174 static int lfs_file_rawopen(lfs_t *lfs, lfs_file_t *file,
3175 const char *path, int flags) {
3176 static const struct lfs_file_config defaults = {0};
3177 int err = lfs_file_rawopencfg(lfs, file, path, flags, &defaults);
3178 return err;
3179 }
3180 #endif
3181
3182 static int lfs_file_rawclose(lfs_t *lfs, lfs_file_t *file) {
3183 #ifndef LFS_READONLY
3184 int err = lfs_file_rawsync(lfs, file);
3185 #else
3186 int err = 0;
3187 #endif
3188
3189 // remove from list of mdirs
3190 lfs_mlist_remove(lfs, (struct lfs_mlist*)file);
3191
3192 // clean up memory
3193 if (!file->cfg->buffer) {
3194 lfs_free(file->cache.buffer);
3195 }
3196
3197 return err;
3198 }
3199
3200
3201 #ifndef LFS_READONLY
// Move the file's currently-open block to a freshly allocated block,
// copying over the file->off bytes written so far. Used when a prog
// reports a bad block and when outlining an inline file. Retries with
// another block if the new block is also bad.
static int lfs_file_relocate(lfs_t *lfs, lfs_file_t *file) {
    while (true) {
        // just relocate what exists into new block
        lfs_block_t nblock;
        int err = lfs_alloc(lfs, &nblock);
        if (err) {
            return err;
        }

        err = lfs_bd_erase(lfs, nblock);
        if (err) {
            if (err == LFS_ERR_CORRUPT) {
                goto relocate;
            }
            return err;
        }

        // either read from dirty cache or disk
        for (lfs_off_t i = 0; i < file->off; i++) {
            uint8_t data;
            if (file->flags & LFS_F_INLINE) {
                // inline data lives in the metadata pair, not a block
                err = lfs_dir_getread(lfs, &file->m,
                        // note we evict inline files before they can be dirty
                        NULL, &file->cache, file->off-i,
                        LFS_MKTAG(0xfff, 0x1ff, 0),
                        LFS_MKTAG(LFS_TYPE_INLINESTRUCT, file->id, 0),
                        i, &data, 1);
                if (err) {
                    return err;
                }
            } else {
                err = lfs_bd_read(lfs,
                        &file->cache, &lfs->rcache, file->off-i,
                        file->block, i, &data, 1);
                if (err) {
                    return err;
                }
            }

            err = lfs_bd_prog(lfs,
                    &lfs->pcache, &lfs->rcache, true,
                    nblock, i, &data, 1);
            if (err) {
                if (err == LFS_ERR_CORRUPT) {
                    goto relocate;
                }
                return err;
            }
        }

        // copy over new state of file
        memcpy(file->cache.buffer, lfs->pcache.buffer, lfs->cfg->cache_size);
        file->cache.block = lfs->pcache.block;
        file->cache.off = lfs->pcache.off;
        file->cache.size = lfs->pcache.size;
        lfs_cache_zero(lfs, &lfs->pcache);

        file->block = nblock;
        file->flags |= LFS_F_WRITING;
        return 0;

relocate:
        LFS_DEBUG("Bad block at 0x%"PRIx32, nblock);

        // just clear cache and try a new block
        lfs_cache_drop(lfs, &lfs->pcache);
    }
}
3270 #endif
3271
3272 #ifndef LFS_READONLY
3273 static int lfs_file_outline(lfs_t *lfs, lfs_file_t *file) {
3274 file->off = file->pos;
3275 lfs_alloc_ack(lfs);
3276 int err = lfs_file_relocate(lfs, file);
3277 if (err) {
3278 return err;
3279 }
3280
3281 file->flags &= ~LFS_F_INLINE;
3282 return 0;
3283 }
3284 #endif
3285
// Flush the file's cached state out to disk.
//
// If a read was in progress, the read cache is simply dropped. If a write
// was in progress, any original data past the write position is copied
// over from the old ctz list, the cached block is written out (relocating
// to a fresh block if the current one turns out to be bad), and the
// in-memory ctz head/size are updated to the new state. The file position
// is preserved across the flush.
static int lfs_file_flush(lfs_t *lfs, lfs_file_t *file) {
    if (file->flags & LFS_F_READING) {
        if (!(file->flags & LFS_F_INLINE)) {
            lfs_cache_drop(lfs, &file->cache);
        }
        file->flags &= ~LFS_F_READING;
    }

#ifndef LFS_READONLY
    if (file->flags & LFS_F_WRITING) {
        lfs_off_t pos = file->pos;

        if (!(file->flags & LFS_F_INLINE)) {
            // copy over anything after current branch
            // (temporary read-only handle onto the old ctz list, borrowing
            // the global read cache as its file cache)
            lfs_file_t orig = {
                .ctz.head = file->ctz.head,
                .ctz.size = file->ctz.size,
                .flags = LFS_O_RDONLY,
                .pos = file->pos,
                .cache = lfs->rcache,
            };
            lfs_cache_drop(lfs, &lfs->rcache);

            while (file->pos < file->ctz.size) {
                // copy over a byte at a time, leave it up to caching
                // to make this efficient
                uint8_t data;
                lfs_ssize_t res = lfs_file_flushedread(lfs, &orig, &data, 1);
                if (res < 0) {
                    return res;
                }

                res = lfs_file_flushedwrite(lfs, file, &data, 1);
                if (res < 0) {
                    return res;
                }

                // keep our reference to the rcache in sync
                // (orig.cache aliases rcache's buffer, so drop both)
                if (lfs->rcache.block != LFS_BLOCK_NULL) {
                    lfs_cache_drop(lfs, &orig.cache);
                    lfs_cache_drop(lfs, &lfs->rcache);
                }
            }

            // write out what we have
            while (true) {
                int err = lfs_bd_flush(lfs, &file->cache, &lfs->rcache, true);
                if (err) {
                    if (err == LFS_ERR_CORRUPT) {
                        goto relocate;
                    }
                    return err;
                }

                break;

relocate:
                LFS_DEBUG("Bad block at 0x%"PRIx32, file->block);
                err = lfs_file_relocate(lfs, file);
                if (err) {
                    return err;
                }
            }
        } else {
            file->pos = lfs_max(file->pos, file->ctz.size);
        }

        // actual file updates
        file->ctz.head = file->block;
        file->ctz.size = file->pos;
        file->flags &= ~LFS_F_WRITING;
        file->flags |= LFS_F_DIRTY;

        file->pos = pos;
    }
#endif

    return 0;
}
3365
#ifndef LFS_READONLY
// Sync the file to disk: flush any cached data, then, if the file is
// dirty, commit its metadata entry (either the inline data itself or the
// little-endian ctz reference) together with any user attributes.
//
// A file marked LFS_F_ERRED is left untouched — its state is unknown.
static int lfs_file_rawsync(lfs_t *lfs, lfs_file_t *file) {
    if (file->flags & LFS_F_ERRED) {
        // it's not safe to do anything if our file errored
        return 0;
    }

    int err = lfs_file_flush(lfs, file);
    if (err) {
        file->flags |= LFS_F_ERRED;
        return err;
    }


    if ((file->flags & LFS_F_DIRTY) &&
            !lfs_pair_isnull(file->m.pair)) {
        // update dir entry
        uint16_t type;
        const void *buffer;
        lfs_size_t size;
        struct lfs_ctz ctz;
        if (file->flags & LFS_F_INLINE) {
            // inline the whole file
            type = LFS_TYPE_INLINESTRUCT;
            buffer = file->cache.buffer;
            size = file->ctz.size;
        } else {
            // update the ctz reference
            type = LFS_TYPE_CTZSTRUCT;
            // copy ctz so alloc will work during a relocate
            ctz = file->ctz;
            lfs_ctz_tole32(&ctz);
            buffer = &ctz;
            size = sizeof(ctz);
        }

        // commit file data and attributes
        err = lfs_dir_commit(lfs, &file->m, LFS_MKATTRS(
                {LFS_MKTAG(type, file->id, size), buffer},
                {LFS_MKTAG(LFS_FROM_USERATTRS, file->id,
                    file->cfg->attr_count), file->cfg->attrs}));
        if (err) {
            file->flags |= LFS_F_ERRED;
            return err;
        }

        file->flags &= ~LFS_F_DIRTY;
    }

    return 0;
}
#endif
3418
// Read from the file at its current position, assuming any pending
// writes have already been flushed.
//
// Returns the number of bytes read (clamped at end-of-file), or a
// negative error code.
static lfs_ssize_t lfs_file_flushedread(lfs_t *lfs, lfs_file_t *file,
        void *buffer, lfs_size_t size) {
    uint8_t *data = buffer;
    lfs_size_t nsize = size;

    if (file->pos >= file->ctz.size) {
        // eof if past end
        return 0;
    }

    // clamp read to the end of the file
    size = lfs_min(size, file->ctz.size - file->pos);
    nsize = size;

    while (nsize > 0) {
        // check if we need a new block
        if (!(file->flags & LFS_F_READING) ||
                file->off == lfs->cfg->block_size) {
            if (!(file->flags & LFS_F_INLINE)) {
                // find the block containing pos via the ctz list
                int err = lfs_ctz_find(lfs, NULL, &file->cache,
                        file->ctz.head, file->ctz.size,
                        file->pos, &file->block, &file->off);
                if (err) {
                    return err;
                }
            } else {
                // inline files live in the metadata pair itself
                file->block = LFS_BLOCK_INLINE;
                file->off = file->pos;
            }

            file->flags |= LFS_F_READING;
        }

        // read as much as we can in current block
        lfs_size_t diff = lfs_min(nsize, lfs->cfg->block_size - file->off);
        if (file->flags & LFS_F_INLINE) {
            int err = lfs_dir_getread(lfs, &file->m,
                    NULL, &file->cache, lfs->cfg->block_size,
                    LFS_MKTAG(0xfff, 0x1ff, 0),
                    LFS_MKTAG(LFS_TYPE_INLINESTRUCT, file->id, 0),
                    file->off, data, diff);
            if (err) {
                return err;
            }
        } else {
            int err = lfs_bd_read(lfs,
                    NULL, &file->cache, lfs->cfg->block_size,
                    file->block, file->off, data, diff);
            if (err) {
                return err;
            }
        }

        file->pos += diff;
        file->off += diff;
        data += diff;
        nsize -= diff;
    }

    return size;
}
3479
3480 static lfs_ssize_t lfs_file_rawread(lfs_t *lfs, lfs_file_t *file,
3481 void *buffer, lfs_size_t size) {
3482 LFS_ASSERT((file->flags & LFS_O_RDONLY) == LFS_O_RDONLY);
3483
3484 #ifndef LFS_READONLY
3485 if (file->flags & LFS_F_WRITING) {
3486 // flush out any writes
3487 int err = lfs_file_flush(lfs, file);
3488 if (err) {
3489 return err;
3490 }
3491 }
3492 #endif
3493
3494 return lfs_file_flushedread(lfs, file, buffer, size);
3495 }
3496
3497
#ifndef LFS_READONLY
// Write to the file at its current position, assuming any pending reads
// have already been flushed.
//
// Outlines inline files that grow past the inline limit, extends the
// file with newly allocated blocks as needed, and relocates on bad
// blocks. Returns the number of bytes written, or a negative error code;
// on error the file is marked LFS_F_ERRED.
static lfs_ssize_t lfs_file_flushedwrite(lfs_t *lfs, lfs_file_t *file,
        const void *buffer, lfs_size_t size) {
    const uint8_t *data = buffer;
    lfs_size_t nsize = size;

    // inline limit: min(0x3fe, cache_size, metadata_max/8 or block_size/8)
    if ((file->flags & LFS_F_INLINE) &&
            lfs_max(file->pos+nsize, file->ctz.size) >
            lfs_min(0x3fe, lfs_min(
                lfs->cfg->cache_size,
                (lfs->cfg->metadata_max ?
                    lfs->cfg->metadata_max : lfs->cfg->block_size) / 8))) {
        // inline file doesn't fit anymore
        int err = lfs_file_outline(lfs, file);
        if (err) {
            file->flags |= LFS_F_ERRED;
            return err;
        }
    }

    while (nsize > 0) {
        // check if we need a new block
        if (!(file->flags & LFS_F_WRITING) ||
                file->off == lfs->cfg->block_size) {
            if (!(file->flags & LFS_F_INLINE)) {
                if (!(file->flags & LFS_F_WRITING) && file->pos > 0) {
                    // find out which block we're extending from
                    int err = lfs_ctz_find(lfs, NULL, &file->cache,
                            file->ctz.head, file->ctz.size,
                            file->pos-1, &file->block, &(lfs_off_t){0});
                    if (err) {
                        file->flags |= LFS_F_ERRED;
                        return err;
                    }

                    // mark cache as dirty since we may have read data into it
                    lfs_cache_zero(lfs, &file->cache);
                }

                // extend file with new blocks
                lfs_alloc_ack(lfs);
                int err = lfs_ctz_extend(lfs, &file->cache, &lfs->rcache,
                        file->block, file->pos,
                        &file->block, &file->off);
                if (err) {
                    file->flags |= LFS_F_ERRED;
                    return err;
                }
            } else {
                file->block = LFS_BLOCK_INLINE;
                file->off = file->pos;
            }

            file->flags |= LFS_F_WRITING;
        }

        // program as much as we can in current block
        lfs_size_t diff = lfs_min(nsize, lfs->cfg->block_size - file->off);
        while (true) {
            int err = lfs_bd_prog(lfs, &file->cache, &lfs->rcache, true,
                    file->block, file->off, data, diff);
            if (err) {
                if (err == LFS_ERR_CORRUPT) {
                    goto relocate;
                }
                file->flags |= LFS_F_ERRED;
                return err;
            }

            break;
relocate:
            // bad block, move what we have to a new block and retry
            err = lfs_file_relocate(lfs, file);
            if (err) {
                file->flags |= LFS_F_ERRED;
                return err;
            }
        }

        file->pos += diff;
        file->off += diff;
        data += diff;
        nsize -= diff;

        lfs_alloc_ack(lfs);
    }

    return size;
}
3586
3587 static lfs_ssize_t lfs_file_rawwrite(lfs_t *lfs, lfs_file_t *file,
3588 const void *buffer, lfs_size_t size) {
3589 LFS_ASSERT((file->flags & LFS_O_WRONLY) == LFS_O_WRONLY);
3590
3591 if (file->flags & LFS_F_READING) {
3592 // drop any reads
3593 int err = lfs_file_flush(lfs, file);
3594 if (err) {
3595 return err;
3596 }
3597 }
3598
3599 if ((file->flags & LFS_O_APPEND) && file->pos < file->ctz.size) {
3600 file->pos = file->ctz.size;
3601 }
3602
3603 if (file->pos + size > lfs->file_max) {
3604 // Larger than file limit?
3605 return LFS_ERR_FBIG;
3606 }
3607
3608 if (!(file->flags & LFS_F_WRITING) && file->pos > file->ctz.size) {
3609 // fill with zeros
3610 lfs_off_t pos = file->pos;
3611 file->pos = file->ctz.size;
3612
3613 while (file->pos < pos) {
3614 lfs_ssize_t res = lfs_file_flushedwrite(lfs, file, &(uint8_t){0}, 1);
3615 if (res < 0) {
3616 return res;
3617 }
3618 }
3619 }
3620
3621 lfs_ssize_t nsize = lfs_file_flushedwrite(lfs, file, buffer, size);
3622 if (nsize < 0) {
3623 return nsize;
3624 }
3625
3626 file->flags &= ~LFS_F_ERRED;
3627 return nsize;
3628 }
3629 #endif
3630
// Change the file position according to whence (SET/CUR/END).
//
// When only reading and the new offset still lands inside the cached
// window of the same ctz block, the seek avoids flushing and rereading.
// Returns the new position, or a negative error code.
static lfs_soff_t lfs_file_rawseek(lfs_t *lfs, lfs_file_t *file,
        lfs_soff_t off, int whence) {
    // find new pos
    lfs_off_t npos = file->pos;
    if (whence == LFS_SEEK_SET) {
        npos = off;
    } else if (whence == LFS_SEEK_CUR) {
        if ((lfs_soff_t)file->pos + off < 0) {
            // seeking before the start of the file is invalid
            return LFS_ERR_INVAL;
        } else {
            npos = file->pos + off;
        }
    } else if (whence == LFS_SEEK_END) {
        lfs_soff_t res = lfs_file_rawsize(lfs, file) + off;
        if (res < 0) {
            return LFS_ERR_INVAL;
        } else {
            npos = res;
        }
    }

    if (npos > lfs->file_max) {
        // file position out of range
        return LFS_ERR_INVAL;
    }

    if (file->pos == npos) {
        // noop - position has not changed
        return npos;
    }

    // if we're only reading and our new offset is still in the file's cache
    // we can avoid flushing and needing to reread the data
    if (
#ifndef LFS_READONLY
        !(file->flags & LFS_F_WRITING)
#else
        true
#endif
            ) {
        // lfs_ctz_index maps a file offset to (block index, offset in block)
        int oindex = lfs_ctz_index(lfs, &(lfs_off_t){file->pos});
        lfs_off_t noff = npos;
        int nindex = lfs_ctz_index(lfs, &noff);
        if (oindex == nindex
                && noff >= file->cache.off
                && noff < file->cache.off + file->cache.size) {
            file->pos = npos;
            file->off = noff;
            return npos;
        }
    }

    // write out everything beforehand, may be noop if rdonly
    int err = lfs_file_flush(lfs, file);
    if (err) {
        return err;
    }

    // update pos
    file->pos = npos;
    return npos;
}
3693
#ifndef LFS_READONLY
// Truncate or extend the file to exactly size bytes.
//
// Shrinking may revert the file to inline storage when it fits within
// the inline limit; growing appends zeros via lfs_file_rawwrite. The
// file position is preserved across the operation.
static int lfs_file_rawtruncate(lfs_t *lfs, lfs_file_t *file, lfs_off_t size) {
    LFS_ASSERT((file->flags & LFS_O_WRONLY) == LFS_O_WRONLY);

    if (size > LFS_FILE_MAX) {
        return LFS_ERR_INVAL;
    }

    lfs_off_t pos = file->pos;
    lfs_off_t oldsize = lfs_file_rawsize(lfs, file);
    if (size < oldsize) {
        // revert to inline file?
        // (same inline limit as the outline check in lfs_file_flushedwrite)
        if (size <= lfs_min(0x3fe, lfs_min(
                lfs->cfg->cache_size,
                (lfs->cfg->metadata_max ?
                    lfs->cfg->metadata_max : lfs->cfg->block_size) / 8))) {
            // flush+seek to head
            lfs_soff_t res = lfs_file_rawseek(lfs, file, 0, LFS_SEEK_SET);
            if (res < 0) {
                return (int)res;
            }

            // read our data into rcache temporarily
            lfs_cache_drop(lfs, &lfs->rcache);
            res = lfs_file_flushedread(lfs, file,
                    lfs->rcache.buffer, size);
            if (res < 0) {
                return (int)res;
            }

            // switch the in-memory state over to an inline file backed
            // by the file cache
            file->ctz.head = LFS_BLOCK_INLINE;
            file->ctz.size = size;
            file->flags |= LFS_F_DIRTY | LFS_F_READING | LFS_F_INLINE;
            file->cache.block = file->ctz.head;
            file->cache.off = 0;
            file->cache.size = lfs->cfg->cache_size;
            memcpy(file->cache.buffer, lfs->rcache.buffer, size);

        } else {
            // need to flush since directly changing metadata
            int err = lfs_file_flush(lfs, file);
            if (err) {
                return err;
            }

            // lookup new head in ctz skip list
            err = lfs_ctz_find(lfs, NULL, &file->cache,
                    file->ctz.head, file->ctz.size,
                    size-1, &file->block, &(lfs_off_t){0});
            if (err) {
                return err;
            }

            // need to set pos/block/off consistently so seeking back to
            // the old position does not get confused
            file->pos = size;
            file->ctz.head = file->block;
            file->ctz.size = size;
            file->flags |= LFS_F_DIRTY | LFS_F_READING;
        }
    } else if (size > oldsize) {
        // flush+seek if not already at end
        lfs_soff_t res = lfs_file_rawseek(lfs, file, 0, LFS_SEEK_END);
        if (res < 0) {
            return (int)res;
        }

        // fill with zeros
        while (file->pos < size) {
            res = lfs_file_rawwrite(lfs, file, &(uint8_t){0}, 1);
            if (res < 0) {
                return (int)res;
            }
        }
    }

    // restore pos
    lfs_soff_t res = lfs_file_rawseek(lfs, file, pos, LFS_SEEK_SET);
    if (res < 0) {
        return (int)res;
    }

    return 0;
}
#endif
3779
3780 static lfs_soff_t lfs_file_rawtell(lfs_t *lfs, lfs_file_t *file) {
3781 (void)lfs;
3782 return file->pos;
3783 }
3784
3785 static int lfs_file_rawrewind(lfs_t *lfs, lfs_file_t *file) {
3786 lfs_soff_t res = lfs_file_rawseek(lfs, file, 0, LFS_SEEK_SET);
3787 if (res < 0) {
3788 return (int)res;
3789 }
3790
3791 return 0;
3792 }
3793
3794 static lfs_soff_t lfs_file_rawsize(lfs_t *lfs, lfs_file_t *file) {
3795 (void)lfs;
3796
3797 #ifndef LFS_READONLY
3798 if (file->flags & LFS_F_WRITING) {
3799 return lfs_max(file->pos, file->ctz.size);
3800 }
3801 #endif
3802
3803 return file->ctz.size;
3804 }
3805
3806
3807 /// General fs operations ///
3808 static int lfs_rawstat(lfs_t *lfs, const char *path, struct lfs_info *info) {
3809 lfs_mdir_t cwd;
3810 lfs_stag_t tag = lfs_dir_find(lfs, &cwd, &path, NULL);
3811 if (tag < 0) {
3812 return (int)tag;
3813 }
3814
3815 return lfs_dir_getinfo(lfs, &cwd, lfs_tag_id(tag), info);
3816 }
3817
#ifndef LFS_READONLY
// Remove the file or directory at path.
//
// Directories must be empty. Removing a directory temporarily bumps the
// orphan count in gstate so a power loss mid-removal can be repaired on
// the next mount, and drops the directory from the tail list afterwards.
static int lfs_rawremove(lfs_t *lfs, const char *path) {
    // deorphan if we haven't yet, needed at most once after poweron
    int err = lfs_fs_forceconsistency(lfs);
    if (err) {
        return err;
    }

    lfs_mdir_t cwd;
    lfs_stag_t tag = lfs_dir_find(lfs, &cwd, &path, NULL);
    if (tag < 0 || lfs_tag_id(tag) == 0x3ff) {
        // not found, or id 0x3ff (the root) which cannot be removed
        return (tag < 0) ? (int)tag : LFS_ERR_INVAL;
    }

    struct lfs_mlist dir;
    dir.next = lfs->mlist;
    if (lfs_tag_type3(tag) == LFS_TYPE_DIR) {
        // must be empty before removal
        lfs_block_t pair[2];
        lfs_stag_t res = lfs_dir_get(lfs, &cwd, LFS_MKTAG(0x700, 0x3ff, 0),
                LFS_MKTAG(LFS_TYPE_STRUCT, lfs_tag_id(tag), 8), pair);
        if (res < 0) {
            return (int)res;
        }
        lfs_pair_fromle32(pair);

        err = lfs_dir_fetch(lfs, &dir.m, pair);
        if (err) {
            return err;
        }

        if (dir.m.count > 0 || dir.m.split) {
            return LFS_ERR_NOTEMPTY;
        }

        // mark fs as orphaned
        err = lfs_fs_preporphans(lfs, +1);
        if (err) {
            return err;
        }

        // I know it's crazy but yes, dir can be changed by our parent's
        // commit (if predecessor is child)
        dir.type = 0;
        dir.id = 0;
        lfs->mlist = &dir;
    }

    // delete the entry
    err = lfs_dir_commit(lfs, &cwd, LFS_MKATTRS(
            {LFS_MKTAG(LFS_TYPE_DELETE, lfs_tag_id(tag), 0), NULL}));
    if (err) {
        lfs->mlist = dir.next;
        return err;
    }

    lfs->mlist = dir.next;
    if (lfs_tag_type3(tag) == LFS_TYPE_DIR) {
        // fix orphan
        err = lfs_fs_preporphans(lfs, -1);
        if (err) {
            return err;
        }

        // find the dir's predecessor and unlink it from the tail list
        err = lfs_fs_pred(lfs, dir.m.pair, &cwd);
        if (err) {
            return err;
        }

        err = lfs_dir_drop(lfs, &cwd, &dir.m);
        if (err) {
            return err;
        }
    }

    return 0;
}
#endif
3896
#ifndef LFS_READONLY
// Rename/move oldpath to newpath.
//
// An existing entry at newpath is atomically replaced; if it is a
// directory it must be empty and match the old entry's type. Moves
// across metadata pairs are staged through gstate (lfs_fs_prepmove) so
// a power loss leaves either the old or the new entry, never both.
static int lfs_rawrename(lfs_t *lfs, const char *oldpath, const char *newpath) {
    // deorphan if we haven't yet, needed at most once after poweron
    int err = lfs_fs_forceconsistency(lfs);
    if (err) {
        return err;
    }

    // find old entry
    lfs_mdir_t oldcwd;
    lfs_stag_t oldtag = lfs_dir_find(lfs, &oldcwd, &oldpath, NULL);
    if (oldtag < 0 || lfs_tag_id(oldtag) == 0x3ff) {
        // missing, or the root (id 0x3ff) which cannot be moved
        return (oldtag < 0) ? (int)oldtag : LFS_ERR_INVAL;
    }

    // find new entry
    lfs_mdir_t newcwd;
    uint16_t newid;
    lfs_stag_t prevtag = lfs_dir_find(lfs, &newcwd, &newpath, &newid);
    if ((prevtag < 0 || lfs_tag_id(prevtag) == 0x3ff) &&
            !(prevtag == LFS_ERR_NOENT && newid != 0x3ff)) {
        return (prevtag < 0) ? (int)prevtag : LFS_ERR_INVAL;
    }

    // if we're in the same pair there's a few special cases...
    bool samepair = (lfs_pair_cmp(oldcwd.pair, newcwd.pair) == 0);
    uint16_t newoldid = lfs_tag_id(oldtag);

    struct lfs_mlist prevdir;
    prevdir.next = lfs->mlist;
    if (prevtag == LFS_ERR_NOENT) {
        // check that name fits
        lfs_size_t nlen = strlen(newpath);
        if (nlen > lfs->name_max) {
            return LFS_ERR_NAMETOOLONG;
        }

        // there is a small chance we are being renamed in the same
        // directory/ to an id less than our old id, the global update
        // to handle this is a bit messy
        if (samepair && newid <= newoldid) {
            newoldid += 1;
        }
    } else if (lfs_tag_type3(prevtag) != lfs_tag_type3(oldtag)) {
        return LFS_ERR_ISDIR;
    } else if (samepair && newid == newoldid) {
        // we're renaming to ourselves??
        return 0;
    } else if (lfs_tag_type3(prevtag) == LFS_TYPE_DIR) {
        // must be empty before removal
        lfs_block_t prevpair[2];
        lfs_stag_t res = lfs_dir_get(lfs, &newcwd, LFS_MKTAG(0x700, 0x3ff, 0),
                LFS_MKTAG(LFS_TYPE_STRUCT, newid, 8), prevpair);
        if (res < 0) {
            return (int)res;
        }
        lfs_pair_fromle32(prevpair);

        // must be empty before removal
        err = lfs_dir_fetch(lfs, &prevdir.m, prevpair);
        if (err) {
            return err;
        }

        if (prevdir.m.count > 0 || prevdir.m.split) {
            return LFS_ERR_NOTEMPTY;
        }

        // mark fs as orphaned
        err = lfs_fs_preporphans(lfs, +1);
        if (err) {
            return err;
        }

        // I know it's crazy but yes, dir can be changed by our parent's
        // commit (if predecessor is child)
        prevdir.type = 0;
        prevdir.id = 0;
        lfs->mlist = &prevdir;
    }

    if (!samepair) {
        // stage the cross-pair move in gstate
        lfs_fs_prepmove(lfs, newoldid, oldcwd.pair);
    }

    // move over all attributes
    err = lfs_dir_commit(lfs, &newcwd, LFS_MKATTRS(
            {LFS_MKTAG_IF(prevtag != LFS_ERR_NOENT,
                LFS_TYPE_DELETE, newid, 0), NULL},
            {LFS_MKTAG(LFS_TYPE_CREATE, newid, 0), NULL},
            {LFS_MKTAG(lfs_tag_type3(oldtag), newid, strlen(newpath)), newpath},
            {LFS_MKTAG(LFS_FROM_MOVE, newid, lfs_tag_id(oldtag)), &oldcwd},
            {LFS_MKTAG_IF(samepair,
                LFS_TYPE_DELETE, newoldid, 0), NULL}));
    if (err) {
        lfs->mlist = prevdir.next;
        return err;
    }

    // let commit clean up after move (if we're different! otherwise move
    // logic already fixed it for us)
    if (!samepair && lfs_gstate_hasmove(&lfs->gstate)) {
        // prep gstate and delete move id
        lfs_fs_prepmove(lfs, 0x3ff, NULL);
        err = lfs_dir_commit(lfs, &oldcwd, LFS_MKATTRS(
                {LFS_MKTAG(LFS_TYPE_DELETE, lfs_tag_id(oldtag), 0), NULL}));
        if (err) {
            lfs->mlist = prevdir.next;
            return err;
        }
    }

    lfs->mlist = prevdir.next;
    if (prevtag != LFS_ERR_NOENT
            && lfs_tag_type3(prevtag) == LFS_TYPE_DIR) {
        // fix orphan
        err = lfs_fs_preporphans(lfs, -1);
        if (err) {
            return err;
        }

        // find the replaced dir's predecessor and drop it from the
        // tail list
        err = lfs_fs_pred(lfs, prevdir.m.pair, &newcwd);
        if (err) {
            return err;
        }

        err = lfs_dir_drop(lfs, &newcwd, &prevdir.m);
        if (err) {
            return err;
        }
    }

    return 0;
}
#endif
4032
4033 static lfs_ssize_t lfs_rawgetattr(lfs_t *lfs, const char *path,
4034 uint8_t type, void *buffer, lfs_size_t size) {
4035 lfs_mdir_t cwd;
4036 lfs_stag_t tag = lfs_dir_find(lfs, &cwd, &path, NULL);
4037 if (tag < 0) {
4038 return tag;
4039 }
4040
4041 uint16_t id = lfs_tag_id(tag);
4042 if (id == 0x3ff) {
4043 // special case for root
4044 id = 0;
4045 int err = lfs_dir_fetch(lfs, &cwd, lfs->root);
4046 if (err) {
4047 return err;
4048 }
4049 }
4050
4051 tag = lfs_dir_get(lfs, &cwd, LFS_MKTAG(0x7ff, 0x3ff, 0),
4052 LFS_MKTAG(LFS_TYPE_USERATTR + type,
4053 id, lfs_min(size, lfs->attr_max)),
4054 buffer);
4055 if (tag < 0) {
4056 if (tag == LFS_ERR_NOENT) {
4057 return LFS_ERR_NOATTR;
4058 }
4059
4060 return tag;
4061 }
4062
4063 return lfs_tag_size(tag);
4064 }
4065
4066 #ifndef LFS_READONLY
4067 static int lfs_commitattr(lfs_t *lfs, const char *path,
4068 uint8_t type, const void *buffer, lfs_size_t size) {
4069 lfs_mdir_t cwd;
4070 lfs_stag_t tag = lfs_dir_find(lfs, &cwd, &path, NULL);
4071 if (tag < 0) {
4072 return tag;
4073 }
4074
4075 uint16_t id = lfs_tag_id(tag);
4076 if (id == 0x3ff) {
4077 // special case for root
4078 id = 0;
4079 int err = lfs_dir_fetch(lfs, &cwd, lfs->root);
4080 if (err) {
4081 return err;
4082 }
4083 }
4084
4085 return lfs_dir_commit(lfs, &cwd, LFS_MKATTRS(
4086 {LFS_MKTAG(LFS_TYPE_USERATTR + type, id, size), buffer}));
4087 }
4088 #endif
4089
4090 #ifndef LFS_READONLY
4091 static int lfs_rawsetattr(lfs_t *lfs, const char *path,
4092 uint8_t type, const void *buffer, lfs_size_t size) {
4093 if (size > lfs->attr_max) {
4094 return LFS_ERR_NOSPC;
4095 }
4096
4097 return lfs_commitattr(lfs, path, type, buffer, size);
4098 }
4099 #endif
4100
4101 #ifndef LFS_READONLY
4102 static int lfs_rawremoveattr(lfs_t *lfs, const char *path, uint8_t type) {
4103 return lfs_commitattr(lfs, path, type, NULL, 0x3ff);
4104 }
4105 #endif
4106
4107
4108 /// Filesystem operations ///
// Validate the configuration and set up in-memory state: caches,
// lookahead buffer, and size limits. Does not touch the disk.
//
// Buffers the user did not provide in cfg are heap-allocated; on error,
// anything allocated here is released via lfs_deinit.
static int lfs_init(lfs_t *lfs, const struct lfs_config *cfg) {
    lfs->cfg = cfg;
    lfs->block_count = cfg->block_count; // May be 0
    int err = 0;

#ifdef LFS_MULTIVERSION
    // this driver only supports minor version < current minor version
    LFS_ASSERT(!lfs->cfg->disk_version || (
            (0xffff & (lfs->cfg->disk_version >> 16))
                == LFS_DISK_VERSION_MAJOR
            && (0xffff & (lfs->cfg->disk_version >> 0))
                <= LFS_DISK_VERSION_MINOR));
#endif

    // check that bool is a truthy-preserving type
    //
    // note the most common reason for this failure is a before-c99 compiler,
    // which littlefs currently does not support
    LFS_ASSERT((bool)0x80000000);

    // validate that the lfs-cfg sizes were initiated properly before
    // performing any arithmetic logics with them
    LFS_ASSERT(lfs->cfg->read_size != 0);
    LFS_ASSERT(lfs->cfg->prog_size != 0);
    LFS_ASSERT(lfs->cfg->cache_size != 0);

    // check that block size is a multiple of cache size is a multiple
    // of prog and read sizes
    LFS_ASSERT(lfs->cfg->cache_size % lfs->cfg->read_size == 0);
    LFS_ASSERT(lfs->cfg->cache_size % lfs->cfg->prog_size == 0);
    LFS_ASSERT(lfs->cfg->block_size % lfs->cfg->cache_size == 0);

    // check that the block size is large enough to fit all ctz pointers
    LFS_ASSERT(lfs->cfg->block_size >= 128);
    // this is the exact calculation for all ctz pointers, if this fails
    // and the simpler assert above does not, math must be broken
    LFS_ASSERT(4*lfs_npw2(0xffffffff / (lfs->cfg->block_size-2*4))
            <= lfs->cfg->block_size);

    // block_cycles = 0 is no longer supported.
    //
    // block_cycles is the number of erase cycles before littlefs evicts
    // metadata logs as a part of wear leveling. Suggested values are in the
    // range of 100-1000, or set block_cycles to -1 to disable block-level
    // wear-leveling.
    LFS_ASSERT(lfs->cfg->block_cycles != 0);


    // setup read cache
    if (lfs->cfg->read_buffer) {
        lfs->rcache.buffer = lfs->cfg->read_buffer;
    } else {
        lfs->rcache.buffer = lfs_malloc(lfs->cfg->cache_size);
        if (!lfs->rcache.buffer) {
            err = LFS_ERR_NOMEM;
            goto cleanup;
        }
    }

    // setup program cache
    if (lfs->cfg->prog_buffer) {
        lfs->pcache.buffer = lfs->cfg->prog_buffer;
    } else {
        lfs->pcache.buffer = lfs_malloc(lfs->cfg->cache_size);
        if (!lfs->pcache.buffer) {
            err = LFS_ERR_NOMEM;
            goto cleanup;
        }
    }

    // zero to avoid information leaks
    lfs_cache_zero(lfs, &lfs->rcache);
    lfs_cache_zero(lfs, &lfs->pcache);

    // setup lookahead, must be multiple of 64-bits, 32-bit aligned
    LFS_ASSERT(lfs->cfg->lookahead_size > 0);
    LFS_ASSERT(lfs->cfg->lookahead_size % 8 == 0 &&
            (uintptr_t)lfs->cfg->lookahead_buffer % 4 == 0);
    if (lfs->cfg->lookahead_buffer) {
        lfs->free.buffer = lfs->cfg->lookahead_buffer;
    } else {
        lfs->free.buffer = lfs_malloc(lfs->cfg->lookahead_size);
        if (!lfs->free.buffer) {
            err = LFS_ERR_NOMEM;
            goto cleanup;
        }
    }

    // check that the size limits are sane
    // (a limit of 0 means "use the compile-time maximum")
    LFS_ASSERT(lfs->cfg->name_max <= LFS_NAME_MAX);
    lfs->name_max = lfs->cfg->name_max;
    if (!lfs->name_max) {
        lfs->name_max = LFS_NAME_MAX;
    }

    LFS_ASSERT(lfs->cfg->file_max <= LFS_FILE_MAX);
    lfs->file_max = lfs->cfg->file_max;
    if (!lfs->file_max) {
        lfs->file_max = LFS_FILE_MAX;
    }

    LFS_ASSERT(lfs->cfg->attr_max <= LFS_ATTR_MAX);
    lfs->attr_max = lfs->cfg->attr_max;
    if (!lfs->attr_max) {
        lfs->attr_max = LFS_ATTR_MAX;
    }

    LFS_ASSERT(lfs->cfg->metadata_max <= lfs->cfg->block_size);

    // setup default state
    lfs->root[0] = LFS_BLOCK_NULL;
    lfs->root[1] = LFS_BLOCK_NULL;
    lfs->mlist = NULL;
    lfs->seed = 0;
    lfs->gdisk = (lfs_gstate_t){0};
    lfs->gstate = (lfs_gstate_t){0};
    lfs->gdelta = (lfs_gstate_t){0};
#ifdef LFS_MIGRATE
    lfs->lfs1 = NULL;
#endif

    return 0;

cleanup:
    lfs_deinit(lfs);
    return err;
}
4236
4237 static int lfs_deinit(lfs_t *lfs) {
4238 // free allocated memory
4239 if (!lfs->cfg->read_buffer) {
4240 lfs_free(lfs->rcache.buffer);
4241 }
4242
4243 if (!lfs->cfg->prog_buffer) {
4244 lfs_free(lfs->pcache.buffer);
4245 }
4246
4247 if (!lfs->cfg->lookahead_buffer) {
4248 lfs_free(lfs->free.buffer);
4249 }
4250
4251 return 0;
4252 }
4253
4254
4255
#ifndef LFS_READONLY
// Format a block device with a fresh littlefs filesystem.
//
// Sets up the free lookahead, allocates a root metadata pair, commits
// the superblock entry, forces a compaction, and verifies the result
// can be fetched back. State is always deinited before returning; the
// caller mounts separately.
static int lfs_rawformat(lfs_t *lfs, const struct lfs_config *cfg) {
    int err = 0;
    {
        err = lfs_init(lfs, cfg);
        if (err) {
            return err;
        }

        // formatting requires an explicit block count
        LFS_ASSERT(cfg->block_count != 0);

        // create free lookahead
        memset(lfs->free.buffer, 0, lfs->cfg->lookahead_size);
        lfs->free.off = 0;
        lfs->free.size = lfs_min(8*lfs->cfg->lookahead_size,
                lfs->block_count);
        lfs->free.i = 0;
        lfs_alloc_ack(lfs);

        // create root dir
        lfs_mdir_t root;
        err = lfs_dir_alloc(lfs, &root);
        if (err) {
            goto cleanup;
        }

        // write one superblock
        lfs_superblock_t superblock = {
            .version = lfs_fs_disk_version(lfs),
            .block_size = lfs->cfg->block_size,
            .block_count = lfs->block_count,
            .name_max = lfs->name_max,
            .file_max = lfs->file_max,
            .attr_max = lfs->attr_max,
        };

        lfs_superblock_tole32(&superblock);
        err = lfs_dir_commit(lfs, &root, LFS_MKATTRS(
                {LFS_MKTAG(LFS_TYPE_CREATE, 0, 0), NULL},
                {LFS_MKTAG(LFS_TYPE_SUPERBLOCK, 0, 8), "littlefs"},
                {LFS_MKTAG(LFS_TYPE_INLINESTRUCT, 0, sizeof(superblock)),
                    &superblock}));
        if (err) {
            goto cleanup;
        }

        // force compaction to prevent accidentally mounting any
        // older version of littlefs that may live on disk
        root.erased = false;
        err = lfs_dir_commit(lfs, &root, NULL, 0);
        if (err) {
            goto cleanup;
        }

        // sanity check that fetch works
        err = lfs_dir_fetch(lfs, &root, (const lfs_block_t[2]){0, 1});
        if (err) {
            goto cleanup;
        }
    }

cleanup:
    lfs_deinit(lfs);
    return err;

}
#endif
4323
// Mount a littlefs filesystem.
//
// Walks the metadata tail list looking for the superblock, validating
// the on-disk version and configuration against ours, and collecting
// any pending gstate along the way. Cycles in a corrupted tail list are
// caught with Brent's cycle-detection algorithm. On failure the mount
// is unwound with lfs_rawunmount.
static int lfs_rawmount(lfs_t *lfs, const struct lfs_config *cfg) {
    int err = lfs_init(lfs, cfg);
    if (err) {
        return err;
    }

    // scan directory blocks for superblock and any global updates
    lfs_mdir_t dir = {.tail = {0, 1}};
    lfs_block_t tortoise[2] = {LFS_BLOCK_NULL, LFS_BLOCK_NULL};
    lfs_size_t tortoise_i = 1;
    lfs_size_t tortoise_period = 1;
    while (!lfs_pair_isnull(dir.tail)) {
        // detect cycles with Brent's algorithm
        if (lfs_pair_issync(dir.tail, tortoise)) {
            LFS_WARN("Cycle detected in tail list");
            err = LFS_ERR_CORRUPT;
            goto cleanup;
        }
        if (tortoise_i == tortoise_period) {
            // teleport the tortoise and double the period
            tortoise[0] = dir.tail[0];
            tortoise[1] = dir.tail[1];
            tortoise_i = 0;
            tortoise_period *= 2;
        }
        tortoise_i += 1;

        // fetch next block in tail list
        lfs_stag_t tag = lfs_dir_fetchmatch(lfs, &dir, dir.tail,
                LFS_MKTAG(0x7ff, 0x3ff, 0),
                LFS_MKTAG(LFS_TYPE_SUPERBLOCK, 0, 8),
                NULL,
                lfs_dir_find_match, &(struct lfs_dir_find_match){
                    lfs, "littlefs", 8});
        if (tag < 0) {
            err = tag;
            goto cleanup;
        }

        // has superblock?
        if (tag && !lfs_tag_isdelete(tag)) {
            // update root
            lfs->root[0] = dir.pair[0];
            lfs->root[1] = dir.pair[1];

            // grab superblock
            lfs_superblock_t superblock;
            tag = lfs_dir_get(lfs, &dir, LFS_MKTAG(0x7ff, 0x3ff, 0),
                    LFS_MKTAG(LFS_TYPE_INLINESTRUCT, 0, sizeof(superblock)),
                    &superblock);
            if (tag < 0) {
                err = tag;
                goto cleanup;
            }
            lfs_superblock_fromle32(&superblock);

            // check version
            uint16_t major_version = (0xffff & (superblock.version >> 16));
            uint16_t minor_version = (0xffff & (superblock.version >> 0));
            if (major_version != lfs_fs_disk_version_major(lfs)
                    || minor_version > lfs_fs_disk_version_minor(lfs)) {
                LFS_ERROR("Invalid version "
                        "v%"PRIu16".%"PRIu16" != v%"PRIu16".%"PRIu16,
                        major_version,
                        minor_version,
                        lfs_fs_disk_version_major(lfs),
                        lfs_fs_disk_version_minor(lfs));
                err = LFS_ERR_INVAL;
                goto cleanup;
            }

            // found older minor version? set an in-device only bit in the
            // gstate so we know we need to rewrite the superblock before
            // the first write
            if (minor_version < lfs_fs_disk_version_minor(lfs)) {
                LFS_DEBUG("Found older minor version "
                        "v%"PRIu16".%"PRIu16" < v%"PRIu16".%"PRIu16,
                        major_version,
                        minor_version,
                        lfs_fs_disk_version_major(lfs),
                        lfs_fs_disk_version_minor(lfs));
                // note this bit is reserved on disk, so fetching more gstate
                // will not interfere here
                lfs_fs_prepsuperblock(lfs, true);
            }

            // check superblock configuration
            if (superblock.name_max) {
                if (superblock.name_max > lfs->name_max) {
                    LFS_ERROR("Unsupported name_max (%"PRIu32" > %"PRIu32")",
                            superblock.name_max, lfs->name_max);
                    err = LFS_ERR_INVAL;
                    goto cleanup;
                }

                lfs->name_max = superblock.name_max;
            }

            if (superblock.file_max) {
                if (superblock.file_max > lfs->file_max) {
                    LFS_ERROR("Unsupported file_max (%"PRIu32" > %"PRIu32")",
                            superblock.file_max, lfs->file_max);
                    err = LFS_ERR_INVAL;
                    goto cleanup;
                }

                lfs->file_max = superblock.file_max;
            }

            if (superblock.attr_max) {
                if (superblock.attr_max > lfs->attr_max) {
                    LFS_ERROR("Unsupported attr_max (%"PRIu32" > %"PRIu32")",
                            superblock.attr_max, lfs->attr_max);
                    err = LFS_ERR_INVAL;
                    goto cleanup;
                }

                lfs->attr_max = superblock.attr_max;
            }

            // this is where we get the block_count from disk if block_count=0
            if (lfs->cfg->block_count
                    && superblock.block_count != lfs->cfg->block_count) {
                LFS_ERROR("Invalid block count (%"PRIu32" != %"PRIu32")",
                        superblock.block_count, lfs->cfg->block_count);
                err = LFS_ERR_INVAL;
                goto cleanup;
            }

            lfs->block_count = superblock.block_count;

            if (superblock.block_size != lfs->cfg->block_size) {
                LFS_ERROR("Invalid block size (%"PRIu32" != %"PRIu32")",
                        superblock.block_size, lfs->cfg->block_size);
                err = LFS_ERR_INVAL;
                goto cleanup;
            }
        }

        // has gstate?
        err = lfs_dir_getgstate(lfs, &dir, &lfs->gstate);
        if (err) {
            goto cleanup;
        }
    }

    // update littlefs with gstate
    if (!lfs_gstate_iszero(&lfs->gstate)) {
        LFS_DEBUG("Found pending gstate 0x%08"PRIx32"%08"PRIx32"%08"PRIx32,
                lfs->gstate.tag,
                lfs->gstate.pair[0],
                lfs->gstate.pair[1]);
    }
    lfs->gstate.tag += !lfs_tag_isvalid(lfs->gstate.tag);
    lfs->gdisk = lfs->gstate;

    // setup free lookahead, to distribute allocations uniformly across
    // boots, we start the allocator at a random location
    lfs->free.off = lfs->seed % lfs->block_count;
    lfs_alloc_drop(lfs);

    return 0;

cleanup:
    lfs_rawunmount(lfs);
    return err;
}
4490
4491 static int lfs_rawunmount(lfs_t *lfs) {
4492 return lfs_deinit(lfs);
4493 }
4494
4495
/// Filesystem-level operations ///
4497 static int lfs_fs_rawstat(lfs_t *lfs, struct lfs_fsinfo *fsinfo) {
4498 // if the superblock is up-to-date, we must be on the most recent
4499 // minor version of littlefs
4500 if (!lfs_gstate_needssuperblock(&lfs->gstate)) {
4501 fsinfo->disk_version = lfs_fs_disk_version(lfs);
4502
4503 // otherwise we need to read the minor version on disk
4504 } else {
4505 // fetch the superblock
4506 lfs_mdir_t dir;
4507 int err = lfs_dir_fetch(lfs, &dir, lfs->root);
4508 if (err) {
4509 return err;
4510 }
4511
4512 lfs_superblock_t superblock;
4513 lfs_stag_t tag = lfs_dir_get(lfs, &dir, LFS_MKTAG(0x7ff, 0x3ff, 0),
4514 LFS_MKTAG(LFS_TYPE_INLINESTRUCT, 0, sizeof(superblock)),
4515 &superblock);
4516 if (tag < 0) {
4517 return tag;
4518 }
4519 lfs_superblock_fromle32(&superblock);
4520
4521 // read the on-disk version
4522 fsinfo->disk_version = superblock.version;
4523 }
4524
4525 // filesystem geometry
4526 fsinfo->block_size = lfs->cfg->block_size;
4527 fsinfo->block_count = lfs->block_count;
4528
4529 // other on-disk configuration, we cache all of these for internal use
4530 fsinfo->name_max = lfs->name_max;
4531 fsinfo->file_max = lfs->file_max;
4532 fsinfo->attr_max = lfs->attr_max;
4533
4534 return 0;
4535 }
4536
// Traverse all blocks currently in use by the filesystem, calling cb
// on each one.
//
// Covers every metadata pair reachable through the threaded tail-list,
// the CTZ skip-lists of files found in them, and blocks referenced by
// open file handles. If includeorphans is true, pairs referenced by
// on-disk dirstructs are also reported even if potentially orphaned.
//
// Returns 0 on success, a negative error code on failure, or the first
// non-zero value returned by cb.
int lfs_fs_rawtraverse(lfs_t *lfs,
        int (*cb)(void *data, lfs_block_t block), void *data,
        bool includeorphans) {
    // iterate over metadata pairs
    lfs_mdir_t dir = {.tail = {0, 1}};

#ifdef LFS_MIGRATE
    // also consider v1 blocks during migration
    if (lfs->lfs1) {
        int err = lfs1_traverse(lfs, cb, data);
        if (err) {
            return err;
        }

        // during migration the v2 tail-list is threaded from the v2 root
        dir.tail[0] = lfs->root[0];
        dir.tail[1] = lfs->root[1];
    }
#endif

    // tortoise/period state for Brent's cycle detection below
    lfs_block_t tortoise[2] = {LFS_BLOCK_NULL, LFS_BLOCK_NULL};
    lfs_size_t tortoise_i = 1;
    lfs_size_t tortoise_period = 1;
    while (!lfs_pair_isnull(dir.tail)) {
        // detect cycles with Brent's algorithm
        if (lfs_pair_issync(dir.tail, tortoise)) {
            LFS_WARN("Cycle detected in tail list");
            return LFS_ERR_CORRUPT;
        }
        if (tortoise_i == tortoise_period) {
            tortoise[0] = dir.tail[0];
            tortoise[1] = dir.tail[1];
            tortoise_i = 0;
            tortoise_period *= 2;
        }
        tortoise_i += 1;

        // report both blocks of the metadata pair
        for (int i = 0; i < 2; i++) {
            int err = cb(data, dir.tail[i]);
            if (err) {
                return err;
            }
        }

        // iterate through ids in directory
        int err = lfs_dir_fetch(lfs, &dir, dir.tail);
        if (err) {
            return err;
        }

        for (uint16_t id = 0; id < dir.count; id++) {
            struct lfs_ctz ctz;
            lfs_stag_t tag = lfs_dir_get(lfs, &dir, LFS_MKTAG(0x700, 0x3ff, 0),
                    LFS_MKTAG(LFS_TYPE_STRUCT, id, sizeof(ctz)), &ctz);
            if (tag < 0) {
                if (tag == LFS_ERR_NOENT) {
                    // id has no struct tag, nothing to traverse
                    continue;
                }
                return tag;
            }
            lfs_ctz_fromle32(&ctz);

            if (lfs_tag_type3(tag) == LFS_TYPE_CTZSTRUCT) {
                // walk the file's CTZ skip-list
                err = lfs_ctz_traverse(lfs, NULL, &lfs->rcache,
                        ctz.head, ctz.size, cb, data);
                if (err) {
                    return err;
                }
            } else if (includeorphans &&
                    lfs_tag_type3(tag) == LFS_TYPE_DIRSTRUCT) {
                // report the referenced directory pair as well
                for (int i = 0; i < 2; i++) {
                    err = cb(data, (&ctz.head)[i]);
                    if (err) {
                        return err;
                    }
                }
            }
        }
    }

#ifndef LFS_READONLY
    // iterate over any open files
    for (lfs_file_t *f = (lfs_file_t*)lfs->mlist; f; f = f->next) {
        if (f->type != LFS_TYPE_REG) {
            continue;
        }

        // dirty, non-inline files may reference blocks via their ctz list
        if ((f->flags & LFS_F_DIRTY) && !(f->flags & LFS_F_INLINE)) {
            int err = lfs_ctz_traverse(lfs, &f->cache, &lfs->rcache,
                    f->ctz.head, f->ctz.size, cb, data);
            if (err) {
                return err;
            }
        }

        // files mid-write also reference the blocks written so far
        if ((f->flags & LFS_F_WRITING) && !(f->flags & LFS_F_INLINE)) {
            int err = lfs_ctz_traverse(lfs, &f->cache, &lfs->rcache,
                    f->block, f->pos, cb, data);
            if (err) {
                return err;
            }
        }
    }
#endif

    return 0;
}
4643
#ifndef LFS_READONLY
// Find the predecessor of pair in the threaded tail-list, i.e. the
// metadata pair whose tail points at pair.
//
// On success returns 0 with the predecessor fetched into pdir.
// Returns LFS_ERR_NOENT if pair is not linked in the tail-list, or a
// negative error code on failure.
static int lfs_fs_pred(lfs_t *lfs,
        const lfs_block_t pair[2], lfs_mdir_t *pdir) {
    // iterate over the tail-list of metadata pairs
    pdir->tail[0] = 0;
    pdir->tail[1] = 1;
    // tortoise/period state for Brent's cycle detection below
    lfs_block_t tortoise[2] = {LFS_BLOCK_NULL, LFS_BLOCK_NULL};
    lfs_size_t tortoise_i = 1;
    lfs_size_t tortoise_period = 1;
    while (!lfs_pair_isnull(pdir->tail)) {
        // detect cycles with Brent's algorithm
        if (lfs_pair_issync(pdir->tail, tortoise)) {
            LFS_WARN("Cycle detected in tail list");
            return LFS_ERR_CORRUPT;
        }
        if (tortoise_i == tortoise_period) {
            tortoise[0] = pdir->tail[0];
            tortoise[1] = pdir->tail[1];
            tortoise_i = 0;
            tortoise_period *= 2;
        }
        tortoise_i += 1;

        // found our pair?
        if (lfs_pair_cmp(pdir->tail, pair) == 0) {
            return 0;
        }

        int err = lfs_dir_fetch(lfs, pdir, pdir->tail);
        if (err) {
            return err;
        }
    }

    return LFS_ERR_NOENT;
}
#endif
4680
#ifndef LFS_READONLY
// Context passed to the lfs_fs_parent_match callback, pair is the
// metadata pair we are searching for a parent of.
struct lfs_fs_parent_match {
    lfs_t *lfs;
    const lfs_block_t pair[2];
};
#endif
4687
4688 #ifndef LFS_READONLY
4689 static int lfs_fs_parent_match(void *data,
4690 lfs_tag_t tag, const void *buffer) {
4691 struct lfs_fs_parent_match *find = data;
4692 lfs_t *lfs = find->lfs;
4693 const struct lfs_diskoff *disk = buffer;
4694 (void)tag;
4695
4696 lfs_block_t child[2];
4697 int err = lfs_bd_read(lfs,
4698 &lfs->pcache, &lfs->rcache, lfs->cfg->block_size,
4699 disk->block, disk->off, &child, sizeof(child));
4700 if (err) {
4701 return err;
4702 }
4703
4704 lfs_pair_fromle32(child);
4705 return (lfs_pair_cmp(child, find->pair) == 0) ? LFS_CMP_EQ : LFS_CMP_LT;
4706 }
4707 #endif
4708
#ifndef LFS_READONLY
// Scan the filesystem for the metadata pair containing a dirstruct
// entry that references pair, i.e. the directory's parent.
//
// On success returns the tag of the matching dirstruct entry with the
// parent's pair fetched into parent. Returns LFS_ERR_NOENT if no
// parent is found, or a negative error code on failure.
static lfs_stag_t lfs_fs_parent(lfs_t *lfs, const lfs_block_t pair[2],
        lfs_mdir_t *parent) {
    // use fetchmatch with callback to find pairs
    parent->tail[0] = 0;
    parent->tail[1] = 1;
    // tortoise/period state for Brent's cycle detection below
    lfs_block_t tortoise[2] = {LFS_BLOCK_NULL, LFS_BLOCK_NULL};
    lfs_size_t tortoise_i = 1;
    lfs_size_t tortoise_period = 1;
    while (!lfs_pair_isnull(parent->tail)) {
        // detect cycles with Brent's algorithm
        if (lfs_pair_issync(parent->tail, tortoise)) {
            LFS_WARN("Cycle detected in tail list");
            return LFS_ERR_CORRUPT;
        }
        if (tortoise_i == tortoise_period) {
            tortoise[0] = parent->tail[0];
            tortoise[1] = parent->tail[1];
            tortoise_i = 0;
            tortoise_period *= 2;
        }
        tortoise_i += 1;

        // fetch the next pair, matching any dirstruct that points at pair
        lfs_stag_t tag = lfs_dir_fetchmatch(lfs, parent, parent->tail,
                LFS_MKTAG(0x7ff, 0, 0x3ff),
                LFS_MKTAG(LFS_TYPE_DIRSTRUCT, 0, 8),
                NULL,
                lfs_fs_parent_match, &(struct lfs_fs_parent_match){
                    lfs, {pair[0], pair[1]}});
        if (tag && tag != LFS_ERR_NOENT) {
            return tag;
        }
    }

    return LFS_ERR_NOENT;
}
#endif
4746
4747 static void lfs_fs_prepsuperblock(lfs_t *lfs, bool needssuperblock) {
4748 lfs->gstate.tag = (lfs->gstate.tag & ~LFS_MKTAG(0, 0, 0x200))
4749 | (uint32_t)needssuperblock << 9;
4750 }
4751
#ifndef LFS_READONLY
// Adjust the orphan count tracked in the gstate tag by orphans
// (positive to add orphans, negative to remove them).
//
// The count is stored in the tag's size field, and the tag's top bit
// is kept in sync with whether any orphans remain.
static int lfs_fs_preporphans(lfs_t *lfs, int8_t orphans) {
    // the count must not underflow or overflow its field
    LFS_ASSERT(lfs_tag_size(lfs->gstate.tag) > 0x000 || orphans >= 0);
    LFS_ASSERT(lfs_tag_size(lfs->gstate.tag) < 0x1ff || orphans <= 0);
    lfs->gstate.tag += orphans;
    lfs->gstate.tag = ((lfs->gstate.tag & ~LFS_MKTAG(0x800, 0, 0)) |
            ((uint32_t)lfs_gstate_hasorphans(&lfs->gstate) << 31));

    return 0;
}
#endif
4763
4764 #ifndef LFS_READONLY
4765 static void lfs_fs_prepmove(lfs_t *lfs,
4766 uint16_t id, const lfs_block_t pair[2]) {
4767 lfs->gstate.tag = ((lfs->gstate.tag & ~LFS_MKTAG(0x7ff, 0x3ff, 0)) |
4768 ((id != 0x3ff) ? LFS_MKTAG(LFS_TYPE_DELETE, id, 0) : 0));
4769 lfs->gstate.pair[0] = (id != 0x3ff) ? pair[0] : 0;
4770 lfs->gstate.pair[1] = (id != 0x3ff) ? pair[1] : 0;
4771 }
4772 #endif
4773
#ifndef LFS_READONLY
// Rewrite the superblock if the gstate says it is out of date
// (e.g. an older minor version was found at mount time).
//
// No-op if the needs-superblock gstate bit is not set. Clears the bit
// on success.
static int lfs_fs_desuperblock(lfs_t *lfs) {
    if (!lfs_gstate_needssuperblock(&lfs->gstate)) {
        return 0;
    }

    LFS_DEBUG("Rewriting superblock {0x%"PRIx32", 0x%"PRIx32"}",
            lfs->root[0],
            lfs->root[1]);

    lfs_mdir_t root;
    int err = lfs_dir_fetch(lfs, &root, lfs->root);
    if (err) {
        return err;
    }

    // write a new superblock reflecting our current configuration
    lfs_superblock_t superblock = {
        .version = lfs_fs_disk_version(lfs),
        .block_size = lfs->cfg->block_size,
        .block_count = lfs->block_count,
        .name_max = lfs->name_max,
        .file_max = lfs->file_max,
        .attr_max = lfs->attr_max,
    };

    lfs_superblock_tole32(&superblock);
    err = lfs_dir_commit(lfs, &root, LFS_MKATTRS(
            {LFS_MKTAG(LFS_TYPE_INLINESTRUCT, 0, sizeof(superblock)),
                &superblock}));
    if (err) {
        return err;
    }

    // success, clear the needs-superblock bit
    lfs_fs_prepsuperblock(lfs, false);
    return 0;
}
#endif
4812
#ifndef LFS_READONLY
// Complete a move that was interrupted by powerloss.
//
// If the on-disk gstate records a pending move, delete the moved-from
// entry and clear the pending move. No-op if no move is pending.
static int lfs_fs_demove(lfs_t *lfs) {
    if (!lfs_gstate_hasmove(&lfs->gdisk)) {
        return 0;
    }

    // Fix bad moves
    LFS_DEBUG("Fixing move {0x%"PRIx32", 0x%"PRIx32"} 0x%"PRIx16,
            lfs->gdisk.pair[0],
            lfs->gdisk.pair[1],
            lfs_tag_id(lfs->gdisk.tag));

    // no other gstate is supported at this time, so if we found something else
    // something most likely went wrong in gstate calculation
    LFS_ASSERT(lfs_tag_type3(lfs->gdisk.tag) == LFS_TYPE_DELETE);

    // fetch and delete the moved entry
    lfs_mdir_t movedir;
    int err = lfs_dir_fetch(lfs, &movedir, lfs->gdisk.pair);
    if (err) {
        return err;
    }

    // prep gstate and delete move id
    uint16_t moveid = lfs_tag_id(lfs->gdisk.tag);
    lfs_fs_prepmove(lfs, 0x3ff, NULL);
    err = lfs_dir_commit(lfs, &movedir, LFS_MKATTRS(
            {LFS_MKTAG(LFS_TYPE_DELETE, moveid, 0), NULL}));
    if (err) {
        return err;
    }

    return 0;
}
#endif
4848
#ifndef LFS_READONLY
// Remove any orphaned metadata pairs from the threaded tail-list.
//
// Half-orphans (a parent whose dirstruct disagrees with the tail-list,
// caused by interrupted relocations) are re-synced, and full orphans
// (pairs with no parent at all, caused by interrupted removes/renames)
// are unlinked by stealing their tail and gstate. Full orphans are
// only removed when powerloss is true, since operations such as
// lfs_mkdir create orphans intentionally.
//
// No-op if the gstate reports no orphans. Clears the gstate orphan
// count on success.
static int lfs_fs_deorphan(lfs_t *lfs, bool powerloss) {
    if (!lfs_gstate_hasorphans(&lfs->gstate)) {
        return 0;
    }

    // Check for orphans in two separate passes:
    // - 1 for half-orphans (relocations)
    // - 2 for full-orphans (removes/renames)
    //
    // Two separate passes are needed as half-orphans can contain outdated
    // references to full-orphans, effectively hiding them from the deorphan
    // search.
    //
    int pass = 0;
    while (pass < 2) {
        // Fix any orphans
        lfs_mdir_t pdir = {.split = true, .tail = {0, 1}};
        lfs_mdir_t dir;
        bool moreorphans = false;

        // iterate over all metadata pairs in the tail-list
        while (!lfs_pair_isnull(pdir.tail)) {
            int err = lfs_dir_fetch(lfs, &dir, pdir.tail);
            if (err) {
                return err;
            }

            // check head blocks for orphans
            if (!pdir.split) {
                // check if we have a parent
                lfs_mdir_t parent;
                lfs_stag_t tag = lfs_fs_parent(lfs, pdir.tail, &parent);
                if (tag < 0 && tag != LFS_ERR_NOENT) {
                    return tag;
                }

                // pass 0: fix half-orphans, where the parent's dirstruct
                // disagrees with the tail-list
                if (pass == 0 && tag != LFS_ERR_NOENT) {
                    lfs_block_t pair[2];
                    lfs_stag_t state = lfs_dir_get(lfs, &parent,
                            LFS_MKTAG(0x7ff, 0x3ff, 0), tag, pair);
                    if (state < 0) {
                        return state;
                    }
                    lfs_pair_fromle32(pair);

                    if (!lfs_pair_issync(pair, pdir.tail)) {
                        // we have desynced
                        LFS_DEBUG("Fixing half-orphan "
                                "{0x%"PRIx32", 0x%"PRIx32"} "
                                "-> {0x%"PRIx32", 0x%"PRIx32"}",
                                pdir.tail[0], pdir.tail[1], pair[0], pair[1]);

                        // fix pending move in this pair? this looks like an
                        // optimization but is in fact _required_ since
                        // relocating may outdate the move.
                        uint16_t moveid = 0x3ff;
                        if (lfs_gstate_hasmovehere(&lfs->gstate, pdir.pair)) {
                            moveid = lfs_tag_id(lfs->gstate.tag);
                            LFS_DEBUG("Fixing move while fixing orphans "
                                    "{0x%"PRIx32", 0x%"PRIx32"} 0x%"PRIx16"\n",
                                    pdir.pair[0], pdir.pair[1], moveid);
                            lfs_fs_prepmove(lfs, 0x3ff, NULL);
                        }

                        lfs_pair_tole32(pair);
                        state = lfs_dir_orphaningcommit(lfs, &pdir, LFS_MKATTRS(
                                {LFS_MKTAG_IF(moveid != 0x3ff,
                                    LFS_TYPE_DELETE, moveid, 0), NULL},
                                {LFS_MKTAG(LFS_TYPE_SOFTTAIL, 0x3ff, 8),
                                    pair}));
                        lfs_pair_fromle32(pair);
                        if (state < 0) {
                            return state;
                        }

                        // did our commit create more orphans?
                        if (state == LFS_OK_ORPHANED) {
                            moreorphans = true;
                        }

                        // refetch tail
                        continue;
                    }
                }

                // note we only check for full orphans if we may have had a
                // power-loss, otherwise orphans are created intentionally
                // during operations such as lfs_mkdir
                if (pass == 1 && tag == LFS_ERR_NOENT && powerloss) {
                    // we are an orphan
                    LFS_DEBUG("Fixing orphan {0x%"PRIx32", 0x%"PRIx32"}",
                            pdir.tail[0], pdir.tail[1]);

                    // steal state
                    err = lfs_dir_getgstate(lfs, &dir, &lfs->gdelta);
                    if (err) {
                        return err;
                    }

                    // steal tail
                    lfs_pair_tole32(dir.tail);
                    int state = lfs_dir_orphaningcommit(lfs, &pdir, LFS_MKATTRS(
                            {LFS_MKTAG(LFS_TYPE_TAIL + dir.split, 0x3ff, 8),
                                dir.tail}));
                    lfs_pair_fromle32(dir.tail);
                    if (state < 0) {
                        return state;
                    }

                    // did our commit create more orphans?
                    if (state == LFS_OK_ORPHANED) {
                        moreorphans = true;
                    }

                    // refetch tail
                    continue;
                }
            }

            pdir = dir;
        }

        // if a commit orphaned more pairs, restart from pass 0
        pass = moreorphans ? 0 : pass+1;
    }

    // mark orphans as fixed
    return lfs_fs_preporphans(lfs, -lfs_gstate_getorphans(&lfs->gstate));
}
#endif
4979
4980 #ifndef LFS_READONLY
4981 static int lfs_fs_forceconsistency(lfs_t *lfs) {
4982 int err = lfs_fs_desuperblock(lfs);
4983 if (err) {
4984 return err;
4985 }
4986
4987 err = lfs_fs_demove(lfs);
4988 if (err) {
4989 return err;
4990 }
4991
4992 err = lfs_fs_deorphan(lfs, true);
4993 if (err) {
4994 return err;
4995 }
4996
4997 return 0;
4998 }
4999 #endif
5000
#ifndef LFS_READONLY
// Explicitly bring the filesystem into a consistent state and flush
// any pending gstate to disk.
//
// Builds on lfs_fs_forceconsistency, but additionally commits pending
// gstate so that nothing is left only in RAM.
int lfs_fs_rawmkconsistent(lfs_t *lfs) {
    // lfs_fs_forceconsistency does most of the work here
    int err = lfs_fs_forceconsistency(lfs);
    if (err) {
        return err;
    }

    // do we have any pending gstate?
    lfs_gstate_t delta = {0};
    lfs_gstate_xor(&delta, &lfs->gdisk);
    lfs_gstate_xor(&delta, &lfs->gstate);
    if (!lfs_gstate_iszero(&delta)) {
        // lfs_dir_commit will implicitly write out any pending gstate
        lfs_mdir_t root;
        err = lfs_dir_fetch(lfs, &root, lfs->root);
        if (err) {
            return err;
        }

        err = lfs_dir_commit(lfs, &root, NULL, 0);
        if (err) {
            return err;
        }
    }

    return 0;
}
#endif
5030
5031 static int lfs_fs_size_count(void *p, lfs_block_t block) {
5032 (void)block;
5033 lfs_size_t *size = p;
5034 *size += 1;
5035 return 0;
5036 }
5037
5038 static lfs_ssize_t lfs_fs_rawsize(lfs_t *lfs) {
5039 lfs_size_t size = 0;
5040 int err = lfs_fs_rawtraverse(lfs, lfs_fs_size_count, &size, false);
5041 if (err) {
5042 return err;
5043 }
5044
5045 return size;
5046 }
5047
#ifndef LFS_READONLY
// Grow the filesystem to block_count blocks.
//
// Shrinking is not supported; block_count must be >= the current
// block count. Updates the in-RAM block count and rewrites the
// superblock entry with the new count.
int lfs_fs_rawgrow(lfs_t *lfs, lfs_size_t block_count) {
    // shrinking is not supported
    LFS_ASSERT(block_count >= lfs->block_count);

    if (block_count > lfs->block_count) {
        lfs->block_count = block_count;

        // fetch the root
        lfs_mdir_t root;
        int err = lfs_dir_fetch(lfs, &root, lfs->root);
        if (err) {
            return err;
        }

        // update the superblock
        lfs_superblock_t superblock;
        lfs_stag_t tag = lfs_dir_get(lfs, &root, LFS_MKTAG(0x7ff, 0x3ff, 0),
                LFS_MKTAG(LFS_TYPE_INLINESTRUCT, 0, sizeof(superblock)),
                &superblock);
        if (tag < 0) {
            return tag;
        }
        lfs_superblock_fromle32(&superblock);

        superblock.block_count = lfs->block_count;

        lfs_superblock_tole32(&superblock);
        err = lfs_dir_commit(lfs, &root, LFS_MKATTRS(
                {tag, &superblock}));
        if (err) {
            return err;
        }
    }

    return 0;
}
#endif
5086
5087 #ifdef LFS_MIGRATE
////// Migration from littlefs v1 below this //////
5089
5090 /// Version info ///
5091
// Software library version of littlefs v1
// Major (top 16-bits), incremented on backwards incompatible changes
// Minor (bottom 16-bits), incremented on feature additions
#define LFS1_VERSION 0x00010007
#define LFS1_VERSION_MAJOR (0xffff & (LFS1_VERSION >> 16))
#define LFS1_VERSION_MINOR (0xffff & (LFS1_VERSION >> 0))

// Version of v1 on-disk data structures
// Major (top 16-bits), incremented on backwards incompatible changes
// Minor (bottom 16-bits), incremented on feature additions
#define LFS1_DISK_VERSION 0x00010001
#define LFS1_DISK_VERSION_MAJOR (0xffff & (LFS1_DISK_VERSION >> 16))
#define LFS1_DISK_VERSION_MINOR (0xffff & (LFS1_DISK_VERSION >> 0))
5105
5106
5107 /// v1 Definitions ///
5108
// File types used by littlefs v1 entries
enum lfs1_type {
    LFS1_TYPE_REG = 0x11,
    LFS1_TYPE_DIR = 0x22,
    LFS1_TYPE_SUPERBLOCK = 0x2e,
};

// In-RAM state for a mounted v1 filesystem, only the root pair is
// needed during migration
typedef struct lfs1 {
    lfs_block_t root[2];
} lfs1_t;
5119
// A v1 directory entry, off is the entry's offset in its metadata
// block, d mirrors the on-disk layout
typedef struct lfs1_entry {
    lfs_off_t off;

    struct lfs1_disk_entry {
        uint8_t type;   // entry type (see enum lfs1_type)
        uint8_t elen;   // length of entry-specific data
        uint8_t alen;   // length of attributes
        uint8_t nlen;   // length of name
        union {
            struct {
                lfs_block_t head;   // head of the file's ctz list
                lfs_size_t size;    // file size in bytes
            } file;
            lfs_block_t dir[2];     // directory's metadata pair
        } u;
    } d;
} lfs1_entry_t;
5137
// In-RAM state for an open v1 directory, d mirrors the on-disk
// directory header
typedef struct lfs1_dir {
    struct lfs1_dir *next;
    lfs_block_t pair[2];    // metadata pair currently fetched
    lfs_off_t off;          // offset of the next entry in the pair

    lfs_block_t head[2];    // first metadata pair of the directory
    lfs_off_t pos;          // logical position for iteration

    struct lfs1_disk_dir {
        uint32_t rev;           // revision count
        lfs_size_t size;        // dir size, high bit set if continued
        lfs_block_t tail[2];    // next metadata pair in the tail-list
    } d;
} lfs1_dir_t;
5152
// A v1 superblock, d mirrors the on-disk superblock entry
typedef struct lfs1_superblock {
    lfs_off_t off;

    struct lfs1_disk_superblock {
        uint8_t type;
        uint8_t elen;
        uint8_t alen;
        uint8_t nlen;
        lfs_block_t root[2];    // root directory's metadata pair
        uint32_t block_size;
        uint32_t block_count;
        uint32_t version;       // on-disk version, major<<16 | minor
        char magic[8];          // "littlefs"
    } d;
} lfs1_superblock_t;
5168
5169
5170 /// Low-level wrappers v1->v2 ///
// Fold a buffer into a running crc via lfs_crc.
static void lfs1_crc(uint32_t *crc, const void *buffer, size_t size) {
    uint32_t c = *crc;
    c = lfs_crc(c, buffer, size);
    *crc = c;
}
5174
// Read raw bytes from a block, hinting the full read size to the
// caching layer.
static int lfs1_bd_read(lfs_t *lfs, lfs_block_t block,
        lfs_off_t off, void *buffer, lfs_size_t size) {
    // if we ever do more than writes to alternating pairs,
    // this may need to consider pcache
    return lfs_bd_read(lfs, &lfs->pcache, &lfs->rcache, size,
            block, off, buffer, size);
}
5182
5183 static int lfs1_bd_crc(lfs_t *lfs, lfs_block_t block,
5184 lfs_off_t off, lfs_size_t size, uint32_t *crc) {
5185 for (lfs_off_t i = 0; i < size; i++) {
5186 uint8_t c;
5187 int err = lfs1_bd_read(lfs, block, off+i, &c, 1);
5188 if (err) {
5189 return err;
5190 }
5191
5192 lfs1_crc(crc, &c, 1);
5193 }
5194
5195 return 0;
5196 }
5197
5198
5199 /// Endian swapping functions ///
5200 static void lfs1_dir_fromle32(struct lfs1_disk_dir *d) {
5201 d->rev = lfs_fromle32(d->rev);
5202 d->size = lfs_fromle32(d->size);
5203 d->tail[0] = lfs_fromle32(d->tail[0]);
5204 d->tail[1] = lfs_fromle32(d->tail[1]);
5205 }
5206
5207 static void lfs1_dir_tole32(struct lfs1_disk_dir *d) {
5208 d->rev = lfs_tole32(d->rev);
5209 d->size = lfs_tole32(d->size);
5210 d->tail[0] = lfs_tole32(d->tail[0]);
5211 d->tail[1] = lfs_tole32(d->tail[1]);
5212 }
5213
5214 static void lfs1_entry_fromle32(struct lfs1_disk_entry *d) {
5215 d->u.dir[0] = lfs_fromle32(d->u.dir[0]);
5216 d->u.dir[1] = lfs_fromle32(d->u.dir[1]);
5217 }
5218
5219 static void lfs1_entry_tole32(struct lfs1_disk_entry *d) {
5220 d->u.dir[0] = lfs_tole32(d->u.dir[0]);
5221 d->u.dir[1] = lfs_tole32(d->u.dir[1]);
5222 }
5223
5224 static void lfs1_superblock_fromle32(struct lfs1_disk_superblock *d) {
5225 d->root[0] = lfs_fromle32(d->root[0]);
5226 d->root[1] = lfs_fromle32(d->root[1]);
5227 d->block_size = lfs_fromle32(d->block_size);
5228 d->block_count = lfs_fromle32(d->block_count);
5229 d->version = lfs_fromle32(d->version);
5230 }
5231
5232
/// Metadata pair and directory operations ///
5234 static inline lfs_size_t lfs1_entry_size(const lfs1_entry_t *entry) {
5235 return 4 + entry->d.elen + entry->d.alen + entry->d.nlen;
5236 }
5237
// Fetch the most recent valid revision of a v1 metadata pair into dir.
//
// Both blocks are checked; the block with the newest revision count
// and a valid crc wins. Returns LFS_ERR_CORRUPT if neither block holds
// valid metadata, or another negative error code on failure.
static int lfs1_dir_fetch(lfs_t *lfs,
        lfs1_dir_t *dir, const lfs_block_t pair[2]) {
    // copy out pair, otherwise may be aliasing dir
    const lfs_block_t tpair[2] = {pair[0], pair[1]};
    bool valid = false;

    // check both blocks for the most recent revision
    for (int i = 0; i < 2; i++) {
        struct lfs1_disk_dir test;
        int err = lfs1_bd_read(lfs, tpair[i], 0, &test, sizeof(test));
        lfs1_dir_fromle32(&test);
        if (err) {
            if (err == LFS_ERR_CORRUPT) {
                continue;
            }
            return err;
        }

        // skip older revisions
        if (valid && lfs_scmp(test.rev, dir->d.rev) < 0) {
            continue;
        }

        // sanity check the dir size (high bit is the continued flag)
        if ((0x7fffffff & test.size) < sizeof(test)+4 ||
                (0x7fffffff & test.size) > lfs->cfg->block_size) {
            continue;
        }

        // verify the crc over the full dir contents
        uint32_t crc = 0xffffffff;
        lfs1_dir_tole32(&test);
        lfs1_crc(&crc, &test, sizeof(test));
        lfs1_dir_fromle32(&test);
        err = lfs1_bd_crc(lfs, tpair[i], sizeof(test),
                (0x7fffffff & test.size) - sizeof(test), &crc);
        if (err) {
            if (err == LFS_ERR_CORRUPT) {
                continue;
            }
            return err;
        }

        if (crc != 0) {
            continue;
        }

        valid = true;

        // setup dir in case it's valid
        dir->pair[0] = tpair[(i+0) % 2];
        dir->pair[1] = tpair[(i+1) % 2];
        dir->off = sizeof(dir->d);
        dir->d = test;
    }

    if (!valid) {
        LFS_ERROR("Corrupted dir pair at {0x%"PRIx32", 0x%"PRIx32"}",
                tpair[0], tpair[1]);
        return LFS_ERR_CORRUPT;
    }

    return 0;
}
5299
// Read the next entry from a v1 directory, following the tail-list
// across metadata pairs as needed.
//
// Returns 0 with the entry in entry, LFS_ERR_NOENT when the directory
// is exhausted, or a negative error code on failure.
static int lfs1_dir_next(lfs_t *lfs, lfs1_dir_t *dir, lfs1_entry_t *entry) {
    // skip to the next metadata pair while the remaining space cannot
    // hold another entry (the trailing 4 bytes are excluded)
    while (dir->off + sizeof(entry->d) > (0x7fffffff & dir->d.size)-4) {
        if (!(0x80000000 & dir->d.size)) {
            // not continued, end of directory
            entry->off = dir->off;
            return LFS_ERR_NOENT;
        }

        int err = lfs1_dir_fetch(lfs, dir, dir->d.tail);
        if (err) {
            return err;
        }

        dir->off = sizeof(dir->d);
        dir->pos += sizeof(dir->d) + 4;
    }

    int err = lfs1_bd_read(lfs, dir->pair[0], dir->off,
            &entry->d, sizeof(entry->d));
    lfs1_entry_fromle32(&entry->d);
    if (err) {
        return err;
    }

    entry->off = dir->off;
    dir->off += lfs1_entry_size(entry);
    dir->pos += lfs1_entry_size(entry);
    return 0;
}
5328
5329 /// littlefs v1 specific operations ///
// Traverse all blocks in use by a v1 filesystem, calling cb on each,
// including any threaded v2 metadata pairs created mid-migration.
//
// Returns 0 on success, a negative error code on failure, or the first
// non-zero value returned by cb.
int lfs1_traverse(lfs_t *lfs, int (*cb)(void*, lfs_block_t), void *data) {
    if (lfs_pair_isnull(lfs->lfs1->root)) {
        return 0;
    }

    // iterate over metadata pairs
    lfs1_dir_t dir;
    lfs1_entry_t entry;
    lfs_block_t cwd[2] = {0, 1};

    while (true) {
        // report both blocks of the metadata pair
        for (int i = 0; i < 2; i++) {
            int err = cb(data, cwd[i]);
            if (err) {
                return err;
            }
        }

        int err = lfs1_dir_fetch(lfs, &dir, cwd);
        if (err) {
            return err;
        }

        // iterate over contents
        while (dir.off + sizeof(entry.d) <= (0x7fffffff & dir.d.size)-4) {
            err = lfs1_bd_read(lfs, dir.pair[0], dir.off,
                    &entry.d, sizeof(entry.d));
            lfs1_entry_fromle32(&entry.d);
            if (err) {
                return err;
            }

            dir.off += lfs1_entry_size(&entry);
            // file entries (moved or not) reference blocks via ctz lists
            if ((0x70 & entry.d.type) == (0x70 & LFS1_TYPE_REG)) {
                err = lfs_ctz_traverse(lfs, NULL, &lfs->rcache,
                        entry.d.u.file.head, entry.d.u.file.size, cb, data);
                if (err) {
                    return err;
                }
            }
        }

        // we also need to check if we contain a threaded v2 directory
        lfs_mdir_t dir2 = {.split=true, .tail={cwd[0], cwd[1]}};
        while (dir2.split) {
            err = lfs_dir_fetch(lfs, &dir2, dir2.tail);
            if (err) {
                // note: a failed v2 fetch just ends this scan
                break;
            }

            for (int i = 0; i < 2; i++) {
                err = cb(data, dir2.pair[i]);
                if (err) {
                    return err;
                }
            }
        }

        cwd[0] = dir.d.tail[0];
        cwd[1] = dir.d.tail[1];

        if (lfs_pair_isnull(cwd)) {
            break;
        }
    }

    return 0;
}
5398
// Check if a v1 entry's data appears anywhere else in the v1
// filesystem, which means the entry was moved and this copy is stale.
//
// e points at the entry's union (struct lfs1_disk_entry.u). Returns
// true/false, or a negative error code on failure.
static int lfs1_moved(lfs_t *lfs, const void *e) {
    if (lfs_pair_isnull(lfs->lfs1->root)) {
        return 0;
    }

    // skip superblock
    lfs1_dir_t cwd;
    int err = lfs1_dir_fetch(lfs, &cwd, (const lfs_block_t[2]){0, 1});
    if (err) {
        return err;
    }

    // iterate over all directory entries
    lfs1_entry_t entry;
    while (!lfs_pair_isnull(cwd.d.tail)) {
        err = lfs1_dir_fetch(lfs, &cwd, cwd.d.tail);
        if (err) {
            return err;
        }

        while (true) {
            err = lfs1_dir_next(lfs, &cwd, &entry);
            if (err && err != LFS_ERR_NOENT) {
                return err;
            }

            if (err == LFS_ERR_NOENT) {
                break;
            }

            // only entries without the moved bit count as a new home
            if (!(0x80 & entry.d.type) &&
                memcmp(&entry.d.u, e, sizeof(entry.d.u)) == 0) {
                return true;
            }
        }
    }

    return false;
}
5438
5439 /// Filesystem operations ///
// Mount a littlefs v1 filesystem in preparation for migration.
//
// Initializes shared state, loads the v1 superblock, and validates its
// magic and on-disk version. On failure the shared state is
// deinitialized before returning a negative error code.
static int lfs1_mount(lfs_t *lfs, struct lfs1 *lfs1,
        const struct lfs_config *cfg) {
    int err = 0;
    {
        err = lfs_init(lfs, cfg);
        if (err) {
            return err;
        }

        lfs->lfs1 = lfs1;
        lfs->lfs1->root[0] = LFS_BLOCK_NULL;
        lfs->lfs1->root[1] = LFS_BLOCK_NULL;

        // setup free lookahead
        lfs->free.off = 0;
        lfs->free.size = 0;
        lfs->free.i = 0;
        lfs_alloc_ack(lfs);

        // load superblock
        lfs1_dir_t dir;
        lfs1_superblock_t superblock;
        err = lfs1_dir_fetch(lfs, &dir, (const lfs_block_t[2]){0, 1});
        if (err && err != LFS_ERR_CORRUPT) {
            goto cleanup;
        }

        if (!err) {
            err = lfs1_bd_read(lfs, dir.pair[0], sizeof(dir.d),
                    &superblock.d, sizeof(superblock.d));
            lfs1_superblock_fromle32(&superblock.d);
            if (err) {
                goto cleanup;
            }

            lfs->lfs1->root[0] = superblock.d.root[0];
            lfs->lfs1->root[1] = superblock.d.root[1];
        }

        // note: if the fetch failed, err short-circuits the memcmp, so
        // superblock is never read uninitialized
        if (err || memcmp(superblock.d.magic, "littlefs", 8) != 0) {
            LFS_ERROR("Invalid superblock at {0x%"PRIx32", 0x%"PRIx32"}",
                    0, 1);
            err = LFS_ERR_CORRUPT;
            goto cleanup;
        }

        // major version must match exactly, minor must not be newer
        uint16_t major_version = (0xffff & (superblock.d.version >> 16));
        uint16_t minor_version = (0xffff & (superblock.d.version >> 0));
        if ((major_version != LFS1_DISK_VERSION_MAJOR ||
             minor_version > LFS1_DISK_VERSION_MINOR)) {
            LFS_ERROR("Invalid version v%d.%d", major_version, minor_version);
            err = LFS_ERR_INVAL;
            goto cleanup;
        }

        return 0;
    }

cleanup:
    lfs_deinit(lfs);
    return err;
}
5502
5503 static int lfs1_unmount(lfs_t *lfs) {
5504 return lfs_deinit(lfs);
5505 }
5506
5507 /// v1 migration ///
5508 static int lfs_rawmigrate(lfs_t *lfs, const struct lfs_config *cfg) {
5509 struct lfs1 lfs1;
5510
5511 // Indeterminate filesystem size not allowed for migration.
5512 LFS_ASSERT(cfg->block_count != 0);
5513
5514 int err = lfs1_mount(lfs, &lfs1, cfg);
5515 if (err) {
5516 return err;
5517 }
5518
5519 {
5520 // iterate through each directory, copying over entries
5521 // into new directory
5522 lfs1_dir_t dir1;
5523 lfs_mdir_t dir2;
5524 dir1.d.tail[0] = lfs->lfs1->root[0];
5525 dir1.d.tail[1] = lfs->lfs1->root[1];
5526 while (!lfs_pair_isnull(dir1.d.tail)) {
5527 // iterate old dir
5528 err = lfs1_dir_fetch(lfs, &dir1, dir1.d.tail);
5529 if (err) {
5530 goto cleanup;
5531 }
5532
5533 // create new dir and bind as temporary pretend root
5534 err = lfs_dir_alloc(lfs, &dir2);
5535 if (err) {
5536 goto cleanup;
5537 }
5538
5539 dir2.rev = dir1.d.rev;
5540 dir1.head[0] = dir1.pair[0];
5541 dir1.head[1] = dir1.pair[1];
5542 lfs->root[0] = dir2.pair[0];
5543 lfs->root[1] = dir2.pair[1];
5544
5545 err = lfs_dir_commit(lfs, &dir2, NULL, 0);
5546 if (err) {
5547 goto cleanup;
5548 }
5549
5550 while (true) {
5551 lfs1_entry_t entry1;
5552 err = lfs1_dir_next(lfs, &dir1, &entry1);
5553 if (err && err != LFS_ERR_NOENT) {
5554 goto cleanup;
5555 }
5556
5557 if (err == LFS_ERR_NOENT) {
5558 break;
5559 }
5560
5561 // check that entry has not been moved
5562 if (entry1.d.type & 0x80) {
5563 int moved = lfs1_moved(lfs, &entry1.d.u);
5564 if (moved < 0) {
5565 err = moved;
5566 goto cleanup;
5567 }
5568
5569 if (moved) {
5570 continue;
5571 }
5572
5573 entry1.d.type &= ~0x80;
5574 }
5575
5576 // also fetch name
5577 char name[LFS_NAME_MAX+1];
5578 memset(name, 0, sizeof(name));
5579 err = lfs1_bd_read(lfs, dir1.pair[0],
5580 entry1.off + 4+entry1.d.elen+entry1.d.alen,
5581 name, entry1.d.nlen);
5582 if (err) {
5583 goto cleanup;
5584 }
5585
5586 bool isdir = (entry1.d.type == LFS1_TYPE_DIR);
5587
5588 // create entry in new dir
5589 err = lfs_dir_fetch(lfs, &dir2, lfs->root);
5590 if (err) {
5591 goto cleanup;
5592 }
5593
5594 uint16_t id;
5595 err = lfs_dir_find(lfs, &dir2, &(const char*){name}, &id);
5596 if (!(err == LFS_ERR_NOENT && id != 0x3ff)) {
5597 err = (err < 0) ? err : LFS_ERR_EXIST;
5598 goto cleanup;
5599 }
5600
5601 lfs1_entry_tole32(&entry1.d);
5602 err = lfs_dir_commit(lfs, &dir2, LFS_MKATTRS(
5603 {LFS_MKTAG(LFS_TYPE_CREATE, id, 0), NULL},
5604 {LFS_MKTAG_IF_ELSE(isdir,
5605 LFS_TYPE_DIR, id, entry1.d.nlen,
5606 LFS_TYPE_REG, id, entry1.d.nlen),
5607 name},
5608 {LFS_MKTAG_IF_ELSE(isdir,
5609 LFS_TYPE_DIRSTRUCT, id, sizeof(entry1.d.u),
5610 LFS_TYPE_CTZSTRUCT, id, sizeof(entry1.d.u)),
5611 &entry1.d.u}));
5612 lfs1_entry_fromle32(&entry1.d);
5613 if (err) {
5614 goto cleanup;
5615 }
5616 }
5617
5618 if (!lfs_pair_isnull(dir1.d.tail)) {
5619 // find last block and update tail to thread into fs
5620 err = lfs_dir_fetch(lfs, &dir2, lfs->root);
5621 if (err) {
5622 goto cleanup;
5623 }
5624
5625 while (dir2.split) {
5626 err = lfs_dir_fetch(lfs, &dir2, dir2.tail);
5627 if (err) {
5628 goto cleanup;
5629 }
5630 }
5631
5632 lfs_pair_tole32(dir2.pair);
5633 err = lfs_dir_commit(lfs, &dir2, LFS_MKATTRS(
5634 {LFS_MKTAG(LFS_TYPE_SOFTTAIL, 0x3ff, 8), dir1.d.tail}));
5635 lfs_pair_fromle32(dir2.pair);
5636 if (err) {
5637 goto cleanup;
5638 }
5639 }
5640
5641 // Copy over first block to thread into fs. Unfortunately
5642 // if this fails there is not much we can do.
5643 LFS_DEBUG("Migrating {0x%"PRIx32", 0x%"PRIx32"} "
5644 "-> {0x%"PRIx32", 0x%"PRIx32"}",
5645 lfs->root[0], lfs->root[1], dir1.head[0], dir1.head[1]);
5646
5647 err = lfs_bd_erase(lfs, dir1.head[1]);
5648 if (err) {
5649 goto cleanup;
5650 }
5651
5652 err = lfs_dir_fetch(lfs, &dir2, lfs->root);
5653 if (err) {
5654 goto cleanup;
5655 }
5656
5657 for (lfs_off_t i = 0; i < dir2.off; i++) {
5658 uint8_t dat;
5659 err = lfs_bd_read(lfs,
5660 NULL, &lfs->rcache, dir2.off,
5661 dir2.pair[0], i, &dat, 1);
5662 if (err) {
5663 goto cleanup;
5664 }
5665
5666 err = lfs_bd_prog(lfs,
5667 &lfs->pcache, &lfs->rcache, true,
5668 dir1.head[1], i, &dat, 1);
5669 if (err) {
5670 goto cleanup;
5671 }
5672 }
5673
5674 err = lfs_bd_flush(lfs, &lfs->pcache, &lfs->rcache, true);
5675 if (err) {
5676 goto cleanup;
5677 }
5678 }
5679
5680 // Create new superblock. This marks a successful migration!
5681 err = lfs1_dir_fetch(lfs, &dir1, (const lfs_block_t[2]){0, 1});
5682 if (err) {
5683 goto cleanup;
5684 }
5685
5686 dir2.pair[0] = dir1.pair[0];
5687 dir2.pair[1] = dir1.pair[1];
5688 dir2.rev = dir1.d.rev;
5689 dir2.off = sizeof(dir2.rev);
5690 dir2.etag = 0xffffffff;
5691 dir2.count = 0;
5692 dir2.tail[0] = lfs->lfs1->root[0];
5693 dir2.tail[1] = lfs->lfs1->root[1];
5694 dir2.erased = false;
5695 dir2.split = true;
5696
5697 lfs_superblock_t superblock = {
5698 .version = LFS_DISK_VERSION,
5699 .block_size = lfs->cfg->block_size,
5700 .block_count = lfs->cfg->block_count,
5701 .name_max = lfs->name_max,
5702 .file_max = lfs->file_max,
5703 .attr_max = lfs->attr_max,
5704 };
5705
5706 lfs_superblock_tole32(&superblock);
5707 err = lfs_dir_commit(lfs, &dir2, LFS_MKATTRS(
5708 {LFS_MKTAG(LFS_TYPE_CREATE, 0, 0), NULL},
5709 {LFS_MKTAG(LFS_TYPE_SUPERBLOCK, 0, 8), "littlefs"},
5710 {LFS_MKTAG(LFS_TYPE_INLINESTRUCT, 0, sizeof(superblock)),
5711 &superblock}));
5712 if (err) {
5713 goto cleanup;
5714 }
5715
5716 // sanity check that fetch works
5717 err = lfs_dir_fetch(lfs, &dir2, (const lfs_block_t[2]){0, 1});
5718 if (err) {
5719 goto cleanup;
5720 }
5721
5722 // force compaction to prevent accidentally mounting v1
5723 dir2.erased = false;
5724 err = lfs_dir_commit(lfs, &dir2, NULL, 0);
5725 if (err) {
5726 goto cleanup;
5727 }
5728 }
5729
5730 cleanup:
5731 lfs1_unmount(lfs);
5732 return err;
5733 }
5734
5735 #endif
5736
5737
5738 /// Public API wrappers ///
5739
5740 // Here we can add tracing/thread safety easily
5741
// Thread-safe wrappers if enabled
//
// With LFS_THREADSAFE defined, LFS_LOCK/LFS_UNLOCK call the user-provided
// lock/unlock callbacks stored in the lfs_config struct. Otherwise they
// compile away: LFS_LOCK evaluates its argument and yields 0 ("success"),
// LFS_UNLOCK just evaluates its argument, so the wrappers below work
// identically in both configurations.
#ifdef LFS_THREADSAFE
#define LFS_LOCK(cfg) cfg->lock(cfg)
#define LFS_UNLOCK(cfg) cfg->unlock(cfg)
#else
#define LFS_LOCK(cfg) ((void)cfg, 0)
#define LFS_UNLOCK(cfg) ((void)cfg)
#endif
5750
5751 // Public API
5752 #ifndef LFS_READONLY
// Public wrapper for lfs_rawformat: takes the global lock (a no-op unless
// LFS_THREADSAFE), traces the full config on entry and the result on exit,
// and returns 0 on success or a negative error code (including a failed
// LFS_LOCK).
int lfs_format(lfs_t *lfs, const struct lfs_config *cfg) {
    int err = LFS_LOCK(cfg);
    if (err) {
        return err;
    }
    LFS_TRACE("lfs_format(%p, %p {.context=%p, "
                ".read=%p, .prog=%p, .erase=%p, .sync=%p, "
                ".read_size=%"PRIu32", .prog_size=%"PRIu32", "
                ".block_size=%"PRIu32", .block_count=%"PRIu32", "
                ".block_cycles=%"PRIu32", .cache_size=%"PRIu32", "
                ".lookahead_size=%"PRIu32", .read_buffer=%p, "
                ".prog_buffer=%p, .lookahead_buffer=%p, "
                ".name_max=%"PRIu32", .file_max=%"PRIu32", "
                ".attr_max=%"PRIu32"})",
            (void*)lfs, (void*)cfg, cfg->context,
            (void*)(uintptr_t)cfg->read, (void*)(uintptr_t)cfg->prog,
            (void*)(uintptr_t)cfg->erase, (void*)(uintptr_t)cfg->sync,
            cfg->read_size, cfg->prog_size, cfg->block_size, cfg->block_count,
            cfg->block_cycles, cfg->cache_size, cfg->lookahead_size,
            cfg->read_buffer, cfg->prog_buffer, cfg->lookahead_buffer,
            cfg->name_max, cfg->file_max, cfg->attr_max);

    err = lfs_rawformat(lfs, cfg);

    LFS_TRACE("lfs_format -> %d", err);
    LFS_UNLOCK(cfg);
    return err;
}
5781 #endif
5782
// Public wrapper for lfs_rawmount: takes the global lock, traces the full
// config on entry and the result on exit, and returns 0 on success or a
// negative error code (including a failed LFS_LOCK).
int lfs_mount(lfs_t *lfs, const struct lfs_config *cfg) {
    int err = LFS_LOCK(cfg);
    if (err) {
        return err;
    }
    LFS_TRACE("lfs_mount(%p, %p {.context=%p, "
                ".read=%p, .prog=%p, .erase=%p, .sync=%p, "
                ".read_size=%"PRIu32", .prog_size=%"PRIu32", "
                ".block_size=%"PRIu32", .block_count=%"PRIu32", "
                ".block_cycles=%"PRIu32", .cache_size=%"PRIu32", "
                ".lookahead_size=%"PRIu32", .read_buffer=%p, "
                ".prog_buffer=%p, .lookahead_buffer=%p, "
                ".name_max=%"PRIu32", .file_max=%"PRIu32", "
                ".attr_max=%"PRIu32"})",
            (void*)lfs, (void*)cfg, cfg->context,
            (void*)(uintptr_t)cfg->read, (void*)(uintptr_t)cfg->prog,
            (void*)(uintptr_t)cfg->erase, (void*)(uintptr_t)cfg->sync,
            cfg->read_size, cfg->prog_size, cfg->block_size, cfg->block_count,
            cfg->block_cycles, cfg->cache_size, cfg->lookahead_size,
            cfg->read_buffer, cfg->prog_buffer, cfg->lookahead_buffer,
            cfg->name_max, cfg->file_max, cfg->attr_max);

    err = lfs_rawmount(lfs, cfg);

    LFS_TRACE("lfs_mount -> %d", err);
    LFS_UNLOCK(cfg);
    return err;
}
5811
5812 int lfs_unmount(lfs_t *lfs) {
5813 int err = LFS_LOCK(lfs->cfg);
5814 if (err) {
5815 return err;
5816 }
5817 LFS_TRACE("lfs_unmount(%p)", (void*)lfs);
5818
5819 err = lfs_rawunmount(lfs);
5820
5821 LFS_TRACE("lfs_unmount -> %d", err);
5822 LFS_UNLOCK(lfs->cfg);
5823 return err;
5824 }
5825
5826 #ifndef LFS_READONLY
5827 int lfs_remove(lfs_t *lfs, const char *path) {
5828 int err = LFS_LOCK(lfs->cfg);
5829 if (err) {
5830 return err;
5831 }
5832 LFS_TRACE("lfs_remove(%p, \"%s\")", (void*)lfs, path);
5833
5834 err = lfs_rawremove(lfs, path);
5835
5836 LFS_TRACE("lfs_remove -> %d", err);
5837 LFS_UNLOCK(lfs->cfg);
5838 return err;
5839 }
5840 #endif
5841
5842 #ifndef LFS_READONLY
5843 int lfs_rename(lfs_t *lfs, const char *oldpath, const char *newpath) {
5844 int err = LFS_LOCK(lfs->cfg);
5845 if (err) {
5846 return err;
5847 }
5848 LFS_TRACE("lfs_rename(%p, \"%s\", \"%s\")", (void*)lfs, oldpath, newpath);
5849
5850 err = lfs_rawrename(lfs, oldpath, newpath);
5851
5852 LFS_TRACE("lfs_rename -> %d", err);
5853 LFS_UNLOCK(lfs->cfg);
5854 return err;
5855 }
5856 #endif
5857
5858 int lfs_stat(lfs_t *lfs, const char *path, struct lfs_info *info) {
5859 int err = LFS_LOCK(lfs->cfg);
5860 if (err) {
5861 return err;
5862 }
5863 LFS_TRACE("lfs_stat(%p, \"%s\", %p)", (void*)lfs, path, (void*)info);
5864
5865 err = lfs_rawstat(lfs, path, info);
5866
5867 LFS_TRACE("lfs_stat -> %d", err);
5868 LFS_UNLOCK(lfs->cfg);
5869 return err;
5870 }
5871
5872 lfs_ssize_t lfs_getattr(lfs_t *lfs, const char *path,
5873 uint8_t type, void *buffer, lfs_size_t size) {
5874 int err = LFS_LOCK(lfs->cfg);
5875 if (err) {
5876 return err;
5877 }
5878 LFS_TRACE("lfs_getattr(%p, \"%s\", %"PRIu8", %p, %"PRIu32")",
5879 (void*)lfs, path, type, buffer, size);
5880
5881 lfs_ssize_t res = lfs_rawgetattr(lfs, path, type, buffer, size);
5882
5883 LFS_TRACE("lfs_getattr -> %"PRId32, res);
5884 LFS_UNLOCK(lfs->cfg);
5885 return res;
5886 }
5887
5888 #ifndef LFS_READONLY
5889 int lfs_setattr(lfs_t *lfs, const char *path,
5890 uint8_t type, const void *buffer, lfs_size_t size) {
5891 int err = LFS_LOCK(lfs->cfg);
5892 if (err) {
5893 return err;
5894 }
5895 LFS_TRACE("lfs_setattr(%p, \"%s\", %"PRIu8", %p, %"PRIu32")",
5896 (void*)lfs, path, type, buffer, size);
5897
5898 err = lfs_rawsetattr(lfs, path, type, buffer, size);
5899
5900 LFS_TRACE("lfs_setattr -> %d", err);
5901 LFS_UNLOCK(lfs->cfg);
5902 return err;
5903 }
5904 #endif
5905
5906 #ifndef LFS_READONLY
5907 int lfs_removeattr(lfs_t *lfs, const char *path, uint8_t type) {
5908 int err = LFS_LOCK(lfs->cfg);
5909 if (err) {
5910 return err;
5911 }
5912 LFS_TRACE("lfs_removeattr(%p, \"%s\", %"PRIu8")", (void*)lfs, path, type);
5913
5914 err = lfs_rawremoveattr(lfs, path, type);
5915
5916 LFS_TRACE("lfs_removeattr -> %d", err);
5917 LFS_UNLOCK(lfs->cfg);
5918 return err;
5919 }
5920 #endif
5921
5922 #ifndef LFS_NO_MALLOC
5923 int lfs_file_open(lfs_t *lfs, lfs_file_t *file, const char *path, int flags) {
5924 int err = LFS_LOCK(lfs->cfg);
5925 if (err) {
5926 return err;
5927 }
5928 LFS_TRACE("lfs_file_open(%p, %p, \"%s\", %x)",
5929 (void*)lfs, (void*)file, path, flags);
5930 LFS_ASSERT(!lfs_mlist_isopen(lfs->mlist, (struct lfs_mlist*)file));
5931
5932 err = lfs_file_rawopen(lfs, file, path, flags);
5933
5934 LFS_TRACE("lfs_file_open -> %d", err);
5935 LFS_UNLOCK(lfs->cfg);
5936 return err;
5937 }
5938 #endif
5939
5940 int lfs_file_opencfg(lfs_t *lfs, lfs_file_t *file,
5941 const char *path, int flags,
5942 const struct lfs_file_config *cfg) {
5943 int err = LFS_LOCK(lfs->cfg);
5944 if (err) {
5945 return err;
5946 }
5947 LFS_TRACE("lfs_file_opencfg(%p, %p, \"%s\", %x, %p {"
5948 ".buffer=%p, .attrs=%p, .attr_count=%"PRIu32"})",
5949 (void*)lfs, (void*)file, path, flags,
5950 (void*)cfg, cfg->buffer, (void*)cfg->attrs, cfg->attr_count);
5951 LFS_ASSERT(!lfs_mlist_isopen(lfs->mlist, (struct lfs_mlist*)file));
5952
5953 err = lfs_file_rawopencfg(lfs, file, path, flags, cfg);
5954
5955 LFS_TRACE("lfs_file_opencfg -> %d", err);
5956 LFS_UNLOCK(lfs->cfg);
5957 return err;
5958 }
5959
5960 int lfs_file_close(lfs_t *lfs, lfs_file_t *file) {
5961 int err = LFS_LOCK(lfs->cfg);
5962 if (err) {
5963 return err;
5964 }
5965 LFS_TRACE("lfs_file_close(%p, %p)", (void*)lfs, (void*)file);
5966 LFS_ASSERT(lfs_mlist_isopen(lfs->mlist, (struct lfs_mlist*)file));
5967
5968 err = lfs_file_rawclose(lfs, file);
5969
5970 LFS_TRACE("lfs_file_close -> %d", err);
5971 LFS_UNLOCK(lfs->cfg);
5972 return err;
5973 }
5974
5975 #ifndef LFS_READONLY
5976 int lfs_file_sync(lfs_t *lfs, lfs_file_t *file) {
5977 int err = LFS_LOCK(lfs->cfg);
5978 if (err) {
5979 return err;
5980 }
5981 LFS_TRACE("lfs_file_sync(%p, %p)", (void*)lfs, (void*)file);
5982 LFS_ASSERT(lfs_mlist_isopen(lfs->mlist, (struct lfs_mlist*)file));
5983
5984 err = lfs_file_rawsync(lfs, file);
5985
5986 LFS_TRACE("lfs_file_sync -> %d", err);
5987 LFS_UNLOCK(lfs->cfg);
5988 return err;
5989 }
5990 #endif
5991
5992 lfs_ssize_t lfs_file_read(lfs_t *lfs, lfs_file_t *file,
5993 void *buffer, lfs_size_t size) {
5994 int err = LFS_LOCK(lfs->cfg);
5995 if (err) {
5996 return err;
5997 }
5998 LFS_TRACE("lfs_file_read(%p, %p, %p, %"PRIu32")",
5999 (void*)lfs, (void*)file, buffer, size);
6000 LFS_ASSERT(lfs_mlist_isopen(lfs->mlist, (struct lfs_mlist*)file));
6001
6002 lfs_ssize_t res = lfs_file_rawread(lfs, file, buffer, size);
6003
6004 LFS_TRACE("lfs_file_read -> %"PRId32, res);
6005 LFS_UNLOCK(lfs->cfg);
6006 return res;
6007 }
6008
6009 #ifndef LFS_READONLY
6010 lfs_ssize_t lfs_file_write(lfs_t *lfs, lfs_file_t *file,
6011 const void *buffer, lfs_size_t size) {
6012 int err = LFS_LOCK(lfs->cfg);
6013 if (err) {
6014 return err;
6015 }
6016 LFS_TRACE("lfs_file_write(%p, %p, %p, %"PRIu32")",
6017 (void*)lfs, (void*)file, buffer, size);
6018 LFS_ASSERT(lfs_mlist_isopen(lfs->mlist, (struct lfs_mlist*)file));
6019
6020 lfs_ssize_t res = lfs_file_rawwrite(lfs, file, buffer, size);
6021
6022 LFS_TRACE("lfs_file_write -> %"PRId32, res);
6023 LFS_UNLOCK(lfs->cfg);
6024 return res;
6025 }
6026 #endif
6027
6028 lfs_soff_t lfs_file_seek(lfs_t *lfs, lfs_file_t *file,
6029 lfs_soff_t off, int whence) {
6030 int err = LFS_LOCK(lfs->cfg);
6031 if (err) {
6032 return err;
6033 }
6034 LFS_TRACE("lfs_file_seek(%p, %p, %"PRId32", %d)",
6035 (void*)lfs, (void*)file, off, whence);
6036 LFS_ASSERT(lfs_mlist_isopen(lfs->mlist, (struct lfs_mlist*)file));
6037
6038 lfs_soff_t res = lfs_file_rawseek(lfs, file, off, whence);
6039
6040 LFS_TRACE("lfs_file_seek -> %"PRId32, res);
6041 LFS_UNLOCK(lfs->cfg);
6042 return res;
6043 }
6044
6045 #ifndef LFS_READONLY
6046 int lfs_file_truncate(lfs_t *lfs, lfs_file_t *file, lfs_off_t size) {
6047 int err = LFS_LOCK(lfs->cfg);
6048 if (err) {
6049 return err;
6050 }
6051 LFS_TRACE("lfs_file_truncate(%p, %p, %"PRIu32")",
6052 (void*)lfs, (void*)file, size);
6053 LFS_ASSERT(lfs_mlist_isopen(lfs->mlist, (struct lfs_mlist*)file));
6054
6055 err = lfs_file_rawtruncate(lfs, file, size);
6056
6057 LFS_TRACE("lfs_file_truncate -> %d", err);
6058 LFS_UNLOCK(lfs->cfg);
6059 return err;
6060 }
6061 #endif
6062
6063 lfs_soff_t lfs_file_tell(lfs_t *lfs, lfs_file_t *file) {
6064 int err = LFS_LOCK(lfs->cfg);
6065 if (err) {
6066 return err;
6067 }
6068 LFS_TRACE("lfs_file_tell(%p, %p)", (void*)lfs, (void*)file);
6069 LFS_ASSERT(lfs_mlist_isopen(lfs->mlist, (struct lfs_mlist*)file));
6070
6071 lfs_soff_t res = lfs_file_rawtell(lfs, file);
6072
6073 LFS_TRACE("lfs_file_tell -> %"PRId32, res);
6074 LFS_UNLOCK(lfs->cfg);
6075 return res;
6076 }
6077
6078 int lfs_file_rewind(lfs_t *lfs, lfs_file_t *file) {
6079 int err = LFS_LOCK(lfs->cfg);
6080 if (err) {
6081 return err;
6082 }
6083 LFS_TRACE("lfs_file_rewind(%p, %p)", (void*)lfs, (void*)file);
6084
6085 err = lfs_file_rawrewind(lfs, file);
6086
6087 LFS_TRACE("lfs_file_rewind -> %d", err);
6088 LFS_UNLOCK(lfs->cfg);
6089 return err;
6090 }
6091
6092 lfs_soff_t lfs_file_size(lfs_t *lfs, lfs_file_t *file) {
6093 int err = LFS_LOCK(lfs->cfg);
6094 if (err) {
6095 return err;
6096 }
6097 LFS_TRACE("lfs_file_size(%p, %p)", (void*)lfs, (void*)file);
6098 LFS_ASSERT(lfs_mlist_isopen(lfs->mlist, (struct lfs_mlist*)file));
6099
6100 lfs_soff_t res = lfs_file_rawsize(lfs, file);
6101
6102 LFS_TRACE("lfs_file_size -> %"PRId32, res);
6103 LFS_UNLOCK(lfs->cfg);
6104 return res;
6105 }
6106
6107 #ifndef LFS_READONLY
6108 int lfs_mkdir(lfs_t *lfs, const char *path) {
6109 int err = LFS_LOCK(lfs->cfg);
6110 if (err) {
6111 return err;
6112 }
6113 LFS_TRACE("lfs_mkdir(%p, \"%s\")", (void*)lfs, path);
6114
6115 err = lfs_rawmkdir(lfs, path);
6116
6117 LFS_TRACE("lfs_mkdir -> %d", err);
6118 LFS_UNLOCK(lfs->cfg);
6119 return err;
6120 }
6121 #endif
6122
6123 int lfs_dir_open(lfs_t *lfs, lfs_dir_t *dir, const char *path) {
6124 int err = LFS_LOCK(lfs->cfg);
6125 if (err) {
6126 return err;
6127 }
6128 LFS_TRACE("lfs_dir_open(%p, %p, \"%s\")", (void*)lfs, (void*)dir, path);
6129 LFS_ASSERT(!lfs_mlist_isopen(lfs->mlist, (struct lfs_mlist*)dir));
6130
6131 err = lfs_dir_rawopen(lfs, dir, path);
6132
6133 LFS_TRACE("lfs_dir_open -> %d", err);
6134 LFS_UNLOCK(lfs->cfg);
6135 return err;
6136 }
6137
6138 int lfs_dir_close(lfs_t *lfs, lfs_dir_t *dir) {
6139 int err = LFS_LOCK(lfs->cfg);
6140 if (err) {
6141 return err;
6142 }
6143 LFS_TRACE("lfs_dir_close(%p, %p)", (void*)lfs, (void*)dir);
6144
6145 err = lfs_dir_rawclose(lfs, dir);
6146
6147 LFS_TRACE("lfs_dir_close -> %d", err);
6148 LFS_UNLOCK(lfs->cfg);
6149 return err;
6150 }
6151
6152 int lfs_dir_read(lfs_t *lfs, lfs_dir_t *dir, struct lfs_info *info) {
6153 int err = LFS_LOCK(lfs->cfg);
6154 if (err) {
6155 return err;
6156 }
6157 LFS_TRACE("lfs_dir_read(%p, %p, %p)",
6158 (void*)lfs, (void*)dir, (void*)info);
6159
6160 err = lfs_dir_rawread(lfs, dir, info);
6161
6162 LFS_TRACE("lfs_dir_read -> %d", err);
6163 LFS_UNLOCK(lfs->cfg);
6164 return err;
6165 }
6166
6167 int lfs_dir_seek(lfs_t *lfs, lfs_dir_t *dir, lfs_off_t off) {
6168 int err = LFS_LOCK(lfs->cfg);
6169 if (err) {
6170 return err;
6171 }
6172 LFS_TRACE("lfs_dir_seek(%p, %p, %"PRIu32")",
6173 (void*)lfs, (void*)dir, off);
6174
6175 err = lfs_dir_rawseek(lfs, dir, off);
6176
6177 LFS_TRACE("lfs_dir_seek -> %d", err);
6178 LFS_UNLOCK(lfs->cfg);
6179 return err;
6180 }
6181
6182 lfs_soff_t lfs_dir_tell(lfs_t *lfs, lfs_dir_t *dir) {
6183 int err = LFS_LOCK(lfs->cfg);
6184 if (err) {
6185 return err;
6186 }
6187 LFS_TRACE("lfs_dir_tell(%p, %p)", (void*)lfs, (void*)dir);
6188
6189 lfs_soff_t res = lfs_dir_rawtell(lfs, dir);
6190
6191 LFS_TRACE("lfs_dir_tell -> %"PRId32, res);
6192 LFS_UNLOCK(lfs->cfg);
6193 return res;
6194 }
6195
6196 int lfs_dir_rewind(lfs_t *lfs, lfs_dir_t *dir) {
6197 int err = LFS_LOCK(lfs->cfg);
6198 if (err) {
6199 return err;
6200 }
6201 LFS_TRACE("lfs_dir_rewind(%p, %p)", (void*)lfs, (void*)dir);
6202
6203 err = lfs_dir_rawrewind(lfs, dir);
6204
6205 LFS_TRACE("lfs_dir_rewind -> %d", err);
6206 LFS_UNLOCK(lfs->cfg);
6207 return err;
6208 }
6209
6210 int lfs_fs_stat(lfs_t *lfs, struct lfs_fsinfo *fsinfo) {
6211 int err = LFS_LOCK(lfs->cfg);
6212 if (err) {
6213 return err;
6214 }
6215 LFS_TRACE("lfs_fs_stat(%p, %p)", (void*)lfs, (void*)fsinfo);
6216
6217 err = lfs_fs_rawstat(lfs, fsinfo);
6218
6219 LFS_TRACE("lfs_fs_stat -> %d", err);
6220 LFS_UNLOCK(lfs->cfg);
6221 return err;
6222 }
6223
6224 lfs_ssize_t lfs_fs_size(lfs_t *lfs) {
6225 int err = LFS_LOCK(lfs->cfg);
6226 if (err) {
6227 return err;
6228 }
6229 LFS_TRACE("lfs_fs_size(%p)", (void*)lfs);
6230
6231 lfs_ssize_t res = lfs_fs_rawsize(lfs);
6232
6233 LFS_TRACE("lfs_fs_size -> %"PRId32, res);
6234 LFS_UNLOCK(lfs->cfg);
6235 return res;
6236 }
6237
6238 int lfs_fs_traverse(lfs_t *lfs, int (*cb)(void *, lfs_block_t), void *data) {
6239 int err = LFS_LOCK(lfs->cfg);
6240 if (err) {
6241 return err;
6242 }
6243 LFS_TRACE("lfs_fs_traverse(%p, %p, %p)",
6244 (void*)lfs, (void*)(uintptr_t)cb, data);
6245
6246 err = lfs_fs_rawtraverse(lfs, cb, data, true);
6247
6248 LFS_TRACE("lfs_fs_traverse -> %d", err);
6249 LFS_UNLOCK(lfs->cfg);
6250 return err;
6251 }
6252
6253 #ifndef LFS_READONLY
6254 int lfs_fs_gc(lfs_t *lfs) {
6255 int err = LFS_LOCK(lfs->cfg);
6256 if (err) {
6257 return err;
6258 }
6259 LFS_TRACE("lfs_fs_gc(%p)", (void*)lfs);
6260
6261 err = lfs_fs_rawgc(lfs);
6262
6263 LFS_TRACE("lfs_fs_gc -> %d", err);
6264 LFS_UNLOCK(lfs->cfg);
6265 return err;
6266 }
6267 #endif
6268
6269 #ifndef LFS_READONLY
6270 int lfs_fs_mkconsistent(lfs_t *lfs) {
6271 int err = LFS_LOCK(lfs->cfg);
6272 if (err) {
6273 return err;
6274 }
6275 LFS_TRACE("lfs_fs_mkconsistent(%p)", (void*)lfs);
6276
6277 err = lfs_fs_rawmkconsistent(lfs);
6278
6279 LFS_TRACE("lfs_fs_mkconsistent -> %d", err);
6280 LFS_UNLOCK(lfs->cfg);
6281 return err;
6282 }
6283 #endif
6284
6285 #ifndef LFS_READONLY
6286 int lfs_fs_grow(lfs_t *lfs, lfs_size_t block_count) {
6287 int err = LFS_LOCK(lfs->cfg);
6288 if (err) {
6289 return err;
6290 }
6291 LFS_TRACE("lfs_fs_grow(%p, %"PRIu32")", (void*)lfs, block_count);
6292
6293 err = lfs_fs_rawgrow(lfs, block_count);
6294
6295 LFS_TRACE("lfs_fs_grow -> %d", err);
6296 LFS_UNLOCK(lfs->cfg);
6297 return err;
6298 }
6299 #endif
6300
6301 #ifdef LFS_MIGRATE
// Public wrapper for lfs_rawmigrate (only built with LFS_MIGRATE): takes
// the global lock, traces the full config on entry and the result on exit,
// and returns 0 on success or a negative error code (including a failed
// LFS_LOCK).
int lfs_migrate(lfs_t *lfs, const struct lfs_config *cfg) {
    int err = LFS_LOCK(cfg);
    if (err) {
        return err;
    }
    LFS_TRACE("lfs_migrate(%p, %p {.context=%p, "
                ".read=%p, .prog=%p, .erase=%p, .sync=%p, "
                ".read_size=%"PRIu32", .prog_size=%"PRIu32", "
                ".block_size=%"PRIu32", .block_count=%"PRIu32", "
                ".block_cycles=%"PRIu32", .cache_size=%"PRIu32", "
                ".lookahead_size=%"PRIu32", .read_buffer=%p, "
                ".prog_buffer=%p, .lookahead_buffer=%p, "
                ".name_max=%"PRIu32", .file_max=%"PRIu32", "
                ".attr_max=%"PRIu32"})",
            (void*)lfs, (void*)cfg, cfg->context,
            (void*)(uintptr_t)cfg->read, (void*)(uintptr_t)cfg->prog,
            (void*)(uintptr_t)cfg->erase, (void*)(uintptr_t)cfg->sync,
            cfg->read_size, cfg->prog_size, cfg->block_size, cfg->block_count,
            cfg->block_cycles, cfg->cache_size, cfg->lookahead_size,
            cfg->read_buffer, cfg->prog_buffer, cfg->lookahead_buffer,
            cfg->name_max, cfg->file_max, cfg->attr_max);

    err = lfs_rawmigrate(lfs, cfg);

    LFS_TRACE("lfs_migrate -> %d", err);
    LFS_UNLOCK(cfg);
    return err;
}
6330 #endif
6331
6332