1 /*
2 * The little filesystem
3 *
4 * Copyright (c) 2017, Arm Limited. All rights reserved.
5 * SPDX-License-Identifier: BSD-3-Clause
6 */
7 #define LFS_NO_ASSERT
8 #include "lfs.h"
9 #include "lfs_util.h"
10
11 #define LFS_BLOCK_NULL ((lfs_block_t)-1)
12 #define LFS_BLOCK_INLINE ((lfs_block_t)-2)
13
14 /// Caching block device operations ///
// Invalidate a read cache without clearing its contents.
//
// Skipping the memset is cheaper, and safe when the cache is read-only
// or is only going to be rewritten with identical data (during
// relocates).
static inline void lfs_cache_drop(lfs_t *lfs, lfs_cache_t *rcache) {
    (void)lfs;
    rcache->block = LFS_BLOCK_NULL;
}
21
// Invalidate a program cache, scrubbing its contents.
//
// The buffer is filled with 0xff (the erased state) so stale data can't
// leak into a later program.
static inline void lfs_cache_zero(lfs_t *lfs, lfs_cache_t *pcache) {
    memset(pcache->buffer, 0xff, lfs->cfg->cache_size);
    pcache->block = LFS_BLOCK_NULL;
}
27
// Read size bytes from block:off into buffer, consulting the program
// cache (pcache, may be NULL) and read cache (rcache) before falling
// back to the underlying block device.
//
// hint is the number of bytes likely to be read after off and is used
// to size the rcache refill; it may exceed size.
//
// Returns 0 on success, LFS_ERR_CORRUPT if the request is out of
// bounds, or a negative error code from the block device.
static int lfs_bd_read(lfs_t *lfs,
        const lfs_cache_t *pcache, lfs_cache_t *rcache, lfs_size_t hint,
        lfs_block_t block, lfs_off_t off,
        void *buffer, lfs_size_t size) {
    uint8_t *data = buffer;
    if (block >= lfs->cfg->block_count ||
            off+size > lfs->cfg->block_size) {
        return LFS_ERR_CORRUPT;
    }

    while (size > 0) {
        lfs_size_t diff = size;

        if (pcache && block == pcache->block &&
                off < pcache->off + pcache->size) {
            if (off >= pcache->off) {
                // is already in pcache?
                diff = lfs_min(diff, pcache->size - (off-pcache->off));
                memcpy(data, &pcache->buffer[off-pcache->off], diff);

                data += diff;
                off += diff;
                size -= diff;
                continue;
            }

            // pcache takes priority, so only read up to its start
            diff = lfs_min(diff, pcache->off-off);
        }

        if (block == rcache->block &&
                off < rcache->off + rcache->size) {
            if (off >= rcache->off) {
                // is already in rcache?
                diff = lfs_min(diff, rcache->size - (off-rcache->off));
                memcpy(data, &rcache->buffer[off-rcache->off], diff);

                data += diff;
                off += diff;
                size -= diff;
                continue;
            }

            // rcache takes priority, so only read up to its start
            diff = lfs_min(diff, rcache->off-off);
        }

        if (size >= hint && off % lfs->cfg->read_size == 0 &&
                size >= lfs->cfg->read_size) {
            // bypass cache? read aligned chunks straight into buffer
            diff = lfs_aligndown(diff, lfs->cfg->read_size);
            int err = lfs->cfg->read(lfs->cfg, block, off, data, diff);
            if (err) {
                return err;
            }

            data += diff;
            off += diff;
            size -= diff;
            continue;
        }

        // load to cache, first condition can no longer fail
        LFS_ASSERT(block < lfs->cfg->block_count);
        rcache->block = block;
        rcache->off = lfs_aligndown(off, lfs->cfg->read_size);
        // refill up to hint bytes, clamped to block and cache size
        rcache->size = lfs_min(
                lfs_min(
                    lfs_alignup(off+hint, lfs->cfg->read_size),
                    lfs->cfg->block_size)
                - rcache->off,
                lfs->cfg->cache_size);
        int err = lfs->cfg->read(lfs->cfg, rcache->block,
                rcache->off, rcache->buffer, rcache->size);
        LFS_ASSERT(err <= 0);
        if (err) {
            return err;
        }
    }

    return 0;
}
110
// Result of lfs_bd_cmp: how on-disk data orders relative to the buffer.
enum {
    LFS_CMP_EQ = 0,
    LFS_CMP_LT = 1,
    LFS_CMP_GT = 2,
};
116
// Compare size bytes at block:off against buffer without loading the
// whole range at once, reading through the caches in 8-byte chunks.
//
// Returns LFS_CMP_EQ/LFS_CMP_LT/LFS_CMP_GT describing how the on-disk
// data orders relative to buffer, or a negative error code.
static int lfs_bd_cmp(lfs_t *lfs,
        const lfs_cache_t *pcache, lfs_cache_t *rcache, lfs_size_t hint,
        lfs_block_t block, lfs_off_t off,
        const void *buffer, lfs_size_t size) {
    const uint8_t *data = buffer;
    lfs_size_t diff = 0;

    for (lfs_off_t i = 0; i < size; i += diff) {
        uint8_t dat[8];

        diff = lfs_min(size-i, sizeof(dat));
        int res = lfs_bd_read(lfs,
                pcache, rcache, hint-i,
                block, off+i, &dat, diff);
        if (res) {
            return res;
        }

        res = memcmp(dat, data + i, diff);
        if (res) {
            return res < 0 ? LFS_CMP_LT : LFS_CMP_GT;
        }
    }

    return LFS_CMP_EQ;
}
143
#ifndef LFS_READONLY
// Flush buffered program data in pcache out to the block device.
//
// Inline pseudo-blocks (LFS_BLOCK_INLINE) exist only in RAM and are
// skipped. If validate is set the programmed range is read back through
// rcache and compared; a mismatch returns LFS_ERR_CORRUPT. On success
// pcache is scrubbed and invalidated.
static int lfs_bd_flush(lfs_t *lfs,
        lfs_cache_t *pcache, lfs_cache_t *rcache, bool validate) {
    if (pcache->block != LFS_BLOCK_NULL && pcache->block != LFS_BLOCK_INLINE) {
        LFS_ASSERT(pcache->block < lfs->cfg->block_count);
        // program whole prog_size-aligned chunks
        lfs_size_t diff = lfs_alignup(pcache->size, lfs->cfg->prog_size);
        int err = lfs->cfg->prog(lfs->cfg, pcache->block,
                pcache->off, pcache->buffer, diff);
        LFS_ASSERT(err <= 0);
        if (err) {
            return err;
        }

        if (validate) {
            // check data on disk
            lfs_cache_drop(lfs, rcache);
            int res = lfs_bd_cmp(lfs,
                    NULL, rcache, diff,
                    pcache->block, pcache->off, pcache->buffer, diff);
            if (res < 0) {
                return res;
            }

            if (res != LFS_CMP_EQ) {
                return LFS_ERR_CORRUPT;
            }
        }

        lfs_cache_zero(lfs, pcache);
    }

    return 0;
}
#endif
178
179 #ifndef LFS_READONLY
lfs_bd_sync(lfs_t * lfs,lfs_cache_t * pcache,lfs_cache_t * rcache,bool validate)180 static int lfs_bd_sync(lfs_t *lfs,
181 lfs_cache_t *pcache, lfs_cache_t *rcache, bool validate) {
182 lfs_cache_drop(lfs, rcache);
183
184 int err = lfs_bd_flush(lfs, pcache, rcache, validate);
185 if (err) {
186 return err;
187 }
188
189 err = lfs->cfg->sync(lfs->cfg);
190 LFS_ASSERT(err <= 0);
191 return err;
192 }
193 #endif
194
#ifndef LFS_READONLY
// Program size bytes to block:off, staging the data in pcache.
//
// Data accumulates in pcache and is only written out when the cache
// fills (eager flush below) or when lfs_bd_flush/lfs_bd_sync is called.
// block may be LFS_BLOCK_INLINE for RAM-only inline storage. validate
// is forwarded to lfs_bd_flush for read-back verification.
static int lfs_bd_prog(lfs_t *lfs,
        lfs_cache_t *pcache, lfs_cache_t *rcache, bool validate,
        lfs_block_t block, lfs_off_t off,
        const void *buffer, lfs_size_t size) {
    const uint8_t *data = buffer;
    LFS_ASSERT(block == LFS_BLOCK_INLINE || block < lfs->cfg->block_count);
    LFS_ASSERT(off + size <= lfs->cfg->block_size);

    while (size > 0) {
        if (block == pcache->block &&
                off >= pcache->off &&
                off < pcache->off + lfs->cfg->cache_size) {
            // already fits in pcache?
            lfs_size_t diff = lfs_min(size,
                    lfs->cfg->cache_size - (off-pcache->off));
            memcpy(&pcache->buffer[off-pcache->off], data, diff);

            data += diff;
            off += diff;
            size -= diff;

            pcache->size = lfs_max(pcache->size, off - pcache->off);
            if (pcache->size == lfs->cfg->cache_size) {
                // eagerly flush out pcache if we fill up
                int err = lfs_bd_flush(lfs, pcache, rcache, validate);
                if (err) {
                    return err;
                }
            }

            continue;
        }

        // pcache must have been flushed, either by programming and
        // entire block or manually flushing the pcache
        LFS_ASSERT(pcache->block == LFS_BLOCK_NULL);

        // prepare pcache, first condition can no longer fail
        pcache->block = block;
        pcache->off = lfs_aligndown(off, lfs->cfg->prog_size);
        pcache->size = 0;
    }

    return 0;
}
#endif
242
#ifndef LFS_READONLY
// Erase a single block on the underlying block device.
// Returns 0 or a negative error code from the device.
static int lfs_bd_erase(lfs_t *lfs, lfs_block_t block) {
    LFS_ASSERT(block < lfs->cfg->block_count);
    int err = lfs->cfg->erase(lfs->cfg, block);
    LFS_ASSERT(err <= 0);
    return err;
}
#endif
251
252
253 /// Small type-level utilities ///
254 // operations on block pairs
// Swap the two blocks of a metadata pair in place.
static inline void lfs_pair_swap(lfs_block_t pair[2]) {
    lfs_block_t tmp = pair[1];
    pair[1] = pair[0];
    pair[0] = tmp;
}
260
lfs_pair_isnull(const lfs_block_t pair[2])261 static inline bool lfs_pair_isnull(const lfs_block_t pair[2]) {
262 return pair[0] == LFS_BLOCK_NULL || pair[1] == LFS_BLOCK_NULL;
263 }
264
// Compare two pairs: returns 0 if they share any block (in any order),
// nonzero if completely disjoint.
static inline int lfs_pair_cmp(
        const lfs_block_t paira[2],
        const lfs_block_t pairb[2]) {
    bool shared = paira[0] == pairb[0] || paira[0] == pairb[1] ||
            paira[1] == pairb[0] || paira[1] == pairb[1];
    return !shared;
}
271
// True if both pairs name the same two blocks, in either order.
static inline bool lfs_pair_sync(
        const lfs_block_t paira[2],
        const lfs_block_t pairb[2]) {
    return (paira[0] == pairb[1] && paira[1] == pairb[0]) ||
            (paira[0] == pairb[0] && paira[1] == pairb[1]);
}
278
// Convert a block pair from little-endian (on-disk) to host order.
static inline void lfs_pair_fromle32(lfs_block_t pair[2]) {
    pair[1] = lfs_fromle32(pair[1]);
    pair[0] = lfs_fromle32(pair[0]);
}
283
// Convert a block pair from host order to little-endian (on-disk).
static inline void lfs_pair_tole32(lfs_block_t pair[2]) {
    pair[1] = lfs_tole32(pair[1]);
    pair[0] = lfs_tole32(pair[0]);
}
288
// operations on 32-bit entry tags
// layout (from the accessors below):
//   [1-bit invalid][11-bit type][10-bit id][10-bit size]
typedef uint32_t lfs_tag_t;
typedef int32_t lfs_stag_t; // signed tag, negative values carry error codes

// construct a tag from its type/id/size fields
#define LFS_MKTAG(type, id, size) \
    (((lfs_tag_t)(type) << 20) | ((lfs_tag_t)(id) << 10) | (lfs_tag_t)(size))

// construct a tag, or a noop tag when cond is false
#define LFS_MKTAG_IF(cond, type, id, size) \
    ((cond) ? LFS_MKTAG(type, id, size) : LFS_MKTAG(LFS_FROM_NOOP, 0, 0))

// choose between two tags based on cond
#define LFS_MKTAG_IF_ELSE(cond, type1, id1, size1, type2, id2, size2) \
    ((cond) ? LFS_MKTAG(type1, id1, size1) : LFS_MKTAG(type2, id2, size2))
301
// A tag is valid when its top (invalid) bit is clear.
static inline bool lfs_tag_isvalid(lfs_tag_t tag) {
    return (tag & 0x80000000) == 0;
}
305
// True if the 10-bit size field is 0x3ff, the "deleted" marker.
//
// Sign-extends the size field: shifting left 22 then arithmetic right 22
// yields -1 exactly when all ten size bits are set.
static inline bool lfs_tag_isdelete(lfs_tag_t tag) {
    return ((int32_t)(tag << 22) >> 22) == -1;
}
309
// Abstract type: the top 3 bits of the 11-bit type field, returned
// still shifted up past the 8 chunk bits (so it compares directly
// against LFS_TYPE_* group constants).
static inline uint16_t lfs_tag_type1(lfs_tag_t tag) {
    return (tag & 0x70000000) >> 20;
}
313
// Full 11-bit type field (abstract type plus chunk bits).
static inline uint16_t lfs_tag_type3(lfs_tag_t tag) {
    return (uint16_t)((tag >> 20) & 0x7ff);
}
317
// Low 8 (chunk) bits of the type field.
static inline uint8_t lfs_tag_chunk(lfs_tag_t tag) {
    return (uint8_t)((tag >> 20) & 0xff);
}
321
// Chunk field reinterpreted as a signed id delta (used by splice tags
// to add or remove ids).
static inline int8_t lfs_tag_splice(lfs_tag_t tag) {
    return (int8_t)lfs_tag_chunk(tag);
}
325
// 10-bit id field of a tag.
static inline uint16_t lfs_tag_id(lfs_tag_t tag) {
    return (uint16_t)((tag >> 10) & 0x3ff);
}
329
lfs_tag_size(lfs_tag_t tag)330 static inline lfs_size_t lfs_tag_size(lfs_tag_t tag) {
331 return tag & 0x000003ff;
332 }
333
// On-disk footprint of a tag: the 4-byte tag itself plus its payload.
//
// Deleted tags encode size 0x3ff; adding isdelete (1) overflows the
// 10-bit size field to 0, so deleted tags contribute no payload bytes.
static inline lfs_size_t lfs_tag_dsize(lfs_tag_t tag) {
    return sizeof(tag) + lfs_tag_size(tag + lfs_tag_isdelete(tag));
}
337
// operations on attributes in attribute lists
struct lfs_mattr {
    lfs_tag_t tag;      // tag describing the attribute
    const void *buffer; // attribute payload
};

// location of raw data on disk
struct lfs_diskoff {
    lfs_block_t block;
    lfs_off_t off;
};

// build an unnamed lfs_mattr array plus its element count, for passing
// to (attrs, attrcount) parameter pairs
#define LFS_MKATTRS(...) \
    (struct lfs_mattr[]){__VA_ARGS__}, \
    sizeof((struct lfs_mattr[]){__VA_ARGS__}) / sizeof(struct lfs_mattr)
352
353 // operations on global state
// XOR the three 32-bit words of gstate b into gstate a.
static inline void lfs_gstate_xor(lfs_gstate_t *a, const lfs_gstate_t *b) {
    uint32_t *aw = (uint32_t*)a;
    const uint32_t *bw = (const uint32_t*)b;
    for (int i = 0; i < 3; i++) {
        aw[i] ^= bw[i];
    }
}
359
// Check whether a gstate is entirely zero (all three 32-bit words).
//
// Fix: the original cast `(uint32_t*)a` dropped the const qualifier
// from the const-qualified parameter; use a const word pointer instead,
// matching how lfs_gstate_xor treats its const operand.
static inline bool lfs_gstate_iszero(const lfs_gstate_t *a) {
    const uint32_t *words = (const uint32_t*)a;
    for (int i = 0; i < 3; i++) {
        if (words[i] != 0) {
            return false;
        }
    }
    return true;
}
368
lfs_gstate_hasorphans(const lfs_gstate_t * a)369 static inline bool lfs_gstate_hasorphans(const lfs_gstate_t *a) {
370 return lfs_tag_size(a->tag);
371 }
372
lfs_gstate_getorphans(const lfs_gstate_t * a)373 static inline uint8_t lfs_gstate_getorphans(const lfs_gstate_t *a) {
374 return lfs_tag_size(a->tag);
375 }
376
lfs_gstate_hasmove(const lfs_gstate_t * a)377 static inline bool lfs_gstate_hasmove(const lfs_gstate_t *a) {
378 return lfs_tag_type1(a->tag);
379 }
380
lfs_gstate_hasmovehere(const lfs_gstate_t * a,const lfs_block_t * pair)381 static inline bool lfs_gstate_hasmovehere(const lfs_gstate_t *a,
382 const lfs_block_t *pair) {
383 return lfs_tag_type1(a->tag) && lfs_pair_cmp(a->pair, pair) == 0;
384 }
385
// Convert a gstate from little-endian (on-disk) to host order.
static inline void lfs_gstate_fromle32(lfs_gstate_t *a) {
    a->tag = lfs_fromle32(a->tag);
    a->pair[1] = lfs_fromle32(a->pair[1]);
    a->pair[0] = lfs_fromle32(a->pair[0]);
}
391
// Convert a gstate from host order to little-endian (on-disk).
static inline void lfs_gstate_tole32(lfs_gstate_t *a) {
    a->tag = lfs_tole32(a->tag);
    a->pair[1] = lfs_tole32(a->pair[1]);
    a->pair[0] = lfs_tole32(a->pair[0]);
}
397
398 // other endianness operations
lfs_ctz_fromle32(struct lfs_ctz * ctz)399 static void lfs_ctz_fromle32(struct lfs_ctz *ctz) {
400 ctz->head = lfs_fromle32(ctz->head);
401 ctz->size = lfs_fromle32(ctz->size);
402 }
403
404 #ifndef LFS_READONLY
lfs_ctz_tole32(struct lfs_ctz * ctz)405 static void lfs_ctz_tole32(struct lfs_ctz *ctz) {
406 ctz->head = lfs_tole32(ctz->head);
407 ctz->size = lfs_tole32(ctz->size);
408 }
409 #endif
410
// Convert a superblock's fields from little-endian (on-disk) to host
// order.
static inline void lfs_superblock_fromle32(lfs_superblock_t *superblock) {
    superblock->version     = lfs_fromle32(superblock->version);
    superblock->block_size  = lfs_fromle32(superblock->block_size);
    superblock->block_count = lfs_fromle32(superblock->block_count);
    superblock->name_max    = lfs_fromle32(superblock->name_max);
    superblock->file_max    = lfs_fromle32(superblock->file_max);
    superblock->attr_max    = lfs_fromle32(superblock->attr_max);
}
419
// Convert a superblock's fields from host order to little-endian
// (on-disk).
static inline void lfs_superblock_tole32(lfs_superblock_t *superblock) {
    superblock->version     = lfs_tole32(superblock->version);
    superblock->block_size  = lfs_tole32(superblock->block_size);
    superblock->block_count = lfs_tole32(superblock->block_count);
    superblock->name_max    = lfs_tole32(superblock->name_max);
    superblock->file_max    = lfs_tole32(superblock->file_max);
    superblock->attr_max    = lfs_tole32(superblock->attr_max);
}
428
429 #ifndef LFS_NO_ASSERT
lfs_mlist_isopen(struct lfs_mlist * head,struct lfs_mlist * node)430 static bool lfs_mlist_isopen(struct lfs_mlist *head,
431 struct lfs_mlist *node) {
432 for (struct lfs_mlist **p = &head; *p; p = &(*p)->next) {
433 if (*p == (struct lfs_mlist*)node) {
434 return true;
435 }
436 }
437
438 return false;
439 }
440 #endif
441
// Unlink mlist from the filesystem's open-handle list, if present.
static void lfs_mlist_remove(lfs_t *lfs, struct lfs_mlist *mlist) {
    struct lfs_mlist **p = &lfs->mlist;
    while (*p) {
        if (*p == mlist) {
            *p = mlist->next;
            break;
        }
        p = &(*p)->next;
    }
}
450
// Push mlist onto the front of the filesystem's open-handle list.
static void lfs_mlist_append(lfs_t *lfs, struct lfs_mlist *mlist) {
    mlist->next = lfs->mlist;
    lfs->mlist = mlist;
}
455
456
457 /// Internal operations predeclared here ///
458 #ifndef LFS_READONLY
459 static int lfs_dir_commit(lfs_t *lfs, lfs_mdir_t *dir,
460 const struct lfs_mattr *attrs, int attrcount);
461 static int lfs_dir_compact(lfs_t *lfs,
462 lfs_mdir_t *dir, const struct lfs_mattr *attrs, int attrcount,
463 lfs_mdir_t *source, uint16_t begin, uint16_t end);
464
465 static lfs_ssize_t lfs_file_rawwrite(lfs_t *lfs, lfs_file_t *file,
466 const void *buffer, lfs_size_t size);
467 static int lfs_file_rawsync(lfs_t *lfs, lfs_file_t *file);
468 static int lfs_file_outline(lfs_t *lfs, lfs_file_t *file);
469 static int lfs_file_flush(lfs_t *lfs, lfs_file_t *file);
470
471 static int lfs_fs_preporphans(lfs_t *lfs, int8_t orphans);
472 static void lfs_fs_prepmove(lfs_t *lfs,
473 uint16_t id, const lfs_block_t pair[2]);
474 static int lfs_fs_pred(lfs_t *lfs, const lfs_block_t dir[2],
475 lfs_mdir_t *pdir);
476 static lfs_stag_t lfs_fs_parent(lfs_t *lfs, const lfs_block_t dir[2],
477 lfs_mdir_t *parent);
478 static int lfs_fs_relocate(lfs_t *lfs,
479 const lfs_block_t oldpair[2], lfs_block_t newpair[2]);
480 static int lfs_fs_forceconsistency(lfs_t *lfs);
481 #endif
482
483 #ifdef LFS_MIGRATE
484 static int lfs1_traverse(lfs_t *lfs,
485 int (*cb)(void*, lfs_block_t), void *data);
486 #endif
487
488 static int lfs_dir_rawrewind(lfs_t *lfs, lfs_dir_t *dir);
489
490 static lfs_ssize_t lfs_file_rawread(lfs_t *lfs, lfs_file_t *file,
491 void *buffer, lfs_size_t size);
492 static int lfs_file_rawclose(lfs_t *lfs, lfs_file_t *file);
493 static lfs_soff_t lfs_file_rawsize(lfs_t *lfs, lfs_file_t *file);
494
495 static lfs_ssize_t lfs_fs_rawsize(lfs_t *lfs);
496 static int lfs_fs_rawtraverse(lfs_t *lfs,
497 int (*cb)(void *data, lfs_block_t block), void *data,
498 bool includeorphans);
499
500 static int lfs_deinit(lfs_t *lfs);
501 static int lfs_rawunmount(lfs_t *lfs);
502
503
504 /// Block allocator ///
505 #ifndef LFS_READONLY
lfs_alloc_lookahead(void * p,lfs_block_t block)506 static int lfs_alloc_lookahead(void *p, lfs_block_t block) {
507 lfs_t *lfs = (lfs_t*)p;
508 lfs_block_t off = ((block - lfs->free.off)
509 + lfs->cfg->block_count) % lfs->cfg->block_count;
510
511 if (off < lfs->free.size) {
512 lfs->free.buffer[off / 32] |= 1U << (off % 32);
513 }
514
515 return 0;
516 }
517 #endif
518
// indicate allocated blocks have been committed into the filesystem, this
// is to prevent blocks from being garbage collected in the middle of a
// commit operation
static void lfs_alloc_ack(lfs_t *lfs) {
    // reset the budget of blocks lfs_alloc may consider before
    // reporting LFS_ERR_NOSPC
    lfs->free.ack = lfs->cfg->block_count;
}
525
// drop the lookahead buffer, this is done during mounting and failed
// traversals in order to avoid invalid lookahead state
static void lfs_alloc_drop(lfs_t *lfs) {
    lfs->free.size = 0;
    lfs->free.i = 0;
    lfs_alloc_ack(lfs);
}
533
#ifndef LFS_READONLY
// Allocate a free block, writing its address to *block.
//
// Scans the lookahead bitmap for a clear bit; when the current window
// is exhausted it is advanced and repopulated by traversing the whole
// filesystem. Returns LFS_ERR_NOSPC once every block since the last
// lfs_alloc_ack has been considered without finding a free one.
static int lfs_alloc(lfs_t *lfs, lfs_block_t *block) {
    while (true) {
        while (lfs->free.i != lfs->free.size) {
            lfs_block_t off = lfs->free.i;
            lfs->free.i += 1;
            lfs->free.ack -= 1;

            if (!(lfs->free.buffer[off / 32] & (1U << (off % 32)))) {
                // found a free block
                *block = (lfs->free.off + off) % lfs->cfg->block_count;

                // eagerly find next off so an alloc ack can
                // discredit old lookahead blocks
                while (lfs->free.i != lfs->free.size &&
                        (lfs->free.buffer[lfs->free.i / 32]
                            & (1U << (lfs->free.i % 32)))) {
                    lfs->free.i += 1;
                    lfs->free.ack -= 1;
                }

                return 0;
            }
        }

        // check if we have looked at all blocks since last ack
        if (lfs->free.ack == 0) {
            LFS_ERROR("No more free space %"PRIu32,
                    lfs->free.i + lfs->free.off);
            return LFS_ERR_NOSPC;
        }

        // advance the lookahead window, clamped to the remaining budget
        lfs->free.off = (lfs->free.off + lfs->free.size)
                % lfs->cfg->block_count;
        lfs->free.size = lfs_min(8*lfs->cfg->lookahead_size, lfs->free.ack);
        lfs->free.i = 0;

        // find mask of free blocks from tree
        memset(lfs->free.buffer, 0, lfs->cfg->lookahead_size);
        int err = lfs_fs_rawtraverse(lfs, lfs_alloc_lookahead, lfs, true);
        if (err) {
            lfs_alloc_drop(lfs);
            return err;
        }
    }
}
#endif
581
582 /// Metadata pair and directory operations ///
// Look up the most recent tag in dir matching gmask/gtag and copy up to
// gsize bytes of its payload, starting at payload offset goff, into
// gbuffer (zero-padding any remainder).
//
// Scans the metadata log backwards from the last commit, adjusting tag
// ids for create/delete splices and for a pending synthetic move
// recorded in lfs->gdisk. Returns the matching tag (id-adjusted),
// LFS_ERR_NOENT if the entry was deleted or never created, or a
// negative error code.
static lfs_stag_t lfs_dir_getslice(lfs_t *lfs, const lfs_mdir_t *dir,
        lfs_tag_t gmask, lfs_tag_t gtag,
        lfs_off_t goff, void *gbuffer, lfs_size_t gsize) {
    lfs_off_t off = dir->off;
    lfs_tag_t ntag = dir->etag;
    lfs_stag_t gdiff = 0;

    if (lfs_gstate_hasmovehere(&lfs->gdisk, dir->pair) &&
            lfs_tag_id(gmask) != 0 &&
            lfs_tag_id(lfs->gdisk.tag) <= lfs_tag_id(gtag)) {
        // synthetic moves
        gdiff -= LFS_MKTAG(0, 1, 0);
    }

    // iterate over dir block backwards (for faster lookups)
    while (off >= sizeof(lfs_tag_t) + lfs_tag_dsize(ntag)) {
        off -= lfs_tag_dsize(ntag);
        lfs_tag_t tag = ntag;
        int err = lfs_bd_read(lfs,
                NULL, &lfs->rcache, sizeof(ntag),
                dir->pair[0], off, &ntag, sizeof(ntag));
        if (err) {
            return err;
        }

        // tags are stored xored against the following tag
        ntag = (lfs_frombe32(ntag) ^ tag) & 0x7fffffff;

        if (lfs_tag_id(gmask) != 0 &&
                lfs_tag_type1(tag) == LFS_TYPE_SPLICE &&
                lfs_tag_id(tag) <= lfs_tag_id(gtag - gdiff)) {
            if (tag == (LFS_MKTAG(LFS_TYPE_CREATE, 0, 0) |
                    (LFS_MKTAG(0, 0x3ff, 0) & (gtag - gdiff)))) {
                // found where we were created
                return LFS_ERR_NOENT;
            }

            // move around splices
            gdiff += LFS_MKTAG(0, lfs_tag_splice(tag), 0);
        }

        if ((gmask & tag) == (gmask & (gtag - gdiff))) {
            if (lfs_tag_isdelete(tag)) {
                return LFS_ERR_NOENT;
            }

            lfs_size_t diff = lfs_min(lfs_tag_size(tag), gsize);
            err = lfs_bd_read(lfs,
                    NULL, &lfs->rcache, diff,
                    dir->pair[0], off+sizeof(tag)+goff, gbuffer, diff);
            if (err) {
                return err;
            }

            // zero-pad if the stored attribute is smaller than requested
            memset((uint8_t*)gbuffer + diff, 0, gsize - diff);

            return tag + gdiff;
        }
    }

    return LFS_ERR_NOENT;
}
644
lfs_dir_get(lfs_t * lfs,const lfs_mdir_t * dir,lfs_tag_t gmask,lfs_tag_t gtag,void * buffer)645 static lfs_stag_t lfs_dir_get(lfs_t *lfs, const lfs_mdir_t *dir,
646 lfs_tag_t gmask, lfs_tag_t gtag, void *buffer) {
647 return lfs_dir_getslice(lfs, dir,
648 gmask, gtag,
649 0, buffer, lfs_tag_size(gtag));
650 }
651
// Read from a metadata entry as if it were a block device, mirroring
// lfs_bd_read's cache logic but keyed on the LFS_BLOCK_INLINE
// pseudo-block and backed by lfs_dir_getslice instead of a device read.
//
// off/size address the entry's payload; gmask/gtag select the entry.
// Returns 0 on success, LFS_ERR_CORRUPT if out of bounds, or a negative
// error from the underlying getslice.
static int lfs_dir_getread(lfs_t *lfs, const lfs_mdir_t *dir,
        const lfs_cache_t *pcache, lfs_cache_t *rcache, lfs_size_t hint,
        lfs_tag_t gmask, lfs_tag_t gtag,
        lfs_off_t off, void *buffer, lfs_size_t size) {
    uint8_t *data = buffer;
    if (off+size > lfs->cfg->block_size) {
        return LFS_ERR_CORRUPT;
    }

    while (size > 0) {
        lfs_size_t diff = size;

        if (pcache && pcache->block == LFS_BLOCK_INLINE &&
                off < pcache->off + pcache->size) {
            if (off >= pcache->off) {
                // is already in pcache?
                diff = lfs_min(diff, pcache->size - (off-pcache->off));
                memcpy(data, &pcache->buffer[off-pcache->off], diff);

                data += diff;
                off += diff;
                size -= diff;
                continue;
            }

            // pcache takes priority
            diff = lfs_min(diff, pcache->off-off);
        }

        if (rcache->block == LFS_BLOCK_INLINE &&
                off < rcache->off + rcache->size) {
            if (off >= rcache->off) {
                // is already in rcache?
                diff = lfs_min(diff, rcache->size - (off-rcache->off));
                memcpy(data, &rcache->buffer[off-rcache->off], diff);

                data += diff;
                off += diff;
                size -= diff;
                continue;
            }

            // rcache takes priority
            diff = lfs_min(diff, rcache->off-off);
        }

        // load to cache, first condition can no longer fail
        rcache->block = LFS_BLOCK_INLINE;
        rcache->off = lfs_aligndown(off, lfs->cfg->read_size);
        rcache->size = lfs_min(lfs_alignup(off+hint, lfs->cfg->read_size),
                lfs->cfg->cache_size);
        int err = lfs_dir_getslice(lfs, dir, gmask, gtag,
                rcache->off, rcache->buffer, rcache->size);
        if (err < 0) {
            return err;
        }
    }

    return 0;
}
712
#ifndef LFS_READONLY
// lfs_dir_traverse callback that decides whether an earlier tag is made
// redundant by a later one.
//
// p points to the tag being filtered. Returns true (drop it) when the
// later tag overwrites the same entry, the filtered tag is itself a
// delete, or an explicit delete for the same id is seen. Also adjusts
// the filtered tag's id for intervening create/delete splices.
static int lfs_dir_traverse_filter(void *p,
        lfs_tag_t tag, const void *buffer) {
    lfs_tag_t *filtertag = p;
    (void)buffer;

    // which mask depends on unique bit in tag structure
    uint32_t mask = (tag & LFS_MKTAG(0x100, 0, 0))
            ? LFS_MKTAG(0x7ff, 0x3ff, 0)
            : LFS_MKTAG(0x700, 0x3ff, 0);

    // check for redundancy
    if ((mask & tag) == (mask & *filtertag) ||
            lfs_tag_isdelete(*filtertag) ||
            (LFS_MKTAG(0x7ff, 0x3ff, 0) & tag) == (
                LFS_MKTAG(LFS_TYPE_DELETE, 0, 0) |
                    (LFS_MKTAG(0, 0x3ff, 0) & *filtertag))) {
        return true;
    }

    // check if we need to adjust for created/deleted tags
    if (lfs_tag_type1(tag) == LFS_TYPE_SPLICE &&
            lfs_tag_id(tag) <= lfs_tag_id(*filtertag)) {
        *filtertag += LFS_MKTAG(0, lfs_tag_splice(tag), 0);
    }

    return false;
}
#endif
742
#ifndef LFS_READONLY
// Traverse the tags of a metadata log plus a pending attribute list,
// invoking cb for each tag matching tmask/ttag whose id is in
// [begin, end). diff is added to reported tag ids (used by moves).
//
// Tags are read from dir starting after off/ptag up to dir->off, then
// the attrs array is consumed. When tmask has id bits set, a recursive
// pass with lfs_dir_traverse_filter drops superseded tags.
// LFS_FROM_MOVE tags recurse into the source directory, and
// LFS_FROM_USERATTRS tags expand into one callback per user attribute.
static int lfs_dir_traverse(lfs_t *lfs,
        const lfs_mdir_t *dir, lfs_off_t off, lfs_tag_t ptag,
        const struct lfs_mattr *attrs, int attrcount,
        lfs_tag_t tmask, lfs_tag_t ttag,
        uint16_t begin, uint16_t end, int16_t diff,
        int (*cb)(void *data, lfs_tag_t tag, const void *buffer), void *data) {
    // iterate over directory and attrs
    while (true) {
        lfs_tag_t tag;
        const void *buffer;
        struct lfs_diskoff disk;
        if (off+lfs_tag_dsize(ptag) < dir->off) {
            off += lfs_tag_dsize(ptag);
            int err = lfs_bd_read(lfs,
                    NULL, &lfs->rcache, sizeof(tag),
                    dir->pair[0], off, &tag, sizeof(tag));
            if (err) {
                return err;
            }

            // on-disk tags are xored against the previous tag
            tag = (lfs_frombe32(tag) ^ ptag) | 0x80000000;
            disk.block = dir->pair[0];
            disk.off = off+sizeof(lfs_tag_t);
            buffer = &disk;
            ptag = tag;
        } else if (attrcount > 0) {
            tag = attrs[0].tag;
            buffer = attrs[0].buffer;
            attrs += 1;
            attrcount -= 1;
        } else {
            // done with both the log and the attr list
            return 0;
        }

        lfs_tag_t mask = LFS_MKTAG(0x7ff, 0, 0);
        if ((mask & tmask & tag) != (mask & tmask & ttag)) {
            continue;
        }

        // do we need to filter? inlining the filtering logic here allows
        // for some minor optimizations
        if (lfs_tag_id(tmask) != 0) {
            // scan for duplicates and update tag based on creates/deletes
            int filter = lfs_dir_traverse(lfs,
                    dir, off, ptag, attrs, attrcount,
                    0, 0, 0, 0, 0,
                    lfs_dir_traverse_filter, &tag);
            if (filter < 0) {
                return filter;
            }

            if (filter) {
                continue;
            }

            // in filter range?
            if (!(lfs_tag_id(tag) >= begin && lfs_tag_id(tag) < end)) {
                continue;
            }
        }

        // handle special cases for mcu-side operations
        if (lfs_tag_type3(tag) == LFS_FROM_NOOP) {
            // do nothing
        } else if (lfs_tag_type3(tag) == LFS_FROM_MOVE) {
            // recurse into the move's source dir, remapping ids
            uint16_t fromid = lfs_tag_size(tag);
            uint16_t toid = lfs_tag_id(tag);
            int err = lfs_dir_traverse(lfs,
                    buffer, 0, 0xffffffff, NULL, 0,
                    LFS_MKTAG(0x600, 0x3ff, 0),
                    LFS_MKTAG(LFS_TYPE_STRUCT, 0, 0),
                    fromid, fromid+1, toid-fromid+diff,
                    cb, data);
            if (err) {
                return err;
            }
        } else if (lfs_tag_type3(tag) == LFS_FROM_USERATTRS) {
            // expand into one synthetic tag per user attribute
            for (unsigned i = 0; i < lfs_tag_size(tag); i++) {
                const struct lfs_attr *a = buffer;
                int err = cb(data, LFS_MKTAG(LFS_TYPE_USERATTR + a[i].type,
                        lfs_tag_id(tag) + diff, a[i].size), a[i].buffer);
                if (err) {
                    return err;
                }
            }
        } else {
            int err = cb(data, tag + LFS_MKTAG(0, diff, 0), buffer);
            if (err) {
                return err;
            }
        }
    }
}
#endif
838
lfs_dir_fetchmatch(lfs_t * lfs,lfs_mdir_t * dir,const lfs_block_t pair[2],lfs_tag_t fmask,lfs_tag_t ftag,uint16_t * id,int (* cb)(void * data,lfs_tag_t tag,const void * buffer),void * data)839 static lfs_stag_t lfs_dir_fetchmatch(lfs_t *lfs,
840 lfs_mdir_t *dir, const lfs_block_t pair[2],
841 lfs_tag_t fmask, lfs_tag_t ftag, uint16_t *id,
842 int (*cb)(void *data, lfs_tag_t tag, const void *buffer), void *data) {
843 // we can find tag very efficiently during a fetch, since we're already
844 // scanning the entire directory
845 lfs_stag_t besttag = -1;
846
847 // if either block address is invalid we return LFS_ERR_CORRUPT here,
848 // otherwise later writes to the pair could fail
849 if (pair[0] >= lfs->cfg->block_count || pair[1] >= lfs->cfg->block_count) {
850 return LFS_ERR_CORRUPT;
851 }
852
853 // find the block with the most recent revision
854 uint32_t revs[2] = {0, 0};
855 int r = 0;
856 for (int i = 0; i < 2; i++) {
857 int err = lfs_bd_read(lfs,
858 NULL, &lfs->rcache, sizeof(revs[i]),
859 pair[i], 0, &revs[i], sizeof(revs[i]));
860 revs[i] = lfs_fromle32(revs[i]);
861 if (err && err != LFS_ERR_CORRUPT) {
862 return err;
863 }
864
865 if (err != LFS_ERR_CORRUPT &&
866 lfs_scmp(revs[i], revs[(i+1)%2]) > 0) {
867 r = i;
868 }
869 }
870
871 dir->pair[0] = pair[(r+0)%2];
872 dir->pair[1] = pair[(r+1)%2];
873 dir->rev = revs[(r+0)%2];
874 dir->off = 0; // nonzero = found some commits
875
876 // now scan tags to fetch the actual dir and find possible match
877 for (int i = 0; i < 2; i++) {
878 lfs_off_t off = 0;
879 lfs_tag_t ptag = 0xffffffff;
880
881 uint16_t tempcount = 0;
882 lfs_block_t temptail[2] = {LFS_BLOCK_NULL, LFS_BLOCK_NULL};
883 bool tempsplit = false;
884 lfs_stag_t tempbesttag = besttag;
885
886 dir->rev = lfs_tole32(dir->rev);
887 uint32_t crc = lfs_crc(0xffffffff, &dir->rev, sizeof(dir->rev));
888 dir->rev = lfs_fromle32(dir->rev);
889
890 while (true) {
891 // extract next tag
892 lfs_tag_t tag;
893 off += lfs_tag_dsize(ptag);
894 int err = lfs_bd_read(lfs,
895 NULL, &lfs->rcache, lfs->cfg->block_size,
896 dir->pair[0], off, &tag, sizeof(tag));
897 if (err) {
898 if (err == LFS_ERR_CORRUPT) {
899 // can't continue?
900 dir->erased = false;
901 break;
902 }
903 return err;
904 }
905
906 crc = lfs_crc(crc, &tag, sizeof(tag));
907 tag = lfs_frombe32(tag) ^ ptag;
908
909 // next commit not yet programmed or we're not in valid range
910 if (!lfs_tag_isvalid(tag)) {
911 dir->erased = (lfs_tag_type1(ptag) == LFS_TYPE_CRC &&
912 dir->off % lfs->cfg->prog_size == 0);
913 break;
914 } else if (off + lfs_tag_dsize(tag) > lfs->cfg->block_size) {
915 dir->erased = false;
916 break;
917 }
918
919 ptag = tag;
920
921 if (lfs_tag_type1(tag) == LFS_TYPE_CRC) {
922 // check the crc attr
923 uint32_t dcrc;
924 err = lfs_bd_read(lfs,
925 NULL, &lfs->rcache, lfs->cfg->block_size,
926 dir->pair[0], off+sizeof(tag), &dcrc, sizeof(dcrc));
927 if (err) {
928 if (err == LFS_ERR_CORRUPT) {
929 dir->erased = false;
930 break;
931 }
932 return err;
933 }
934 dcrc = lfs_fromle32(dcrc);
935
936 if (crc != dcrc) {
937 dir->erased = false;
938 break;
939 }
940
941 // reset the next bit if we need to
942 ptag ^= (lfs_tag_t)(lfs_tag_chunk(tag) & 1U) << 31;
943
944 // toss our crc into the filesystem seed for
945 // pseudorandom numbers, note we use another crc here
946 // as a collection function because it is sufficiently
947 // random and convenient
948 lfs->seed = lfs_crc(lfs->seed, &crc, sizeof(crc));
949
950 // update with what's found so far
951 besttag = tempbesttag;
952 dir->off = off + lfs_tag_dsize(tag);
953 dir->etag = ptag;
954 dir->count = tempcount;
955 dir->tail[0] = temptail[0];
956 dir->tail[1] = temptail[1];
957 dir->split = tempsplit;
958
959 // reset crc
960 crc = 0xffffffff;
961 continue;
962 }
963
964 // crc the entry first, hopefully leaving it in the cache
965 for (lfs_off_t j = sizeof(tag); j < lfs_tag_dsize(tag); j++) {
966 uint8_t dat;
967 err = lfs_bd_read(lfs,
968 NULL, &lfs->rcache, lfs->cfg->block_size,
969 dir->pair[0], off+j, &dat, 1);
970 if (err) {
971 if (err == LFS_ERR_CORRUPT) {
972 dir->erased = false;
973 break;
974 }
975 return err;
976 }
977
978 crc = lfs_crc(crc, &dat, 1);
979 }
980
981 // directory modification tags?
982 if (lfs_tag_type1(tag) == LFS_TYPE_NAME) {
983 // increase count of files if necessary
984 if (lfs_tag_id(tag) >= tempcount) {
985 tempcount = lfs_tag_id(tag) + 1;
986 }
987 } else if (lfs_tag_type1(tag) == LFS_TYPE_SPLICE) {
988 tempcount += lfs_tag_splice(tag);
989
990 if (tag == (LFS_MKTAG(LFS_TYPE_DELETE, 0, 0) |
991 (LFS_MKTAG(0, 0x3ff, 0) & tempbesttag))) {
992 tempbesttag |= 0x80000000;
993 } else if (tempbesttag != -1 &&
994 lfs_tag_id(tag) <= lfs_tag_id(tempbesttag)) {
995 tempbesttag += LFS_MKTAG(0, lfs_tag_splice(tag), 0);
996 }
997 } else if (lfs_tag_type1(tag) == LFS_TYPE_TAIL) {
998 tempsplit = (lfs_tag_chunk(tag) & 1);
999
1000 err = lfs_bd_read(lfs,
1001 NULL, &lfs->rcache, lfs->cfg->block_size,
1002 dir->pair[0], off+sizeof(tag), &temptail, 8);
1003 if (err) {
1004 if (err == LFS_ERR_CORRUPT) {
1005 dir->erased = false;
1006 break;
1007 }
1008 }
1009 lfs_pair_fromle32(temptail);
1010 }
1011
1012 // found a match for our fetcher?
1013 if ((fmask & tag) == (fmask & ftag)) {
1014 int res = cb(data, tag, &(struct lfs_diskoff){
1015 dir->pair[0], off+sizeof(tag)});
1016 if (res < 0) {
1017 if (res == LFS_ERR_CORRUPT) {
1018 dir->erased = false;
1019 break;
1020 }
1021 return res;
1022 }
1023
1024 if (res == LFS_CMP_EQ) {
1025 // found a match
1026 tempbesttag = tag;
1027 } else if ((LFS_MKTAG(0x7ff, 0x3ff, 0) & tag) ==
1028 (LFS_MKTAG(0x7ff, 0x3ff, 0) & tempbesttag)) {
1029 // found an identical tag, but contents didn't match
1030 // this must mean that our besttag has been overwritten
1031 tempbesttag = -1;
1032 } else if (res == LFS_CMP_GT &&
1033 lfs_tag_id(tag) <= lfs_tag_id(tempbesttag)) {
1034 // found a greater match, keep track to keep things sorted
1035 tempbesttag = tag | 0x80000000;
1036 }
1037 }
1038 }
1039
1040 // consider what we have good enough
1041 if (dir->off > 0) {
1042 // synthetic move
1043 if (lfs_gstate_hasmovehere(&lfs->gdisk, dir->pair)) {
1044 if (lfs_tag_id(lfs->gdisk.tag) == lfs_tag_id(besttag)) {
1045 besttag |= 0x80000000;
1046 } else if (besttag != -1 &&
1047 lfs_tag_id(lfs->gdisk.tag) < lfs_tag_id(besttag)) {
1048 besttag -= LFS_MKTAG(0, 1, 0);
1049 }
1050 }
1051
1052 // found tag? or found best id?
1053 if (id) {
1054 *id = lfs_min(lfs_tag_id(besttag), dir->count);
1055 }
1056
1057 if (lfs_tag_isvalid(besttag)) {
1058 return besttag;
1059 } else if (lfs_tag_id(besttag) < dir->count) {
1060 return LFS_ERR_NOENT;
1061 } else {
1062 return 0;
1063 }
1064 }
1065
1066 // failed, try the other block?
1067 lfs_pair_swap(dir->pair);
1068 dir->rev = revs[(r+1)%2];
1069 }
1070
1071 LFS_ERROR("Corrupted dir pair at {0x%"PRIx32", 0x%"PRIx32"}",
1072 dir->pair[0], dir->pair[1]);
1073 return LFS_ERR_CORRUPT;
1074 }
1075
// Fetch the most recent valid revision of a metadata pair into dir.
//
// Thin wrapper around lfs_dir_fetchmatch with no match callback. A
// mask/tag of all ones has the invalid bit set, so it can never match a
// real tag and fetchmatch degenerates into a plain fetch.
static int lfs_dir_fetch(lfs_t *lfs,
        lfs_mdir_t *dir, const lfs_block_t pair[2]) {
    const lfs_tag_t nomatch = (lfs_tag_t)-1;
    lfs_stag_t res = lfs_dir_fetchmatch(lfs, dir, pair,
            nomatch, nomatch, NULL, NULL, NULL);
    return (int)res;
}
1083
// Fold any gstate delta stored in dir into *gstate.
//
// Metadata pairs may carry an LFS_TYPE_MOVESTATE entry; when present it
// is decoded from little-endian and xored into the running gstate. A
// missing entry (LFS_ERR_NOENT) contributes nothing and is not an error.
static int lfs_dir_getgstate(lfs_t *lfs, const lfs_mdir_t *dir,
        lfs_gstate_t *gstate) {
    lfs_gstate_t delta;
    lfs_stag_t res = lfs_dir_get(lfs, dir, LFS_MKTAG(0x7ff, 0, 0),
            LFS_MKTAG(LFS_TYPE_MOVESTATE, 0, sizeof(delta)), &delta);
    if (res == LFS_ERR_NOENT) {
        // no movestate entry here, nothing to fold in
        return 0;
    }
    if (res < 0) {
        return res;
    }

    // xor together to find resulting gstate
    lfs_gstate_fromle32(&delta);
    lfs_gstate_xor(gstate, &delta);
    return 0;
}
1101
// Populate info with the name, type, and size of entry id in dir.
//
// id == 0x3ff is the reserved root id and is reported as the directory
// "/" without touching disk. The size is only meaningful for regular
// files (ctz- or inline-backed); other struct types leave it untouched.
static int lfs_dir_getinfo(lfs_t *lfs, lfs_mdir_t *dir,
        uint16_t id, struct lfs_info *info) {
    if (id == 0x3ff) {
        // special case for root
        info->type = LFS_TYPE_DIR;
        strcpy(info->name, "/");
        return 0;
    }

    // the name tag carries both the entry's name and its type
    lfs_stag_t res = lfs_dir_get(lfs, dir, LFS_MKTAG(0x780, 0x3ff, 0),
            LFS_MKTAG(LFS_TYPE_NAME, id, lfs->name_max+1), info->name);
    if (res < 0) {
        return (int)res;
    }
    info->type = lfs_tag_type3(res);

    // the struct tag tells us how the data is stored and how big it is
    struct lfs_ctz ctz;
    res = lfs_dir_get(lfs, dir, LFS_MKTAG(0x700, 0x3ff, 0),
            LFS_MKTAG(LFS_TYPE_STRUCT, id, sizeof(ctz)), &ctz);
    if (res < 0) {
        return (int)res;
    }
    lfs_ctz_fromle32(&ctz);

    switch (lfs_tag_type3(res)) {
    case LFS_TYPE_CTZSTRUCT:
        // ctz-backed file, size lives in the ctz struct
        info->size = ctz.size;
        break;
    case LFS_TYPE_INLINESTRUCT:
        // inline file, size is the tag's size field
        info->size = lfs_tag_size(res);
        break;
    }

    return 0;
}
1135
// Context handed to lfs_dir_find_match through lfs_dir_fetchmatch's
// opaque callback data pointer.
struct lfs_dir_find_match {
    lfs_t *lfs;        // filesystem handle, needed for disk reads
    const void *name;  // name being searched for (not NUL-terminated)
    lfs_size_t size;   // length of name in bytes
};
1141
// fetchmatch callback: compare a name tag's on-disk contents against
// the name we are searching for.
//
// Returns LFS_CMP_EQ on an exact match, otherwise LFS_CMP_LT/LFS_CMP_GT
// for the ordering of our name relative to the one on disk, or a
// negative error code if the disk read fails.
static int lfs_dir_find_match(void *data,
        lfs_tag_t tag, const void *buffer) {
    struct lfs_dir_find_match *find = data;
    lfs_t *lfs = find->lfs;
    const struct lfs_diskoff *disk = buffer;

    // compare the common prefix with what's on disk
    lfs_size_t common = lfs_min(find->size, lfs_tag_size(tag));
    int cmp = lfs_bd_cmp(lfs,
            NULL, &lfs->rcache, common,
            disk->block, disk->off, find->name, common);
    if (cmp != LFS_CMP_EQ) {
        return cmp;
    }

    // prefixes are equal, so ordering falls back to length
    if (find->size == lfs_tag_size(tag)) {
        // found a match!
        return LFS_CMP_EQ;
    }
    return (find->size < lfs_tag_size(tag)) ? LFS_CMP_LT : LFS_CMP_GT;
}
1165
// Resolve path, leaving the containing metadata pair fetched in dir.
//
// Walks the metadata list from the root, consuming one name component
// at a time. On success returns the tag of the final entry and reduces
// *path to its last name component; if id is non-NULL it receives the
// entry's id within dir (0x3ff for root). '.' components, and names
// cancelled by a later '..', are stripped without touching disk.
//
// Returns LFS_ERR_NOENT if a component is missing, LFS_ERR_NOTDIR if a
// non-final component is not a directory, or another negative error
// code on disk failure.
static lfs_stag_t lfs_dir_find(lfs_t *lfs, lfs_mdir_t *dir,
        const char **path, uint16_t *id) {
    // we reduce path to a single name if we can find it
    const char *name = *path;
    if (id) {
        *id = 0x3ff;
    }

    // default to root dir
    lfs_stag_t tag = LFS_MKTAG(LFS_TYPE_DIR, 0x3ff, 0);
    dir->tail[0] = lfs->root[0];
    dir->tail[1] = lfs->root[1];

    while (true) {
nextname:
        // skip slashes
        name += strspn(name, "/");
        lfs_size_t namelen = strcspn(name, "/");

        // skip '.' and root '..'
        if ((namelen == 1 && memcmp(name, ".", 1) == 0) ||
            (namelen == 2 && memcmp(name, "..", 2) == 0)) {
            name += namelen;
            goto nextname;
        }

        // skip if matched by '..' in name
        // (scan ahead for a '..' that cancels this component)
        const char *suffix = name + namelen;
        lfs_size_t sufflen;
        int depth = 1;
        while (true) {
            suffix += strspn(suffix, "/");
            sufflen = strcspn(suffix, "/");
            if (sufflen == 0) {
                break;
            }

            if (sufflen == 2 && memcmp(suffix, "..", 2) == 0) {
                depth -= 1;
                if (depth == 0) {
                    // a later '..' cancels this name, skip ahead
                    name = suffix + sufflen;
                    goto nextname;
                }
            } else {
                depth += 1;
            }

            suffix += sufflen;
        }

        // found path
        if (name[0] == '\0') {
            return tag;
        }

        // update what we've found so far
        *path = name;

        // only continue if we hit a directory
        if (lfs_tag_type3(tag) != LFS_TYPE_DIR) {
            return LFS_ERR_NOTDIR;
        }

        // grab the entry data (the directory's metadata pair)
        if (lfs_tag_id(tag) != 0x3ff) {
            lfs_stag_t res = lfs_dir_get(lfs, dir, LFS_MKTAG(0x700, 0x3ff, 0),
                    LFS_MKTAG(LFS_TYPE_STRUCT, lfs_tag_id(tag), 8), dir->tail);
            if (res < 0) {
                return res;
            }
            lfs_pair_fromle32(dir->tail);
        }

        // find entry matching name, following the tail-chain of
        // metadata pairs that make up this directory
        while (true) {
            tag = lfs_dir_fetchmatch(lfs, dir, dir->tail,
                    LFS_MKTAG(0x780, 0, 0),
                    LFS_MKTAG(LFS_TYPE_NAME, 0, namelen),
                    // are we last name?
                    (strchr(name, '/') == NULL) ? id : NULL,
                    lfs_dir_find_match, &(struct lfs_dir_find_match){
                        lfs, name, namelen});
            if (tag < 0) {
                return tag;
            }

            if (tag) {
                break;
            }

            // not in this pair; stop if the chain ends here
            if (!dir->split) {
                return LFS_ERR_NOENT;
            }
        }

        // to next name
        name += namelen;
    }
}
1265
1266 // commit logic
// In-progress commit to a metadata block; tracks where we are in the
// block plus the running state needed to chain tags together.
struct lfs_commit {
    lfs_block_t block;  // block being committed to
    lfs_off_t off;      // current program offset within the block
    lfs_tag_t ptag;     // previous tag, xored into the next tag on disk
    uint32_t crc;       // running crc of the commit so far

    lfs_off_t begin;    // offset where this commit started
    lfs_off_t end;      // offset we must not program past
};
1276
1277 #ifndef LFS_READONLY
// Program raw bytes into an in-progress commit, advancing the commit
// offset and folding the data into the running crc.
static int lfs_dir_commitprog(lfs_t *lfs, struct lfs_commit *commit,
        const void *buffer, lfs_size_t size) {
    int err = lfs_bd_prog(lfs,
            &lfs->pcache, &lfs->rcache, false,
            commit->block, commit->off,
            (const uint8_t*)buffer, size);
    if (err) {
        return err;
    }

    // account for what we just wrote
    commit->off += size;
    commit->crc = lfs_crc(commit->crc, buffer, size);
    return 0;
}
1292 #endif
1293
1294 #ifndef LFS_READONLY
// Append a single tag and its data to an in-progress commit.
//
// The tag is chained to the previous tag via xor and written big-endian.
// If the tag's high bit is set, buffer is a struct lfs_diskoff and the
// data is copied from disk a byte at a time (relying on the read cache
// for efficiency); otherwise the data comes straight from memory.
//
// Returns LFS_ERR_NOSPC if the tag would not fit before commit->end.
static int lfs_dir_commitattr(lfs_t *lfs, struct lfs_commit *commit,
        lfs_tag_t tag, const void *buffer) {
    lfs_size_t dsize = lfs_tag_dsize(tag);

    // would this tag overflow the commit region?
    if (commit->off + dsize > commit->end) {
        return LFS_ERR_NOSPC;
    }

    // tags are xored with the previous tag and stored big-endian
    lfs_tag_t ntag = lfs_tobe32((tag & 0x7fffffff) ^ commit->ptag);
    int err = lfs_dir_commitprog(lfs, commit, &ntag, sizeof(ntag));
    if (err) {
        return err;
    }

    if (tag & 0x80000000) {
        // data lives on disk, copy it over a byte at a time
        const struct lfs_diskoff *disk = buffer;
        for (lfs_off_t i = 0; i < dsize-sizeof(tag); i++) {
            uint8_t b;
            err = lfs_bd_read(lfs,
                    NULL, &lfs->rcache, dsize-sizeof(tag)-i,
                    disk->block, disk->off+i, &b, 1);
            if (err) {
                return err;
            }

            err = lfs_dir_commitprog(lfs, commit, &b, 1);
            if (err) {
                return err;
            }
        }
    } else {
        // data is already in memory
        err = lfs_dir_commitprog(lfs, commit, buffer, dsize-sizeof(tag));
        if (err) {
            return err;
        }
    }

    commit->ptag = tag & 0x7fffffff;
    return 0;
}
1339 #endif
1340
1341 #ifndef LFS_READONLY
// Finish a commit by padding out to a program-unit boundary with crc
// tags and verifying what actually hit the disk.
//
// Padding is deliberately not crced (so fetch can skip it), which means
// each stretch of padding needs its own crc tag. The next program unit
// is probed to decide whether its erased state can be trusted, encoded
// as the "reset" bit in the crc tag. After flushing, the entire commit
// is read back and re-crced to catch writes that silently failed.
static int lfs_dir_commitcrc(lfs_t *lfs, struct lfs_commit *commit) {
    // align to program units
    const lfs_off_t end = lfs_alignup(commit->off + 2*sizeof(uint32_t),
            lfs->cfg->prog_size);

    // offset/crc of the first (non-padding) crc tag, for verification
    lfs_off_t off1 = 0;
    uint32_t crc1 = 0;

    // create crc tags to fill up remainder of commit, note that
    // padding is not crced, which lets fetches skip padding but
    // makes committing a bit more complicated
    while (commit->off < end) {
        lfs_off_t off = commit->off + sizeof(lfs_tag_t);
        // crc tag size field is 10 bits, so cap padding runs at 0x3fe
        lfs_off_t noff = lfs_min(end - off, 0x3fe) + off;
        if (noff < end) {
            noff = lfs_min(noff, end - 2*sizeof(uint32_t));
        }

        // read erased state from next program unit
        lfs_tag_t tag = 0xffffffff;
        int err = lfs_bd_read(lfs,
                NULL, &lfs->rcache, sizeof(tag),
                commit->block, noff, &tag, sizeof(tag));
        if (err && err != LFS_ERR_CORRUPT) {
            return err;
        }

        // build crc tag
        bool reset = ~lfs_frombe32(tag) >> 31;
        tag = LFS_MKTAG(LFS_TYPE_CRC + reset, 0x3ff, noff - off);

        // write out crc
        uint32_t footer[2];
        footer[0] = lfs_tobe32(tag ^ commit->ptag);
        commit->crc = lfs_crc(commit->crc, &footer[0], sizeof(footer[0]));
        footer[1] = lfs_tole32(commit->crc);
        err = lfs_bd_prog(lfs,
                &lfs->pcache, &lfs->rcache, false,
                commit->block, commit->off, &footer, sizeof(footer));
        if (err) {
            return err;
        }

        // keep track of non-padding checksum to verify
        if (off1 == 0) {
            off1 = commit->off + sizeof(uint32_t);
            crc1 = commit->crc;
        }

        commit->off += sizeof(tag)+lfs_tag_size(tag);
        commit->ptag = tag ^ ((lfs_tag_t)reset << 31);
        commit->crc = 0xffffffff; // reset crc for next "commit"
    }

    // flush buffers
    int err = lfs_bd_sync(lfs, &lfs->pcache, &lfs->rcache, false);
    if (err) {
        return err;
    }

    // successful commit, check checksums to make sure
    lfs_off_t off = commit->begin;
    lfs_off_t noff = off1;
    while (off < end) {
        uint32_t crc = 0xffffffff;
        for (lfs_off_t i = off; i < noff+sizeof(uint32_t); i++) {
            // check against written crc, may catch blocks that
            // become readonly and match our commit size exactly
            if (i == off1 && crc != crc1) {
                return LFS_ERR_CORRUPT;
            }

            // leave it up to caching to make this efficient
            uint8_t dat;
            err = lfs_bd_read(lfs,
                    NULL, &lfs->rcache, noff+sizeof(uint32_t)-i,
                    commit->block, i, &dat, 1);
            if (err) {
                return err;
            }

            crc = lfs_crc(crc, &dat, 1);
        }

        // detected write error?
        // (the crc of data + stored crc must come out to zero)
        if (crc != 0) {
            return LFS_ERR_CORRUPT;
        }

        // skip padding
        off = lfs_min(end - noff, 0x3fe) + noff;
        if (off < end) {
            off = lfs_min(off, end - 2*sizeof(uint32_t));
        }
        noff = off + sizeof(uint32_t);
    }

    return 0;
}
1441 #endif
1442
1443 #ifndef LFS_READONLY
// Allocate a fresh metadata pair for dir and initialize it in RAM.
//
// Blocks are allocated backwards so block 1 is written first. The
// revision count is seeded from whatever currently sits in block 0 (if
// readable) and then aligned to the block_cycles modulus so the new
// pair isn't immediately evicted by wear leveling. Nothing is written
// to disk here; the caller commits later.
static int lfs_dir_alloc(lfs_t *lfs, lfs_mdir_t *dir) {
    // allocate pair of dir blocks (backwards, so we write block 1 first)
    int err = lfs_alloc(lfs, &dir->pair[1]);
    if (err) {
        return err;
    }
    err = lfs_alloc(lfs, &dir->pair[0]);
    if (err) {
        return err;
    }

    // zero for reproducibility in case initial block is unreadable
    dir->rev = 0;

    // rather than clobbering one of the blocks we just pretend
    // the revision may be valid
    err = lfs_bd_read(lfs,
            NULL, &lfs->rcache, sizeof(dir->rev),
            dir->pair[0], 0, &dir->rev, sizeof(dir->rev));
    dir->rev = lfs_fromle32(dir->rev);
    if (err && err != LFS_ERR_CORRUPT) {
        return err;
    }

    // to make sure we don't immediately evict, align the new revision count
    // to our block_cycles modulus, see lfs_dir_compact for why our modulus
    // is tweaked this way
    if (lfs->cfg->block_cycles > 0) {
        dir->rev = lfs_alignup(dir->rev, ((lfs->cfg->block_cycles+1)|1));
    }

    // set defaults for an empty, unsplit directory
    dir->off = sizeof(dir->rev);
    dir->etag = 0xffffffff;
    dir->count = 0;
    dir->tail[0] = LFS_BLOCK_NULL;
    dir->tail[1] = LFS_BLOCK_NULL;
    dir->erased = false;
    dir->split = false;

    // don't write out yet, let caller take care of that
    return 0;
}
1485 #endif
1486
1487 #ifndef LFS_READONLY
// Splice tail out of the metadata linked-list by folding it into dir.
//
// dir inherits both tail's gstate contribution (into lfs->gdelta) and
// tail's tail pointer via a commit of a new TAIL tag.
static int lfs_dir_drop(lfs_t *lfs, lfs_mdir_t *dir, lfs_mdir_t *tail) {
    // steal state
    int err = lfs_dir_getgstate(lfs, tail, &lfs->gdelta);
    if (err) {
        return err;
    }

    // steal tail
    lfs_pair_tole32(tail->tail);
    err = lfs_dir_commit(lfs, dir, LFS_MKATTRS(
            {LFS_MKTAG(LFS_TYPE_TAIL + tail->split, 0x3ff, 8), tail->tail}));
    lfs_pair_fromle32(tail->tail);
    return err;
}
1506 #endif
1507
1508 #ifndef LFS_READONLY
// Split entries [split, end) of source into a freshly allocated
// metadata pair, linked in as dir's new tail.
static int lfs_dir_split(lfs_t *lfs,
        lfs_mdir_t *dir, const struct lfs_mattr *attrs, int attrcount,
        lfs_mdir_t *source, uint16_t split, uint16_t end) {
    // create tail directory
    lfs_alloc_ack(lfs);
    lfs_mdir_t tail;
    int err = lfs_dir_alloc(lfs, &tail);
    if (err) {
        return err;
    }

    // the new tail inherits our old tail pointer
    tail.split = dir->split;
    tail.tail[0] = dir->tail[0];
    tail.tail[1] = dir->tail[1];

    // compact the upper half of the entries into the new tail
    err = lfs_dir_compact(lfs, &tail, attrs, attrcount, source, split, end);
    if (err) {
        return err;
    }

    // link the new tail into our chain
    dir->split = true;
    dir->tail[0] = tail.pair[0];
    dir->tail[1] = tail.pair[1];

    // if we split everything out of the root, root moves to the new pair
    if (split == 0 && lfs_pair_cmp(dir->pair, lfs->root) == 0) {
        lfs->root[0] = tail.pair[0];
        lfs->root[1] = tail.pair[1];
    }

    return 0;
}
1541 #endif
1542
1543 #ifndef LFS_READONLY
// traversal callback: accumulate the on-disk size of each visited tag
// into the lfs_size_t pointed to by p
static int lfs_dir_commit_size(void *p, lfs_tag_t tag, const void *buffer) {
    (void)buffer;
    lfs_size_t *total = p;
    *total += lfs_tag_dsize(tag);
    return 0;
}
1551 #endif
1552
1553 #ifndef LFS_READONLY
// Bundles the lfs handle with an in-progress commit so both fit through
// a single traversal-callback data pointer.
struct lfs_dir_commit_commit {
    lfs_t *lfs;                 // filesystem handle
    struct lfs_commit *commit;  // commit to append visited tags to
};
1558 #endif
1559
1560 #ifndef LFS_READONLY
// traversal callback: forward each visited tag into the commit in p
static int lfs_dir_commit_commit(void *p, lfs_tag_t tag, const void *buffer) {
    struct lfs_dir_commit_commit *ctx = p;
    return lfs_dir_commitattr(ctx->lfs, ctx->commit, tag, buffer);
}
1565 #endif
1566
1567 #ifndef LFS_READONLY
// Rewrite a metadata pair from scratch, compacting source's entries
// [begin, end) plus the pending attrs into dir's pair.
//
// First shrinks end by splitting until the compacted contents fit in
// roughly half a block (or metadata_max). Every block_cycles revisions
// the pair is proactively relocated for wear leveling — except the
// superblock, which is expanded instead of relocated. The compaction
// itself is retried on freshly allocated blocks whenever programming
// reports corruption (the relocate path).
static int lfs_dir_compact(lfs_t *lfs,
        lfs_mdir_t *dir, const struct lfs_mattr *attrs, int attrcount,
        lfs_mdir_t *source, uint16_t begin, uint16_t end) {
    // save some state in case block is bad
    const lfs_block_t oldpair[2] = {dir->pair[0], dir->pair[1]};
    bool relocated = false;
    bool tired = false;

    // should we split?
    while (end - begin > 1) {
        // find size
        lfs_size_t size = 0;
        int err = lfs_dir_traverse(lfs,
                source, 0, 0xffffffff, attrs, attrcount,
                LFS_MKTAG(0x400, 0x3ff, 0),
                LFS_MKTAG(LFS_TYPE_NAME, 0, 0),
                begin, end, -begin,
                lfs_dir_commit_size, &size);
        if (err) {
            return err;
        }

        // space is complicated, we need room for tail, crc, gstate,
        // cleanup delete, and we cap at half a block to give room
        // for metadata updates.
        if (end - begin < 0xff &&
                size <= lfs_min(lfs->cfg->block_size - 36,
                    lfs_alignup((lfs->cfg->metadata_max ?
                        lfs->cfg->metadata_max : lfs->cfg->block_size)/2,
                        lfs->cfg->prog_size))) {
            break;
        }

        // can't fit, need to split, we should really be finding the
        // largest size that fits with a small binary search, but right now
        // it's not worth the code size
        uint16_t split = (end - begin) / 2;
        err = lfs_dir_split(lfs, dir, attrs, attrcount,
                source, begin+split, end);
        if (err) {
            // if we fail to split, we may be able to overcompact, unless
            // we're too big for even the full block, in which case our
            // only option is to error
            if (err == LFS_ERR_NOSPC && size <= lfs->cfg->block_size - 36) {
                break;
            }
            return err;
        }

        end = begin + split;
    }

    // increment revision count
    dir->rev += 1;
    // If our revision count == n * block_cycles, we should force a relocation,
    // this is how littlefs wear-levels at the metadata-pair level. Note that we
    // actually use (block_cycles+1)|1, this is to avoid two corner cases:
    // 1. block_cycles = 1, which would prevent relocations from terminating
    // 2. block_cycles = 2n, which, due to aliasing, would only ever relocate
    //    one metadata block in the pair, effectively making this useless
    if (lfs->cfg->block_cycles > 0 &&
            (dir->rev % ((lfs->cfg->block_cycles+1)|1) == 0)) {
        if (lfs_pair_cmp(dir->pair, (const lfs_block_t[2]){0, 1}) == 0) {
            // oh no! we're writing too much to the superblock,
            // should we expand?
            lfs_ssize_t res = lfs_fs_rawsize(lfs);
            if (res < 0) {
                return res;
            }

            // do we have extra space? littlefs can't reclaim this space
            // by itself, so expand cautiously
            if ((lfs_size_t)res < lfs->cfg->block_count/2) {
                LFS_DEBUG("Expanding superblock at rev %"PRIu32, dir->rev);
                int err = lfs_dir_split(lfs, dir, attrs, attrcount,
                        source, begin, end);
                if (err && err != LFS_ERR_NOSPC) {
                    return err;
                }

                // welp, we tried, if we ran out of space there's not much
                // we can do, we'll error later if we've become frozen
                if (!err) {
                    end = begin;
                }
            }
#ifdef LFS_MIGRATE
        } else if (lfs->lfs1) {
            // do not proactively relocate blocks during migrations, this
            // can cause a number of failure states such: clobbering the
            // v1 superblock if we relocate root, and invalidating directory
            // pointers if we relocate the head of a directory. On top of
            // this, relocations increase the overall complexity of
            // lfs_migration, which is already a delicate operation.
#endif
        } else {
            // we're writing too much, time to relocate
            tired = true;
            goto relocate;
        }
    }

    // begin loop to commit compaction to blocks until a compact sticks
    while (true) {
        {
            // setup commit state
            struct lfs_commit commit = {
                .block = dir->pair[1],
                .off = 0,
                .ptag = 0xffffffff,
                .crc = 0xffffffff,

                .begin = 0,
                .end = (lfs->cfg->metadata_max ?
                    lfs->cfg->metadata_max : lfs->cfg->block_size) - 8,
            };

            // erase block to write to
            int err = lfs_bd_erase(lfs, dir->pair[1]);
            if (err) {
                if (err == LFS_ERR_CORRUPT) {
                    goto relocate;
                }
                return err;
            }

            // write out header (revision count, stored little-endian)
            dir->rev = lfs_tole32(dir->rev);
            err = lfs_dir_commitprog(lfs, &commit,
                    &dir->rev, sizeof(dir->rev));
            dir->rev = lfs_fromle32(dir->rev);
            if (err) {
                if (err == LFS_ERR_CORRUPT) {
                    goto relocate;
                }
                return err;
            }

            // traverse the directory, this time writing out all unique tags
            err = lfs_dir_traverse(lfs,
                    source, 0, 0xffffffff, attrs, attrcount,
                    LFS_MKTAG(0x400, 0x3ff, 0),
                    LFS_MKTAG(LFS_TYPE_NAME, 0, 0),
                    begin, end, -begin,
                    lfs_dir_commit_commit, &(struct lfs_dir_commit_commit){
                        lfs, &commit});
            if (err) {
                if (err == LFS_ERR_CORRUPT) {
                    goto relocate;
                }
                return err;
            }

            // commit tail, which may be new after last size check
            if (!lfs_pair_isnull(dir->tail)) {
                lfs_pair_tole32(dir->tail);
                err = lfs_dir_commitattr(lfs, &commit,
                        LFS_MKTAG(LFS_TYPE_TAIL + dir->split, 0x3ff, 8),
                        dir->tail);
                lfs_pair_fromle32(dir->tail);
                if (err) {
                    if (err == LFS_ERR_CORRUPT) {
                        goto relocate;
                    }
                    return err;
                }
            }

            // bring over gstate?
            lfs_gstate_t delta = {0};
            if (!relocated) {
                lfs_gstate_xor(&delta, &lfs->gdisk);
                lfs_gstate_xor(&delta, &lfs->gstate);
            }
            lfs_gstate_xor(&delta, &lfs->gdelta);
            delta.tag &= ~LFS_MKTAG(0, 0, 0x3ff);

            err = lfs_dir_getgstate(lfs, dir, &delta);
            if (err) {
                return err;
            }

            if (!lfs_gstate_iszero(&delta)) {
                lfs_gstate_tole32(&delta);
                err = lfs_dir_commitattr(lfs, &commit,
                        LFS_MKTAG(LFS_TYPE_MOVESTATE, 0x3ff,
                            sizeof(delta)), &delta);
                if (err) {
                    if (err == LFS_ERR_CORRUPT) {
                        goto relocate;
                    }
                    return err;
                }
            }

            // complete commit with crc
            err = lfs_dir_commitcrc(lfs, &commit);
            if (err) {
                if (err == LFS_ERR_CORRUPT) {
                    goto relocate;
                }
                return err;
            }

            // successful compaction, swap dir pair to indicate most recent
            LFS_ASSERT(commit.off % lfs->cfg->prog_size == 0);
            lfs_pair_swap(dir->pair);
            dir->count = end - begin;
            dir->off = commit.off;
            dir->etag = commit.ptag;
            // update gstate
            lfs->gdelta = (lfs_gstate_t){0};
            if (!relocated) {
                lfs->gdisk = lfs->gstate;
            }
        }
        break;

relocate:
        // commit was corrupted, drop caches and prepare to relocate block
        relocated = true;
        lfs_cache_drop(lfs, &lfs->pcache);
        if (!tired) {
            LFS_DEBUG("Bad block at 0x%"PRIx32, dir->pair[1]);
        }

        // can't relocate superblock, filesystem is now frozen
        if (lfs_pair_cmp(dir->pair, (const lfs_block_t[2]){0, 1}) == 0) {
            LFS_WARN("Superblock 0x%"PRIx32" has become unwritable",
                    dir->pair[1]);
            return LFS_ERR_NOSPC;
        }

        // relocate half of pair
        int err = lfs_alloc(lfs, &dir->pair[1]);
        if (err && (err != LFS_ERR_NOSPC || !tired)) {
            return err;
        }

        tired = false;
        continue;
    }

    if (relocated) {
        // update references if we relocated
        LFS_DEBUG("Relocating {0x%"PRIx32", 0x%"PRIx32"} "
                "-> {0x%"PRIx32", 0x%"PRIx32"}",
                oldpair[0], oldpair[1], dir->pair[0], dir->pair[1]);
        int err = lfs_fs_relocate(lfs, oldpair, dir->pair);
        if (err) {
            return err;
        }
    }

    return 0;
}
1824 #endif
1825
1826 #ifndef LFS_READONLY
// Commit a list of attribute changes (attrs) to a metadata pair.
//
// Before appending: inline files on this pair that no longer fit in
// their caches are evicted to their own blocks, and if the commit
// deletes the pair's last entry the whole block is dropped from the
// metadata list. The attrs (plus any pending gstate delta) are then
// appended to the existing log; on NOSPC/CORRUPT we fall back to a full
// compaction. Afterwards any open mdirs affected by creates, deletes,
// or splits are patched so they stay consistent.
static int lfs_dir_commit(lfs_t *lfs, lfs_mdir_t *dir,
        const struct lfs_mattr *attrs, int attrcount) {
    // check for any inline files that aren't RAM backed and
    // forcefully evict them, needed for filesystem consistency
    for (lfs_file_t *f = (lfs_file_t*)lfs->mlist; f; f = f->next) {
        if (dir != &f->m && lfs_pair_cmp(f->m.pair, dir->pair) == 0 &&
                f->type == LFS_TYPE_REG && (f->flags & LFS_F_INLINE) &&
                f->ctz.size > lfs->cfg->cache_size) {
            int err = lfs_file_outline(lfs, f);
            if (err) {
                return err;
            }

            err = lfs_file_flush(lfs, f);
            if (err) {
                return err;
            }
        }
    }

    // calculate changes to the directory
    // (olddir saved so we can roll dir back on failure)
    lfs_mdir_t olddir = *dir;
    bool hasdelete = false;
    for (int i = 0; i < attrcount; i++) {
        if (lfs_tag_type3(attrs[i].tag) == LFS_TYPE_CREATE) {
            dir->count += 1;
        } else if (lfs_tag_type3(attrs[i].tag) == LFS_TYPE_DELETE) {
            LFS_ASSERT(dir->count > 0);
            dir->count -= 1;
            hasdelete = true;
        } else if (lfs_tag_type1(attrs[i].tag) == LFS_TYPE_TAIL) {
            dir->tail[0] = ((lfs_block_t*)attrs[i].buffer)[0];
            dir->tail[1] = ((lfs_block_t*)attrs[i].buffer)[1];
            dir->split = (lfs_tag_chunk(attrs[i].tag) & 1);
            lfs_pair_fromle32(dir->tail);
        }
    }

    // should we actually drop the directory block?
    if (hasdelete && dir->count == 0) {
        lfs_mdir_t pdir;
        int err = lfs_fs_pred(lfs, dir->pair, &pdir);
        if (err && err != LFS_ERR_NOENT) {
            *dir = olddir;
            return err;
        }

        if (err != LFS_ERR_NOENT && pdir.split) {
            err = lfs_dir_drop(lfs, &pdir, dir);
            if (err) {
                *dir = olddir;
                return err;
            }
        }
    }

    // append to the log if the block is still erased; a count at the
    // 0xff id limit also forces the compact path
    if (dir->erased || dir->count >= 0xff) {
        // try to commit
        struct lfs_commit commit = {
            .block = dir->pair[0],
            .off = dir->off,
            .ptag = dir->etag,
            .crc = 0xffffffff,

            .begin = dir->off,
            .end = (lfs->cfg->metadata_max ?
                lfs->cfg->metadata_max : lfs->cfg->block_size) - 8,
        };

        // traverse attrs that need to be written out
        lfs_pair_tole32(dir->tail);
        int err = lfs_dir_traverse(lfs,
                dir, dir->off, dir->etag, attrs, attrcount,
                0, 0, 0, 0, 0,
                lfs_dir_commit_commit, &(struct lfs_dir_commit_commit){
                    lfs, &commit});
        lfs_pair_fromle32(dir->tail);
        if (err) {
            if (err == LFS_ERR_NOSPC || err == LFS_ERR_CORRUPT) {
                goto compact;
            }
            *dir = olddir;
            return err;
        }

        // commit any global diffs if we have any
        lfs_gstate_t delta = {0};
        lfs_gstate_xor(&delta, &lfs->gstate);
        lfs_gstate_xor(&delta, &lfs->gdisk);
        lfs_gstate_xor(&delta, &lfs->gdelta);
        delta.tag &= ~LFS_MKTAG(0, 0, 0x3ff);
        if (!lfs_gstate_iszero(&delta)) {
            err = lfs_dir_getgstate(lfs, dir, &delta);
            if (err) {
                *dir = olddir;
                return err;
            }

            lfs_gstate_tole32(&delta);
            err = lfs_dir_commitattr(lfs, &commit,
                    LFS_MKTAG(LFS_TYPE_MOVESTATE, 0x3ff,
                        sizeof(delta)), &delta);
            if (err) {
                if (err == LFS_ERR_NOSPC || err == LFS_ERR_CORRUPT) {
                    goto compact;
                }
                *dir = olddir;
                return err;
            }
        }

        // finalize commit with the crc
        err = lfs_dir_commitcrc(lfs, &commit);
        if (err) {
            if (err == LFS_ERR_NOSPC || err == LFS_ERR_CORRUPT) {
                goto compact;
            }
            *dir = olddir;
            return err;
        }

        // successful commit, update dir
        LFS_ASSERT(commit.off % lfs->cfg->prog_size == 0);
        dir->off = commit.off;
        dir->etag = commit.ptag;
        // and update gstate
        lfs->gdisk = lfs->gstate;
        lfs->gdelta = (lfs_gstate_t){0};
    } else {
compact:
        // fall back to compaction
        lfs_cache_drop(lfs, &lfs->pcache);

        int err = lfs_dir_compact(lfs, dir, attrs, attrcount,
                dir, 0, dir->count);
        if (err) {
            *dir = olddir;
            return err;
        }
    }

    // this complicated bit of logic is for fixing up any active
    // metadata-pairs that we may have affected
    //
    // note we have to make two passes since the mdir passed to
    // lfs_dir_commit could also be in this list, and even then
    // we need to copy the pair so they don't get clobbered if we refetch
    // our mdir.
    for (struct lfs_mlist *d = lfs->mlist; d; d = d->next) {
        if (&d->m != dir && lfs_pair_cmp(d->m.pair, olddir.pair) == 0) {
            d->m = *dir;
            for (int i = 0; i < attrcount; i++) {
                if (lfs_tag_type3(attrs[i].tag) == LFS_TYPE_DELETE &&
                        d->id == lfs_tag_id(attrs[i].tag)) {
                    // this open handle's entry was deleted outright
                    d->m.pair[0] = LFS_BLOCK_NULL;
                    d->m.pair[1] = LFS_BLOCK_NULL;
                } else if (lfs_tag_type3(attrs[i].tag) == LFS_TYPE_DELETE &&
                        d->id > lfs_tag_id(attrs[i].tag)) {
                    // ids above a delete shift down by one
                    d->id -= 1;
                    if (d->type == LFS_TYPE_DIR) {
                        ((lfs_dir_t*)d)->pos -= 1;
                    }
                } else if (lfs_tag_type3(attrs[i].tag) == LFS_TYPE_CREATE &&
                        d->id >= lfs_tag_id(attrs[i].tag)) {
                    // ids at/above a create shift up by one
                    d->id += 1;
                    if (d->type == LFS_TYPE_DIR) {
                        ((lfs_dir_t*)d)->pos += 1;
                    }
                }
            }
        }
    }

    // second pass: follow splits so each handle's id lands in the pair
    // that actually holds its entry now
    for (struct lfs_mlist *d = lfs->mlist; d; d = d->next) {
        if (lfs_pair_cmp(d->m.pair, olddir.pair) == 0) {
            while (d->id >= d->m.count && d->m.split) {
                // we split and id is on tail now
                d->id -= d->m.count;
                int err = lfs_dir_fetch(lfs, &d->m, d->m.tail);
                if (err) {
                    return err;
                }
            }
        }
    }

    return 0;
}
2015 #endif
2016
2017
2018 /// Top level directory operations ///
2019 #ifndef LFS_READONLY
// Create a new directory at path.
//
// Allocates and commits a fresh metadata pair for the directory, links
// it onto the end of the metadata list (bumping the orphan counter to
// guard the window where list and parent are desynced), and finally
// inserts the name into the parent block. Returns LFS_ERR_EXIST if the
// path already exists and LFS_ERR_NAMETOOLONG if the final name exceeds
// name_max.
static int lfs_rawmkdir(lfs_t *lfs, const char *path) {
    // deorphan if we haven't yet, needed at most once after poweron
    int err = lfs_fs_forceconsistency(lfs);
    if (err) {
        return err;
    }

    // cwd doubles as an mlist entry so relocations during the commit
    // below can patch our view of the parent
    struct lfs_mlist cwd;
    cwd.next = lfs->mlist;
    uint16_t id;
    err = lfs_dir_find(lfs, &cwd.m, &path, &id);
    if (!(err == LFS_ERR_NOENT && id != 0x3ff)) {
        return (err < 0) ? err : LFS_ERR_EXIST;
    }

    // check that name fits
    lfs_size_t nlen = strlen(path);
    if (nlen > lfs->name_max) {
        return LFS_ERR_NAMETOOLONG;
    }

    // build up new directory
    lfs_alloc_ack(lfs);
    lfs_mdir_t dir;
    err = lfs_dir_alloc(lfs, &dir);
    if (err) {
        return err;
    }

    // find end of list
    lfs_mdir_t pred = cwd.m;
    while (pred.split) {
        err = lfs_dir_fetch(lfs, &pred, pred.tail);
        if (err) {
            return err;
        }
    }

    // setup dir
    // (commit the new dir first, pointing at the old end-of-list tail)
    lfs_pair_tole32(pred.tail);
    err = lfs_dir_commit(lfs, &dir, LFS_MKATTRS(
            {LFS_MKTAG(LFS_TYPE_SOFTTAIL, 0x3ff, 8), pred.tail}));
    lfs_pair_fromle32(pred.tail);
    if (err) {
        return err;
    }

    // current block end of list?
    if (cwd.m.split) {
        // update tails, this creates a desync
        err = lfs_fs_preporphans(lfs, +1);
        if (err) {
            return err;
        }

        // it's possible our predecessor has to be relocated, and if
        // our parent is our predecessor's predecessor, this could have
        // caused our parent to go out of date, fortunately we can hook
        // ourselves into littlefs to catch this
        cwd.type = 0;
        cwd.id = 0;
        lfs->mlist = &cwd;

        lfs_pair_tole32(dir.pair);
        err = lfs_dir_commit(lfs, &pred, LFS_MKATTRS(
                {LFS_MKTAG(LFS_TYPE_SOFTTAIL, 0x3ff, 8), dir.pair}));
        lfs_pair_fromle32(dir.pair);
        if (err) {
            lfs->mlist = cwd.next;
            return err;
        }

        lfs->mlist = cwd.next;
        err = lfs_fs_preporphans(lfs, -1);
        if (err) {
            return err;
        }
    }

    // now insert into our parent block
    lfs_pair_tole32(dir.pair);
    err = lfs_dir_commit(lfs, &cwd.m, LFS_MKATTRS(
            {LFS_MKTAG(LFS_TYPE_CREATE, id, 0), NULL},
            {LFS_MKTAG(LFS_TYPE_DIR, id, nlen), path},
            {LFS_MKTAG(LFS_TYPE_DIRSTRUCT, id, 8), dir.pair},
            {LFS_MKTAG_IF(!cwd.m.split,
                LFS_TYPE_SOFTTAIL, 0x3ff, 8), dir.pair}));
    lfs_pair_fromle32(dir.pair);
    if (err) {
        return err;
    }

    return 0;
}
2114 #endif
2115
// Open the directory at path: resolve it, fetch its first metadata
// pair, and register the handle on lfs->mlist so concurrent commits
// keep it consistent.
//
// Returns 0 on success, LFS_ERR_NOTDIR if path names a non-directory,
// or a negative error code from the underlying lookup/fetch.
static int lfs_dir_rawopen(lfs_t *lfs, lfs_dir_t *dir, const char *path) {
    lfs_stag_t tag = lfs_dir_find(lfs, &dir->m, &path, NULL);
    if (tag < 0) {
        return tag;
    }

    if (lfs_tag_type3(tag) != LFS_TYPE_DIR) {
        return LFS_ERR_NOTDIR;
    }

    lfs_block_t pair[2];
    if (lfs_tag_id(tag) == 0x3ff) {
        // handle root dir separately
        pair[0] = lfs->root[0];
        pair[1] = lfs->root[1];
    } else {
        // get dir pair from parent
        lfs_stag_t res = lfs_dir_get(lfs, &dir->m, LFS_MKTAG(0x700, 0x3ff, 0),
                LFS_MKTAG(LFS_TYPE_STRUCT, lfs_tag_id(tag), 8), pair);
        if (res < 0) {
            return res;
        }
        // pair is stored little-endian on disk
        lfs_pair_fromle32(pair);
    }

    // fetch first pair
    int err = lfs_dir_fetch(lfs, &dir->m, pair);
    if (err) {
        return err;
    }

    // setup entry; head remembers the first pair so rewind can refetch it
    dir->head[0] = dir->m.pair[0];
    dir->head[1] = dir->m.pair[1];
    dir->id = 0;
    dir->pos = 0;

    // add to list of mdirs
    dir->type = LFS_TYPE_DIR;
    lfs_mlist_append(lfs, (struct lfs_mlist *)dir);

    return 0;
}
2159
// Close an open directory by unhooking it from the list of tracked
// metadata dirs; a dir handle holds no dirty state, so nothing needs
// to be written back.
static int lfs_dir_rawclose(lfs_t *lfs, lfs_dir_t *dir) {
    struct lfs_mlist *entry = (struct lfs_mlist *)dir;
    lfs_mlist_remove(lfs, entry);
    return 0;
}
2166
// Read the next directory entry into info, synthesizing "." and ".."
// as the first two entries. Returns a positive value (true) when an
// entry was read, 0 (false) at end of directory, or a negative error
// code.
static int lfs_dir_rawread(lfs_t *lfs, lfs_dir_t *dir, struct lfs_info *info) {
    memset(info, 0, sizeof(*info));

    // special offset for '.' and '..'
    if (dir->pos == 0) {
        info->type = LFS_TYPE_DIR;
        strcpy(info->name, ".");
        dir->pos += 1;
        return true;
    } else if (dir->pos == 1) {
        info->type = LFS_TYPE_DIR;
        strcpy(info->name, "..");
        dir->pos += 1;
        return true;
    }

    while (true) {
        // exhausted current metadata block? follow the tail chain
        if (dir->id == dir->m.count) {
            if (!dir->m.split) {
                // no tail, we've reached the end of the directory
                return false;
            }

            int err = lfs_dir_fetch(lfs, &dir->m, dir->m.tail);
            if (err) {
                return err;
            }

            dir->id = 0;
        }

        int err = lfs_dir_getinfo(lfs, &dir->m, dir->id, info);
        if (err && err != LFS_ERR_NOENT) {
            return err;
        }

        dir->id += 1;
        // LFS_ERR_NOENT means this id is a gap (deleted entry), keep going
        if (err != LFS_ERR_NOENT) {
            break;
        }
    }

    dir->pos += 1;
    return true;
}
2211
// Seek the directory stream to absolute position off (a value
// previously returned by lfs_dir_rawtell). Rewinds and walks forward
// from the head block, so cost is linear in off. Returns 0, or
// LFS_ERR_INVAL if off lies past the end of the directory.
static int lfs_dir_rawseek(lfs_t *lfs, lfs_dir_t *dir, lfs_off_t off) {
    // simply walk from head dir
    int err = lfs_dir_rawrewind(lfs, dir);
    if (err) {
        return err;
    }

    // first two for ./..
    dir->pos = lfs_min(2, off);
    off -= dir->pos;

    // skip superblock entry
    dir->id = (off > 0 && lfs_pair_cmp(dir->head, lfs->root) == 0);

    while (off > 0) {
        // consume as many ids as the current metadata block holds
        int diff = lfs_min(dir->m.count - dir->id, off);
        dir->id += diff;
        dir->pos += diff;
        off -= diff;

        if (dir->id == dir->m.count) {
            if (!dir->m.split) {
                // ran out of blocks before reaching off
                return LFS_ERR_INVAL;
            }

            err = lfs_dir_fetch(lfs, &dir->m, dir->m.tail);
            if (err) {
                return err;
            }

            dir->id = 0;
        }
    }

    return 0;
}
2248
lfs_dir_rawtell(lfs_t * lfs,lfs_dir_t * dir)2249 static lfs_soff_t lfs_dir_rawtell(lfs_t *lfs, lfs_dir_t *dir) {
2250 (void)lfs;
2251 return dir->pos;
2252 }
2253
// Reset an open directory stream back to its first entry by refetching
// the head metadata pair saved at open time.
static int lfs_dir_rawrewind(lfs_t *lfs, lfs_dir_t *dir) {
    int err = lfs_dir_fetch(lfs, &dir->m, dir->head);
    if (err == 0) {
        // only reset the cursor once the head pair is loaded again
        dir->id = 0;
        dir->pos = 0;
    }
    return err;
}
2265
2266
2267 /// File index list operations ///
// Map a byte position onto the CTZ skip-list: returns the index of the
// block containing that position and rewrites *off to the offset within
// that block. Every block reserves at least 2 pointer words (2*4 bytes),
// so the guaranteed payload per block is block_size - 2*4.
static int lfs_ctz_index(lfs_t *lfs, lfs_off_t *off) {
    lfs_off_t pos = *off;
    lfs_off_t payload = lfs->cfg->block_size - 2*4;

    lfs_off_t guess = pos / payload;
    if (guess == 0) {
        // still inside the first block; offset is left unchanged
        return 0;
    }

    // refine the estimate: blocks also carry 4*popcount extra pointer
    // words, which the first division didn't account for
    guess = (pos - 4*(lfs_popc(guess-1)+2)) / payload;
    *off = pos - payload*guess - 4*lfs_popc(guess);
    return guess;
}
2280
// Find the block and in-block offset holding byte position pos of a
// CTZ skip-list file with the given head block and total size.
// Traverses backwards from the head using the skip pointers stored at
// the front of each block, taking the largest power-of-two hop that
// doesn't overshoot the target.
static int lfs_ctz_find(lfs_t *lfs,
        const lfs_cache_t *pcache, lfs_cache_t *rcache,
        lfs_block_t head, lfs_size_t size,
        lfs_size_t pos, lfs_block_t *block, lfs_off_t *off) {
    if (size == 0) {
        // empty file, no blocks to find
        *block = LFS_BLOCK_NULL;
        *off = 0;
        return 0;
    }

    // index of the last block (which head points at) and of the block
    // containing pos; lfs_ctz_index also rewrites pos to the in-block
    // offset for us
    lfs_off_t current = lfs_ctz_index(lfs, &(lfs_off_t){size-1});
    lfs_off_t target = lfs_ctz_index(lfs, &pos);

    while (current > target) {
        // hop size is limited by both the remaining distance and the
        // number of skip pointers this block actually stores (ctz of
        // its index)
        lfs_size_t skip = lfs_min(
                lfs_npw2(current-target+1) - 1,
                lfs_ctz(current));

        int err = lfs_bd_read(lfs,
                pcache, rcache, sizeof(head),
                head, 4*skip, &head, sizeof(head));
        head = lfs_fromle32(head);
        if (err) {
            return err;
        }

        current -= 1 << skip;
    }

    *block = head;
    *off = pos;
    return 0;
}
2314
2315 #ifndef LFS_READONLY
// Extend a CTZ skip-list file whose last block is head with size bytes
// total, returning the block and offset where writing should continue.
// If the last block is only partially full, its contents are copied
// into a fresh block instead of linking a new one. Blocks that report
// corruption during erase/program are skipped via the relocate loop.
static int lfs_ctz_extend(lfs_t *lfs,
        lfs_cache_t *pcache, lfs_cache_t *rcache,
        lfs_block_t head, lfs_size_t size,
        lfs_block_t *block, lfs_off_t *off) {
    while (true) {
        // go ahead and grab a block
        lfs_block_t nblock;
        int err = lfs_alloc(lfs, &nblock);
        if (err) {
            return err;
        }

        {
            err = lfs_bd_erase(lfs, nblock);
            if (err) {
                if (err == LFS_ERR_CORRUPT) {
                    goto relocate;
                }
                return err;
            }

            if (size == 0) {
                // first block of the file, nothing to copy or link
                *block = nblock;
                *off = 0;
                return 0;
            }

            // index of the current last block, and the offset just past
            // its last valid byte (lfs_ctz_index rewrites noff in place)
            lfs_size_t noff = size - 1;
            lfs_off_t index = lfs_ctz_index(lfs, &noff);
            noff = noff + 1;

            // just copy out the last block if it is incomplete
            if (noff != lfs->cfg->block_size) {
                for (lfs_off_t i = 0; i < noff; i++) {
                    uint8_t data;
                    err = lfs_bd_read(lfs,
                            NULL, rcache, noff-i,
                            head, i, &data, 1);
                    if (err) {
                        return err;
                    }

                    err = lfs_bd_prog(lfs,
                            pcache, rcache, true,
                            nblock, i, &data, 1);
                    if (err) {
                        if (err == LFS_ERR_CORRUPT) {
                            goto relocate;
                        }
                        return err;
                    }
                }

                *block = nblock;
                *off = noff;
                return 0;
            }

            // append block
            index += 1;
            // a block at index stores ctz(index)+1 skip pointers
            lfs_size_t skips = lfs_ctz(index) + 1;
            lfs_block_t nhead = head;
            for (lfs_off_t i = 0; i < skips; i++) {
                // pointers are stored little-endian on disk
                nhead = lfs_tole32(nhead);
                err = lfs_bd_prog(lfs, pcache, rcache, true,
                        nblock, 4*i, &nhead, 4);
                nhead = lfs_fromle32(nhead);
                if (err) {
                    if (err == LFS_ERR_CORRUPT) {
                        goto relocate;
                    }
                    return err;
                }

                if (i != skips-1) {
                    // follow the pointer we just wrote to find the next,
                    // coarser-grained skip pointer
                    err = lfs_bd_read(lfs,
                            NULL, rcache, sizeof(nhead),
                            nhead, 4*i, &nhead, sizeof(nhead));
                    nhead = lfs_fromle32(nhead);
                    if (err) {
                        return err;
                    }
                }
            }

            *block = nblock;
            *off = 4*skips;
            return 0;
        }

relocate:
        LFS_DEBUG("Bad block at 0x%"PRIx32, nblock);

        // just clear cache and try a new block
        lfs_cache_drop(lfs, pcache);
    }
}
2413 #endif
2414
// Invoke cb(data, block) for every block of a CTZ skip-list file
// (head/size), including the head itself. Where a block stores two or
// more skip pointers, both of the first two are read in one go so two
// blocks can be covered per device read.
static int lfs_ctz_traverse(lfs_t *lfs,
        const lfs_cache_t *pcache, lfs_cache_t *rcache,
        lfs_block_t head, lfs_size_t size,
        int (*cb)(void*, lfs_block_t), void *data) {
    if (size == 0) {
        return 0;
    }

    // index of the last block in the file
    lfs_off_t index = lfs_ctz_index(lfs, &(lfs_off_t){size-1});

    while (true) {
        int err = cb(data, head);
        if (err) {
            return err;
        }

        if (index == 0) {
            return 0;
        }

        // odd indices only guarantee one pointer; even indices have at
        // least two, letting us walk two blocks per read
        lfs_block_t heads[2];
        int count = 2 - (index & 1);
        err = lfs_bd_read(lfs,
                pcache, rcache, count*sizeof(head),
                head, 0, &heads, count*sizeof(head));
        // NOTE(review): when count == 1 only heads[0] is filled, so this
        // byte-swaps uninitialized stack in heads[1]; the value is never
        // used in that case — confirm intent upstream
        heads[0] = lfs_fromle32(heads[0]);
        heads[1] = lfs_fromle32(heads[1]);
        if (err) {
            return err;
        }

        for (int i = 0; i < count-1; i++) {
            err = cb(data, heads[i]);
            if (err) {
                return err;
            }
        }

        head = heads[count-1];
        index -= count;
    }
}
2457
2458
2459 /// Top level file operations ///
// Open a file with an explicit per-file configuration (cache buffer,
// custom attributes). Handles creation (LFS_O_CREAT), exclusivity
// (LFS_O_EXCL), truncation (LFS_O_TRUNC), attribute loading, and
// inline files. On any failure the partially-opened file is cleaned
// up via lfs_file_rawclose before the error is returned.
static int lfs_file_rawopencfg(lfs_t *lfs, lfs_file_t *file,
        const char *path, int flags,
        const struct lfs_file_config *cfg) {
#ifndef LFS_READONLY
    // deorphan if we haven't yet, needed at most once after poweron
    if ((flags & LFS_O_WRONLY) == LFS_O_WRONLY) {
        int err = lfs_fs_forceconsistency(lfs);
        if (err) {
            return err;
        }
    }
#else
    LFS_ASSERT((flags & LFS_O_RDONLY) == LFS_O_RDONLY);
#endif

    // setup simple file details
    int err;
    file->cfg = cfg;
    file->flags = flags;
    file->pos = 0;
    file->off = 0;
    file->cache.buffer = NULL;

    // allocate entry for file if it doesn't exist
    lfs_stag_t tag = lfs_dir_find(lfs, &file->m, &path, &file->id);
    if (tag < 0 && !(tag == LFS_ERR_NOENT && file->id != 0x3ff)) {
        err = tag;
        goto cleanup;
    }

    // get id, add to list of mdirs to catch update changes
    file->type = LFS_TYPE_REG;
    lfs_mlist_append(lfs, (struct lfs_mlist *)file);

#ifdef LFS_READONLY
    if (tag == LFS_ERR_NOENT) {
        err = LFS_ERR_NOENT;
        goto cleanup;
#else
    if (tag == LFS_ERR_NOENT) {
        if (!(flags & LFS_O_CREAT)) {
            err = LFS_ERR_NOENT;
            goto cleanup;
        }

        // check that name fits
        lfs_size_t nlen = strlen(path);
        if (nlen > lfs->name_max) {
            err = LFS_ERR_NAMETOOLONG;
            goto cleanup;
        }

        // get next slot and create entry to remember name
        err = lfs_dir_commit(lfs, &file->m, LFS_MKATTRS(
                {LFS_MKTAG(LFS_TYPE_CREATE, file->id, 0), NULL},
                {LFS_MKTAG(LFS_TYPE_REG, file->id, nlen), path},
                {LFS_MKTAG(LFS_TYPE_INLINESTRUCT, file->id, 0), NULL}));
        if (err) {
            // the commit may fail because the name doesn't fit in a
            // metadata block, which surfaces as NOSPC; report that as
            // NAMETOOLONG, but don't mask unrelated errors (corruption,
            // I/O failure, etc) behind a misleading error code
            err = (err == LFS_ERR_NOSPC) ? LFS_ERR_NAMETOOLONG : err;
            goto cleanup;
        }

        // new files are implicitly empty inline files
        tag = LFS_MKTAG(LFS_TYPE_INLINESTRUCT, 0, 0);
    } else if (flags & LFS_O_EXCL) {
        // file exists but exclusive creation was requested
        err = LFS_ERR_EXIST;
        goto cleanup;
#endif
    } else if (lfs_tag_type3(tag) != LFS_TYPE_REG) {
        err = LFS_ERR_ISDIR;
        goto cleanup;
#ifndef LFS_READONLY
    } else if (flags & LFS_O_TRUNC) {
        // truncate if requested
        tag = LFS_MKTAG(LFS_TYPE_INLINESTRUCT, file->id, 0);
        file->flags |= LFS_F_DIRTY;
#endif
    } else {
        // try to load what's on disk, if it's inlined we'll fix it later
        tag = lfs_dir_get(lfs, &file->m, LFS_MKTAG(0x700, 0x3ff, 0),
                LFS_MKTAG(LFS_TYPE_STRUCT, file->id, 8), &file->ctz);
        if (tag < 0) {
            err = tag;
            goto cleanup;
        }
        lfs_ctz_fromle32(&file->ctz);
    }

    // fetch attrs
    for (unsigned i = 0; i < file->cfg->attr_count; i++) {
        // if opened for read / read-write operations
        if ((file->flags & LFS_O_RDONLY) == LFS_O_RDONLY) {
            lfs_stag_t res = lfs_dir_get(lfs, &file->m,
                    LFS_MKTAG(0x7ff, 0x3ff, 0),
                    LFS_MKTAG(LFS_TYPE_USERATTR + file->cfg->attrs[i].type,
                        file->id, file->cfg->attrs[i].size),
                        file->cfg->attrs[i].buffer);
            // a missing attr is not an error, the buffer is left as-is
            if (res < 0 && res != LFS_ERR_NOENT) {
                err = res;
                goto cleanup;
            }
        }

#ifndef LFS_READONLY
        // if opened for write / read-write operations
        if ((file->flags & LFS_O_WRONLY) == LFS_O_WRONLY) {
            if (file->cfg->attrs[i].size > lfs->attr_max) {
                err = LFS_ERR_NOSPC;
                goto cleanup;
            }

            file->flags |= LFS_F_DIRTY;
        }
#endif
    }

    // allocate buffer if needed
    if (file->cfg->buffer) {
        file->cache.buffer = file->cfg->buffer;
    } else {
        file->cache.buffer = lfs_malloc(lfs->cfg->cache_size);
        if (!file->cache.buffer) {
            err = LFS_ERR_NOMEM;
            goto cleanup;
        }
    }

    // zero to avoid information leak
    lfs_cache_zero(lfs, &file->cache);

    if (lfs_tag_type3(tag) == LFS_TYPE_INLINESTRUCT) {
        // load inline files
        file->ctz.head = LFS_BLOCK_INLINE;
        file->ctz.size = lfs_tag_size(tag);
        file->flags |= LFS_F_INLINE;
        file->cache.block = file->ctz.head;
        file->cache.off = 0;
        file->cache.size = lfs->cfg->cache_size;

        // don't always read (may be new/trunc file)
        if (file->ctz.size > 0) {
            lfs_stag_t res = lfs_dir_get(lfs, &file->m,
                    LFS_MKTAG(0x700, 0x3ff, 0),
                    LFS_MKTAG(LFS_TYPE_STRUCT, file->id,
                        lfs_min(file->cache.size, 0x3fe)),
                    file->cache.buffer);
            if (res < 0) {
                err = res;
                goto cleanup;
            }
        }
    }

    return 0;

cleanup:
    // clean up lingering resources
#ifndef LFS_READONLY
    file->flags |= LFS_F_ERRED;
#endif
    lfs_file_rawclose(lfs, file);
    return err;
}
2622
2623 static int lfs_file_rawopen(lfs_t *lfs, lfs_file_t *file,
2624 const char *path, int flags) {
2625 static const struct lfs_file_config defaults = {0};
2626 int err = lfs_file_rawopencfg(lfs, file, path, flags, &defaults);
2627 return err;
2628 }
2629
2630 static int lfs_file_rawclose(lfs_t *lfs, lfs_file_t *file) {
2631 #ifndef LFS_READONLY
2632 int err = lfs_file_rawsync(lfs, file);
2633 #else
2634 int err = 0;
2635 #endif
2636
2637 // remove from list of mdirs
2638 lfs_mlist_remove(lfs, (struct lfs_mlist*)file);
2639
2640 // clean up memory
2641 if (!file->cfg->buffer) {
2642 lfs_free(file->cache.buffer);
2643 }
2644
2645 return err;
2646 }
2647
2648
2649 #ifndef LFS_READONLY
// Move the file's currently-open block to a freshly allocated block,
// copying the first file->off bytes from either the file's cache or
// disk (or the metadata pair, for inline files). Used when programming
// the current block reports corruption; loops until a good block is
// found.
static int lfs_file_relocate(lfs_t *lfs, lfs_file_t *file) {
    while (true) {
        // just relocate what exists into new block
        lfs_block_t nblock;
        int err = lfs_alloc(lfs, &nblock);
        if (err) {
            return err;
        }

        err = lfs_bd_erase(lfs, nblock);
        if (err) {
            if (err == LFS_ERR_CORRUPT) {
                goto relocate;
            }
            return err;
        }

        // either read from dirty cache or disk
        for (lfs_off_t i = 0; i < file->off; i++) {
            uint8_t data;
            if (file->flags & LFS_F_INLINE) {
                err = lfs_dir_getread(lfs, &file->m,
                        // note we evict inline files before they can be dirty
                        NULL, &file->cache, file->off-i,
                        LFS_MKTAG(0xfff, 0x1ff, 0),
                        LFS_MKTAG(LFS_TYPE_INLINESTRUCT, file->id, 0),
                        i, &data, 1);
                if (err) {
                    return err;
                }
            } else {
                err = lfs_bd_read(lfs,
                        &file->cache, &lfs->rcache, file->off-i,
                        file->block, i, &data, 1);
                if (err) {
                    return err;
                }
            }

            err = lfs_bd_prog(lfs,
                    &lfs->pcache, &lfs->rcache, true,
                    nblock, i, &data, 1);
            if (err) {
                if (err == LFS_ERR_CORRUPT) {
                    goto relocate;
                }
                return err;
            }
        }

        // copy over new state of file: the global pcache becomes the
        // file's cache and is then zeroed to avoid an information leak
        memcpy(file->cache.buffer, lfs->pcache.buffer, lfs->cfg->cache_size);
        file->cache.block = lfs->pcache.block;
        file->cache.off = lfs->pcache.off;
        file->cache.size = lfs->pcache.size;
        lfs_cache_zero(lfs, &lfs->pcache);

        file->block = nblock;
        file->flags |= LFS_F_WRITING;
        return 0;

relocate:
        LFS_DEBUG("Bad block at 0x%"PRIx32, nblock);

        // just clear cache and try a new block
        lfs_cache_drop(lfs, &lfs->pcache);
    }
}
2718 #endif
2719
2720 #ifndef LFS_READONLY
// Convert an inline file (stored in its metadata pair) into a normal
// block-backed file by allocating a block and copying the inline
// contents into it.
static int lfs_file_outline(lfs_t *lfs, lfs_file_t *file) {
    // lfs_file_relocate copies file->off bytes, so point it at the
    // full inline contents before calling it
    file->off = file->pos;
    lfs_alloc_ack(lfs);
    int err = lfs_file_relocate(lfs, file);
    if (err) {
        return err;
    }

    file->flags &= ~LFS_F_INLINE;
    return 0;
}
2732 #endif
2733
2734 #ifndef LFS_READONLY
// Flush the file's in-flight read or write state. For writes this
// copies any file contents after the current write position from the
// old ctz list (via a shadow read-only handle), flushes the cache to
// disk (relocating on bad blocks), and updates the in-memory ctz
// head/size, marking the file dirty for the next sync.
static int lfs_file_flush(lfs_t *lfs, lfs_file_t *file) {
    if (file->flags & LFS_F_READING) {
        if (!(file->flags & LFS_F_INLINE)) {
            lfs_cache_drop(lfs, &file->cache);
        }
        file->flags &= ~LFS_F_READING;
    }

    if (file->flags & LFS_F_WRITING) {
        lfs_off_t pos = file->pos;

        if (!(file->flags & LFS_F_INLINE)) {
            // copy over anything after current branch, reading through
            // a shadow handle that borrows the global rcache
            lfs_file_t orig = {
                .ctz.head = file->ctz.head,
                .ctz.size = file->ctz.size,
                .flags = LFS_O_RDONLY,
                .pos = file->pos,
                .cache = lfs->rcache,
            };
            lfs_cache_drop(lfs, &lfs->rcache);

            while (file->pos < file->ctz.size) {
                // copy over a byte at a time, leave it up to caching
                // to make this efficient
                uint8_t data;
                lfs_ssize_t res = lfs_file_rawread(lfs, &orig, &data, 1);
                if (res < 0) {
                    return res;
                }

                res = lfs_file_rawwrite(lfs, file, &data, 1);
                if (res < 0) {
                    return res;
                }

                // keep our reference to the rcache in sync
                if (lfs->rcache.block != LFS_BLOCK_NULL) {
                    lfs_cache_drop(lfs, &orig.cache);
                    lfs_cache_drop(lfs, &lfs->rcache);
                }
            }

            // write out what we have
            while (true) {
                int err = lfs_bd_flush(lfs, &file->cache, &lfs->rcache, true);
                if (err) {
                    if (err == LFS_ERR_CORRUPT) {
                        goto relocate;
                    }
                    return err;
                }

                break;

relocate:
                LFS_DEBUG("Bad block at 0x%"PRIx32, file->block);
                err = lfs_file_relocate(lfs, file);
                if (err) {
                    return err;
                }
            }
        } else {
            // inline files need no copy; just account for the new size
            file->pos = lfs_max(file->pos, file->ctz.size);
        }

        // actual file updates
        file->ctz.head = file->block;
        file->ctz.size = file->pos;
        file->flags &= ~LFS_F_WRITING;
        file->flags |= LFS_F_DIRTY;

        file->pos = pos;
    }

    return 0;
}
2812 #endif
2813
2814 #ifndef LFS_READONLY
// Sync a file to disk: flush in-flight state, then, if the file is
// dirty and its metadata pair is valid, commit either the inline
// contents or the ctz reference plus any user attributes to the
// parent metadata pair. A file already in the errored state is left
// untouched (returns 0).
static int lfs_file_rawsync(lfs_t *lfs, lfs_file_t *file) {
    if (file->flags & LFS_F_ERRED) {
        // it's not safe to do anything if our file errored
        return 0;
    }

    int err = lfs_file_flush(lfs, file);
    if (err) {
        file->flags |= LFS_F_ERRED;
        return err;
    }


    if ((file->flags & LFS_F_DIRTY) &&
            !lfs_pair_isnull(file->m.pair)) {
        // update dir entry
        uint16_t type;
        const void *buffer;
        lfs_size_t size;
        struct lfs_ctz ctz;
        if (file->flags & LFS_F_INLINE) {
            // inline the whole file
            type = LFS_TYPE_INLINESTRUCT;
            buffer = file->cache.buffer;
            size = file->ctz.size;
        } else {
            // update the ctz reference
            type = LFS_TYPE_CTZSTRUCT;
            // copy ctz so alloc will work during a relocate
            ctz = file->ctz;
            lfs_ctz_tole32(&ctz);
            buffer = &ctz;
            size = sizeof(ctz);
        }

        // commit file data and attributes
        err = lfs_dir_commit(lfs, &file->m, LFS_MKATTRS(
                {LFS_MKTAG(type, file->id, size), buffer},
                {LFS_MKTAG(LFS_FROM_USERATTRS, file->id,
                    file->cfg->attr_count), file->cfg->attrs}));
        if (err) {
            file->flags |= LFS_F_ERRED;
            return err;
        }

        file->flags &= ~LFS_F_DIRTY;
    }

    return 0;
}
2865 #endif
2866
// Read up to size bytes from the file's current position into buffer.
// Returns the number of bytes read (0 at end of file) or a negative
// error code. The file must have been opened readable.
static lfs_ssize_t lfs_file_rawread(lfs_t *lfs, lfs_file_t *file,
        void *buffer, lfs_size_t size) {
    LFS_ASSERT((file->flags & LFS_O_RDONLY) == LFS_O_RDONLY);

    uint8_t *data = buffer;
    lfs_size_t nsize = size;

#ifndef LFS_READONLY
    if (file->flags & LFS_F_WRITING) {
        // flush out any writes
        int err = lfs_file_flush(lfs, file);
        if (err) {
            return err;
        }
    }
#endif

    if (file->pos >= file->ctz.size) {
        // eof if past end
        return 0;
    }

    // clamp to the bytes remaining in the file
    size = lfs_min(size, file->ctz.size - file->pos);
    nsize = size;

    while (nsize > 0) {
        // check if we need a new block
        if (!(file->flags & LFS_F_READING) ||
                file->off == lfs->cfg->block_size) {
            if (!(file->flags & LFS_F_INLINE)) {
                int err = lfs_ctz_find(lfs, NULL, &file->cache,
                        file->ctz.head, file->ctz.size,
                        file->pos, &file->block, &file->off);
                if (err) {
                    return err;
                }
            } else {
                // inline files live in the metadata pair, not a block
                file->block = LFS_BLOCK_INLINE;
                file->off = file->pos;
            }

            file->flags |= LFS_F_READING;
        }

        // read as much as we can in current block
        lfs_size_t diff = lfs_min(nsize, lfs->cfg->block_size - file->off);
        if (file->flags & LFS_F_INLINE) {
            int err = lfs_dir_getread(lfs, &file->m,
                    NULL, &file->cache, lfs->cfg->block_size,
                    LFS_MKTAG(0xfff, 0x1ff, 0),
                    LFS_MKTAG(LFS_TYPE_INLINESTRUCT, file->id, 0),
                    file->off, data, diff);
            if (err) {
                return err;
            }
        } else {
            int err = lfs_bd_read(lfs,
                    NULL, &file->cache, lfs->cfg->block_size,
                    file->block, file->off, data, diff);
            if (err) {
                return err;
            }
        }

        file->pos += diff;
        file->off += diff;
        data += diff;
        nsize -= diff;
    }

    return size;
}
2939
2940 #ifndef LFS_READONLY
// Write size bytes from buffer at the file's current position.
// Handles append mode, zero-filling a gap after a seek past the end,
// outlining inline files that grow too large, and extending the ctz
// skip-list with new blocks (relocating on bad blocks). Returns the
// number of bytes written or a negative error code. The file must
// have been opened writable.
static lfs_ssize_t lfs_file_rawwrite(lfs_t *lfs, lfs_file_t *file,
        const void *buffer, lfs_size_t size) {
    LFS_ASSERT((file->flags & LFS_O_WRONLY) == LFS_O_WRONLY);

    const uint8_t *data = buffer;
    lfs_size_t nsize = size;

    if (file->flags & LFS_F_READING) {
        // drop any reads
        int err = lfs_file_flush(lfs, file);
        if (err) {
            return err;
        }
    }

    if ((file->flags & LFS_O_APPEND) && file->pos < file->ctz.size) {
        file->pos = file->ctz.size;
    }

    if (file->pos + size > lfs->file_max) {
        // Larger than file limit?
        return LFS_ERR_FBIG;
    }

    if (!(file->flags & LFS_F_WRITING) && file->pos > file->ctz.size) {
        // fill with zeros
        lfs_off_t pos = file->pos;
        file->pos = file->ctz.size;

        while (file->pos < pos) {
            lfs_ssize_t res = lfs_file_rawwrite(lfs, file, &(uint8_t){0}, 1);
            if (res < 0) {
                return res;
            }
        }
    }

    if ((file->flags & LFS_F_INLINE) &&
            lfs_max(file->pos+nsize, file->ctz.size) >
            lfs_min(0x3fe, lfs_min(
                lfs->cfg->cache_size,
                (lfs->cfg->metadata_max ?
                    lfs->cfg->metadata_max : lfs->cfg->block_size) / 8))) {
        // inline file doesn't fit anymore
        int err = lfs_file_outline(lfs, file);
        if (err) {
            file->flags |= LFS_F_ERRED;
            return err;
        }
    }

    while (nsize > 0) {
        // check if we need a new block
        if (!(file->flags & LFS_F_WRITING) ||
                file->off == lfs->cfg->block_size) {
            if (!(file->flags & LFS_F_INLINE)) {
                if (!(file->flags & LFS_F_WRITING) && file->pos > 0) {
                    // find out which block we're extending from
                    int err = lfs_ctz_find(lfs, NULL, &file->cache,
                            file->ctz.head, file->ctz.size,
                            file->pos-1, &file->block, &file->off);
                    if (err) {
                        file->flags |= LFS_F_ERRED;
                        return err;
                    }

                    // mark cache as dirty since we may have read data into it
                    lfs_cache_zero(lfs, &file->cache);
                }

                // extend file with new blocks
                lfs_alloc_ack(lfs);
                int err = lfs_ctz_extend(lfs, &file->cache, &lfs->rcache,
                        file->block, file->pos,
                        &file->block, &file->off);
                if (err) {
                    file->flags |= LFS_F_ERRED;
                    return err;
                }
            } else {
                // inline files live in the metadata pair, not a block
                file->block = LFS_BLOCK_INLINE;
                file->off = file->pos;
            }

            file->flags |= LFS_F_WRITING;
        }

        // program as much as we can in current block
        lfs_size_t diff = lfs_min(nsize, lfs->cfg->block_size - file->off);
        while (true) {
            int err = lfs_bd_prog(lfs, &file->cache, &lfs->rcache, true,
                    file->block, file->off, data, diff);
            if (err) {
                if (err == LFS_ERR_CORRUPT) {
                    goto relocate;
                }
                file->flags |= LFS_F_ERRED;
                return err;
            }

            break;
relocate:
            err = lfs_file_relocate(lfs, file);
            if (err) {
                file->flags |= LFS_F_ERRED;
                return err;
            }
        }

        file->pos += diff;
        file->off += diff;
        data += diff;
        nsize -= diff;

        lfs_alloc_ack(lfs);
    }

    file->flags &= ~LFS_F_ERRED;
    return size;
}
3061 #endif
3062
3063 static lfs_soff_t lfs_file_rawseek(lfs_t *lfs, lfs_file_t *file,
3064 lfs_soff_t off, int whence) {
3065 // find new pos
3066 lfs_off_t npos = file->pos;
3067 if (whence == LFS_SEEK_SET) {
3068 npos = off;
3069 } else if (whence == LFS_SEEK_CUR) {
3070 npos = file->pos + off;
3071 } else if (whence == LFS_SEEK_END) {
3072 npos = lfs_file_rawsize(lfs, file) + off;
3073 }
3074
3075 if (npos > lfs->file_max) {
3076 // file position out of range
3077 return LFS_ERR_INVAL;
3078 }
3079
3080 if (file->pos == npos) {
3081 // noop - position has not changed
3082 return npos;
3083 }
3084
3085 #ifndef LFS_READONLY
3086 // write out everything beforehand, may be noop if rdonly
3087 int err = lfs_file_flush(lfs, file);
3088 if (err) {
3089 return err;
3090 }
3091 #endif
3092
3093 // update pos
3094 file->pos = npos;
3095 return npos;
3096 }
3097
3098 #ifndef LFS_READONLY
// Truncate (or extend with zeros) the file to exactly size bytes,
// preserving the current read/write position. The file must have been
// opened writable. Returns 0 or a negative error code.
static int lfs_file_rawtruncate(lfs_t *lfs, lfs_file_t *file, lfs_off_t size) {
    LFS_ASSERT((file->flags & LFS_O_WRONLY) == LFS_O_WRONLY);

    if (size > LFS_FILE_MAX) {
        return LFS_ERR_INVAL;
    }

    lfs_off_t pos = file->pos;
    lfs_off_t oldsize = lfs_file_rawsize(lfs, file);
    if (size < oldsize) {
        // need to flush since directly changing metadata
        int err = lfs_file_flush(lfs, file);
        if (err) {
            return err;
        }

        // lookup new head in ctz skip list
        err = lfs_ctz_find(lfs, NULL, &file->cache,
                file->ctz.head, file->ctz.size,
                size, &file->block, &file->off);
        if (err) {
            return err;
        }

        // need to set pos/block/off consistently so seeking back to
        // the old position does not get confused
        file->pos = size;
        file->ctz.head = file->block;
        file->ctz.size = size;
        file->flags |= LFS_F_DIRTY | LFS_F_READING;
    } else if (size > oldsize) {
        // flush+seek if not already at end
        lfs_soff_t res = lfs_file_rawseek(lfs, file, 0, LFS_SEEK_END);
        if (res < 0) {
            return (int)res;
        }

        // fill with zeros
        while (file->pos < size) {
            res = lfs_file_rawwrite(lfs, file, &(uint8_t){0}, 1);
            if (res < 0) {
                return (int)res;
            }
        }
    }

    // restore pos
    lfs_soff_t res = lfs_file_rawseek(lfs, file, pos, LFS_SEEK_SET);
    if (res < 0) {
        return (int)res;
    }

    return 0;
}
3153 #endif
3154
3155 static lfs_soff_t lfs_file_rawtell(lfs_t *lfs, lfs_file_t *file) {
3156 (void)lfs;
3157 return file->pos;
3158 }
3159
3160 static int lfs_file_rawrewind(lfs_t *lfs, lfs_file_t *file) {
3161 lfs_soff_t res = lfs_file_rawseek(lfs, file, 0, LFS_SEEK_SET);
3162 if (res < 0) {
3163 return (int)res;
3164 }
3165
3166 return 0;
3167 }
3168
3169 static lfs_soff_t lfs_file_rawsize(lfs_t *lfs, lfs_file_t *file) {
3170 (void)lfs;
3171
3172 #ifndef LFS_READONLY
3173 if (file->flags & LFS_F_WRITING) {
3174 return lfs_max(file->pos, file->ctz.size);
3175 }
3176 #endif
3177
3178 return file->ctz.size;
3179 }
3180
3181
3182 /// General fs operations ///
3183 static int lfs_rawstat(lfs_t *lfs, const char *path, struct lfs_info *info) {
3184 lfs_mdir_t cwd;
3185 lfs_stag_t tag = lfs_dir_find(lfs, &cwd, &path, NULL);
3186 if (tag < 0) {
3187 return (int)tag;
3188 }
3189
3190 return lfs_dir_getinfo(lfs, &cwd, lfs_tag_id(tag), info);
3191 }
3192
3193 #ifndef LFS_READONLY
// Remove the file or (empty) directory at path. Directories are
// removed via the orphan protocol: the fs is marked orphaned, the
// entry is deleted from its parent, then the now-orphaned metadata
// pair is dropped from the tail chain. Returns 0, LFS_ERR_NOTEMPTY
// for a non-empty directory, LFS_ERR_INVAL for the root, or another
// negative error code.
static int lfs_rawremove(lfs_t *lfs, const char *path) {
    // deorphan if we haven't yet, needed at most once after poweron
    int err = lfs_fs_forceconsistency(lfs);
    if (err) {
        return err;
    }

    lfs_mdir_t cwd;
    lfs_stag_t tag = lfs_dir_find(lfs, &cwd, &path, NULL);
    if (tag < 0 || lfs_tag_id(tag) == 0x3ff) {
        // not found, or an attempt to remove the root (id 0x3ff)
        return (tag < 0) ? (int)tag : LFS_ERR_INVAL;
    }

    struct lfs_mlist dir;
    dir.next = lfs->mlist;
    if (lfs_tag_type3(tag) == LFS_TYPE_DIR) {
        // must be empty before removal
        lfs_block_t pair[2];
        lfs_stag_t res = lfs_dir_get(lfs, &cwd, LFS_MKTAG(0x700, 0x3ff, 0),
                LFS_MKTAG(LFS_TYPE_STRUCT, lfs_tag_id(tag), 8), pair);
        if (res < 0) {
            return (int)res;
        }
        lfs_pair_fromle32(pair);

        err = lfs_dir_fetch(lfs, &dir.m, pair);
        if (err) {
            return err;
        }

        if (dir.m.count > 0 || dir.m.split) {
            return LFS_ERR_NOTEMPTY;
        }

        // mark fs as orphaned
        err = lfs_fs_preporphans(lfs, +1);
        if (err) {
            return err;
        }

        // I know it's crazy but yes, dir can be changed by our parent's
        // commit (if predecessor is child)
        dir.type = 0;
        dir.id = 0;
        lfs->mlist = &dir;
    }

    // delete the entry
    err = lfs_dir_commit(lfs, &cwd, LFS_MKATTRS(
            {LFS_MKTAG(LFS_TYPE_DELETE, lfs_tag_id(tag), 0), NULL}));
    if (err) {
        lfs->mlist = dir.next;
        return err;
    }

    lfs->mlist = dir.next;
    if (lfs_tag_type3(tag) == LFS_TYPE_DIR) {
        // fix orphan
        err = lfs_fs_preporphans(lfs, -1);
        if (err) {
            return err;
        }

        // find the dir's predecessor in the tail chain and drop the
        // orphaned pair out of the chain
        err = lfs_fs_pred(lfs, dir.m.pair, &cwd);
        if (err) {
            return err;
        }

        err = lfs_dir_drop(lfs, &cwd, &dir.m);
        if (err) {
            return err;
        }
    }

    return 0;
}
3270 #endif
3271
3272 #ifndef LFS_READONLY
// Rename/move oldpath to newpath. An existing destination of the same
// type is replaced (directories only if empty). Cross-pair moves use
// the global move state (gstate) so a power loss mid-rename cannot
// duplicate or lose the entry; replaced directories go through the
// orphan protocol before being dropped from the tail chain.
static int lfs_rawrename(lfs_t *lfs, const char *oldpath, const char *newpath) {
    // deorphan if we haven't yet, needed at most once after poweron
    int err = lfs_fs_forceconsistency(lfs);
    if (err) {
        return err;
    }

    // find old entry
    lfs_mdir_t oldcwd;
    lfs_stag_t oldtag = lfs_dir_find(lfs, &oldcwd, &oldpath, NULL);
    if (oldtag < 0 || lfs_tag_id(oldtag) == 0x3ff) {
        // missing source, or an attempt to move the root
        return (oldtag < 0) ? (int)oldtag : LFS_ERR_INVAL;
    }

    // find new entry
    lfs_mdir_t newcwd;
    uint16_t newid;
    lfs_stag_t prevtag = lfs_dir_find(lfs, &newcwd, &newpath, &newid);
    // NOENT with a valid newid means "parent exists, slot free" and is ok
    if ((prevtag < 0 || lfs_tag_id(prevtag) == 0x3ff) &&
            !(prevtag == LFS_ERR_NOENT && newid != 0x3ff)) {
        return (prevtag < 0) ? (int)prevtag : LFS_ERR_INVAL;
    }

    // if we're in the same pair there's a few special cases...
    bool samepair = (lfs_pair_cmp(oldcwd.pair, newcwd.pair) == 0);
    uint16_t newoldid = lfs_tag_id(oldtag);

    struct lfs_mlist prevdir;
    prevdir.next = lfs->mlist;
    if (prevtag == LFS_ERR_NOENT) {
        // check that name fits
        lfs_size_t nlen = strlen(newpath);
        if (nlen > lfs->name_max) {
            return LFS_ERR_NAMETOOLONG;
        }

        // there is a small chance we are being renamed in the same
        // directory/ to an id less than our old id, the global update
        // to handle this is a bit messy
        if (samepair && newid <= newoldid) {
            newoldid += 1;
        }
    } else if (lfs_tag_type3(prevtag) != lfs_tag_type3(oldtag)) {
        return LFS_ERR_ISDIR;
    } else if (samepair && newid == newoldid) {
        // we're renaming to ourselves??
        return 0;
    } else if (lfs_tag_type3(prevtag) == LFS_TYPE_DIR) {
        // must be empty before removal
        lfs_block_t prevpair[2];
        lfs_stag_t res = lfs_dir_get(lfs, &newcwd, LFS_MKTAG(0x700, 0x3ff, 0),
                LFS_MKTAG(LFS_TYPE_STRUCT, newid, 8), prevpair);
        if (res < 0) {
            return (int)res;
        }
        lfs_pair_fromle32(prevpair);

        // must be empty before removal
        err = lfs_dir_fetch(lfs, &prevdir.m, prevpair);
        if (err) {
            return err;
        }

        if (prevdir.m.count > 0 || prevdir.m.split) {
            return LFS_ERR_NOTEMPTY;
        }

        // mark fs as orphaned
        err = lfs_fs_preporphans(lfs, +1);
        if (err) {
            return err;
        }

        // I know it's crazy but yes, dir can be changed by our parent's
        // commit (if predecessor is child)
        prevdir.type = 0;
        prevdir.id = 0;
        lfs->mlist = &prevdir;
    }

    if (!samepair) {
        // record the pending move in gstate so it survives power loss
        lfs_fs_prepmove(lfs, newoldid, oldcwd.pair);
    }

    // move over all attributes
    err = lfs_dir_commit(lfs, &newcwd, LFS_MKATTRS(
            {LFS_MKTAG_IF(prevtag != LFS_ERR_NOENT,
                LFS_TYPE_DELETE, newid, 0), NULL},
            {LFS_MKTAG(LFS_TYPE_CREATE, newid, 0), NULL},
            {LFS_MKTAG(lfs_tag_type3(oldtag), newid, strlen(newpath)), newpath},
            {LFS_MKTAG(LFS_FROM_MOVE, newid, lfs_tag_id(oldtag)), &oldcwd},
            {LFS_MKTAG_IF(samepair,
                LFS_TYPE_DELETE, newoldid, 0), NULL}));
    if (err) {
        lfs->mlist = prevdir.next;
        return err;
    }

    // let commit clean up after move (if we're different! otherwise move
    // logic already fixed it for us)
    if (!samepair && lfs_gstate_hasmove(&lfs->gstate)) {
        // prep gstate and delete move id
        lfs_fs_prepmove(lfs, 0x3ff, NULL);
        err = lfs_dir_commit(lfs, &oldcwd, LFS_MKATTRS(
                {LFS_MKTAG(LFS_TYPE_DELETE, lfs_tag_id(oldtag), 0), NULL}));
        if (err) {
            lfs->mlist = prevdir.next;
            return err;
        }
    }

    lfs->mlist = prevdir.next;
    if (prevtag != LFS_ERR_NOENT && lfs_tag_type3(prevtag) == LFS_TYPE_DIR) {
        // fix orphan
        err = lfs_fs_preporphans(lfs, -1);
        if (err) {
            return err;
        }

        // unhook the replaced directory's pair from the tail chain
        err = lfs_fs_pred(lfs, prevdir.m.pair, &newcwd);
        if (err) {
            return err;
        }

        err = lfs_dir_drop(lfs, &newcwd, &prevdir.m);
        if (err) {
            return err;
        }
    }

    return 0;
}
3405 #endif
3406
3407 static lfs_ssize_t lfs_rawgetattr(lfs_t *lfs, const char *path,
3408 uint8_t type, void *buffer, lfs_size_t size) {
3409 lfs_mdir_t cwd;
3410 lfs_stag_t tag = lfs_dir_find(lfs, &cwd, &path, NULL);
3411 if (tag < 0) {
3412 return tag;
3413 }
3414
3415 uint16_t id = lfs_tag_id(tag);
3416 if (id == 0x3ff) {
3417 // special case for root
3418 id = 0;
3419 int err = lfs_dir_fetch(lfs, &cwd, lfs->root);
3420 if (err) {
3421 return err;
3422 }
3423 }
3424
3425 tag = lfs_dir_get(lfs, &cwd, LFS_MKTAG(0x7ff, 0x3ff, 0),
3426 LFS_MKTAG(LFS_TYPE_USERATTR + type,
3427 id, lfs_min(size, lfs->attr_max)),
3428 buffer);
3429 if (tag < 0) {
3430 if (tag == LFS_ERR_NOENT) {
3431 return LFS_ERR_NOATTR;
3432 }
3433
3434 return tag;
3435 }
3436
3437 return lfs_tag_size(tag);
3438 }
3439
3440 #ifndef LFS_READONLY
3441 static int lfs_commitattr(lfs_t *lfs, const char *path,
3442 uint8_t type, const void *buffer, lfs_size_t size) {
3443 lfs_mdir_t cwd;
3444 lfs_stag_t tag = lfs_dir_find(lfs, &cwd, &path, NULL);
3445 if (tag < 0) {
3446 return tag;
3447 }
3448
3449 uint16_t id = lfs_tag_id(tag);
3450 if (id == 0x3ff) {
3451 // special case for root
3452 id = 0;
3453 int err = lfs_dir_fetch(lfs, &cwd, lfs->root);
3454 if (err) {
3455 return err;
3456 }
3457 }
3458
3459 return lfs_dir_commit(lfs, &cwd, LFS_MKATTRS(
3460 {LFS_MKTAG(LFS_TYPE_USERATTR + type, id, size), buffer}));
3461 }
3462 #endif
3463
3464 #ifndef LFS_READONLY
3465 static int lfs_rawsetattr(lfs_t *lfs, const char *path,
3466 uint8_t type, const void *buffer, lfs_size_t size) {
3467 if (size > lfs->attr_max) {
3468 return LFS_ERR_NOSPC;
3469 }
3470
3471 return lfs_commitattr(lfs, path, type, buffer, size);
3472 }
3473 #endif
3474
#ifndef LFS_READONLY
// Remove a user attribute by committing it with a NULL buffer and the
// special size 0x3ff — presumably the commit path treats this combination
// as a deletion marker; TODO(review) confirm against lfs_dir_commit
static int lfs_rawremoveattr(lfs_t *lfs, const char *path, uint8_t type) {
    return lfs_commitattr(lfs, path, type, NULL, 0x3ff);
}
#endif
3480
3481
3482 /// Filesystem operations ///
// Initialize in-memory lfs_t state from cfg: validates the configured
// geometry, allocates (or adopts user-provided) read/prog/lookahead
// buffers, and resets runtime state. On allocation failure, frees anything
// it allocated via lfs_deinit and returns LFS_ERR_NOMEM.
static int lfs_init(lfs_t *lfs, const struct lfs_config *cfg) {
    lfs->cfg = cfg;
    int err = 0;

    // validate that the lfs-cfg sizes were initiated properly before
    // performing any arithmetic logics with them
    LFS_ASSERT(lfs->cfg->read_size != 0);
    LFS_ASSERT(lfs->cfg->prog_size != 0);
    LFS_ASSERT(lfs->cfg->cache_size != 0);

    // check that cache size is a multiple of the prog and read sizes,
    // and that block size is a multiple of the cache size
    LFS_ASSERT(lfs->cfg->cache_size % lfs->cfg->read_size == 0);
    LFS_ASSERT(lfs->cfg->cache_size % lfs->cfg->prog_size == 0);
    LFS_ASSERT(lfs->cfg->block_size % lfs->cfg->cache_size == 0);

    // check that the block size is large enough to fit ctz pointers
    LFS_ASSERT(4*lfs_npw2(0xffffffff / (lfs->cfg->block_size-2*4))
            <= lfs->cfg->block_size);

    // block_cycles = 0 is no longer supported.
    //
    // block_cycles is the number of erase cycles before littlefs evicts
    // metadata logs as a part of wear leveling. Suggested values are in the
    // range of 100-1000, or set block_cycles to -1 to disable block-level
    // wear-leveling.
    LFS_ASSERT(lfs->cfg->block_cycles != 0);


    // setup read cache, using the user buffer when provided
    if (lfs->cfg->read_buffer) {
        lfs->rcache.buffer = lfs->cfg->read_buffer;
    } else {
        lfs->rcache.buffer = lfs_malloc(lfs->cfg->cache_size);
        if (!lfs->rcache.buffer) {
            err = LFS_ERR_NOMEM;
            goto cleanup;
        }
    }

    // setup program cache, using the user buffer when provided
    if (lfs->cfg->prog_buffer) {
        lfs->pcache.buffer = lfs->cfg->prog_buffer;
    } else {
        lfs->pcache.buffer = lfs_malloc(lfs->cfg->cache_size);
        if (!lfs->pcache.buffer) {
            err = LFS_ERR_NOMEM;
            goto cleanup;
        }
    }

    // zero to avoid information leaks
    // (NOTE(review): lfs_cache_zero actually fills the buffer with 0xff,
    // see its definition — the intent of "zero" here is "no leftover data")
    lfs_cache_zero(lfs, &lfs->rcache);
    lfs_cache_zero(lfs, &lfs->pcache);

    // setup lookahead, must be multiple of 64-bits, 32-bit aligned
    LFS_ASSERT(lfs->cfg->lookahead_size > 0);
    LFS_ASSERT(lfs->cfg->lookahead_size % 8 == 0 &&
            (uintptr_t)lfs->cfg->lookahead_buffer % 4 == 0);
    if (lfs->cfg->lookahead_buffer) {
        lfs->free.buffer = lfs->cfg->lookahead_buffer;
    } else {
        lfs->free.buffer = lfs_malloc(lfs->cfg->lookahead_size);
        if (!lfs->free.buffer) {
            err = LFS_ERR_NOMEM;
            goto cleanup;
        }
    }

    // check that the size limits are sane; 0 in cfg selects the
    // compile-time maximum
    LFS_ASSERT(lfs->cfg->name_max <= LFS_NAME_MAX);
    lfs->name_max = lfs->cfg->name_max;
    if (!lfs->name_max) {
        lfs->name_max = LFS_NAME_MAX;
    }

    LFS_ASSERT(lfs->cfg->file_max <= LFS_FILE_MAX);
    lfs->file_max = lfs->cfg->file_max;
    if (!lfs->file_max) {
        lfs->file_max = LFS_FILE_MAX;
    }

    LFS_ASSERT(lfs->cfg->attr_max <= LFS_ATTR_MAX);
    lfs->attr_max = lfs->cfg->attr_max;
    if (!lfs->attr_max) {
        lfs->attr_max = LFS_ATTR_MAX;
    }

    LFS_ASSERT(lfs->cfg->metadata_max <= lfs->cfg->block_size);

    // setup default state: no root, no open handles, no pending gstate
    lfs->root[0] = LFS_BLOCK_NULL;
    lfs->root[1] = LFS_BLOCK_NULL;
    lfs->mlist = NULL;
    lfs->seed = 0;
    lfs->gdisk = (lfs_gstate_t){0};
    lfs->gstate = (lfs_gstate_t){0};
    lfs->gdelta = (lfs_gstate_t){0};
#ifdef LFS_MIGRATE
    lfs->lfs1 = NULL;
#endif

    return 0;

cleanup:
    // release any buffers we allocated before the failure
    lfs_deinit(lfs);
    return err;
}
3591
3592 static int lfs_deinit(lfs_t *lfs) {
3593 // free allocated memory
3594 if (!lfs->cfg->read_buffer) {
3595 lfs_free(lfs->rcache.buffer);
3596 }
3597
3598 if (!lfs->cfg->prog_buffer) {
3599 lfs_free(lfs->pcache.buffer);
3600 }
3601
3602 if (!lfs->cfg->lookahead_buffer) {
3603 lfs_free(lfs->free.buffer);
3604 }
3605
3606 return 0;
3607 }
3608
#ifndef LFS_READONLY
// Format the block device with a fresh littlefs: allocates a root dir,
// writes the superblock entry into it, force-compacts so stale data can't
// be mounted, and sanity-checks the result. Always runs lfs_deinit before
// returning, on success and failure alike.
static int lfs_rawformat(lfs_t *lfs, const struct lfs_config *cfg) {
    int err = 0;
    {
        err = lfs_init(lfs, cfg);
        if (err) {
            return err;
        }

        // create free lookahead; one bit per block, capped at block_count
        memset(lfs->free.buffer, 0, lfs->cfg->lookahead_size);
        lfs->free.off = 0;
        lfs->free.size = lfs_min(8*lfs->cfg->lookahead_size,
                lfs->cfg->block_count);
        lfs->free.i = 0;
        lfs_alloc_ack(lfs);

        // create root dir
        lfs_mdir_t root;
        err = lfs_dir_alloc(lfs, &root);
        if (err) {
            goto cleanup;
        }

        // write one superblock, recording the effective limits so other
        // mounts agree on them
        lfs_superblock_t superblock = {
            .version = LFS_DISK_VERSION,
            .block_size = lfs->cfg->block_size,
            .block_count = lfs->cfg->block_count,
            .name_max = lfs->name_max,
            .file_max = lfs->file_max,
            .attr_max = lfs->attr_max,
        };

        lfs_superblock_tole32(&superblock);
        err = lfs_dir_commit(lfs, &root, LFS_MKATTRS(
                {LFS_MKTAG(LFS_TYPE_CREATE, 0, 0), NULL},
                {LFS_MKTAG(LFS_TYPE_SUPERBLOCK, 0, 8), "littlefs"},
                {LFS_MKTAG(LFS_TYPE_INLINESTRUCT, 0, sizeof(superblock)),
                    &superblock}));
        if (err) {
            goto cleanup;
        }

        // force compaction to prevent accidentally mounting any
        // older version of littlefs that may live on disk
        root.erased = false;
        err = lfs_dir_commit(lfs, &root, NULL, 0);
        if (err) {
            goto cleanup;
        }

        // sanity check that fetch works
        err = lfs_dir_fetch(lfs, &root, (const lfs_block_t[2]){0, 1});
        if (err) {
            goto cleanup;
        }
    }

cleanup:
    // format leaves the fs unmounted; release everything either way
    lfs_deinit(lfs);
    return err;

}
#endif
3674
// Mount an existing filesystem: initializes state, walks the metadata
// tail-list looking for the superblock, validates the on-disk version and
// limits against the compiled-in configuration, and accumulates any
// pending gstate found along the way. On any failure the fs is unmounted
// before returning.
static int lfs_rawmount(lfs_t *lfs, const struct lfs_config *cfg) {
    int err = lfs_init(lfs, cfg);
    if (err) {
        return err;
    }

    // scan directory blocks for superblock and any global updates
    lfs_mdir_t dir = {.tail = {0, 1}};
    lfs_block_t cycle = 0;
    while (!lfs_pair_isnull(dir.tail)) {
        // a tail-list longer than block_count/2 pairs must contain a loop
        if (cycle >= lfs->cfg->block_count/2) {
            // loop detected
            err = LFS_ERR_CORRUPT;
            goto cleanup;
        }
        cycle += 1;

        // fetch next block in tail list, matching on the superblock tag
        lfs_stag_t tag = lfs_dir_fetchmatch(lfs, &dir, dir.tail,
                LFS_MKTAG(0x7ff, 0x3ff, 0),
                LFS_MKTAG(LFS_TYPE_SUPERBLOCK, 0, 8),
                NULL,
                lfs_dir_find_match, &(struct lfs_dir_find_match){
                    lfs, "littlefs", 8});
        if (tag < 0) {
            err = tag;
            goto cleanup;
        }

        // has superblock?
        if (tag && !lfs_tag_isdelete(tag)) {
            // update root
            lfs->root[0] = dir.pair[0];
            lfs->root[1] = dir.pair[1];

            // grab superblock
            lfs_superblock_t superblock;
            tag = lfs_dir_get(lfs, &dir, LFS_MKTAG(0x7ff, 0x3ff, 0),
                    LFS_MKTAG(LFS_TYPE_INLINESTRUCT, 0, sizeof(superblock)),
                    &superblock);
            if (tag < 0) {
                err = tag;
                goto cleanup;
            }
            lfs_superblock_fromle32(&superblock);

            // check version; same major and a minor no newer than ours
            // is mountable
            uint16_t major_version = (0xffff & (superblock.version >> 16));
            uint16_t minor_version = (0xffff & (superblock.version >> 0));
            if ((major_version != LFS_DISK_VERSION_MAJOR ||
                 minor_version > LFS_DISK_VERSION_MINOR)) {
                LFS_ERROR("Invalid version v%"PRIu16".%"PRIu16,
                        major_version, minor_version);
                err = LFS_ERR_INVAL;
                goto cleanup;
            }

            // check superblock configuration; on-disk limits override our
            // defaults but must not exceed what this build supports
            if (superblock.name_max) {
                if (superblock.name_max > lfs->name_max) {
                    LFS_ERROR("Unsupported name_max (%"PRIu32" > %"PRIu32")",
                            superblock.name_max, lfs->name_max);
                    err = LFS_ERR_INVAL;
                    goto cleanup;
                }

                lfs->name_max = superblock.name_max;
            }

            if (superblock.file_max) {
                if (superblock.file_max > lfs->file_max) {
                    LFS_ERROR("Unsupported file_max (%"PRIu32" > %"PRIu32")",
                            superblock.file_max, lfs->file_max);
                    err = LFS_ERR_INVAL;
                    goto cleanup;
                }

                lfs->file_max = superblock.file_max;
            }

            if (superblock.attr_max) {
                if (superblock.attr_max > lfs->attr_max) {
                    LFS_ERROR("Unsupported attr_max (%"PRIu32" > %"PRIu32")",
                            superblock.attr_max, lfs->attr_max);
                    err = LFS_ERR_INVAL;
                    goto cleanup;
                }

                lfs->attr_max = superblock.attr_max;
            }
        }

        // has gstate? fold this pair's deltas into the running gstate
        err = lfs_dir_getgstate(lfs, &dir, &lfs->gstate);
        if (err) {
            goto cleanup;
        }
    }

    // found superblock?
    if (lfs_pair_isnull(lfs->root)) {
        err = LFS_ERR_INVAL;
        goto cleanup;
    }

    // update littlefs with gstate
    if (!lfs_gstate_iszero(&lfs->gstate)) {
        LFS_DEBUG("Found pending gstate 0x%08"PRIx32"%08"PRIx32"%08"PRIx32,
                lfs->gstate.tag,
                lfs->gstate.pair[0],
                lfs->gstate.pair[1]);
    }
    // force the gstate tag into a valid state if it isn't already —
    // relies on lfs_tag_isvalid; TODO(review) confirm the bit semantics
    lfs->gstate.tag += !lfs_tag_isvalid(lfs->gstate.tag);
    lfs->gdisk = lfs->gstate;

    // setup free lookahead, to distribute allocations uniformly across
    // boots, we start the allocator at a random location
    lfs->free.off = lfs->seed % lfs->cfg->block_count;
    lfs_alloc_drop(lfs);

    return 0;

cleanup:
    lfs_rawunmount(lfs);
    return err;
}
3801
// Unmount the filesystem; currently just releases the buffers allocated
// by lfs_init
static int lfs_rawunmount(lfs_t *lfs) {
    return lfs_deinit(lfs);
}
3805
3806
/// Filesystem-level operations ///

// Invoke cb(data, block) for every block the filesystem currently
// references: both blocks of each metadata pair, the CTZ lists of stored
// files, blocks of open (dirty/writing) files, and — when includeorphans
// is set — the head pairs recorded in directory entries.
int lfs_fs_rawtraverse(lfs_t *lfs,
        int (*cb)(void *data, lfs_block_t block), void *data,
        bool includeorphans) {
    // iterate over metadata pairs
    lfs_mdir_t dir = {.tail = {0, 1}};

#ifdef LFS_MIGRATE
    // also consider v1 blocks during migration
    if (lfs->lfs1) {
        int err = lfs1_traverse(lfs, cb, data);
        if (err) {
            return err;
        }

        dir.tail[0] = lfs->root[0];
        dir.tail[1] = lfs->root[1];
    }
#endif

    lfs_block_t cycle = 0;
    while (!lfs_pair_isnull(dir.tail)) {
        // a tail-list longer than block_count/2 pairs must contain a loop
        if (cycle >= lfs->cfg->block_count/2) {
            // loop detected
            return LFS_ERR_CORRUPT;
        }
        cycle += 1;

        // report both blocks of the metadata pair
        for (int i = 0; i < 2; i++) {
            int err = cb(data, dir.tail[i]);
            if (err) {
                return err;
            }
        }

        // iterate through ids in directory
        int err = lfs_dir_fetch(lfs, &dir, dir.tail);
        if (err) {
            return err;
        }

        for (uint16_t id = 0; id < dir.count; id++) {
            struct lfs_ctz ctz;
            lfs_stag_t tag = lfs_dir_get(lfs, &dir, LFS_MKTAG(0x700, 0x3ff, 0),
                    LFS_MKTAG(LFS_TYPE_STRUCT, id, sizeof(ctz)), &ctz);
            if (tag < 0) {
                if (tag == LFS_ERR_NOENT) {
                    // entry has no struct (e.g. inline), nothing to walk
                    continue;
                }
                return tag;
            }
            lfs_ctz_fromle32(&ctz);

            if (lfs_tag_type3(tag) == LFS_TYPE_CTZSTRUCT) {
                // file stored as a CTZ skip-list, walk its blocks
                err = lfs_ctz_traverse(lfs, NULL, &lfs->rcache,
                        ctz.head, ctz.size, cb, data);
                if (err) {
                    return err;
                }
            } else if (includeorphans &&
                    lfs_tag_type3(tag) == LFS_TYPE_DIRSTRUCT) {
                // dirstruct stores a pair; reinterpret ctz as two blocks
                for (int i = 0; i < 2; i++) {
                    err = cb(data, (&ctz.head)[i]);
                    if (err) {
                        return err;
                    }
                }
            }
        }
    }

#ifndef LFS_READONLY
    // iterate over any open files, which may reference blocks not yet
    // committed to metadata
    for (lfs_file_t *f = (lfs_file_t*)lfs->mlist; f; f = f->next) {
        if (f->type != LFS_TYPE_REG) {
            continue;
        }

        if ((f->flags & LFS_F_DIRTY) && !(f->flags & LFS_F_INLINE)) {
            int err = lfs_ctz_traverse(lfs, &f->cache, &lfs->rcache,
                    f->ctz.head, f->ctz.size, cb, data);
            if (err) {
                return err;
            }
        }

        if ((f->flags & LFS_F_WRITING) && !(f->flags & LFS_F_INLINE)) {
            int err = lfs_ctz_traverse(lfs, &f->cache, &lfs->rcache,
                    f->block, f->pos, cb, data);
            if (err) {
                return err;
            }
        }
    }
#endif

    return 0;
}
3905
3906 #ifndef LFS_READONLY
3907 static int lfs_fs_pred(lfs_t *lfs,
3908 const lfs_block_t pair[2], lfs_mdir_t *pdir) {
3909 // iterate over all directory directory entries
3910 pdir->tail[0] = 0;
3911 pdir->tail[1] = 1;
3912 lfs_block_t cycle = 0;
3913 while (!lfs_pair_isnull(pdir->tail)) {
3914 if (cycle >= lfs->cfg->block_count/2) {
3915 // loop detected
3916 return LFS_ERR_CORRUPT;
3917 }
3918 cycle += 1;
3919
3920 if (lfs_pair_cmp(pdir->tail, pair) == 0) {
3921 return 0;
3922 }
3923
3924 int err = lfs_dir_fetch(lfs, pdir, pdir->tail);
3925 if (err) {
3926 return err;
3927 }
3928 }
3929
3930 return LFS_ERR_NOENT;
3931 }
3932 #endif
3933
#ifndef LFS_READONLY
// Context passed to the lfs_fs_parent_match callback
struct lfs_fs_parent_match {
    lfs_t *lfs; // filesystem handle, needed for block reads in the callback
    const lfs_block_t pair[2]; // child metadata pair we are searching for
};
#endif
3940
3941 #ifndef LFS_READONLY
3942 static int lfs_fs_parent_match(void *data,
3943 lfs_tag_t tag, const void *buffer) {
3944 struct lfs_fs_parent_match *find = data;
3945 lfs_t *lfs = find->lfs;
3946 const struct lfs_diskoff *disk = buffer;
3947 (void)tag;
3948
3949 lfs_block_t child[2];
3950 int err = lfs_bd_read(lfs,
3951 &lfs->pcache, &lfs->rcache, lfs->cfg->block_size,
3952 disk->block, disk->off, &child, sizeof(child));
3953 if (err) {
3954 return err;
3955 }
3956
3957 lfs_pair_fromle32(child);
3958 return (lfs_pair_cmp(child, find->pair) == 0) ? LFS_CMP_EQ : LFS_CMP_LT;
3959 }
3960 #endif
3961
3962 #ifndef LFS_READONLY
3963 static lfs_stag_t lfs_fs_parent(lfs_t *lfs, const lfs_block_t pair[2],
3964 lfs_mdir_t *parent) {
3965 // use fetchmatch with callback to find pairs
3966 parent->tail[0] = 0;
3967 parent->tail[1] = 1;
3968 lfs_block_t cycle = 0;
3969 while (!lfs_pair_isnull(parent->tail)) {
3970 if (cycle >= lfs->cfg->block_count/2) {
3971 // loop detected
3972 return LFS_ERR_CORRUPT;
3973 }
3974 cycle += 1;
3975
3976 lfs_stag_t tag = lfs_dir_fetchmatch(lfs, parent, parent->tail,
3977 LFS_MKTAG(0x7ff, 0, 0x3ff),
3978 LFS_MKTAG(LFS_TYPE_DIRSTRUCT, 0, 8),
3979 NULL,
3980 lfs_fs_parent_match, &(struct lfs_fs_parent_match){
3981 lfs, {pair[0], pair[1]}});
3982 if (tag && tag != LFS_ERR_NOENT) {
3983 return tag;
3984 }
3985 }
3986
3987 return LFS_ERR_NOENT;
3988 }
3989 #endif
3990
#ifndef LFS_READONLY
// Repoint every reference to oldpair at newpair after a metadata pair has
// been rewritten elsewhere: the in-RAM root and open dirs, the parent's
// on-disk entry, and the predecessor's tail pointer. Uses the gstate
// orphan counter so a power loss mid-update leaves a recoverable fs.
static int lfs_fs_relocate(lfs_t *lfs,
        const lfs_block_t oldpair[2], lfs_block_t newpair[2]) {
    // update internal root
    if (lfs_pair_cmp(oldpair, lfs->root) == 0) {
        lfs->root[0] = newpair[0];
        lfs->root[1] = newpair[1];
    }

    // update internally tracked dirs
    for (struct lfs_mlist *d = lfs->mlist; d; d = d->next) {
        if (lfs_pair_cmp(oldpair, d->m.pair) == 0) {
            d->m.pair[0] = newpair[0];
            d->m.pair[1] = newpair[1];
        }

        if (d->type == LFS_TYPE_DIR &&
                lfs_pair_cmp(oldpair, ((lfs_dir_t*)d)->head) == 0) {
            ((lfs_dir_t*)d)->head[0] = newpair[0];
            ((lfs_dir_t*)d)->head[1] = newpair[1];
        }
    }

    // find parent
    lfs_mdir_t parent;
    lfs_stag_t tag = lfs_fs_parent(lfs, oldpair, &parent);
    if (tag < 0 && tag != LFS_ERR_NOENT) {
        return tag;
    }

    if (tag != LFS_ERR_NOENT) {
        // update disk, this creates a desync
        int err = lfs_fs_preporphans(lfs, +1);
        if (err) {
            return err;
        }

        // fix pending move in this pair? this looks like an optimization but
        // is in fact _required_ since relocating may outdate the move.
        uint16_t moveid = 0x3ff;
        if (lfs_gstate_hasmovehere(&lfs->gstate, parent.pair)) {
            moveid = lfs_tag_id(lfs->gstate.tag);
            LFS_DEBUG("Fixing move while relocating "
                    "{0x%"PRIx32", 0x%"PRIx32"} 0x%"PRIx16"\n",
                    parent.pair[0], parent.pair[1], moveid);
            lfs_fs_prepmove(lfs, 0x3ff, NULL);
            // deleting the moved id shifts later ids down by one
            if (moveid < lfs_tag_id(tag)) {
                tag -= LFS_MKTAG(0, 1, 0);
            }
        }

        lfs_pair_tole32(newpair);
        err = lfs_dir_commit(lfs, &parent, LFS_MKATTRS(
                {LFS_MKTAG_IF(moveid != 0x3ff,
                    LFS_TYPE_DELETE, moveid, 0), NULL},
                {tag, newpair}));
        lfs_pair_fromle32(newpair);
        if (err) {
            return err;
        }

        // next step, clean up orphans
        err = lfs_fs_preporphans(lfs, -1);
        if (err) {
            return err;
        }
    }

    // find pred
    int err = lfs_fs_pred(lfs, oldpair, &parent);
    if (err && err != LFS_ERR_NOENT) {
        return err;
    }

    // if we can't find dir, it must be new
    if (err != LFS_ERR_NOENT) {
        // fix pending move in this pair? this looks like an optimization but
        // is in fact _required_ since relocating may outdate the move.
        uint16_t moveid = 0x3ff;
        if (lfs_gstate_hasmovehere(&lfs->gstate, parent.pair)) {
            moveid = lfs_tag_id(lfs->gstate.tag);
            LFS_DEBUG("Fixing move while relocating "
                    "{0x%"PRIx32", 0x%"PRIx32"} 0x%"PRIx16"\n",
                    parent.pair[0], parent.pair[1], moveid);
            lfs_fs_prepmove(lfs, 0x3ff, NULL);
        }

        // replace bad pair, either we clean up desync, or no desync occurred
        lfs_pair_tole32(newpair);
        err = lfs_dir_commit(lfs, &parent, LFS_MKATTRS(
                {LFS_MKTAG_IF(moveid != 0x3ff,
                    LFS_TYPE_DELETE, moveid, 0), NULL},
                {LFS_MKTAG(LFS_TYPE_TAIL + parent.split, 0x3ff, 8), newpair}));
        lfs_pair_fromle32(newpair);
        if (err) {
            return err;
        }
    }

    return 0;
}
#endif
4093
4094 #ifndef LFS_READONLY
4095 static int lfs_fs_preporphans(lfs_t *lfs, int8_t orphans) {
4096 LFS_ASSERT(lfs_tag_size(lfs->gstate.tag) > 0 || orphans >= 0);
4097 lfs->gstate.tag += orphans;
4098 lfs->gstate.tag = ((lfs->gstate.tag & ~LFS_MKTAG(0x800, 0, 0)) |
4099 ((uint32_t)lfs_gstate_hasorphans(&lfs->gstate) << 31));
4100
4101 return 0;
4102 }
4103 #endif
4104
4105 #ifndef LFS_READONLY
4106 static void lfs_fs_prepmove(lfs_t *lfs,
4107 uint16_t id, const lfs_block_t pair[2]) {
4108 lfs->gstate.tag = ((lfs->gstate.tag & ~LFS_MKTAG(0x7ff, 0x3ff, 0)) |
4109 ((id != 0x3ff) ? LFS_MKTAG(LFS_TYPE_DELETE, id, 0) : 0));
4110 lfs->gstate.pair[0] = (id != 0x3ff) ? pair[0] : 0;
4111 lfs->gstate.pair[1] = (id != 0x3ff) ? pair[1] : 0;
4112 }
4113 #endif
4114
4115 #ifndef LFS_READONLY
4116 static int lfs_fs_demove(lfs_t *lfs) {
4117 if (!lfs_gstate_hasmove(&lfs->gdisk)) {
4118 return 0;
4119 }
4120
4121 // Fix bad moves
4122 LFS_DEBUG("Fixing move {0x%"PRIx32", 0x%"PRIx32"} 0x%"PRIx16,
4123 lfs->gdisk.pair[0],
4124 lfs->gdisk.pair[1],
4125 lfs_tag_id(lfs->gdisk.tag));
4126
4127 // fetch and delete the moved entry
4128 lfs_mdir_t movedir;
4129 int err = lfs_dir_fetch(lfs, &movedir, lfs->gdisk.pair);
4130 if (err) {
4131 return err;
4132 }
4133
4134 // prep gstate and delete move id
4135 uint16_t moveid = lfs_tag_id(lfs->gdisk.tag);
4136 lfs_fs_prepmove(lfs, 0x3ff, NULL);
4137 err = lfs_dir_commit(lfs, &movedir, LFS_MKATTRS(
4138 {LFS_MKTAG(LFS_TYPE_DELETE, moveid, 0), NULL}));
4139 if (err) {
4140 return err;
4141 }
4142
4143 return 0;
4144 }
4145 #endif
4146
#ifndef LFS_READONLY
// Remove orphaned metadata pairs left behind by interrupted operations.
// Walks the tail-list; any pair with no parent entry is dropped, and any
// tail that disagrees with the parent's recorded pair (a "half-orphan")
// is rewritten to match. Finally clears the orphan count in gstate.
static int lfs_fs_deorphan(lfs_t *lfs) {
    if (!lfs_gstate_hasorphans(&lfs->gstate)) {
        return 0;
    }

    // Fix any orphans
    lfs_mdir_t pdir = {.split = true, .tail = {0, 1}};
    lfs_mdir_t dir;

    // iterate over all metadata pairs in the tail-list
    while (!lfs_pair_isnull(pdir.tail)) {
        int err = lfs_dir_fetch(lfs, &dir, pdir.tail);
        if (err) {
            return err;
        }

        // check head blocks for orphans; only hard tails (split == false)
        // can be orphaned directories
        if (!pdir.split) {
            // check if we have a parent
            lfs_mdir_t parent;
            lfs_stag_t tag = lfs_fs_parent(lfs, pdir.tail, &parent);
            if (tag < 0 && tag != LFS_ERR_NOENT) {
                return tag;
            }

            if (tag == LFS_ERR_NOENT) {
                // we are an orphan
                LFS_DEBUG("Fixing orphan {0x%"PRIx32", 0x%"PRIx32"}",
                        pdir.tail[0], pdir.tail[1]);

                err = lfs_dir_drop(lfs, &pdir, &dir);
                if (err) {
                    return err;
                }

                // refetch tail, dropping may have changed the chain
                continue;
            }

            // compare the parent's recorded pair with the actual tail
            lfs_block_t pair[2];
            lfs_stag_t res = lfs_dir_get(lfs, &parent,
                    LFS_MKTAG(0x7ff, 0x3ff, 0), tag, pair);
            if (res < 0) {
                return res;
            }
            lfs_pair_fromle32(pair);

            if (!lfs_pair_sync(pair, pdir.tail)) {
                // we have desynced
                LFS_DEBUG("Fixing half-orphan {0x%"PRIx32", 0x%"PRIx32"} "
                        "-> {0x%"PRIx32", 0x%"PRIx32"}",
                        pdir.tail[0], pdir.tail[1], pair[0], pair[1]);

                // rewrite our tail to point at the parent's pair
                lfs_pair_tole32(pair);
                err = lfs_dir_commit(lfs, &pdir, LFS_MKATTRS(
                        {LFS_MKTAG(LFS_TYPE_SOFTTAIL, 0x3ff, 8), pair}));
                lfs_pair_fromle32(pair);
                if (err) {
                    return err;
                }

                // refetch tail after the rewrite
                continue;
            }
        }

        pdir = dir;
    }

    // mark orphans as fixed
    return lfs_fs_preporphans(lfs, -lfs_gstate_getorphans(&lfs->gstate));
}
#endif
4221
4222 #ifndef LFS_READONLY
4223 static int lfs_fs_forceconsistency(lfs_t *lfs) {
4224 int err = lfs_fs_demove(lfs);
4225 if (err) {
4226 return err;
4227 }
4228
4229 err = lfs_fs_deorphan(lfs);
4230 if (err) {
4231 return err;
4232 }
4233
4234 return 0;
4235 }
4236 #endif
4237
4238 static int lfs_fs_size_count(void *p, lfs_block_t block) {
4239 (void)block;
4240 lfs_size_t *size = p;
4241 *size += 1;
4242 return 0;
4243 }
4244
4245 static lfs_ssize_t lfs_fs_rawsize(lfs_t *lfs) {
4246 lfs_size_t size = 0;
4247 int err = lfs_fs_rawtraverse(lfs, lfs_fs_size_count, &size, false);
4248 if (err) {
4249 return err;
4250 }
4251
4252 return size;
4253 }
4254
4255 #ifdef LFS_MIGRATE
////// Migration from littlefs v1 below this //////
4257
4258 /// Version info ///
4259
4260 // Software library version
// Major (top 16 bits), incremented on backwards incompatible changes
// Minor (bottom 16 bits), incremented on feature additions
4263 #define LFS1_VERSION 0x00010007
4264 #define LFS1_VERSION_MAJOR (0xffff & (LFS1_VERSION >> 16))
4265 #define LFS1_VERSION_MINOR (0xffff & (LFS1_VERSION >> 0))
4266
4267 // Version of On-disk data structures
// Major (top 16 bits), incremented on backwards incompatible changes
// Minor (bottom 16 bits), incremented on feature additions
4270 #define LFS1_DISK_VERSION 0x00010001
4271 #define LFS1_DISK_VERSION_MAJOR (0xffff & (LFS1_DISK_VERSION >> 16))
4272 #define LFS1_DISK_VERSION_MINOR (0xffff & (LFS1_DISK_VERSION >> 0))
4273
4274
4275 /// v1 Definitions ///
4276
// File types stored in v1 directory entries
enum lfs1_type {
    LFS1_TYPE_REG = 0x11, // regular file
    LFS1_TYPE_DIR = 0x22, // directory
    LFS1_TYPE_SUPERBLOCK = 0x2e, // superblock entry
};
4283
// v1 filesystem state tracked during migration
typedef struct lfs1 {
    lfs_block_t root[2]; // metadata pair of the v1 root directory
} lfs1_t;
4287
// In-memory v1 directory entry: its offset within the metadata pair plus
// a copy of the on-disk representation
typedef struct lfs1_entry {
    lfs_off_t off; // offset of this entry within its dir pair

    struct lfs1_disk_entry {
        uint8_t type; // entry type (see enum lfs1_type)
        uint8_t elen; // length of the entry-specific u data
        uint8_t alen; // length of attributes following u
        uint8_t nlen; // length of the name following the attributes
        union {
            struct {
                lfs_block_t head; // head block of the file's CTZ list
                lfs_size_t size; // file size in bytes
            } file;
            lfs_block_t dir[2]; // metadata pair of a subdirectory
        } u;
    } d;
} lfs1_entry_t;
4305
// In-memory v1 directory state
typedef struct lfs1_dir {
    struct lfs1_dir *next; // next dir in a linked list of open dirs
    lfs_block_t pair[2]; // metadata pair currently fetched
    lfs_off_t off; // read offset within the current pair

    lfs_block_t head[2]; // first metadata pair of the directory
    lfs_off_t pos; // logical position across chained pairs

    struct lfs1_disk_dir {
        uint32_t rev; // revision count, larger (signed cmp) is newer
        lfs_size_t size; // used bytes; top bit set means a tail follows
        lfs_block_t tail[2]; // next metadata pair in the directory chain
    } d;
} lfs1_dir_t;
4320
// v1 superblock entry, stored in the {0, 1} metadata pair
typedef struct lfs1_superblock {
    lfs_off_t off; // offset of the superblock entry within the pair

    struct lfs1_disk_superblock {
        uint8_t type; // entry type, LFS1_TYPE_SUPERBLOCK
        uint8_t elen; // entry-data length
        uint8_t alen; // attribute length
        uint8_t nlen; // name length
        lfs_block_t root[2]; // metadata pair of the v1 root directory
        uint32_t block_size; // configured block size in bytes
        uint32_t block_count; // configured number of blocks
        uint32_t version; // on-disk version, (major << 16) | minor
        char magic[8]; // magic string, presumably "littlefs" — TODO confirm
    } d;
} lfs1_superblock_t;
4336
4337
/// Low-level wrappers v1->v2 ///
// Fold buffer into the running CRC; thin wrapper over the v2 lfs_crc,
// kept for v1 naming symmetry
static void lfs1_crc(uint32_t *crc, const void *buffer, size_t size) {
    *crc = lfs_crc(*crc, buffer, size);
}
4342
// Read size bytes at block/off through the v2 caches (hint = size, so the
// whole read is prefetched)
static int lfs1_bd_read(lfs_t *lfs, lfs_block_t block,
        lfs_off_t off, void *buffer, lfs_size_t size) {
    // if we ever do more than writes to alternating pairs,
    // this may need to consider pcache
    return lfs_bd_read(lfs, &lfs->pcache, &lfs->rcache, size,
            block, off, buffer, size);
}
4350
4351 static int lfs1_bd_crc(lfs_t *lfs, lfs_block_t block,
4352 lfs_off_t off, lfs_size_t size, uint32_t *crc) {
4353 for (lfs_off_t i = 0; i < size; i++) {
4354 uint8_t c;
4355 int err = lfs1_bd_read(lfs, block, off+i, &c, 1);
4356 if (err) {
4357 return err;
4358 }
4359
4360 lfs1_crc(crc, &c, 1);
4361 }
4362
4363 return 0;
4364 }
4365
4366
4367 /// Endian swapping functions ///
4368 static void lfs1_dir_fromle32(struct lfs1_disk_dir *d) {
4369 d->rev = lfs_fromle32(d->rev);
4370 d->size = lfs_fromle32(d->size);
4371 d->tail[0] = lfs_fromle32(d->tail[0]);
4372 d->tail[1] = lfs_fromle32(d->tail[1]);
4373 }
4374
4375 static void lfs1_dir_tole32(struct lfs1_disk_dir *d) {
4376 d->rev = lfs_tole32(d->rev);
4377 d->size = lfs_tole32(d->size);
4378 d->tail[0] = lfs_tole32(d->tail[0]);
4379 d->tail[1] = lfs_tole32(d->tail[1]);
4380 }
4381
4382 static void lfs1_entry_fromle32(struct lfs1_disk_entry *d) {
4383 d->u.dir[0] = lfs_fromle32(d->u.dir[0]);
4384 d->u.dir[1] = lfs_fromle32(d->u.dir[1]);
4385 }
4386
4387 static void lfs1_entry_tole32(struct lfs1_disk_entry *d) {
4388 d->u.dir[0] = lfs_tole32(d->u.dir[0]);
4389 d->u.dir[1] = lfs_tole32(d->u.dir[1]);
4390 }
4391
4392 static void lfs1_superblock_fromle32(struct lfs1_disk_superblock *d) {
4393 d->root[0] = lfs_fromle32(d->root[0]);
4394 d->root[1] = lfs_fromle32(d->root[1]);
4395 d->block_size = lfs_fromle32(d->block_size);
4396 d->block_count = lfs_fromle32(d->block_count);
4397 d->version = lfs_fromle32(d->version);
4398 }
4399
4400
4401 ///// Metadata pair and directory operations ///
4402 static inline lfs_size_t lfs1_entry_size(const lfs1_entry_t *entry) {
4403 return 4 + entry->d.elen + entry->d.alen + entry->d.nlen;
4404 }
4405
4406 static int lfs1_dir_fetch(lfs_t *lfs,
4407 lfs1_dir_t *dir, const lfs_block_t pair[2]) {
4408 // copy out pair, otherwise may be aliasing dir
4409 const lfs_block_t tpair[2] = {pair[0], pair[1]};
4410 bool valid = false;
4411
4412 // check both blocks for the most recent revision
4413 for (int i = 0; i < 2; i++) {
4414 struct lfs1_disk_dir test;
4415 int err = lfs1_bd_read(lfs, tpair[i], 0, &test, sizeof(test));
4416 lfs1_dir_fromle32(&test);
4417 if (err) {
4418 if (err == LFS_ERR_CORRUPT) {
4419 continue;
4420 }
4421 return err;
4422 }
4423
4424 if (valid && lfs_scmp(test.rev, dir->d.rev) < 0) {
4425 continue;
4426 }
4427
4428 if ((0x7fffffff & test.size) < sizeof(test)+4 ||
4429 (0x7fffffff & test.size) > lfs->cfg->block_size) {
4430 continue;
4431 }
4432
4433 uint32_t crc = 0xffffffff;
4434 lfs1_dir_tole32(&test);
4435 lfs1_crc(&crc, &test, sizeof(test));
4436 lfs1_dir_fromle32(&test);
4437 err = lfs1_bd_crc(lfs, tpair[i], sizeof(test),
4438 (0x7fffffff & test.size) - sizeof(test), &crc);
4439 if (err) {
4440 if (err == LFS_ERR_CORRUPT) {
4441 continue;
4442 }
4443 return err;
4444 }
4445
4446 if (crc != 0) {
4447 continue;
4448 }
4449
4450 valid = true;
4451
4452 // setup dir in case it's valid
4453 dir->pair[0] = tpair[(i+0) % 2];
4454 dir->pair[1] = tpair[(i+1) % 2];
4455 dir->off = sizeof(dir->d);
4456 dir->d = test;
4457 }
4458
4459 if (!valid) {
4460 LFS_ERROR("Corrupted dir pair at {0x%"PRIx32", 0x%"PRIx32"}",
4461 tpair[0], tpair[1]);
4462 return LFS_ERR_CORRUPT;
4463 }
4464
4465 return 0;
4466 }
4467
4468 static int lfs1_dir_next(lfs_t *lfs, lfs1_dir_t *dir, lfs1_entry_t *entry) {
4469 while (dir->off + sizeof(entry->d) > (0x7fffffff & dir->d.size)-4) {
4470 if (!(0x80000000 & dir->d.size)) {
4471 entry->off = dir->off;
4472 return LFS_ERR_NOENT;
4473 }
4474
4475 int err = lfs1_dir_fetch(lfs, dir, dir->d.tail);
4476 if (err) {
4477 return err;
4478 }
4479
4480 dir->off = sizeof(dir->d);
4481 dir->pos += sizeof(dir->d) + 4;
4482 }
4483
4484 int err = lfs1_bd_read(lfs, dir->pair[0], dir->off,
4485 &entry->d, sizeof(entry->d));
4486 lfs1_entry_fromle32(&entry->d);
4487 if (err) {
4488 return err;
4489 }
4490
4491 entry->off = dir->off;
4492 dir->off += lfs1_entry_size(entry);
4493 dir->pos += lfs1_entry_size(entry);
4494 return 0;
4495 }
4496
/// littlefs v1 specific operations ///
// Traverse every block referenced by the v1 filesystem — metadata pairs,
// file CTZ lists — plus any threaded v2 directories found during
// migration, invoking cb(data, block) for each.
int lfs1_traverse(lfs_t *lfs, int (*cb)(void*, lfs_block_t), void *data) {
    if (lfs_pair_isnull(lfs->lfs1->root)) {
        // v1 root not discovered yet, nothing to traverse
        return 0;
    }

    // iterate over metadata pairs
    lfs1_dir_t dir;
    lfs1_entry_t entry;
    lfs_block_t cwd[2] = {0, 1};

    while (true) {
        // report both blocks of the current pair
        for (int i = 0; i < 2; i++) {
            int err = cb(data, cwd[i]);
            if (err) {
                return err;
            }
        }

        int err = lfs1_dir_fetch(lfs, &dir, cwd);
        if (err) {
            return err;
        }

        // iterate over contents (size's top bit is the tail flag, the
        // trailing 4 bytes are the commit CRC)
        while (dir.off + sizeof(entry.d) <= (0x7fffffff & dir.d.size)-4) {
            err = lfs1_bd_read(lfs, dir.pair[0], dir.off,
                    &entry.d, sizeof(entry.d));
            // NOTE(review): entry.d is byteswapped before err is checked;
            // harmless since we return on err, but it swaps uninitialized
            // data — consider checking err first
            lfs1_entry_fromle32(&entry.d);
            if (err) {
                return err;
            }

            dir.off += lfs1_entry_size(&entry);
            // any entry in the regular-file type class carries a CTZ list
            if ((0x70 & entry.d.type) == (0x70 & LFS1_TYPE_REG)) {
                err = lfs_ctz_traverse(lfs, NULL, &lfs->rcache,
                        entry.d.u.file.head, entry.d.u.file.size, cb, data);
                if (err) {
                    return err;
                }
            }
        }

        // we also need to check if we contain a threaded v2 directory
        lfs_mdir_t dir2 = {.split=true, .tail={cwd[0], cwd[1]}};
        while (dir2.split) {
            err = lfs_dir_fetch(lfs, &dir2, dir2.tail);
            if (err) {
                // fetch failure here just ends the v2 scan for this pair
                break;
            }

            for (int i = 0; i < 2; i++) {
                err = cb(data, dir2.pair[i]);
                if (err) {
                    return err;
                }
            }
        }

        // follow the v1 tail to the next pair
        cwd[0] = dir.d.tail[0];
        cwd[1] = dir.d.tail[1];

        if (lfs_pair_isnull(cwd)) {
            break;
        }
    }

    return 0;
}
4566
4567 static int lfs1_moved(lfs_t *lfs, const void *e) {
4568 if (lfs_pair_isnull(lfs->lfs1->root)) {
4569 return 0;
4570 }
4571
4572 // skip superblock
4573 lfs1_dir_t cwd;
4574 int err = lfs1_dir_fetch(lfs, &cwd, (const lfs_block_t[2]){0, 1});
4575 if (err) {
4576 return err;
4577 }
4578
4579 // iterate over all directory directory entries
4580 lfs1_entry_t entry;
4581 while (!lfs_pair_isnull(cwd.d.tail)) {
4582 err = lfs1_dir_fetch(lfs, &cwd, cwd.d.tail);
4583 if (err) {
4584 return err;
4585 }
4586
4587 while (true) {
4588 err = lfs1_dir_next(lfs, &cwd, &entry);
4589 if (err && err != LFS_ERR_NOENT) {
4590 return err;
4591 }
4592
4593 if (err == LFS_ERR_NOENT) {
4594 break;
4595 }
4596
4597 if (!(0x80 & entry.d.type) &&
4598 memcmp(&entry.d.u, e, sizeof(entry.d.u)) == 0) {
4599 return true;
4600 }
4601 }
4602 }
4603
4604 return false;
4605 }
4606
4607 /// Filesystem operations ///
// Mounts a littlefs v1 filesystem for migration.
//
// Initializes lfs from cfg, reads the v1 superblock from blocks {0, 1},
// and records the v1 root pair in lfs1. Returns 0 on success,
// LFS_ERR_CORRUPT on a bad superblock, LFS_ERR_INVAL on an incompatible
// disk version, or another negative error code; lfs is deinitialized on
// any failure after lfs_init succeeds.
static int lfs1_mount(lfs_t *lfs, struct lfs1 *lfs1,
        const struct lfs_config *cfg) {
    int err = 0;
    {
        err = lfs_init(lfs, cfg);
        if (err) {
            return err;
        }

        // bind the v1 state, root unknown until the superblock is read
        lfs->lfs1 = lfs1;
        lfs->lfs1->root[0] = LFS_BLOCK_NULL;
        lfs->lfs1->root[1] = LFS_BLOCK_NULL;

        // setup free lookahead
        lfs->free.off = 0;
        lfs->free.size = 0;
        lfs->free.i = 0;
        lfs_alloc_ack(lfs);

        // load superblock
        lfs1_dir_t dir;
        lfs1_superblock_t superblock;
        err = lfs1_dir_fetch(lfs, &dir, (const lfs_block_t[2]){0, 1});
        if (err && err != LFS_ERR_CORRUPT) {
            // LFS_ERR_CORRUPT is tolerated here and rechecked below
            goto cleanup;
        }

        if (!err) {
            // the superblock entry sits immediately after the dir header
            err = lfs1_bd_read(lfs, dir.pair[0], sizeof(dir.d),
                    &superblock.d, sizeof(superblock.d));
            lfs1_superblock_fromle32(&superblock.d);
            if (err) {
                goto cleanup;
            }

            lfs->lfs1->root[0] = superblock.d.root[0];
            lfs->lfs1->root[1] = superblock.d.root[1];
        }

        // note: if err is still LFS_ERR_CORRUPT the || short-circuits, so
        // the (then uninitialized) superblock is never inspected
        if (err || memcmp(superblock.d.magic, "littlefs", 8) != 0) {
            LFS_ERROR("Invalid superblock at {0x%"PRIx32", 0x%"PRIx32"}",
                    0, 1);
            err = LFS_ERR_CORRUPT;
            goto cleanup;
        }

        // major version must match exactly; a newer minor version on disk
        // is rejected, an older one is accepted
        uint16_t major_version = (0xffff & (superblock.d.version >> 16));
        uint16_t minor_version = (0xffff & (superblock.d.version >> 0));
        if ((major_version != LFS1_DISK_VERSION_MAJOR ||
             minor_version > LFS1_DISK_VERSION_MINOR)) {
            LFS_ERROR("Invalid version v%d.%d", major_version, minor_version);
            err = LFS_ERR_INVAL;
            goto cleanup;
        }

        return 0;
    }

cleanup:
    lfs_deinit(lfs);
    return err;
}
4670
4671 static int lfs1_unmount(lfs_t *lfs) {
4672 return lfs_deinit(lfs);
4673 }
4674
4675 /// v1 migration ///
// Migrates an on-disk littlefs v1 filesystem to the v2 format in place.
//
// Mounts the v1 filesystem, walks each v1 directory, rewrites its entries
// into freshly allocated v2 metadata pairs, threads the new pairs
// together, and finally writes a v2 superblock over blocks {0, 1} — that
// last commit is what marks the migration as successful. Returns 0 on
// success or a negative error code; the v1 mount is torn down either way.
static int lfs_rawmigrate(lfs_t *lfs, const struct lfs_config *cfg) {
    struct lfs1 lfs1;
    int err = lfs1_mount(lfs, &lfs1, cfg);
    if (err) {
        return err;
    }

    {
        // iterate through each directory, copying over entries
        // into new directory
        lfs1_dir_t dir1;
        lfs_mdir_t dir2;
        dir1.d.tail[0] = lfs->lfs1->root[0];
        dir1.d.tail[1] = lfs->lfs1->root[1];
        while (!lfs_pair_isnull(dir1.d.tail)) {
            // iterate old dir
            err = lfs1_dir_fetch(lfs, &dir1, dir1.d.tail);
            if (err) {
                goto cleanup;
            }

            // create new dir and bind as temporary pretend root
            err = lfs_dir_alloc(lfs, &dir2);
            if (err) {
                goto cleanup;
            }

            // keep the v1 revision, and remember the v1 pair so its first
            // block can be overwritten with the v2 image later
            dir2.rev = dir1.d.rev;
            dir1.head[0] = dir1.pair[0];
            dir1.head[1] = dir1.pair[1];
            lfs->root[0] = dir2.pair[0];
            lfs->root[1] = dir2.pair[1];

            err = lfs_dir_commit(lfs, &dir2, NULL, 0);
            if (err) {
                goto cleanup;
            }

            // copy each v1 entry into the new v2 dir
            while (true) {
                lfs1_entry_t entry1;
                err = lfs1_dir_next(lfs, &dir1, &entry1);
                if (err && err != LFS_ERR_NOENT) {
                    goto cleanup;
                }

                if (err == LFS_ERR_NOENT) {
                    break;
                }

                // check that entry has not been moved
                if (entry1.d.type & 0x80) {
                    int moved = lfs1_moved(lfs, &entry1.d.u);
                    if (moved < 0) {
                        err = moved;
                        goto cleanup;
                    }

                    if (moved) {
                        // a live copy exists elsewhere, skip this entry
                        continue;
                    }

                    // clear the v1 "moved" flag before copying
                    entry1.d.type &= ~0x80;
                }

                // also fetch name
                char name[LFS_NAME_MAX+1];
                memset(name, 0, sizeof(name));
                err = lfs1_bd_read(lfs, dir1.pair[0],
                        entry1.off + 4+entry1.d.elen+entry1.d.alen,
                        name, entry1.d.nlen);
                if (err) {
                    goto cleanup;
                }

                bool isdir = (entry1.d.type == LFS1_TYPE_DIR);

                // create entry in new dir
                err = lfs_dir_fetch(lfs, &dir2, lfs->root);
                if (err) {
                    goto cleanup;
                }

                // expect "not found" with a valid insertion id; anything
                // else (found, or a real error) aborts the migration
                uint16_t id;
                err = lfs_dir_find(lfs, &dir2, &(const char*){name}, &id);
                if (!(err == LFS_ERR_NOENT && id != 0x3ff)) {
                    err = (err < 0) ? err : LFS_ERR_EXIST;
                    goto cleanup;
                }

                // commit name + struct; the v1 entry union is reused
                // directly as the v2 dir/ctz struct
                lfs1_entry_tole32(&entry1.d);
                err = lfs_dir_commit(lfs, &dir2, LFS_MKATTRS(
                        {LFS_MKTAG(LFS_TYPE_CREATE, id, 0), NULL},
                        {LFS_MKTAG_IF_ELSE(isdir,
                            LFS_TYPE_DIR, id, entry1.d.nlen,
                            LFS_TYPE_REG, id, entry1.d.nlen),
                                name},
                        {LFS_MKTAG_IF_ELSE(isdir,
                            LFS_TYPE_DIRSTRUCT, id, sizeof(entry1.d.u),
                            LFS_TYPE_CTZSTRUCT, id, sizeof(entry1.d.u)),
                                &entry1.d.u}));
                lfs1_entry_fromle32(&entry1.d);
                if (err) {
                    goto cleanup;
                }
            }

            if (!lfs_pair_isnull(dir1.d.tail)) {
                // find last block and update tail to thread into fs
                err = lfs_dir_fetch(lfs, &dir2, lfs->root);
                if (err) {
                    goto cleanup;
                }

                while (dir2.split) {
                    err = lfs_dir_fetch(lfs, &dir2, dir2.tail);
                    if (err) {
                        goto cleanup;
                    }
                }

                lfs_pair_tole32(dir2.pair);
                err = lfs_dir_commit(lfs, &dir2, LFS_MKATTRS(
                        {LFS_MKTAG(LFS_TYPE_SOFTTAIL, 0x3ff, 8), dir1.d.tail}));
                lfs_pair_fromle32(dir2.pair);
                if (err) {
                    goto cleanup;
                }
            }

            // Copy over first block to thread into fs. Unfortunately
            // if this fails there is not much we can do.
            LFS_DEBUG("Migrating {0x%"PRIx32", 0x%"PRIx32"} "
                        "-> {0x%"PRIx32", 0x%"PRIx32"}",
                    lfs->root[0], lfs->root[1], dir1.head[0], dir1.head[1]);

            err = lfs_bd_erase(lfs, dir1.head[1]);
            if (err) {
                goto cleanup;
            }

            err = lfs_dir_fetch(lfs, &dir2, lfs->root);
            if (err) {
                goto cleanup;
            }

            // byte-by-byte copy of the committed v2 metadata into the
            // second block of the old v1 pair
            for (lfs_off_t i = 0; i < dir2.off; i++) {
                uint8_t dat;
                err = lfs_bd_read(lfs,
                        NULL, &lfs->rcache, dir2.off,
                        dir2.pair[0], i, &dat, 1);
                if (err) {
                    goto cleanup;
                }

                err = lfs_bd_prog(lfs,
                        &lfs->pcache, &lfs->rcache, true,
                        dir1.head[1], i, &dat, 1);
                if (err) {
                    goto cleanup;
                }
            }

            err = lfs_bd_flush(lfs, &lfs->pcache, &lfs->rcache, true);
            if (err) {
                goto cleanup;
            }
        }

        // Create new superblock. This marks a successful migration!
        err = lfs1_dir_fetch(lfs, &dir1, (const lfs_block_t[2]){0, 1});
        if (err) {
            goto cleanup;
        }

        // hand-build a v2 mdir over the v1 superblock pair; erased=false
        // forces a fresh compaction on commit
        dir2.pair[0] = dir1.pair[0];
        dir2.pair[1] = dir1.pair[1];
        dir2.rev = dir1.d.rev;
        dir2.off = sizeof(dir2.rev);
        dir2.etag = 0xffffffff;
        dir2.count = 0;
        dir2.tail[0] = lfs->lfs1->root[0];
        dir2.tail[1] = lfs->lfs1->root[1];
        dir2.erased = false;
        dir2.split = true;

        lfs_superblock_t superblock = {
            .version = LFS_DISK_VERSION,
            .block_size = lfs->cfg->block_size,
            .block_count = lfs->cfg->block_count,
            .name_max = lfs->name_max,
            .file_max = lfs->file_max,
            .attr_max = lfs->attr_max,
        };

        lfs_superblock_tole32(&superblock);
        err = lfs_dir_commit(lfs, &dir2, LFS_MKATTRS(
                {LFS_MKTAG(LFS_TYPE_CREATE, 0, 0), NULL},
                {LFS_MKTAG(LFS_TYPE_SUPERBLOCK, 0, 8), "littlefs"},
                {LFS_MKTAG(LFS_TYPE_INLINESTRUCT, 0, sizeof(superblock)),
                    &superblock}));
        if (err) {
            goto cleanup;
        }

        // sanity check that fetch works
        err = lfs_dir_fetch(lfs, &dir2, (const lfs_block_t[2]){0, 1});
        if (err) {
            goto cleanup;
        }

        // force compaction to prevent accidentally mounting v1
        dir2.erased = false;
        err = lfs_dir_commit(lfs, &dir2, NULL, 0);
        if (err) {
            goto cleanup;
        }
    }

cleanup:
    lfs1_unmount(lfs);
    return err;
}
4898
4899 #endif
4900
4901
4902 /// Public API wrappers ///
4903
// This is the single place where cross-cutting concerns — tracing and
// optional thread safety — are layered over the raw implementations.
4905
4906 // Thread-safe wrappers if enabled
#ifdef LFS_THREADSAFE
// delegate to the user-provided lock/unlock callbacks in the config
#define LFS_LOCK(cfg) cfg->lock(cfg)
#define LFS_UNLOCK(cfg) cfg->unlock(cfg)
#else
// no-op stand-ins: LFS_LOCK evaluates to 0 (success) so wrappers can
// unconditionally check its result
#define LFS_LOCK(cfg) ((void)cfg, 0)
#define LFS_UNLOCK(cfg) ((void)cfg)
#endif
4914
4915 // Public API
4916 #ifndef LFS_READONLY
4917 int lfs_format(lfs_t *lfs, const struct lfs_config *cfg) {
4918 int err = LFS_LOCK(cfg);
4919 if (err) {
4920 return err;
4921 }
4922 LFS_TRACE("lfs_format(%p, %p {.context=%p, "
4923 ".read=%p, .prog=%p, .erase=%p, .sync=%p, "
4924 ".read_size=%"PRIu32", .prog_size=%"PRIu32", "
4925 ".block_size=%"PRIu32", .block_count=%"PRIu32", "
4926 ".block_cycles=%"PRIu32", .cache_size=%"PRIu32", "
4927 ".lookahead_size=%"PRIu32", .read_buffer=%p, "
4928 ".prog_buffer=%p, .lookahead_buffer=%p, "
4929 ".name_max=%"PRIu32", .file_max=%"PRIu32", "
4930 ".attr_max=%"PRIu32"})",
4931 (void*)lfs, (void*)cfg, cfg->context,
4932 (void*)(uintptr_t)cfg->read, (void*)(uintptr_t)cfg->prog,
4933 (void*)(uintptr_t)cfg->erase, (void*)(uintptr_t)cfg->sync,
4934 cfg->read_size, cfg->prog_size, cfg->block_size, cfg->block_count,
4935 cfg->block_cycles, cfg->cache_size, cfg->lookahead_size,
4936 cfg->read_buffer, cfg->prog_buffer, cfg->lookahead_buffer,
4937 cfg->name_max, cfg->file_max, cfg->attr_max);
4938
4939 err = lfs_rawformat(lfs, cfg);
4940
4941 LFS_TRACE("lfs_format -> %d", err);
4942 LFS_UNLOCK(cfg);
4943 return err;
4944 }
4945 #endif
4946
4947 int lfs_mount(lfs_t *lfs, const struct lfs_config *cfg) {
4948 int err = LFS_LOCK(cfg);
4949 if (err) {
4950 return err;
4951 }
4952 LFS_TRACE("lfs_mount(%p, %p {.context=%p, "
4953 ".read=%p, .prog=%p, .erase=%p, .sync=%p, "
4954 ".read_size=%"PRIu32", .prog_size=%"PRIu32", "
4955 ".block_size=%"PRIu32", .block_count=%"PRIu32", "
4956 ".block_cycles=%"PRIu32", .cache_size=%"PRIu32", "
4957 ".lookahead_size=%"PRIu32", .read_buffer=%p, "
4958 ".prog_buffer=%p, .lookahead_buffer=%p, "
4959 ".name_max=%"PRIu32", .file_max=%"PRIu32", "
4960 ".attr_max=%"PRIu32"})",
4961 (void*)lfs, (void*)cfg, cfg->context,
4962 (void*)(uintptr_t)cfg->read, (void*)(uintptr_t)cfg->prog,
4963 (void*)(uintptr_t)cfg->erase, (void*)(uintptr_t)cfg->sync,
4964 cfg->read_size, cfg->prog_size, cfg->block_size, cfg->block_count,
4965 cfg->block_cycles, cfg->cache_size, cfg->lookahead_size,
4966 cfg->read_buffer, cfg->prog_buffer, cfg->lookahead_buffer,
4967 cfg->name_max, cfg->file_max, cfg->attr_max);
4968
4969 err = lfs_rawmount(lfs, cfg);
4970
4971 LFS_TRACE("lfs_mount -> %d", err);
4972 LFS_UNLOCK(cfg);
4973 return err;
4974 }
4975
4976 int lfs_unmount(lfs_t *lfs) {
4977 int err = LFS_LOCK(lfs->cfg);
4978 if (err) {
4979 return err;
4980 }
4981 LFS_TRACE("lfs_unmount(%p)", (void*)lfs);
4982
4983 err = lfs_rawunmount(lfs);
4984
4985 LFS_TRACE("lfs_unmount -> %d", err);
4986 LFS_UNLOCK(lfs->cfg);
4987 return err;
4988 }
4989
4990 #ifndef LFS_READONLY
4991 int lfs_remove(lfs_t *lfs, const char *path) {
4992 int err = LFS_LOCK(lfs->cfg);
4993 if (err) {
4994 return err;
4995 }
4996 LFS_TRACE("lfs_remove(%p, \"%s\")", (void*)lfs, path);
4997
4998 err = lfs_rawremove(lfs, path);
4999
5000 LFS_TRACE("lfs_remove -> %d", err);
5001 LFS_UNLOCK(lfs->cfg);
5002 return err;
5003 }
5004 #endif
5005
5006 #ifndef LFS_READONLY
5007 int lfs_rename(lfs_t *lfs, const char *oldpath, const char *newpath) {
5008 int err = LFS_LOCK(lfs->cfg);
5009 if (err) {
5010 return err;
5011 }
5012 LFS_TRACE("lfs_rename(%p, \"%s\", \"%s\")", (void*)lfs, oldpath, newpath);
5013
5014 err = lfs_rawrename(lfs, oldpath, newpath);
5015
5016 LFS_TRACE("lfs_rename -> %d", err);
5017 LFS_UNLOCK(lfs->cfg);
5018 return err;
5019 }
5020 #endif
5021
5022 int lfs_stat(lfs_t *lfs, const char *path, struct lfs_info *info) {
5023 int err = LFS_LOCK(lfs->cfg);
5024 if (err) {
5025 return err;
5026 }
5027 LFS_TRACE("lfs_stat(%p, \"%s\", %p)", (void*)lfs, path, (void*)info);
5028
5029 err = lfs_rawstat(lfs, path, info);
5030
5031 LFS_TRACE("lfs_stat -> %d", err);
5032 LFS_UNLOCK(lfs->cfg);
5033 return err;
5034 }
5035
5036 lfs_ssize_t lfs_getattr(lfs_t *lfs, const char *path,
5037 uint8_t type, void *buffer, lfs_size_t size) {
5038 int err = LFS_LOCK(lfs->cfg);
5039 if (err) {
5040 return err;
5041 }
5042 LFS_TRACE("lfs_getattr(%p, \"%s\", %"PRIu8", %p, %"PRIu32")",
5043 (void*)lfs, path, type, buffer, size);
5044
5045 lfs_ssize_t res = lfs_rawgetattr(lfs, path, type, buffer, size);
5046
5047 LFS_TRACE("lfs_getattr -> %"PRId32, res);
5048 LFS_UNLOCK(lfs->cfg);
5049 return res;
5050 }
5051
5052 #ifndef LFS_READONLY
5053 int lfs_setattr(lfs_t *lfs, const char *path,
5054 uint8_t type, const void *buffer, lfs_size_t size) {
5055 int err = LFS_LOCK(lfs->cfg);
5056 if (err) {
5057 return err;
5058 }
5059 LFS_TRACE("lfs_setattr(%p, \"%s\", %"PRIu8", %p, %"PRIu32")",
5060 (void*)lfs, path, type, buffer, size);
5061
5062 err = lfs_rawsetattr(lfs, path, type, buffer, size);
5063
5064 LFS_TRACE("lfs_setattr -> %d", err);
5065 LFS_UNLOCK(lfs->cfg);
5066 return err;
5067 }
5068 #endif
5069
5070 #ifndef LFS_READONLY
5071 int lfs_removeattr(lfs_t *lfs, const char *path, uint8_t type) {
5072 int err = LFS_LOCK(lfs->cfg);
5073 if (err) {
5074 return err;
5075 }
5076 LFS_TRACE("lfs_removeattr(%p, \"%s\", %"PRIu8")", (void*)lfs, path, type);
5077
5078 err = lfs_rawremoveattr(lfs, path, type);
5079
5080 LFS_TRACE("lfs_removeattr -> %d", err);
5081 LFS_UNLOCK(lfs->cfg);
5082 return err;
5083 }
5084 #endif
5085
5086 int lfs_file_open(lfs_t *lfs, lfs_file_t *file, const char *path, int flags) {
5087 int err = LFS_LOCK(lfs->cfg);
5088 if (err) {
5089 return err;
5090 }
5091 LFS_TRACE("lfs_file_open(%p, %p, \"%s\", %x)",
5092 (void*)lfs, (void*)file, path, flags);
5093 LFS_ASSERT(!lfs_mlist_isopen(lfs->mlist, (struct lfs_mlist*)file));
5094
5095 err = lfs_file_rawopen(lfs, file, path, flags);
5096
5097 LFS_TRACE("lfs_file_open -> %d", err);
5098 LFS_UNLOCK(lfs->cfg);
5099 return err;
5100 }
5101
5102 int lfs_file_opencfg(lfs_t *lfs, lfs_file_t *file,
5103 const char *path, int flags,
5104 const struct lfs_file_config *cfg) {
5105 int err = LFS_LOCK(lfs->cfg);
5106 if (err) {
5107 return err;
5108 }
5109 LFS_TRACE("lfs_file_opencfg(%p, %p, \"%s\", %x, %p {"
5110 ".buffer=%p, .attrs=%p, .attr_count=%"PRIu32"})",
5111 (void*)lfs, (void*)file, path, flags,
5112 (void*)cfg, cfg->buffer, (void*)cfg->attrs, cfg->attr_count);
5113 LFS_ASSERT(!lfs_mlist_isopen(lfs->mlist, (struct lfs_mlist*)file));
5114
5115 err = lfs_file_rawopencfg(lfs, file, path, flags, cfg);
5116
5117 LFS_TRACE("lfs_file_opencfg -> %d", err);
5118 LFS_UNLOCK(lfs->cfg);
5119 return err;
5120 }
5121
5122 int lfs_file_close(lfs_t *lfs, lfs_file_t *file) {
5123 int err = LFS_LOCK(lfs->cfg);
5124 if (err) {
5125 return err;
5126 }
5127 LFS_TRACE("lfs_file_close(%p, %p)", (void*)lfs, (void*)file);
5128 LFS_ASSERT(lfs_mlist_isopen(lfs->mlist, (struct lfs_mlist*)file));
5129
5130 err = lfs_file_rawclose(lfs, file);
5131
5132 LFS_TRACE("lfs_file_close -> %d", err);
5133 LFS_UNLOCK(lfs->cfg);
5134 return err;
5135 }
5136
5137 #ifndef LFS_READONLY
5138 int lfs_file_sync(lfs_t *lfs, lfs_file_t *file) {
5139 int err = LFS_LOCK(lfs->cfg);
5140 if (err) {
5141 return err;
5142 }
5143 LFS_TRACE("lfs_file_sync(%p, %p)", (void*)lfs, (void*)file);
5144 LFS_ASSERT(lfs_mlist_isopen(lfs->mlist, (struct lfs_mlist*)file));
5145
5146 err = lfs_file_rawsync(lfs, file);
5147
5148 LFS_TRACE("lfs_file_sync -> %d", err);
5149 LFS_UNLOCK(lfs->cfg);
5150 return err;
5151 }
5152 #endif
5153
5154 lfs_ssize_t lfs_file_read(lfs_t *lfs, lfs_file_t *file,
5155 void *buffer, lfs_size_t size) {
5156 int err = LFS_LOCK(lfs->cfg);
5157 if (err) {
5158 return err;
5159 }
5160 LFS_TRACE("lfs_file_read(%p, %p, %p, %"PRIu32")",
5161 (void*)lfs, (void*)file, buffer, size);
5162 LFS_ASSERT(lfs_mlist_isopen(lfs->mlist, (struct lfs_mlist*)file));
5163
5164 lfs_ssize_t res = lfs_file_rawread(lfs, file, buffer, size);
5165
5166 LFS_TRACE("lfs_file_read -> %"PRId32, res);
5167 LFS_UNLOCK(lfs->cfg);
5168 return res;
5169 }
5170
5171 #ifndef LFS_READONLY
5172 lfs_ssize_t lfs_file_write(lfs_t *lfs, lfs_file_t *file,
5173 const void *buffer, lfs_size_t size) {
5174 int err = LFS_LOCK(lfs->cfg);
5175 if (err) {
5176 return err;
5177 }
5178 LFS_TRACE("lfs_file_write(%p, %p, %p, %"PRIu32")",
5179 (void*)lfs, (void*)file, buffer, size);
5180 LFS_ASSERT(lfs_mlist_isopen(lfs->mlist, (struct lfs_mlist*)file));
5181
5182 lfs_ssize_t res = lfs_file_rawwrite(lfs, file, buffer, size);
5183
5184 LFS_TRACE("lfs_file_write -> %"PRId32, res);
5185 LFS_UNLOCK(lfs->cfg);
5186 return res;
5187 }
5188 #endif
5189
5190 lfs_soff_t lfs_file_seek(lfs_t *lfs, lfs_file_t *file,
5191 lfs_soff_t off, int whence) {
5192 int err = LFS_LOCK(lfs->cfg);
5193 if (err) {
5194 return err;
5195 }
5196 LFS_TRACE("lfs_file_seek(%p, %p, %"PRId32", %d)",
5197 (void*)lfs, (void*)file, off, whence);
5198 LFS_ASSERT(lfs_mlist_isopen(lfs->mlist, (struct lfs_mlist*)file));
5199
5200 lfs_soff_t res = lfs_file_rawseek(lfs, file, off, whence);
5201
5202 LFS_TRACE("lfs_file_seek -> %"PRId32, res);
5203 LFS_UNLOCK(lfs->cfg);
5204 return res;
5205 }
5206
5207 #ifndef LFS_READONLY
5208 int lfs_file_truncate(lfs_t *lfs, lfs_file_t *file, lfs_off_t size) {
5209 int err = LFS_LOCK(lfs->cfg);
5210 if (err) {
5211 return err;
5212 }
5213 LFS_TRACE("lfs_file_truncate(%p, %p, %"PRIu32")",
5214 (void*)lfs, (void*)file, size);
5215 LFS_ASSERT(lfs_mlist_isopen(lfs->mlist, (struct lfs_mlist*)file));
5216
5217 err = lfs_file_rawtruncate(lfs, file, size);
5218
5219 LFS_TRACE("lfs_file_truncate -> %d", err);
5220 LFS_UNLOCK(lfs->cfg);
5221 return err;
5222 }
5223 #endif
5224
5225 lfs_soff_t lfs_file_tell(lfs_t *lfs, lfs_file_t *file) {
5226 int err = LFS_LOCK(lfs->cfg);
5227 if (err) {
5228 return err;
5229 }
5230 LFS_TRACE("lfs_file_tell(%p, %p)", (void*)lfs, (void*)file);
5231 LFS_ASSERT(lfs_mlist_isopen(lfs->mlist, (struct lfs_mlist*)file));
5232
5233 lfs_soff_t res = lfs_file_rawtell(lfs, file);
5234
5235 LFS_TRACE("lfs_file_tell -> %"PRId32, res);
5236 LFS_UNLOCK(lfs->cfg);
5237 return res;
5238 }
5239
5240 int lfs_file_rewind(lfs_t *lfs, lfs_file_t *file) {
5241 int err = LFS_LOCK(lfs->cfg);
5242 if (err) {
5243 return err;
5244 }
5245 LFS_TRACE("lfs_file_rewind(%p, %p)", (void*)lfs, (void*)file);
5246
5247 err = lfs_file_rawrewind(lfs, file);
5248
5249 LFS_TRACE("lfs_file_rewind -> %d", err);
5250 LFS_UNLOCK(lfs->cfg);
5251 return err;
5252 }
5253
5254 lfs_soff_t lfs_file_size(lfs_t *lfs, lfs_file_t *file) {
5255 int err = LFS_LOCK(lfs->cfg);
5256 if (err) {
5257 return err;
5258 }
5259 LFS_TRACE("lfs_file_size(%p, %p)", (void*)lfs, (void*)file);
5260 LFS_ASSERT(lfs_mlist_isopen(lfs->mlist, (struct lfs_mlist*)file));
5261
5262 lfs_soff_t res = lfs_file_rawsize(lfs, file);
5263
5264 LFS_TRACE("lfs_file_size -> %"PRId32, res);
5265 LFS_UNLOCK(lfs->cfg);
5266 return res;
5267 }
5268
5269 #ifndef LFS_READONLY
5270 int lfs_mkdir(lfs_t *lfs, const char *path) {
5271 int err = LFS_LOCK(lfs->cfg);
5272 if (err) {
5273 return err;
5274 }
5275 LFS_TRACE("lfs_mkdir(%p, \"%s\")", (void*)lfs, path);
5276
5277 err = lfs_rawmkdir(lfs, path);
5278
5279 LFS_TRACE("lfs_mkdir -> %d", err);
5280 LFS_UNLOCK(lfs->cfg);
5281 return err;
5282 }
5283 #endif
5284
5285 int lfs_dir_open(lfs_t *lfs, lfs_dir_t *dir, const char *path) {
5286 int err = LFS_LOCK(lfs->cfg);
5287 if (err) {
5288 return err;
5289 }
5290 LFS_TRACE("lfs_dir_open(%p, %p, \"%s\")", (void*)lfs, (void*)dir, path);
5291 LFS_ASSERT(!lfs_mlist_isopen(lfs->mlist, (struct lfs_mlist*)dir));
5292
5293 err = lfs_dir_rawopen(lfs, dir, path);
5294
5295 LFS_TRACE("lfs_dir_open -> %d", err);
5296 LFS_UNLOCK(lfs->cfg);
5297 return err;
5298 }
5299
5300 int lfs_dir_close(lfs_t *lfs, lfs_dir_t *dir) {
5301 int err = LFS_LOCK(lfs->cfg);
5302 if (err) {
5303 return err;
5304 }
5305 LFS_TRACE("lfs_dir_close(%p, %p)", (void*)lfs, (void*)dir);
5306
5307 err = lfs_dir_rawclose(lfs, dir);
5308
5309 LFS_TRACE("lfs_dir_close -> %d", err);
5310 LFS_UNLOCK(lfs->cfg);
5311 return err;
5312 }
5313
5314 int lfs_dir_read(lfs_t *lfs, lfs_dir_t *dir, struct lfs_info *info) {
5315 int err = LFS_LOCK(lfs->cfg);
5316 if (err) {
5317 return err;
5318 }
5319 LFS_TRACE("lfs_dir_read(%p, %p, %p)",
5320 (void*)lfs, (void*)dir, (void*)info);
5321
5322 err = lfs_dir_rawread(lfs, dir, info);
5323
5324 LFS_TRACE("lfs_dir_read -> %d", err);
5325 LFS_UNLOCK(lfs->cfg);
5326 return err;
5327 }
5328
5329 int lfs_dir_seek(lfs_t *lfs, lfs_dir_t *dir, lfs_off_t off) {
5330 int err = LFS_LOCK(lfs->cfg);
5331 if (err) {
5332 return err;
5333 }
5334 LFS_TRACE("lfs_dir_seek(%p, %p, %"PRIu32")",
5335 (void*)lfs, (void*)dir, off);
5336
5337 err = lfs_dir_rawseek(lfs, dir, off);
5338
5339 LFS_TRACE("lfs_dir_seek -> %d", err);
5340 LFS_UNLOCK(lfs->cfg);
5341 return err;
5342 }
5343
5344 lfs_soff_t lfs_dir_tell(lfs_t *lfs, lfs_dir_t *dir) {
5345 int err = LFS_LOCK(lfs->cfg);
5346 if (err) {
5347 return err;
5348 }
5349 LFS_TRACE("lfs_dir_tell(%p, %p)", (void*)lfs, (void*)dir);
5350
5351 lfs_soff_t res = lfs_dir_rawtell(lfs, dir);
5352
5353 LFS_TRACE("lfs_dir_tell -> %"PRId32, res);
5354 LFS_UNLOCK(lfs->cfg);
5355 return res;
5356 }
5357
5358 int lfs_dir_rewind(lfs_t *lfs, lfs_dir_t *dir) {
5359 int err = LFS_LOCK(lfs->cfg);
5360 if (err) {
5361 return err;
5362 }
5363 LFS_TRACE("lfs_dir_rewind(%p, %p)", (void*)lfs, (void*)dir);
5364
5365 err = lfs_dir_rawrewind(lfs, dir);
5366
5367 LFS_TRACE("lfs_dir_rewind -> %d", err);
5368 LFS_UNLOCK(lfs->cfg);
5369 return err;
5370 }
5371
5372 lfs_ssize_t lfs_fs_size(lfs_t *lfs) {
5373 int err = LFS_LOCK(lfs->cfg);
5374 if (err) {
5375 return err;
5376 }
5377 LFS_TRACE("lfs_fs_size(%p)", (void*)lfs);
5378
5379 lfs_ssize_t res = lfs_fs_rawsize(lfs);
5380
5381 LFS_TRACE("lfs_fs_size -> %"PRId32, res);
5382 LFS_UNLOCK(lfs->cfg);
5383 return res;
5384 }
5385
5386 int lfs_fs_traverse(lfs_t *lfs, int (*cb)(void *, lfs_block_t), void *data) {
5387 int err = LFS_LOCK(lfs->cfg);
5388 if (err) {
5389 return err;
5390 }
5391 LFS_TRACE("lfs_fs_traverse(%p, %p, %p)",
5392 (void*)lfs, (void*)(uintptr_t)cb, data);
5393
5394 err = lfs_fs_rawtraverse(lfs, cb, data, true);
5395
5396 LFS_TRACE("lfs_fs_traverse -> %d", err);
5397 LFS_UNLOCK(lfs->cfg);
5398 return err;
5399 }
5400
#ifdef LFS_MIGRATE
// Public wrapper for lfs_rawmigrate: lock, trace the full config, delegate.
int lfs_migrate(lfs_t *lfs, const struct lfs_config *cfg) {
    int rc = LFS_LOCK(cfg);
    if (rc != 0) {
        return rc;
    }
    LFS_TRACE("lfs_migrate(%p, %p {.context=%p, "
                ".read=%p, .prog=%p, .erase=%p, .sync=%p, "
                ".read_size=%"PRIu32", .prog_size=%"PRIu32", "
                ".block_size=%"PRIu32", .block_count=%"PRIu32", "
                ".block_cycles=%"PRIu32", .cache_size=%"PRIu32", "
                ".lookahead_size=%"PRIu32", .read_buffer=%p, "
                ".prog_buffer=%p, .lookahead_buffer=%p, "
                ".name_max=%"PRIu32", .file_max=%"PRIu32", "
                ".attr_max=%"PRIu32"})",
            (void*)lfs, (void*)cfg, cfg->context,
            (void*)(uintptr_t)cfg->read, (void*)(uintptr_t)cfg->prog,
            (void*)(uintptr_t)cfg->erase, (void*)(uintptr_t)cfg->sync,
            cfg->read_size, cfg->prog_size, cfg->block_size, cfg->block_count,
            cfg->block_cycles, cfg->cache_size, cfg->lookahead_size,
            cfg->read_buffer, cfg->prog_buffer, cfg->lookahead_buffer,
            cfg->name_max, cfg->file_max, cfg->attr_max);

    rc = lfs_rawmigrate(lfs, cfg);

    LFS_TRACE("lfs_migrate -> %d", rc);
    LFS_UNLOCK(cfg);
    return rc;
}
#endif
5431
5432