/*
 * JFFS2 -- Journalling Flash File System, Version 2.
 *
 * Copyright © 2001-2007 Red Hat, Inc.
 *
 * Created by David Woodhouse <dwmw2@infradead.org>
 *
 * For licensing information, see the file 'LICENCE' in this directory.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/mtd/mtd.h>
#include <linux/compiler.h>
#include <linux/sched.h> /* For cond_resched() */
#include "nodelist.h"
#include "debug.h"

/**
 * jffs2_reserve_space - request physical space to write nodes to flash
 * @c: superblock info
 * @minsize: Minimum acceptable size of allocation
 * @len: Returned value of allocation length
 * @prio: Allocation type - ALLOC_{NORMAL,DELETION}
 * @sumsize: Summary size requested, or JFFS2_SUMMARY_NOSUM_SIZE if no
 *	summary is to be generated
 *
 * Requests a block of physical space on the flash. Returns zero for success
 * and puts the length actually available into @len, or returns -ENOSPC or
 * another error if appropriate.
 *
 * If it returns zero, jffs2_reserve_space() also downs the per-filesystem
 * allocation semaphore, to prevent more than one allocation from being
 * active at any time. The semaphore is later released by
 * jffs2_complete_reservation().
 *
 * jffs2_reserve_space() may trigger garbage collection in order to make room
 * for the requested allocation.
 */

static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
				  uint32_t *len, uint32_t sumsize);

int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
			uint32_t *len, int prio, uint32_t sumsize)
{
	int ret = -EAGAIN;
	int blocksneeded = c->resv_blocks_write;
	/* Pad the request to the 4-byte output granularity */
	minsize = PAD(minsize);

	jffs2_dbg(1, "%s(): Requested 0x%x bytes\n", __func__, minsize);
	mutex_lock(&c->alloc_sem);

	jffs2_dbg(1, "%s(): alloc sem got\n", __func__);

	spin_lock(&c->erase_completion_lock);

	/* this needs a little more thought (true <tglx> :)) */
	while(ret == -EAGAIN) {
		while(c->nr_free_blocks + c->nr_erasing_blocks < blocksneeded) {
			uint32_t dirty, avail;

			/* calculate real dirty size
			 * dirty_size contains blocks on erase_pending_list
			 * those blocks are counted in c->nr_erasing_blocks.
			 * If one block is actually erased, it is no longer counted as dirty_space
			 * but it is still counted in c->nr_erasing_blocks, so we add c->erasing_size
			 * and subtract c->nr_erasing_blocks * c->sector_size again.
			 * Blocks on erasable_list are counted as dirty_size, but not in c->nr_erasing_blocks.
			 * This helps us to force gc and eventually pick a clean block to spread the load.
			 * We add unchecked_size here, as we hopefully will find some space to use.
			 * This will affect the sum only once, as gc first finishes checking
			 * of nodes.
			 */
			dirty = c->dirty_size + c->erasing_size - c->nr_erasing_blocks * c->sector_size + c->unchecked_size;
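			/* Worked example (illustrative numbers): with 64KiB
			 * sectors, one fully dirty block queued on the
			 * erase_pending_list (in dirty_size, counted in
			 * nr_erasing_blocks) and one block mid-erase (in
			 * erasing_size, also counted in nr_erasing_blocks)
			 * cancel out of the sum entirely, leaving just the
			 * partial dirty space plus the unchecked space. */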
			if (dirty < c->nospc_dirty_size) {
				if (prio == ALLOC_DELETION && c->nr_free_blocks + c->nr_erasing_blocks >= c->resv_blocks_deletion) {
					jffs2_dbg(1, "%s(): Low on dirty space to GC, but it's a deletion. Allowing...\n",
						  __func__);
					break;
				}
				jffs2_dbg(1, "dirty size 0x%08x (including unchecked_size 0x%08x) < nospc_dirty_size 0x%08x, returning -ENOSPC\n",
					  dirty, c->unchecked_size,
					  c->nospc_dirty_size);

				spin_unlock(&c->erase_completion_lock);
				mutex_unlock(&c->alloc_sem);
				return -ENOSPC;
			}

			/* Calc possibly available space. Possibly available means that we
			 * don't know whether the unchecked size contains obsoleted nodes, which
			 * could give us some more usable space. This will affect the sum only
			 * once, as gc first finishes checking of nodes.
			 * Return -ENOSPC if the maximum possibly available space is less than
			 * or equal to blocksneeded * sector_size.
			 * This prevents endless gc looping on a nearly full filesystem, even
			 * if the check above passes.
			 */
			avail = c->free_size + c->dirty_size + c->erasing_size + c->unchecked_size;
			if ((avail / c->sector_size) <= blocksneeded) {
				if (prio == ALLOC_DELETION && c->nr_free_blocks + c->nr_erasing_blocks >= c->resv_blocks_deletion) {
					jffs2_dbg(1, "%s(): Low on possibly available space, but it's a deletion. Allowing...\n",
						  __func__);
					break;
				}

				jffs2_dbg(1, "max. available size 0x%08x < blocksneeded * sector_size 0x%08x, returning -ENOSPC\n",
					  avail, blocksneeded * c->sector_size);
				spin_unlock(&c->erase_completion_lock);
				mutex_unlock(&c->alloc_sem);
				return -ENOSPC;
			}
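			/* Illustrative numbers: with 64KiB sectors and
			 * resv_blocks_write == 5, more than 5 * 0x10000 bytes
			 * of free + dirty + erasing + unchecked space must
			 * remain, or no amount of GC could ever satisfy the
			 * reservation. */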

			mutex_unlock(&c->alloc_sem);

			jffs2_dbg(1, "Triggering GC pass. nr_free_blocks %d, nr_erasing_blocks %d, free_size 0x%08x, dirty_size 0x%08x, wasted_size 0x%08x, used_size 0x%08x, erasing_size 0x%08x, bad_size 0x%08x (total 0x%08x of 0x%08x)\n",
				  c->nr_free_blocks, c->nr_erasing_blocks,
				  c->free_size, c->dirty_size, c->wasted_size,
				  c->used_size, c->erasing_size, c->bad_size,
				  c->free_size + c->dirty_size +
				  c->wasted_size + c->used_size +
				  c->erasing_size + c->bad_size,
				  c->flash_size);
			spin_unlock(&c->erase_completion_lock);

			ret = jffs2_garbage_collect_pass(c);

			if (ret == -EAGAIN) {
				spin_lock(&c->erase_completion_lock);
				if (c->nr_erasing_blocks &&
				    list_empty(&c->erase_pending_list) &&
				    list_empty(&c->erase_complete_list)) {
					DECLARE_WAITQUEUE(wait, current);
					set_current_state(TASK_UNINTERRUPTIBLE);
					add_wait_queue(&c->erase_wait, &wait);
					jffs2_dbg(1, "%s waiting for erase to complete\n",
						  __func__);
					spin_unlock(&c->erase_completion_lock);

					schedule();
					/* Remove ourselves from the wait queue
					   again once the erase has woken us */
					remove_wait_queue(&c->erase_wait, &wait);
				} else
					spin_unlock(&c->erase_completion_lock);
			} else if (ret)
				return ret;

			cond_resched();

			if (signal_pending(current))
				return -EINTR;

			mutex_lock(&c->alloc_sem);
			spin_lock(&c->erase_completion_lock);
		}

		ret = jffs2_do_reserve_space(c, minsize, len, sumsize);
		if (ret) {
			jffs2_dbg(1, "%s(): ret is %d\n", __func__, ret);
		}
	}
	spin_unlock(&c->erase_completion_lock);
	if (!ret)
		ret = jffs2_prealloc_raw_node_refs(c, c->nextblock, 1);
	if (ret)
		mutex_unlock(&c->alloc_sem);
	return ret;
}
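
/*
 * Illustrative sketch (compiled out): how a caller is expected to pair the
 * allocation functions in this file, loosely modelled on the real callers
 * in write.c. The helper name, the REF_NORMAL flag choice and the omission
 * of short-write recovery are assumptions made for the example, not a
 * prescription.
 */
#if 0
static int example_write_one_node(struct jffs2_sb_info *c,
				  struct jffs2_inode_cache *ic,
				  void *node, uint32_t nodelen)
{
	uint32_t alloclen, ofs;
	size_t retlen;
	int ret;

	/* On success this also takes c->alloc_sem */
	ret = jffs2_reserve_space(c, nodelen, &alloclen, ALLOC_NORMAL,
				  JFFS2_SUMMARY_NOSUM_SIZE);
	if (ret)
		return ret;

	/* The node goes at the current write point of c->nextblock */
	ofs = c->nextblock->offset + (c->sector_size - c->nextblock->free_size);

	ret = jffs2_flash_write(c, ofs, nodelen, &retlen, (char *)node);
	if (!ret && retlen == nodelen) {
		struct jffs2_raw_node_ref *ref;

		/* Report the node so the space accounting stays correct */
		ref = jffs2_add_physical_node_ref(c, ofs | REF_NORMAL,
						  PAD(nodelen), ic);
		if (IS_ERR(ref))
			ret = PTR_ERR(ref);
	}

	/* Always drop c->alloc_sem again; this may also kick the GC */
	jffs2_complete_reservation(c);
	return ret;
}
#endif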

int jffs2_reserve_space_gc(struct jffs2_sb_info *c, uint32_t minsize,
			   uint32_t *len, uint32_t sumsize)
{
	int ret = -EAGAIN;
	minsize = PAD(minsize);

	jffs2_dbg(1, "%s(): Requested 0x%x bytes\n", __func__, minsize);

	spin_lock(&c->erase_completion_lock);
	while(ret == -EAGAIN) {
		ret = jffs2_do_reserve_space(c, minsize, len, sumsize);
		if (ret) {
			jffs2_dbg(1, "%s(): looping, ret is %d\n",
				  __func__, ret);
		}
	}
	spin_unlock(&c->erase_completion_lock);
	if (!ret)
		ret = jffs2_prealloc_raw_node_refs(c, c->nextblock, 1);

	return ret;
}

/* Classify nextblock (clean, dirty or verydirty) and force selection of another one */

static void jffs2_close_nextblock(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
{

	if (c->nextblock == NULL) {
		jffs2_dbg(1, "%s(): Erase block at 0x%08x has already been placed in a list\n",
			  __func__, jeb->offset);
		return;
	}
	/* Check if we have a dirty block now, or if it was dirty already */
	if (ISDIRTY(jeb->wasted_size + jeb->dirty_size)) {
		c->dirty_size += jeb->wasted_size;
		c->wasted_size -= jeb->wasted_size;
		jeb->dirty_size += jeb->wasted_size;
		jeb->wasted_size = 0;
		if (VERYDIRTY(c, jeb->dirty_size)) {
			jffs2_dbg(1, "Adding full erase block at 0x%08x to very_dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
				  jeb->offset, jeb->free_size, jeb->dirty_size,
				  jeb->used_size);
			list_add_tail(&jeb->list, &c->very_dirty_list);
		} else {
			jffs2_dbg(1, "Adding full erase block at 0x%08x to dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
				  jeb->offset, jeb->free_size, jeb->dirty_size,
				  jeb->used_size);
			list_add_tail(&jeb->list, &c->dirty_list);
		}
	} else {
		jffs2_dbg(1, "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
			  jeb->offset, jeb->free_size, jeb->dirty_size,
			  jeb->used_size);
		list_add_tail(&jeb->list, &c->clean_list);
	}
	c->nextblock = NULL;

}

/* Select a new jeb for nextblock */

static int jffs2_find_nextblock(struct jffs2_sb_info *c)
{
	struct list_head *next;

	/* Take the next block off the 'free' list */

	if (list_empty(&c->free_list)) {

		if (!c->nr_erasing_blocks &&
		    !list_empty(&c->erasable_list)) {
			struct jffs2_eraseblock *ejeb;

			ejeb = list_entry(c->erasable_list.next, struct jffs2_eraseblock, list);
			list_move_tail(&ejeb->list, &c->erase_pending_list);
			c->nr_erasing_blocks++;
			jffs2_garbage_collect_trigger(c);
			jffs2_dbg(1, "%s(): Triggering erase of erasable block at 0x%08x\n",
				  __func__, ejeb->offset);
		}

		if (!c->nr_erasing_blocks &&
		    !list_empty(&c->erasable_pending_wbuf_list)) {
			jffs2_dbg(1, "%s(): Flushing write buffer\n",
				  __func__);
			/* c->nextblock is NULL, no update to c->nextblock allowed */
			spin_unlock(&c->erase_completion_lock);
			jffs2_flush_wbuf_pad(c);
			spin_lock(&c->erase_completion_lock);
			/* Have another go. It'll be on the erasable_list now */
			return -EAGAIN;
		}

		if (!c->nr_erasing_blocks) {
			/* Ouch. We're in GC, or we wouldn't have got here.
			   And there's no space left. At all. */
			pr_crit("Argh. No free space left for GC. nr_erasing_blocks is %d. nr_free_blocks is %d. (erasableempty: %s, erasingempty: %s, erasependingempty: %s)\n",
				c->nr_erasing_blocks, c->nr_free_blocks,
				list_empty(&c->erasable_list) ? "yes" : "no",
				list_empty(&c->erasing_list) ? "yes" : "no",
				list_empty(&c->erase_pending_list) ? "yes" : "no");
			return -ENOSPC;
		}

		spin_unlock(&c->erase_completion_lock);
		/* Don't wait for it; just erase one right now */
		jffs2_erase_pending_blocks(c, 1);
		spin_lock(&c->erase_completion_lock);

		/* An erase may have failed, decreasing the
		   amount of free space available. So we must
		   restart from the beginning */
		return -EAGAIN;
	}

	next = c->free_list.next;
	list_del(next);
	c->nextblock = list_entry(next, struct jffs2_eraseblock, list);
	c->nr_free_blocks--;

	jffs2_sum_reset_collected(c->summary); /* reset collected summary */

#ifdef CONFIG_JFFS2_FS_WRITEBUFFER
	/* adjust write buffer offset, else we get a non-contiguous write bug */
	if (!(c->wbuf_ofs % c->sector_size) && !c->wbuf_len)
		c->wbuf_ofs = 0xffffffff;
#endif

	jffs2_dbg(1, "%s(): new nextblock = 0x%08x\n",
		  __func__, c->nextblock->offset);

	return 0;
}

/* Called with alloc sem _and_ erase_completion_lock */
static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
				  uint32_t *len, uint32_t sumsize)
{
	struct jffs2_eraseblock *jeb = c->nextblock;
	uint32_t reserved_size;	/* for summary information at the end of the jeb */
	int ret;

 restart:
	reserved_size = 0;

	if (jffs2_sum_active() && (sumsize != JFFS2_SUMMARY_NOSUM_SIZE)) {
		/* NOSUM_SIZE means not to generate summary */

		if (jeb) {
			reserved_size = PAD(sumsize + c->summary->sum_size + JFFS2_SUMMARY_FRAME_SIZE);
			dbg_summary("minsize=%d, jeb->free=%d, "
				    "summary->size=%d, sumsize=%d\n",
				    minsize, jeb->free_size,
				    c->summary->sum_size, sumsize);
		}

		/* Is there enough space for writing out the current node, or do we
		   have to write out summary information now, close this jeb and
		   select a new nextblock? */
		if (jeb && (PAD(minsize) + PAD(c->summary->sum_size + sumsize +
					JFFS2_SUMMARY_FRAME_SIZE) > jeb->free_size)) {

			/* Has summary been disabled for this jeb? */
			if (jffs2_sum_is_disabled(c->summary)) {
				sumsize = JFFS2_SUMMARY_NOSUM_SIZE;
				goto restart;
			}

			/* Writing out the collected summary information */
			dbg_summary("generating summary for 0x%08x.\n", jeb->offset);
			ret = jffs2_sum_write_sumnode(c);

			if (ret)
				return ret;

			if (jffs2_sum_is_disabled(c->summary)) {
				/* jffs2_sum_write_sumnode() couldn't write out the
				   summary information; disable summary for this jeb
				   and free the collected information */
				sumsize = JFFS2_SUMMARY_NOSUM_SIZE;
				goto restart;
			}

			jffs2_close_nextblock(c, jeb);
			jeb = NULL;
			/* always keep a valid value in reserved_size */
			reserved_size = PAD(sumsize + c->summary->sum_size + JFFS2_SUMMARY_FRAME_SIZE);
		}
	} else {
		if (jeb && minsize > jeb->free_size) {
			uint32_t waste;

			/* Skip the end of this block and file it as having some dirty space */
			/* If there's a pending write to it, flush now */

			if (jffs2_wbuf_dirty(c)) {
				spin_unlock(&c->erase_completion_lock);
				jffs2_dbg(1, "%s(): Flushing write buffer\n",
					  __func__);
				jffs2_flush_wbuf_pad(c);
				spin_lock(&c->erase_completion_lock);
				jeb = c->nextblock;
				goto restart;
			}

			spin_unlock(&c->erase_completion_lock);

			ret = jffs2_prealloc_raw_node_refs(c, jeb, 1);

			/* Just lock it again and continue. Nothing much can change because
			   we hold c->alloc_sem anyway. In fact, it's not entirely clear why
			   we hold c->erase_completion_lock in the majority of this function...
			   but that's a question for another (more caffeine-rich) day. */
			spin_lock(&c->erase_completion_lock);

			if (ret)
				return ret;

			waste = jeb->free_size;
			jffs2_link_node_ref(c, jeb,
					    (jeb->offset + c->sector_size - waste) | REF_OBSOLETE,
					    waste, NULL);
			/* FIXME: that made it count as dirty. Convert to wasted */
			jeb->dirty_size -= waste;
			c->dirty_size -= waste;
			jeb->wasted_size += waste;
			c->wasted_size += waste;

			jffs2_close_nextblock(c, jeb);
			jeb = NULL;
		}
	}

	if (!jeb) {

		ret = jffs2_find_nextblock(c);
		if (ret)
			return ret;

		jeb = c->nextblock;

		if (jeb->free_size != c->sector_size - c->cleanmarker_size) {
			pr_warn("Eep. Block 0x%08x taken from free_list had free_size of 0x%08x!!\n",
				jeb->offset, jeb->free_size);
			goto restart;
		}
	}
	/* OK, jeb (==c->nextblock) is now pointing at a block which definitely has
	   enough space */
	*len = jeb->free_size - reserved_size;

	if (c->cleanmarker_size && jeb->used_size == c->cleanmarker_size &&
	    !jeb->first_node->next_in_ino) {
		/* Only node in it beforehand was a CLEANMARKER node (we think).
		   So mark it obsolete now that there's going to be another node
		   in the block. This will reduce used_size to zero but we've
		   already set c->nextblock so that jffs2_mark_node_obsolete()
		   won't try to refile it to the dirty_list.
		*/
		spin_unlock(&c->erase_completion_lock);
		jffs2_mark_node_obsolete(c, jeb->first_node);
		spin_lock(&c->erase_completion_lock);
	}

	jffs2_dbg(1, "%s(): Giving 0x%x bytes at 0x%x\n",
		  __func__,
		  *len, jeb->offset + (c->sector_size - jeb->free_size));
	return 0;
}

/**
 * jffs2_add_physical_node_ref - add a physical node reference to the list
 * @c: superblock info
 * @ofs: flash offset of the new node, with the REF_* status in the low two bits
 * @len: length of this physical node
 * @ic: inode cache to associate the new node with, if any
 *
 * Should only be used to report nodes for which space has been allocated
 * by jffs2_reserve_space.
 *
 * Must be called with the alloc_sem held.
 */

struct jffs2_raw_node_ref *jffs2_add_physical_node_ref(struct jffs2_sb_info *c,
						       uint32_t ofs, uint32_t len,
						       struct jffs2_inode_cache *ic)
{
	struct jffs2_eraseblock *jeb;
	struct jffs2_raw_node_ref *new;

	jeb = &c->blocks[ofs / c->sector_size];

	jffs2_dbg(1, "%s(): Node at 0x%x(%d), size 0x%x\n",
		  __func__, ofs & ~3, ofs & 3, len);
#if 1
	/* Allow non-obsolete nodes only to be added at the end of c->nextblock,
	   if c->nextblock is set. Note that wbuf.c will file obsolete nodes
	   even after refiling c->nextblock */
	if ((c->nextblock || ((ofs & 3) != REF_OBSOLETE))
	    && (jeb != c->nextblock || (ofs & ~3) != jeb->offset + (c->sector_size - jeb->free_size))) {
		pr_warn("argh. node added in wrong place at 0x%08x(%d)\n",
			ofs & ~3, ofs & 3);
		if (c->nextblock)
			pr_warn("nextblock 0x%08x", c->nextblock->offset);
		else
			pr_warn("No nextblock");
		pr_cont(", expected at %08x\n",
			jeb->offset + (c->sector_size - jeb->free_size));
		return ERR_PTR(-EINVAL);
	}
#endif
	spin_lock(&c->erase_completion_lock);

	new = jffs2_link_node_ref(c, jeb, ofs, len, ic);

	if (!jeb->free_size && !jeb->dirty_size && !ISDIRTY(jeb->wasted_size)) {
		/* If it lives on the dirty_list, jffs2_reserve_space will put it there */
		jffs2_dbg(1, "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
			  jeb->offset, jeb->free_size, jeb->dirty_size,
			  jeb->used_size);
		if (jffs2_wbuf_dirty(c)) {
			/* Flush the last write in the block if it's outstanding */
			spin_unlock(&c->erase_completion_lock);
			jffs2_flush_wbuf_pad(c);
			spin_lock(&c->erase_completion_lock);
		}

		list_add_tail(&jeb->list, &c->clean_list);
		c->nextblock = NULL;
	}
	jffs2_dbg_acct_sanity_check_nolock(c, jeb);
	jffs2_dbg_acct_paranoia_check_nolock(c, jeb);

	spin_unlock(&c->erase_completion_lock);

	return new;
}


void jffs2_complete_reservation(struct jffs2_sb_info *c)
{
	jffs2_dbg(1, "jffs2_complete_reservation()\n");
	spin_lock(&c->erase_completion_lock);
	jffs2_garbage_collect_trigger(c);
	spin_unlock(&c->erase_completion_lock);
	mutex_unlock(&c->alloc_sem);
}

static inline int on_list(struct list_head *obj, struct list_head *head)
{
	struct list_head *this;

	list_for_each(this, head) {
		if (this == obj) {
			jffs2_dbg(1, "%p is on list at %p\n", obj, head);
			return 1;
		}
	}
	return 0;
}

void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref)
{
	struct jffs2_eraseblock *jeb;
	int blocknr;
	struct jffs2_unknown_node n;
	int ret, addedsize;
	size_t retlen;
	uint32_t freed_len;

	if (unlikely(!ref)) {
		pr_notice("EEEEEK. jffs2_mark_node_obsolete called with NULL node\n");
		return;
	}
	if (ref_obsolete(ref)) {
		jffs2_dbg(1, "%s(): called with already obsolete node at 0x%08x\n",
			  __func__, ref_offset(ref));
		return;
	}
	blocknr = ref->flash_offset / c->sector_size;
	if (blocknr >= c->nr_blocks) {
		pr_notice("raw node at 0x%08x is off the end of device!\n",
			  ref->flash_offset);
		BUG();
	}
	jeb = &c->blocks[blocknr];

	if (jffs2_can_mark_obsolete(c) && !jffs2_is_readonly(c) &&
	    !(c->flags & (JFFS2_SB_FLAG_SCANNING | JFFS2_SB_FLAG_BUILDING))) {
		/* Hm. This may confuse static lock analysis. If any of the above
		   three conditions is false, we're going to return from this
		   function without actually obliterating any nodes or freeing
		   any jffs2_raw_node_refs. So we don't need to stop erases from
		   happening, or protect against people holding an obsolete
		   jffs2_raw_node_ref without the erase_completion_lock. */
		mutex_lock(&c->erase_free_sem);
	}

	spin_lock(&c->erase_completion_lock);

	freed_len = ref_totlen(c, jeb, ref);

	if (ref_flags(ref) == REF_UNCHECKED) {
		D1(if (unlikely(jeb->unchecked_size < freed_len)) {
				pr_notice("raw unchecked node of size 0x%08x freed from erase block %d at 0x%08x, but unchecked_size was already 0x%08x\n",
					  freed_len, blocknr,
					  ref->flash_offset, jeb->unchecked_size);
			BUG();
		})
		jffs2_dbg(1, "Obsoleting previously unchecked node at 0x%08x of len %x\n",
			  ref_offset(ref), freed_len);
		jeb->unchecked_size -= freed_len;
		c->unchecked_size -= freed_len;
	} else {
		D1(if (unlikely(jeb->used_size < freed_len)) {
				pr_notice("raw node of size 0x%08x freed from erase block %d at 0x%08x, but used_size was already 0x%08x\n",
					  freed_len, blocknr,
					  ref->flash_offset, jeb->used_size);
			BUG();
		})
		jffs2_dbg(1, "Obsoleting node at 0x%08x of len %#x: ",
			  ref_offset(ref), freed_len);
		jeb->used_size -= freed_len;
		c->used_size -= freed_len;
	}

	/* Take care that wasted size is taken into account */
	if ((jeb->dirty_size || ISDIRTY(jeb->wasted_size + freed_len)) && jeb != c->nextblock) {
		jffs2_dbg(1, "Dirtying\n");
		addedsize = freed_len;
		jeb->dirty_size += freed_len;
		c->dirty_size += freed_len;

		/* Convert wasted space to dirty, if not a bad block */
		if (jeb->wasted_size) {
			if (on_list(&jeb->list, &c->bad_used_list)) {
				jffs2_dbg(1, "Leaving block at %08x on the bad_used_list\n",
					  jeb->offset);
				addedsize = 0; /* To fool the refiling code later */
			} else {
				jffs2_dbg(1, "Converting %d bytes of wasted space to dirty in block at %08x\n",
					  jeb->wasted_size, jeb->offset);
				addedsize += jeb->wasted_size;
				jeb->dirty_size += jeb->wasted_size;
				c->dirty_size += jeb->wasted_size;
				c->wasted_size -= jeb->wasted_size;
				jeb->wasted_size = 0;
			}
		}
	} else {
		jffs2_dbg(1, "Wasting\n");
		addedsize = 0;
		jeb->wasted_size += freed_len;
		c->wasted_size += freed_len;
	}
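	/* Example of the decision above (illustrative numbers): freeing a
	 * small node of a few dozen bytes in an otherwise clean block
	 * typically fails ISDIRTY() and is merely recorded as wasted space,
	 * while freeing a multi-KiB node, or any node in a block that
	 * already has dirty space, dirties the block and makes it a
	 * candidate for garbage collection. */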
	ref->flash_offset = ref_offset(ref) | REF_OBSOLETE;

	jffs2_dbg_acct_sanity_check_nolock(c, jeb);
	jffs2_dbg_acct_paranoia_check_nolock(c, jeb);

	if (c->flags & JFFS2_SB_FLAG_SCANNING) {
		/* Flash scanning is in progress. Don't muck about with the block
		   lists because they're not ready yet, and don't actually
		   obliterate nodes that look obsolete. If they weren't
		   marked obsolete on the flash at the time they _became_
		   obsolete, there was probably a reason for that. */
		spin_unlock(&c->erase_completion_lock);
		/* We didn't lock the erase_free_sem */
		return;
	}

	if (jeb == c->nextblock) {
		jffs2_dbg(2, "Not moving nextblock 0x%08x to dirty/erase_pending list\n",
			  jeb->offset);
	} else if (!jeb->used_size && !jeb->unchecked_size) {
		if (jeb == c->gcblock) {
			jffs2_dbg(1, "gcblock at 0x%08x completely dirtied. Clearing gcblock...\n",
				  jeb->offset);
			c->gcblock = NULL;
		} else {
			jffs2_dbg(1, "Eraseblock at 0x%08x completely dirtied. Removing from (dirty?) list...\n",
				  jeb->offset);
			list_del(&jeb->list);
		}
		if (jffs2_wbuf_dirty(c)) {
			jffs2_dbg(1, "...and adding to erasable_pending_wbuf_list\n");
			list_add_tail(&jeb->list, &c->erasable_pending_wbuf_list);
		} else {
			if (jiffies & 127) {
				/* Most of the time, we just erase it immediately. Otherwise we
				   spend ages scanning it on mount, etc. */
				jffs2_dbg(1, "...and adding to erase_pending_list\n");
				list_add_tail(&jeb->list, &c->erase_pending_list);
				c->nr_erasing_blocks++;
				jffs2_garbage_collect_trigger(c);
			} else {
				/* Sometimes, however, we leave it elsewhere so it doesn't get
				   immediately reused, and we spread the load a bit. */
				jffs2_dbg(1, "...and adding to erasable_list\n");
				list_add_tail(&jeb->list, &c->erasable_list);
			}
		}
		jffs2_dbg(1, "Done OK\n");
	} else if (jeb == c->gcblock) {
		jffs2_dbg(2, "Not moving gcblock 0x%08x to dirty_list\n",
			  jeb->offset);
	} else if (ISDIRTY(jeb->dirty_size) && !ISDIRTY(jeb->dirty_size - addedsize)) {
		jffs2_dbg(1, "Eraseblock at 0x%08x is freshly dirtied. Removing from clean list...\n",
			  jeb->offset);
		list_del(&jeb->list);
		jffs2_dbg(1, "...and adding to dirty_list\n");
		list_add_tail(&jeb->list, &c->dirty_list);
	} else if (VERYDIRTY(c, jeb->dirty_size) &&
		   !VERYDIRTY(c, jeb->dirty_size - addedsize)) {
		jffs2_dbg(1, "Eraseblock at 0x%08x is now very dirty. Removing from dirty list...\n",
			  jeb->offset);
		list_del(&jeb->list);
		jffs2_dbg(1, "...and adding to very_dirty_list\n");
		list_add_tail(&jeb->list, &c->very_dirty_list);
	} else {
		jffs2_dbg(1, "Eraseblock at 0x%08x not moved anywhere. (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
			  jeb->offset, jeb->free_size, jeb->dirty_size,
			  jeb->used_size);
	}


	spin_unlock(&c->erase_completion_lock);

	if (!jffs2_can_mark_obsolete(c) || jffs2_is_readonly(c) ||
	    (c->flags & JFFS2_SB_FLAG_BUILDING)) {
		/* We didn't lock the erase_free_sem */
		return;
	}

	/* The erase_free_sem is locked, and has been since before we marked the node obsolete
	   and potentially put its eraseblock onto the erase_pending_list. Thus, we know that
	   the block hasn't _already_ been erased, and that 'ref' itself hasn't been freed yet
	   by jffs2_free_jeb_node_refs() in erase.c. Which is nice. */

	jffs2_dbg(1, "obliterating obsoleted node at 0x%08x\n",
		  ref_offset(ref));
	ret = jffs2_flash_read(c, ref_offset(ref), sizeof(n), &retlen, (char *)&n);
	if (ret) {
		pr_warn("Read error reading from obsoleted node at 0x%08x: %d\n",
			ref_offset(ref), ret);
		goto out_erase_sem;
	}
	if (retlen != sizeof(n)) {
		pr_warn("Short read from obsoleted node at 0x%08x: %zd\n",
			ref_offset(ref), retlen);
		goto out_erase_sem;
	}
	if (PAD(je32_to_cpu(n.totlen)) != PAD(freed_len)) {
		pr_warn("Node totlen on flash (0x%08x) != totlen from node ref (0x%08x)\n",
			je32_to_cpu(n.totlen), freed_len);
		goto out_erase_sem;
	}
	if (!(je16_to_cpu(n.nodetype) & JFFS2_NODE_ACCURATE)) {
		jffs2_dbg(1, "Node at 0x%08x was already marked obsolete (nodetype 0x%04x)\n",
			  ref_offset(ref), je16_to_cpu(n.nodetype));
		goto out_erase_sem;
	}
	/* XXX FIXME: This is ugly now */
	n.nodetype = cpu_to_je16(je16_to_cpu(n.nodetype) & ~JFFS2_NODE_ACCURATE);
	ret = jffs2_flash_write(c, ref_offset(ref), sizeof(n), &retlen, (char *)&n);
	if (ret) {
		pr_warn("Write error in obliterating obsoleted node at 0x%08x: %d\n",
			ref_offset(ref), ret);
		goto out_erase_sem;
	}
	if (retlen != sizeof(n)) {
		pr_warn("Short write in obliterating obsoleted node at 0x%08x: %zd\n",
			ref_offset(ref), retlen);
		goto out_erase_sem;
	}

	/* Nodes which have been marked obsolete no longer need to be
	   associated with any inode. Remove them from the per-inode list.

	   Note we can't do this for NAND at the moment because we need
	   obsolete dirent nodes to stay on the lists, because of the
	   horridness in jffs2_garbage_collect_deletion_dirent(). Also
	   because we delete the inocache, and on NAND we need that to
	   stay around until all the nodes are actually erased, in order
	   to stop us from giving the same inode number to another newly
	   created inode. */
	if (ref->next_in_ino) {
		struct jffs2_inode_cache *ic;
		struct jffs2_raw_node_ref **p;

		spin_lock(&c->erase_completion_lock);

		ic = jffs2_raw_ref_to_ic(ref);
		for (p = &ic->nodes; (*p) != ref; p = &((*p)->next_in_ino))
			;

		*p = ref->next_in_ino;
		ref->next_in_ino = NULL;

		switch (ic->class) {
#ifdef CONFIG_JFFS2_FS_XATTR
		case RAWNODE_CLASS_XATTR_DATUM:
			jffs2_release_xattr_datum(c, (struct jffs2_xattr_datum *)ic);
			break;
		case RAWNODE_CLASS_XATTR_REF:
			jffs2_release_xattr_ref(c, (struct jffs2_xattr_ref *)ic);
			break;
#endif
		default:
			if (ic->nodes == (void *)ic && ic->pino_nlink == 0)
				jffs2_del_ino_cache(c, ic);
			break;
		}
		spin_unlock(&c->erase_completion_lock);
	}

 out_erase_sem:
	mutex_unlock(&c->erase_free_sem);
}


int jffs2_thread_should_wake(struct jffs2_sb_info *c)
{
	int ret = 0;
	uint32_t dirty;
	int nr_very_dirty = 0;
	struct jffs2_eraseblock *jeb;

	if (!list_empty(&c->erase_complete_list) ||
	    !list_empty(&c->erase_pending_list))
		return 1;

	if (c->unchecked_size) {
		jffs2_dbg(1, "jffs2_thread_should_wake(): unchecked_size %d, checked_ino #%d\n",
			  c->unchecked_size, c->checked_ino);
		return 1;
	}

	/* dirty_size contains blocks on erase_pending_list
	 * those blocks are counted in c->nr_erasing_blocks.
	 * If one block is actually erased, it is no longer counted as dirty_space
	 * but it is still counted in c->nr_erasing_blocks, so we add c->erasing_size
	 * and subtract c->nr_erasing_blocks * c->sector_size again.
	 * Blocks on erasable_list are counted as dirty_size, but not in c->nr_erasing_blocks.
	 * This helps us to force gc and eventually pick a clean block to spread the load.
	 */
	dirty = c->dirty_size + c->erasing_size - c->nr_erasing_blocks * c->sector_size;
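	/* Illustrative trigger (assumed numbers): with resv_blocks_gctrigger
	 * of 10 and 64KiB sectors, the GC thread is woken once fewer than
	 * ten blocks are free or erasing while more than nospc_dirty_size
	 * bytes of dirty space could still be reclaimed; the actual
	 * thresholds are computed per filesystem at mount time. */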

	if (c->nr_free_blocks + c->nr_erasing_blocks < c->resv_blocks_gctrigger &&
	    (dirty > c->nospc_dirty_size))
		ret = 1;

	list_for_each_entry(jeb, &c->very_dirty_list, list) {
		nr_very_dirty++;
		if (nr_very_dirty == c->vdirty_blocks_gctrigger) {
			ret = 1;
			/* In debug mode, actually go through and count them all */
			D1(continue);
			break;
		}
	}

	jffs2_dbg(1, "%s(): nr_free_blocks %d, nr_erasing_blocks %d, dirty_size 0x%x, vdirty_blocks %d: %s\n",
		  __func__, c->nr_free_blocks, c->nr_erasing_blocks,
		  c->dirty_size, nr_very_dirty, ret ? "yes" : "no");

	return ret;
}