Lines matching references to "c" (struct ubifs_info *) in fs/ubifs/lpt_commit.c, the UBIFS LPT (LEB properties tree) commit code.
33 static int dbg_populate_lsave(struct ubifs_info *c);
99 static int get_cnodes_to_commit(struct ubifs_info *c) in get_cnodes_to_commit() argument
104 if (!c->nroot) in get_cnodes_to_commit()
107 if (!test_bit(DIRTY_CNODE, &c->nroot->flags)) in get_cnodes_to_commit()
110 c->lpt_cnext = first_dirty_cnode(c->nroot); in get_cnodes_to_commit()
111 cnode = c->lpt_cnext; in get_cnodes_to_commit()
120 cnode->cnext = c->lpt_cnext; in get_cnodes_to_commit()
129 ubifs_assert(cnt == c->dirty_nn_cnt + c->dirty_pn_cnt); in get_cnodes_to_commit()
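get_cnodes_to_commit() gathers every dirty nnode/pnode into a singly linked commit list rooted at c->lpt_cnext and checks the count against c->dirty_nn_cnt + c->dirty_pn_cnt. A minimal sketch of that walk follows; it assumes a next_dirty_cnode() helper (not among the matches above) and omits the per-node copy-on-write flagging the kernel also does, so treat it as an illustration rather than the exact code.

    static int sketch_get_cnodes_to_commit(struct ubifs_info *c)
    {
        struct ubifs_cnode *cnode, *cnext;
        int cnt = 0;

        if (!c->nroot || !test_bit(DIRTY_CNODE, &c->nroot->flags))
            return 0;                        /* nothing dirty, no LPT commit */

        c->lpt_cnext = first_dirty_cnode(c->nroot);
        cnode = c->lpt_cnext;
        if (!cnode)
            return 0;
        cnt += 1;
        for (;;) {
            cnext = next_dirty_cnode(cnode); /* assumed helper */
            if (!cnext) {
                cnode->cnext = c->lpt_cnext; /* close the ring */
                break;
            }
            cnode->cnext = cnext;
            cnode = cnext;
            cnt += 1;
        }
        ubifs_assert(cnt == c->dirty_nn_cnt + c->dirty_pn_cnt);
        return cnt;
    }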
140 static void upd_ltab(struct ubifs_info *c, int lnum, int free, int dirty) in upd_ltab() argument
143 lnum, c->ltab[lnum - c->lpt_first].free, in upd_ltab()
144 c->ltab[lnum - c->lpt_first].dirty, free, dirty); in upd_ltab()
145 ubifs_assert(lnum >= c->lpt_first && lnum <= c->lpt_last); in upd_ltab()
146 c->ltab[lnum - c->lpt_first].free = free; in upd_ltab()
147 c->ltab[lnum - c->lpt_first].dirty += dirty; in upd_ltab()
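Note the asymmetry in upd_ltab(): the free value of an LPT LEB is overwritten, while dirty space only accumulates. A typical call site (the pattern repeated in layout_cnodes() below) closes a LEB like this, with everything past the aligned write boundary counted as free and the padding up to that boundary counted as dirt:

    /* illustrative fragment only: lnum, offs, alen as in layout_cnodes() */
    alen = ALIGN(offs, c->min_io_size);
    upd_ltab(c, lnum, c->leb_size - alen, alen - offs);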
160 static int alloc_lpt_leb(struct ubifs_info *c, int *lnum) in alloc_lpt_leb() argument
164 n = *lnum - c->lpt_first + 1; in alloc_lpt_leb()
165 for (i = n; i < c->lpt_lebs; i++) { in alloc_lpt_leb()
166 if (c->ltab[i].tgc || c->ltab[i].cmt) in alloc_lpt_leb()
168 if (c->ltab[i].free == c->leb_size) { in alloc_lpt_leb()
169 c->ltab[i].cmt = 1; in alloc_lpt_leb()
170 *lnum = i + c->lpt_first; in alloc_lpt_leb()
176 if (c->ltab[i].tgc || c->ltab[i].cmt) in alloc_lpt_leb()
178 if (c->ltab[i].free == c->leb_size) { in alloc_lpt_leb()
179 c->ltab[i].cmt = 1; in alloc_lpt_leb()
180 *lnum = i + c->lpt_first; in alloc_lpt_leb()
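alloc_lpt_leb() hands layout a completely empty LPT LEB, scanning forward from the last one used and wrapping around, skipping LEBs already claimed by trivial GC (tgc) or by this commit (cmt); the chosen LEB is flagged cmt so the write phase can find it again. A simplified sketch of the two-pass scan:

    static int sketch_alloc_lpt_leb(struct ubifs_info *c, int *lnum)
    {
        int i, n = *lnum - c->lpt_first + 1;      /* start after the current LEB */

        for (i = n; i < c->lpt_lebs; i++) {
            if (c->ltab[i].tgc || c->ltab[i].cmt)
                continue;                          /* already spoken for */
            if (c->ltab[i].free == c->leb_size) {  /* completely empty */
                c->ltab[i].cmt = 1;                /* reserve for this commit */
                *lnum = i + c->lpt_first;
                return 0;
            }
        }
        for (i = 0; i < n; i++) {                  /* wrap around */
            if (c->ltab[i].tgc || c->ltab[i].cmt)
                continue;
            if (c->ltab[i].free == c->leb_size) {
                c->ltab[i].cmt = 1;
                *lnum = i + c->lpt_first;
                return 0;
            }
        }
        return -ENOSPC;
    }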
193 static int layout_cnodes(struct ubifs_info *c) in layout_cnodes() argument
198 err = dbg_chk_lpt_sz(c, 0, 0); in layout_cnodes()
201 cnode = c->lpt_cnext; in layout_cnodes()
204 lnum = c->nhead_lnum; in layout_cnodes()
205 offs = c->nhead_offs; in layout_cnodes()
207 done_lsave = !c->big_lpt; in layout_cnodes()
209 if (!done_lsave && offs + c->lsave_sz <= c->leb_size) { in layout_cnodes()
211 c->lsave_lnum = lnum; in layout_cnodes()
212 c->lsave_offs = offs; in layout_cnodes()
213 offs += c->lsave_sz; in layout_cnodes()
214 dbg_chk_lpt_sz(c, 1, c->lsave_sz); in layout_cnodes()
217 if (offs + c->ltab_sz <= c->leb_size) { in layout_cnodes()
219 c->ltab_lnum = lnum; in layout_cnodes()
220 c->ltab_offs = offs; in layout_cnodes()
221 offs += c->ltab_sz; in layout_cnodes()
222 dbg_chk_lpt_sz(c, 1, c->ltab_sz); in layout_cnodes()
227 len = c->nnode_sz; in layout_cnodes()
228 c->dirty_nn_cnt -= 1; in layout_cnodes()
230 len = c->pnode_sz; in layout_cnodes()
231 c->dirty_pn_cnt -= 1; in layout_cnodes()
233 while (offs + len > c->leb_size) { in layout_cnodes()
234 alen = ALIGN(offs, c->min_io_size); in layout_cnodes()
235 upd_ltab(c, lnum, c->leb_size - alen, alen - offs); in layout_cnodes()
236 dbg_chk_lpt_sz(c, 2, c->leb_size - offs); in layout_cnodes()
237 err = alloc_lpt_leb(c, &lnum); in layout_cnodes()
241 ubifs_assert(lnum >= c->lpt_first && in layout_cnodes()
242 lnum <= c->lpt_last); in layout_cnodes()
246 c->lsave_lnum = lnum; in layout_cnodes()
247 c->lsave_offs = offs; in layout_cnodes()
248 offs += c->lsave_sz; in layout_cnodes()
249 dbg_chk_lpt_sz(c, 1, c->lsave_sz); in layout_cnodes()
254 c->ltab_lnum = lnum; in layout_cnodes()
255 c->ltab_offs = offs; in layout_cnodes()
256 offs += c->ltab_sz; in layout_cnodes()
257 dbg_chk_lpt_sz(c, 1, c->ltab_sz); in layout_cnodes()
266 c->lpt_lnum = lnum; in layout_cnodes()
267 c->lpt_offs = offs; in layout_cnodes()
270 dbg_chk_lpt_sz(c, 1, len); in layout_cnodes()
272 } while (cnode && cnode != c->lpt_cnext); in layout_cnodes()
276 if (offs + c->lsave_sz > c->leb_size) { in layout_cnodes()
277 alen = ALIGN(offs, c->min_io_size); in layout_cnodes()
278 upd_ltab(c, lnum, c->leb_size - alen, alen - offs); in layout_cnodes()
279 dbg_chk_lpt_sz(c, 2, c->leb_size - offs); in layout_cnodes()
280 err = alloc_lpt_leb(c, &lnum); in layout_cnodes()
284 ubifs_assert(lnum >= c->lpt_first && in layout_cnodes()
285 lnum <= c->lpt_last); in layout_cnodes()
288 c->lsave_lnum = lnum; in layout_cnodes()
289 c->lsave_offs = offs; in layout_cnodes()
290 offs += c->lsave_sz; in layout_cnodes()
291 dbg_chk_lpt_sz(c, 1, c->lsave_sz); in layout_cnodes()
296 if (offs + c->ltab_sz > c->leb_size) { in layout_cnodes()
297 alen = ALIGN(offs, c->min_io_size); in layout_cnodes()
298 upd_ltab(c, lnum, c->leb_size - alen, alen - offs); in layout_cnodes()
299 dbg_chk_lpt_sz(c, 2, c->leb_size - offs); in layout_cnodes()
300 err = alloc_lpt_leb(c, &lnum); in layout_cnodes()
304 ubifs_assert(lnum >= c->lpt_first && in layout_cnodes()
305 lnum <= c->lpt_last); in layout_cnodes()
307 c->ltab_lnum = lnum; in layout_cnodes()
308 c->ltab_offs = offs; in layout_cnodes()
309 offs += c->ltab_sz; in layout_cnodes()
310 dbg_chk_lpt_sz(c, 1, c->ltab_sz); in layout_cnodes()
313 alen = ALIGN(offs, c->min_io_size); in layout_cnodes()
314 upd_ltab(c, lnum, c->leb_size - alen, alen - offs); in layout_cnodes()
315 dbg_chk_lpt_sz(c, 4, alen - offs); in layout_cnodes()
316 err = dbg_chk_lpt_sz(c, 3, alen); in layout_cnodes()
324 ubifs_dump_lpt_info(c); in layout_cnodes()
325 ubifs_dump_lpt_lebs(c); in layout_cnodes()
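layout_cnodes() walks the commit list and assigns each node, plus the ltab node and (for big LPT) the lsave node, a target (lnum, offs), recording the root's position in c->lpt_lnum/c->lpt_offs. Whenever the next node does not fit, the tail of the current LEB is written off and a fresh LEB is allocated; that spill pattern, repeated above for the lsave and ltab tail cases, boils down to the following sketch (error reporting trimmed, helpers as shown above):

    static int sketch_spill_if_needed(struct ubifs_info *c, int *lnum,
                                      int *offs, int len)
    {
        int err, alen;

        while (*offs + len > c->leb_size) {
            alen = ALIGN(*offs, c->min_io_size);
            /* tail beyond alen stays free; padding between offs and alen is dirt */
            upd_ltab(c, *lnum, c->leb_size - alen, alen - *offs);
            err = alloc_lpt_leb(c, lnum);
            if (err)
                return err;
            *offs = 0;
            ubifs_assert(*lnum >= c->lpt_first && *lnum <= c->lpt_last);
        }
        return 0;
    }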
344 static int realloc_lpt_leb(struct ubifs_info *c, int *lnum) in realloc_lpt_leb() argument
348 n = *lnum - c->lpt_first + 1; in realloc_lpt_leb()
349 for (i = n; i < c->lpt_lebs; i++) in realloc_lpt_leb()
350 if (c->ltab[i].cmt) { in realloc_lpt_leb()
351 c->ltab[i].cmt = 0; in realloc_lpt_leb()
352 *lnum = i + c->lpt_first; in realloc_lpt_leb()
357 if (c->ltab[i].cmt) { in realloc_lpt_leb()
358 c->ltab[i].cmt = 0; in realloc_lpt_leb()
359 *lnum = i + c->lpt_first; in realloc_lpt_leb()
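realloc_lpt_leb() is the write-phase twin of alloc_lpt_leb(): instead of reserving an empty LEB it re-finds, in the same wrap-around order, the next LEB that layout flagged with cmt and clears the flag, so layout and write agree on the LEB sequence. Sketched:

    static int sketch_realloc_lpt_leb(struct ubifs_info *c, int *lnum)
    {
        int i, n = *lnum - c->lpt_first + 1;

        for (i = n; i < c->lpt_lebs; i++)
            if (c->ltab[i].cmt) {
                c->ltab[i].cmt = 0;          /* consume the reservation */
                *lnum = i + c->lpt_first;
                return 0;
            }
        for (i = 0; i < n; i++)
            if (c->ltab[i].cmt) {
                c->ltab[i].cmt = 0;
                *lnum = i + c->lpt_first;
                return 0;
            }
        return -ENOSPC;
    }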
371 static int write_cnodes(struct ubifs_info *c) in write_cnodes() argument
375 void *buf = c->lpt_buf; in write_cnodes()
377 cnode = c->lpt_cnext; in write_cnodes()
380 lnum = c->nhead_lnum; in write_cnodes()
381 offs = c->nhead_offs; in write_cnodes()
385 err = ubifs_leb_unmap(c, lnum); in write_cnodes()
390 done_lsave = !c->big_lpt; in write_cnodes()
392 if (!done_lsave && offs + c->lsave_sz <= c->leb_size) { in write_cnodes()
394 ubifs_pack_lsave(c, buf + offs, c->lsave); in write_cnodes()
395 offs += c->lsave_sz; in write_cnodes()
396 dbg_chk_lpt_sz(c, 1, c->lsave_sz); in write_cnodes()
399 if (offs + c->ltab_sz <= c->leb_size) { in write_cnodes()
401 ubifs_pack_ltab(c, buf + offs, c->ltab_cmt); in write_cnodes()
402 offs += c->ltab_sz; in write_cnodes()
403 dbg_chk_lpt_sz(c, 1, c->ltab_sz); in write_cnodes()
409 len = c->nnode_sz; in write_cnodes()
411 len = c->pnode_sz; in write_cnodes()
412 while (offs + len > c->leb_size) { in write_cnodes()
415 alen = ALIGN(wlen, c->min_io_size); in write_cnodes()
417 err = ubifs_leb_write(c, lnum, buf + from, from, in write_cnodes()
422 dbg_chk_lpt_sz(c, 2, c->leb_size - offs); in write_cnodes()
423 err = realloc_lpt_leb(c, &lnum); in write_cnodes()
427 ubifs_assert(lnum >= c->lpt_first && in write_cnodes()
428 lnum <= c->lpt_last); in write_cnodes()
429 err = ubifs_leb_unmap(c, lnum); in write_cnodes()
435 ubifs_pack_lsave(c, buf + offs, c->lsave); in write_cnodes()
436 offs += c->lsave_sz; in write_cnodes()
437 dbg_chk_lpt_sz(c, 1, c->lsave_sz); in write_cnodes()
442 ubifs_pack_ltab(c, buf + offs, c->ltab_cmt); in write_cnodes()
443 offs += c->ltab_sz; in write_cnodes()
444 dbg_chk_lpt_sz(c, 1, c->ltab_sz); in write_cnodes()
450 ubifs_pack_nnode(c, buf + offs, in write_cnodes()
453 ubifs_pack_pnode(c, buf + offs, in write_cnodes()
466 dbg_chk_lpt_sz(c, 1, len); in write_cnodes()
468 } while (cnode && cnode != c->lpt_cnext); in write_cnodes()
472 if (offs + c->lsave_sz > c->leb_size) { in write_cnodes()
474 alen = ALIGN(wlen, c->min_io_size); in write_cnodes()
476 err = ubifs_leb_write(c, lnum, buf + from, from, alen); in write_cnodes()
479 dbg_chk_lpt_sz(c, 2, c->leb_size - offs); in write_cnodes()
480 err = realloc_lpt_leb(c, &lnum); in write_cnodes()
484 ubifs_assert(lnum >= c->lpt_first && in write_cnodes()
485 lnum <= c->lpt_last); in write_cnodes()
486 err = ubifs_leb_unmap(c, lnum); in write_cnodes()
491 ubifs_pack_lsave(c, buf + offs, c->lsave); in write_cnodes()
492 offs += c->lsave_sz; in write_cnodes()
493 dbg_chk_lpt_sz(c, 1, c->lsave_sz); in write_cnodes()
498 if (offs + c->ltab_sz > c->leb_size) { in write_cnodes()
500 alen = ALIGN(wlen, c->min_io_size); in write_cnodes()
502 err = ubifs_leb_write(c, lnum, buf + from, from, alen); in write_cnodes()
505 dbg_chk_lpt_sz(c, 2, c->leb_size - offs); in write_cnodes()
506 err = realloc_lpt_leb(c, &lnum); in write_cnodes()
510 ubifs_assert(lnum >= c->lpt_first && in write_cnodes()
511 lnum <= c->lpt_last); in write_cnodes()
512 err = ubifs_leb_unmap(c, lnum); in write_cnodes()
516 ubifs_pack_ltab(c, buf + offs, c->ltab_cmt); in write_cnodes()
517 offs += c->ltab_sz; in write_cnodes()
518 dbg_chk_lpt_sz(c, 1, c->ltab_sz); in write_cnodes()
523 alen = ALIGN(wlen, c->min_io_size); in write_cnodes()
525 err = ubifs_leb_write(c, lnum, buf + from, from, alen); in write_cnodes()
529 dbg_chk_lpt_sz(c, 4, alen - wlen); in write_cnodes()
530 err = dbg_chk_lpt_sz(c, 3, ALIGN(offs, c->min_io_size)); in write_cnodes()
534 c->nhead_lnum = lnum; in write_cnodes()
535 c->nhead_offs = ALIGN(offs, c->min_io_size); in write_cnodes()
537 dbg_lp("LPT root is at %d:%d", c->lpt_lnum, c->lpt_offs); in write_cnodes()
538 dbg_lp("LPT head is at %d:%d", c->nhead_lnum, c->nhead_offs); in write_cnodes()
539 dbg_lp("LPT ltab is at %d:%d", c->ltab_lnum, c->ltab_offs); in write_cnodes()
540 if (c->big_lpt) in write_cnodes()
541 dbg_lp("LPT lsave is at %d:%d", c->lsave_lnum, c->lsave_offs); in write_cnodes()
548 ubifs_dump_lpt_info(c); in write_cnodes()
549 ubifs_dump_lpt_lebs(c); in write_cnodes()
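write_cnodes() repeats the same walk, but this time packs each node into c->lpt_buf and flushes the buffer with ubifs_leb_write() whenever a LEB fills, unmapping each target LEB before writing it; the new head of the LPT ends up in c->nhead_lnum/c->nhead_offs. A minimal sketch of the flush-and-advance step, assuming 0xFF padding up to the min_io_size boundary (the padding is not visible in the matches above):

    static int sketch_flush_and_advance(struct ubifs_info *c, void *buf,
                                        int *lnum, int *from, int *offs)
    {
        int err, wlen = *offs - *from;
        int alen = ALIGN(wlen, c->min_io_size);

        if (wlen) {
            memset(buf + *offs, 0xff, alen - wlen);  /* assumed padding */
            err = ubifs_leb_write(c, *lnum, buf + *from, *from, alen);
            if (err)
                return err;
        }
        err = realloc_lpt_leb(c, lnum);      /* same LEB layout reserved */
        if (err)
            return err;
        err = ubifs_leb_unmap(c, *lnum);     /* start from an erased LEB */
        if (err)
            return err;
        *offs = 0;
        *from = 0;
        return 0;
    }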
563 static struct ubifs_pnode *next_pnode_to_dirty(struct ubifs_info *c, in next_pnode_to_dirty() argument
573 return ubifs_get_pnode(c, nnode, iip); in next_pnode_to_dirty()
589 nnode = ubifs_get_nnode(c, nnode, iip); in next_pnode_to_dirty()
606 nnode = ubifs_get_nnode(c, nnode, iip); in next_pnode_to_dirty()
617 return ubifs_get_pnode(c, nnode, iip); in next_pnode_to_dirty()
628 static struct ubifs_pnode *pnode_lookup(struct ubifs_info *c, int i) in pnode_lookup() argument
633 if (!c->nroot) { in pnode_lookup()
634 err = ubifs_read_nnode(c, NULL, 0); in pnode_lookup()
639 nnode = c->nroot; in pnode_lookup()
640 shft = c->lpt_hght * UBIFS_LPT_FANOUT_SHIFT; in pnode_lookup()
641 for (h = 1; h < c->lpt_hght; h++) { in pnode_lookup()
644 nnode = ubifs_get_nnode(c, nnode, iip); in pnode_lookup()
649 return ubifs_get_pnode(c, nnode, iip); in pnode_lookup()
657 static void add_pnode_dirt(struct ubifs_info *c, struct ubifs_pnode *pnode) in add_pnode_dirt() argument
659 ubifs_add_lpt_dirt(c, pnode->parent->nbranch[pnode->iip].lnum, in add_pnode_dirt()
660 c->pnode_sz); in add_pnode_dirt()
668 static void do_make_pnode_dirty(struct ubifs_info *c, struct ubifs_pnode *pnode) in do_make_pnode_dirty() argument
674 c->dirty_pn_cnt += 1; in do_make_pnode_dirty()
675 add_pnode_dirt(c, pnode); in do_make_pnode_dirty()
680 c->dirty_nn_cnt += 1; in do_make_pnode_dirty()
681 ubifs_add_nnode_dirt(c, nnode); in do_make_pnode_dirty()
700 static int make_tree_dirty(struct ubifs_info *c) in make_tree_dirty() argument
704 pnode = pnode_lookup(c, 0); in make_tree_dirty()
709 do_make_pnode_dirty(c, pnode); in make_tree_dirty()
710 pnode = next_pnode_to_dirty(c, pnode); in make_tree_dirty()
724 static int need_write_all(struct ubifs_info *c) in need_write_all() argument
729 for (i = 0; i < c->lpt_lebs; i++) { in need_write_all()
730 if (i + c->lpt_first == c->nhead_lnum) in need_write_all()
731 free += c->leb_size - c->nhead_offs; in need_write_all()
732 else if (c->ltab[i].free == c->leb_size) in need_write_all()
733 free += c->leb_size; in need_write_all()
734 else if (c->ltab[i].free + c->ltab[i].dirty == c->leb_size) in need_write_all()
735 free += c->leb_size; in need_write_all()
738 if (free <= c->lpt_sz * 2) in need_write_all()
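need_write_all() estimates how much space a commit could still draw on: the rest of the head LEB, completely empty LEBs, and LEBs that trivial GC could reclaim. If that is no more than twice the worst-case LPT size, the caller rewrites the whole tree so the LPT compacts itself. A restatement of the test, using only the fields visible above:

    static int sketch_need_write_all(struct ubifs_info *c)
    {
        long long free = 0;
        int i;

        for (i = 0; i < c->lpt_lebs; i++) {
            if (i + c->lpt_first == c->nhead_lnum)
                free += c->leb_size - c->nhead_offs;  /* rest of the head LEB */
            else if (c->ltab[i].free == c->leb_size)
                free += c->leb_size;                  /* completely empty */
            else if (c->ltab[i].free + c->ltab[i].dirty == c->leb_size)
                free += c->leb_size;                  /* reclaimable by trivial GC */
        }
        /* less than twice the worst-case LPT size left: rewrite everything */
        return free <= c->lpt_sz * 2;
    }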
751 static void lpt_tgc_start(struct ubifs_info *c) in lpt_tgc_start() argument
755 for (i = 0; i < c->lpt_lebs; i++) { in lpt_tgc_start()
756 if (i + c->lpt_first == c->nhead_lnum) in lpt_tgc_start()
758 if (c->ltab[i].dirty > 0 && in lpt_tgc_start()
759 c->ltab[i].free + c->ltab[i].dirty == c->leb_size) { in lpt_tgc_start()
760 c->ltab[i].tgc = 1; in lpt_tgc_start()
761 c->ltab[i].free = c->leb_size; in lpt_tgc_start()
762 c->ltab[i].dirty = 0; in lpt_tgc_start()
763 dbg_lp("LEB %d", i + c->lpt_first); in lpt_tgc_start()
777 static int lpt_tgc_end(struct ubifs_info *c) in lpt_tgc_end() argument
781 for (i = 0; i < c->lpt_lebs; i++) in lpt_tgc_end()
782 if (c->ltab[i].tgc) { in lpt_tgc_end()
783 err = ubifs_leb_unmap(c, i + c->lpt_first); in lpt_tgc_end()
786 c->ltab[i].tgc = 0; in lpt_tgc_end()
787 dbg_lp("LEB %d", i + c->lpt_first); in lpt_tgc_end()
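Trivial GC ("tgc") needs no node rewriting: an LPT LEB whose free plus dirty space equals the LEB size holds no live nodes, so it is flagged while the commit runs and simply unmapped once the commit has ended. Both halves, sketched:

    static void sketch_lpt_tgc_start(struct ubifs_info *c)
    {
        int i;

        for (i = 0; i < c->lpt_lebs; i++) {
            if (i + c->lpt_first == c->nhead_lnum)
                continue;                        /* never touch the head LEB */
            if (c->ltab[i].dirty > 0 &&
                c->ltab[i].free + c->ltab[i].dirty == c->leb_size) {
                c->ltab[i].tgc = 1;              /* unmap after the commit */
                c->ltab[i].free = c->leb_size;
                c->ltab[i].dirty = 0;
            }
        }
    }

    static int sketch_lpt_tgc_end(struct ubifs_info *c)
    {
        int i, err;

        for (i = 0; i < c->lpt_lebs; i++)
            if (c->ltab[i].tgc) {
                err = ubifs_leb_unmap(c, i + c->lpt_first);
                if (err)
                    return err;
                c->ltab[i].tgc = 0;
            }
        return 0;
    }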
804 static void populate_lsave(struct ubifs_info *c) in populate_lsave() argument
810 ubifs_assert(c->big_lpt); in populate_lsave()
811 if (!(c->lpt_drty_flgs & LSAVE_DIRTY)) { in populate_lsave()
812 c->lpt_drty_flgs |= LSAVE_DIRTY; in populate_lsave()
813 ubifs_add_lpt_dirt(c, c->lsave_lnum, c->lsave_sz); in populate_lsave()
816 if (dbg_populate_lsave(c)) in populate_lsave()
819 list_for_each_entry(lprops, &c->empty_list, list) { in populate_lsave()
820 c->lsave[cnt++] = lprops->lnum; in populate_lsave()
821 if (cnt >= c->lsave_cnt) in populate_lsave()
824 list_for_each_entry(lprops, &c->freeable_list, list) { in populate_lsave()
825 c->lsave[cnt++] = lprops->lnum; in populate_lsave()
826 if (cnt >= c->lsave_cnt) in populate_lsave()
829 list_for_each_entry(lprops, &c->frdi_idx_list, list) { in populate_lsave()
830 c->lsave[cnt++] = lprops->lnum; in populate_lsave()
831 if (cnt >= c->lsave_cnt) in populate_lsave()
834 heap = &c->lpt_heap[LPROPS_DIRTY_IDX - 1]; in populate_lsave()
836 c->lsave[cnt++] = heap->arr[i]->lnum; in populate_lsave()
837 if (cnt >= c->lsave_cnt) in populate_lsave()
840 heap = &c->lpt_heap[LPROPS_DIRTY - 1]; in populate_lsave()
842 c->lsave[cnt++] = heap->arr[i]->lnum; in populate_lsave()
843 if (cnt >= c->lsave_cnt) in populate_lsave()
846 heap = &c->lpt_heap[LPROPS_FREE - 1]; in populate_lsave()
848 c->lsave[cnt++] = heap->arr[i]->lnum; in populate_lsave()
849 if (cnt >= c->lsave_cnt) in populate_lsave()
853 while (cnt < c->lsave_cnt) in populate_lsave()
854 c->lsave[cnt++] = c->main_first; in populate_lsave()
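On big-LPT file systems, lsave is a table of c->lsave_cnt "interesting" LEB numbers written with each commit so the next mount can quickly seed the lprops lists and heaps. It is filled from the empty, freeable and frdi_idx lists, then from the dirty-index, dirty and free heaps, and padded with c->main_first. A sketch of the fill pattern (only the first list spelled out):

    static void sketch_populate_lsave(struct ubifs_info *c)
    {
        struct ubifs_lprops *lprops;
        int cnt = 0;

        ubifs_assert(c->big_lpt);
        list_for_each_entry(lprops, &c->empty_list, list) {
            if (cnt >= c->lsave_cnt)
                goto out;
            c->lsave[cnt++] = lprops->lnum;
        }
        /* ... same pattern for freeable_list, frdi_idx_list and the
         *     dirty-index, dirty and free heaps ... */
    out:
        while (cnt < c->lsave_cnt)
            c->lsave[cnt++] = c->main_first;   /* pad with a harmless default */
    }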
865 static struct ubifs_nnode *nnode_lookup(struct ubifs_info *c, int i) in nnode_lookup() argument
870 if (!c->nroot) { in nnode_lookup()
871 err = ubifs_read_nnode(c, NULL, 0); in nnode_lookup()
875 nnode = c->nroot; in nnode_lookup()
881 nnode = ubifs_get_nnode(c, nnode, iip); in nnode_lookup()
903 static int make_nnode_dirty(struct ubifs_info *c, int node_num, int lnum, in make_nnode_dirty() argument
908 nnode = nnode_lookup(c, node_num); in make_nnode_dirty()
917 } else if (c->lpt_lnum != lnum || c->lpt_offs != offs) in make_nnode_dirty()
921 c->dirty_nn_cnt += 1; in make_nnode_dirty()
922 ubifs_add_nnode_dirt(c, nnode); in make_nnode_dirty()
927 c->dirty_nn_cnt += 1; in make_nnode_dirty()
928 ubifs_add_nnode_dirt(c, nnode); in make_nnode_dirty()
952 static int make_pnode_dirty(struct ubifs_info *c, int node_num, int lnum, in make_pnode_dirty() argument
958 pnode = pnode_lookup(c, node_num); in make_pnode_dirty()
964 do_make_pnode_dirty(c, pnode); in make_pnode_dirty()
982 static int make_ltab_dirty(struct ubifs_info *c, int lnum, int offs) in make_ltab_dirty() argument
984 if (lnum != c->ltab_lnum || offs != c->ltab_offs) in make_ltab_dirty()
986 if (!(c->lpt_drty_flgs & LTAB_DIRTY)) { in make_ltab_dirty()
987 c->lpt_drty_flgs |= LTAB_DIRTY; in make_ltab_dirty()
988 ubifs_add_lpt_dirt(c, c->ltab_lnum, c->ltab_sz); in make_ltab_dirty()
1007 static int make_lsave_dirty(struct ubifs_info *c, int lnum, int offs) in make_lsave_dirty() argument
1009 if (lnum != c->lsave_lnum || offs != c->lsave_offs) in make_lsave_dirty()
1011 if (!(c->lpt_drty_flgs & LSAVE_DIRTY)) { in make_lsave_dirty()
1012 c->lpt_drty_flgs |= LSAVE_DIRTY; in make_lsave_dirty()
1013 ubifs_add_lpt_dirt(c, c->lsave_lnum, c->lsave_sz); in make_lsave_dirty()
1034 static int make_node_dirty(struct ubifs_info *c, int node_type, int node_num, in make_node_dirty() argument
1039 return make_nnode_dirty(c, node_num, lnum, offs); in make_node_dirty()
1041 return make_pnode_dirty(c, node_num, lnum, offs); in make_node_dirty()
1043 return make_ltab_dirty(c, lnum, offs); in make_node_dirty()
1045 return make_lsave_dirty(c, lnum, offs); in make_node_dirty()
1055 static int get_lpt_node_len(const struct ubifs_info *c, int node_type) in get_lpt_node_len() argument
1059 return c->nnode_sz; in get_lpt_node_len()
1061 return c->pnode_sz; in get_lpt_node_len()
1063 return c->ltab_sz; in get_lpt_node_len()
1065 return c->lsave_sz; in get_lpt_node_len()
1076 static int get_pad_len(const struct ubifs_info *c, uint8_t *buf, int len) in get_pad_len() argument
1080 if (c->min_io_size == 1) in get_pad_len()
1082 offs = c->leb_size - len; in get_pad_len()
1083 pad_len = ALIGN(offs, c->min_io_size) - offs; in get_pad_len()
1093 static int get_lpt_node_type(const struct ubifs_info *c, uint8_t *buf, in get_lpt_node_type() argument
1100 *node_num = ubifs_unpack_bits(&addr, &pos, c->pcnt_bits); in get_lpt_node_type()
1112 static int is_a_node(const struct ubifs_info *c, uint8_t *buf, int len) in is_a_node() argument
1123 node_len = get_lpt_node_len(c, node_type); in is_a_node()
1148 static int lpt_gc_lnum(struct ubifs_info *c, int lnum) in lpt_gc_lnum() argument
1150 int err, len = c->leb_size, node_type, node_num, node_len, offs; in lpt_gc_lnum()
1151 void *buf = c->lpt_buf; in lpt_gc_lnum()
1155 err = ubifs_leb_read(c, lnum, buf, 0, c->leb_size, 1); in lpt_gc_lnum()
1160 if (!is_a_node(c, buf, len)) { in lpt_gc_lnum()
1163 pad_len = get_pad_len(c, buf, len); in lpt_gc_lnum()
1171 node_type = get_lpt_node_type(c, buf, &node_num); in lpt_gc_lnum()
1172 node_len = get_lpt_node_len(c, node_type); in lpt_gc_lnum()
1173 offs = c->leb_size - len; in lpt_gc_lnum()
1175 mutex_lock(&c->lp_mutex); in lpt_gc_lnum()
1176 err = make_node_dirty(c, node_type, node_num, lnum, offs); in lpt_gc_lnum()
1177 mutex_unlock(&c->lp_mutex); in lpt_gc_lnum()
1193 static int lpt_gc(struct ubifs_info *c) in lpt_gc() argument
1197 mutex_lock(&c->lp_mutex); in lpt_gc()
1198 for (i = 0; i < c->lpt_lebs; i++) { in lpt_gc()
1199 ubifs_assert(!c->ltab[i].tgc); in lpt_gc()
1200 if (i + c->lpt_first == c->nhead_lnum || in lpt_gc()
1201 c->ltab[i].free + c->ltab[i].dirty == c->leb_size) in lpt_gc()
1203 if (c->ltab[i].dirty > dirty) { in lpt_gc()
1204 dirty = c->ltab[i].dirty; in lpt_gc()
1205 lnum = i + c->lpt_first; in lpt_gc()
1208 mutex_unlock(&c->lp_mutex); in lpt_gc()
1211 return lpt_gc_lnum(c, lnum); in lpt_gc()
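When trivial GC cannot free enough, lpt_gc() picks the LPT LEB with the most dirty space, skipping the head LEB and LEBs that trivial GC will handle anyway, and lpt_gc_lnum() then reads that LEB, identifies every live node in it and marks the corresponding in-memory node dirty, so the next commit rewrites it elsewhere and the LEB becomes fully reclaimable. The victim selection, sketched:

    static int sketch_pick_gc_victim(struct ubifs_info *c)
    {
        int i, lnum = -1, dirty = 0;

        for (i = 0; i < c->lpt_lebs; i++) {
            ubifs_assert(!c->ltab[i].tgc);
            if (i + c->lpt_first == c->nhead_lnum ||
                c->ltab[i].free + c->ltab[i].dirty == c->leb_size)
                continue;            /* head LEB, or trivial GC handles it */
            if (c->ltab[i].dirty > dirty) {
                dirty = c->ltab[i].dirty;
                lnum = i + c->lpt_first;
            }
        }
        return lnum;                 /* -1 means nothing worth collecting */
    }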
1224 int ubifs_lpt_start_commit(struct ubifs_info *c) in ubifs_lpt_start_commit() argument
1230 mutex_lock(&c->lp_mutex); in ubifs_lpt_start_commit()
1231 err = dbg_chk_lpt_free_spc(c); in ubifs_lpt_start_commit()
1234 err = dbg_check_ltab(c); in ubifs_lpt_start_commit()
1238 if (c->check_lpt_free) { in ubifs_lpt_start_commit()
1245 c->check_lpt_free = 0; in ubifs_lpt_start_commit()
1246 while (need_write_all(c)) { in ubifs_lpt_start_commit()
1247 mutex_unlock(&c->lp_mutex); in ubifs_lpt_start_commit()
1248 err = lpt_gc(c); in ubifs_lpt_start_commit()
1251 mutex_lock(&c->lp_mutex); in ubifs_lpt_start_commit()
1255 lpt_tgc_start(c); in ubifs_lpt_start_commit()
1257 if (!c->dirty_pn_cnt) { in ubifs_lpt_start_commit()
1263 if (!c->big_lpt && need_write_all(c)) { in ubifs_lpt_start_commit()
1265 err = make_tree_dirty(c); in ubifs_lpt_start_commit()
1268 lpt_tgc_start(c); in ubifs_lpt_start_commit()
1271 if (c->big_lpt) in ubifs_lpt_start_commit()
1272 populate_lsave(c); in ubifs_lpt_start_commit()
1274 cnt = get_cnodes_to_commit(c); in ubifs_lpt_start_commit()
1277 err = layout_cnodes(c); in ubifs_lpt_start_commit()
1282 memcpy(c->ltab_cmt, c->ltab, in ubifs_lpt_start_commit()
1283 sizeof(struct ubifs_lpt_lprops) * c->lpt_lebs); in ubifs_lpt_start_commit()
1284 c->lpt_drty_flgs &= ~(LTAB_DIRTY | LSAVE_DIRTY); in ubifs_lpt_start_commit()
1287 mutex_unlock(&c->lp_mutex); in ubifs_lpt_start_commit()
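Putting it together, ubifs_lpt_start_commit() runs under c->lp_mutex and is roughly the sequence below. Debug checks, the unlock/relock around lpt_gc() and most error paths are trimmed, and in the kernel the GC loop only runs on the first commit after mount, gated by c->check_lpt_free; this is a condensed sketch, not the function itself.

    static int sketch_lpt_start_commit(struct ubifs_info *c)
    {
        int cnt, err;

        while (need_write_all(c)) {      /* free space is tight: GC first */
            err = lpt_gc(c);
            if (err)
                return err;
        }
        lpt_tgc_start(c);                /* flag fully-dirty LEBs for unmapping */

        if (!c->dirty_pn_cnt)
            return 0;                    /* no dirty pnodes: nothing to commit */

        if (!c->big_lpt && need_write_all(c)) {
            /* small LPT: rewrite the whole tree so it compacts itself */
            err = make_tree_dirty(c);
            if (err)
                return err;
            lpt_tgc_start(c);
        }
        if (c->big_lpt)
            populate_lsave(c);

        cnt = get_cnodes_to_commit(c);   /* build the commit list */
        if (!cnt)
            return 0;
        err = layout_cnodes(c);          /* decide where everything goes */
        if (err)
            return err;

        /* snapshot the ltab so write_cnodes() writes a consistent copy */
        memcpy(c->ltab_cmt, c->ltab,
               sizeof(struct ubifs_lpt_lprops) * c->lpt_lebs);
        c->lpt_drty_flgs &= ~(LTAB_DIRTY | LSAVE_DIRTY);
        return 0;
    }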
1295 static void free_obsolete_cnodes(struct ubifs_info *c) in free_obsolete_cnodes() argument
1299 cnext = c->lpt_cnext; in free_obsolete_cnodes()
1309 } while (cnext != c->lpt_cnext); in free_obsolete_cnodes()
1310 c->lpt_cnext = NULL; in free_obsolete_cnodes()
1322 int ubifs_lpt_end_commit(struct ubifs_info *c) in ubifs_lpt_end_commit() argument
1328 if (!c->lpt_cnext) in ubifs_lpt_end_commit()
1331 err = write_cnodes(c); in ubifs_lpt_end_commit()
1335 mutex_lock(&c->lp_mutex); in ubifs_lpt_end_commit()
1336 free_obsolete_cnodes(c); in ubifs_lpt_end_commit()
1337 mutex_unlock(&c->lp_mutex); in ubifs_lpt_end_commit()
1349 int ubifs_lpt_post_commit(struct ubifs_info *c) in ubifs_lpt_post_commit() argument
1353 mutex_lock(&c->lp_mutex); in ubifs_lpt_post_commit()
1354 err = lpt_tgc_end(c); in ubifs_lpt_post_commit()
1357 if (c->big_lpt) in ubifs_lpt_post_commit()
1358 while (need_write_all(c)) { in ubifs_lpt_post_commit()
1359 mutex_unlock(&c->lp_mutex); in ubifs_lpt_post_commit()
1360 err = lpt_gc(c); in ubifs_lpt_post_commit()
1363 mutex_lock(&c->lp_mutex); in ubifs_lpt_post_commit()
1366 mutex_unlock(&c->lp_mutex); in ubifs_lpt_post_commit()
1378 static struct ubifs_nnode *first_nnode(struct ubifs_info *c, int *hght) in first_nnode() argument
1383 nnode = c->nroot; in first_nnode()
1387 for (h = 1; h < c->lpt_hght; h++) { in first_nnode()
1412 static struct ubifs_nnode *next_nnode(struct ubifs_info *c, in next_nnode() argument
1434 for (h = *hght + 1; h < c->lpt_hght; h++) { in next_nnode()
1455 void ubifs_lpt_free(struct ubifs_info *c, int wr_only) in ubifs_lpt_free() argument
1462 free_obsolete_cnodes(c); /* Leftover from a failed commit */ in ubifs_lpt_free()
1464 vfree(c->ltab_cmt); in ubifs_lpt_free()
1465 c->ltab_cmt = NULL; in ubifs_lpt_free()
1466 vfree(c->lpt_buf); in ubifs_lpt_free()
1467 c->lpt_buf = NULL; in ubifs_lpt_free()
1468 kfree(c->lsave); in ubifs_lpt_free()
1469 c->lsave = NULL; in ubifs_lpt_free()
1476 nnode = first_nnode(c, &hght); in ubifs_lpt_free()
1480 nnode = next_nnode(c, nnode, &hght); in ubifs_lpt_free()
1483 kfree(c->lpt_heap[i].arr); in ubifs_lpt_free()
1484 kfree(c->dirty_idx.arr); in ubifs_lpt_free()
1485 kfree(c->nroot); in ubifs_lpt_free()
1486 vfree(c->ltab); in ubifs_lpt_free()
1487 kfree(c->lpt_nod_buf); in ubifs_lpt_free()
1515 static int dbg_is_nnode_dirty(struct ubifs_info *c, int lnum, int offs) in dbg_is_nnode_dirty() argument
1521 nnode = first_nnode(c, &hght); in dbg_is_nnode_dirty()
1522 for (; nnode; nnode = next_nnode(c, nnode, &hght)) { in dbg_is_nnode_dirty()
1534 if (c->lpt_lnum != lnum || c->lpt_offs != offs) in dbg_is_nnode_dirty()
1550 static int dbg_is_pnode_dirty(struct ubifs_info *c, int lnum, int offs) in dbg_is_pnode_dirty() argument
1554 cnt = DIV_ROUND_UP(c->main_lebs, UBIFS_LPT_FANOUT); in dbg_is_pnode_dirty()
1560 pnode = pnode_lookup(c, i); in dbg_is_pnode_dirty()
1579 static int dbg_is_ltab_dirty(struct ubifs_info *c, int lnum, int offs) in dbg_is_ltab_dirty() argument
1581 if (lnum != c->ltab_lnum || offs != c->ltab_offs) in dbg_is_ltab_dirty()
1583 return (c->lpt_drty_flgs & LTAB_DIRTY) != 0; in dbg_is_ltab_dirty()
1592 static int dbg_is_lsave_dirty(struct ubifs_info *c, int lnum, int offs) in dbg_is_lsave_dirty() argument
1594 if (lnum != c->lsave_lnum || offs != c->lsave_offs) in dbg_is_lsave_dirty()
1596 return (c->lpt_drty_flgs & LSAVE_DIRTY) != 0; in dbg_is_lsave_dirty()
1606 static int dbg_is_node_dirty(struct ubifs_info *c, int node_type, int lnum, in dbg_is_node_dirty() argument
1611 return dbg_is_nnode_dirty(c, lnum, offs); in dbg_is_node_dirty()
1613 return dbg_is_pnode_dirty(c, lnum, offs); in dbg_is_node_dirty()
1615 return dbg_is_ltab_dirty(c, lnum, offs); in dbg_is_node_dirty()
1617 return dbg_is_lsave_dirty(c, lnum, offs); in dbg_is_node_dirty()
1630 static int dbg_check_ltab_lnum(struct ubifs_info *c, int lnum) in dbg_check_ltab_lnum() argument
1632 int err, len = c->leb_size, dirty = 0, node_type, node_num, node_len; in dbg_check_ltab_lnum()
1636 if (!dbg_is_chk_lprops(c)) in dbg_check_ltab_lnum()
1639 buf = p = __vmalloc(c->leb_size, GFP_NOFS, PAGE_KERNEL); in dbg_check_ltab_lnum()
1647 err = ubifs_leb_read(c, lnum, buf, 0, c->leb_size, 1); in dbg_check_ltab_lnum()
1652 if (!is_a_node(c, p, len)) { in dbg_check_ltab_lnum()
1655 pad_len = get_pad_len(c, p, len); in dbg_check_ltab_lnum()
1664 lnum, c->leb_size - len); in dbg_check_ltab_lnum()
1667 i = lnum - c->lpt_first; in dbg_check_ltab_lnum()
1668 if (len != c->ltab[i].free) { in dbg_check_ltab_lnum()
1670 lnum, len, c->ltab[i].free); in dbg_check_ltab_lnum()
1673 if (dirty != c->ltab[i].dirty) { in dbg_check_ltab_lnum()
1675 lnum, dirty, c->ltab[i].dirty); in dbg_check_ltab_lnum()
1680 node_type = get_lpt_node_type(c, p, &node_num); in dbg_check_ltab_lnum()
1681 node_len = get_lpt_node_len(c, node_type); in dbg_check_ltab_lnum()
1682 ret = dbg_is_node_dirty(c, node_type, lnum, c->leb_size - len); in dbg_check_ltab_lnum()
1701 int dbg_check_ltab(struct ubifs_info *c) in dbg_check_ltab() argument
1705 if (!dbg_is_chk_lprops(c)) in dbg_check_ltab()
1709 cnt = DIV_ROUND_UP(c->main_lebs, UBIFS_LPT_FANOUT); in dbg_check_ltab()
1713 pnode = pnode_lookup(c, i); in dbg_check_ltab()
1720 err = dbg_check_lpt_nodes(c, (struct ubifs_cnode *)c->nroot, 0, 0); in dbg_check_ltab()
1725 for (lnum = c->lpt_first; lnum <= c->lpt_last; lnum++) { in dbg_check_ltab()
1726 err = dbg_check_ltab_lnum(c, lnum); in dbg_check_ltab()
1743 int dbg_chk_lpt_free_spc(struct ubifs_info *c) in dbg_chk_lpt_free_spc() argument
1748 if (!dbg_is_chk_lprops(c)) in dbg_chk_lpt_free_spc()
1751 for (i = 0; i < c->lpt_lebs; i++) { in dbg_chk_lpt_free_spc()
1752 if (c->ltab[i].tgc || c->ltab[i].cmt) in dbg_chk_lpt_free_spc()
1754 if (i + c->lpt_first == c->nhead_lnum) in dbg_chk_lpt_free_spc()
1755 free += c->leb_size - c->nhead_offs; in dbg_chk_lpt_free_spc()
1756 else if (c->ltab[i].free == c->leb_size) in dbg_chk_lpt_free_spc()
1757 free += c->leb_size; in dbg_chk_lpt_free_spc()
1759 if (free < c->lpt_sz) { in dbg_chk_lpt_free_spc()
1761 free, c->lpt_sz); in dbg_chk_lpt_free_spc()
1762 ubifs_dump_lpt_info(c); in dbg_chk_lpt_free_spc()
1763 ubifs_dump_lpt_lebs(c); in dbg_chk_lpt_free_spc()
1784 int dbg_chk_lpt_sz(struct ubifs_info *c, int action, int len) in dbg_chk_lpt_sz() argument
1786 struct ubifs_debug_info *d = c->dbg; in dbg_chk_lpt_sz()
1790 if (!dbg_is_chk_lprops(c)) in dbg_chk_lpt_sz()
1799 if (c->dirty_pn_cnt > c->pnode_cnt) { in dbg_chk_lpt_sz()
1801 c->dirty_pn_cnt, c->pnode_cnt); in dbg_chk_lpt_sz()
1804 if (c->dirty_nn_cnt > c->nnode_cnt) { in dbg_chk_lpt_sz()
1806 c->dirty_nn_cnt, c->nnode_cnt); in dbg_chk_lpt_sz()
1819 chk_lpt_sz = c->leb_size; in dbg_chk_lpt_sz()
1821 chk_lpt_sz += len - c->nhead_offs; in dbg_chk_lpt_sz()
1827 if (d->chk_lpt_sz > c->lpt_sz) { in dbg_chk_lpt_sz()
1829 d->chk_lpt_sz, c->lpt_sz); in dbg_chk_lpt_sz()
1842 lpt_sz = (long long)c->pnode_cnt * c->pnode_sz; in dbg_chk_lpt_sz()
1843 lpt_sz += (long long)c->nnode_cnt * c->nnode_sz; in dbg_chk_lpt_sz()
1844 lpt_sz += c->ltab_sz; in dbg_chk_lpt_sz()
1845 if (c->big_lpt) in dbg_chk_lpt_sz()
1846 lpt_sz += c->lsave_sz; in dbg_chk_lpt_sz()
1853 ubifs_dump_lpt_info(c); in dbg_chk_lpt_sz()
1854 ubifs_dump_lpt_lebs(c); in dbg_chk_lpt_sz()
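The size the debug code checks against is the worst case of every LPT node being rewritten in a single commit, mirroring the recomputation in dbg_chk_lpt_sz() above:

    static long long sketch_worst_case_lpt_sz(const struct ubifs_info *c)
    {
        long long lpt_sz;

        lpt_sz  = (long long)c->pnode_cnt * c->pnode_sz;  /* all pnodes */
        lpt_sz += (long long)c->nnode_cnt * c->nnode_sz;  /* all nnodes */
        lpt_sz += c->ltab_sz;                             /* the ltab node */
        if (c->big_lpt)
            lpt_sz += c->lsave_sz;                        /* lsave only for big LPT */
        return lpt_sz;
    }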
1882 static void dump_lpt_leb(const struct ubifs_info *c, int lnum) in dump_lpt_leb() argument
1884 int err, len = c->leb_size, node_type, node_num, node_len, offs; in dump_lpt_leb()
1888 buf = p = __vmalloc(c->leb_size, GFP_NOFS, PAGE_KERNEL); in dump_lpt_leb()
1894 err = ubifs_leb_read(c, lnum, buf, 0, c->leb_size, 1); in dump_lpt_leb()
1899 offs = c->leb_size - len; in dump_lpt_leb()
1900 if (!is_a_node(c, p, len)) { in dump_lpt_leb()
1903 pad_len = get_pad_len(c, p, len); in dump_lpt_leb()
1917 node_type = get_lpt_node_type(c, p, &node_num); in dump_lpt_leb()
1921 node_len = c->pnode_sz; in dump_lpt_leb()
1922 if (c->big_lpt) in dump_lpt_leb()
1934 node_len = c->nnode_sz; in dump_lpt_leb()
1935 if (c->big_lpt) in dump_lpt_leb()
1941 err = ubifs_unpack_nnode(c, p, &nnode); in dump_lpt_leb()
1957 node_len = c->ltab_sz; in dump_lpt_leb()
1961 node_len = c->lsave_sz; in dump_lpt_leb()
1986 void ubifs_dump_lpt_lebs(const struct ubifs_info *c) in ubifs_dump_lpt_lebs() argument
1991 for (i = 0; i < c->lpt_lebs; i++) in ubifs_dump_lpt_lebs()
1992 dump_lpt_leb(c, i + c->lpt_first); in ubifs_dump_lpt_lebs()
2005 static int dbg_populate_lsave(struct ubifs_info *c) in dbg_populate_lsave() argument
2011 if (!dbg_is_chk_gen(c)) in dbg_populate_lsave()
2016 for (i = 0; i < c->lsave_cnt; i++) in dbg_populate_lsave()
2017 c->lsave[i] = c->main_first; in dbg_populate_lsave()
2019 list_for_each_entry(lprops, &c->empty_list, list) in dbg_populate_lsave()
2020 c->lsave[prandom_u32() % c->lsave_cnt] = lprops->lnum; in dbg_populate_lsave()
2021 list_for_each_entry(lprops, &c->freeable_list, list) in dbg_populate_lsave()
2022 c->lsave[prandom_u32() % c->lsave_cnt] = lprops->lnum; in dbg_populate_lsave()
2023 list_for_each_entry(lprops, &c->frdi_idx_list, list) in dbg_populate_lsave()
2024 c->lsave[prandom_u32() % c->lsave_cnt] = lprops->lnum; in dbg_populate_lsave()
2026 heap = &c->lpt_heap[LPROPS_DIRTY_IDX - 1]; in dbg_populate_lsave()
2028 c->lsave[prandom_u32() % c->lsave_cnt] = heap->arr[i]->lnum; in dbg_populate_lsave()
2029 heap = &c->lpt_heap[LPROPS_DIRTY - 1]; in dbg_populate_lsave()
2031 c->lsave[prandom_u32() % c->lsave_cnt] = heap->arr[i]->lnum; in dbg_populate_lsave()
2032 heap = &c->lpt_heap[LPROPS_FREE - 1]; in dbg_populate_lsave()
2034 c->lsave[prandom_u32() % c->lsave_cnt] = heap->arr[i]->lnum; in dbg_populate_lsave()