// SPDX-License-Identifier: GPL-2.0+
/*
 * NILFS segment constructor.
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * Written by Ryusuke Konishi.
 */

#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/bitops.h>
#include <linux/bio.h>
#include <linux/completion.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/crc32.h>
#include <linux/pagevec.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>

#include "nilfs.h"
#include "btnode.h"
#include "page.h"
#include "segment.h"
#include "sufile.h"
#include "cpfile.h"
#include "ifile.h"
#include "segbuf.h"

/*
 * Segment constructor
 */
#define SC_N_INODEVEC	16	/* Size of locally allocated inode vector */

#define SC_MAX_SEGDELTA 64	/*
				 * Upper limit of the number of segments
				 * appended in collection retry loop
				 */

/* Construction mode */
enum {
	SC_LSEG_SR = 1,	/* Make a logical segment having a super root */
	SC_LSEG_DSYNC,	/*
			 * Flush data blocks of a given file and make
			 * a logical segment without a super root.
			 */
	SC_FLUSH_FILE,	/*
			 * Flush data files, leads to segment writes without
			 * creating a checkpoint.
			 */
	SC_FLUSH_DAT,	/*
			 * Flush DAT file.  This also creates segments
			 * without a checkpoint.
			 */
};

/* Stage numbers of dirty block collection */
enum {
	NILFS_ST_INIT = 0,
	NILFS_ST_GC,	/* Collecting dirty blocks for GC */
	NILFS_ST_FILE,
	NILFS_ST_IFILE,
	NILFS_ST_CPFILE,
	NILFS_ST_SUFILE,
	NILFS_ST_DAT,
	NILFS_ST_SR,	/* Super root */
	NILFS_ST_DSYNC,	/* Data sync blocks */
	NILFS_ST_DONE,
};
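
/*
 * During a full construction (SC_LSEG_SR), dirty block collection walks
 * the stages above in order: INIT -> GC -> FILE -> IFILE -> CPFILE ->
 * SUFILE -> DAT -> SR -> DONE.  The flush modes take shortcuts:
 * SC_FLUSH_FILE finishes after NILFS_ST_FILE, SC_FLUSH_DAT jumps
 * directly to NILFS_ST_DAT, and SC_LSEG_DSYNC branches to
 * NILFS_ST_DSYNC.  See nilfs_segctor_collect_blocks() below.
 */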

#define CREATE_TRACE_POINTS
#include <trace/events/nilfs2.h>

/*
 * nilfs_sc_cstage_inc(), nilfs_sc_cstage_set(), and nilfs_sc_cstage_get()
 * are wrapper functions for the stage count (nilfs_sc_info->sc_stage.scnt).
 * Users of the variable must go through them because every stage count
 * transition must emit a trace event
 * (trace_nilfs2_collection_stage_transition).
 *
 * nilfs_sc_cstage_get() isn't strictly required for that purpose because
 * it doesn't produce tracepoint events.  It is provided just to make the
 * intention clear.
 */
static inline void nilfs_sc_cstage_inc(struct nilfs_sc_info *sci)
{
	sci->sc_stage.scnt++;
	trace_nilfs2_collection_stage_transition(sci);
}

static inline void nilfs_sc_cstage_set(struct nilfs_sc_info *sci, int next_scnt)
{
	sci->sc_stage.scnt = next_scnt;
	trace_nilfs2_collection_stage_transition(sci);
}

static inline int nilfs_sc_cstage_get(struct nilfs_sc_info *sci)
{
	return sci->sc_stage.scnt;
}

/* State flags of collection */
#define NILFS_CF_NODE		0x0001	/* Collecting node blocks */
#define NILFS_CF_IFILE_STARTED	0x0002	/* IFILE stage has started */
#define NILFS_CF_SUFREED	0x0004	/* Segment usages have been freed */
#define NILFS_CF_HISTORY_MASK	(NILFS_CF_IFILE_STARTED | NILFS_CF_SUFREED)

/* Operations depending on the construction mode and file type */
struct nilfs_sc_operations {
	int (*collect_data)(struct nilfs_sc_info *, struct buffer_head *,
			    struct inode *);
	int (*collect_node)(struct nilfs_sc_info *, struct buffer_head *,
			    struct inode *);
	int (*collect_bmap)(struct nilfs_sc_info *, struct buffer_head *,
			    struct inode *);
	void (*write_data_binfo)(struct nilfs_sc_info *,
				 struct nilfs_segsum_pointer *,
				 union nilfs_binfo *);
	void (*write_node_binfo)(struct nilfs_sc_info *,
				 struct nilfs_segsum_pointer *,
				 union nilfs_binfo *);
};

/*
 * Other definitions
 */
static void nilfs_segctor_start_timer(struct nilfs_sc_info *);
static void nilfs_segctor_do_flush(struct nilfs_sc_info *, int);
static void nilfs_segctor_do_immediate_flush(struct nilfs_sc_info *);
static void nilfs_dispose_list(struct the_nilfs *, struct list_head *, int);

#define nilfs_cnt32_ge(a, b)   \
	(typecheck(__u32, a) && typecheck(__u32, b) && \
	 ((__s32)((a) - (b)) >= 0))
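
/*
 * nilfs_cnt32_ge() compares 32-bit counters modulo 2^32, so the result
 * stays correct across wrap-around.  For example, with a == 0x00000001
 * and b == 0xffffffff, (__s32)((a) - (b)) evaluates to 2 (>= 0), so
 * "a" is treated as not older than "b" although it is numerically
 * smaller.
 */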

static int nilfs_prepare_segment_lock(struct super_block *sb,
				      struct nilfs_transaction_info *ti)
{
	struct nilfs_transaction_info *cur_ti = current->journal_info;
	void *save = NULL;

	if (cur_ti) {
		if (cur_ti->ti_magic == NILFS_TI_MAGIC)
			return ++cur_ti->ti_count;

		/*
		 * If the journal_info field is occupied by another FS,
		 * it is saved and will be restored on
		 * nilfs_transaction_commit().
		 */
		nilfs_warn(sb, "journal info from a different FS");
		save = current->journal_info;
	}
	if (!ti) {
		ti = kmem_cache_alloc(nilfs_transaction_cachep, GFP_NOFS);
		if (!ti)
			return -ENOMEM;
		ti->ti_flags = NILFS_TI_DYNAMIC_ALLOC;
	} else {
		ti->ti_flags = 0;
	}
	ti->ti_count = 0;
	ti->ti_save = save;
	ti->ti_magic = NILFS_TI_MAGIC;
	current->journal_info = ti;
	return 0;
}

/**
 * nilfs_transaction_begin - start indivisible file operations.
 * @sb: super block
 * @ti: nilfs_transaction_info
 * @vacancy_check: flags for vacancy rate checks
 *
 * nilfs_transaction_begin() acquires a reader/writer semaphore, called
 * the segment semaphore, to make segment construction and write tasks
 * exclusive.  The function is used with nilfs_transaction_commit() in pairs.
 * The region enclosed by these two functions can be nested.  To avoid a
 * deadlock, the semaphore is only acquired or released in the outermost call.
 *
 * This function allocates a nilfs_transaction_info struct to keep context
 * information on it.  It is initialized and hooked onto the current task in
 * the outermost call.  If a pre-allocated struct is given to @ti, it is used
 * instead; otherwise a new struct is assigned from a slab.
 *
 * When the @vacancy_check flag is set, this function will check the amount
 * of free space, and will wait for the GC to reclaim disk space if capacity
 * is low.
 *
 * Return Value: On success, 0 is returned.  On error, one of the following
 * negative error codes is returned.
 *
 * %-ENOMEM - Insufficient memory available.
 *
 * %-ENOSPC - No space left on device
 */
int nilfs_transaction_begin(struct super_block *sb,
			    struct nilfs_transaction_info *ti,
			    int vacancy_check)
{
	struct the_nilfs *nilfs;
	int ret = nilfs_prepare_segment_lock(sb, ti);
	struct nilfs_transaction_info *trace_ti;

	if (unlikely(ret < 0))
		return ret;
	if (ret > 0) {
		trace_ti = current->journal_info;

		trace_nilfs2_transaction_transition(sb, trace_ti,
				    trace_ti->ti_count, trace_ti->ti_flags,
				    TRACE_NILFS2_TRANSACTION_BEGIN);
		return 0;
	}

	sb_start_intwrite(sb);

	nilfs = sb->s_fs_info;
	down_read(&nilfs->ns_segctor_sem);
	if (vacancy_check && nilfs_near_disk_full(nilfs)) {
		up_read(&nilfs->ns_segctor_sem);
		ret = -ENOSPC;
		goto failed;
	}

	trace_ti = current->journal_info;
	trace_nilfs2_transaction_transition(sb, trace_ti, trace_ti->ti_count,
					    trace_ti->ti_flags,
					    TRACE_NILFS2_TRANSACTION_BEGIN);
	return 0;

 failed:
	ti = current->journal_info;
	current->journal_info = ti->ti_save;
	if (ti->ti_flags & NILFS_TI_DYNAMIC_ALLOC)
		kmem_cache_free(nilfs_transaction_cachep, ti);
	sb_end_intwrite(sb);
	return ret;
}
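
/*
 * Illustrative usage sketch (not part of this file): a typical caller
 * brackets its updates with nilfs_transaction_begin() and
 * nilfs_transaction_commit(), aborting on failure.  "do_updates" below
 * is a hypothetical stand-in for the caller's file operations.
 *
 *	struct nilfs_transaction_info ti;
 *	int err;
 *
 *	err = nilfs_transaction_begin(sb, &ti, 1);
 *	if (err)
 *		return err;
 *	err = do_updates(sb);
 *	if (unlikely(err)) {
 *		nilfs_transaction_abort(sb);
 *		return err;
 *	}
 *	return nilfs_transaction_commit(sb);
 */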

/**
 * nilfs_transaction_commit - commit indivisible file operations.
 * @sb: super block
 *
 * nilfs_transaction_commit() releases the read semaphore which is
 * acquired by nilfs_transaction_begin().  This is only done in the
 * outermost call of this function.  If a commit flag is set,
 * nilfs_transaction_commit() sets a timer to start the segment
 * constructor.  If a sync flag is set, it starts construction
 * directly.
 */
int nilfs_transaction_commit(struct super_block *sb)
{
	struct nilfs_transaction_info *ti = current->journal_info;
	struct the_nilfs *nilfs = sb->s_fs_info;
	int err = 0;

	BUG_ON(ti == NULL || ti->ti_magic != NILFS_TI_MAGIC);
	ti->ti_flags |= NILFS_TI_COMMIT;
	if (ti->ti_count > 0) {
		ti->ti_count--;
		trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
			    ti->ti_flags, TRACE_NILFS2_TRANSACTION_COMMIT);
		return 0;
	}
	if (nilfs->ns_writer) {
		struct nilfs_sc_info *sci = nilfs->ns_writer;

		if (ti->ti_flags & NILFS_TI_COMMIT)
			nilfs_segctor_start_timer(sci);
		if (atomic_read(&nilfs->ns_ndirtyblks) > sci->sc_watermark)
			nilfs_segctor_do_flush(sci, 0);
	}
	up_read(&nilfs->ns_segctor_sem);
	trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
			    ti->ti_flags, TRACE_NILFS2_TRANSACTION_COMMIT);

	current->journal_info = ti->ti_save;

	if (ti->ti_flags & NILFS_TI_SYNC)
		err = nilfs_construct_segment(sb);
	if (ti->ti_flags & NILFS_TI_DYNAMIC_ALLOC)
		kmem_cache_free(nilfs_transaction_cachep, ti);
	sb_end_intwrite(sb);
	return err;
}

void nilfs_transaction_abort(struct super_block *sb)
{
	struct nilfs_transaction_info *ti = current->journal_info;
	struct the_nilfs *nilfs = sb->s_fs_info;

	BUG_ON(ti == NULL || ti->ti_magic != NILFS_TI_MAGIC);
	if (ti->ti_count > 0) {
		ti->ti_count--;
		trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
			    ti->ti_flags, TRACE_NILFS2_TRANSACTION_ABORT);
		return;
	}
	up_read(&nilfs->ns_segctor_sem);

	trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
		    ti->ti_flags, TRACE_NILFS2_TRANSACTION_ABORT);

	current->journal_info = ti->ti_save;
	if (ti->ti_flags & NILFS_TI_DYNAMIC_ALLOC)
		kmem_cache_free(nilfs_transaction_cachep, ti);
	sb_end_intwrite(sb);
}

void nilfs_relax_pressure_in_lock(struct super_block *sb)
{
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct nilfs_sc_info *sci = nilfs->ns_writer;

	if (sb_rdonly(sb) || unlikely(!sci) || !sci->sc_flush_request)
		return;

	set_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags);
	up_read(&nilfs->ns_segctor_sem);

	down_write(&nilfs->ns_segctor_sem);
	if (sci->sc_flush_request &&
	    test_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags)) {
		struct nilfs_transaction_info *ti = current->journal_info;

		ti->ti_flags |= NILFS_TI_WRITER;
		nilfs_segctor_do_immediate_flush(sci);
		ti->ti_flags &= ~NILFS_TI_WRITER;
	}
	downgrade_write(&nilfs->ns_segctor_sem);
}

static void nilfs_transaction_lock(struct super_block *sb,
				   struct nilfs_transaction_info *ti,
				   int gcflag)
{
	struct nilfs_transaction_info *cur_ti = current->journal_info;
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct nilfs_sc_info *sci = nilfs->ns_writer;

	WARN_ON(cur_ti);
	ti->ti_flags = NILFS_TI_WRITER;
	ti->ti_count = 0;
	ti->ti_save = cur_ti;
	ti->ti_magic = NILFS_TI_MAGIC;
	current->journal_info = ti;

	for (;;) {
		trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
			    ti->ti_flags, TRACE_NILFS2_TRANSACTION_TRYLOCK);

		down_write(&nilfs->ns_segctor_sem);
		if (!test_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags))
			break;

		nilfs_segctor_do_immediate_flush(sci);

		up_write(&nilfs->ns_segctor_sem);
		cond_resched();
	}
	if (gcflag)
		ti->ti_flags |= NILFS_TI_GC;

	trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
			    ti->ti_flags, TRACE_NILFS2_TRANSACTION_LOCK);
}

static void nilfs_transaction_unlock(struct super_block *sb)
{
	struct nilfs_transaction_info *ti = current->journal_info;
	struct the_nilfs *nilfs = sb->s_fs_info;

	BUG_ON(ti == NULL || ti->ti_magic != NILFS_TI_MAGIC);
	BUG_ON(ti->ti_count > 0);

	up_write(&nilfs->ns_segctor_sem);
	current->journal_info = ti->ti_save;

	trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
			    ti->ti_flags, TRACE_NILFS2_TRANSACTION_UNLOCK);
}

static void *nilfs_segctor_map_segsum_entry(struct nilfs_sc_info *sci,
					    struct nilfs_segsum_pointer *ssp,
					    unsigned int bytes)
{
	struct nilfs_segment_buffer *segbuf = sci->sc_curseg;
	unsigned int blocksize = sci->sc_super->s_blocksize;
	void *p;

	if (unlikely(ssp->offset + bytes > blocksize)) {
		ssp->offset = 0;
		BUG_ON(NILFS_SEGBUF_BH_IS_LAST(ssp->bh,
					       &segbuf->sb_segsum_buffers));
		ssp->bh = NILFS_SEGBUF_NEXT_BH(ssp->bh);
	}
	p = ssp->bh->b_data + ssp->offset;
	ssp->offset += bytes;
	return p;
}
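
/*
 * Example of the cursor handling above (assuming a 4096-byte block
 * size): if ssp->offset is 4090 and an 8-byte entry is requested,
 * 4090 + 8 > 4096, so the cursor moves to the next segment summary
 * buffer and the entry is mapped at offset 0 of that block.  This
 * relies on every summary entry fitting within a single block.
 */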

/**
 * nilfs_segctor_reset_segment_buffer - reset the current segment buffer
 * @sci: nilfs_sc_info
 */
static int nilfs_segctor_reset_segment_buffer(struct nilfs_sc_info *sci)
{
	struct nilfs_segment_buffer *segbuf = sci->sc_curseg;
	struct buffer_head *sumbh;
	unsigned int sumbytes;
	unsigned int flags = 0;
	int err;

	if (nilfs_doing_gc())
		flags = NILFS_SS_GC;
	err = nilfs_segbuf_reset(segbuf, flags, sci->sc_seg_ctime, sci->sc_cno);
	if (unlikely(err))
		return err;

	sumbh = NILFS_SEGBUF_FIRST_BH(&segbuf->sb_segsum_buffers);
	sumbytes = segbuf->sb_sum.sumbytes;
	sci->sc_finfo_ptr.bh = sumbh;  sci->sc_finfo_ptr.offset = sumbytes;
	sci->sc_binfo_ptr.bh = sumbh;  sci->sc_binfo_ptr.offset = sumbytes;
	sci->sc_blk_cnt = sci->sc_datablk_cnt = 0;
	return 0;
}

/**
 * nilfs_segctor_zeropad_segsum - zero pad the rest of the segment summary area
 * @sci: segment constructor object
 *
 * nilfs_segctor_zeropad_segsum() zero-fills unallocated space at the end of
 * the current segment summary block.
 */
static void nilfs_segctor_zeropad_segsum(struct nilfs_sc_info *sci)
{
	struct nilfs_segsum_pointer *ssp;

	ssp = sci->sc_blk_cnt > 0 ? &sci->sc_binfo_ptr : &sci->sc_finfo_ptr;
	if (ssp->offset < ssp->bh->b_size)
		memset(ssp->bh->b_data + ssp->offset, 0,
		       ssp->bh->b_size - ssp->offset);
}

static int nilfs_segctor_feed_segment(struct nilfs_sc_info *sci)
{
	sci->sc_nblk_this_inc += sci->sc_curseg->sb_sum.nblocks;
	if (NILFS_SEGBUF_IS_LAST(sci->sc_curseg, &sci->sc_segbufs))
		return -E2BIG; /*
				* The current segment is filled up
				* (internal code)
				*/
	nilfs_segctor_zeropad_segsum(sci);
	sci->sc_curseg = NILFS_NEXT_SEGBUF(sci->sc_curseg);
	return nilfs_segctor_reset_segment_buffer(sci);
}

static int nilfs_segctor_add_super_root(struct nilfs_sc_info *sci)
{
	struct nilfs_segment_buffer *segbuf = sci->sc_curseg;
	int err;

	if (segbuf->sb_sum.nblocks >= segbuf->sb_rest_blocks) {
		err = nilfs_segctor_feed_segment(sci);
		if (err)
			return err;
		segbuf = sci->sc_curseg;
	}
	err = nilfs_segbuf_extend_payload(segbuf, &segbuf->sb_super_root);
	if (likely(!err))
		segbuf->sb_sum.flags |= NILFS_SS_SR;
	return err;
}

/*
 * Functions for making the segment summary and payloads
 */
static int nilfs_segctor_segsum_block_required(
	struct nilfs_sc_info *sci, const struct nilfs_segsum_pointer *ssp,
	unsigned int binfo_size)
{
	unsigned int blocksize = sci->sc_super->s_blocksize;
	/* Sizes of finfo and binfo are small enough compared to blocksize */

	return ssp->offset + binfo_size +
		(!sci->sc_blk_cnt ? sizeof(struct nilfs_finfo) : 0) >
		blocksize;
}
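
/*
 * Worked example (illustrative, taking a 4096-byte block size and
 * sizeof(struct nilfs_finfo) == 24 for the sake of the arithmetic):
 * with sc_blk_cnt == 0, ssp->offset == 4080 and binfo_size == 8, the
 * sum 4080 + 8 + 24 = 4112 exceeds the block size, so another segment
 * summary block is required before the next finfo can be mapped.
 */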

static void nilfs_segctor_begin_finfo(struct nilfs_sc_info *sci,
				      struct inode *inode)
{
	sci->sc_curseg->sb_sum.nfinfo++;
	sci->sc_binfo_ptr = sci->sc_finfo_ptr;
	nilfs_segctor_map_segsum_entry(
		sci, &sci->sc_binfo_ptr, sizeof(struct nilfs_finfo));

	if (NILFS_I(inode)->i_root &&
	    !test_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags))
		set_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags);
	/* skip finfo */
}

static void nilfs_segctor_end_finfo(struct nilfs_sc_info *sci,
				    struct inode *inode)
{
	struct nilfs_finfo *finfo;
	struct nilfs_inode_info *ii;
	struct nilfs_segment_buffer *segbuf;
	__u64 cno;

	if (sci->sc_blk_cnt == 0)
		return;

	ii = NILFS_I(inode);

	if (test_bit(NILFS_I_GCINODE, &ii->i_state))
		cno = ii->i_cno;
	else if (NILFS_ROOT_METADATA_FILE(inode->i_ino))
		cno = 0;
	else
		cno = sci->sc_cno;

	finfo = nilfs_segctor_map_segsum_entry(sci, &sci->sc_finfo_ptr,
					       sizeof(*finfo));
	finfo->fi_ino = cpu_to_le64(inode->i_ino);
	finfo->fi_nblocks = cpu_to_le32(sci->sc_blk_cnt);
	finfo->fi_ndatablk = cpu_to_le32(sci->sc_datablk_cnt);
	finfo->fi_cno = cpu_to_le64(cno);

	segbuf = sci->sc_curseg;
	segbuf->sb_sum.sumbytes = sci->sc_binfo_ptr.offset +
		sci->sc_super->s_blocksize * (segbuf->sb_sum.nsumblk - 1);
	sci->sc_finfo_ptr = sci->sc_binfo_ptr;
	sci->sc_blk_cnt = sci->sc_datablk_cnt = 0;
}

static int nilfs_segctor_add_file_block(struct nilfs_sc_info *sci,
					struct buffer_head *bh,
					struct inode *inode,
					unsigned int binfo_size)
{
	struct nilfs_segment_buffer *segbuf;
	int required, err = 0;

 retry:
	segbuf = sci->sc_curseg;
	required = nilfs_segctor_segsum_block_required(
		sci, &sci->sc_binfo_ptr, binfo_size);
	if (segbuf->sb_sum.nblocks + required + 1 > segbuf->sb_rest_blocks) {
		nilfs_segctor_end_finfo(sci, inode);
		err = nilfs_segctor_feed_segment(sci);
		if (err)
			return err;
		goto retry;
	}
	if (unlikely(required)) {
		nilfs_segctor_zeropad_segsum(sci);
		err = nilfs_segbuf_extend_segsum(segbuf);
		if (unlikely(err))
			goto failed;
	}
	if (sci->sc_blk_cnt == 0)
		nilfs_segctor_begin_finfo(sci, inode);

	nilfs_segctor_map_segsum_entry(sci, &sci->sc_binfo_ptr, binfo_size);
	/* Substitution to vblocknr is delayed until update_blocknr() */
	nilfs_segbuf_add_file_buffer(segbuf, bh);
	sci->sc_blk_cnt++;
 failed:
	return err;
}

/*
 * Callback functions that enumerate, mark, and collect dirty blocks
 */
static int nilfs_collect_file_data(struct nilfs_sc_info *sci,
				   struct buffer_head *bh, struct inode *inode)
{
	int err;

	err = nilfs_bmap_propagate(NILFS_I(inode)->i_bmap, bh);
	if (err < 0)
		return err;

	err = nilfs_segctor_add_file_block(sci, bh, inode,
					   sizeof(struct nilfs_binfo_v));
	if (!err)
		sci->sc_datablk_cnt++;
	return err;
}

static int nilfs_collect_file_node(struct nilfs_sc_info *sci,
				   struct buffer_head *bh,
				   struct inode *inode)
{
	return nilfs_bmap_propagate(NILFS_I(inode)->i_bmap, bh);
}

static int nilfs_collect_file_bmap(struct nilfs_sc_info *sci,
				   struct buffer_head *bh,
				   struct inode *inode)
{
	WARN_ON(!buffer_dirty(bh));
	return nilfs_segctor_add_file_block(sci, bh, inode, sizeof(__le64));
}

static void nilfs_write_file_data_binfo(struct nilfs_sc_info *sci,
					struct nilfs_segsum_pointer *ssp,
					union nilfs_binfo *binfo)
{
	struct nilfs_binfo_v *binfo_v = nilfs_segctor_map_segsum_entry(
		sci, ssp, sizeof(*binfo_v));
	*binfo_v = binfo->bi_v;
}

static void nilfs_write_file_node_binfo(struct nilfs_sc_info *sci,
					struct nilfs_segsum_pointer *ssp,
					union nilfs_binfo *binfo)
{
	__le64 *vblocknr = nilfs_segctor_map_segsum_entry(
		sci, ssp, sizeof(*vblocknr));
	*vblocknr = binfo->bi_v.bi_vblocknr;
}

static const struct nilfs_sc_operations nilfs_sc_file_ops = {
	.collect_data = nilfs_collect_file_data,
	.collect_node = nilfs_collect_file_node,
	.collect_bmap = nilfs_collect_file_bmap,
	.write_data_binfo = nilfs_write_file_data_binfo,
	.write_node_binfo = nilfs_write_file_node_binfo,
};

static int nilfs_collect_dat_data(struct nilfs_sc_info *sci,
				  struct buffer_head *bh, struct inode *inode)
{
	int err;

	err = nilfs_bmap_propagate(NILFS_I(inode)->i_bmap, bh);
	if (err < 0)
		return err;

	err = nilfs_segctor_add_file_block(sci, bh, inode, sizeof(__le64));
	if (!err)
		sci->sc_datablk_cnt++;
	return err;
}

static int nilfs_collect_dat_bmap(struct nilfs_sc_info *sci,
				  struct buffer_head *bh, struct inode *inode)
{
	WARN_ON(!buffer_dirty(bh));
	return nilfs_segctor_add_file_block(sci, bh, inode,
					    sizeof(struct nilfs_binfo_dat));
}

static void nilfs_write_dat_data_binfo(struct nilfs_sc_info *sci,
				       struct nilfs_segsum_pointer *ssp,
				       union nilfs_binfo *binfo)
{
	__le64 *blkoff = nilfs_segctor_map_segsum_entry(sci, ssp,
							sizeof(*blkoff));
	*blkoff = binfo->bi_dat.bi_blkoff;
}

static void nilfs_write_dat_node_binfo(struct nilfs_sc_info *sci,
				       struct nilfs_segsum_pointer *ssp,
				       union nilfs_binfo *binfo)
{
	struct nilfs_binfo_dat *binfo_dat =
		nilfs_segctor_map_segsum_entry(sci, ssp, sizeof(*binfo_dat));
	*binfo_dat = binfo->bi_dat;
}

static const struct nilfs_sc_operations nilfs_sc_dat_ops = {
	.collect_data = nilfs_collect_dat_data,
	.collect_node = nilfs_collect_file_node,
	.collect_bmap = nilfs_collect_dat_bmap,
	.write_data_binfo = nilfs_write_dat_data_binfo,
	.write_node_binfo = nilfs_write_dat_node_binfo,
};

static const struct nilfs_sc_operations nilfs_sc_dsync_ops = {
	.collect_data = nilfs_collect_file_data,
	.collect_node = NULL,
	.collect_bmap = NULL,
	.write_data_binfo = nilfs_write_file_data_binfo,
	.write_node_binfo = NULL,
};
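
/*
 * nilfs_sc_dsync_ops deliberately leaves collect_node, collect_bmap,
 * and write_node_binfo NULL: a data-sync log (SC_LSEG_DSYNC) flushes
 * data blocks only, so those callbacks should never be reached in that
 * mode.
 */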

static size_t nilfs_lookup_dirty_data_buffers(struct inode *inode,
					      struct list_head *listp,
					      size_t nlimit,
					      loff_t start, loff_t end)
{
	struct address_space *mapping = inode->i_mapping;
	struct folio_batch fbatch;
	pgoff_t index = 0, last = ULONG_MAX;
	size_t ndirties = 0;
	int i;

	if (unlikely(start != 0 || end != LLONG_MAX)) {
		/*
		 * A valid range is given for sync-ing data pages.  The
		 * range is rounded to page boundaries; extra dirty buffers
		 * may be included if blocksize < pagesize.
		 */
		index = start >> PAGE_SHIFT;
		last = end >> PAGE_SHIFT;
	}
	folio_batch_init(&fbatch);
 repeat:
	if (unlikely(index > last) ||
	    !filemap_get_folios_tag(mapping, &index, last,
		    PAGECACHE_TAG_DIRTY, &fbatch))
		return ndirties;

	for (i = 0; i < folio_batch_count(&fbatch); i++) {
		struct buffer_head *bh, *head;
		struct folio *folio = fbatch.folios[i];

		folio_lock(folio);
		if (unlikely(folio->mapping != mapping)) {
			/* Exclude folios removed from the address space */
			folio_unlock(folio);
			continue;
		}
		head = folio_buffers(folio);
		if (!head) {
			create_empty_buffers(&folio->page, i_blocksize(inode), 0);
			head = folio_buffers(folio);
		}
		folio_unlock(folio);

		bh = head;
		do {
			if (!buffer_dirty(bh) || buffer_async_write(bh))
				continue;
			get_bh(bh);
			list_add_tail(&bh->b_assoc_buffers, listp);
			ndirties++;
			if (unlikely(ndirties >= nlimit)) {
				folio_batch_release(&fbatch);
				cond_resched();
				return ndirties;
			}
		} while (bh = bh->b_this_page, bh != head);
	}
	folio_batch_release(&fbatch);
	cond_resched();
	goto repeat;
}
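
/*
 * Note on the @nlimit contract above: the scan stops early once @nlimit
 * dirty buffers have been gathered.  Callers such as
 * nilfs_segctor_scan_file() pass "rest + 1" and then check for
 * "n > rest", using the one extra slot to detect that the collected
 * buffers would overflow the remaining space in the segment.
 */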

static void nilfs_lookup_dirty_node_buffers(struct inode *inode,
					    struct list_head *listp)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct inode *btnc_inode = ii->i_assoc_inode;
	struct folio_batch fbatch;
	struct buffer_head *bh, *head;
	unsigned int i;
	pgoff_t index = 0;

	if (!btnc_inode)
		return;
	folio_batch_init(&fbatch);

	while (filemap_get_folios_tag(btnc_inode->i_mapping, &index,
				(pgoff_t)-1, PAGECACHE_TAG_DIRTY, &fbatch)) {
		for (i = 0; i < folio_batch_count(&fbatch); i++) {
			bh = head = folio_buffers(fbatch.folios[i]);
			do {
				if (buffer_dirty(bh) &&
						!buffer_async_write(bh)) {
					get_bh(bh);
					list_add_tail(&bh->b_assoc_buffers,
						      listp);
				}
				bh = bh->b_this_page;
			} while (bh != head);
		}
		folio_batch_release(&fbatch);
		cond_resched();
	}
}

static void nilfs_dispose_list(struct the_nilfs *nilfs,
			       struct list_head *head, int force)
{
	struct nilfs_inode_info *ii, *n;
	struct nilfs_inode_info *ivec[SC_N_INODEVEC], **pii;
	unsigned int nv = 0;

	while (!list_empty(head)) {
		spin_lock(&nilfs->ns_inode_lock);
		list_for_each_entry_safe(ii, n, head, i_dirty) {
			list_del_init(&ii->i_dirty);
			if (force) {
				if (unlikely(ii->i_bh)) {
					brelse(ii->i_bh);
					ii->i_bh = NULL;
				}
			} else if (test_bit(NILFS_I_DIRTY, &ii->i_state)) {
				set_bit(NILFS_I_QUEUED, &ii->i_state);
				list_add_tail(&ii->i_dirty,
					      &nilfs->ns_dirty_files);
				continue;
			}
			ivec[nv++] = ii;
			if (nv == SC_N_INODEVEC)
				break;
		}
		spin_unlock(&nilfs->ns_inode_lock);

		for (pii = ivec; nv > 0; pii++, nv--)
			iput(&(*pii)->vfs_inode);
	}
}

static void nilfs_iput_work_func(struct work_struct *work)
{
	struct nilfs_sc_info *sci = container_of(work, struct nilfs_sc_info,
						 sc_iput_work);
	struct the_nilfs *nilfs = sci->sc_super->s_fs_info;

	nilfs_dispose_list(nilfs, &sci->sc_iput_queue, 0);
}

static int nilfs_test_metadata_dirty(struct the_nilfs *nilfs,
				     struct nilfs_root *root)
{
	int ret = 0;

	if (nilfs_mdt_fetch_dirty(root->ifile))
		ret++;
	if (nilfs_mdt_fetch_dirty(nilfs->ns_cpfile))
		ret++;
	if (nilfs_mdt_fetch_dirty(nilfs->ns_sufile))
		ret++;
	if ((ret || nilfs_doing_gc()) && nilfs_mdt_fetch_dirty(nilfs->ns_dat))
		ret++;
	return ret;
}

static int nilfs_segctor_clean(struct nilfs_sc_info *sci)
{
	return list_empty(&sci->sc_dirty_files) &&
		!test_bit(NILFS_SC_DIRTY, &sci->sc_flags) &&
		sci->sc_nfreesegs == 0 &&
		(!nilfs_doing_gc() || list_empty(&sci->sc_gc_inodes));
}

static int nilfs_segctor_confirm(struct nilfs_sc_info *sci)
{
	struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
	int ret = 0;

	if (nilfs_test_metadata_dirty(nilfs, sci->sc_root))
		set_bit(NILFS_SC_DIRTY, &sci->sc_flags);

	spin_lock(&nilfs->ns_inode_lock);
	if (list_empty(&nilfs->ns_dirty_files) && nilfs_segctor_clean(sci))
		ret++;

	spin_unlock(&nilfs->ns_inode_lock);
	return ret;
}

static void nilfs_segctor_clear_metadata_dirty(struct nilfs_sc_info *sci)
{
	struct the_nilfs *nilfs = sci->sc_super->s_fs_info;

	nilfs_mdt_clear_dirty(sci->sc_root->ifile);
	nilfs_mdt_clear_dirty(nilfs->ns_cpfile);
	nilfs_mdt_clear_dirty(nilfs->ns_sufile);
	nilfs_mdt_clear_dirty(nilfs->ns_dat);
}

static int nilfs_segctor_create_checkpoint(struct nilfs_sc_info *sci)
{
	struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
	struct buffer_head *bh_cp;
	struct nilfs_checkpoint *raw_cp;
	int err;

	/* XXX: this interface will be changed */
	err = nilfs_cpfile_get_checkpoint(nilfs->ns_cpfile, nilfs->ns_cno, 1,
					  &raw_cp, &bh_cp);
	if (likely(!err)) {
		/*
		 * The following code duplicates logic in cpfile, but it is
		 * needed to collect the checkpoint even when it was not
		 * newly created.
		 */
		mark_buffer_dirty(bh_cp);
		nilfs_mdt_mark_dirty(nilfs->ns_cpfile);
		nilfs_cpfile_put_checkpoint(
			nilfs->ns_cpfile, nilfs->ns_cno, bh_cp);
	} else if (err == -EINVAL || err == -ENOENT) {
		nilfs_error(sci->sc_super,
			    "checkpoint creation failed due to metadata corruption.");
		err = -EIO;
	}
	return err;
}

static int nilfs_segctor_fill_in_checkpoint(struct nilfs_sc_info *sci)
{
	struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
	struct buffer_head *bh_cp;
	struct nilfs_checkpoint *raw_cp;
	int err;

	err = nilfs_cpfile_get_checkpoint(nilfs->ns_cpfile, nilfs->ns_cno, 0,
					  &raw_cp, &bh_cp);
	if (unlikely(err)) {
		if (err == -EINVAL || err == -ENOENT) {
			nilfs_error(sci->sc_super,
				    "checkpoint finalization failed due to metadata corruption.");
			err = -EIO;
		}
		goto failed_ibh;
	}
	raw_cp->cp_snapshot_list.ssl_next = 0;
	raw_cp->cp_snapshot_list.ssl_prev = 0;
	raw_cp->cp_inodes_count =
		cpu_to_le64(atomic64_read(&sci->sc_root->inodes_count));
	raw_cp->cp_blocks_count =
		cpu_to_le64(atomic64_read(&sci->sc_root->blocks_count));
	raw_cp->cp_nblk_inc =
		cpu_to_le64(sci->sc_nblk_inc + sci->sc_nblk_this_inc);
	raw_cp->cp_create = cpu_to_le64(sci->sc_seg_ctime);
	raw_cp->cp_cno = cpu_to_le64(nilfs->ns_cno);

	if (test_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags))
		nilfs_checkpoint_clear_minor(raw_cp);
	else
		nilfs_checkpoint_set_minor(raw_cp);

	nilfs_write_inode_common(sci->sc_root->ifile,
				 &raw_cp->cp_ifile_inode, 1);
	nilfs_cpfile_put_checkpoint(nilfs->ns_cpfile, nilfs->ns_cno, bh_cp);
	return 0;

 failed_ibh:
	return err;
}

static void nilfs_fill_in_file_bmap(struct inode *ifile,
				    struct nilfs_inode_info *ii)

{
	struct buffer_head *ibh;
	struct nilfs_inode *raw_inode;

	if (test_bit(NILFS_I_BMAP, &ii->i_state)) {
		ibh = ii->i_bh;
		BUG_ON(!ibh);
		raw_inode = nilfs_ifile_map_inode(ifile, ii->vfs_inode.i_ino,
						  ibh);
		nilfs_bmap_write(ii->i_bmap, raw_inode);
		nilfs_ifile_unmap_inode(ifile, ii->vfs_inode.i_ino, ibh);
	}
}

static void nilfs_segctor_fill_in_file_bmap(struct nilfs_sc_info *sci)
{
	struct nilfs_inode_info *ii;

	list_for_each_entry(ii, &sci->sc_dirty_files, i_dirty) {
		nilfs_fill_in_file_bmap(sci->sc_root->ifile, ii);
		set_bit(NILFS_I_COLLECTED, &ii->i_state);
	}
}

static void nilfs_segctor_fill_in_super_root(struct nilfs_sc_info *sci,
					     struct the_nilfs *nilfs)
{
	struct buffer_head *bh_sr;
	struct nilfs_super_root *raw_sr;
	unsigned int isz, srsz;

	bh_sr = NILFS_LAST_SEGBUF(&sci->sc_segbufs)->sb_super_root;

	lock_buffer(bh_sr);
	raw_sr = (struct nilfs_super_root *)bh_sr->b_data;
	isz = nilfs->ns_inode_size;
	srsz = NILFS_SR_BYTES(isz);

	raw_sr->sr_sum = 0;  /* Ensure initialization within this update */
	raw_sr->sr_bytes = cpu_to_le16(srsz);
	raw_sr->sr_nongc_ctime
		= cpu_to_le64(nilfs_doing_gc() ?
			      nilfs->ns_nongc_ctime : sci->sc_seg_ctime);
	raw_sr->sr_flags = 0;

	nilfs_write_inode_common(nilfs->ns_dat, (void *)raw_sr +
				 NILFS_SR_DAT_OFFSET(isz), 1);
	nilfs_write_inode_common(nilfs->ns_cpfile, (void *)raw_sr +
				 NILFS_SR_CPFILE_OFFSET(isz), 1);
	nilfs_write_inode_common(nilfs->ns_sufile, (void *)raw_sr +
				 NILFS_SR_SUFILE_OFFSET(isz), 1);
	memset((void *)raw_sr + srsz, 0, nilfs->ns_blocksize - srsz);
	set_buffer_uptodate(bh_sr);
	unlock_buffer(bh_sr);
}

static void nilfs_redirty_inodes(struct list_head *head)
{
	struct nilfs_inode_info *ii;

	list_for_each_entry(ii, head, i_dirty) {
		if (test_bit(NILFS_I_COLLECTED, &ii->i_state))
			clear_bit(NILFS_I_COLLECTED, &ii->i_state);
	}
}

static void nilfs_drop_collected_inodes(struct list_head *head)
{
	struct nilfs_inode_info *ii;

	list_for_each_entry(ii, head, i_dirty) {
		if (!test_and_clear_bit(NILFS_I_COLLECTED, &ii->i_state))
			continue;

		clear_bit(NILFS_I_INODE_SYNC, &ii->i_state);
		set_bit(NILFS_I_UPDATED, &ii->i_state);
	}
}

static int nilfs_segctor_apply_buffers(struct nilfs_sc_info *sci,
				       struct inode *inode,
				       struct list_head *listp,
				       int (*collect)(struct nilfs_sc_info *,
						      struct buffer_head *,
						      struct inode *))
{
	struct buffer_head *bh, *n;
	int err = 0;

	if (collect) {
		list_for_each_entry_safe(bh, n, listp, b_assoc_buffers) {
			list_del_init(&bh->b_assoc_buffers);
			err = collect(sci, bh, inode);
			brelse(bh);
			if (unlikely(err))
				goto dispose_buffers;
		}
		return 0;
	}

 dispose_buffers:
	while (!list_empty(listp)) {
		bh = list_first_entry(listp, struct buffer_head,
				      b_assoc_buffers);
		list_del_init(&bh->b_assoc_buffers);
		brelse(bh);
	}
	return err;
}

static size_t nilfs_segctor_buffer_rest(struct nilfs_sc_info *sci)
{
	/* Remaining number of blocks within segment buffer */
	return sci->sc_segbuf_nblocks -
		(sci->sc_nblk_this_inc + sci->sc_curseg->sb_sum.nblocks);
}

static int nilfs_segctor_scan_file(struct nilfs_sc_info *sci,
				   struct inode *inode,
				   const struct nilfs_sc_operations *sc_ops)
{
	LIST_HEAD(data_buffers);
	LIST_HEAD(node_buffers);
	int err;

	if (!(sci->sc_stage.flags & NILFS_CF_NODE)) {
		size_t n, rest = nilfs_segctor_buffer_rest(sci);

		n = nilfs_lookup_dirty_data_buffers(
			inode, &data_buffers, rest + 1, 0, LLONG_MAX);
		if (n > rest) {
			err = nilfs_segctor_apply_buffers(
				sci, inode, &data_buffers,
				sc_ops->collect_data);
			BUG_ON(!err); /* always receive -E2BIG or true error */
			goto break_or_fail;
		}
	}
	nilfs_lookup_dirty_node_buffers(inode, &node_buffers);

	if (!(sci->sc_stage.flags & NILFS_CF_NODE)) {
		err = nilfs_segctor_apply_buffers(
			sci, inode, &data_buffers, sc_ops->collect_data);
		if (unlikely(err)) {
			/* dispose node list */
			nilfs_segctor_apply_buffers(
				sci, inode, &node_buffers, NULL);
			goto break_or_fail;
		}
		sci->sc_stage.flags |= NILFS_CF_NODE;
	}
	/* Collect node */
	err = nilfs_segctor_apply_buffers(
		sci, inode, &node_buffers, sc_ops->collect_node);
	if (unlikely(err))
		goto break_or_fail;

	nilfs_bmap_lookup_dirty_buffers(NILFS_I(inode)->i_bmap, &node_buffers);
	err = nilfs_segctor_apply_buffers(
		sci, inode, &node_buffers, sc_ops->collect_bmap);
	if (unlikely(err))
		goto break_or_fail;

	nilfs_segctor_end_finfo(sci, inode);
	sci->sc_stage.flags &= ~NILFS_CF_NODE;

 break_or_fail:
	return err;
}
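
/*
 * The NILFS_CF_NODE flag set above records that data collection for
 * the file in progress has completed.  If collection later stops
 * because the segment filled up, the scan resumed in the next log
 * sees the flag and skips straight to node and bmap collection
 * instead of re-collecting data blocks.
 */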

static int nilfs_segctor_scan_file_dsync(struct nilfs_sc_info *sci,
					 struct inode *inode)
{
	LIST_HEAD(data_buffers);
	size_t n, rest = nilfs_segctor_buffer_rest(sci);
	int err;

	n = nilfs_lookup_dirty_data_buffers(inode, &data_buffers, rest + 1,
					    sci->sc_dsync_start,
					    sci->sc_dsync_end);

	err = nilfs_segctor_apply_buffers(sci, inode, &data_buffers,
					  nilfs_collect_file_data);
	if (!err) {
		nilfs_segctor_end_finfo(sci, inode);
		BUG_ON(n > rest);
		/* always receive -E2BIG or true error if n > rest */
	}
	return err;
}

static int nilfs_segctor_collect_blocks(struct nilfs_sc_info *sci, int mode)
{
	struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
	struct list_head *head;
	struct nilfs_inode_info *ii;
	size_t ndone;
	int err = 0;

	switch (nilfs_sc_cstage_get(sci)) {
	case NILFS_ST_INIT:
		/* Pre-processes */
		sci->sc_stage.flags = 0;

		if (!test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags)) {
			sci->sc_nblk_inc = 0;
			sci->sc_curseg->sb_sum.flags = NILFS_SS_LOGBGN;
			if (mode == SC_LSEG_DSYNC) {
				nilfs_sc_cstage_set(sci, NILFS_ST_DSYNC);
				goto dsync_mode;
			}
		}

		sci->sc_stage.dirty_file_ptr = NULL;
		sci->sc_stage.gc_inode_ptr = NULL;
		if (mode == SC_FLUSH_DAT) {
			nilfs_sc_cstage_set(sci, NILFS_ST_DAT);
			goto dat_stage;
		}
		nilfs_sc_cstage_inc(sci);
		fallthrough;
	case NILFS_ST_GC:
		if (nilfs_doing_gc()) {
			head = &sci->sc_gc_inodes;
			ii = list_prepare_entry(sci->sc_stage.gc_inode_ptr,
						head, i_dirty);
			list_for_each_entry_continue(ii, head, i_dirty) {
				err = nilfs_segctor_scan_file(
					sci, &ii->vfs_inode,
					&nilfs_sc_file_ops);
				if (unlikely(err)) {
					sci->sc_stage.gc_inode_ptr = list_entry(
						ii->i_dirty.prev,
						struct nilfs_inode_info,
						i_dirty);
					goto break_or_fail;
				}
				set_bit(NILFS_I_COLLECTED, &ii->i_state);
			}
			sci->sc_stage.gc_inode_ptr = NULL;
		}
		nilfs_sc_cstage_inc(sci);
		fallthrough;
	case NILFS_ST_FILE:
		head = &sci->sc_dirty_files;
		ii = list_prepare_entry(sci->sc_stage.dirty_file_ptr, head,
					i_dirty);
		list_for_each_entry_continue(ii, head, i_dirty) {
			clear_bit(NILFS_I_DIRTY, &ii->i_state);

			err = nilfs_segctor_scan_file(sci, &ii->vfs_inode,
						      &nilfs_sc_file_ops);
			if (unlikely(err)) {
				sci->sc_stage.dirty_file_ptr =
					list_entry(ii->i_dirty.prev,
						   struct nilfs_inode_info,
						   i_dirty);
				goto break_or_fail;
			}
			/* sci->sc_stage.dirty_file_ptr = NILFS_I(inode); */
			/* XXX: required ? */
		}
		sci->sc_stage.dirty_file_ptr = NULL;
		if (mode == SC_FLUSH_FILE) {
			nilfs_sc_cstage_set(sci, NILFS_ST_DONE);
			return 0;
		}
		nilfs_sc_cstage_inc(sci);
		sci->sc_stage.flags |= NILFS_CF_IFILE_STARTED;
		fallthrough;
	case NILFS_ST_IFILE:
		err = nilfs_segctor_scan_file(sci, sci->sc_root->ifile,
					      &nilfs_sc_file_ops);
		if (unlikely(err))
			break;
		nilfs_sc_cstage_inc(sci);
		/* Creating a checkpoint */
		err = nilfs_segctor_create_checkpoint(sci);
		if (unlikely(err))
			break;
		fallthrough;
	case NILFS_ST_CPFILE:
		err = nilfs_segctor_scan_file(sci, nilfs->ns_cpfile,
					      &nilfs_sc_file_ops);
		if (unlikely(err))
			break;
		nilfs_sc_cstage_inc(sci);
		fallthrough;
	case NILFS_ST_SUFILE:
		err = nilfs_sufile_freev(nilfs->ns_sufile, sci->sc_freesegs,
					 sci->sc_nfreesegs, &ndone);
		if (unlikely(err)) {
			nilfs_sufile_cancel_freev(nilfs->ns_sufile,
						  sci->sc_freesegs, ndone,
						  NULL);
			break;
		}
		sci->sc_stage.flags |= NILFS_CF_SUFREED;

		err = nilfs_segctor_scan_file(sci, nilfs->ns_sufile,
					      &nilfs_sc_file_ops);
		if (unlikely(err))
			break;
		nilfs_sc_cstage_inc(sci);
		fallthrough;
	case NILFS_ST_DAT:
 dat_stage:
		err = nilfs_segctor_scan_file(sci, nilfs->ns_dat,
					      &nilfs_sc_dat_ops);
		if (unlikely(err))
			break;
		if (mode == SC_FLUSH_DAT) {
			nilfs_sc_cstage_set(sci, NILFS_ST_DONE);
			return 0;
		}
		nilfs_sc_cstage_inc(sci);
		fallthrough;
	case NILFS_ST_SR:
		if (mode == SC_LSEG_SR) {
			/* Appending a super root */
			err = nilfs_segctor_add_super_root(sci);
			if (unlikely(err))
				break;
		}
		/* End of a logical segment */
		sci->sc_curseg->sb_sum.flags |= NILFS_SS_LOGEND;
		nilfs_sc_cstage_set(sci, NILFS_ST_DONE);
		return 0;
	case NILFS_ST_DSYNC:
 dsync_mode:
		sci->sc_curseg->sb_sum.flags |= NILFS_SS_SYNDT;
		ii = sci->sc_dsync_inode;
		if (!test_bit(NILFS_I_BUSY, &ii->i_state))
			break;

		err = nilfs_segctor_scan_file_dsync(sci, &ii->vfs_inode);
		if (unlikely(err))
			break;
		sci->sc_curseg->sb_sum.flags |= NILFS_SS_LOGEND;
		nilfs_sc_cstage_set(sci, NILFS_ST_DONE);
		return 0;
	case NILFS_ST_DONE:
		return 0;
	default:
		BUG();
	}

 break_or_fail:
	return err;
}

/**
 * nilfs_segctor_begin_construction - setup segment buffer to make a new log
 * @sci: nilfs_sc_info
 * @nilfs: nilfs object
 */
static int nilfs_segctor_begin_construction(struct nilfs_sc_info *sci,
					    struct the_nilfs *nilfs)
{
	struct nilfs_segment_buffer *segbuf, *prev;
	__u64 nextnum;
	int err, alloc = 0;

	segbuf = nilfs_segbuf_new(sci->sc_super);
	if (unlikely(!segbuf))
		return -ENOMEM;

	if (list_empty(&sci->sc_write_logs)) {
		nilfs_segbuf_map(segbuf, nilfs->ns_segnum,
				 nilfs->ns_pseg_offset, nilfs);
		if (segbuf->sb_rest_blocks < NILFS_PSEG_MIN_BLOCKS) {
			nilfs_shift_to_next_segment(nilfs);
			nilfs_segbuf_map(segbuf, nilfs->ns_segnum, 0, nilfs);
		}

		segbuf->sb_sum.seg_seq = nilfs->ns_seg_seq;
		nextnum = nilfs->ns_nextnum;

		if (nilfs->ns_segnum == nilfs->ns_nextnum)
			/* Start from the head of a new full segment */
			alloc++;
	} else {
		/* Continue logs */
		prev = NILFS_LAST_SEGBUF(&sci->sc_write_logs);
		nilfs_segbuf_map_cont(segbuf, prev);
		segbuf->sb_sum.seg_seq = prev->sb_sum.seg_seq;
		nextnum = prev->sb_nextnum;

		if (segbuf->sb_rest_blocks < NILFS_PSEG_MIN_BLOCKS) {
			nilfs_segbuf_map(segbuf, prev->sb_nextnum, 0, nilfs);
			segbuf->sb_sum.seg_seq++;
			alloc++;
		}
	}

	err = nilfs_sufile_mark_dirty(nilfs->ns_sufile, segbuf->sb_segnum);
	if (err)
		goto failed;

	if (alloc) {
		err = nilfs_sufile_alloc(nilfs->ns_sufile, &nextnum);
		if (err)
			goto failed;
	}
	nilfs_segbuf_set_next_segnum(segbuf, nextnum, nilfs);

	BUG_ON(!list_empty(&sci->sc_segbufs));
	list_add_tail(&segbuf->sb_list, &sci->sc_segbufs);
	sci->sc_segbuf_nblocks = segbuf->sb_rest_blocks;
	return 0;

 failed:
	nilfs_segbuf_free(segbuf);
	return err;
}

static int nilfs_segctor_extend_segments(struct nilfs_sc_info *sci,
					 struct the_nilfs *nilfs, int nadd)
{
	struct nilfs_segment_buffer *segbuf, *prev;
	struct inode *sufile = nilfs->ns_sufile;
	__u64 nextnextnum;
	LIST_HEAD(list);
	int err, ret, i;

	prev = NILFS_LAST_SEGBUF(&sci->sc_segbufs);
	/*
	 * Since the segment specified with nextnum might be allocated during
	 * the previous construction, the buffer including its segusage may
	 * not be dirty.  The following call ensures that the buffer is dirty
	 * and will pin the buffer on memory until the sufile is written.
	 */
	err = nilfs_sufile_mark_dirty(sufile, prev->sb_nextnum);
	if (unlikely(err))
		return err;

	for (i = 0; i < nadd; i++) {
		/* extend segment info */
		err = -ENOMEM;
		segbuf = nilfs_segbuf_new(sci->sc_super);
		if (unlikely(!segbuf))
			goto failed;

		/* map this buffer to region of segment on-disk */
		nilfs_segbuf_map(segbuf, prev->sb_nextnum, 0, nilfs);
		sci->sc_segbuf_nblocks += segbuf->sb_rest_blocks;

		/* allocate the next next full segment */
		err = nilfs_sufile_alloc(sufile, &nextnextnum);
		if (unlikely(err))
			goto failed_segbuf;

		segbuf->sb_sum.seg_seq = prev->sb_sum.seg_seq + 1;
		nilfs_segbuf_set_next_segnum(segbuf, nextnextnum, nilfs);

		list_add_tail(&segbuf->sb_list, &list);
		prev = segbuf;
	}
	list_splice_tail(&list, &sci->sc_segbufs);
	return 0;

 failed_segbuf:
	nilfs_segbuf_free(segbuf);
 failed:
	list_for_each_entry(segbuf, &list, sb_list) {
		ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum);
		WARN_ON(ret); /* never fails */
	}
	nilfs_destroy_logs(&list);
	return err;
}

static void nilfs_free_incomplete_logs(struct list_head *logs,
				       struct the_nilfs *nilfs)
{
	struct nilfs_segment_buffer *segbuf, *prev;
	struct inode *sufile = nilfs->ns_sufile;
	int ret;

	segbuf = NILFS_FIRST_SEGBUF(logs);
	if (nilfs->ns_nextnum != segbuf->sb_nextnum) {
		ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum);
		WARN_ON(ret); /* never fails */
	}
	if (atomic_read(&segbuf->sb_err)) {
		/* Case 1: The first segment failed */
		if (segbuf->sb_pseg_start != segbuf->sb_fseg_start)
			/*
			 * Case 1a: Partial segment appended into an existing
			 * segment
			 */
			nilfs_terminate_segment(nilfs, segbuf->sb_fseg_start,
						segbuf->sb_fseg_end);
		else /* Case 1b: New full segment */
			set_nilfs_discontinued(nilfs);
	}

	prev = segbuf;
	list_for_each_entry_continue(segbuf, logs, sb_list) {
		if (prev->sb_nextnum != segbuf->sb_nextnum) {
			ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum);
			WARN_ON(ret); /* never fails */
		}
		if (atomic_read(&segbuf->sb_err) &&
		    segbuf->sb_segnum != nilfs->ns_nextnum)
			/* Case 2: extended segment (!= next) failed */
			nilfs_sufile_set_error(sufile, segbuf->sb_segnum);
		prev = segbuf;
	}
}

static void nilfs_segctor_update_segusage(struct nilfs_sc_info *sci,
					  struct inode *sufile)
{
	struct nilfs_segment_buffer *segbuf;
	unsigned long live_blocks;
	int ret;

	list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) {
		live_blocks = segbuf->sb_sum.nblocks +
			(segbuf->sb_pseg_start - segbuf->sb_fseg_start);
		ret = nilfs_sufile_set_segment_usage(sufile, segbuf->sb_segnum,
						     live_blocks,
						     sci->sc_seg_ctime);
		WARN_ON(ret); /* always succeeds because the segusage is dirty */
	}
}

static void nilfs_cancel_segusage(struct list_head *logs, struct inode *sufile)
{
	struct nilfs_segment_buffer *segbuf;
	int ret;

	segbuf = NILFS_FIRST_SEGBUF(logs);
	ret = nilfs_sufile_set_segment_usage(sufile, segbuf->sb_segnum,
					     segbuf->sb_pseg_start -
					     segbuf->sb_fseg_start, 0);
	WARN_ON(ret); /* always succeeds because the segusage is dirty */

	list_for_each_entry_continue(segbuf, logs, sb_list) {
		ret = nilfs_sufile_set_segment_usage(sufile, segbuf->sb_segnum,
						     0, 0);
		WARN_ON(ret); /* always succeeds */
	}
}

static void nilfs_segctor_truncate_segments(struct nilfs_sc_info *sci,
					    struct nilfs_segment_buffer *last,
					    struct inode *sufile)
{
	struct nilfs_segment_buffer *segbuf = last;
	int ret;

	list_for_each_entry_continue(segbuf, &sci->sc_segbufs, sb_list) {
		sci->sc_segbuf_nblocks -= segbuf->sb_rest_blocks;
		ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum);
		WARN_ON(ret);
	}
	nilfs_truncate_logs(&sci->sc_segbufs, last);
}

static int nilfs_segctor_collect(struct nilfs_sc_info *sci,
				 struct the_nilfs *nilfs, int mode)
{
	struct nilfs_cstage prev_stage = sci->sc_stage;
	int err, nadd = 1;

	/* Collection retry loop */
	for (;;) {
		sci->sc_nblk_this_inc = 0;
		sci->sc_curseg = NILFS_FIRST_SEGBUF(&sci->sc_segbufs);

		err = nilfs_segctor_reset_segment_buffer(sci);
		if (unlikely(err))
			goto failed;

		err = nilfs_segctor_collect_blocks(sci, mode);
		sci->sc_nblk_this_inc += sci->sc_curseg->sb_sum.nblocks;
		if (!err)
			break;

		if (unlikely(err != -E2BIG))
			goto failed;

		/* The current segment is filled up */
		if (mode != SC_LSEG_SR ||
		    nilfs_sc_cstage_get(sci) < NILFS_ST_CPFILE)
			break;

		nilfs_clear_logs(&sci->sc_segbufs);

		if (sci->sc_stage.flags & NILFS_CF_SUFREED) {
			err = nilfs_sufile_cancel_freev(nilfs->ns_sufile,
							sci->sc_freesegs,
							sci->sc_nfreesegs,
							NULL);
			WARN_ON(err); /* do not happen */
			sci->sc_stage.flags &= ~NILFS_CF_SUFREED;
		}

		err = nilfs_segctor_extend_segments(sci, nilfs, nadd);
		if (unlikely(err))
			return err;

		nadd = min_t(int, nadd << 1, SC_MAX_SEGDELTA);
		sci->sc_stage = prev_stage;
	}
	nilfs_segctor_zeropad_segsum(sci);
	nilfs_segctor_truncate_segments(sci, sci->sc_curseg, nilfs->ns_sufile);
	return 0;

 failed:
	return err;
}
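
/*
 * Note that the retry loop above grows the number of segments appended
 * per retry geometrically: nadd doubles on each pass (1, 2, 4, ...),
 * capped at SC_MAX_SEGDELTA (64), so a construction that keeps
 * overflowing its segment buffers converges within a bounded number of
 * retries.
 */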

static void nilfs_list_replace_buffer(struct buffer_head *old_bh,
				      struct buffer_head *new_bh)
{
	BUG_ON(!list_empty(&new_bh->b_assoc_buffers));

	list_replace_init(&old_bh->b_assoc_buffers, &new_bh->b_assoc_buffers);
	/* The caller must release old_bh */
}

static int
nilfs_segctor_update_payload_blocknr(struct nilfs_sc_info *sci,
				     struct nilfs_segment_buffer *segbuf,
				     int mode)
{
	struct inode *inode = NULL;
	sector_t blocknr;
	unsigned long nfinfo = segbuf->sb_sum.nfinfo;
	unsigned long nblocks = 0, ndatablk = 0;
	const struct nilfs_sc_operations *sc_op = NULL;
	struct nilfs_segsum_pointer ssp;
	struct nilfs_finfo *finfo = NULL;
	union nilfs_binfo binfo;
	struct buffer_head *bh, *bh_org;
	ino_t ino = 0;
	int err = 0;

	if (!nfinfo)
		goto out;

	blocknr = segbuf->sb_pseg_start + segbuf->sb_sum.nsumblk;
	ssp.bh = NILFS_SEGBUF_FIRST_BH(&segbuf->sb_segsum_buffers);
	ssp.offset = sizeof(struct nilfs_segment_summary);

	list_for_each_entry(bh, &segbuf->sb_payload_buffers, b_assoc_buffers) {
		if (bh == segbuf->sb_super_root)
			break;
		if (!finfo) {
			finfo = nilfs_segctor_map_segsum_entry(
				sci, &ssp, sizeof(*finfo));
			ino = le64_to_cpu(finfo->fi_ino);
			nblocks = le32_to_cpu(finfo->fi_nblocks);
			ndatablk = le32_to_cpu(finfo->fi_ndatablk);

			inode = bh->b_folio->mapping->host;

			if (mode == SC_LSEG_DSYNC)
				sc_op = &nilfs_sc_dsync_ops;
			else if (ino == NILFS_DAT_INO)
				sc_op = &nilfs_sc_dat_ops;
			else /* file blocks */
				sc_op = &nilfs_sc_file_ops;
		}
		bh_org = bh;
		get_bh(bh_org);
		err = nilfs_bmap_assign(NILFS_I(inode)->i_bmap, &bh, blocknr,
					&binfo);
		if (bh != bh_org)
			nilfs_list_replace_buffer(bh_org, bh);
		brelse(bh_org);
		if (unlikely(err))
			goto failed_bmap;

		if (ndatablk > 0)
			sc_op->write_data_binfo(sci, &ssp, &binfo);
		else
			sc_op->write_node_binfo(sci, &ssp, &binfo);

		blocknr++;
		if (--nblocks == 0) {
			finfo = NULL;
			if (--nfinfo == 0)
				break;
		} else if (ndatablk > 0)
			ndatablk--;
	}
 out:
	return 0;

 failed_bmap:
	return err;
}

static int nilfs_segctor_assign(struct nilfs_sc_info *sci, int mode)
{
	struct nilfs_segment_buffer *segbuf;
	int err;

	list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) {
		err = nilfs_segctor_update_payload_blocknr(sci, segbuf, mode);
		if (unlikely(err))
			return err;
		nilfs_segbuf_fill_in_segsum(segbuf);
	}
	return 0;
}

static void nilfs_begin_page_io(struct page *page)
{
	if (!page || PageWriteback(page))
		/*
		 * For split b-tree node pages, this function may be called
		 * twice.  We ignore the 2nd or later calls by this check.
		 */
		return;

	lock_page(page);
	clear_page_dirty_for_io(page);
	set_page_writeback(page);
	unlock_page(page);
}

static void nilfs_segctor_prepare_write(struct nilfs_sc_info *sci)
{
	struct nilfs_segment_buffer *segbuf;
	struct page *bd_page = NULL, *fs_page = NULL;

	list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) {
		struct buffer_head *bh;

		list_for_each_entry(bh, &segbuf->sb_segsum_buffers,
				    b_assoc_buffers) {
			if (bh->b_page != bd_page) {
				if (bd_page) {
					lock_page(bd_page);
					wait_on_page_writeback(bd_page);
					clear_page_dirty_for_io(bd_page);
					set_page_writeback(bd_page);
					unlock_page(bd_page);
				}
				bd_page = bh->b_page;
			}
		}

		list_for_each_entry(bh, &segbuf->sb_payload_buffers,
				    b_assoc_buffers) {
			if (bh == segbuf->sb_super_root) {
				if (bh->b_page != bd_page) {
					lock_page(bd_page);
					wait_on_page_writeback(bd_page);
					clear_page_dirty_for_io(bd_page);
					set_page_writeback(bd_page);
					unlock_page(bd_page);
					bd_page = bh->b_page;
				}
				break;
			}
			set_buffer_async_write(bh);
			if (bh->b_page != fs_page) {
				nilfs_begin_page_io(fs_page);
				fs_page = bh->b_page;
			}
		}
	}
	if (bd_page) {
		lock_page(bd_page);
		wait_on_page_writeback(bd_page);
		clear_page_dirty_for_io(bd_page);
		set_page_writeback(bd_page);
		unlock_page(bd_page);
	}
	nilfs_begin_page_io(fs_page);
}

static int nilfs_segctor_write(struct nilfs_sc_info *sci,
			       struct the_nilfs *nilfs)
{
	int ret;

	ret = nilfs_write_logs(&sci->sc_segbufs, nilfs);
	list_splice_tail_init(&sci->sc_segbufs, &sci->sc_write_logs);
	return ret;
}
1745
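/**
 * nilfs_end_page_io - finish writeback of a page
 * @page: page that finished I/O (may be NULL)
 * @err: I/O error code, or zero on success
 *
 * On failure, the page is redirtied and flagged with an error so that
 * the data is not lost.  B-tree node pages, which can be shared among
 * logs, receive special handling.
 */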
static void nilfs_end_page_io(struct page *page, int err)
{
	if (!page)
		return;

	if (buffer_nilfs_node(page_buffers(page)) && !PageWriteback(page)) {
		/*
		 * For b-tree node pages, this function may be called twice
		 * or more because they might be split in a segment.
		 */
		if (PageDirty(page)) {
			/*
			 * For pages holding split b-tree node buffers, dirty
			 * flag on the buffers may be cleared discretely.
			 * In that case, the page is once redirtied for
			 * remaining buffers, and it must be cancelled if
			 * all the buffers get cleaned later.
			 */
			lock_page(page);
			if (nilfs_page_buffers_clean(page))
				__nilfs_clear_page_dirty(page);
			unlock_page(page);
		}
		return;
	}

	if (!err) {
		if (!nilfs_page_buffers_clean(page))
			__set_page_dirty_nobuffers(page);
		ClearPageError(page);
	} else {
		__set_page_dirty_nobuffers(page);
		SetPageError(page);
	}

	end_page_writeback(page);
}

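/**
 * nilfs_abort_logs - abort in-flight logs after a write failure
 * @logs: list of segment buffers to abort
 * @err: error code to propagate to the page cleanup
 */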
static void nilfs_abort_logs(struct list_head *logs, int err)
{
	struct nilfs_segment_buffer *segbuf;
	struct page *bd_page = NULL, *fs_page = NULL;
	struct buffer_head *bh;

	if (list_empty(logs))
		return;

	list_for_each_entry(segbuf, logs, sb_list) {
		list_for_each_entry(bh, &segbuf->sb_segsum_buffers,
				    b_assoc_buffers) {
			clear_buffer_uptodate(bh);
			if (bh->b_page != bd_page) {
				if (bd_page)
					end_page_writeback(bd_page);
				bd_page = bh->b_page;
			}
		}

		list_for_each_entry(bh, &segbuf->sb_payload_buffers,
				    b_assoc_buffers) {
			if (bh == segbuf->sb_super_root) {
				clear_buffer_uptodate(bh);
				if (bh->b_page != bd_page) {
					end_page_writeback(bd_page);
					bd_page = bh->b_page;
				}
				break;
			}
			clear_buffer_async_write(bh);
			if (bh->b_page != fs_page) {
				nilfs_end_page_io(fs_page, err);
				fs_page = bh->b_page;
			}
		}
	}
	if (bd_page)
		end_page_writeback(bd_page);

	nilfs_end_page_io(fs_page, err);
}

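/**
 * nilfs_segctor_abort_construction - cancel an unfinished log write
 * @sci: segment constructor object
 * @nilfs: nilfs object
 * @err: error code to propagate
 *
 * This waits for already submitted logs, aborts them, rolls back the
 * segment usage updates, and frees incomplete segments.
 */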
static void nilfs_segctor_abort_construction(struct nilfs_sc_info *sci,
					     struct the_nilfs *nilfs, int err)
{
	LIST_HEAD(logs);
	int ret;

	list_splice_tail_init(&sci->sc_write_logs, &logs);
	ret = nilfs_wait_on_logs(&logs);
	nilfs_abort_logs(&logs, ret ? : err);

	list_splice_tail_init(&sci->sc_segbufs, &logs);
	nilfs_cancel_segusage(&logs, nilfs->ns_sufile);
	nilfs_free_incomplete_logs(&logs, nilfs);

	if (sci->sc_stage.flags & NILFS_CF_SUFREED) {
		ret = nilfs_sufile_cancel_freev(nilfs->ns_sufile,
						sci->sc_freesegs,
						sci->sc_nfreesegs,
						NULL);
		WARN_ON(ret); /* should not happen */
	}

	nilfs_destroy_logs(&logs);
}

static void nilfs_set_next_segment(struct the_nilfs *nilfs,
				   struct nilfs_segment_buffer *segbuf)
{
	nilfs->ns_segnum = segbuf->sb_segnum;
	nilfs->ns_nextnum = segbuf->sb_nextnum;
	nilfs->ns_pseg_offset = segbuf->sb_pseg_start - segbuf->sb_fseg_start
		+ segbuf->sb_sum.nblocks;
	nilfs->ns_seg_seq = segbuf->sb_sum.seg_seq;
	nilfs->ns_ctime = segbuf->sb_sum.ctime;
}

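/**
 * nilfs_segctor_complete_write - clean up after log writes have completed
 * @sci: segment constructor object
 *
 * This marks the written buffers up to date, ends writeback on their
 * pages, updates the pointers to the latest segment and super root,
 * and adjusts the segment constructor flags accordingly.
 */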
static void nilfs_segctor_complete_write(struct nilfs_sc_info *sci)
{
	struct nilfs_segment_buffer *segbuf;
	struct page *bd_page = NULL, *fs_page = NULL;
	struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
	int update_sr = false;

	list_for_each_entry(segbuf, &sci->sc_write_logs, sb_list) {
		struct buffer_head *bh;

		list_for_each_entry(bh, &segbuf->sb_segsum_buffers,
				    b_assoc_buffers) {
			set_buffer_uptodate(bh);
			clear_buffer_dirty(bh);
			if (bh->b_page != bd_page) {
				if (bd_page)
					end_page_writeback(bd_page);
				bd_page = bh->b_page;
			}
		}
		/*
		 * We assume that the buffers which belong to the same page
		 * continue over the buffer list.
		 * Under this assumption, the last BHs of pages are
		 * identifiable by the discontinuity of bh->b_page
		 * (page != fs_page).
		 *
		 * For B-tree node blocks, however, this assumption is not
		 * guaranteed.  The cleanup code of B-tree node pages needs
		 * special care.
		 */
		list_for_each_entry(bh, &segbuf->sb_payload_buffers,
				    b_assoc_buffers) {
			const unsigned long set_bits = BIT(BH_Uptodate);
			const unsigned long clear_bits =
				(BIT(BH_Dirty) | BIT(BH_Async_Write) |
				 BIT(BH_Delay) | BIT(BH_NILFS_Volatile) |
				 BIT(BH_NILFS_Redirected));

			if (bh == segbuf->sb_super_root) {
				set_buffer_uptodate(bh);
				clear_buffer_dirty(bh);
				if (bh->b_page != bd_page) {
					end_page_writeback(bd_page);
					bd_page = bh->b_page;
				}
				update_sr = true;
				break;
			}
			set_mask_bits(&bh->b_state, clear_bits, set_bits);
			if (bh->b_page != fs_page) {
				nilfs_end_page_io(fs_page, 0);
				fs_page = bh->b_page;
			}
		}

		if (!nilfs_segbuf_simplex(segbuf)) {
			if (segbuf->sb_sum.flags & NILFS_SS_LOGBGN) {
				set_bit(NILFS_SC_UNCLOSED, &sci->sc_flags);
				sci->sc_lseg_stime = jiffies;
			}
			if (segbuf->sb_sum.flags & NILFS_SS_LOGEND)
				clear_bit(NILFS_SC_UNCLOSED, &sci->sc_flags);
		}
	}
	/*
	 * Since pages may continue over multiple segment buffers,
	 * the end of the last page must be checked outside of the loop.
	 */
	if (bd_page)
		end_page_writeback(bd_page);

	nilfs_end_page_io(fs_page, 0);

	nilfs_drop_collected_inodes(&sci->sc_dirty_files);

	if (nilfs_doing_gc())
		nilfs_drop_collected_inodes(&sci->sc_gc_inodes);
	else
		nilfs->ns_nongc_ctime = sci->sc_seg_ctime;

	sci->sc_nblk_inc += sci->sc_nblk_this_inc;

	segbuf = NILFS_LAST_SEGBUF(&sci->sc_write_logs);
	nilfs_set_next_segment(nilfs, segbuf);

	if (update_sr) {
		nilfs->ns_flushed_device = 0;
		nilfs_set_last_segment(nilfs, segbuf->sb_pseg_start,
				       segbuf->sb_sum.seg_seq, nilfs->ns_cno++);

		clear_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags);
		clear_bit(NILFS_SC_DIRTY, &sci->sc_flags);
		set_bit(NILFS_SC_SUPER_ROOT, &sci->sc_flags);
		nilfs_segctor_clear_metadata_dirty(sci);
	} else
		clear_bit(NILFS_SC_SUPER_ROOT, &sci->sc_flags);
}

static int nilfs_segctor_wait(struct nilfs_sc_info *sci)
{
	int ret;

	ret = nilfs_wait_on_logs(&sci->sc_write_logs);
	if (!ret) {
		nilfs_segctor_complete_write(sci);
		nilfs_destroy_logs(&sci->sc_write_logs);
	}
	return ret;
}

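/**
 * nilfs_segctor_collect_dirty_files - queue dirty inodes for collection
 * @sci: segment constructor object
 * @nilfs: nilfs object
 *
 * Every inode on the ns_dirty_files list gets its on-disk inode block
 * loaded and redirtied, and is then moved to the segment constructor's
 * sc_dirty_files list.
 */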
static int nilfs_segctor_collect_dirty_files(struct nilfs_sc_info *sci,
					     struct the_nilfs *nilfs)
{
	struct nilfs_inode_info *ii, *n;
	struct inode *ifile = sci->sc_root->ifile;

	spin_lock(&nilfs->ns_inode_lock);
 retry:
	list_for_each_entry_safe(ii, n, &nilfs->ns_dirty_files, i_dirty) {
		if (!ii->i_bh) {
			struct buffer_head *ibh;
			int err;

			spin_unlock(&nilfs->ns_inode_lock);
			err = nilfs_ifile_get_inode_block(
				ifile, ii->vfs_inode.i_ino, &ibh);
			if (unlikely(err)) {
				nilfs_warn(sci->sc_super,
					   "log writer: error %d getting inode block (ino=%lu)",
					   err, ii->vfs_inode.i_ino);
				return err;
			}
			spin_lock(&nilfs->ns_inode_lock);
			if (likely(!ii->i_bh))
				ii->i_bh = ibh;
			else
				brelse(ibh);
			goto retry;
		}

		/* Always redirty the buffer to avoid race condition */
		mark_buffer_dirty(ii->i_bh);
		nilfs_mdt_mark_dirty(ifile);

		clear_bit(NILFS_I_QUEUED, &ii->i_state);
		set_bit(NILFS_I_BUSY, &ii->i_state);
		list_move_tail(&ii->i_dirty, &sci->sc_dirty_files);
	}
	spin_unlock(&nilfs->ns_inode_lock);

	return 0;
}

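/**
 * nilfs_segctor_drop_written_files - release inodes that were written out
 * @sci: segment constructor object
 * @nilfs: nilfs object
 *
 * Inodes whose blocks were all written are unqueued.  iput() is
 * deferred to a work item when it could deadlock, i.e. for unlinked
 * inodes or while the mount is not yet finished.
 */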
static void nilfs_segctor_drop_written_files(struct nilfs_sc_info *sci,
					     struct the_nilfs *nilfs)
{
	struct nilfs_inode_info *ii, *n;
	int during_mount = !(sci->sc_super->s_flags & SB_ACTIVE);
	int defer_iput = false;

	spin_lock(&nilfs->ns_inode_lock);
	list_for_each_entry_safe(ii, n, &sci->sc_dirty_files, i_dirty) {
		if (!test_and_clear_bit(NILFS_I_UPDATED, &ii->i_state) ||
		    test_bit(NILFS_I_DIRTY, &ii->i_state))
			continue;

		clear_bit(NILFS_I_BUSY, &ii->i_state);
		brelse(ii->i_bh);
		ii->i_bh = NULL;
		list_del_init(&ii->i_dirty);
		if (!ii->vfs_inode.i_nlink || during_mount) {
			/*
			 * Defer calling iput() to avoid deadlocks if
			 * i_nlink == 0 or mount is not yet finished.
			 */
			list_add_tail(&ii->i_dirty, &sci->sc_iput_queue);
			defer_iput = true;
		} else {
			spin_unlock(&nilfs->ns_inode_lock);
			iput(&ii->vfs_inode);
			spin_lock(&nilfs->ns_inode_lock);
		}
	}
	spin_unlock(&nilfs->ns_inode_lock);

	if (defer_iput)
		schedule_work(&sci->sc_iput_work);
}

/**
 * nilfs_segctor_do_construct - form logs and write them out
 * @sci: segment constructor object
 * @mode: mode of log forming
 *
 * Main procedure of the segment constructor.
 */
static int nilfs_segctor_do_construct(struct nilfs_sc_info *sci, int mode)
{
	struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
	int err;

	if (sb_rdonly(sci->sc_super))
		return -EROFS;

	nilfs_sc_cstage_set(sci, NILFS_ST_INIT);
	sci->sc_cno = nilfs->ns_cno;

	err = nilfs_segctor_collect_dirty_files(sci, nilfs);
	if (unlikely(err))
		goto out;

	if (nilfs_test_metadata_dirty(nilfs, sci->sc_root))
		set_bit(NILFS_SC_DIRTY, &sci->sc_flags);

	if (nilfs_segctor_clean(sci))
		goto out;

	do {
		sci->sc_stage.flags &= ~NILFS_CF_HISTORY_MASK;

		err = nilfs_segctor_begin_construction(sci, nilfs);
		if (unlikely(err))
			goto out;

		/* Update time stamp */
		sci->sc_seg_ctime = ktime_get_real_seconds();

		err = nilfs_segctor_collect(sci, nilfs, mode);
		if (unlikely(err))
			goto failed;

		/* Avoid empty segment */
		if (nilfs_sc_cstage_get(sci) == NILFS_ST_DONE &&
		    nilfs_segbuf_empty(sci->sc_curseg)) {
			nilfs_segctor_abort_construction(sci, nilfs, 1);
			goto out;
		}

		err = nilfs_segctor_assign(sci, mode);
		if (unlikely(err))
			goto failed;

		if (sci->sc_stage.flags & NILFS_CF_IFILE_STARTED)
			nilfs_segctor_fill_in_file_bmap(sci);

		if (mode == SC_LSEG_SR &&
		    nilfs_sc_cstage_get(sci) >= NILFS_ST_CPFILE) {
			err = nilfs_segctor_fill_in_checkpoint(sci);
			if (unlikely(err))
				goto failed_to_write;

			nilfs_segctor_fill_in_super_root(sci, nilfs);
		}
		nilfs_segctor_update_segusage(sci, nilfs->ns_sufile);

		/* Write partial segments */
		nilfs_segctor_prepare_write(sci);

		nilfs_add_checksums_on_logs(&sci->sc_segbufs,
					    nilfs->ns_crc_seed);

		err = nilfs_segctor_write(sci, nilfs);
		if (unlikely(err))
			goto failed_to_write;

		if (nilfs_sc_cstage_get(sci) == NILFS_ST_DONE ||
		    nilfs->ns_blocksize_bits != PAGE_SHIFT) {
			/*
			 * At this point, we avoid double buffering
			 * for blocksize < pagesize because page dirty
			 * flag is turned off during write and dirty
			 * buffers are not properly collected for
			 * pages crossing over segments.
			 */
			err = nilfs_segctor_wait(sci);
			if (err)
				goto failed_to_write;
		}
	} while (nilfs_sc_cstage_get(sci) != NILFS_ST_DONE);

 out:
	nilfs_segctor_drop_written_files(sci, nilfs);
	return err;

 failed_to_write:
	if (sci->sc_stage.flags & NILFS_CF_IFILE_STARTED)
		nilfs_redirty_inodes(&sci->sc_dirty_files);

 failed:
	if (nilfs_doing_gc())
		nilfs_redirty_inodes(&sci->sc_gc_inodes);
	nilfs_segctor_abort_construction(sci, nilfs, err);
	goto out;
}

/**
 * nilfs_segctor_start_timer - set timer of background write
 * @sci: nilfs_sc_info
 *
 * If the timer has already been set, this function ignores the new
 * request.  This function MUST be called within a section locking the
 * segment semaphore.
 */
static void nilfs_segctor_start_timer(struct nilfs_sc_info *sci)
{
	spin_lock(&sci->sc_state_lock);
	if (!(sci->sc_state & NILFS_SEGCTOR_COMMIT)) {
		if (sci->sc_task) {
			sci->sc_timer.expires = jiffies + sci->sc_interval;
			add_timer(&sci->sc_timer);
		}
		sci->sc_state |= NILFS_SEGCTOR_COMMIT;
	}
	spin_unlock(&sci->sc_state_lock);
}

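/**
 * nilfs_segctor_do_flush - request a flush for the file given by bit number
 * @sci: segment constructor object
 * @bn: flush request bit number (0 for data files, inode number for
 *      metadata files)
 */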
static void nilfs_segctor_do_flush(struct nilfs_sc_info *sci, int bn)
{
	spin_lock(&sci->sc_state_lock);
	if (!(sci->sc_flush_request & BIT(bn))) {
		unsigned long prev_req = sci->sc_flush_request;

		sci->sc_flush_request |= BIT(bn);
		if (!prev_req)
			wake_up(&sci->sc_wait_daemon);
	}
	spin_unlock(&sci->sc_state_lock);
}

/**
 * nilfs_flush_segment - trigger a segment construction for resource control
 * @sb: super block
 * @ino: inode number of the file to be flushed out.
 */
void nilfs_flush_segment(struct super_block *sb, ino_t ino)
{
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct nilfs_sc_info *sci = nilfs->ns_writer;

	if (!sci || nilfs_doing_construction())
		return;

	/* assign bit 0 to data files */
	nilfs_segctor_do_flush(sci, NILFS_MDT_INODE(sb, ino) ? ino : 0);
}

struct nilfs_segctor_wait_request {
	wait_queue_entry_t wq;
	__u32 seq;
	int err;
	atomic_t done;
};

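/**
 * nilfs_segctor_sync - wait for the completion of a segment construction
 * @sci: segment constructor object
 *
 * This queues a wait request with the next sequence number, wakes up
 * the log writer thread, and sleeps until the request is completed or
 * a signal arrives.
 */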
static int nilfs_segctor_sync(struct nilfs_sc_info *sci)
{
	struct nilfs_segctor_wait_request wait_req;
	int err = 0;

	init_wait(&wait_req.wq);
	wait_req.err = 0;
	atomic_set(&wait_req.done, 0);
	init_waitqueue_entry(&wait_req.wq, current);

	/*
	 * To prevent a race issue where completion notifications from the
	 * log writer thread are missed, increment the request sequence count
	 * "sc_seq_request" and insert a wait queue entry using the current
	 * sequence number into the "sc_wait_request" queue at the same time
	 * within the lock section of "sc_state_lock".
	 */
	spin_lock(&sci->sc_state_lock);
	wait_req.seq = ++sci->sc_seq_request;
	add_wait_queue(&sci->sc_wait_request, &wait_req.wq);
	spin_unlock(&sci->sc_state_lock);

	wake_up(&sci->sc_wait_daemon);

	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);

		/*
		 * Synchronize only while the log writer thread is alive.
		 * Leave flushing out after the log writer thread exits to
		 * the cleanup work in nilfs_segctor_destroy().
		 */
		if (!sci->sc_task)
			break;

		if (atomic_read(&wait_req.done)) {
			err = wait_req.err;
			break;
		}
		if (!signal_pending(current)) {
			schedule();
			continue;
		}
		err = -ERESTARTSYS;
		break;
	}
	finish_wait(&sci->sc_wait_request, &wait_req.wq);
	return err;
}

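/**
 * nilfs_segctor_wakeup - wake up waiting threads with a completion result
 * @sci: segment constructor object
 * @err: error code to report to the waiters
 * @force: if true, complete all waiters regardless of sequence number
 */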
static void nilfs_segctor_wakeup(struct nilfs_sc_info *sci, int err, bool force)
{
	struct nilfs_segctor_wait_request *wrq, *n;
	unsigned long flags;

	spin_lock_irqsave(&sci->sc_wait_request.lock, flags);
	list_for_each_entry_safe(wrq, n, &sci->sc_wait_request.head, wq.entry) {
		if (!atomic_read(&wrq->done) &&
		    (force || nilfs_cnt32_ge(sci->sc_seq_done, wrq->seq))) {
			wrq->err = err;
			atomic_set(&wrq->done, 1);
		}
		if (atomic_read(&wrq->done)) {
			wrq->wq.func(&wrq->wq,
				     TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE,
				     0, NULL);
		}
	}
	spin_unlock_irqrestore(&sci->sc_wait_request.lock, flags);
}

/**
 * nilfs_construct_segment - construct a logical segment
 * @sb: super block
 *
 * Return Value: On success, 0 is returned. On errors, one of the following
 * negative error codes is returned.
 *
 * %-EROFS - Read only filesystem.
 *
 * %-EIO - I/O error
 *
 * %-ENOSPC - No space left on device (only in a panic state).
 *
 * %-ERESTARTSYS - Interrupted.
 *
 * %-ENOMEM - Insufficient memory available.
 */
int nilfs_construct_segment(struct super_block *sb)
{
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct nilfs_sc_info *sci = nilfs->ns_writer;
	struct nilfs_transaction_info *ti;

	if (sb_rdonly(sb) || unlikely(!sci))
		return -EROFS;

	/* A call inside transactions causes a deadlock. */
	BUG_ON((ti = current->journal_info) && ti->ti_magic == NILFS_TI_MAGIC);

	return nilfs_segctor_sync(sci);
}

/**
 * nilfs_construct_dsync_segment - construct a data-only logical segment
 * @sb: super block
 * @inode: inode whose data blocks should be written out
 * @start: start byte offset
 * @end: end byte offset (inclusive)
 *
 * Return Value: On success, 0 is returned. On errors, one of the following
 * negative error codes is returned.
 *
 * %-EROFS - Read only filesystem.
 *
 * %-EIO - I/O error
 *
 * %-ENOSPC - No space left on device (only in a panic state).
 *
 * %-ERESTARTSYS - Interrupted.
 *
 * %-ENOMEM - Insufficient memory available.
 */
int nilfs_construct_dsync_segment(struct super_block *sb, struct inode *inode,
				  loff_t start, loff_t end)
{
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct nilfs_sc_info *sci = nilfs->ns_writer;
	struct nilfs_inode_info *ii;
	struct nilfs_transaction_info ti;
	int err = 0;

	if (sb_rdonly(sb) || unlikely(!sci))
		return -EROFS;

	nilfs_transaction_lock(sb, &ti, 0);

	ii = NILFS_I(inode);
	if (test_bit(NILFS_I_INODE_SYNC, &ii->i_state) ||
	    nilfs_test_opt(nilfs, STRICT_ORDER) ||
	    test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags) ||
	    nilfs_discontinued(nilfs)) {
		nilfs_transaction_unlock(sb);
		err = nilfs_segctor_sync(sci);
		return err;
	}

	spin_lock(&nilfs->ns_inode_lock);
	if (!test_bit(NILFS_I_QUEUED, &ii->i_state) &&
	    !test_bit(NILFS_I_BUSY, &ii->i_state)) {
		spin_unlock(&nilfs->ns_inode_lock);
		nilfs_transaction_unlock(sb);
		return 0;
	}
	spin_unlock(&nilfs->ns_inode_lock);
	sci->sc_dsync_inode = ii;
	sci->sc_dsync_start = start;
	sci->sc_dsync_end = end;

	err = nilfs_segctor_do_construct(sci, SC_LSEG_DSYNC);
	if (!err)
		nilfs->ns_flushed_device = 0;

	nilfs_transaction_unlock(sb);
	return err;
}

#define FLUSH_FILE_BIT	(0x1)	/* data file only */
#define FLUSH_DAT_BIT	BIT(NILFS_DAT_INO)	/* DAT only */

/**
 * nilfs_segctor_accept - record accepted sequence count of log-write requests
 * @sci: segment constructor object
 */
static void nilfs_segctor_accept(struct nilfs_sc_info *sci)
{
	bool thread_is_alive;

	spin_lock(&sci->sc_state_lock);
	sci->sc_seq_accepted = sci->sc_seq_request;
	thread_is_alive = (bool)sci->sc_task;
	spin_unlock(&sci->sc_state_lock);

	/*
	 * This function does not race with the log writer thread's
	 * termination.  Therefore, deleting sc_timer, which should not be
	 * done after the log writer thread exits, can be done safely outside
	 * the area protected by sc_state_lock.
	 */
	if (thread_is_alive)
		del_timer_sync(&sci->sc_timer);
}

/**
 * nilfs_segctor_notify - notify the result of request to caller threads
 * @sci: segment constructor object
 * @mode: mode of log forming
 * @err: error code to be notified
 */
static void nilfs_segctor_notify(struct nilfs_sc_info *sci, int mode, int err)
{
	/* Clear requests (even when the construction failed) */
	spin_lock(&sci->sc_state_lock);

	if (mode == SC_LSEG_SR) {
		sci->sc_state &= ~NILFS_SEGCTOR_COMMIT;
		sci->sc_seq_done = sci->sc_seq_accepted;
		nilfs_segctor_wakeup(sci, err, false);
		sci->sc_flush_request = 0;
	} else {
		if (mode == SC_FLUSH_FILE)
			sci->sc_flush_request &= ~FLUSH_FILE_BIT;
		else if (mode == SC_FLUSH_DAT)
			sci->sc_flush_request &= ~FLUSH_DAT_BIT;

		/* re-enable timer if checkpoint creation was not done */
		if ((sci->sc_state & NILFS_SEGCTOR_COMMIT) && sci->sc_task &&
		    time_before(jiffies, sci->sc_timer.expires))
			add_timer(&sci->sc_timer);
	}
	spin_unlock(&sci->sc_state_lock);
}

/**
 * nilfs_segctor_construct - form logs and write them to disk
 * @sci: segment constructor object
 * @mode: mode of log forming
 */
static int nilfs_segctor_construct(struct nilfs_sc_info *sci, int mode)
{
	struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
	struct nilfs_super_block **sbp;
	int err = 0;

	nilfs_segctor_accept(sci);

	if (nilfs_discontinued(nilfs))
		mode = SC_LSEG_SR;
	if (!nilfs_segctor_confirm(sci))
		err = nilfs_segctor_do_construct(sci, mode);

	if (likely(!err)) {
		if (mode != SC_FLUSH_DAT)
			atomic_set(&nilfs->ns_ndirtyblks, 0);
		if (test_bit(NILFS_SC_SUPER_ROOT, &sci->sc_flags) &&
		    nilfs_discontinued(nilfs)) {
			down_write(&nilfs->ns_sem);
			err = -EIO;
			sbp = nilfs_prepare_super(sci->sc_super,
						  nilfs_sb_will_flip(nilfs));
			if (likely(sbp)) {
				nilfs_set_log_cursor(sbp[0], nilfs);
				err = nilfs_commit_super(sci->sc_super,
							 NILFS_SB_COMMIT);
			}
			up_write(&nilfs->ns_sem);
		}
	}

	nilfs_segctor_notify(sci, mode, err);
	return err;
}

static void nilfs_construction_timeout(struct timer_list *t)
{
	struct nilfs_sc_info *sci = from_timer(sci, t, sc_timer);

	wake_up_process(sci->sc_timer_task);
}

static void
nilfs_remove_written_gcinodes(struct the_nilfs *nilfs, struct list_head *head)
{
	struct nilfs_inode_info *ii, *n;

	list_for_each_entry_safe(ii, n, head, i_dirty) {
		if (!test_bit(NILFS_I_UPDATED, &ii->i_state))
			continue;
		list_del_init(&ii->i_dirty);
		truncate_inode_pages(&ii->vfs_inode.i_data, 0);
		nilfs_btnode_cache_clear(ii->i_assoc_inode->i_mapping);
		iput(&ii->vfs_inode);
	}
}

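/**
 * nilfs_clean_segments - write out GC logs and free the selected segments
 * @sb: super block instance
 * @argv: argument vector of the ioctl request for garbage collection
 * @kbufs: vector of kernel buffers holding the ioctl payload
 *
 * Return Value: On success, 0 is returned. On errors, a negative error
 * code is returned.
 */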
int nilfs_clean_segments(struct super_block *sb, struct nilfs_argv *argv,
			 void **kbufs)
{
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct nilfs_sc_info *sci = nilfs->ns_writer;
	struct nilfs_transaction_info ti;
	int err;

	if (unlikely(!sci))
		return -EROFS;

	nilfs_transaction_lock(sb, &ti, 1);

	err = nilfs_mdt_save_to_shadow_map(nilfs->ns_dat);
	if (unlikely(err))
		goto out_unlock;

	err = nilfs_ioctl_prepare_clean_segments(nilfs, argv, kbufs);
	if (unlikely(err)) {
		nilfs_mdt_restore_from_shadow_map(nilfs->ns_dat);
		goto out_unlock;
	}

	sci->sc_freesegs = kbufs[4];
	sci->sc_nfreesegs = argv[4].v_nmembs;
	list_splice_tail_init(&nilfs->ns_gc_inodes, &sci->sc_gc_inodes);

	for (;;) {
		err = nilfs_segctor_construct(sci, SC_LSEG_SR);
		nilfs_remove_written_gcinodes(nilfs, &sci->sc_gc_inodes);

		if (likely(!err))
			break;

		nilfs_warn(sb, "error %d cleaning segments", err);
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(sci->sc_interval);
	}
	if (nilfs_test_opt(nilfs, DISCARD)) {
		int ret = nilfs_discard_segments(nilfs, sci->sc_freesegs,
						 sci->sc_nfreesegs);

		if (ret) {
			nilfs_warn(sb,
				   "error %d on discard request, turning discards off for the device",
				   ret);
			nilfs_clear_opt(nilfs, DISCARD);
		}
	}

 out_unlock:
	sci->sc_freesegs = NULL;
	sci->sc_nfreesegs = 0;
	nilfs_mdt_clear_shadow_map(nilfs->ns_dat);
	nilfs_transaction_unlock(sb);
	return err;
}

static void nilfs_segctor_thread_construct(struct nilfs_sc_info *sci, int mode)
{
	struct nilfs_transaction_info ti;

	nilfs_transaction_lock(sci->sc_super, &ti, 0);
	nilfs_segctor_construct(sci, mode);

	/*
	 * Unclosed segment should be retried.  We do this using sc_timer.
	 * Timeout of sc_timer will invoke complete construction which leads
	 * to close the current logical segment.
	 */
	if (test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags))
		nilfs_segctor_start_timer(sci);

	nilfs_transaction_unlock(sci->sc_super);
}

static void nilfs_segctor_do_immediate_flush(struct nilfs_sc_info *sci)
{
	int mode = 0;

	spin_lock(&sci->sc_state_lock);
	mode = (sci->sc_flush_request & FLUSH_DAT_BIT) ?
		SC_FLUSH_DAT : SC_FLUSH_FILE;
	spin_unlock(&sci->sc_state_lock);

	if (mode) {
		nilfs_segctor_do_construct(sci, mode);

		spin_lock(&sci->sc_state_lock);
		sci->sc_flush_request &= (mode == SC_FLUSH_FILE) ?
			~FLUSH_FILE_BIT : ~FLUSH_DAT_BIT;
		spin_unlock(&sci->sc_state_lock);
	}
	clear_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags);
}

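/**
 * nilfs_segctor_flush_mode - decide the construction mode for a flush
 * @sci: segment constructor object
 *
 * Return Value: SC_FLUSH_FILE or SC_FLUSH_DAT if only the corresponding
 * flush bits are set and the current logical segment is still fresh;
 * otherwise SC_LSEG_SR to close the logical segment.
 */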
static int nilfs_segctor_flush_mode(struct nilfs_sc_info *sci)
{
	if (!test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags) ||
	    time_before(jiffies, sci->sc_lseg_stime + sci->sc_mjcp_freq)) {
		if (!(sci->sc_flush_request & ~FLUSH_FILE_BIT))
			return SC_FLUSH_FILE;
		else if (!(sci->sc_flush_request & ~FLUSH_DAT_BIT))
			return SC_FLUSH_DAT;
	}
	return SC_LSEG_SR;
}

/**
 * nilfs_segctor_thread - main loop of the segment constructor thread.
 * @arg: pointer to a struct nilfs_sc_info.
 *
 * nilfs_segctor_thread() initializes a timer and serves as a daemon
 * to execute segment constructions.
 */
static int nilfs_segctor_thread(void *arg)
{
	struct nilfs_sc_info *sci = (struct nilfs_sc_info *)arg;
	struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
	int timeout = 0;

	sci->sc_timer_task = current;
	timer_setup(&sci->sc_timer, nilfs_construction_timeout, 0);

	/* start sync. */
	sci->sc_task = current;
	wake_up(&sci->sc_wait_task); /* for nilfs_segctor_start_thread() */
	nilfs_info(sci->sc_super,
		   "segctord starting. Construction interval = %lu seconds, CP frequency < %lu seconds",
		   sci->sc_interval / HZ, sci->sc_mjcp_freq / HZ);

	spin_lock(&sci->sc_state_lock);
 loop:
	for (;;) {
		int mode;

		if (sci->sc_state & NILFS_SEGCTOR_QUIT)
			goto end_thread;

		if (timeout || sci->sc_seq_request != sci->sc_seq_done)
			mode = SC_LSEG_SR;
		else if (sci->sc_flush_request)
			mode = nilfs_segctor_flush_mode(sci);
		else
			break;

		spin_unlock(&sci->sc_state_lock);
		nilfs_segctor_thread_construct(sci, mode);
		spin_lock(&sci->sc_state_lock);
		timeout = 0;
	}

	if (freezing(current)) {
		spin_unlock(&sci->sc_state_lock);
		try_to_freeze();
		spin_lock(&sci->sc_state_lock);
	} else {
		DEFINE_WAIT(wait);
		int should_sleep = 1;

		prepare_to_wait(&sci->sc_wait_daemon, &wait,
				TASK_INTERRUPTIBLE);

		if (sci->sc_seq_request != sci->sc_seq_done)
			should_sleep = 0;
		else if (sci->sc_flush_request)
			should_sleep = 0;
		else if (sci->sc_state & NILFS_SEGCTOR_COMMIT)
			should_sleep = time_before(jiffies,
						   sci->sc_timer.expires);

		if (should_sleep) {
			spin_unlock(&sci->sc_state_lock);
			schedule();
			spin_lock(&sci->sc_state_lock);
		}
		finish_wait(&sci->sc_wait_daemon, &wait);
		timeout = ((sci->sc_state & NILFS_SEGCTOR_COMMIT) &&
			   time_after_eq(jiffies, sci->sc_timer.expires));

		if (nilfs_sb_dirty(nilfs) && nilfs_sb_need_update(nilfs))
			set_nilfs_discontinued(nilfs);
	}
	goto loop;

 end_thread:
	/* end sync. */
	sci->sc_task = NULL;
	timer_shutdown_sync(&sci->sc_timer);
	wake_up(&sci->sc_wait_task); /* for nilfs_segctor_kill_thread() */
	spin_unlock(&sci->sc_state_lock);
	return 0;
}

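/**
 * nilfs_segctor_start_thread - create and start the log writer thread
 * @sci: segment constructor object
 *
 * Return Value: 0 on success, or a negative error code on failure.
 */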
static int nilfs_segctor_start_thread(struct nilfs_sc_info *sci)
{
	struct task_struct *t;

	t = kthread_run(nilfs_segctor_thread, sci, "segctord");
	if (IS_ERR(t)) {
		int err = PTR_ERR(t);

		nilfs_err(sci->sc_super, "error %d creating segctord thread",
			  err);
		return err;
	}
	wait_event(sci->sc_wait_task, sci->sc_task != NULL);
	return 0;
}

static void nilfs_segctor_kill_thread(struct nilfs_sc_info *sci)
	__acquires(&sci->sc_state_lock)
	__releases(&sci->sc_state_lock)
{
	sci->sc_state |= NILFS_SEGCTOR_QUIT;

	while (sci->sc_task) {
		wake_up(&sci->sc_wait_daemon);
		spin_unlock(&sci->sc_state_lock);
		wait_event(sci->sc_wait_task, sci->sc_task == NULL);
		spin_lock(&sci->sc_state_lock);
	}
}

/*
 * Setup & clean-up functions
 */
static struct nilfs_sc_info *nilfs_segctor_new(struct super_block *sb,
					       struct nilfs_root *root)
{
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct nilfs_sc_info *sci;

	sci = kzalloc(sizeof(*sci), GFP_KERNEL);
	if (!sci)
		return NULL;

	sci->sc_super = sb;

	nilfs_get_root(root);
	sci->sc_root = root;

	init_waitqueue_head(&sci->sc_wait_request);
	init_waitqueue_head(&sci->sc_wait_daemon);
	init_waitqueue_head(&sci->sc_wait_task);
	spin_lock_init(&sci->sc_state_lock);
	INIT_LIST_HEAD(&sci->sc_dirty_files);
	INIT_LIST_HEAD(&sci->sc_segbufs);
	INIT_LIST_HEAD(&sci->sc_write_logs);
	INIT_LIST_HEAD(&sci->sc_gc_inodes);
	INIT_LIST_HEAD(&sci->sc_iput_queue);
	INIT_WORK(&sci->sc_iput_work, nilfs_iput_work_func);

	sci->sc_interval = HZ * NILFS_SC_DEFAULT_TIMEOUT;
	sci->sc_mjcp_freq = HZ * NILFS_SC_DEFAULT_SR_FREQ;
	sci->sc_watermark = NILFS_SC_DEFAULT_WATERMARK;

	if (nilfs->ns_interval)
		sci->sc_interval = HZ * nilfs->ns_interval;
	if (nilfs->ns_watermark)
		sci->sc_watermark = nilfs->ns_watermark;
	return sci;
}

static void nilfs_segctor_write_out(struct nilfs_sc_info *sci)
{
	int ret, retrycount = NILFS_SC_CLEANUP_RETRY;

	/*
	 * The segctord thread was stopped and its timer was removed.
	 * But some tasks remain.
	 */
	do {
		struct nilfs_transaction_info ti;

		nilfs_transaction_lock(sci->sc_super, &ti, 0);
		ret = nilfs_segctor_construct(sci, SC_LSEG_SR);
		nilfs_transaction_unlock(sci->sc_super);

		flush_work(&sci->sc_iput_work);

	} while (ret && ret != -EROFS && retrycount-- > 0);
}

/**
 * nilfs_segctor_destroy - destroy the segment constructor.
 * @sci: nilfs_sc_info
 *
 * nilfs_segctor_destroy() kills the segctord thread and frees
 * the nilfs_sc_info struct.
 * Caller must hold the segment semaphore.
 */
static void nilfs_segctor_destroy(struct nilfs_sc_info *sci)
{
	struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
	int flag;

	up_write(&nilfs->ns_segctor_sem);

	spin_lock(&sci->sc_state_lock);
	nilfs_segctor_kill_thread(sci);
	flag = ((sci->sc_state & NILFS_SEGCTOR_COMMIT) || sci->sc_flush_request
		|| sci->sc_seq_request != sci->sc_seq_done);
	spin_unlock(&sci->sc_state_lock);

	/*
	 * Forcibly wake up tasks waiting in nilfs_segctor_sync(), which can
	 * be called from delayed iput() via nilfs_evict_inode() and can race
	 * with the above log writer thread termination.
	 */
	nilfs_segctor_wakeup(sci, 0, true);

	if (flush_work(&sci->sc_iput_work))
		flag = true;

	if (flag || !nilfs_segctor_confirm(sci))
		nilfs_segctor_write_out(sci);

	if (!list_empty(&sci->sc_dirty_files)) {
		nilfs_warn(sci->sc_super,
			   "disposed unprocessed dirty file(s) when stopping log writer");
		nilfs_dispose_list(nilfs, &sci->sc_dirty_files, 1);
	}

	if (!list_empty(&sci->sc_iput_queue)) {
		nilfs_warn(sci->sc_super,
			   "disposed unprocessed inode(s) in iput queue when stopping log writer");
		nilfs_dispose_list(nilfs, &sci->sc_iput_queue, 1);
	}

	WARN_ON(!list_empty(&sci->sc_segbufs));
	WARN_ON(!list_empty(&sci->sc_write_logs));

	nilfs_put_root(sci->sc_root);

	down_write(&nilfs->ns_segctor_sem);

	kfree(sci);
}

/**
 * nilfs_attach_log_writer - attach log writer
 * @sb: super block instance
 * @root: root object of the current filesystem tree
 *
 * This allocates a log writer object, initializes it, and starts the
 * log writer.
 *
 * Return Value: On success, 0 is returned. On errors, one of the following
 * negative error codes is returned.
 *
 * %-ENOMEM - Insufficient memory available.
 */
int nilfs_attach_log_writer(struct super_block *sb, struct nilfs_root *root)
{
	struct the_nilfs *nilfs = sb->s_fs_info;
	int err;

	if (nilfs->ns_writer) {
		/*
		 * This happens if the filesystem is made read-only by
		 * __nilfs_error or nilfs_remount and then remounted
		 * read/write.  In these cases, reuse the existing writer.
		 */
		return 0;
	}

	nilfs->ns_writer = nilfs_segctor_new(sb, root);
	if (!nilfs->ns_writer)
		return -ENOMEM;

	inode_attach_wb(nilfs->ns_bdev->bd_inode, NULL);

	err = nilfs_segctor_start_thread(nilfs->ns_writer);
	if (unlikely(err))
		nilfs_detach_log_writer(sb);

	return err;
}

/**
 * nilfs_detach_log_writer - destroy log writer
 * @sb: super block instance
 *
 * This kills the log writer daemon, frees the log writer object, and
 * destroys the list of dirty files.
 */
void nilfs_detach_log_writer(struct super_block *sb)
{
	struct the_nilfs *nilfs = sb->s_fs_info;
	LIST_HEAD(garbage_list);

	down_write(&nilfs->ns_segctor_sem);
	if (nilfs->ns_writer) {
		nilfs_segctor_destroy(nilfs->ns_writer);
		nilfs->ns_writer = NULL;
	}
	set_nilfs_purging(nilfs);

	/* Force to free the list of dirty files */
	spin_lock(&nilfs->ns_inode_lock);
	if (!list_empty(&nilfs->ns_dirty_files)) {
		list_splice_init(&nilfs->ns_dirty_files, &garbage_list);
		nilfs_warn(sb,
			   "disposed unprocessed dirty file(s) when detaching log writer");
	}
	spin_unlock(&nilfs->ns_inode_lock);
	up_write(&nilfs->ns_segctor_sem);

	nilfs_dispose_list(nilfs, &garbage_list, 1);
	clear_nilfs_purging(nilfs);
}
