// SPDX-License-Identifier: GPL-2.0+
/*
 * segment.c - NILFS segment constructor.
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * Written by Ryusuke Konishi.
 *
 */

#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/bitops.h>
#include <linux/bio.h>
#include <linux/completion.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/crc32.h>
#include <linux/pagevec.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>

#include "nilfs.h"
#include "btnode.h"
#include "page.h"
#include "segment.h"
#include "sufile.h"
#include "cpfile.h"
#include "ifile.h"
#include "segbuf.h"


/*
 * Segment constructor
 */
#define SC_N_INODEVEC	16	/* Size of locally allocated inode vector */

#define SC_MAX_SEGDELTA 64	/*
				 * Upper limit of the number of segments
				 * appended in collection retry loop
				 */

/* Construction mode */
enum {
	SC_LSEG_SR = 1,	/* Make a logical segment having a super root */
	SC_LSEG_DSYNC,	/*
			 * Flush data blocks of a given file and make
			 * a logical segment without a super root.
			 */
	SC_FLUSH_FILE,	/*
			 * Flush data files, leads to segment writes without
			 * creating a checkpoint.
			 */
	SC_FLUSH_DAT,	/*
			 * Flush DAT file. This also creates segments
			 * without a checkpoint.
			 */
};

/* Stage numbers of dirty block collection */
enum {
	NILFS_ST_INIT = 0,
	NILFS_ST_GC,	/* Collecting dirty blocks for GC */
	NILFS_ST_FILE,
	NILFS_ST_IFILE,
	NILFS_ST_CPFILE,
	NILFS_ST_SUFILE,
	NILFS_ST_DAT,
	NILFS_ST_SR,	/* Super root */
	NILFS_ST_DSYNC,	/* Data sync blocks */
	NILFS_ST_DONE,
};

#define CREATE_TRACE_POINTS
#include <trace/events/nilfs2.h>

/*
 * nilfs_sc_cstage_inc(), nilfs_sc_cstage_set(), nilfs_sc_cstage_get() are
 * wrapper functions of stage count (nilfs_sc_info->sc_stage.scnt). Users of
 * the variable must use them because transition of stage count must involve
 * trace events (trace_nilfs2_collection_stage_transition).
 *
 * nilfs_sc_cstage_get() isn't required for the above purpose because it
 * doesn't produce tracepoint events. It is provided just for making the
 * intention clear.
 */
static inline void nilfs_sc_cstage_inc(struct nilfs_sc_info *sci)
{
	sci->sc_stage.scnt++;
	trace_nilfs2_collection_stage_transition(sci);
}

static inline void nilfs_sc_cstage_set(struct nilfs_sc_info *sci, int next_scnt)
{
	sci->sc_stage.scnt = next_scnt;
	trace_nilfs2_collection_stage_transition(sci);
}

static inline int nilfs_sc_cstage_get(struct nilfs_sc_info *sci)
{
	return sci->sc_stage.scnt;
}

/* State flags of collection */
#define NILFS_CF_NODE		0x0001	/* Collecting node blocks */
#define NILFS_CF_IFILE_STARTED	0x0002	/* IFILE stage has started */
#define NILFS_CF_SUFREED	0x0004	/* Segment usages have been freed */
#define NILFS_CF_HISTORY_MASK	(NILFS_CF_IFILE_STARTED | NILFS_CF_SUFREED)

/* Operations depending on the construction mode and file type */
struct nilfs_sc_operations {
	int (*collect_data)(struct nilfs_sc_info *, struct buffer_head *,
			    struct inode *);
	int (*collect_node)(struct nilfs_sc_info *, struct buffer_head *,
			    struct inode *);
	int (*collect_bmap)(struct nilfs_sc_info *, struct buffer_head *,
			    struct inode *);
	void (*write_data_binfo)(struct nilfs_sc_info *,
				 struct nilfs_segsum_pointer *,
				 union nilfs_binfo *);
	void (*write_node_binfo)(struct nilfs_sc_info *,
				 struct nilfs_segsum_pointer *,
				 union nilfs_binfo *);
};

/*
 * Other definitions
 */
static void nilfs_segctor_start_timer(struct nilfs_sc_info *);
static void nilfs_segctor_do_flush(struct nilfs_sc_info *, int);
static void nilfs_segctor_do_immediate_flush(struct nilfs_sc_info *);
static void nilfs_dispose_list(struct the_nilfs *, struct list_head *, int);

#define nilfs_cnt32_gt(a, b)   \
	(typecheck(__u32, a) && typecheck(__u32, b) &&	\
	 ((__s32)(b) - (__s32)(a) < 0))
#define nilfs_cnt32_ge(a, b)   \
	(typecheck(__u32, a) && typecheck(__u32, b) &&	\
	 ((__s32)(a) - (__s32)(b) >= 0))
#define nilfs_cnt32_lt(a, b)	nilfs_cnt32_gt(b, a)
#define nilfs_cnt32_le(a, b)	nilfs_cnt32_ge(b, a)
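
/*
 * Worked example (editor's note): these comparisons are wraparound-safe.
 * With a = 0x00000001 and b = 0xfffffffe, the plain test "a > b" is false,
 * but nilfs_cnt32_gt(a, b) computes (__s32)(b) - (__s32)(a) = -3 < 0 and
 * yields true, correctly treating "a" as a sequence number that has
 * advanced three steps past "b" modulo 2^32.
 */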

static int nilfs_prepare_segment_lock(struct super_block *sb,
				      struct nilfs_transaction_info *ti)
{
	struct nilfs_transaction_info *cur_ti = current->journal_info;
	void *save = NULL;

	if (cur_ti) {
		if (cur_ti->ti_magic == NILFS_TI_MAGIC)
			return ++cur_ti->ti_count;

		/*
		 * If the journal_info field is occupied by another FS,
		 * it is saved and will be restored on
		 * nilfs_transaction_commit().
		 */
		nilfs_warn(sb, "journal info from a different FS");
		save = current->journal_info;
	}
	if (!ti) {
		ti = kmem_cache_alloc(nilfs_transaction_cachep, GFP_NOFS);
		if (!ti)
			return -ENOMEM;
		ti->ti_flags = NILFS_TI_DYNAMIC_ALLOC;
	} else {
		ti->ti_flags = 0;
	}
	ti->ti_count = 0;
	ti->ti_save = save;
	ti->ti_magic = NILFS_TI_MAGIC;
	current->journal_info = ti;
	return 0;
}

/**
 * nilfs_transaction_begin - start indivisible file operations.
 * @sb: super block
 * @ti: nilfs_transaction_info
 * @vacancy_check: flags for vacancy rate checks
 *
 * nilfs_transaction_begin() acquires a reader/writer semaphore, called
 * the segment semaphore, to make a segment construction and write tasks
 * exclusive. The function is used with nilfs_transaction_commit() in pairs.
 * The region enclosed by these two functions can be nested. To avoid a
 * deadlock, the semaphore is only acquired or released in the outermost call.
 *
 * This function allocates a nilfs_transaction_info struct to keep context
 * information on it. It is initialized and hooked onto the current task in
 * the outermost call. If a pre-allocated struct is given to @ti, it is used
 * instead; otherwise a new struct is assigned from a slab.
 *
 * When the @vacancy_check flag is set, this function will check the amount
 * of free space, and will wait for the GC to reclaim disk space if it is low.
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error codes is returned.
 *
 * %-ENOMEM - Insufficient memory available.
 *
 * %-ENOSPC - No space left on device
 */
int nilfs_transaction_begin(struct super_block *sb,
			    struct nilfs_transaction_info *ti,
			    int vacancy_check)
{
	struct the_nilfs *nilfs;
	int ret = nilfs_prepare_segment_lock(sb, ti);
	struct nilfs_transaction_info *trace_ti;

	if (unlikely(ret < 0))
		return ret;
	if (ret > 0) {
		trace_ti = current->journal_info;

		trace_nilfs2_transaction_transition(sb, trace_ti,
				    trace_ti->ti_count, trace_ti->ti_flags,
				    TRACE_NILFS2_TRANSACTION_BEGIN);
		return 0;
	}

	sb_start_intwrite(sb);

	nilfs = sb->s_fs_info;
	down_read(&nilfs->ns_segctor_sem);
	if (vacancy_check && nilfs_near_disk_full(nilfs)) {
		up_read(&nilfs->ns_segctor_sem);
		ret = -ENOSPC;
		goto failed;
	}

	trace_ti = current->journal_info;
	trace_nilfs2_transaction_transition(sb, trace_ti, trace_ti->ti_count,
					    trace_ti->ti_flags,
					    TRACE_NILFS2_TRANSACTION_BEGIN);
	return 0;

 failed:
	ti = current->journal_info;
	current->journal_info = ti->ti_save;
	if (ti->ti_flags & NILFS_TI_DYNAMIC_ALLOC)
		kmem_cache_free(nilfs_transaction_cachep, ti);
	sb_end_intwrite(sb);
	return ret;
}
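
/*
 * Illustrative usage sketch (editor's note, not code from this file):
 * a caller typically keeps the transaction info on its stack and pairs
 * nilfs_transaction_begin() with commit or abort; do_file_operation()
 * below is a hypothetical placeholder for the enclosed operation.
 *
 *	struct nilfs_transaction_info ti;
 *	int err;
 *
 *	err = nilfs_transaction_begin(sb, &ti, 1);
 *	if (err)
 *		return err;
 *	err = do_file_operation(inode);
 *	if (unlikely(err))
 *		nilfs_transaction_abort(sb);
 *	else
 *		err = nilfs_transaction_commit(sb);
 *	return err;
 */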

/**
 * nilfs_transaction_commit - commit indivisible file operations.
 * @sb: super block
 *
 * nilfs_transaction_commit() releases the read semaphore which is
 * acquired by nilfs_transaction_begin(). This is only performed
 * in the outermost call of this function. If a commit flag is set,
 * nilfs_transaction_commit() sets a timer to start the segment
 * constructor. If a sync flag is set, it starts construction
 * directly.
 */
int nilfs_transaction_commit(struct super_block *sb)
{
	struct nilfs_transaction_info *ti = current->journal_info;
	struct the_nilfs *nilfs = sb->s_fs_info;
	int err = 0;

	BUG_ON(ti == NULL || ti->ti_magic != NILFS_TI_MAGIC);
	ti->ti_flags |= NILFS_TI_COMMIT;
	if (ti->ti_count > 0) {
		ti->ti_count--;
		trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
			    ti->ti_flags, TRACE_NILFS2_TRANSACTION_COMMIT);
		return 0;
	}
	if (nilfs->ns_writer) {
		struct nilfs_sc_info *sci = nilfs->ns_writer;

		if (ti->ti_flags & NILFS_TI_COMMIT)
			nilfs_segctor_start_timer(sci);
		if (atomic_read(&nilfs->ns_ndirtyblks) > sci->sc_watermark)
			nilfs_segctor_do_flush(sci, 0);
	}
	up_read(&nilfs->ns_segctor_sem);
	trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
			    ti->ti_flags, TRACE_NILFS2_TRANSACTION_COMMIT);

	current->journal_info = ti->ti_save;

	if (ti->ti_flags & NILFS_TI_SYNC)
		err = nilfs_construct_segment(sb);
	if (ti->ti_flags & NILFS_TI_DYNAMIC_ALLOC)
		kmem_cache_free(nilfs_transaction_cachep, ti);
	sb_end_intwrite(sb);
	return err;
}

void nilfs_transaction_abort(struct super_block *sb)
{
	struct nilfs_transaction_info *ti = current->journal_info;
	struct the_nilfs *nilfs = sb->s_fs_info;

	BUG_ON(ti == NULL || ti->ti_magic != NILFS_TI_MAGIC);
	if (ti->ti_count > 0) {
		ti->ti_count--;
		trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
			    ti->ti_flags, TRACE_NILFS2_TRANSACTION_ABORT);
		return;
	}
	up_read(&nilfs->ns_segctor_sem);

	trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
		    ti->ti_flags, TRACE_NILFS2_TRANSACTION_ABORT);

	current->journal_info = ti->ti_save;
	if (ti->ti_flags & NILFS_TI_DYNAMIC_ALLOC)
		kmem_cache_free(nilfs_transaction_cachep, ti);
	sb_end_intwrite(sb);
}

void nilfs_relax_pressure_in_lock(struct super_block *sb)
{
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct nilfs_sc_info *sci = nilfs->ns_writer;

	if (sb_rdonly(sb) || unlikely(!sci) || !sci->sc_flush_request)
		return;

	set_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags);
	up_read(&nilfs->ns_segctor_sem);

	down_write(&nilfs->ns_segctor_sem);
	if (sci->sc_flush_request &&
	    test_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags)) {
		struct nilfs_transaction_info *ti = current->journal_info;

		ti->ti_flags |= NILFS_TI_WRITER;
		nilfs_segctor_do_immediate_flush(sci);
		ti->ti_flags &= ~NILFS_TI_WRITER;
	}
	downgrade_write(&nilfs->ns_segctor_sem);
}

static void nilfs_transaction_lock(struct super_block *sb,
				   struct nilfs_transaction_info *ti,
				   int gcflag)
{
	struct nilfs_transaction_info *cur_ti = current->journal_info;
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct nilfs_sc_info *sci = nilfs->ns_writer;

	WARN_ON(cur_ti);
	ti->ti_flags = NILFS_TI_WRITER;
	ti->ti_count = 0;
	ti->ti_save = cur_ti;
	ti->ti_magic = NILFS_TI_MAGIC;
	current->journal_info = ti;

	for (;;) {
		trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
			    ti->ti_flags, TRACE_NILFS2_TRANSACTION_TRYLOCK);

		down_write(&nilfs->ns_segctor_sem);
		if (!test_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags))
			break;

		nilfs_segctor_do_immediate_flush(sci);

		up_write(&nilfs->ns_segctor_sem);
		cond_resched();
	}
	if (gcflag)
		ti->ti_flags |= NILFS_TI_GC;

	trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
			    ti->ti_flags, TRACE_NILFS2_TRANSACTION_LOCK);
}

static void nilfs_transaction_unlock(struct super_block *sb)
{
	struct nilfs_transaction_info *ti = current->journal_info;
	struct the_nilfs *nilfs = sb->s_fs_info;

	BUG_ON(ti == NULL || ti->ti_magic != NILFS_TI_MAGIC);
	BUG_ON(ti->ti_count > 0);

	up_write(&nilfs->ns_segctor_sem);
	current->journal_info = ti->ti_save;

	trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
			    ti->ti_flags, TRACE_NILFS2_TRANSACTION_UNLOCK);
}

static void *nilfs_segctor_map_segsum_entry(struct nilfs_sc_info *sci,
					    struct nilfs_segsum_pointer *ssp,
					    unsigned int bytes)
{
	struct nilfs_segment_buffer *segbuf = sci->sc_curseg;
	unsigned int blocksize = sci->sc_super->s_blocksize;
	void *p;

	if (unlikely(ssp->offset + bytes > blocksize)) {
		ssp->offset = 0;
		BUG_ON(NILFS_SEGBUF_BH_IS_LAST(ssp->bh,
					       &segbuf->sb_segsum_buffers));
		ssp->bh = NILFS_SEGBUF_NEXT_BH(ssp->bh);
	}
	p = ssp->bh->b_data + ssp->offset;
	ssp->offset += bytes;
	return p;
}
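
/*
 * Illustrative note (editor's addition): with a 4096-byte block size, a
 * request for an 8-byte entry at ssp->offset == 4092 does not fit
 * (4092 + 8 > 4096), so the mapping above moves to the next segment
 * summary buffer, returns a pointer to offset 0 of that buffer, and
 * leaves ssp->offset == 8.
 */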

/**
 * nilfs_segctor_reset_segment_buffer - reset the current segment buffer
 * @sci: nilfs_sc_info
 */
static int nilfs_segctor_reset_segment_buffer(struct nilfs_sc_info *sci)
{
	struct nilfs_segment_buffer *segbuf = sci->sc_curseg;
	struct buffer_head *sumbh;
	unsigned int sumbytes;
	unsigned int flags = 0;
	int err;

	if (nilfs_doing_gc())
		flags = NILFS_SS_GC;
	err = nilfs_segbuf_reset(segbuf, flags, sci->sc_seg_ctime, sci->sc_cno);
	if (unlikely(err))
		return err;

	sumbh = NILFS_SEGBUF_FIRST_BH(&segbuf->sb_segsum_buffers);
	sumbytes = segbuf->sb_sum.sumbytes;
	sci->sc_finfo_ptr.bh = sumbh;  sci->sc_finfo_ptr.offset = sumbytes;
	sci->sc_binfo_ptr.bh = sumbh;  sci->sc_binfo_ptr.offset = sumbytes;
	sci->sc_blk_cnt = sci->sc_datablk_cnt = 0;
	return 0;
}

static int nilfs_segctor_feed_segment(struct nilfs_sc_info *sci)
{
	sci->sc_nblk_this_inc += sci->sc_curseg->sb_sum.nblocks;
	if (NILFS_SEGBUF_IS_LAST(sci->sc_curseg, &sci->sc_segbufs))
		return -E2BIG; /*
				* The current segment is filled up
				* (internal code)
				*/
	sci->sc_curseg = NILFS_NEXT_SEGBUF(sci->sc_curseg);
	return nilfs_segctor_reset_segment_buffer(sci);
}

static int nilfs_segctor_add_super_root(struct nilfs_sc_info *sci)
{
	struct nilfs_segment_buffer *segbuf = sci->sc_curseg;
	int err;

	if (segbuf->sb_sum.nblocks >= segbuf->sb_rest_blocks) {
		err = nilfs_segctor_feed_segment(sci);
		if (err)
			return err;
		segbuf = sci->sc_curseg;
	}
	err = nilfs_segbuf_extend_payload(segbuf, &segbuf->sb_super_root);
	if (likely(!err))
		segbuf->sb_sum.flags |= NILFS_SS_SR;
	return err;
}

/*
 * Functions for making segment summary and payloads
 */
static int nilfs_segctor_segsum_block_required(
	struct nilfs_sc_info *sci, const struct nilfs_segsum_pointer *ssp,
	unsigned int binfo_size)
{
	unsigned int blocksize = sci->sc_super->s_blocksize;
	/* Sizes of finfo and binfo are small enough compared to blocksize */

	return ssp->offset + binfo_size +
		(!sci->sc_blk_cnt ? sizeof(struct nilfs_finfo) : 0) >
		blocksize;
}
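
/*
 * Example (editor's note, sizes taken from the on-disk format): on a
 * 4096-byte block with ssp->offset == 4080 and sc_blk_cnt == 0, a
 * 16-byte struct nilfs_binfo_v must be preceded by a fresh 24-byte
 * struct nilfs_finfo, so 4080 + 16 + 24 > 4096 holds and an additional
 * segment summary block is required.
 */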

static void nilfs_segctor_begin_finfo(struct nilfs_sc_info *sci,
				      struct inode *inode)
{
	sci->sc_curseg->sb_sum.nfinfo++;
	sci->sc_binfo_ptr = sci->sc_finfo_ptr;
	nilfs_segctor_map_segsum_entry(
		sci, &sci->sc_binfo_ptr, sizeof(struct nilfs_finfo));

	if (NILFS_I(inode)->i_root &&
	    !test_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags))
		set_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags);
	/* skip finfo */
}

static void nilfs_segctor_end_finfo(struct nilfs_sc_info *sci,
				    struct inode *inode)
{
	struct nilfs_finfo *finfo;
	struct nilfs_inode_info *ii;
	struct nilfs_segment_buffer *segbuf;
	__u64 cno;

	if (sci->sc_blk_cnt == 0)
		return;

	ii = NILFS_I(inode);

	if (test_bit(NILFS_I_GCINODE, &ii->i_state))
		cno = ii->i_cno;
	else if (NILFS_ROOT_METADATA_FILE(inode->i_ino))
		cno = 0;
	else
		cno = sci->sc_cno;

	finfo = nilfs_segctor_map_segsum_entry(sci, &sci->sc_finfo_ptr,
					       sizeof(*finfo));
	finfo->fi_ino = cpu_to_le64(inode->i_ino);
	finfo->fi_nblocks = cpu_to_le32(sci->sc_blk_cnt);
	finfo->fi_ndatablk = cpu_to_le32(sci->sc_datablk_cnt);
	finfo->fi_cno = cpu_to_le64(cno);

	segbuf = sci->sc_curseg;
	segbuf->sb_sum.sumbytes = sci->sc_binfo_ptr.offset +
		sci->sc_super->s_blocksize * (segbuf->sb_sum.nsumblk - 1);
	sci->sc_finfo_ptr = sci->sc_binfo_ptr;
	sci->sc_blk_cnt = sci->sc_datablk_cnt = 0;
}

static int nilfs_segctor_add_file_block(struct nilfs_sc_info *sci,
					struct buffer_head *bh,
					struct inode *inode,
					unsigned int binfo_size)
{
	struct nilfs_segment_buffer *segbuf;
	int required, err = 0;

 retry:
	segbuf = sci->sc_curseg;
	required = nilfs_segctor_segsum_block_required(
		sci, &sci->sc_binfo_ptr, binfo_size);
	if (segbuf->sb_sum.nblocks + required + 1 > segbuf->sb_rest_blocks) {
		nilfs_segctor_end_finfo(sci, inode);
		err = nilfs_segctor_feed_segment(sci);
		if (err)
			return err;
		goto retry;
	}
	if (unlikely(required)) {
		err = nilfs_segbuf_extend_segsum(segbuf);
		if (unlikely(err))
			goto failed;
	}
	if (sci->sc_blk_cnt == 0)
		nilfs_segctor_begin_finfo(sci, inode);

	nilfs_segctor_map_segsum_entry(sci, &sci->sc_binfo_ptr, binfo_size);
	/* Substitution to vblocknr is delayed until update_blocknr() */
	nilfs_segbuf_add_file_buffer(segbuf, bh);
	sci->sc_blk_cnt++;
 failed:
	return err;
}

/*
 * Callback functions that enumerate, mark, and collect dirty blocks
 */
static int nilfs_collect_file_data(struct nilfs_sc_info *sci,
				   struct buffer_head *bh, struct inode *inode)
{
	int err;

	err = nilfs_bmap_propagate(NILFS_I(inode)->i_bmap, bh);
	if (err < 0)
		return err;

	err = nilfs_segctor_add_file_block(sci, bh, inode,
					   sizeof(struct nilfs_binfo_v));
	if (!err)
		sci->sc_datablk_cnt++;
	return err;
}

static int nilfs_collect_file_node(struct nilfs_sc_info *sci,
				   struct buffer_head *bh,
				   struct inode *inode)
{
	return nilfs_bmap_propagate(NILFS_I(inode)->i_bmap, bh);
}

static int nilfs_collect_file_bmap(struct nilfs_sc_info *sci,
				   struct buffer_head *bh,
				   struct inode *inode)
{
	WARN_ON(!buffer_dirty(bh));
	return nilfs_segctor_add_file_block(sci, bh, inode, sizeof(__le64));
}

static void nilfs_write_file_data_binfo(struct nilfs_sc_info *sci,
					struct nilfs_segsum_pointer *ssp,
					union nilfs_binfo *binfo)
{
	struct nilfs_binfo_v *binfo_v = nilfs_segctor_map_segsum_entry(
		sci, ssp, sizeof(*binfo_v));
	*binfo_v = binfo->bi_v;
}

static void nilfs_write_file_node_binfo(struct nilfs_sc_info *sci,
					struct nilfs_segsum_pointer *ssp,
					union nilfs_binfo *binfo)
{
	__le64 *vblocknr = nilfs_segctor_map_segsum_entry(
		sci, ssp, sizeof(*vblocknr));
	*vblocknr = binfo->bi_v.bi_vblocknr;
}

static const struct nilfs_sc_operations nilfs_sc_file_ops = {
	.collect_data = nilfs_collect_file_data,
	.collect_node = nilfs_collect_file_node,
	.collect_bmap = nilfs_collect_file_bmap,
	.write_data_binfo = nilfs_write_file_data_binfo,
	.write_node_binfo = nilfs_write_file_node_binfo,
};

static int nilfs_collect_dat_data(struct nilfs_sc_info *sci,
				  struct buffer_head *bh, struct inode *inode)
{
	int err;

	err = nilfs_bmap_propagate(NILFS_I(inode)->i_bmap, bh);
	if (err < 0)
		return err;

	err = nilfs_segctor_add_file_block(sci, bh, inode, sizeof(__le64));
	if (!err)
		sci->sc_datablk_cnt++;
	return err;
}

static int nilfs_collect_dat_bmap(struct nilfs_sc_info *sci,
				  struct buffer_head *bh, struct inode *inode)
{
	WARN_ON(!buffer_dirty(bh));
	return nilfs_segctor_add_file_block(sci, bh, inode,
					    sizeof(struct nilfs_binfo_dat));
}

static void nilfs_write_dat_data_binfo(struct nilfs_sc_info *sci,
				       struct nilfs_segsum_pointer *ssp,
				       union nilfs_binfo *binfo)
{
	__le64 *blkoff = nilfs_segctor_map_segsum_entry(sci, ssp,
							sizeof(*blkoff));
	*blkoff = binfo->bi_dat.bi_blkoff;
}

static void nilfs_write_dat_node_binfo(struct nilfs_sc_info *sci,
				       struct nilfs_segsum_pointer *ssp,
				       union nilfs_binfo *binfo)
{
	struct nilfs_binfo_dat *binfo_dat =
		nilfs_segctor_map_segsum_entry(sci, ssp, sizeof(*binfo_dat));
	*binfo_dat = binfo->bi_dat;
}

static const struct nilfs_sc_operations nilfs_sc_dat_ops = {
	.collect_data = nilfs_collect_dat_data,
	.collect_node = nilfs_collect_file_node,
	.collect_bmap = nilfs_collect_dat_bmap,
	.write_data_binfo = nilfs_write_dat_data_binfo,
	.write_node_binfo = nilfs_write_dat_node_binfo,
};

static const struct nilfs_sc_operations nilfs_sc_dsync_ops = {
	.collect_data = nilfs_collect_file_data,
	.collect_node = NULL,
	.collect_bmap = NULL,
	.write_data_binfo = nilfs_write_file_data_binfo,
	.write_node_binfo = NULL,
};

static size_t nilfs_lookup_dirty_data_buffers(struct inode *inode,
					      struct list_head *listp,
					      size_t nlimit,
					      loff_t start, loff_t end)
{
	struct address_space *mapping = inode->i_mapping;
	struct pagevec pvec;
	pgoff_t index = 0, last = ULONG_MAX;
	size_t ndirties = 0;
	int i;

	if (unlikely(start != 0 || end != LLONG_MAX)) {
		/*
		 * A valid range is given for sync-ing data pages. The
		 * range is rounded to page boundaries; extra dirty buffers
		 * may be included if blocksize < pagesize.
		 */
		index = start >> PAGE_SHIFT;
		last = end >> PAGE_SHIFT;
	}
	pagevec_init(&pvec);
 repeat:
	if (unlikely(index > last) ||
	    !pagevec_lookup_range_tag(&pvec, mapping, &index, last,
				      PAGECACHE_TAG_DIRTY))
		return ndirties;

	for (i = 0; i < pagevec_count(&pvec); i++) {
		struct buffer_head *bh, *head;
		struct page *page = pvec.pages[i];

		lock_page(page);
		if (!page_has_buffers(page))
			create_empty_buffers(page, i_blocksize(inode), 0);
		unlock_page(page);

		bh = head = page_buffers(page);
		do {
			if (!buffer_dirty(bh) || buffer_async_write(bh))
				continue;
			get_bh(bh);
			list_add_tail(&bh->b_assoc_buffers, listp);
			ndirties++;
			if (unlikely(ndirties >= nlimit)) {
				pagevec_release(&pvec);
				cond_resched();
				return ndirties;
			}
		} while (bh = bh->b_this_page, bh != head);
	}
	pagevec_release(&pvec);
	cond_resched();
	goto repeat;
}
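
/*
 * Illustrative note (editor's addition): with 4 KiB pages and
 * start == 5000, index becomes 5000 >> PAGE_SHIFT == 1, so the scan
 * starts at the page covering bytes 4096-8191. If the block size is
 * 1 KiB, the dirty buffers in bytes 4096-4999, which precede "start",
 * are swept in too; this is the per-page rounding noted above.
 */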

static void nilfs_lookup_dirty_node_buffers(struct inode *inode,
					    struct list_head *listp)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct inode *btnc_inode = ii->i_assoc_inode;
	struct pagevec pvec;
	struct buffer_head *bh, *head;
	unsigned int i;
	pgoff_t index = 0;

	if (!btnc_inode)
		return;

	pagevec_init(&pvec);

	while (pagevec_lookup_tag(&pvec, btnc_inode->i_mapping, &index,
				  PAGECACHE_TAG_DIRTY)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			bh = head = page_buffers(pvec.pages[i]);
			do {
				if (buffer_dirty(bh) &&
				    !buffer_async_write(bh)) {
					get_bh(bh);
					list_add_tail(&bh->b_assoc_buffers,
						      listp);
				}
				bh = bh->b_this_page;
			} while (bh != head);
		}
		pagevec_release(&pvec);
		cond_resched();
	}
}

static void nilfs_dispose_list(struct the_nilfs *nilfs,
			       struct list_head *head, int force)
{
	struct nilfs_inode_info *ii, *n;
	struct nilfs_inode_info *ivec[SC_N_INODEVEC], **pii;
	unsigned int nv = 0;

	while (!list_empty(head)) {
		spin_lock(&nilfs->ns_inode_lock);
		list_for_each_entry_safe(ii, n, head, i_dirty) {
			list_del_init(&ii->i_dirty);
			if (force) {
				if (unlikely(ii->i_bh)) {
					brelse(ii->i_bh);
					ii->i_bh = NULL;
				}
			} else if (test_bit(NILFS_I_DIRTY, &ii->i_state)) {
				set_bit(NILFS_I_QUEUED, &ii->i_state);
				list_add_tail(&ii->i_dirty,
					      &nilfs->ns_dirty_files);
				continue;
			}
			ivec[nv++] = ii;
			if (nv == SC_N_INODEVEC)
				break;
		}
		spin_unlock(&nilfs->ns_inode_lock);

		for (pii = ivec; nv > 0; pii++, nv--)
			iput(&(*pii)->vfs_inode);
	}
}

static void nilfs_iput_work_func(struct work_struct *work)
{
	struct nilfs_sc_info *sci = container_of(work, struct nilfs_sc_info,
						 sc_iput_work);
	struct the_nilfs *nilfs = sci->sc_super->s_fs_info;

	nilfs_dispose_list(nilfs, &sci->sc_iput_queue, 0);
}

static int nilfs_test_metadata_dirty(struct the_nilfs *nilfs,
				     struct nilfs_root *root)
{
	int ret = 0;

	if (nilfs_mdt_fetch_dirty(root->ifile))
		ret++;
	if (nilfs_mdt_fetch_dirty(nilfs->ns_cpfile))
		ret++;
	if (nilfs_mdt_fetch_dirty(nilfs->ns_sufile))
		ret++;
	if ((ret || nilfs_doing_gc()) && nilfs_mdt_fetch_dirty(nilfs->ns_dat))
		ret++;
	return ret;
}

static int nilfs_segctor_clean(struct nilfs_sc_info *sci)
{
	return list_empty(&sci->sc_dirty_files) &&
		!test_bit(NILFS_SC_DIRTY, &sci->sc_flags) &&
		sci->sc_nfreesegs == 0 &&
		(!nilfs_doing_gc() || list_empty(&sci->sc_gc_inodes));
}

static int nilfs_segctor_confirm(struct nilfs_sc_info *sci)
{
	struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
	int ret = 0;

	if (nilfs_test_metadata_dirty(nilfs, sci->sc_root))
		set_bit(NILFS_SC_DIRTY, &sci->sc_flags);

	spin_lock(&nilfs->ns_inode_lock);
	if (list_empty(&nilfs->ns_dirty_files) && nilfs_segctor_clean(sci))
		ret++;

	spin_unlock(&nilfs->ns_inode_lock);
	return ret;
}

static void nilfs_segctor_clear_metadata_dirty(struct nilfs_sc_info *sci)
{
	struct the_nilfs *nilfs = sci->sc_super->s_fs_info;

	nilfs_mdt_clear_dirty(sci->sc_root->ifile);
	nilfs_mdt_clear_dirty(nilfs->ns_cpfile);
	nilfs_mdt_clear_dirty(nilfs->ns_sufile);
	nilfs_mdt_clear_dirty(nilfs->ns_dat);
}

static int nilfs_segctor_create_checkpoint(struct nilfs_sc_info *sci)
{
	struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
	struct buffer_head *bh_cp;
	struct nilfs_checkpoint *raw_cp;
	int err;

	/* XXX: this interface will be changed */
	err = nilfs_cpfile_get_checkpoint(nilfs->ns_cpfile, nilfs->ns_cno, 1,
					  &raw_cp, &bh_cp);
	if (likely(!err)) {
		/*
		 * The following code duplicates part of cpfile. But it is
		 * needed to collect the checkpoint even if it was not newly
		 * created.
		 */
		mark_buffer_dirty(bh_cp);
		nilfs_mdt_mark_dirty(nilfs->ns_cpfile);
		nilfs_cpfile_put_checkpoint(
			nilfs->ns_cpfile, nilfs->ns_cno, bh_cp);
	} else if (err == -EINVAL || err == -ENOENT) {
		nilfs_error(sci->sc_super,
			    "checkpoint creation failed due to metadata corruption.");
		err = -EIO;
	}
	return err;
}

static int nilfs_segctor_fill_in_checkpoint(struct nilfs_sc_info *sci)
{
	struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
	struct buffer_head *bh_cp;
	struct nilfs_checkpoint *raw_cp;
	int err;

	err = nilfs_cpfile_get_checkpoint(nilfs->ns_cpfile, nilfs->ns_cno, 0,
					  &raw_cp, &bh_cp);
	if (unlikely(err)) {
		if (err == -EINVAL || err == -ENOENT) {
			nilfs_error(sci->sc_super,
				    "checkpoint finalization failed due to metadata corruption.");
			err = -EIO;
		}
		goto failed_ibh;
	}
	raw_cp->cp_snapshot_list.ssl_next = 0;
	raw_cp->cp_snapshot_list.ssl_prev = 0;
	raw_cp->cp_inodes_count =
		cpu_to_le64(atomic64_read(&sci->sc_root->inodes_count));
	raw_cp->cp_blocks_count =
		cpu_to_le64(atomic64_read(&sci->sc_root->blocks_count));
	raw_cp->cp_nblk_inc =
		cpu_to_le64(sci->sc_nblk_inc + sci->sc_nblk_this_inc);
	raw_cp->cp_create = cpu_to_le64(sci->sc_seg_ctime);
	raw_cp->cp_cno = cpu_to_le64(nilfs->ns_cno);

	if (test_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags))
		nilfs_checkpoint_clear_minor(raw_cp);
	else
		nilfs_checkpoint_set_minor(raw_cp);

	nilfs_write_inode_common(sci->sc_root->ifile,
				 &raw_cp->cp_ifile_inode, 1);
	nilfs_cpfile_put_checkpoint(nilfs->ns_cpfile, nilfs->ns_cno, bh_cp);
	return 0;

 failed_ibh:
	return err;
}

static void nilfs_fill_in_file_bmap(struct inode *ifile,
				    struct nilfs_inode_info *ii)
{
	struct buffer_head *ibh;
	struct nilfs_inode *raw_inode;

	if (test_bit(NILFS_I_BMAP, &ii->i_state)) {
		ibh = ii->i_bh;
		BUG_ON(!ibh);
		raw_inode = nilfs_ifile_map_inode(ifile, ii->vfs_inode.i_ino,
						  ibh);
		nilfs_bmap_write(ii->i_bmap, raw_inode);
		nilfs_ifile_unmap_inode(ifile, ii->vfs_inode.i_ino, ibh);
	}
}

static void nilfs_segctor_fill_in_file_bmap(struct nilfs_sc_info *sci)
{
	struct nilfs_inode_info *ii;

	list_for_each_entry(ii, &sci->sc_dirty_files, i_dirty) {
		nilfs_fill_in_file_bmap(sci->sc_root->ifile, ii);
		set_bit(NILFS_I_COLLECTED, &ii->i_state);
	}
}

static void nilfs_segctor_fill_in_super_root(struct nilfs_sc_info *sci,
					     struct the_nilfs *nilfs)
{
	struct buffer_head *bh_sr;
	struct nilfs_super_root *raw_sr;
	unsigned int isz, srsz;

	bh_sr = NILFS_LAST_SEGBUF(&sci->sc_segbufs)->sb_super_root;
	raw_sr = (struct nilfs_super_root *)bh_sr->b_data;
	isz = nilfs->ns_inode_size;
	srsz = NILFS_SR_BYTES(isz);

	raw_sr->sr_bytes = cpu_to_le16(srsz);
	raw_sr->sr_nongc_ctime
		= cpu_to_le64(nilfs_doing_gc() ?
			      nilfs->ns_nongc_ctime : sci->sc_seg_ctime);
	raw_sr->sr_flags = 0;

	nilfs_write_inode_common(nilfs->ns_dat, (void *)raw_sr +
				 NILFS_SR_DAT_OFFSET(isz), 1);
	nilfs_write_inode_common(nilfs->ns_cpfile, (void *)raw_sr +
				 NILFS_SR_CPFILE_OFFSET(isz), 1);
	nilfs_write_inode_common(nilfs->ns_sufile, (void *)raw_sr +
				 NILFS_SR_SUFILE_OFFSET(isz), 1);
	memset((void *)raw_sr + srsz, 0, nilfs->ns_blocksize - srsz);
}

static void nilfs_redirty_inodes(struct list_head *head)
{
	struct nilfs_inode_info *ii;

	list_for_each_entry(ii, head, i_dirty) {
		if (test_bit(NILFS_I_COLLECTED, &ii->i_state))
			clear_bit(NILFS_I_COLLECTED, &ii->i_state);
	}
}

static void nilfs_drop_collected_inodes(struct list_head *head)
{
	struct nilfs_inode_info *ii;

	list_for_each_entry(ii, head, i_dirty) {
		if (!test_and_clear_bit(NILFS_I_COLLECTED, &ii->i_state))
			continue;

		clear_bit(NILFS_I_INODE_SYNC, &ii->i_state);
		set_bit(NILFS_I_UPDATED, &ii->i_state);
	}
}

static int nilfs_segctor_apply_buffers(struct nilfs_sc_info *sci,
				       struct inode *inode,
				       struct list_head *listp,
				       int (*collect)(struct nilfs_sc_info *,
						      struct buffer_head *,
						      struct inode *))
{
	struct buffer_head *bh, *n;
	int err = 0;

	if (collect) {
		list_for_each_entry_safe(bh, n, listp, b_assoc_buffers) {
			list_del_init(&bh->b_assoc_buffers);
			err = collect(sci, bh, inode);
			brelse(bh);
			if (unlikely(err))
				goto dispose_buffers;
		}
		return 0;
	}

 dispose_buffers:
	while (!list_empty(listp)) {
		bh = list_first_entry(listp, struct buffer_head,
				      b_assoc_buffers);
		list_del_init(&bh->b_assoc_buffers);
		brelse(bh);
	}
	return err;
}

static size_t nilfs_segctor_buffer_rest(struct nilfs_sc_info *sci)
{
	/* Remaining number of blocks within segment buffer */
	return sci->sc_segbuf_nblocks -
		(sci->sc_nblk_this_inc + sci->sc_curseg->sb_sum.nblocks);
}
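
/*
 * Example (editor's note): if sc_segbuf_nblocks == 2048, earlier buffers
 * of this increment already account for sc_nblk_this_inc == 1500 blocks,
 * and the current segment buffer holds 100, then 2048 - (1500 + 100)
 * == 448 blocks remain available for collection.
 */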

static int nilfs_segctor_scan_file(struct nilfs_sc_info *sci,
				   struct inode *inode,
				   const struct nilfs_sc_operations *sc_ops)
{
	LIST_HEAD(data_buffers);
	LIST_HEAD(node_buffers);
	int err;

	if (!(sci->sc_stage.flags & NILFS_CF_NODE)) {
		size_t n, rest = nilfs_segctor_buffer_rest(sci);

		n = nilfs_lookup_dirty_data_buffers(
			inode, &data_buffers, rest + 1, 0, LLONG_MAX);
		if (n > rest) {
			err = nilfs_segctor_apply_buffers(
				sci, inode, &data_buffers,
				sc_ops->collect_data);
			BUG_ON(!err); /* always receive -E2BIG or true error */
			goto break_or_fail;
		}
	}
	nilfs_lookup_dirty_node_buffers(inode, &node_buffers);

	if (!(sci->sc_stage.flags & NILFS_CF_NODE)) {
		err = nilfs_segctor_apply_buffers(
			sci, inode, &data_buffers, sc_ops->collect_data);
		if (unlikely(err)) {
			/* dispose node list */
			nilfs_segctor_apply_buffers(
				sci, inode, &node_buffers, NULL);
			goto break_or_fail;
		}
		sci->sc_stage.flags |= NILFS_CF_NODE;
	}
	/* Collect node */
	err = nilfs_segctor_apply_buffers(
		sci, inode, &node_buffers, sc_ops->collect_node);
	if (unlikely(err))
		goto break_or_fail;

	nilfs_bmap_lookup_dirty_buffers(NILFS_I(inode)->i_bmap, &node_buffers);
	err = nilfs_segctor_apply_buffers(
		sci, inode, &node_buffers, sc_ops->collect_bmap);
	if (unlikely(err))
		goto break_or_fail;

	nilfs_segctor_end_finfo(sci, inode);
	sci->sc_stage.flags &= ~NILFS_CF_NODE;

 break_or_fail:
	return err;
}

static int nilfs_segctor_scan_file_dsync(struct nilfs_sc_info *sci,
					 struct inode *inode)
{
	LIST_HEAD(data_buffers);
	size_t n, rest = nilfs_segctor_buffer_rest(sci);
	int err;

	n = nilfs_lookup_dirty_data_buffers(inode, &data_buffers, rest + 1,
					    sci->sc_dsync_start,
					    sci->sc_dsync_end);

	err = nilfs_segctor_apply_buffers(sci, inode, &data_buffers,
					  nilfs_collect_file_data);
	if (!err) {
		nilfs_segctor_end_finfo(sci, inode);
		BUG_ON(n > rest);
		/* always receive -E2BIG or true error if n > rest */
	}
	return err;
}

static int nilfs_segctor_collect_blocks(struct nilfs_sc_info *sci, int mode)
{
	struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
	struct list_head *head;
	struct nilfs_inode_info *ii;
	size_t ndone;
	int err = 0;

	switch (nilfs_sc_cstage_get(sci)) {
	case NILFS_ST_INIT:
		/* Pre-processes */
		sci->sc_stage.flags = 0;

		if (!test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags)) {
			sci->sc_nblk_inc = 0;
			sci->sc_curseg->sb_sum.flags = NILFS_SS_LOGBGN;
			if (mode == SC_LSEG_DSYNC) {
				nilfs_sc_cstage_set(sci, NILFS_ST_DSYNC);
				goto dsync_mode;
			}
		}

		sci->sc_stage.dirty_file_ptr = NULL;
		sci->sc_stage.gc_inode_ptr = NULL;
		if (mode == SC_FLUSH_DAT) {
			nilfs_sc_cstage_set(sci, NILFS_ST_DAT);
			goto dat_stage;
		}
		nilfs_sc_cstage_inc(sci);
		fallthrough;
	case NILFS_ST_GC:
		if (nilfs_doing_gc()) {
			head = &sci->sc_gc_inodes;
			ii = list_prepare_entry(sci->sc_stage.gc_inode_ptr,
						head, i_dirty);
			list_for_each_entry_continue(ii, head, i_dirty) {
				err = nilfs_segctor_scan_file(
					sci, &ii->vfs_inode,
					&nilfs_sc_file_ops);
				if (unlikely(err)) {
					sci->sc_stage.gc_inode_ptr = list_entry(
						ii->i_dirty.prev,
						struct nilfs_inode_info,
						i_dirty);
					goto break_or_fail;
				}
				set_bit(NILFS_I_COLLECTED, &ii->i_state);
			}
			sci->sc_stage.gc_inode_ptr = NULL;
		}
		nilfs_sc_cstage_inc(sci);
		fallthrough;
	case NILFS_ST_FILE:
		head = &sci->sc_dirty_files;
		ii = list_prepare_entry(sci->sc_stage.dirty_file_ptr, head,
					i_dirty);
		list_for_each_entry_continue(ii, head, i_dirty) {
			clear_bit(NILFS_I_DIRTY, &ii->i_state);

			err = nilfs_segctor_scan_file(sci, &ii->vfs_inode,
						      &nilfs_sc_file_ops);
			if (unlikely(err)) {
				sci->sc_stage.dirty_file_ptr =
					list_entry(ii->i_dirty.prev,
						   struct nilfs_inode_info,
						   i_dirty);
				goto break_or_fail;
			}
			/* sci->sc_stage.dirty_file_ptr = NILFS_I(inode); */
			/* XXX: required ? */
		}
		sci->sc_stage.dirty_file_ptr = NULL;
		if (mode == SC_FLUSH_FILE) {
			nilfs_sc_cstage_set(sci, NILFS_ST_DONE);
			return 0;
		}
		nilfs_sc_cstage_inc(sci);
		sci->sc_stage.flags |= NILFS_CF_IFILE_STARTED;
		fallthrough;
	case NILFS_ST_IFILE:
		err = nilfs_segctor_scan_file(sci, sci->sc_root->ifile,
					      &nilfs_sc_file_ops);
		if (unlikely(err))
			break;
		nilfs_sc_cstage_inc(sci);
		/* Creating a checkpoint */
		err = nilfs_segctor_create_checkpoint(sci);
		if (unlikely(err))
			break;
		fallthrough;
	case NILFS_ST_CPFILE:
		err = nilfs_segctor_scan_file(sci, nilfs->ns_cpfile,
					      &nilfs_sc_file_ops);
		if (unlikely(err))
			break;
		nilfs_sc_cstage_inc(sci);
		fallthrough;
	case NILFS_ST_SUFILE:
		err = nilfs_sufile_freev(nilfs->ns_sufile, sci->sc_freesegs,
					 sci->sc_nfreesegs, &ndone);
		if (unlikely(err)) {
			nilfs_sufile_cancel_freev(nilfs->ns_sufile,
						  sci->sc_freesegs, ndone,
						  NULL);
			break;
		}
		sci->sc_stage.flags |= NILFS_CF_SUFREED;

		err = nilfs_segctor_scan_file(sci, nilfs->ns_sufile,
					      &nilfs_sc_file_ops);
		if (unlikely(err))
			break;
		nilfs_sc_cstage_inc(sci);
		fallthrough;
	case NILFS_ST_DAT:
 dat_stage:
		err = nilfs_segctor_scan_file(sci, nilfs->ns_dat,
					      &nilfs_sc_dat_ops);
		if (unlikely(err))
			break;
		if (mode == SC_FLUSH_DAT) {
			nilfs_sc_cstage_set(sci, NILFS_ST_DONE);
			return 0;
		}
		nilfs_sc_cstage_inc(sci);
		fallthrough;
	case NILFS_ST_SR:
		if (mode == SC_LSEG_SR) {
			/* Appending a super root */
			err = nilfs_segctor_add_super_root(sci);
			if (unlikely(err))
				break;
		}
		/* End of a logical segment */
		sci->sc_curseg->sb_sum.flags |= NILFS_SS_LOGEND;
		nilfs_sc_cstage_set(sci, NILFS_ST_DONE);
		return 0;
	case NILFS_ST_DSYNC:
 dsync_mode:
		sci->sc_curseg->sb_sum.flags |= NILFS_SS_SYNDT;
		ii = sci->sc_dsync_inode;
		if (!test_bit(NILFS_I_BUSY, &ii->i_state))
			break;

		err = nilfs_segctor_scan_file_dsync(sci, &ii->vfs_inode);
		if (unlikely(err))
			break;
		sci->sc_curseg->sb_sum.flags |= NILFS_SS_LOGEND;
		nilfs_sc_cstage_set(sci, NILFS_ST_DONE);
		return 0;
	case NILFS_ST_DONE:
		return 0;
	default:
		BUG();
	}

 break_or_fail:
	return err;
}

/**
 * nilfs_segctor_begin_construction - setup segment buffer to make a new log
 * @sci: nilfs_sc_info
 * @nilfs: nilfs object
 */
static int nilfs_segctor_begin_construction(struct nilfs_sc_info *sci,
					    struct the_nilfs *nilfs)
{
	struct nilfs_segment_buffer *segbuf, *prev;
	__u64 nextnum;
	int err, alloc = 0;

	segbuf = nilfs_segbuf_new(sci->sc_super);
	if (unlikely(!segbuf))
		return -ENOMEM;

	if (list_empty(&sci->sc_write_logs)) {
		nilfs_segbuf_map(segbuf, nilfs->ns_segnum,
				 nilfs->ns_pseg_offset, nilfs);
		if (segbuf->sb_rest_blocks < NILFS_PSEG_MIN_BLOCKS) {
			nilfs_shift_to_next_segment(nilfs);
			nilfs_segbuf_map(segbuf, nilfs->ns_segnum, 0, nilfs);
		}

		segbuf->sb_sum.seg_seq = nilfs->ns_seg_seq;
		nextnum = nilfs->ns_nextnum;

		if (nilfs->ns_segnum == nilfs->ns_nextnum)
			/* Start from the head of a new full segment */
			alloc++;
	} else {
		/* Continue logs */
		prev = NILFS_LAST_SEGBUF(&sci->sc_write_logs);
		nilfs_segbuf_map_cont(segbuf, prev);
		segbuf->sb_sum.seg_seq = prev->sb_sum.seg_seq;
		nextnum = prev->sb_nextnum;

		if (segbuf->sb_rest_blocks < NILFS_PSEG_MIN_BLOCKS) {
			nilfs_segbuf_map(segbuf, prev->sb_nextnum, 0, nilfs);
			segbuf->sb_sum.seg_seq++;
			alloc++;
		}
	}

	err = nilfs_sufile_mark_dirty(nilfs->ns_sufile, segbuf->sb_segnum);
	if (err)
		goto failed;

	if (alloc) {
		err = nilfs_sufile_alloc(nilfs->ns_sufile, &nextnum);
		if (err)
			goto failed;
	}
	nilfs_segbuf_set_next_segnum(segbuf, nextnum, nilfs);

	BUG_ON(!list_empty(&sci->sc_segbufs));
	list_add_tail(&segbuf->sb_list, &sci->sc_segbufs);
	sci->sc_segbuf_nblocks = segbuf->sb_rest_blocks;
	return 0;

 failed:
	nilfs_segbuf_free(segbuf);
	return err;
}

static int nilfs_segctor_extend_segments(struct nilfs_sc_info *sci,
					 struct the_nilfs *nilfs, int nadd)
{
	struct nilfs_segment_buffer *segbuf, *prev;
	struct inode *sufile = nilfs->ns_sufile;
	__u64 nextnextnum;
	LIST_HEAD(list);
	int err, ret, i;

	prev = NILFS_LAST_SEGBUF(&sci->sc_segbufs);
	/*
	 * Since the segment specified with nextnum might be allocated during
	 * the previous construction, the buffer including its segusage may
	 * not be dirty. The following call ensures that the buffer is dirty
	 * and will pin the buffer on memory until the sufile is written.
	 */
	err = nilfs_sufile_mark_dirty(sufile, prev->sb_nextnum);
	if (unlikely(err))
		return err;

	for (i = 0; i < nadd; i++) {
		/* extend segment info */
		err = -ENOMEM;
		segbuf = nilfs_segbuf_new(sci->sc_super);
		if (unlikely(!segbuf))
			goto failed;

		/* map this buffer to region of segment on-disk */
		nilfs_segbuf_map(segbuf, prev->sb_nextnum, 0, nilfs);
		sci->sc_segbuf_nblocks += segbuf->sb_rest_blocks;

		/* allocate the next next full segment */
		err = nilfs_sufile_alloc(sufile, &nextnextnum);
		if (unlikely(err))
			goto failed_segbuf;

		segbuf->sb_sum.seg_seq = prev->sb_sum.seg_seq + 1;
		nilfs_segbuf_set_next_segnum(segbuf, nextnextnum, nilfs);

		list_add_tail(&segbuf->sb_list, &list);
		prev = segbuf;
	}
	list_splice_tail(&list, &sci->sc_segbufs);
	return 0;

 failed_segbuf:
	nilfs_segbuf_free(segbuf);
 failed:
	list_for_each_entry(segbuf, &list, sb_list) {
		ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum);
		WARN_ON(ret); /* never fails */
	}
	nilfs_destroy_logs(&list);
	return err;
}

static void nilfs_free_incomplete_logs(struct list_head *logs,
				       struct the_nilfs *nilfs)
{
	struct nilfs_segment_buffer *segbuf, *prev;
	struct inode *sufile = nilfs->ns_sufile;
	int ret;

	segbuf = NILFS_FIRST_SEGBUF(logs);
	if (nilfs->ns_nextnum != segbuf->sb_nextnum) {
		ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum);
		WARN_ON(ret); /* never fails */
	}
	if (atomic_read(&segbuf->sb_err)) {
		/* Case 1: The first segment failed */
		if (segbuf->sb_pseg_start != segbuf->sb_fseg_start)
			/*
			 * Case 1a: Partial segment appended into an existing
			 * segment
			 */
			nilfs_terminate_segment(nilfs, segbuf->sb_fseg_start,
						segbuf->sb_fseg_end);
		else /* Case 1b: New full segment */
			set_nilfs_discontinued(nilfs);
	}

	prev = segbuf;
	list_for_each_entry_continue(segbuf, logs, sb_list) {
		if (prev->sb_nextnum != segbuf->sb_nextnum) {
			ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum);
			WARN_ON(ret); /* never fails */
		}
		if (atomic_read(&segbuf->sb_err) &&
		    segbuf->sb_segnum != nilfs->ns_nextnum)
			/* Case 2: extended segment (!= next) failed */
			nilfs_sufile_set_error(sufile, segbuf->sb_segnum);
		prev = segbuf;
	}
}

static void nilfs_segctor_update_segusage(struct nilfs_sc_info *sci,
					  struct inode *sufile)
{
	struct nilfs_segment_buffer *segbuf;
	unsigned long live_blocks;
	int ret;

	list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) {
		live_blocks = segbuf->sb_sum.nblocks +
			(segbuf->sb_pseg_start - segbuf->sb_fseg_start);
		ret = nilfs_sufile_set_segment_usage(sufile, segbuf->sb_segnum,
						     live_blocks,
						     sci->sc_seg_ctime);
		WARN_ON(ret); /* always succeeds because the segusage is dirty */
	}
}

static void nilfs_cancel_segusage(struct list_head *logs, struct inode *sufile)
{
	struct nilfs_segment_buffer *segbuf;
	int ret;

	segbuf = NILFS_FIRST_SEGBUF(logs);
	ret = nilfs_sufile_set_segment_usage(sufile, segbuf->sb_segnum,
					     segbuf->sb_pseg_start -
					     segbuf->sb_fseg_start, 0);
	WARN_ON(ret); /* always succeeds because the segusage is dirty */

	list_for_each_entry_continue(segbuf, logs, sb_list) {
		ret = nilfs_sufile_set_segment_usage(sufile, segbuf->sb_segnum,
						     0, 0);
		WARN_ON(ret); /* always succeeds */
	}
}

static void nilfs_segctor_truncate_segments(struct nilfs_sc_info *sci,
					    struct nilfs_segment_buffer *last,
					    struct inode *sufile)
{
	struct nilfs_segment_buffer *segbuf = last;
	int ret;

	list_for_each_entry_continue(segbuf, &sci->sc_segbufs, sb_list) {
		sci->sc_segbuf_nblocks -= segbuf->sb_rest_blocks;
		ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum);
		WARN_ON(ret);
	}
	nilfs_truncate_logs(&sci->sc_segbufs, last);
}


static int nilfs_segctor_collect(struct nilfs_sc_info *sci,
				 struct the_nilfs *nilfs, int mode)
{
	struct nilfs_cstage prev_stage = sci->sc_stage;
	int err, nadd = 1;

	/* Collection retry loop */
	for (;;) {
		sci->sc_nblk_this_inc = 0;
		sci->sc_curseg = NILFS_FIRST_SEGBUF(&sci->sc_segbufs);

		err = nilfs_segctor_reset_segment_buffer(sci);
		if (unlikely(err))
			goto failed;

		err = nilfs_segctor_collect_blocks(sci, mode);
		sci->sc_nblk_this_inc += sci->sc_curseg->sb_sum.nblocks;
		if (!err)
			break;

		if (unlikely(err != -E2BIG))
			goto failed;

		/* The current segment is filled up */
		if (mode != SC_LSEG_SR ||
		    nilfs_sc_cstage_get(sci) < NILFS_ST_CPFILE)
			break;

		nilfs_clear_logs(&sci->sc_segbufs);

		if (sci->sc_stage.flags & NILFS_CF_SUFREED) {
			err = nilfs_sufile_cancel_freev(nilfs->ns_sufile,
							sci->sc_freesegs,
							sci->sc_nfreesegs,
							NULL);
			WARN_ON(err); /* should not happen */
			sci->sc_stage.flags &= ~NILFS_CF_SUFREED;
		}

		err = nilfs_segctor_extend_segments(sci, nilfs, nadd);
		if (unlikely(err))
			return err;

		nadd = min_t(int, nadd << 1, SC_MAX_SEGDELTA);
		sci->sc_stage = prev_stage;
	}
	nilfs_segctor_truncate_segments(sci, sci->sc_curseg, nilfs->ns_sufile);
	return 0;

 failed:
	return err;
}
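
/*
 * Illustrative note (editor's addition): on repeated -E2BIG results the
 * retry loop above grows the request exponentially, asking for 1, 2, 4,
 * ... extra segments per iteration and capping at SC_MAX_SEGDELTA (64),
 * so a large backlog converges in a logarithmic number of retries.
 */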
1545
nilfs_list_replace_buffer(struct buffer_head * old_bh,struct buffer_head * new_bh)1546 static void nilfs_list_replace_buffer(struct buffer_head *old_bh,
1547 struct buffer_head *new_bh)
1548 {
1549 BUG_ON(!list_empty(&new_bh->b_assoc_buffers));
1550
1551 list_replace_init(&old_bh->b_assoc_buffers, &new_bh->b_assoc_buffers);
1552 /* The caller must release old_bh */
1553 }
1554
1555 static int
nilfs_segctor_update_payload_blocknr(struct nilfs_sc_info * sci,struct nilfs_segment_buffer * segbuf,int mode)1556 nilfs_segctor_update_payload_blocknr(struct nilfs_sc_info *sci,
1557 struct nilfs_segment_buffer *segbuf,
1558 int mode)
1559 {
1560 struct inode *inode = NULL;
1561 sector_t blocknr;
1562 unsigned long nfinfo = segbuf->sb_sum.nfinfo;
1563 unsigned long nblocks = 0, ndatablk = 0;
1564 const struct nilfs_sc_operations *sc_op = NULL;
1565 struct nilfs_segsum_pointer ssp;
1566 struct nilfs_finfo *finfo = NULL;
1567 union nilfs_binfo binfo;
1568 struct buffer_head *bh, *bh_org;
1569 ino_t ino = 0;
1570 int err = 0;
1571
1572 if (!nfinfo)
1573 goto out;
1574
1575 blocknr = segbuf->sb_pseg_start + segbuf->sb_sum.nsumblk;
1576 ssp.bh = NILFS_SEGBUF_FIRST_BH(&segbuf->sb_segsum_buffers);
1577 ssp.offset = sizeof(struct nilfs_segment_summary);
1578
1579 list_for_each_entry(bh, &segbuf->sb_payload_buffers, b_assoc_buffers) {
1580 if (bh == segbuf->sb_super_root)
1581 break;
1582 if (!finfo) {
1583 finfo = nilfs_segctor_map_segsum_entry(
1584 sci, &ssp, sizeof(*finfo));
1585 ino = le64_to_cpu(finfo->fi_ino);
1586 nblocks = le32_to_cpu(finfo->fi_nblocks);
1587 ndatablk = le32_to_cpu(finfo->fi_ndatablk);
1588
1589 inode = bh->b_page->mapping->host;
1590
1591 if (mode == SC_LSEG_DSYNC)
1592 sc_op = &nilfs_sc_dsync_ops;
1593 else if (ino == NILFS_DAT_INO)
1594 sc_op = &nilfs_sc_dat_ops;
1595 else /* file blocks */
1596 sc_op = &nilfs_sc_file_ops;
1597 }
1598 bh_org = bh;
1599 get_bh(bh_org);
1600 err = nilfs_bmap_assign(NILFS_I(inode)->i_bmap, &bh, blocknr,
1601 &binfo);
1602 if (bh != bh_org)
1603 nilfs_list_replace_buffer(bh_org, bh);
1604 brelse(bh_org);
1605 if (unlikely(err))
1606 goto failed_bmap;
1607
1608 if (ndatablk > 0)
1609 sc_op->write_data_binfo(sci, &ssp, &binfo);
1610 else
1611 sc_op->write_node_binfo(sci, &ssp, &binfo);
1612
1613 blocknr++;
1614 if (--nblocks == 0) {
1615 finfo = NULL;
1616 if (--nfinfo == 0)
1617 break;
1618 } else if (ndatablk > 0)
1619 ndatablk--;
1620 }
1621 out:
1622 return 0;
1623
1624 failed_bmap:
1625 return err;
1626 }
1627
nilfs_segctor_assign(struct nilfs_sc_info * sci,int mode)1628 static int nilfs_segctor_assign(struct nilfs_sc_info *sci, int mode)
1629 {
1630 struct nilfs_segment_buffer *segbuf;
1631 int err;
1632
1633 list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) {
1634 err = nilfs_segctor_update_payload_blocknr(sci, segbuf, mode);
1635 if (unlikely(err))
1636 return err;
1637 nilfs_segbuf_fill_in_segsum(segbuf);
1638 }
1639 return 0;
1640 }
1641
nilfs_begin_page_io(struct page * page)1642 static void nilfs_begin_page_io(struct page *page)
1643 {
1644 if (!page || PageWriteback(page))
1645 /*
1646 * For split b-tree node pages, this function may be called
1647 * twice. We ignore the 2nd or later calls by this check.
1648 */
1649 return;
1650
1651 lock_page(page);
1652 clear_page_dirty_for_io(page);
1653 set_page_writeback(page);
1654 unlock_page(page);
1655 }
1656
nilfs_segctor_prepare_write(struct nilfs_sc_info * sci)1657 static void nilfs_segctor_prepare_write(struct nilfs_sc_info *sci)
1658 {
1659 struct nilfs_segment_buffer *segbuf;
1660 struct page *bd_page = NULL, *fs_page = NULL;
1661
1662 list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) {
1663 struct buffer_head *bh;
1664
1665 list_for_each_entry(bh, &segbuf->sb_segsum_buffers,
1666 b_assoc_buffers) {
1667 if (bh->b_page != bd_page) {
1668 if (bd_page) {
1669 lock_page(bd_page);
1670 clear_page_dirty_for_io(bd_page);
1671 set_page_writeback(bd_page);
1672 unlock_page(bd_page);
1673 }
1674 bd_page = bh->b_page;
1675 }
1676 }
1677
1678 list_for_each_entry(bh, &segbuf->sb_payload_buffers,
1679 b_assoc_buffers) {
1680 set_buffer_async_write(bh);
1681 if (bh == segbuf->sb_super_root) {
1682 if (bh->b_page != bd_page) {
1683 lock_page(bd_page);
1684 clear_page_dirty_for_io(bd_page);
1685 set_page_writeback(bd_page);
1686 unlock_page(bd_page);
1687 bd_page = bh->b_page;
1688 }
1689 break;
1690 }
1691 if (bh->b_page != fs_page) {
1692 nilfs_begin_page_io(fs_page);
1693 fs_page = bh->b_page;
1694 }
1695 }
1696 }
1697 if (bd_page) {
1698 lock_page(bd_page);
1699 clear_page_dirty_for_io(bd_page);
1700 set_page_writeback(bd_page);
1701 unlock_page(bd_page);
1702 }
1703 nilfs_begin_page_io(fs_page);
1704 }
1705
nilfs_segctor_write(struct nilfs_sc_info * sci,struct the_nilfs * nilfs)1706 static int nilfs_segctor_write(struct nilfs_sc_info *sci,
1707 struct the_nilfs *nilfs)
1708 {
1709 int ret;
1710
1711 ret = nilfs_write_logs(&sci->sc_segbufs, nilfs);
1712 list_splice_tail_init(&sci->sc_segbufs, &sci->sc_write_logs);
1713 return ret;
1714 }
1715
static void nilfs_end_page_io(struct page *page, int err)
{
        if (!page)
                return;

        if (buffer_nilfs_node(page_buffers(page)) && !PageWriteback(page)) {
                /*
                 * For b-tree node pages, this function may be called twice
                 * or more because they might be split in a segment.
                 */
                if (PageDirty(page)) {
                        /*
                         * For pages holding split b-tree node buffers, the
                         * dirty flag on the buffers may be cleared
                         * discretely.  In that case, the page is once
                         * redirtied for remaining buffers, and it must be
                         * cancelled if all the buffers get cleaned later.
                         */
                        lock_page(page);
                        if (nilfs_page_buffers_clean(page))
                                __nilfs_clear_page_dirty(page);
                        unlock_page(page);
                }
                return;
        }

        if (!err) {
                if (!nilfs_page_buffers_clean(page))
                        __set_page_dirty_nobuffers(page);
                ClearPageError(page);
        } else {
                __set_page_dirty_nobuffers(page);
                SetPageError(page);
        }

        end_page_writeback(page);
}

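/**
 * nilfs_abort_logs - cancel the writeback state of logs that failed to write
 * @logs: list of segment buffers to abort
 * @err: error code to propagate to the pages holding payload blocks
 *
 * Clears the async-write flag of payload buffers and ends writeback on
 * every page touched by the aborted logs, marking file pages with @err.
 */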
static void nilfs_abort_logs(struct list_head *logs, int err)
{
        struct nilfs_segment_buffer *segbuf;
        struct page *bd_page = NULL, *fs_page = NULL;
        struct buffer_head *bh;

        if (list_empty(logs))
                return;

        list_for_each_entry(segbuf, logs, sb_list) {
                list_for_each_entry(bh, &segbuf->sb_segsum_buffers,
                                    b_assoc_buffers) {
                        if (bh->b_page != bd_page) {
                                if (bd_page)
                                        end_page_writeback(bd_page);
                                bd_page = bh->b_page;
                        }
                }

                list_for_each_entry(bh, &segbuf->sb_payload_buffers,
                                    b_assoc_buffers) {
                        clear_buffer_async_write(bh);
                        if (bh == segbuf->sb_super_root) {
                                if (bh->b_page != bd_page) {
                                        end_page_writeback(bd_page);
                                        bd_page = bh->b_page;
                                }
                                break;
                        }
                        if (bh->b_page != fs_page) {
                                nilfs_end_page_io(fs_page, err);
                                fs_page = bh->b_page;
                        }
                }
        }
        if (bd_page)
                end_page_writeback(bd_page);

        nilfs_end_page_io(fs_page, err);
}

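/**
 * nilfs_segctor_abort_construction - abandon an in-flight log write
 * @sci: segment constructor object
 * @nilfs: nilfs object
 * @err: error code that caused the abort
 *
 * Waits for logs already submitted, aborts them together with the logs
 * still being built, rolls back the segment usage updates (including any
 * segments freed on behalf of the cleaner), and releases the segment
 * buffers.
 */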
static void nilfs_segctor_abort_construction(struct nilfs_sc_info *sci,
                                             struct the_nilfs *nilfs, int err)
{
        LIST_HEAD(logs);
        int ret;

        list_splice_tail_init(&sci->sc_write_logs, &logs);
        ret = nilfs_wait_on_logs(&logs);
        nilfs_abort_logs(&logs, ret ? : err);

        list_splice_tail_init(&sci->sc_segbufs, &logs);
        nilfs_cancel_segusage(&logs, nilfs->ns_sufile);
        nilfs_free_incomplete_logs(&logs, nilfs);

        if (sci->sc_stage.flags & NILFS_CF_SUFREED) {
                ret = nilfs_sufile_cancel_freev(nilfs->ns_sufile,
                                                sci->sc_freesegs,
                                                sci->sc_nfreesegs,
                                                NULL);
                WARN_ON(ret); /* should not happen */
        }

        nilfs_destroy_logs(&logs);
}

static void nilfs_set_next_segment(struct the_nilfs *nilfs,
                                   struct nilfs_segment_buffer *segbuf)
{
        nilfs->ns_segnum = segbuf->sb_segnum;
        nilfs->ns_nextnum = segbuf->sb_nextnum;
        nilfs->ns_pseg_offset = segbuf->sb_pseg_start - segbuf->sb_fseg_start
                + segbuf->sb_sum.nblocks;
        nilfs->ns_seg_seq = segbuf->sb_sum.seg_seq;
        nilfs->ns_ctime = segbuf->sb_sum.ctime;
}

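/**
 * nilfs_segctor_complete_write - finalize logs whose write-out succeeded
 * @sci: segment constructor object
 *
 * Marks the written buffers up to date and clean, ends writeback on their
 * pages, records the position of the next segment, and, if a super root
 * was written, advances the checkpoint number and clears the dirty state
 * of the metadata files.
 */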
static void nilfs_segctor_complete_write(struct nilfs_sc_info *sci)
{
        struct nilfs_segment_buffer *segbuf;
        struct page *bd_page = NULL, *fs_page = NULL;
        struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
        int update_sr = false;

        list_for_each_entry(segbuf, &sci->sc_write_logs, sb_list) {
                struct buffer_head *bh;

                list_for_each_entry(bh, &segbuf->sb_segsum_buffers,
                                    b_assoc_buffers) {
                        set_buffer_uptodate(bh);
                        clear_buffer_dirty(bh);
                        if (bh->b_page != bd_page) {
                                if (bd_page)
                                        end_page_writeback(bd_page);
                                bd_page = bh->b_page;
                        }
                }
                /*
                 * We assume that the buffers which belong to the same page
                 * continue over the buffer list.
                 * Under this assumption, the last BH of each page is
                 * identifiable by the discontinuity of bh->b_page
                 * (page != fs_page).
                 *
                 * For B-tree node blocks, however, this assumption is not
                 * guaranteed.  The cleanup code of B-tree node pages needs
                 * special care.
                 */
                list_for_each_entry(bh, &segbuf->sb_payload_buffers,
                                    b_assoc_buffers) {
                        const unsigned long set_bits = BIT(BH_Uptodate);
                        const unsigned long clear_bits =
                                (BIT(BH_Dirty) | BIT(BH_Async_Write) |
                                 BIT(BH_Delay) | BIT(BH_NILFS_Volatile) |
                                 BIT(BH_NILFS_Redirected));

                        set_mask_bits(&bh->b_state, clear_bits, set_bits);
                        if (bh == segbuf->sb_super_root) {
                                if (bh->b_page != bd_page) {
                                        end_page_writeback(bd_page);
                                        bd_page = bh->b_page;
                                }
                                update_sr = true;
                                break;
                        }
                        if (bh->b_page != fs_page) {
                                nilfs_end_page_io(fs_page, 0);
                                fs_page = bh->b_page;
                        }
                }

                if (!nilfs_segbuf_simplex(segbuf)) {
                        if (segbuf->sb_sum.flags & NILFS_SS_LOGBGN) {
                                set_bit(NILFS_SC_UNCLOSED, &sci->sc_flags);
                                sci->sc_lseg_stime = jiffies;
                        }
                        if (segbuf->sb_sum.flags & NILFS_SS_LOGEND)
                                clear_bit(NILFS_SC_UNCLOSED, &sci->sc_flags);
                }
        }
        /*
         * Since pages may continue over multiple segment buffers,
         * the end of the last page must be checked outside of the loop.
         */
        if (bd_page)
                end_page_writeback(bd_page);

        nilfs_end_page_io(fs_page, 0);

        nilfs_drop_collected_inodes(&sci->sc_dirty_files);

        if (nilfs_doing_gc())
                nilfs_drop_collected_inodes(&sci->sc_gc_inodes);
        else
                nilfs->ns_nongc_ctime = sci->sc_seg_ctime;

        sci->sc_nblk_inc += sci->sc_nblk_this_inc;

        segbuf = NILFS_LAST_SEGBUF(&sci->sc_write_logs);
        nilfs_set_next_segment(nilfs, segbuf);

        if (update_sr) {
                nilfs->ns_flushed_device = 0;
                nilfs_set_last_segment(nilfs, segbuf->sb_pseg_start,
                                       segbuf->sb_sum.seg_seq, nilfs->ns_cno++);

                clear_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags);
                clear_bit(NILFS_SC_DIRTY, &sci->sc_flags);
                set_bit(NILFS_SC_SUPER_ROOT, &sci->sc_flags);
                nilfs_segctor_clear_metadata_dirty(sci);
        } else
                clear_bit(NILFS_SC_SUPER_ROOT, &sci->sc_flags);
}

static int nilfs_segctor_wait(struct nilfs_sc_info *sci)
{
        int ret;

        ret = nilfs_wait_on_logs(&sci->sc_write_logs);
        if (!ret) {
                nilfs_segctor_complete_write(sci);
                nilfs_destroy_logs(&sci->sc_write_logs);
        }
        return ret;
}

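/**
 * nilfs_segctor_collect_dirty_files - queue dirty inodes for log writing
 * @sci: segment constructor object
 * @nilfs: nilfs object
 *
 * Moves the inodes queued on the filesystem-wide dirty list over to the
 * constructor's own dirty-file list, loading the ifile block of each inode
 * and redirtying it so that the inode is written out with the log.
 *
 * Return Value: On success, 0 is returned. On error, a negative error
 * code is returned.
 */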
static int nilfs_segctor_collect_dirty_files(struct nilfs_sc_info *sci,
                                             struct the_nilfs *nilfs)
{
        struct nilfs_inode_info *ii, *n;
        struct inode *ifile = sci->sc_root->ifile;

        spin_lock(&nilfs->ns_inode_lock);
 retry:
        list_for_each_entry_safe(ii, n, &nilfs->ns_dirty_files, i_dirty) {
                if (!ii->i_bh) {
                        struct buffer_head *ibh;
                        int err;

                        spin_unlock(&nilfs->ns_inode_lock);
                        err = nilfs_ifile_get_inode_block(
                                ifile, ii->vfs_inode.i_ino, &ibh);
                        if (unlikely(err)) {
                                nilfs_warn(sci->sc_super,
                                           "log writer: error %d getting inode block (ino=%lu)",
                                           err, ii->vfs_inode.i_ino);
                                return err;
                        }
                        spin_lock(&nilfs->ns_inode_lock);
                        if (likely(!ii->i_bh))
                                ii->i_bh = ibh;
                        else
                                brelse(ibh);
                        goto retry;
                }

                /* Always redirty the buffer to avoid race condition */
                mark_buffer_dirty(ii->i_bh);
                nilfs_mdt_mark_dirty(ifile);

                clear_bit(NILFS_I_QUEUED, &ii->i_state);
                set_bit(NILFS_I_BUSY, &ii->i_state);
                list_move_tail(&ii->i_dirty, &sci->sc_dirty_files);
        }
        spin_unlock(&nilfs->ns_inode_lock);

        return 0;
}

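/**
 * nilfs_segctor_drop_written_files - release inodes whose blocks were written
 * @sci: segment constructor object
 * @nilfs: nilfs object
 *
 * Drops the inodes that have been fully written out from the constructor's
 * dirty-file list.  Final iput() calls that could deadlock, i.e. for
 * unlinked inodes or while the mount is not yet finished, are deferred to
 * the sc_iput_work work item.
 */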
static void nilfs_segctor_drop_written_files(struct nilfs_sc_info *sci,
                                             struct the_nilfs *nilfs)
{
        struct nilfs_inode_info *ii, *n;
        int during_mount = !(sci->sc_super->s_flags & SB_ACTIVE);
        int defer_iput = false;

        spin_lock(&nilfs->ns_inode_lock);
        list_for_each_entry_safe(ii, n, &sci->sc_dirty_files, i_dirty) {
                if (!test_and_clear_bit(NILFS_I_UPDATED, &ii->i_state) ||
                    test_bit(NILFS_I_DIRTY, &ii->i_state))
                        continue;

                clear_bit(NILFS_I_BUSY, &ii->i_state);
                brelse(ii->i_bh);
                ii->i_bh = NULL;
                list_del_init(&ii->i_dirty);
                if (!ii->vfs_inode.i_nlink || during_mount) {
                        /*
                         * Defer calling iput() to avoid deadlocks if
                         * i_nlink == 0 or mount is not yet finished.
                         */
                        list_add_tail(&ii->i_dirty, &sci->sc_iput_queue);
                        defer_iput = true;
                } else {
                        spin_unlock(&nilfs->ns_inode_lock);
                        iput(&ii->vfs_inode);
                        spin_lock(&nilfs->ns_inode_lock);
                }
        }
        spin_unlock(&nilfs->ns_inode_lock);

        if (defer_iput)
                schedule_work(&sci->sc_iput_work);
}

/*
 * Main procedure of segment constructor
 */
static int nilfs_segctor_do_construct(struct nilfs_sc_info *sci, int mode)
{
        struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
        int err;

        nilfs_sc_cstage_set(sci, NILFS_ST_INIT);
        sci->sc_cno = nilfs->ns_cno;

        err = nilfs_segctor_collect_dirty_files(sci, nilfs);
        if (unlikely(err))
                goto out;

        if (nilfs_test_metadata_dirty(nilfs, sci->sc_root))
                set_bit(NILFS_SC_DIRTY, &sci->sc_flags);

        if (nilfs_segctor_clean(sci))
                goto out;

        do {
                sci->sc_stage.flags &= ~NILFS_CF_HISTORY_MASK;

                err = nilfs_segctor_begin_construction(sci, nilfs);
                if (unlikely(err))
                        goto out;

                /* Update time stamp */
                sci->sc_seg_ctime = ktime_get_real_seconds();

                err = nilfs_segctor_collect(sci, nilfs, mode);
                if (unlikely(err))
                        goto failed;

                /* Avoid empty segment */
                if (nilfs_sc_cstage_get(sci) == NILFS_ST_DONE &&
                    nilfs_segbuf_empty(sci->sc_curseg)) {
                        nilfs_segctor_abort_construction(sci, nilfs, 1);
                        goto out;
                }

                err = nilfs_segctor_assign(sci, mode);
                if (unlikely(err))
                        goto failed;

                if (sci->sc_stage.flags & NILFS_CF_IFILE_STARTED)
                        nilfs_segctor_fill_in_file_bmap(sci);

                if (mode == SC_LSEG_SR &&
                    nilfs_sc_cstage_get(sci) >= NILFS_ST_CPFILE) {
                        err = nilfs_segctor_fill_in_checkpoint(sci);
                        if (unlikely(err))
                                goto failed_to_write;

                        nilfs_segctor_fill_in_super_root(sci, nilfs);
                }
                nilfs_segctor_update_segusage(sci, nilfs->ns_sufile);

                /* Write partial segments */
                nilfs_segctor_prepare_write(sci);

                nilfs_add_checksums_on_logs(&sci->sc_segbufs,
                                            nilfs->ns_crc_seed);

                err = nilfs_segctor_write(sci, nilfs);
                if (unlikely(err))
                        goto failed_to_write;

                if (nilfs_sc_cstage_get(sci) == NILFS_ST_DONE ||
                    nilfs->ns_blocksize_bits != PAGE_SHIFT) {
                        /*
                         * At this point, we avoid double buffering
                         * for blocksize < pagesize because page dirty
                         * flag is turned off during write and dirty
                         * buffers are not properly collected for
                         * pages crossing over segments.
                         */
                        err = nilfs_segctor_wait(sci);
                        if (err)
                                goto failed_to_write;
                }
        } while (nilfs_sc_cstage_get(sci) != NILFS_ST_DONE);

 out:
        nilfs_segctor_drop_written_files(sci, nilfs);
        return err;

 failed_to_write:
        if (sci->sc_stage.flags & NILFS_CF_IFILE_STARTED)
                nilfs_redirty_inodes(&sci->sc_dirty_files);

 failed:
        if (nilfs_doing_gc())
                nilfs_redirty_inodes(&sci->sc_gc_inodes);
        nilfs_segctor_abort_construction(sci, nilfs, err);
        goto out;
}

/**
 * nilfs_segctor_start_timer - set timer of background write
 * @sci: nilfs_sc_info
 *
 * If the timer has already been set, this function ignores the new request.
 * This function MUST be called within a section locking the segment
 * semaphore.
 */
static void nilfs_segctor_start_timer(struct nilfs_sc_info *sci)
{
        spin_lock(&sci->sc_state_lock);
        if (!(sci->sc_state & NILFS_SEGCTOR_COMMIT)) {
                sci->sc_timer.expires = jiffies + sci->sc_interval;
                add_timer(&sci->sc_timer);
                sci->sc_state |= NILFS_SEGCTOR_COMMIT;
        }
        spin_unlock(&sci->sc_state_lock);
}

static void nilfs_segctor_do_flush(struct nilfs_sc_info *sci, int bn)
{
        spin_lock(&sci->sc_state_lock);
        if (!(sci->sc_flush_request & BIT(bn))) {
                unsigned long prev_req = sci->sc_flush_request;

                sci->sc_flush_request |= BIT(bn);
                if (!prev_req)
                        wake_up(&sci->sc_wait_daemon);
        }
        spin_unlock(&sci->sc_state_lock);
}

/**
 * nilfs_flush_segment - trigger a segment construction for resource control
 * @sb: super block
 * @ino: inode number of the file to be flushed out.
 */
void nilfs_flush_segment(struct super_block *sb, ino_t ino)
{
        struct the_nilfs *nilfs = sb->s_fs_info;
        struct nilfs_sc_info *sci = nilfs->ns_writer;

        if (!sci || nilfs_doing_construction())
                return;

        /* Assign bit 0 to data files */
        nilfs_segctor_do_flush(sci, NILFS_MDT_INODE(sb, ino) ? ino : 0);
}

struct nilfs_segctor_wait_request {
        wait_queue_entry_t wq;
        __u32 seq;
        int err;
        atomic_t done;
};

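/**
 * nilfs_segctor_sync - wait for the completion of a log write request
 * @sci: segment constructor object
 *
 * Registers a wait request carrying a new sequence number, wakes up the
 * log writer thread, and sleeps until the request is marked done or a
 * signal is pending.
 *
 * Return Value: 0 on completion, the error reported by the log writer, or
 * %-ERESTARTSYS if interrupted by a signal.
 */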
static int nilfs_segctor_sync(struct nilfs_sc_info *sci)
{
        struct nilfs_segctor_wait_request wait_req;
        int err = 0;

        spin_lock(&sci->sc_state_lock);
        init_wait(&wait_req.wq);
        wait_req.err = 0;
        atomic_set(&wait_req.done, 0);
        wait_req.seq = ++sci->sc_seq_request;
        spin_unlock(&sci->sc_state_lock);

        init_waitqueue_entry(&wait_req.wq, current);
        add_wait_queue(&sci->sc_wait_request, &wait_req.wq);
        set_current_state(TASK_INTERRUPTIBLE);
        wake_up(&sci->sc_wait_daemon);

        for (;;) {
                if (atomic_read(&wait_req.done)) {
                        err = wait_req.err;
                        break;
                }
                if (!signal_pending(current)) {
                        schedule();
                        continue;
                }
                err = -ERESTARTSYS;
                break;
        }
        finish_wait(&sci->sc_wait_request, &wait_req.wq);
        return err;
}

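/**
 * nilfs_segctor_wakeup - wake up the tasks waiting for log write requests
 * @sci: segment constructor object
 * @err: error code to report to the woken tasks
 *
 * Marks as done every wait request whose sequence number has been reached
 * by sc_seq_done and wakes up the task sleeping on it.
 */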
static void nilfs_segctor_wakeup(struct nilfs_sc_info *sci, int err)
{
        struct nilfs_segctor_wait_request *wrq, *n;
        unsigned long flags;

        spin_lock_irqsave(&sci->sc_wait_request.lock, flags);
        list_for_each_entry_safe(wrq, n, &sci->sc_wait_request.head, wq.entry) {
                if (!atomic_read(&wrq->done) &&
                    nilfs_cnt32_ge(sci->sc_seq_done, wrq->seq)) {
                        wrq->err = err;
                        atomic_set(&wrq->done, 1);
                }
                if (atomic_read(&wrq->done)) {
                        wrq->wq.func(&wrq->wq,
                                     TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE,
                                     0, NULL);
                }
        }
        spin_unlock_irqrestore(&sci->sc_wait_request.lock, flags);
}

/**
 * nilfs_construct_segment - construct a logical segment
 * @sb: super block
 *
 * Return Value: On success, 0 is returned. On errors, one of the following
 * negative error codes is returned.
 *
 * %-EROFS - Read only filesystem.
 *
 * %-EIO - I/O error
 *
 * %-ENOSPC - No space left on device (only in a panic state).
 *
 * %-ERESTARTSYS - Interrupted.
 *
 * %-ENOMEM - Insufficient memory available.
 */
int nilfs_construct_segment(struct super_block *sb)
{
        struct the_nilfs *nilfs = sb->s_fs_info;
        struct nilfs_sc_info *sci = nilfs->ns_writer;
        struct nilfs_transaction_info *ti;
        int err;

        if (sb_rdonly(sb) || unlikely(!sci))
                return -EROFS;

        /* A call inside transactions causes a deadlock. */
        BUG_ON((ti = current->journal_info) && ti->ti_magic == NILFS_TI_MAGIC);

        err = nilfs_segctor_sync(sci);
        return err;
}

/**
 * nilfs_construct_dsync_segment - construct a data-only logical segment
 * @sb: super block
 * @inode: inode whose data blocks should be written out
 * @start: start byte offset
 * @end: end byte offset (inclusive)
 *
 * Return Value: On success, 0 is returned. On errors, one of the following
 * negative error codes is returned.
 *
 * %-EROFS - Read only filesystem.
 *
 * %-EIO - I/O error
 *
 * %-ENOSPC - No space left on device (only in a panic state).
 *
 * %-ERESTARTSYS - Interrupted.
 *
 * %-ENOMEM - Insufficient memory available.
 */
int nilfs_construct_dsync_segment(struct super_block *sb, struct inode *inode,
                                  loff_t start, loff_t end)
{
        struct the_nilfs *nilfs = sb->s_fs_info;
        struct nilfs_sc_info *sci = nilfs->ns_writer;
        struct nilfs_inode_info *ii;
        struct nilfs_transaction_info ti;
        int err = 0;

        if (sb_rdonly(sb) || unlikely(!sci))
                return -EROFS;

        nilfs_transaction_lock(sb, &ti, 0);

        ii = NILFS_I(inode);
        if (test_bit(NILFS_I_INODE_SYNC, &ii->i_state) ||
            nilfs_test_opt(nilfs, STRICT_ORDER) ||
            test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags) ||
            nilfs_discontinued(nilfs)) {
                nilfs_transaction_unlock(sb);
                err = nilfs_segctor_sync(sci);
                return err;
        }

        spin_lock(&nilfs->ns_inode_lock);
        if (!test_bit(NILFS_I_QUEUED, &ii->i_state) &&
            !test_bit(NILFS_I_BUSY, &ii->i_state)) {
                spin_unlock(&nilfs->ns_inode_lock);
                nilfs_transaction_unlock(sb);
                return 0;
        }
        spin_unlock(&nilfs->ns_inode_lock);
        sci->sc_dsync_inode = ii;
        sci->sc_dsync_start = start;
        sci->sc_dsync_end = end;

        err = nilfs_segctor_do_construct(sci, SC_LSEG_DSYNC);
        if (!err)
                nilfs->ns_flushed_device = 0;

        nilfs_transaction_unlock(sb);
        return err;
}

#define FLUSH_FILE_BIT  (0x1) /* data file only */
#define FLUSH_DAT_BIT   BIT(NILFS_DAT_INO) /* DAT only */

/**
 * nilfs_segctor_accept - record accepted sequence count of log-write requests
 * @sci: segment constructor object
 */
static void nilfs_segctor_accept(struct nilfs_sc_info *sci)
{
        spin_lock(&sci->sc_state_lock);
        sci->sc_seq_accepted = sci->sc_seq_request;
        spin_unlock(&sci->sc_state_lock);
        del_timer_sync(&sci->sc_timer);
}

/**
 * nilfs_segctor_notify - notify the result of request to caller threads
 * @sci: segment constructor object
 * @mode: mode of log forming
 * @err: error code to be notified
 */
static void nilfs_segctor_notify(struct nilfs_sc_info *sci, int mode, int err)
{
        /* Clear requests (even when the construction failed) */
        spin_lock(&sci->sc_state_lock);

        if (mode == SC_LSEG_SR) {
                sci->sc_state &= ~NILFS_SEGCTOR_COMMIT;
                sci->sc_seq_done = sci->sc_seq_accepted;
                nilfs_segctor_wakeup(sci, err);
                sci->sc_flush_request = 0;
        } else {
                if (mode == SC_FLUSH_FILE)
                        sci->sc_flush_request &= ~FLUSH_FILE_BIT;
                else if (mode == SC_FLUSH_DAT)
                        sci->sc_flush_request &= ~FLUSH_DAT_BIT;

                /* Re-enable timer if checkpoint creation was not done */
                if ((sci->sc_state & NILFS_SEGCTOR_COMMIT) &&
                    time_before(jiffies, sci->sc_timer.expires))
                        add_timer(&sci->sc_timer);
        }
        spin_unlock(&sci->sc_state_lock);
}

/**
 * nilfs_segctor_construct - form logs and write them to disk
 * @sci: segment constructor object
 * @mode: mode of log forming
 */
static int nilfs_segctor_construct(struct nilfs_sc_info *sci, int mode)
{
        struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
        struct nilfs_super_block **sbp;
        int err = 0;

        nilfs_segctor_accept(sci);

        if (nilfs_discontinued(nilfs))
                mode = SC_LSEG_SR;
        if (!nilfs_segctor_confirm(sci))
                err = nilfs_segctor_do_construct(sci, mode);

        if (likely(!err)) {
                if (mode != SC_FLUSH_DAT)
                        atomic_set(&nilfs->ns_ndirtyblks, 0);
                if (test_bit(NILFS_SC_SUPER_ROOT, &sci->sc_flags) &&
                    nilfs_discontinued(nilfs)) {
                        down_write(&nilfs->ns_sem);
                        err = -EIO;
                        sbp = nilfs_prepare_super(sci->sc_super,
                                                  nilfs_sb_will_flip(nilfs));
                        if (likely(sbp)) {
                                nilfs_set_log_cursor(sbp[0], nilfs);
                                err = nilfs_commit_super(sci->sc_super,
                                                         NILFS_SB_COMMIT);
                        }
                        up_write(&nilfs->ns_sem);
                }
        }

        nilfs_segctor_notify(sci, mode, err);
        return err;
}

static void nilfs_construction_timeout(struct timer_list *t)
{
        struct nilfs_sc_info *sci = from_timer(sci, t, sc_timer);

        wake_up_process(sci->sc_timer_task);
}

static void
nilfs_remove_written_gcinodes(struct the_nilfs *nilfs, struct list_head *head)
{
        struct nilfs_inode_info *ii, *n;

        list_for_each_entry_safe(ii, n, head, i_dirty) {
                if (!test_bit(NILFS_I_UPDATED, &ii->i_state))
                        continue;
                list_del_init(&ii->i_dirty);
                truncate_inode_pages(&ii->vfs_inode.i_data, 0);
                nilfs_btnode_cache_clear(ii->i_assoc_inode->i_mapping);
                iput(&ii->vfs_inode);
        }
}

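/**
 * nilfs_clean_segments - write logs on behalf of the garbage collector
 * @sb: super block
 * @argv: argument vector passed in from the cleaner
 * @kbufs: array of kernel buffers holding the garbage collection data
 *
 * Saves the DAT state to its shadow map so that a failed attempt can be
 * rolled back, then runs segment construction with the supplied GC inodes
 * and segments to free, retrying with a delay until it succeeds.  If the
 * discard mount option is set, discard requests are finally issued for
 * the freed segments.
 *
 * Return Value: On success, 0 is returned. On errors, a negative error
 * code is returned.
 */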
int nilfs_clean_segments(struct super_block *sb, struct nilfs_argv *argv,
                         void **kbufs)
{
        struct the_nilfs *nilfs = sb->s_fs_info;
        struct nilfs_sc_info *sci = nilfs->ns_writer;
        struct nilfs_transaction_info ti;
        int err;

        if (unlikely(!sci))
                return -EROFS;

        nilfs_transaction_lock(sb, &ti, 1);

        err = nilfs_mdt_save_to_shadow_map(nilfs->ns_dat);
        if (unlikely(err))
                goto out_unlock;

        err = nilfs_ioctl_prepare_clean_segments(nilfs, argv, kbufs);
        if (unlikely(err)) {
                nilfs_mdt_restore_from_shadow_map(nilfs->ns_dat);
                goto out_unlock;
        }

        sci->sc_freesegs = kbufs[4];
        sci->sc_nfreesegs = argv[4].v_nmembs;
        list_splice_tail_init(&nilfs->ns_gc_inodes, &sci->sc_gc_inodes);

        for (;;) {
                err = nilfs_segctor_construct(sci, SC_LSEG_SR);
                nilfs_remove_written_gcinodes(nilfs, &sci->sc_gc_inodes);

                if (likely(!err))
                        break;

                nilfs_warn(sb, "error %d cleaning segments", err);
                set_current_state(TASK_INTERRUPTIBLE);
                schedule_timeout(sci->sc_interval);
        }
        if (nilfs_test_opt(nilfs, DISCARD)) {
                int ret = nilfs_discard_segments(nilfs, sci->sc_freesegs,
                                                 sci->sc_nfreesegs);
                if (ret) {
                        nilfs_warn(sb,
                                   "error %d on discard request, turning discards off for the device",
                                   ret);
                        nilfs_clear_opt(nilfs, DISCARD);
                }
        }

 out_unlock:
        sci->sc_freesegs = NULL;
        sci->sc_nfreesegs = 0;
        nilfs_mdt_clear_shadow_map(nilfs->ns_dat);
        nilfs_transaction_unlock(sb);
        return err;
}

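/**
 * nilfs_segctor_thread_construct - construct logs from the writer thread
 * @sci: segment constructor object
 * @mode: mode of log forming
 *
 * Runs a segment construction inside a transaction lock and restarts the
 * construction timer if the written logical segment was left unclosed.
 */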
static void nilfs_segctor_thread_construct(struct nilfs_sc_info *sci, int mode)
{
        struct nilfs_transaction_info ti;

        nilfs_transaction_lock(sci->sc_super, &ti, 0);
        nilfs_segctor_construct(sci, mode);

        /*
         * Unclosed segment should be retried.  We do this using sc_timer.
         * Timeout of sc_timer will invoke complete construction which leads
         * to closing the current logical segment.
         */
        if (test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags))
                nilfs_segctor_start_timer(sci);

        nilfs_transaction_unlock(sci->sc_super);
}

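/**
 * nilfs_segctor_do_immediate_flush - service a pending flush request at once
 * @sci: segment constructor object
 *
 * Picks the pending flush mode (DAT flushes take precedence over data-file
 * flushes), performs the construction, and clears the serviced request bit
 * along with the prior-flush flag.
 */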
static void nilfs_segctor_do_immediate_flush(struct nilfs_sc_info *sci)
{
        int mode = 0;

        spin_lock(&sci->sc_state_lock);
        mode = (sci->sc_flush_request & FLUSH_DAT_BIT) ?
                SC_FLUSH_DAT : SC_FLUSH_FILE;
        spin_unlock(&sci->sc_state_lock);

        if (mode) {
                nilfs_segctor_do_construct(sci, mode);

                spin_lock(&sci->sc_state_lock);
                sci->sc_flush_request &= (mode == SC_FLUSH_FILE) ?
                        ~FLUSH_FILE_BIT : ~FLUSH_DAT_BIT;
                spin_unlock(&sci->sc_state_lock);
        }
        clear_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags);
}

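/**
 * nilfs_segctor_flush_mode - decide the construction mode of the next run
 * @sci: segment constructor object
 *
 * If the current logical segment is closed, or has been open for less than
 * the major checkpoint frequency, a plain flush mode is chosen:
 * %SC_FLUSH_FILE when only data-file flushes are pending, %SC_FLUSH_DAT
 * when only DAT flushes are pending.  In every other case %SC_LSEG_SR is
 * returned so that a full logical segment with a super root is made.
 */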
static int nilfs_segctor_flush_mode(struct nilfs_sc_info *sci)
{
        if (!test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags) ||
            time_before(jiffies, sci->sc_lseg_stime + sci->sc_mjcp_freq)) {
                if (!(sci->sc_flush_request & ~FLUSH_FILE_BIT))
                        return SC_FLUSH_FILE;
                else if (!(sci->sc_flush_request & ~FLUSH_DAT_BIT))
                        return SC_FLUSH_DAT;
        }
        return SC_LSEG_SR;
}

/**
 * nilfs_segctor_thread - main loop of the segment constructor thread.
 * @arg: pointer to a struct nilfs_sc_info.
 *
 * nilfs_segctor_thread() initializes a timer and serves as a daemon
 * to execute segment constructions.
 */
static int nilfs_segctor_thread(void *arg)
{
        struct nilfs_sc_info *sci = (struct nilfs_sc_info *)arg;
        struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
        int timeout = 0;

        sci->sc_timer_task = current;

        /* start sync. */
        sci->sc_task = current;
        wake_up(&sci->sc_wait_task); /* for nilfs_segctor_start_thread() */
        nilfs_info(sci->sc_super,
                   "segctord starting. Construction interval = %lu seconds, CP frequency < %lu seconds",
                   sci->sc_interval / HZ, sci->sc_mjcp_freq / HZ);

        spin_lock(&sci->sc_state_lock);
 loop:
        for (;;) {
                int mode;

                if (sci->sc_state & NILFS_SEGCTOR_QUIT)
                        goto end_thread;

                if (timeout || sci->sc_seq_request != sci->sc_seq_done)
                        mode = SC_LSEG_SR;
                else if (sci->sc_flush_request)
                        mode = nilfs_segctor_flush_mode(sci);
                else
                        break;

                spin_unlock(&sci->sc_state_lock);
                nilfs_segctor_thread_construct(sci, mode);
                spin_lock(&sci->sc_state_lock);
                timeout = 0;
        }

        if (freezing(current)) {
                spin_unlock(&sci->sc_state_lock);
                try_to_freeze();
                spin_lock(&sci->sc_state_lock);
        } else {
                DEFINE_WAIT(wait);
                int should_sleep = 1;

                prepare_to_wait(&sci->sc_wait_daemon, &wait,
                                TASK_INTERRUPTIBLE);

                if (sci->sc_seq_request != sci->sc_seq_done)
                        should_sleep = 0;
                else if (sci->sc_flush_request)
                        should_sleep = 0;
                else if (sci->sc_state & NILFS_SEGCTOR_COMMIT)
                        should_sleep = time_before(jiffies,
                                                   sci->sc_timer.expires);

                if (should_sleep) {
                        spin_unlock(&sci->sc_state_lock);
                        schedule();
                        spin_lock(&sci->sc_state_lock);
                }
                finish_wait(&sci->sc_wait_daemon, &wait);
                timeout = ((sci->sc_state & NILFS_SEGCTOR_COMMIT) &&
                           time_after_eq(jiffies, sci->sc_timer.expires));

                if (nilfs_sb_dirty(nilfs) && nilfs_sb_need_update(nilfs))
                        set_nilfs_discontinued(nilfs);
        }
        goto loop;

 end_thread:
        spin_unlock(&sci->sc_state_lock);

        /* end sync. */
        sci->sc_task = NULL;
        wake_up(&sci->sc_wait_task); /* for nilfs_segctor_kill_thread() */
        return 0;
}

static int nilfs_segctor_start_thread(struct nilfs_sc_info *sci)
{
        struct task_struct *t;

        t = kthread_run(nilfs_segctor_thread, sci, "segctord");
        if (IS_ERR(t)) {
                int err = PTR_ERR(t);

                nilfs_err(sci->sc_super, "error %d creating segctord thread",
                          err);
                return err;
        }
        wait_event(sci->sc_wait_task, sci->sc_task != NULL);
        return 0;
}

static void nilfs_segctor_kill_thread(struct nilfs_sc_info *sci)
        __acquires(&sci->sc_state_lock)
        __releases(&sci->sc_state_lock)
{
        sci->sc_state |= NILFS_SEGCTOR_QUIT;

        while (sci->sc_task) {
                wake_up(&sci->sc_wait_daemon);
                spin_unlock(&sci->sc_state_lock);
                wait_event(sci->sc_wait_task, sci->sc_task == NULL);
                spin_lock(&sci->sc_state_lock);
        }
}

/*
 * Setup & clean-up functions
 */
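/**
 * nilfs_segctor_new - allocate and initialize a segment constructor object
 * @sci: (not applicable)
 * @sb: super block instance
 * @root: root object of the current filesystem tree
 *
 * Sets up the wait queues, buffer lists, iput work item, and construction
 * timer of a freshly allocated nilfs_sc_info object, applying the timing
 * parameters from the mount options where they are given.
 *
 * Return Value: the new nilfs_sc_info object, or NULL if the allocation
 * failed.
 */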
static struct nilfs_sc_info *nilfs_segctor_new(struct super_block *sb,
                                               struct nilfs_root *root)
{
        struct the_nilfs *nilfs = sb->s_fs_info;
        struct nilfs_sc_info *sci;

        sci = kzalloc(sizeof(*sci), GFP_KERNEL);
        if (!sci)
                return NULL;

        sci->sc_super = sb;

        nilfs_get_root(root);
        sci->sc_root = root;

        init_waitqueue_head(&sci->sc_wait_request);
        init_waitqueue_head(&sci->sc_wait_daemon);
        init_waitqueue_head(&sci->sc_wait_task);
        spin_lock_init(&sci->sc_state_lock);
        INIT_LIST_HEAD(&sci->sc_dirty_files);
        INIT_LIST_HEAD(&sci->sc_segbufs);
        INIT_LIST_HEAD(&sci->sc_write_logs);
        INIT_LIST_HEAD(&sci->sc_gc_inodes);
        INIT_LIST_HEAD(&sci->sc_iput_queue);
        INIT_WORK(&sci->sc_iput_work, nilfs_iput_work_func);
        timer_setup(&sci->sc_timer, nilfs_construction_timeout, 0);

        sci->sc_interval = HZ * NILFS_SC_DEFAULT_TIMEOUT;
        sci->sc_mjcp_freq = HZ * NILFS_SC_DEFAULT_SR_FREQ;
        sci->sc_watermark = NILFS_SC_DEFAULT_WATERMARK;

        if (nilfs->ns_interval)
                sci->sc_interval = HZ * nilfs->ns_interval;
        if (nilfs->ns_watermark)
                sci->sc_watermark = nilfs->ns_watermark;
        return sci;
}

static void nilfs_segctor_write_out(struct nilfs_sc_info *sci)
{
        int ret, retrycount = NILFS_SC_CLEANUP_RETRY;

        /*
         * The segctord thread was stopped and its timer was removed.
         * But some tasks remain.
         */
        do {
                struct nilfs_transaction_info ti;

                nilfs_transaction_lock(sci->sc_super, &ti, 0);
                ret = nilfs_segctor_construct(sci, SC_LSEG_SR);
                nilfs_transaction_unlock(sci->sc_super);

                flush_work(&sci->sc_iput_work);

        } while (ret && retrycount-- > 0);
}

/**
 * nilfs_segctor_destroy - destroy the segment constructor.
 * @sci: nilfs_sc_info
 *
 * nilfs_segctor_destroy() kills the segctord thread and frees
 * the nilfs_sc_info struct.
 * Caller must hold the segment semaphore.
 */
static void nilfs_segctor_destroy(struct nilfs_sc_info *sci)
{
        struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
        int flag;

        up_write(&nilfs->ns_segctor_sem);

        spin_lock(&sci->sc_state_lock);
        nilfs_segctor_kill_thread(sci);
        flag = ((sci->sc_state & NILFS_SEGCTOR_COMMIT) || sci->sc_flush_request
                || sci->sc_seq_request != sci->sc_seq_done);
        spin_unlock(&sci->sc_state_lock);

        if (flush_work(&sci->sc_iput_work))
                flag = true;

        if (flag || !nilfs_segctor_confirm(sci))
                nilfs_segctor_write_out(sci);

        if (!list_empty(&sci->sc_dirty_files)) {
                nilfs_warn(sci->sc_super,
                           "disposed unprocessed dirty file(s) when stopping log writer");
                nilfs_dispose_list(nilfs, &sci->sc_dirty_files, 1);
        }

        if (!list_empty(&sci->sc_iput_queue)) {
                nilfs_warn(sci->sc_super,
                           "disposed unprocessed inode(s) in iput queue when stopping log writer");
                nilfs_dispose_list(nilfs, &sci->sc_iput_queue, 1);
        }

        WARN_ON(!list_empty(&sci->sc_segbufs));
        WARN_ON(!list_empty(&sci->sc_write_logs));

        nilfs_put_root(sci->sc_root);

        down_write(&nilfs->ns_segctor_sem);

        del_timer_sync(&sci->sc_timer);
        kfree(sci);
}

/**
 * nilfs_attach_log_writer - attach log writer
 * @sb: super block instance
 * @root: root object of the current filesystem tree
 *
 * This allocates a log writer object, initializes it, and starts the
 * log writer.
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error codes is returned.
 *
 * %-ENOMEM - Insufficient memory available.
 */
int nilfs_attach_log_writer(struct super_block *sb, struct nilfs_root *root)
{
        struct the_nilfs *nilfs = sb->s_fs_info;
        int err;

        if (nilfs->ns_writer) {
                /*
                 * This happens if the filesystem is made read-only by
                 * __nilfs_error or nilfs_remount and then remounted
                 * read/write.  In these cases, reuse the existing writer.
                 */
                return 0;
        }

        nilfs->ns_writer = nilfs_segctor_new(sb, root);
        if (!nilfs->ns_writer)
                return -ENOMEM;

        inode_attach_wb(nilfs->ns_bdev->bd_inode, NULL);

        err = nilfs_segctor_start_thread(nilfs->ns_writer);
        if (unlikely(err))
                nilfs_detach_log_writer(sb);

        return err;
}

/**
 * nilfs_detach_log_writer - destroy log writer
 * @sb: super block instance
 *
 * This kills the log writer daemon, frees the log writer object, and
 * destroys the list of dirty files.
 */
void nilfs_detach_log_writer(struct super_block *sb)
{
        struct the_nilfs *nilfs = sb->s_fs_info;
        LIST_HEAD(garbage_list);

        down_write(&nilfs->ns_segctor_sem);
        if (nilfs->ns_writer) {
                nilfs_segctor_destroy(nilfs->ns_writer);
                nilfs->ns_writer = NULL;
        }

        /* Force to free the list of dirty files */
        spin_lock(&nilfs->ns_inode_lock);
        if (!list_empty(&nilfs->ns_dirty_files)) {
                list_splice_init(&nilfs->ns_dirty_files, &garbage_list);
                nilfs_warn(sb,
                           "disposed unprocessed dirty file(s) when detaching log writer");
        }
        spin_unlock(&nilfs->ns_inode_lock);
        up_write(&nilfs->ns_segctor_sem);

        nilfs_dispose_list(nilfs, &garbage_list, 1);
}