// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 */

#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/gfs2_ondisk.h>
#include <linux/bio.h>
#include <linux/posix_acl.h>
#include <linux/security.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "recovery.h"
#include "rgrp.h"
#include "util.h"
#include "trans.h"
#include "dir.h"
#include "lops.h"

struct workqueue_struct *gfs2_freeze_wq;

extern struct workqueue_struct *gfs2_control_wq;

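/**
 * gfs2_ail_error - complain about an AIL buffer in an unexpected state
 * @gl: the glock the buffer belongs to
 * @bh: the offending buffer
 *
 * Logs the buffer and glock state for debugging, then withdraws the
 * filesystem, since an AIL buffer in this state means the journaling
 * state can no longer be trusted.
 */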
static void gfs2_ail_error(struct gfs2_glock *gl, const struct buffer_head *bh)
{
	fs_err(gl->gl_name.ln_sbd,
	       "AIL buffer %p: blocknr %llu state 0x%08lx mapping %p page "
	       "state 0x%lx\n",
	       bh, (unsigned long long)bh->b_blocknr, bh->b_state,
	       bh->b_page->mapping, bh->b_page->flags);
	fs_err(gl->gl_name.ln_sbd, "AIL glock %u:%llu mapping %p\n",
	       gl->gl_name.ln_type, gl->gl_name.ln_number,
	       gfs2_glock2aspace(gl));
	gfs2_lm(gl->gl_name.ln_sbd, "AIL error\n");
	gfs2_withdraw(gl->gl_name.ln_sbd);
}

/**
 * __gfs2_ail_flush - remove all buffers for a given lock from the AIL
 * @gl: the glock
 * @fsync: set when called from fsync (not all buffers will be clean)
 * @nr_revokes: the maximum number of revokes to issue
 *
 * None of the buffers should be dirty, locked, or pinned.
 */

static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync,
			     unsigned int nr_revokes)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct list_head *head = &gl->gl_ail_list;
	struct gfs2_bufdata *bd, *tmp;
	struct buffer_head *bh;
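	/* An AIL buffer must not be dirty, pinned, or locked at this point */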
	const unsigned long b_state = (1UL << BH_Dirty)|(1UL << BH_Pinned)|(1UL << BH_Lock);

	gfs2_log_lock(sdp);
	spin_lock(&sdp->sd_ail_lock);
	list_for_each_entry_safe_reverse(bd, tmp, head, bd_ail_gl_list) {
		if (nr_revokes == 0)
			break;
		bh = bd->bd_bh;
		if (bh->b_state & b_state) {
			if (fsync)
				continue;
			gfs2_ail_error(gl, bh);
		}
		gfs2_trans_add_revoke(sdp, bd);
		nr_revokes--;
	}
	GLOCK_BUG_ON(gl, !fsync && atomic_read(&gl->gl_ail_count));
	spin_unlock(&sdp->sd_ail_lock);
	gfs2_log_unlock(sdp);
}

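/**
 * gfs2_ail_empty_gl - empty out the AIL list for a given lock
 * @gl: the glock
 *
 * Issues revokes for all buffers on the glock's AIL list inside a small,
 * on-stack transaction, then flushes the log so the revokes make it to
 * the journal.
 *
 * Returns: errno
 */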
static int gfs2_ail_empty_gl(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_trans tr;
	int ret;

	memset(&tr, 0, sizeof(tr));
	INIT_LIST_HEAD(&tr.tr_buf);
	INIT_LIST_HEAD(&tr.tr_databuf);
	INIT_LIST_HEAD(&tr.tr_ail1_list);
	INIT_LIST_HEAD(&tr.tr_ail2_list);
	tr.tr_revokes = atomic_read(&gl->gl_ail_count);

	if (!tr.tr_revokes) {
		bool have_revokes;
		bool log_in_flight;

		/*
		 * We have nothing on the AIL, but there could be revokes on
		 * the sdp revoke queue, in which case we still want to flush
		 * the log and wait for it to finish.
		 *
		 * If the sdp revoke list is empty too, we might still have an
		 * I/O outstanding for writing revokes, so we should wait for
		 * it before returning.
		 *
		 * If none of these conditions are true, our revokes are all
		 * flushed and we can return.
		 */
		gfs2_log_lock(sdp);
		have_revokes = !list_empty(&sdp->sd_log_revokes);
		log_in_flight = atomic_read(&sdp->sd_log_in_flight);
		gfs2_log_unlock(sdp);
		if (have_revokes)
			goto flush;
		if (log_in_flight)
			log_flush_wait(sdp);
		return 0;
	}

	/* A shortened, inline version of gfs2_trans_begin()
	 * tr->alloced is not set since the transaction structure is
	 * on the stack */
	tr.tr_reserved = 1 + gfs2_struct2blk(sdp, tr.tr_revokes);
	tr.tr_ip = _RET_IP_;
	ret = gfs2_log_reserve(sdp, tr.tr_reserved);
	if (ret < 0)
		return ret;
	WARN_ON_ONCE(current->journal_info);
	current->journal_info = &tr;

	__gfs2_ail_flush(gl, false, tr.tr_revokes);

	gfs2_trans_end(sdp);
flush:
	gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
		       GFS2_LFC_AIL_EMPTY_GL);
	return 0;
}

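/**
 * gfs2_ail_flush - begin a transaction and flush the AIL buffers of a glock
 * @gl: the glock
 * @fsync: set when called from fsync (not all buffers will be clean)
 */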
void gfs2_ail_flush(struct gfs2_glock *gl, bool fsync)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	unsigned int revokes = atomic_read(&gl->gl_ail_count);
	unsigned int max_revokes = (sdp->sd_sb.sb_bsize -
				    sizeof(struct gfs2_log_descriptor)) /
				   sizeof(u64);
	int ret;

	if (!revokes)
		return;

	while (revokes > max_revokes)
		max_revokes += (sdp->sd_sb.sb_bsize -
				sizeof(struct gfs2_meta_header)) / sizeof(u64);

	ret = gfs2_trans_begin(sdp, 0, max_revokes);
	if (ret)
		return;
	__gfs2_ail_flush(gl, fsync, max_revokes);
	gfs2_trans_end(sdp);
	gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
		       GFS2_LFC_AIL_FLUSH);
}

/**
 * gfs2_rgrp_metasync - sync out the metadata of a resource group
 * @gl: the glock protecting the resource group
 *
 * Returns: errno
 */

static int gfs2_rgrp_metasync(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct address_space *metamapping = &sdp->sd_aspace;
	struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
	const unsigned bsize = sdp->sd_sb.sb_bsize;
	loff_t start = (rgd->rd_addr * bsize) & PAGE_MASK;
	loff_t end = PAGE_ALIGN((rgd->rd_addr + rgd->rd_length) * bsize) - 1;
	int error;

	filemap_fdatawrite_range(metamapping, start, end);
	error = filemap_fdatawait_range(metamapping, start, end);
	WARN_ON_ONCE(error && !gfs2_withdrawn(sdp));
	mapping_set_error(metamapping, error);
	if (error)
		gfs2_io_error(sdp);
	return error;
}

/**
 * rgrp_go_sync - sync out the metadata for this glock
 * @gl: the glock
 *
 * Called when demoting or unlocking an EX glock. We must flush
 * to disk all dirty buffers/pages relating to this glock, and must not
 * return to the caller to demote/unlock the glock until I/O is complete.
 */

static int rgrp_go_sync(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
	int error;

	if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
		return 0;
	GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);

	gfs2_log_flush(sdp, gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
		       GFS2_LFC_RGRP_GO_SYNC);
	error = gfs2_rgrp_metasync(gl);
	if (!error)
		error = gfs2_ail_empty_gl(gl);
	gfs2_free_clones(rgd);
	return error;
}

/**
 * rgrp_go_inval - invalidate the metadata for this glock
 * @gl: the glock
 * @flags: DIO_METADATA is the only flag expected here
 *
 * We never use LM_ST_DEFERRED with resource groups, so we should
 * always see the metadata flag set here.
 */

static void rgrp_go_inval(struct gfs2_glock *gl, int flags)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct address_space *mapping = &sdp->sd_aspace;
	struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
	const unsigned bsize = sdp->sd_sb.sb_bsize;
	loff_t start = (rgd->rd_addr * bsize) & PAGE_MASK;
	loff_t end = PAGE_ALIGN((rgd->rd_addr + rgd->rd_length) * bsize) - 1;

	gfs2_rgrp_brelse(rgd);
	WARN_ON_ONCE(!(flags & DIO_METADATA));
	truncate_inode_pages_range(mapping, start, end);
	rgd->rd_flags &= ~GFS2_RDF_UPTODATE;
}

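/**
 * gfs2_rgrp_go_dump - print information about a resource group glock
 * @seq: The iterator
 * @gl: the glock
 * @fs_id_buf: file system id (may be empty)
 */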
static void gfs2_rgrp_go_dump(struct seq_file *seq, struct gfs2_glock *gl,
			      const char *fs_id_buf)
{
	struct gfs2_rgrpd *rgd = gl->gl_object;

	if (rgd)
		gfs2_rgrp_dump(seq, rgd, fs_id_buf);
}

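/**
 * gfs2_glock2inode - safely look up the inode behind an inode glock
 * @gl: the glock
 *
 * Takes gl_lockref.lock and sets GIF_GLOP_PENDING on the inode (if any)
 * so that waiters can tell a glock operation is still in progress until
 * gfs2_clear_glop_pending() is called.
 */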
static struct gfs2_inode *gfs2_glock2inode(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip;

	spin_lock(&gl->gl_lockref.lock);
	ip = gl->gl_object;
	if (ip)
		set_bit(GIF_GLOP_PENDING, &ip->i_flags);
	spin_unlock(&gl->gl_lockref.lock);
	return ip;
}

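/**
 * gfs2_glock2rgrp - look up the resource group behind an rgrp glock
 * @gl: the glock
 */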
struct gfs2_rgrpd *gfs2_glock2rgrp(struct gfs2_glock *gl)
{
	struct gfs2_rgrpd *rgd;

	spin_lock(&gl->gl_lockref.lock);
	rgd = gl->gl_object;
	spin_unlock(&gl->gl_lockref.lock);

	return rgd;
}

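/**
 * gfs2_clear_glop_pending - mark the end of a pending glock operation
 * @ip: the inode (may be NULL)
 *
 * Clears GIF_GLOP_PENDING and wakes up any waiters; pairs with
 * gfs2_glock2inode() above.
 */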
static void gfs2_clear_glop_pending(struct gfs2_inode *ip)
{
	if (!ip)
		return;

	clear_bit_unlock(GIF_GLOP_PENDING, &ip->i_flags);
	wake_up_bit(&ip->i_flags, GIF_GLOP_PENDING);
}

/**
 * gfs2_inode_metasync - sync out the metadata of an inode
 * @gl: the glock protecting the inode
 *
 * Returns: errno
 */
int gfs2_inode_metasync(struct gfs2_glock *gl)
{
	struct address_space *metamapping = gfs2_glock2aspace(gl);
	int error;

	filemap_fdatawrite(metamapping);
	error = filemap_fdatawait(metamapping);
	if (error)
		gfs2_io_error(gl->gl_name.ln_sbd);
	return error;
}

/**
 * inode_go_sync - Sync the dirty metadata of an inode
 * @gl: the glock protecting the inode
 *
 * Returns: errno
 */

static int inode_go_sync(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip = gfs2_glock2inode(gl);
	int isreg = ip && S_ISREG(ip->i_inode.i_mode);
	struct address_space *metamapping = gfs2_glock2aspace(gl);
	int error = 0, ret;

	if (isreg) {
		if (test_and_clear_bit(GIF_SW_PAGED, &ip->i_flags))
			unmap_shared_mapping_range(ip->i_inode.i_mapping, 0, 0);
		inode_dio_wait(&ip->i_inode);
	}
	if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
		goto out;

	GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);

	gfs2_log_flush(gl->gl_name.ln_sbd, gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
		       GFS2_LFC_INODE_GO_SYNC);
	filemap_fdatawrite(metamapping);
	if (isreg) {
		struct address_space *mapping = ip->i_inode.i_mapping;
		filemap_fdatawrite(mapping);
		error = filemap_fdatawait(mapping);
		mapping_set_error(mapping, error);
	}
	ret = gfs2_inode_metasync(gl);
	if (!error)
		error = ret;
	gfs2_ail_empty_gl(gl);
	/*
	 * Writeback of the data mapping may cause the dirty flag to be set
	 * so we have to clear it again here.
	 */
	smp_mb__before_atomic();
	clear_bit(GLF_DIRTY, &gl->gl_flags);

out:
	gfs2_clear_glop_pending(ip);
	return error;
}

/**
 * inode_go_inval - prepare an inode glock to be released
 * @gl: the glock
 * @flags: DIO_METADATA is set when the metadata pages must be invalidated
 *
 * Normally we invalidate everything, but if we are moving into
 * LM_ST_DEFERRED from LM_ST_SHARED or LM_ST_EXCLUSIVE then we
 * can keep hold of the metadata, since it won't have changed.
 */

static void inode_go_inval(struct gfs2_glock *gl, int flags)
{
	struct gfs2_inode *ip = gfs2_glock2inode(gl);

	if (flags & DIO_METADATA) {
		struct address_space *mapping = gfs2_glock2aspace(gl);
		truncate_inode_pages(mapping, 0);
		if (ip) {
			set_bit(GIF_INVALID, &ip->i_flags);
			forget_all_cached_acls(&ip->i_inode);
			security_inode_invalidate_secctx(&ip->i_inode);
			gfs2_dir_hash_inval(ip);
		}
	}

	if (ip == GFS2_I(gl->gl_name.ln_sbd->sd_rindex)) {
		gfs2_log_flush(gl->gl_name.ln_sbd, NULL,
			       GFS2_LOG_HEAD_FLUSH_NORMAL |
			       GFS2_LFC_INODE_GO_INVAL);
		gl->gl_name.ln_sbd->sd_rindex_uptodate = 0;
	}
	if (ip && S_ISREG(ip->i_inode.i_mode))
		truncate_inode_pages(ip->i_inode.i_mapping, 0);

	gfs2_clear_glop_pending(ip);
}

/**
 * inode_go_demote_ok - Check to see if it's ok to unlock an inode glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

static int inode_go_demote_ok(const struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	if (sdp->sd_jindex == gl->gl_object || sdp->sd_rindex == gl->gl_object)
		return 0;

	return 1;
}

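/**
 * gfs2_dinode_in - copy an on-disk dinode into the incore inode
 * @ip: The GFS2 inode
 * @buf: the buffer containing the on-disk dinode
 *
 * Returns: 0 on success, or -EIO if the dinode fails a sanity check
 */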
static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	const struct gfs2_dinode *str = buf;
	struct timespec64 atime;
	u16 height, depth;

	if (unlikely(ip->i_no_addr != be64_to_cpu(str->di_num.no_addr)))
		goto corrupt;
	ip->i_no_formal_ino = be64_to_cpu(str->di_num.no_formal_ino);
	ip->i_inode.i_mode = be32_to_cpu(str->di_mode);
	ip->i_inode.i_rdev = 0;
	switch (ip->i_inode.i_mode & S_IFMT) {
	case S_IFBLK:
	case S_IFCHR:
		ip->i_inode.i_rdev = MKDEV(be32_to_cpu(str->di_major),
					   be32_to_cpu(str->di_minor));
		break;
	}

	i_uid_write(&ip->i_inode, be32_to_cpu(str->di_uid));
	i_gid_write(&ip->i_inode, be32_to_cpu(str->di_gid));
	set_nlink(&ip->i_inode, be32_to_cpu(str->di_nlink));
	i_size_write(&ip->i_inode, be64_to_cpu(str->di_size));
	gfs2_set_inode_blocks(&ip->i_inode, be64_to_cpu(str->di_blocks));
	atime.tv_sec = be64_to_cpu(str->di_atime);
	atime.tv_nsec = be32_to_cpu(str->di_atime_nsec);
	if (timespec64_compare(&ip->i_inode.i_atime, &atime) < 0)
		ip->i_inode.i_atime = atime;
	ip->i_inode.i_mtime.tv_sec = be64_to_cpu(str->di_mtime);
	ip->i_inode.i_mtime.tv_nsec = be32_to_cpu(str->di_mtime_nsec);
	ip->i_inode.i_ctime.tv_sec = be64_to_cpu(str->di_ctime);
	ip->i_inode.i_ctime.tv_nsec = be32_to_cpu(str->di_ctime_nsec);

	ip->i_goal = be64_to_cpu(str->di_goal_meta);
	ip->i_generation = be64_to_cpu(str->di_generation);

	ip->i_diskflags = be32_to_cpu(str->di_flags);
	ip->i_eattr = be64_to_cpu(str->di_eattr);
	/* i_diskflags and i_eattr must be set before gfs2_set_inode_flags() */
	gfs2_set_inode_flags(&ip->i_inode);
	height = be16_to_cpu(str->di_height);
	if (unlikely(height > sdp->sd_max_height))
		goto corrupt;
	ip->i_height = (u8)height;

	depth = be16_to_cpu(str->di_depth);
	if (unlikely(depth > GFS2_DIR_MAX_DEPTH))
		goto corrupt;
	ip->i_depth = (u8)depth;
	ip->i_entries = be32_to_cpu(str->di_entries);

	if (gfs2_is_stuffed(ip) && ip->i_inode.i_size > gfs2_max_stuffed_size(ip))
		goto corrupt;

	if (S_ISREG(ip->i_inode.i_mode))
		gfs2_set_aops(&ip->i_inode);

	return 0;
corrupt:
	gfs2_consist_inode(ip);
	return -EIO;
}

/**
 * gfs2_inode_refresh - Refresh the incore copy of the dinode
 * @ip: The GFS2 inode
 *
 * Returns: errno
 */

int gfs2_inode_refresh(struct gfs2_inode *ip)
{
	struct buffer_head *dibh;
	int error;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		return error;

	error = gfs2_dinode_in(ip, dibh->b_data);
	brelse(dibh);
	clear_bit(GIF_INVALID, &ip->i_flags);

	return error;
}

/**
 * inode_go_lock - operation done after an inode lock is locked by a process
 * @gh: the glock holder
 *
 * Returns: errno
 */

static int inode_go_lock(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_inode *ip = gl->gl_object;
	int error = 0;

	if (!ip || (gh->gh_flags & GL_SKIP))
		return 0;

	if (test_bit(GIF_INVALID, &ip->i_flags)) {
		error = gfs2_inode_refresh(ip);
		if (error)
			return error;
	}

	if (gh->gh_state != LM_ST_DEFERRED)
		inode_dio_wait(&ip->i_inode);

	if ((ip->i_diskflags & GFS2_DIF_TRUNC_IN_PROG) &&
	    (gl->gl_state == LM_ST_EXCLUSIVE) &&
	    (gh->gh_state == LM_ST_EXCLUSIVE)) {
		spin_lock(&sdp->sd_trunc_lock);
		if (list_empty(&ip->i_trunc_list))
			list_add(&ip->i_trunc_list, &sdp->sd_trunc_list);
		spin_unlock(&sdp->sd_trunc_lock);
		wake_up(&sdp->sd_quota_wait);
		return 1;
	}

	return error;
}

/**
 * inode_go_dump - print information about an inode
 * @seq: The iterator
 * @gl: the glock
 * @fs_id_buf: file system id (may be empty)
 */

static void inode_go_dump(struct seq_file *seq, struct gfs2_glock *gl,
			  const char *fs_id_buf)
{
	struct gfs2_inode *ip = gl->gl_object;
	struct inode *inode;
	unsigned long nrpages;

	if (ip == NULL)
		return;

	inode = &ip->i_inode;
	xa_lock_irq(&inode->i_data.i_pages);
	nrpages = inode->i_data.nrpages;
	xa_unlock_irq(&inode->i_data.i_pages);

	gfs2_print_dbg(seq, "%s I: n:%llu/%llu t:%u f:0x%02lx d:0x%08x s:%llu "
		       "p:%lu\n", fs_id_buf,
		       (unsigned long long)ip->i_no_formal_ino,
		       (unsigned long long)ip->i_no_addr,
		       IF2DT(ip->i_inode.i_mode), ip->i_flags,
		       (unsigned int)ip->i_diskflags,
		       (unsigned long long)i_size_read(inode), nrpages);
}

/**
 * freeze_go_sync - promote/demote the freeze glock
 * @gl: the glock
 */

static int freeze_go_sync(struct gfs2_glock *gl)
{
	int error = 0;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	/*
	 * We need to check gl_state == LM_ST_SHARED here and not gl_req ==
	 * LM_ST_EXCLUSIVE. That's because when any node does a freeze,
	 * all the nodes should have the freeze glock in SH mode and they all
	 * call do_xmote: One for EX and the others for UN. They ALL must
	 * freeze locally, and they ALL must queue freeze work. The freeze_work
	 * calls freeze_func, which tries to reacquire the freeze glock in SH,
	 * effectively waiting for the thaw on the node that holds it in EX.
	 * Once thawed, the work func acquires the freeze glock in
	 * SH and everybody goes back to thawed.
	 */
	if (gl->gl_state == LM_ST_SHARED && !gfs2_withdrawn(sdp) &&
	    !test_bit(SDF_NORECOVERY, &sdp->sd_flags)) {
		atomic_set(&sdp->sd_freeze_state, SFS_STARTING_FREEZE);
		error = freeze_super(sdp->sd_vfs);
		if (error) {
			fs_info(sdp, "GFS2: couldn't freeze filesystem: %d\n",
				error);
			if (gfs2_withdrawn(sdp)) {
				atomic_set(&sdp->sd_freeze_state, SFS_UNFROZEN);
				return 0;
			}
			gfs2_assert_withdraw(sdp, 0);
		}
		queue_work(gfs2_freeze_wq, &sdp->sd_freeze_work);
		if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))
			gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_FREEZE |
				       GFS2_LFC_FREEZE_GO_SYNC);
		else /* read-only mounts */
			atomic_set(&sdp->sd_freeze_state, SFS_FROZEN);
	}
	return 0;
}

/**
 * freeze_go_xmote_bh - After promoting/demoting the freeze glock
 * @gl: the glock
 * @gh: the glock holder
 */

static int freeze_go_xmote_bh(struct gfs2_glock *gl, struct gfs2_holder *gh)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode);
	struct gfs2_glock *j_gl = ip->i_gl;
	struct gfs2_log_header_host head;
	int error;

	if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
		j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);

		error = gfs2_find_jhead(sdp->sd_jdesc, &head, false);
		if (gfs2_assert_withdraw_delayed(sdp, !error))
			return error;
		if (gfs2_assert_withdraw_delayed(sdp, head.lh_flags &
						 GFS2_LOG_HEAD_UNMOUNT))
			return -EIO;
		sdp->sd_log_sequence = head.lh_sequence + 1;
		gfs2_log_pointers_init(sdp, head.lh_blkno);
	}
	return 0;
}

/**
 * freeze_go_demote_ok
 * @gl: the glock
 *
 * Always returns 0
 */

static int freeze_go_demote_ok(const struct gfs2_glock *gl)
{
	return 0;
}

/**
 * iopen_go_callback - schedule the dcache entry for the inode to be deleted
 * @gl: the glock
 * @remote: true if this came from a different cluster node
 *
 * gl_lockref.lock lock is held while calling this
 */
static void iopen_go_callback(struct gfs2_glock *gl, bool remote)
{
	struct gfs2_inode *ip = gl->gl_object;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	if (!remote || sb_rdonly(sdp->sd_vfs))
		return;

	if (gl->gl_demote_state == LM_ST_UNLOCKED &&
	    gl->gl_state == LM_ST_SHARED && ip) {
		gl->gl_lockref.count++;
		if (!queue_delayed_work(gfs2_delete_workqueue,
					&gl->gl_delete, 0))
			gl->gl_lockref.count--;
	}
}

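/**
 * iopen_go_demote_ok - check whether an iopen glock may be demoted
 * @gl: the glock
 *
 * Keep the glock as long as delete work is still queued against it.
 */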
static int iopen_go_demote_ok(const struct gfs2_glock *gl)
{
	return !gfs2_delete_work_queued(gl);
}

/**
 * inode_go_free - wake up anyone waiting for dlm's unlock ast to free it
 * @gl: glock being freed
 *
 * For now, this is only used for the journal inode glock. In withdraw
 * situations, we need to wait for the glock to be freed so that we know
 * other nodes may proceed with recovery / journal replay.
 */
static void inode_go_free(struct gfs2_glock *gl)
{
	/* Note that we cannot reference gl_object because it's already set
	 * to NULL by this point in its lifecycle. */
	if (!test_bit(GLF_FREEING, &gl->gl_flags))
		return;
	clear_bit_unlock(GLF_FREEING, &gl->gl_flags);
	wake_up_bit(&gl->gl_flags, GLF_FREEING);
}

/**
 * nondisk_go_callback - used to signal when a node did a withdraw
 * @gl: the nondisk glock
 * @remote: true if this came from a different cluster node
 */
static void nondisk_go_callback(struct gfs2_glock *gl, bool remote)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	/* Ignore the callback unless it's from another node, and it's the
	 * live lock. */
	if (!remote || gl->gl_name.ln_number != GFS2_LIVE_LOCK)
		return;

	/* First order of business is to cancel the demote request. We don't
	 * really want to demote a nondisk glock. At best it's just to inform
	 * us of another node's withdraw. We'll keep it in SH mode. */
	clear_bit(GLF_DEMOTE, &gl->gl_flags);
	clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags);

	/* Ignore the unlock if we're withdrawn, unmounting, or in recovery. */
	if (test_bit(SDF_NORECOVERY, &sdp->sd_flags) ||
	    test_bit(SDF_WITHDRAWN, &sdp->sd_flags) ||
	    test_bit(SDF_REMOTE_WITHDRAW, &sdp->sd_flags))
		return;

	/* We only care when a node wants us to unlock, because that means
	 * they want a journal recovered. */
	if (gl->gl_demote_state != LM_ST_UNLOCKED)
		return;

	if (sdp->sd_args.ar_spectator) {
		fs_warn(sdp, "Spectator node cannot recover journals.\n");
		return;
	}

	fs_warn(sdp, "Some node has withdrawn; checking for recovery.\n");
	set_bit(SDF_REMOTE_WITHDRAW, &sdp->sd_flags);
	/*
	 * We can't call remote_withdraw or gfs2_recover_journal directly
	 * here because this is called from the glock unlock function and
	 * remote_withdraw needs to enqueue and dequeue the same "live" glock
	 * we were called from. So we queue it to the control work queue in
	 * lock_dlm.
	 */
	queue_delayed_work(gfs2_control_wq, &sdp->sd_control_work, 0);
}

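/*
 * Glock operation vectors. gfs2_glops_list[] below maps each lock type
 * (LM_TYPE_*) to its operations.
 */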
const struct gfs2_glock_operations gfs2_meta_glops = {
	.go_type = LM_TYPE_META,
	.go_flags = GLOF_NONDISK,
};

const struct gfs2_glock_operations gfs2_inode_glops = {
	.go_sync = inode_go_sync,
	.go_inval = inode_go_inval,
	.go_demote_ok = inode_go_demote_ok,
	.go_lock = inode_go_lock,
	.go_dump = inode_go_dump,
	.go_type = LM_TYPE_INODE,
	.go_flags = GLOF_ASPACE | GLOF_LRU | GLOF_LVB,
	.go_free = inode_go_free,
};

const struct gfs2_glock_operations gfs2_rgrp_glops = {
	.go_sync = rgrp_go_sync,
	.go_inval = rgrp_go_inval,
	.go_lock = gfs2_rgrp_go_lock,
	.go_dump = gfs2_rgrp_go_dump,
	.go_type = LM_TYPE_RGRP,
	.go_flags = GLOF_LVB,
};

const struct gfs2_glock_operations gfs2_freeze_glops = {
	.go_sync = freeze_go_sync,
	.go_xmote_bh = freeze_go_xmote_bh,
	.go_demote_ok = freeze_go_demote_ok,
	.go_type = LM_TYPE_NONDISK,
	.go_flags = GLOF_NONDISK,
};

const struct gfs2_glock_operations gfs2_iopen_glops = {
	.go_type = LM_TYPE_IOPEN,
	.go_callback = iopen_go_callback,
	.go_demote_ok = iopen_go_demote_ok,
	.go_flags = GLOF_LRU | GLOF_NONDISK,
	.go_subclass = 1,
};

const struct gfs2_glock_operations gfs2_flock_glops = {
	.go_type = LM_TYPE_FLOCK,
	.go_flags = GLOF_LRU | GLOF_NONDISK,
};

const struct gfs2_glock_operations gfs2_nondisk_glops = {
	.go_type = LM_TYPE_NONDISK,
	.go_flags = GLOF_NONDISK,
	.go_callback = nondisk_go_callback,
};

const struct gfs2_glock_operations gfs2_quota_glops = {
	.go_type = LM_TYPE_QUOTA,
	.go_flags = GLOF_LVB | GLOF_LRU | GLOF_NONDISK,
};

const struct gfs2_glock_operations gfs2_journal_glops = {
	.go_type = LM_TYPE_JOURNAL,
	.go_flags = GLOF_NONDISK,
};

const struct gfs2_glock_operations *gfs2_glops_list[] = {
	[LM_TYPE_META] = &gfs2_meta_glops,
	[LM_TYPE_INODE] = &gfs2_inode_glops,
	[LM_TYPE_RGRP] = &gfs2_rgrp_glops,
	[LM_TYPE_IOPEN] = &gfs2_iopen_glops,
	[LM_TYPE_FLOCK] = &gfs2_flock_glops,
	[LM_TYPE_NONDISK] = &gfs2_nondisk_glops,
	[LM_TYPE_QUOTA] = &gfs2_quota_glops,
	[LM_TYPE_JOURNAL] = &gfs2_journal_glops,
};