1 /* -*- mode: c; c-basic-offset: 8; -*-
2 * vim: noexpandtab sw=8 ts=8 sts=0:
3 *
4 * dlmglue.c
5 *
6 * Code which implements an OCFS2 specific interface to our DLM.
7 *
8 * Copyright (C) 2003, 2004 Oracle. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public
12 * License as published by the Free Software Foundation; either
13 * version 2 of the License, or (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public
21 * License along with this program; if not, write to the
22 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
23 * Boston, MA 02111-1307, USA.
24 */
25
26 #include <linux/types.h>
27 #include <linux/slab.h>
28 #include <linux/highmem.h>
29 #include <linux/mm.h>
30 #include <linux/kthread.h>
31 #include <linux/pagemap.h>
32 #include <linux/debugfs.h>
33 #include <linux/seq_file.h>
34 #include <linux/time.h>
35 #include <linux/quotaops.h>
36
37 #define MLOG_MASK_PREFIX ML_DLM_GLUE
38 #include <cluster/masklog.h>
39
40 #include "ocfs2.h"
41 #include "ocfs2_lockingver.h"
42
43 #include "alloc.h"
44 #include "dcache.h"
45 #include "dlmglue.h"
46 #include "extent_map.h"
47 #include "file.h"
48 #include "heartbeat.h"
49 #include "inode.h"
50 #include "journal.h"
51 #include "stackglue.h"
52 #include "slot_map.h"
53 #include "super.h"
54 #include "uptodate.h"
55 #include "quota.h"
56 #include "refcounttree.h"
57
58 #include "buffer_head_io.h"
59
60 struct ocfs2_mask_waiter {
61 struct list_head mw_item;
62 int mw_status;
63 struct completion mw_complete;
64 unsigned long mw_mask;
65 unsigned long mw_goal;
66 #ifdef CONFIG_OCFS2_FS_STATS
67 ktime_t mw_lock_start;
68 #endif
69 };
70
71 static struct ocfs2_super *ocfs2_get_dentry_osb(struct ocfs2_lock_res *lockres);
72 static struct ocfs2_super *ocfs2_get_inode_osb(struct ocfs2_lock_res *lockres);
73 static struct ocfs2_super *ocfs2_get_file_osb(struct ocfs2_lock_res *lockres);
74 static struct ocfs2_super *ocfs2_get_qinfo_osb(struct ocfs2_lock_res *lockres);
75
76 /*
77 * Return value from ->downconvert_worker functions.
78 *
79 * These control the precise actions of ocfs2_unblock_lock()
80 * and ocfs2_process_blocked_lock()
81 *
82 */
83 enum ocfs2_unblock_action {
84 UNBLOCK_CONTINUE = 0, /* Continue downconvert */
85 UNBLOCK_CONTINUE_POST = 1, /* Continue downconvert, fire
86 * ->post_unlock callback */
87 UNBLOCK_STOP_POST = 2, /* Do not downconvert, fire
88 * ->post_unlock() callback. */
89 };
90
91 struct ocfs2_unblock_ctl {
92 int requeue;
93 enum ocfs2_unblock_action unblock_action;
94 };
95
96 /* Lockdep class keys */
97 struct lock_class_key lockdep_keys[OCFS2_NUM_LOCK_TYPES];
98
99 static int ocfs2_check_meta_downconvert(struct ocfs2_lock_res *lockres,
100 int new_level);
101 static void ocfs2_set_meta_lvb(struct ocfs2_lock_res *lockres);
102
103 static int ocfs2_data_convert_worker(struct ocfs2_lock_res *lockres,
104 int blocking);
105
106 static int ocfs2_dentry_convert_worker(struct ocfs2_lock_res *lockres,
107 int blocking);
108
109 static void ocfs2_dentry_post_unlock(struct ocfs2_super *osb,
110 struct ocfs2_lock_res *lockres);
111
112 static void ocfs2_set_qinfo_lvb(struct ocfs2_lock_res *lockres);
113
114 static int ocfs2_check_refcount_downconvert(struct ocfs2_lock_res *lockres,
115 int new_level);
116 static int ocfs2_refcount_convert_worker(struct ocfs2_lock_res *lockres,
117 int blocking);
118
119 #define mlog_meta_lvb(__level, __lockres) ocfs2_dump_meta_lvb_info(__level, __PRETTY_FUNCTION__, __LINE__, __lockres)
120
121 /* This aids in debugging situations where a bad LVB might be involved. */
122 static void ocfs2_dump_meta_lvb_info(u64 level,
123 const char *function,
124 unsigned int line,
125 struct ocfs2_lock_res *lockres)
126 {
127 struct ocfs2_meta_lvb *lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
128
129 mlog(level, "LVB information for %s (called from %s:%u):\n",
130 lockres->l_name, function, line);
131 mlog(level, "version: %u, clusters: %u, generation: 0x%x\n",
132 lvb->lvb_version, be32_to_cpu(lvb->lvb_iclusters),
133 be32_to_cpu(lvb->lvb_igeneration));
134 mlog(level, "size: %llu, uid %u, gid %u, mode 0x%x\n",
135 (unsigned long long)be64_to_cpu(lvb->lvb_isize),
136 be32_to_cpu(lvb->lvb_iuid), be32_to_cpu(lvb->lvb_igid),
137 be16_to_cpu(lvb->lvb_imode));
138 mlog(level, "nlink %u, atime_packed 0x%llx, ctime_packed 0x%llx, "
139 "mtime_packed 0x%llx iattr 0x%x\n", be16_to_cpu(lvb->lvb_inlink),
140 (long long)be64_to_cpu(lvb->lvb_iatime_packed),
141 (long long)be64_to_cpu(lvb->lvb_ictime_packed),
142 (long long)be64_to_cpu(lvb->lvb_imtime_packed),
143 be32_to_cpu(lvb->lvb_iattr));
144 }
145
146
147 /*
148 * OCFS2 Lock Resource Operations
149 *
150 * These fine tune the behavior of the generic dlmglue locking infrastructure.
151 *
152 * The most basic of lock types can point ->l_priv to their respective
153 * struct ocfs2_super and allow the default actions to manage things.
154 *
155 * Right now, each lock type also needs to implement an init function,
156 * and trivial lock/unlock wrappers. ocfs2_simple_drop_lockres()
157 * should be called when the lock is no longer needed (i.e., object
158 * destruction time).
159 */
160 struct ocfs2_lock_res_ops {
161 /*
162 * Translate an ocfs2_lock_res * into an ocfs2_super *. Define
163 * this callback if ->l_priv is not an ocfs2_super pointer
164 */
165 struct ocfs2_super * (*get_osb)(struct ocfs2_lock_res *);
166
167 /*
168 * Optionally called in the downconvert thread after a
169 * successful downconvert. The lockres will not be referenced
170 * after this callback is called, so it is safe to free
171 * memory, etc.
172 *
173 * The exact semantics of when this is called are controlled
174 * by ->downconvert_worker()
175 */
176 void (*post_unlock)(struct ocfs2_super *, struct ocfs2_lock_res *);
177
178 /*
179 * Allow a lock type to add checks to determine whether it is
180 * safe to downconvert a lock. Return 0 to re-queue the
181 * downconvert at a later time, nonzero to continue.
182 *
183 * For most locks, the default checks that there are no
184 * incompatible holders are sufficient.
185 *
186 * Called with the lockres spinlock held.
187 */
188 int (*check_downconvert)(struct ocfs2_lock_res *, int);
189
190 /*
191 * Allows a lock type to populate the lock value block. This
192 * is called on downconvert, and when we drop a lock.
193 *
194 * Locks that want to use this should set LOCK_TYPE_USES_LVB
195 * in the flags field.
196 *
197 * Called with the lockres spinlock held.
198 */
199 void (*set_lvb)(struct ocfs2_lock_res *);
200
201 /*
202 * Called from the downconvert thread when it is determined
203 * that a lock will be downconverted. This is called without
204 * any locks held so the function can do work that might
205 * schedule (syncing out data, etc).
206 *
207 * This should return any one of the ocfs2_unblock_action
208 * values, depending on what it wants the thread to do.
209 */
210 int (*downconvert_worker)(struct ocfs2_lock_res *, int);
211
212 /*
213 * LOCK_TYPE_* flags which describe the specific requirements
214 * of a lock type. Descriptions of each individual flag follow.
215 */
216 int flags;
217 };
218
219 /*
220 * Some locks want to "refresh" potentially stale data when a
221 * meaningful (PRMODE or EXMODE) lock level is first obtained. If this
222 * flag is set, the OCFS2_LOCK_NEEDS_REFRESH flag will be set on the
223 * individual lockres l_flags member from the ast function. It is
224 * expected that the locking wrapper will clear the
225 * OCFS2_LOCK_NEEDS_REFRESH flag when done.
226 */
227 #define LOCK_TYPE_REQUIRES_REFRESH 0x1
228
229 /*
230 * Indicate that a lock type makes use of the lock value block. The
231 * ->set_lvb lock type callback must be defined.
232 */
233 #define LOCK_TYPE_USES_LVB 0x2
234
235 static struct ocfs2_lock_res_ops ocfs2_inode_rw_lops = {
236 .get_osb = ocfs2_get_inode_osb,
237 .flags = 0,
238 };
239
240 static struct ocfs2_lock_res_ops ocfs2_inode_inode_lops = {
241 .get_osb = ocfs2_get_inode_osb,
242 .check_downconvert = ocfs2_check_meta_downconvert,
243 .set_lvb = ocfs2_set_meta_lvb,
244 .downconvert_worker = ocfs2_data_convert_worker,
245 .flags = LOCK_TYPE_REQUIRES_REFRESH|LOCK_TYPE_USES_LVB,
246 };
247
248 static struct ocfs2_lock_res_ops ocfs2_super_lops = {
249 .flags = LOCK_TYPE_REQUIRES_REFRESH,
250 };
251
252 static struct ocfs2_lock_res_ops ocfs2_rename_lops = {
253 .flags = 0,
254 };
255
256 static struct ocfs2_lock_res_ops ocfs2_nfs_sync_lops = {
257 .flags = 0,
258 };
259
260 static struct ocfs2_lock_res_ops ocfs2_orphan_scan_lops = {
261 .flags = LOCK_TYPE_REQUIRES_REFRESH|LOCK_TYPE_USES_LVB,
262 };
263
264 static struct ocfs2_lock_res_ops ocfs2_dentry_lops = {
265 .get_osb = ocfs2_get_dentry_osb,
266 .post_unlock = ocfs2_dentry_post_unlock,
267 .downconvert_worker = ocfs2_dentry_convert_worker,
268 .flags = 0,
269 };
270
271 static struct ocfs2_lock_res_ops ocfs2_inode_open_lops = {
272 .get_osb = ocfs2_get_inode_osb,
273 .flags = 0,
274 };
275
276 static struct ocfs2_lock_res_ops ocfs2_flock_lops = {
277 .get_osb = ocfs2_get_file_osb,
278 .flags = 0,
279 };
280
281 static struct ocfs2_lock_res_ops ocfs2_qinfo_lops = {
282 .set_lvb = ocfs2_set_qinfo_lvb,
283 .get_osb = ocfs2_get_qinfo_osb,
284 .flags = LOCK_TYPE_REQUIRES_REFRESH | LOCK_TYPE_USES_LVB,
285 };
286
287 static struct ocfs2_lock_res_ops ocfs2_refcount_block_lops = {
288 .check_downconvert = ocfs2_check_refcount_downconvert,
289 .downconvert_worker = ocfs2_refcount_convert_worker,
290 .flags = 0,
291 };
292
293 static inline int ocfs2_is_inode_lock(struct ocfs2_lock_res *lockres)
294 {
295 return lockres->l_type == OCFS2_LOCK_TYPE_META ||
296 lockres->l_type == OCFS2_LOCK_TYPE_RW ||
297 lockres->l_type == OCFS2_LOCK_TYPE_OPEN;
298 }
299
300 static inline struct ocfs2_lock_res *ocfs2_lksb_to_lock_res(struct ocfs2_dlm_lksb *lksb)
301 {
302 return container_of(lksb, struct ocfs2_lock_res, l_lksb);
303 }
304
305 static inline struct inode *ocfs2_lock_res_inode(struct ocfs2_lock_res *lockres)
306 {
307 BUG_ON(!ocfs2_is_inode_lock(lockres));
308
309 return (struct inode *) lockres->l_priv;
310 }
311
312 static inline struct ocfs2_dentry_lock *ocfs2_lock_res_dl(struct ocfs2_lock_res *lockres)
313 {
314 BUG_ON(lockres->l_type != OCFS2_LOCK_TYPE_DENTRY);
315
316 return (struct ocfs2_dentry_lock *)lockres->l_priv;
317 }
318
319 static inline struct ocfs2_mem_dqinfo *ocfs2_lock_res_qinfo(struct ocfs2_lock_res *lockres)
320 {
321 BUG_ON(lockres->l_type != OCFS2_LOCK_TYPE_QINFO);
322
323 return (struct ocfs2_mem_dqinfo *)lockres->l_priv;
324 }
325
326 static inline struct ocfs2_refcount_tree *
327 ocfs2_lock_res_refcount_tree(struct ocfs2_lock_res *res)
328 {
329 return container_of(res, struct ocfs2_refcount_tree, rf_lockres);
330 }
331
332 static inline struct ocfs2_super *ocfs2_get_lockres_osb(struct ocfs2_lock_res *lockres)
333 {
334 if (lockres->l_ops->get_osb)
335 return lockres->l_ops->get_osb(lockres);
336
337 return (struct ocfs2_super *)lockres->l_priv;
338 }
339
340 static int ocfs2_lock_create(struct ocfs2_super *osb,
341 struct ocfs2_lock_res *lockres,
342 int level,
343 u32 dlm_flags);
344 static inline int ocfs2_may_continue_on_blocked_lock(struct ocfs2_lock_res *lockres,
345 int wanted);
346 static void __ocfs2_cluster_unlock(struct ocfs2_super *osb,
347 struct ocfs2_lock_res *lockres,
348 int level, unsigned long caller_ip);
349 static inline void ocfs2_cluster_unlock(struct ocfs2_super *osb,
350 struct ocfs2_lock_res *lockres,
351 int level)
352 {
353 __ocfs2_cluster_unlock(osb, lockres, level, _RET_IP_);
354 }
355
356 static inline void ocfs2_generic_handle_downconvert_action(struct ocfs2_lock_res *lockres);
357 static inline void ocfs2_generic_handle_convert_action(struct ocfs2_lock_res *lockres);
358 static inline void ocfs2_generic_handle_attach_action(struct ocfs2_lock_res *lockres);
359 static int ocfs2_generic_handle_bast(struct ocfs2_lock_res *lockres, int level);
360 static void ocfs2_schedule_blocked_lock(struct ocfs2_super *osb,
361 struct ocfs2_lock_res *lockres);
362 static inline void ocfs2_recover_from_dlm_error(struct ocfs2_lock_res *lockres,
363 int convert);
364 #define ocfs2_log_dlm_error(_func, _err, _lockres) do { \
365 if ((_lockres)->l_type != OCFS2_LOCK_TYPE_DENTRY) \
366 mlog(ML_ERROR, "DLM error %d while calling %s on resource %s\n", \
367 _err, _func, _lockres->l_name); \
368 else \
369 mlog(ML_ERROR, "DLM error %d while calling %s on resource %.*s%08x\n", \
370 _err, _func, OCFS2_DENTRY_LOCK_INO_START - 1, (_lockres)->l_name, \
371 (unsigned int)ocfs2_get_dentry_lock_ino(_lockres)); \
372 } while (0)
373 static int ocfs2_downconvert_thread(void *arg);
374 static void ocfs2_downconvert_on_unlock(struct ocfs2_super *osb,
375 struct ocfs2_lock_res *lockres);
376 static int ocfs2_inode_lock_update(struct inode *inode,
377 struct buffer_head **bh);
378 static void ocfs2_drop_osb_locks(struct ocfs2_super *osb);
379 static inline int ocfs2_highest_compat_lock_level(int level);
380 static unsigned int ocfs2_prepare_downconvert(struct ocfs2_lock_res *lockres,
381 int new_level);
382 static int ocfs2_downconvert_lock(struct ocfs2_super *osb,
383 struct ocfs2_lock_res *lockres,
384 int new_level,
385 int lvb,
386 unsigned int generation);
387 static int ocfs2_prepare_cancel_convert(struct ocfs2_super *osb,
388 struct ocfs2_lock_res *lockres);
389 static int ocfs2_cancel_convert(struct ocfs2_super *osb,
390 struct ocfs2_lock_res *lockres);
391
392
393 static void ocfs2_build_lock_name(enum ocfs2_lock_type type,
394 u64 blkno,
395 u32 generation,
396 char *name)
397 {
398 int len;
399
400 BUG_ON(type >= OCFS2_NUM_LOCK_TYPES);
401
402 len = snprintf(name, OCFS2_LOCK_ID_MAX_LEN, "%c%s%016llx%08x",
403 ocfs2_lock_type_char(type), OCFS2_LOCK_ID_PAD,
404 (long long)blkno, generation);
405
406 BUG_ON(len != (OCFS2_LOCK_ID_MAX_LEN - 1));
407
408 mlog(0, "built lock resource with name: %s\n", name);
409 }
410
411 static DEFINE_SPINLOCK(ocfs2_dlm_tracking_lock);
412
413 static void ocfs2_add_lockres_tracking(struct ocfs2_lock_res *res,
414 struct ocfs2_dlm_debug *dlm_debug)
415 {
416 mlog(0, "Add tracking for lockres %s\n", res->l_name);
417
418 spin_lock(&ocfs2_dlm_tracking_lock);
419 list_add(&res->l_debug_list, &dlm_debug->d_lockres_tracking);
420 spin_unlock(&ocfs2_dlm_tracking_lock);
421 }
422
423 static void ocfs2_remove_lockres_tracking(struct ocfs2_lock_res *res)
424 {
425 spin_lock(&ocfs2_dlm_tracking_lock);
426 if (!list_empty(&res->l_debug_list))
427 list_del_init(&res->l_debug_list);
428 spin_unlock(&ocfs2_dlm_tracking_lock);
429 }
430
431 #ifdef CONFIG_OCFS2_FS_STATS
432 static void ocfs2_init_lock_stats(struct ocfs2_lock_res *res)
433 {
434 res->l_lock_refresh = 0;
435 memset(&res->l_lock_prmode, 0, sizeof(struct ocfs2_lock_stats));
436 memset(&res->l_lock_exmode, 0, sizeof(struct ocfs2_lock_stats));
437 }
438
439 static void ocfs2_update_lock_stats(struct ocfs2_lock_res *res, int level,
440 struct ocfs2_mask_waiter *mw, int ret)
441 {
442 u32 usec;
443 ktime_t kt;
444 struct ocfs2_lock_stats *stats;
445
446 if (level == LKM_PRMODE)
447 stats = &res->l_lock_prmode;
448 else if (level == LKM_EXMODE)
449 stats = &res->l_lock_exmode;
450 else
451 return;
452
453 kt = ktime_sub(ktime_get(), mw->mw_lock_start);
454 usec = ktime_to_us(kt);
455
456 stats->ls_gets++;
457 stats->ls_total += ktime_to_ns(kt);
458 /* overflow */
459 if (unlikely(stats->ls_gets == 0)) {
460 stats->ls_gets++;
461 stats->ls_total = ktime_to_ns(kt);
462 }
463
464 if (stats->ls_max < usec)
465 stats->ls_max = usec;
466
467 if (ret)
468 stats->ls_fail++;
469 }
470
471 static inline void ocfs2_track_lock_refresh(struct ocfs2_lock_res *lockres)
472 {
473 lockres->l_lock_refresh++;
474 }
475
476 static inline void ocfs2_init_start_time(struct ocfs2_mask_waiter *mw)
477 {
478 mw->mw_lock_start = ktime_get();
479 }
480 #else
481 static inline void ocfs2_init_lock_stats(struct ocfs2_lock_res *res)
482 {
483 }
484 static inline void ocfs2_update_lock_stats(struct ocfs2_lock_res *res,
485 int level, struct ocfs2_mask_waiter *mw, int ret)
486 {
487 }
488 static inline void ocfs2_track_lock_refresh(struct ocfs2_lock_res *lockres)
489 {
490 }
491 static inline void ocfs2_init_start_time(struct ocfs2_mask_waiter *mw)
492 {
493 }
494 #endif
495
496 static void ocfs2_lock_res_init_common(struct ocfs2_super *osb,
497 struct ocfs2_lock_res *res,
498 enum ocfs2_lock_type type,
499 struct ocfs2_lock_res_ops *ops,
500 void *priv)
501 {
502 res->l_type = type;
503 res->l_ops = ops;
504 res->l_priv = priv;
505
506 res->l_level = DLM_LOCK_IV;
507 res->l_requested = DLM_LOCK_IV;
508 res->l_blocking = DLM_LOCK_IV;
509 res->l_action = OCFS2_AST_INVALID;
510 res->l_unlock_action = OCFS2_UNLOCK_INVALID;
511
512 res->l_flags = OCFS2_LOCK_INITIALIZED;
513
514 ocfs2_add_lockres_tracking(res, osb->osb_dlm_debug);
515
516 ocfs2_init_lock_stats(res);
517 #ifdef CONFIG_DEBUG_LOCK_ALLOC
518 if (type != OCFS2_LOCK_TYPE_OPEN)
519 lockdep_init_map(&res->l_lockdep_map, ocfs2_lock_type_strings[type],
520 &lockdep_keys[type], 0);
521 else
522 res->l_lockdep_map.key = NULL;
523 #endif
524 }
525
526 void ocfs2_lock_res_init_once(struct ocfs2_lock_res *res)
527 {
528 /* This also clears out the lock status block */
529 memset(res, 0, sizeof(struct ocfs2_lock_res));
530 spin_lock_init(&res->l_lock);
531 init_waitqueue_head(&res->l_event);
532 INIT_LIST_HEAD(&res->l_blocked_list);
533 INIT_LIST_HEAD(&res->l_mask_waiters);
534 INIT_LIST_HEAD(&res->l_holders);
535 }
536
537 void ocfs2_inode_lock_res_init(struct ocfs2_lock_res *res,
538 enum ocfs2_lock_type type,
539 unsigned int generation,
540 struct inode *inode)
541 {
542 struct ocfs2_lock_res_ops *ops;
543
544 switch(type) {
545 case OCFS2_LOCK_TYPE_RW:
546 ops = &ocfs2_inode_rw_lops;
547 break;
548 case OCFS2_LOCK_TYPE_META:
549 ops = &ocfs2_inode_inode_lops;
550 break;
551 case OCFS2_LOCK_TYPE_OPEN:
552 ops = &ocfs2_inode_open_lops;
553 break;
554 default:
555 mlog_bug_on_msg(1, "type: %d\n", type);
556 ops = NULL; /* thanks, gcc */
557 break;
558 }
559
560 ocfs2_build_lock_name(type, OCFS2_I(inode)->ip_blkno,
561 generation, res->l_name);
562 ocfs2_lock_res_init_common(OCFS2_SB(inode->i_sb), res, type, ops, inode);
563 }
564
565 static struct ocfs2_super *ocfs2_get_inode_osb(struct ocfs2_lock_res *lockres)
566 {
567 struct inode *inode = ocfs2_lock_res_inode(lockres);
568
569 return OCFS2_SB(inode->i_sb);
570 }
571
572 static struct ocfs2_super *ocfs2_get_qinfo_osb(struct ocfs2_lock_res *lockres)
573 {
574 struct ocfs2_mem_dqinfo *info = lockres->l_priv;
575
576 return OCFS2_SB(info->dqi_gi.dqi_sb);
577 }
578
579 static struct ocfs2_super *ocfs2_get_file_osb(struct ocfs2_lock_res *lockres)
580 {
581 struct ocfs2_file_private *fp = lockres->l_priv;
582
583 return OCFS2_SB(fp->fp_file->f_mapping->host->i_sb);
584 }
585
586 static __u64 ocfs2_get_dentry_lock_ino(struct ocfs2_lock_res *lockres)
587 {
588 __be64 inode_blkno_be;
589
590 memcpy(&inode_blkno_be, &lockres->l_name[OCFS2_DENTRY_LOCK_INO_START],
591 sizeof(__be64));
592
593 return be64_to_cpu(inode_blkno_be);
594 }
595
596 static struct ocfs2_super *ocfs2_get_dentry_osb(struct ocfs2_lock_res *lockres)
597 {
598 struct ocfs2_dentry_lock *dl = lockres->l_priv;
599
600 return OCFS2_SB(dl->dl_inode->i_sb);
601 }
602
603 void ocfs2_dentry_lock_res_init(struct ocfs2_dentry_lock *dl,
604 u64 parent, struct inode *inode)
605 {
606 int len;
607 u64 inode_blkno = OCFS2_I(inode)->ip_blkno;
608 __be64 inode_blkno_be = cpu_to_be64(inode_blkno);
609 struct ocfs2_lock_res *lockres = &dl->dl_lockres;
610
611 ocfs2_lock_res_init_once(lockres);
612
613 /*
614 * Unfortunately, the standard lock naming scheme won't work
615 * here because we have two 16 byte values to use. Instead,
616 * we'll stuff the inode number as a binary value. We still
617 * want error prints to show something without garbling the
618 * display, so drop a null byte in there before the inode
619 * number. A future version of OCFS2 will likely use all
620 * binary lock names. The stringified names have been a
621 * tremendous aid in debugging, but now that the debugfs
622 * interface exists, we can mangle things there if need be.
623 *
624 * NOTE: We also drop the standard "pad" value (the total lock
625 * name size stays the same though - the last part is all
626 * zeros due to the memset in ocfs2_lock_res_init_once()
627 */
628 len = snprintf(lockres->l_name, OCFS2_DENTRY_LOCK_INO_START,
629 "%c%016llx",
630 ocfs2_lock_type_char(OCFS2_LOCK_TYPE_DENTRY),
631 (long long)parent);
632
633 BUG_ON(len != (OCFS2_DENTRY_LOCK_INO_START - 1));
634
635 memcpy(&lockres->l_name[OCFS2_DENTRY_LOCK_INO_START], &inode_blkno_be,
636 sizeof(__be64));
637
638 ocfs2_lock_res_init_common(OCFS2_SB(inode->i_sb), lockres,
639 OCFS2_LOCK_TYPE_DENTRY, &ocfs2_dentry_lops,
640 dl);
641 }
642
643 static void ocfs2_super_lock_res_init(struct ocfs2_lock_res *res,
644 struct ocfs2_super *osb)
645 {
646 /* Superblock lockres doesn't come from a slab so we call init
647 * once on it manually. */
648 ocfs2_lock_res_init_once(res);
649 ocfs2_build_lock_name(OCFS2_LOCK_TYPE_SUPER, OCFS2_SUPER_BLOCK_BLKNO,
650 0, res->l_name);
651 ocfs2_lock_res_init_common(osb, res, OCFS2_LOCK_TYPE_SUPER,
652 &ocfs2_super_lops, osb);
653 }
654
655 static void ocfs2_rename_lock_res_init(struct ocfs2_lock_res *res,
656 struct ocfs2_super *osb)
657 {
658 /* Rename lockres doesn't come from a slab so we call init
659 * once on it manually. */
660 ocfs2_lock_res_init_once(res);
661 ocfs2_build_lock_name(OCFS2_LOCK_TYPE_RENAME, 0, 0, res->l_name);
662 ocfs2_lock_res_init_common(osb, res, OCFS2_LOCK_TYPE_RENAME,
663 &ocfs2_rename_lops, osb);
664 }
665
666 static void ocfs2_nfs_sync_lock_res_init(struct ocfs2_lock_res *res,
667 struct ocfs2_super *osb)
668 {
669 /* nfs_sync lockres doesn't come from a slab so we call init
670 * once on it manually. */
671 ocfs2_lock_res_init_once(res);
672 ocfs2_build_lock_name(OCFS2_LOCK_TYPE_NFS_SYNC, 0, 0, res->l_name);
673 ocfs2_lock_res_init_common(osb, res, OCFS2_LOCK_TYPE_NFS_SYNC,
674 &ocfs2_nfs_sync_lops, osb);
675 }
676
677 static void ocfs2_orphan_scan_lock_res_init(struct ocfs2_lock_res *res,
678 struct ocfs2_super *osb)
679 {
680 ocfs2_lock_res_init_once(res);
681 ocfs2_build_lock_name(OCFS2_LOCK_TYPE_ORPHAN_SCAN, 0, 0, res->l_name);
682 ocfs2_lock_res_init_common(osb, res, OCFS2_LOCK_TYPE_ORPHAN_SCAN,
683 &ocfs2_orphan_scan_lops, osb);
684 }
685
686 void ocfs2_file_lock_res_init(struct ocfs2_lock_res *lockres,
687 struct ocfs2_file_private *fp)
688 {
689 struct inode *inode = fp->fp_file->f_mapping->host;
690 struct ocfs2_inode_info *oi = OCFS2_I(inode);
691
692 ocfs2_lock_res_init_once(lockres);
693 ocfs2_build_lock_name(OCFS2_LOCK_TYPE_FLOCK, oi->ip_blkno,
694 inode->i_generation, lockres->l_name);
695 ocfs2_lock_res_init_common(OCFS2_SB(inode->i_sb), lockres,
696 OCFS2_LOCK_TYPE_FLOCK, &ocfs2_flock_lops,
697 fp);
698 lockres->l_flags |= OCFS2_LOCK_NOCACHE;
699 }
700
701 void ocfs2_qinfo_lock_res_init(struct ocfs2_lock_res *lockres,
702 struct ocfs2_mem_dqinfo *info)
703 {
704 ocfs2_lock_res_init_once(lockres);
705 ocfs2_build_lock_name(OCFS2_LOCK_TYPE_QINFO, info->dqi_gi.dqi_type,
706 0, lockres->l_name);
707 ocfs2_lock_res_init_common(OCFS2_SB(info->dqi_gi.dqi_sb), lockres,
708 OCFS2_LOCK_TYPE_QINFO, &ocfs2_qinfo_lops,
709 info);
710 }
711
712 void ocfs2_refcount_lock_res_init(struct ocfs2_lock_res *lockres,
713 struct ocfs2_super *osb, u64 ref_blkno,
714 unsigned int generation)
715 {
716 ocfs2_lock_res_init_once(lockres);
717 ocfs2_build_lock_name(OCFS2_LOCK_TYPE_REFCOUNT, ref_blkno,
718 generation, lockres->l_name);
719 ocfs2_lock_res_init_common(osb, lockres, OCFS2_LOCK_TYPE_REFCOUNT,
720 &ocfs2_refcount_block_lops, osb);
721 }
722
723 void ocfs2_lock_res_free(struct ocfs2_lock_res *res)
724 {
725 if (!(res->l_flags & OCFS2_LOCK_INITIALIZED))
726 return;
727
728 ocfs2_remove_lockres_tracking(res);
729
730 mlog_bug_on_msg(!list_empty(&res->l_blocked_list),
731 "Lockres %s is on the blocked list\n",
732 res->l_name);
733 mlog_bug_on_msg(!list_empty(&res->l_mask_waiters),
734 "Lockres %s has mask waiters pending\n",
735 res->l_name);
736 mlog_bug_on_msg(spin_is_locked(&res->l_lock),
737 "Lockres %s is locked\n",
738 res->l_name);
739 mlog_bug_on_msg(res->l_ro_holders,
740 "Lockres %s has %u ro holders\n",
741 res->l_name, res->l_ro_holders);
742 mlog_bug_on_msg(res->l_ex_holders,
743 "Lockres %s has %u ex holders\n",
744 res->l_name, res->l_ex_holders);
745
746 /* Need to clear out the lock status block for the dlm */
747 memset(&res->l_lksb, 0, sizeof(res->l_lksb));
748
749 res->l_flags = 0UL;
750 }
751
752 /*
753 * Keep a list of processes who have interest in a lockres.
754 * Note: this is now only used for checking recursive cluster locking.
755 */
756 static inline void ocfs2_add_holder(struct ocfs2_lock_res *lockres,
757 struct ocfs2_lock_holder *oh)
758 {
759 INIT_LIST_HEAD(&oh->oh_list);
760 oh->oh_owner_pid = get_pid(task_pid(current));
761
762 spin_lock(&lockres->l_lock);
763 list_add_tail(&oh->oh_list, &lockres->l_holders);
764 spin_unlock(&lockres->l_lock);
765 }
766
767 static inline void ocfs2_remove_holder(struct ocfs2_lock_res *lockres,
768 struct ocfs2_lock_holder *oh)
769 {
770 spin_lock(&lockres->l_lock);
771 list_del(&oh->oh_list);
772 spin_unlock(&lockres->l_lock);
773
774 put_pid(oh->oh_owner_pid);
775 }
776
777 static inline int ocfs2_is_locked_by_me(struct ocfs2_lock_res *lockres)
778 {
779 struct ocfs2_lock_holder *oh;
780 struct pid *pid;
781
782 /* look in the list of holders for one with the current task as owner */
783 spin_lock(&lockres->l_lock);
784 pid = task_pid(current);
785 list_for_each_entry(oh, &lockres->l_holders, oh_list) {
786 if (oh->oh_owner_pid == pid) {
787 spin_unlock(&lockres->l_lock);
788 return 1;
789 }
790 }
791 spin_unlock(&lockres->l_lock);
792
793 return 0;
794 }
795
796 static inline void ocfs2_inc_holders(struct ocfs2_lock_res *lockres,
797 int level)
798 {
799 BUG_ON(!lockres);
800
801 switch(level) {
802 case DLM_LOCK_EX:
803 lockres->l_ex_holders++;
804 break;
805 case DLM_LOCK_PR:
806 lockres->l_ro_holders++;
807 break;
808 default:
809 BUG();
810 }
811 }
812
813 static inline void ocfs2_dec_holders(struct ocfs2_lock_res *lockres,
814 int level)
815 {
816 BUG_ON(!lockres);
817
818 switch(level) {
819 case DLM_LOCK_EX:
820 BUG_ON(!lockres->l_ex_holders);
821 lockres->l_ex_holders--;
822 break;
823 case DLM_LOCK_PR:
824 BUG_ON(!lockres->l_ro_holders);
825 lockres->l_ro_holders--;
826 break;
827 default:
828 BUG();
829 }
830 }
831
832 /* WARNING: This function lives in a world where the only three lock
833 * levels are EX, PR, and NL. It *will* have to be adjusted when more
834 * lock types are added. */
835 static inline int ocfs2_highest_compat_lock_level(int level)
836 {
837 int new_level = DLM_LOCK_EX;
838
839 if (level == DLM_LOCK_EX)
840 new_level = DLM_LOCK_NL;
841 else if (level == DLM_LOCK_PR)
842 new_level = DLM_LOCK_PR;
843 return new_level;
844 }
845
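/*
 * Helper to set l_flags wholesale. Any mask waiter whose (mask, goal)
 * condition is satisfied by the new flag state is removed from
 * l_mask_waiters and completed. Caller must hold l_lock.
 */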
846 static void lockres_set_flags(struct ocfs2_lock_res *lockres,
847 unsigned long newflags)
848 {
849 struct ocfs2_mask_waiter *mw, *tmp;
850
851 assert_spin_locked(&lockres->l_lock);
852
853 lockres->l_flags = newflags;
854
855 list_for_each_entry_safe(mw, tmp, &lockres->l_mask_waiters, mw_item) {
856 if ((lockres->l_flags & mw->mw_mask) != mw->mw_goal)
857 continue;
858
859 list_del_init(&mw->mw_item);
860 mw->mw_status = 0;
861 complete(&mw->mw_complete);
862 }
863 }
864 static void lockres_or_flags(struct ocfs2_lock_res *lockres, unsigned long or)
865 {
866 lockres_set_flags(lockres, lockres->l_flags | or);
867 }
868 static void lockres_clear_flags(struct ocfs2_lock_res *lockres,
869 unsigned long clear)
870 {
871 lockres_set_flags(lockres, lockres->l_flags & ~clear);
872 }
873
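/*
 * A downconvert request has been granted by the DLM: drop l_level to
 * the requested level, clear BLOCKED if the new level no longer
 * conflicts with the blocking request, and clear BUSY.
 */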
874 static inline void ocfs2_generic_handle_downconvert_action(struct ocfs2_lock_res *lockres)
875 {
876 BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BUSY));
877 BUG_ON(!(lockres->l_flags & OCFS2_LOCK_ATTACHED));
878 BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BLOCKED));
879 BUG_ON(lockres->l_blocking <= DLM_LOCK_NL);
880
881 lockres->l_level = lockres->l_requested;
882 if (lockres->l_level <=
883 ocfs2_highest_compat_lock_level(lockres->l_blocking)) {
884 lockres->l_blocking = DLM_LOCK_NL;
885 lockres_clear_flags(lockres, OCFS2_LOCK_BLOCKED);
886 }
887 lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
888 }
889
890 static inline void ocfs2_generic_handle_convert_action(struct ocfs2_lock_res *lockres)
891 {
892 BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BUSY));
893 BUG_ON(!(lockres->l_flags & OCFS2_LOCK_ATTACHED));
894
895 /* Convert from RO to EX doesn't really need anything as our
896 * information is already up to date. Convert from NL to
897 * *anything* however should mark ourselves as needing an
898 * update */
899 if (lockres->l_level == DLM_LOCK_NL &&
900 lockres->l_ops->flags & LOCK_TYPE_REQUIRES_REFRESH)
901 lockres_or_flags(lockres, OCFS2_LOCK_NEEDS_REFRESH);
902
903 lockres->l_level = lockres->l_requested;
904
905 /*
906 * We set the OCFS2_LOCK_UPCONVERT_FINISHING flag before clearing
907 * the OCFS2_LOCK_BUSY flag to prevent the dc thread from
908 * downconverting the lock before the upconvert has fully completed.
909 */
910 lockres_or_flags(lockres, OCFS2_LOCK_UPCONVERT_FINISHING);
911
912 lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
913 }
914
915 static inline void ocfs2_generic_handle_attach_action(struct ocfs2_lock_res *lockres)
916 {
917 BUG_ON((!(lockres->l_flags & OCFS2_LOCK_BUSY)));
918 BUG_ON(lockres->l_flags & OCFS2_LOCK_ATTACHED);
919
920 if (lockres->l_requested > DLM_LOCK_NL &&
921 !(lockres->l_flags & OCFS2_LOCK_LOCAL) &&
922 lockres->l_ops->flags & LOCK_TYPE_REQUIRES_REFRESH)
923 lockres_or_flags(lockres, OCFS2_LOCK_NEEDS_REFRESH);
924
925 lockres->l_level = lockres->l_requested;
926 lockres_or_flags(lockres, OCFS2_LOCK_ATTACHED);
927 lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
928 }
929
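/*
 * Record a blocking request (BAST) from another node at the given
 * level. Returns 1 if a downconvert still needs to be scheduled for
 * this lockres, 0 if a previously scheduled downconvert already goes
 * low enough to satisfy the request.
 */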
930 static int ocfs2_generic_handle_bast(struct ocfs2_lock_res *lockres,
931 int level)
932 {
933 int needs_downconvert = 0;
934
935 assert_spin_locked(&lockres->l_lock);
936
937 if (level > lockres->l_blocking) {
938 /* only schedule a downconvert if we haven't already scheduled
939 * one that goes low enough to satisfy the level we're
940 * blocking. this also catches the case where we get
941 * duplicate BASTs */
942 if (ocfs2_highest_compat_lock_level(level) <
943 ocfs2_highest_compat_lock_level(lockres->l_blocking))
944 needs_downconvert = 1;
945
946 lockres->l_blocking = level;
947 }
948
949 mlog(ML_BASTS, "lockres %s, block %d, level %d, l_block %d, dwn %d\n",
950 lockres->l_name, level, lockres->l_level, lockres->l_blocking,
951 needs_downconvert);
952
953 if (needs_downconvert)
954 lockres_or_flags(lockres, OCFS2_LOCK_BLOCKED);
955 mlog(0, "needs_downconvert = %d\n", needs_downconvert);
956 return needs_downconvert;
957 }
958
959 /*
960 * OCFS2_LOCK_PENDING and l_pending_gen.
961 *
962 * Why does OCFS2_LOCK_PENDING exist? To close a race between setting
963 * OCFS2_LOCK_BUSY and calling ocfs2_dlm_lock(). See ocfs2_unblock_lock()
964 * for more details on the race.
965 *
966 * OCFS2_LOCK_PENDING closes the race quite nicely. However, it introduces
967 * a race on itself. In o2dlm, we can get the ast before ocfs2_dlm_lock()
968 * returns. The ast clears OCFS2_LOCK_BUSY, and must therefore clear
969 * OCFS2_LOCK_PENDING at the same time. When ocfs2_dlm_lock() returns,
970 * the caller is going to try to clear PENDING again. If nothing else is
971 * happening, __lockres_clear_pending() sees PENDING is unset and does
972 * nothing.
973 *
974 * But what if another path (eg downconvert thread) has just started a
975 * new locking action? The other path has re-set PENDING. Our path
976 * cannot clear PENDING, because that will re-open the original race
977 * window.
978 *
979 * [Example]
980 *
981 * ocfs2_meta_lock()
982 * ocfs2_cluster_lock()
983 * set BUSY
984 * set PENDING
985 * drop l_lock
986 * ocfs2_dlm_lock()
987 * ocfs2_locking_ast() ocfs2_downconvert_thread()
988 * clear PENDING ocfs2_unblock_lock()
989 * take_l_lock
990 * !BUSY
991 * ocfs2_prepare_downconvert()
992 * set BUSY
993 * set PENDING
994 * drop l_lock
995 * take l_lock
996 * clear PENDING
997 * drop l_lock
998 * <window>
999 * ocfs2_dlm_lock()
1000 *
1001 * So as you can see, we now have a window where l_lock is not held,
1002 * PENDING is not set, and ocfs2_dlm_lock() has not been called.
1003 *
1004 * The core problem is that ocfs2_cluster_lock() has cleared the PENDING
1005 * set by ocfs2_prepare_downconvert(). That wasn't nice.
1006 *
1007 * To solve this we introduce l_pending_gen. A call to
1008 * lockres_clear_pending() will only do so when it is passed a generation
1009 * number that matches the lockres. lockres_set_pending() will return the
1010 * current generation number. When ocfs2_cluster_lock() goes to clear
1011 * PENDING, it passes the generation it got from set_pending(). In our
1012 * example above, the generation numbers will *not* match. Thus,
1013 * ocfs2_cluster_lock() will not clear the PENDING set by
1014 * ocfs2_prepare_downconvert().
1015 */
1016
1017 /* Unlocked version for ocfs2_locking_ast() */
1018 static void __lockres_clear_pending(struct ocfs2_lock_res *lockres,
1019 unsigned int generation,
1020 struct ocfs2_super *osb)
1021 {
1022 assert_spin_locked(&lockres->l_lock);
1023
1024 /*
1025 * The ast and locking functions can race us here. The winner
1026 * will clear pending, the loser will not.
1027 */
1028 if (!(lockres->l_flags & OCFS2_LOCK_PENDING) ||
1029 (lockres->l_pending_gen != generation))
1030 return;
1031
1032 lockres_clear_flags(lockres, OCFS2_LOCK_PENDING);
1033 lockres->l_pending_gen++;
1034
1035 /*
1036 * The downconvert thread may have skipped us because we
1037 * were PENDING. Wake it up.
1038 */
1039 if (lockres->l_flags & OCFS2_LOCK_BLOCKED)
1040 ocfs2_wake_downconvert_thread(osb);
1041 }
1042
1043 /* Locked version for callers of ocfs2_dlm_lock() */
1044 static void lockres_clear_pending(struct ocfs2_lock_res *lockres,
1045 unsigned int generation,
1046 struct ocfs2_super *osb)
1047 {
1048 unsigned long flags;
1049
1050 spin_lock_irqsave(&lockres->l_lock, flags);
1051 __lockres_clear_pending(lockres, generation, osb);
1052 spin_unlock_irqrestore(&lockres->l_lock, flags);
1053 }
1054
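/*
 * Mark the lockres PENDING prior to calling ocfs2_dlm_lock() and
 * return the current l_pending_gen; the caller passes that generation
 * back to lockres_clear_pending() so a newer locking action's PENDING
 * state is never cleared by mistake (see the comment block above).
 */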
1055 static unsigned int lockres_set_pending(struct ocfs2_lock_res *lockres)
1056 {
1057 assert_spin_locked(&lockres->l_lock);
1058 BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BUSY));
1059
1060 lockres_or_flags(lockres, OCFS2_LOCK_PENDING);
1061
1062 return lockres->l_pending_gen;
1063 }
1064
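/*
 * Blocking AST handler: another node wants a level that conflicts with
 * what we hold. Note the blocking level, queue the lockres for the
 * downconvert thread if needed, and wake that thread.
 */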
1065 static void ocfs2_blocking_ast(struct ocfs2_dlm_lksb *lksb, int level)
1066 {
1067 struct ocfs2_lock_res *lockres = ocfs2_lksb_to_lock_res(lksb);
1068 struct ocfs2_super *osb = ocfs2_get_lockres_osb(lockres);
1069 int needs_downconvert;
1070 unsigned long flags;
1071
1072 BUG_ON(level <= DLM_LOCK_NL);
1073
1074 mlog(ML_BASTS, "BAST fired for lockres %s, blocking %d, level %d, "
1075 "type %s\n", lockres->l_name, level, lockres->l_level,
1076 ocfs2_lock_type_string(lockres->l_type));
1077
1078 /*
1079 * We can skip the bast for locks which don't enable caching -
1080 * they'll be dropped at the earliest possible time anyway.
1081 */
1082 if (lockres->l_flags & OCFS2_LOCK_NOCACHE)
1083 return;
1084
1085 spin_lock_irqsave(&lockres->l_lock, flags);
1086 needs_downconvert = ocfs2_generic_handle_bast(lockres, level);
1087 if (needs_downconvert)
1088 ocfs2_schedule_blocked_lock(osb, lockres);
1089 spin_unlock_irqrestore(&lockres->l_lock, flags);
1090
1091 wake_up(&lockres->l_event);
1092
1093 ocfs2_wake_downconvert_thread(osb);
1094 }
1095
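/*
 * AST handler for lock and convert requests. On success, dispatch on
 * l_action (attach, convert or downconvert), clear any cancel-convert
 * state, and clear PENDING for the generation that issued the request.
 */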
1096 static void ocfs2_locking_ast(struct ocfs2_dlm_lksb *lksb)
1097 {
1098 struct ocfs2_lock_res *lockres = ocfs2_lksb_to_lock_res(lksb);
1099 struct ocfs2_super *osb = ocfs2_get_lockres_osb(lockres);
1100 unsigned long flags;
1101 int status;
1102
1103 spin_lock_irqsave(&lockres->l_lock, flags);
1104
1105 status = ocfs2_dlm_lock_status(&lockres->l_lksb);
1106
1107 if (status == -EAGAIN) {
1108 lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
1109 goto out;
1110 }
1111
1112 if (status) {
1113 mlog(ML_ERROR, "lockres %s: lksb status value of %d!\n",
1114 lockres->l_name, status);
1115 spin_unlock_irqrestore(&lockres->l_lock, flags);
1116 return;
1117 }
1118
1119 mlog(ML_BASTS, "AST fired for lockres %s, action %d, unlock %d, "
1120 "level %d => %d\n", lockres->l_name, lockres->l_action,
1121 lockres->l_unlock_action, lockres->l_level, lockres->l_requested);
1122
1123 switch(lockres->l_action) {
1124 case OCFS2_AST_ATTACH:
1125 ocfs2_generic_handle_attach_action(lockres);
1126 lockres_clear_flags(lockres, OCFS2_LOCK_LOCAL);
1127 break;
1128 case OCFS2_AST_CONVERT:
1129 ocfs2_generic_handle_convert_action(lockres);
1130 break;
1131 case OCFS2_AST_DOWNCONVERT:
1132 ocfs2_generic_handle_downconvert_action(lockres);
1133 break;
1134 default:
1135 mlog(ML_ERROR, "lockres %s: AST fired with invalid action: %u, "
1136 "flags 0x%lx, unlock: %u\n",
1137 lockres->l_name, lockres->l_action, lockres->l_flags,
1138 lockres->l_unlock_action);
1139 BUG();
1140 }
1141 out:
1142 /* set it to something invalid so if we get called again we
1143 * can catch it. */
1144 lockres->l_action = OCFS2_AST_INVALID;
1145
1146 /* Did we try to cancel this lock? Clear that state */
1147 if (lockres->l_unlock_action == OCFS2_UNLOCK_CANCEL_CONVERT)
1148 lockres->l_unlock_action = OCFS2_UNLOCK_INVALID;
1149
1150 /*
1151 * We may have beaten the locking functions here. We certainly
1152 * know that dlm_lock() has been called :-)
1153 * Because we can't have two lock calls in flight at once, we
1154 * can use lockres->l_pending_gen.
1155 */
1156 __lockres_clear_pending(lockres, lockres->l_pending_gen, osb);
1157
1158 wake_up(&lockres->l_event);
1159 spin_unlock_irqrestore(&lockres->l_lock, flags);
1160 }
1161
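/*
 * AST handler for unlock and cancel-convert requests: reset the
 * lockres state accordingly, clear BUSY and wake any waiters.
 */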
1162 static void ocfs2_unlock_ast(struct ocfs2_dlm_lksb *lksb, int error)
1163 {
1164 struct ocfs2_lock_res *lockres = ocfs2_lksb_to_lock_res(lksb);
1165 unsigned long flags;
1166
1167 mlog(ML_BASTS, "UNLOCK AST fired for lockres %s, action = %d\n",
1168 lockres->l_name, lockres->l_unlock_action);
1169
1170 spin_lock_irqsave(&lockres->l_lock, flags);
1171 if (error) {
1172 mlog(ML_ERROR, "Dlm passes error %d for lock %s, "
1173 "unlock_action %d\n", error, lockres->l_name,
1174 lockres->l_unlock_action);
1175 spin_unlock_irqrestore(&lockres->l_lock, flags);
1176 return;
1177 }
1178
1179 switch(lockres->l_unlock_action) {
1180 case OCFS2_UNLOCK_CANCEL_CONVERT:
1181 mlog(0, "Cancel convert success for %s\n", lockres->l_name);
1182 lockres->l_action = OCFS2_AST_INVALID;
1183 /* Downconvert thread may have requeued this lock, we
1184 * need to wake it. */
1185 if (lockres->l_flags & OCFS2_LOCK_BLOCKED)
1186 ocfs2_wake_downconvert_thread(ocfs2_get_lockres_osb(lockres));
1187 break;
1188 case OCFS2_UNLOCK_DROP_LOCK:
1189 lockres->l_level = DLM_LOCK_IV;
1190 break;
1191 default:
1192 BUG();
1193 }
1194
1195 lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
1196 lockres->l_unlock_action = OCFS2_UNLOCK_INVALID;
1197 wake_up(&lockres->l_event);
1198 spin_unlock_irqrestore(&lockres->l_lock, flags);
1199 }
1200
1201 /*
1202 * This is the filesystem locking protocol. It provides the lock handling
1203 * hooks for the underlying DLM. It has a maximum version number.
1204 * The version number allows interoperability with systems running at
1205 * the same major number and an equal or smaller minor number.
1206 *
1207 * Whenever the filesystem does new things with locks (adds or removes a
1208 * lock, orders them differently, does different things underneath a lock),
1209 * the version must be changed. The protocol is negotiated when joining
1210 * the dlm domain. A node may join the domain if its major version is
1211 * identical to all other nodes and its minor version is greater than
1212 * or equal to all other nodes. When its minor version is greater than
1213 * the other nodes, it will run at the minor version specified by the
1214 * other nodes.
1215 *
1216 * If a locking change is made that will not be compatible with older
1217 * versions, the major number must be increased and the minor version set
1218 * to zero. If a change merely adds a behavior that can be disabled when
1219 * speaking to older versions, the minor version must be increased. If a
1220 * change adds a fully backwards compatible change (eg, LVB changes that
1221 * are just ignored by older versions), the version does not need to be
1222 * updated.
1223 */
1224 static struct ocfs2_locking_protocol lproto = {
1225 .lp_max_version = {
1226 .pv_major = OCFS2_LOCKING_PROTOCOL_MAJOR,
1227 .pv_minor = OCFS2_LOCKING_PROTOCOL_MINOR,
1228 },
1229 .lp_lock_ast = ocfs2_locking_ast,
1230 .lp_blocking_ast = ocfs2_blocking_ast,
1231 .lp_unlock_ast = ocfs2_unlock_ast,
1232 };
1233
1234 void ocfs2_set_locking_protocol(void)
1235 {
1236 ocfs2_stack_glue_set_max_proto_version(&lproto.lp_max_version);
1237 }
1238
1239 static inline void ocfs2_recover_from_dlm_error(struct ocfs2_lock_res *lockres,
1240 int convert)
1241 {
1242 unsigned long flags;
1243
1244 spin_lock_irqsave(&lockres->l_lock, flags);
1245 lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
1246 lockres_clear_flags(lockres, OCFS2_LOCK_UPCONVERT_FINISHING);
1247 if (convert)
1248 lockres->l_action = OCFS2_AST_INVALID;
1249 else
1250 lockres->l_unlock_action = OCFS2_UNLOCK_INVALID;
1251 spin_unlock_irqrestore(&lockres->l_lock, flags);
1252
1253 wake_up(&lockres->l_event);
1254 }
1255
1256 /* Note: If we detect another process working on the lock (i.e.,
1257 * OCFS2_LOCK_BUSY), we'll bail out returning 0. It's up to the caller
1258 * to do the right thing in that case.
1259 */
1260 static int ocfs2_lock_create(struct ocfs2_super *osb,
1261 struct ocfs2_lock_res *lockres,
1262 int level,
1263 u32 dlm_flags)
1264 {
1265 int ret = 0;
1266 unsigned long flags;
1267 unsigned int gen;
1268
1269 mlog(0, "lock %s, level = %d, flags = %u\n", lockres->l_name, level,
1270 dlm_flags);
1271
1272 spin_lock_irqsave(&lockres->l_lock, flags);
1273 if ((lockres->l_flags & OCFS2_LOCK_ATTACHED) ||
1274 (lockres->l_flags & OCFS2_LOCK_BUSY)) {
1275 spin_unlock_irqrestore(&lockres->l_lock, flags);
1276 goto bail;
1277 }
1278
1279 lockres->l_action = OCFS2_AST_ATTACH;
1280 lockres->l_requested = level;
1281 lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
1282 gen = lockres_set_pending(lockres);
1283 spin_unlock_irqrestore(&lockres->l_lock, flags);
1284
1285 ret = ocfs2_dlm_lock(osb->cconn,
1286 level,
1287 &lockres->l_lksb,
1288 dlm_flags,
1289 lockres->l_name,
1290 OCFS2_LOCK_ID_MAX_LEN - 1);
1291 lockres_clear_pending(lockres, gen, osb);
1292 if (ret) {
1293 ocfs2_log_dlm_error("ocfs2_dlm_lock", ret, lockres);
1294 ocfs2_recover_from_dlm_error(lockres, 1);
1295 }
1296
1297 mlog(0, "lock %s, return from ocfs2_dlm_lock\n", lockres->l_name);
1298
1299 bail:
1300 return ret;
1301 }
1302
1303 static inline int ocfs2_check_wait_flag(struct ocfs2_lock_res *lockres,
1304 int flag)
1305 {
1306 unsigned long flags;
1307 int ret;
1308
1309 spin_lock_irqsave(&lockres->l_lock, flags);
1310 ret = lockres->l_flags & flag;
1311 spin_unlock_irqrestore(&lockres->l_lock, flags);
1312
1313 return ret;
1314 }
1315
1316 static inline void ocfs2_wait_on_busy_lock(struct ocfs2_lock_res *lockres)
1317
1318 {
1319 wait_event(lockres->l_event,
1320 !ocfs2_check_wait_flag(lockres, OCFS2_LOCK_BUSY));
1321 }
1322
1323 static inline void ocfs2_wait_on_refreshing_lock(struct ocfs2_lock_res *lockres)
1324
1325 {
1326 wait_event(lockres->l_event,
1327 !ocfs2_check_wait_flag(lockres, OCFS2_LOCK_REFRESHING));
1328 }
1329
1330 /* predict what lock level we'll be dropping down to on behalf
1331 * of another node, and return true if the currently wanted
1332 * level will be compatible with it. */
1333 static inline int ocfs2_may_continue_on_blocked_lock(struct ocfs2_lock_res *lockres,
1334 int wanted)
1335 {
1336 BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BLOCKED));
1337
1338 return wanted <= ocfs2_highest_compat_lock_level(lockres->l_blocking);
1339 }
1340
1341 static void ocfs2_init_mask_waiter(struct ocfs2_mask_waiter *mw)
1342 {
1343 INIT_LIST_HEAD(&mw->mw_item);
1344 init_completion(&mw->mw_complete);
1345 ocfs2_init_start_time(mw);
1346 }
1347
1348 static int ocfs2_wait_for_mask(struct ocfs2_mask_waiter *mw)
1349 {
1350 wait_for_completion(&mw->mw_complete);
1351 /* Re-arm the completion in case we want to wait on it again */
1352 reinit_completion(&mw->mw_complete);
1353 return mw->mw_status;
1354 }
1355
1356 static void lockres_add_mask_waiter(struct ocfs2_lock_res *lockres,
1357 struct ocfs2_mask_waiter *mw,
1358 unsigned long mask,
1359 unsigned long goal)
1360 {
1361 BUG_ON(!list_empty(&mw->mw_item));
1362
1363 assert_spin_locked(&lockres->l_lock);
1364
1365 list_add_tail(&mw->mw_item, &lockres->l_mask_waiters);
1366 mw->mw_mask = mask;
1367 mw->mw_goal = goal;
1368 }
1369
1370 /* returns 0 if the mw that was removed was already satisfied, -EBUSY
1371 * if the mask still hadn't reached its goal */
1372 static int lockres_remove_mask_waiter(struct ocfs2_lock_res *lockres,
1373 struct ocfs2_mask_waiter *mw)
1374 {
1375 unsigned long flags;
1376 int ret = 0;
1377
1378 spin_lock_irqsave(&lockres->l_lock, flags);
1379 if (!list_empty(&mw->mw_item)) {
1380 if ((lockres->l_flags & mw->mw_mask) != mw->mw_goal)
1381 ret = -EBUSY;
1382
1383 list_del_init(&mw->mw_item);
1384 init_completion(&mw->mw_complete);
1385 }
1386 spin_unlock_irqrestore(&lockres->l_lock, flags);
1387
1388 return ret;
1389
1390 }
1391
1392 static int ocfs2_wait_for_mask_interruptible(struct ocfs2_mask_waiter *mw,
1393 struct ocfs2_lock_res *lockres)
1394 {
1395 int ret;
1396
1397 ret = wait_for_completion_interruptible(&mw->mw_complete);
1398 if (ret)
1399 lockres_remove_mask_waiter(lockres, mw);
1400 else
1401 ret = mw->mw_status;
1402 /* Re-arm the completion in case we want to wait on it again */
1403 reinit_completion(&mw->mw_complete);
1404 return ret;
1405 }
1406
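/*
 * Core cluster locking loop. Waits (via mask waiters) while the
 * lockres is BUSY or BLOCKED, issues ocfs2_dlm_lock() to attach or
 * upconvert when a higher level is needed, and retries until the
 * requested level is held or an error (or -EAGAIN for NOQUEUE /
 * NONBLOCK callers) is returned.
 */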
1407 static int __ocfs2_cluster_lock(struct ocfs2_super *osb,
1408 struct ocfs2_lock_res *lockres,
1409 int level,
1410 u32 lkm_flags,
1411 int arg_flags,
1412 int l_subclass,
1413 unsigned long caller_ip)
1414 {
1415 struct ocfs2_mask_waiter mw;
1416 int wait, catch_signals = !(osb->s_mount_opt & OCFS2_MOUNT_NOINTR);
1417 int ret = 0; /* gcc doesn't realize wait = 1 guarantees ret is set */
1418 unsigned long flags;
1419 unsigned int gen;
1420 int noqueue_attempted = 0;
1421 int kick_dc = 0;
1422
1423 ocfs2_init_mask_waiter(&mw);
1424
1425 if (lockres->l_ops->flags & LOCK_TYPE_USES_LVB)
1426 lkm_flags |= DLM_LKF_VALBLK;
1427
1428 again:
1429 wait = 0;
1430
1431 spin_lock_irqsave(&lockres->l_lock, flags);
1432
1433 if (catch_signals && signal_pending(current)) {
1434 ret = -ERESTARTSYS;
1435 goto unlock;
1436 }
1437
1438 mlog_bug_on_msg(lockres->l_flags & OCFS2_LOCK_FREEING,
1439 "Cluster lock called on freeing lockres %s! flags "
1440 "0x%lx\n", lockres->l_name, lockres->l_flags);
1441
1442 /* We only compare against the currently granted level
1443 * here. If the lock is blocked waiting on a downconvert,
1444 * we'll get caught below. */
1445 if (lockres->l_flags & OCFS2_LOCK_BUSY &&
1446 level > lockres->l_level) {
1447 /* is someone sitting in dlm_lock? If so, wait on
1448 * them. */
1449 lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BUSY, 0);
1450 wait = 1;
1451 goto unlock;
1452 }
1453
1454 if (lockres->l_flags & OCFS2_LOCK_UPCONVERT_FINISHING) {
1455 /*
1456 * We've upconverted. If the lock now has a level we can
1457 * work with, we take it. If, however, the lock is not at the
1458 * required level, we go thru the full cycle. One way this could
1459 * happen is if a process requesting an upconvert to PR is
1460 * closely followed by another requesting upconvert to an EX.
1461 * If the process requesting EX lands here, we want it to
1462 * continue attempting to upconvert and let the process
1463 * requesting PR take the lock.
1464 * If multiple processes request upconvert to PR, the first one
1465 * here will take the lock. The others will have to go thru the
1466 * OCFS2_LOCK_BLOCKED check to ensure that there is no pending
1467 * downconvert request.
1468 */
1469 if (level <= lockres->l_level)
1470 goto update_holders;
1471 }
1472
1473 if (lockres->l_flags & OCFS2_LOCK_BLOCKED &&
1474 !ocfs2_may_continue_on_blocked_lock(lockres, level)) {
1475 /* the lock is currently blocked on behalf of
1476 * another node */
1477 lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BLOCKED, 0);
1478 wait = 1;
1479 goto unlock;
1480 }
1481
1482 if (level > lockres->l_level) {
1483 if (noqueue_attempted > 0) {
1484 ret = -EAGAIN;
1485 goto unlock;
1486 }
1487 if (lkm_flags & DLM_LKF_NOQUEUE)
1488 noqueue_attempted = 1;
1489
1490 if (lockres->l_action != OCFS2_AST_INVALID)
1491 mlog(ML_ERROR, "lockres %s has action %u pending\n",
1492 lockres->l_name, lockres->l_action);
1493
1494 if (!(lockres->l_flags & OCFS2_LOCK_ATTACHED)) {
1495 lockres->l_action = OCFS2_AST_ATTACH;
1496 lkm_flags &= ~DLM_LKF_CONVERT;
1497 } else {
1498 lockres->l_action = OCFS2_AST_CONVERT;
1499 lkm_flags |= DLM_LKF_CONVERT;
1500 }
1501
1502 lockres->l_requested = level;
1503 lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
1504 gen = lockres_set_pending(lockres);
1505 spin_unlock_irqrestore(&lockres->l_lock, flags);
1506
1507 BUG_ON(level == DLM_LOCK_IV);
1508 BUG_ON(level == DLM_LOCK_NL);
1509
1510 mlog(ML_BASTS, "lockres %s, convert from %d to %d\n",
1511 lockres->l_name, lockres->l_level, level);
1512
1513 /* call dlm_lock to upgrade lock now */
1514 ret = ocfs2_dlm_lock(osb->cconn,
1515 level,
1516 &lockres->l_lksb,
1517 lkm_flags,
1518 lockres->l_name,
1519 OCFS2_LOCK_ID_MAX_LEN - 1);
1520 lockres_clear_pending(lockres, gen, osb);
1521 if (ret) {
1522 if (!(lkm_flags & DLM_LKF_NOQUEUE) ||
1523 (ret != -EAGAIN)) {
1524 ocfs2_log_dlm_error("ocfs2_dlm_lock",
1525 ret, lockres);
1526 }
1527 ocfs2_recover_from_dlm_error(lockres, 1);
1528 goto out;
1529 }
1530
1531 mlog(0, "lock %s, successful return from ocfs2_dlm_lock\n",
1532 lockres->l_name);
1533
1534 /* At this point we've gone inside the dlm and need to
1535 * complete our work regardless. */
1536 catch_signals = 0;
1537
1538 /* wait for busy to clear and carry on */
1539 goto again;
1540 }
1541
1542 update_holders:
1543 /* Ok, if we get here then we're good to go. */
1544 ocfs2_inc_holders(lockres, level);
1545
1546 ret = 0;
1547 unlock:
1548 lockres_clear_flags(lockres, OCFS2_LOCK_UPCONVERT_FINISHING);
1549
1550 /* ocfs2_unblock_lock requeues on seeing OCFS2_LOCK_UPCONVERT_FINISHING */
1551 kick_dc = (lockres->l_flags & OCFS2_LOCK_BLOCKED);
1552
1553 spin_unlock_irqrestore(&lockres->l_lock, flags);
1554 if (kick_dc)
1555 ocfs2_wake_downconvert_thread(osb);
1556 out:
1557 /*
1558 * This is helping work around a lock inversion between the page lock
1559 * and dlm locks. One path holds the page lock while calling aops
1560 * which block acquiring dlm locks. The voting thread holds dlm
1561 * locks while acquiring page locks while down converting data locks.
1562 * This block is helping an aop path notice the inversion and back
1563 * off to unlock its page lock before trying the dlm lock again.
1564 */
1565 if (wait && arg_flags & OCFS2_LOCK_NONBLOCK &&
1566 mw.mw_mask & (OCFS2_LOCK_BUSY|OCFS2_LOCK_BLOCKED)) {
1567 wait = 0;
1568 if (lockres_remove_mask_waiter(lockres, &mw))
1569 ret = -EAGAIN;
1570 else
1571 goto again;
1572 }
1573 if (wait) {
1574 ret = ocfs2_wait_for_mask(&mw);
1575 if (ret == 0)
1576 goto again;
1577 mlog_errno(ret);
1578 }
1579 ocfs2_update_lock_stats(lockres, level, &mw, ret);
1580
1581 #ifdef CONFIG_DEBUG_LOCK_ALLOC
1582 if (!ret && lockres->l_lockdep_map.key != NULL) {
1583 if (level == DLM_LOCK_PR)
1584 rwsem_acquire_read(&lockres->l_lockdep_map, l_subclass,
1585 !!(arg_flags & OCFS2_META_LOCK_NOQUEUE),
1586 caller_ip);
1587 else
1588 rwsem_acquire(&lockres->l_lockdep_map, l_subclass,
1589 !!(arg_flags & OCFS2_META_LOCK_NOQUEUE),
1590 caller_ip);
1591 }
1592 #endif
1593 return ret;
1594 }
1595
1596 static inline int ocfs2_cluster_lock(struct ocfs2_super *osb,
1597 struct ocfs2_lock_res *lockres,
1598 int level,
1599 u32 lkm_flags,
1600 int arg_flags)
1601 {
1602 return __ocfs2_cluster_lock(osb, lockres, level, lkm_flags, arg_flags,
1603 0, _RET_IP_);
1604 }
1605
1606
1607 static void __ocfs2_cluster_unlock(struct ocfs2_super *osb,
1608 struct ocfs2_lock_res *lockres,
1609 int level,
1610 unsigned long caller_ip)
1611 {
1612 unsigned long flags;
1613
1614 spin_lock_irqsave(&lockres->l_lock, flags);
1615 ocfs2_dec_holders(lockres, level);
1616 ocfs2_downconvert_on_unlock(osb, lockres);
1617 spin_unlock_irqrestore(&lockres->l_lock, flags);
1618 #ifdef CONFIG_DEBUG_LOCK_ALLOC
1619 if (lockres->l_lockdep_map.key != NULL)
1620 rwsem_release(&lockres->l_lockdep_map, 1, caller_ip);
1621 #endif
1622 }
1623
1624 static int ocfs2_create_new_lock(struct ocfs2_super *osb,
1625 struct ocfs2_lock_res *lockres,
1626 int ex,
1627 int local)
1628 {
1629 int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
1630 unsigned long flags;
1631 u32 lkm_flags = local ? DLM_LKF_LOCAL : 0;
1632
1633 spin_lock_irqsave(&lockres->l_lock, flags);
1634 BUG_ON(lockres->l_flags & OCFS2_LOCK_ATTACHED);
1635 lockres_or_flags(lockres, OCFS2_LOCK_LOCAL);
1636 spin_unlock_irqrestore(&lockres->l_lock, flags);
1637
1638 return ocfs2_lock_create(osb, lockres, level, lkm_flags);
1639 }
1640
1641 /* Grants us an EX lock on the data and metadata resources, skipping
1642 * the normal cluster directory lookup. Use this ONLY on newly created
1643 * inodes which other nodes can't possibly see, and which haven't been
1644 * hashed in the inode hash yet. This can give us a good performance
1645 * increase as it'll skip the network broadcast normally associated
1646 * with creating a new lock resource. */
1647 int ocfs2_create_new_inode_locks(struct inode *inode)
1648 {
1649 int ret;
1650 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1651
1652 BUG_ON(!inode);
1653 BUG_ON(!ocfs2_inode_is_new(inode));
1654
1655 mlog(0, "Inode %llu\n", (unsigned long long)OCFS2_I(inode)->ip_blkno);
1656
1657 	/* NOTE: We don't increment any of the holder counts, nor
1658 * do we add anything to a journal handle. Since this is
1659 * supposed to be a new inode which the cluster doesn't know
1660 * about yet, there is no need to. As far as the LVB handling
1661 * is concerned, this is basically like acquiring an EX lock
1662 * on a resource which has an invalid one -- we'll set it
1663 * valid when we release the EX. */
1664
1665 ret = ocfs2_create_new_lock(osb, &OCFS2_I(inode)->ip_rw_lockres, 1, 1);
1666 if (ret) {
1667 mlog_errno(ret);
1668 goto bail;
1669 }
1670
1671 /*
1672 * We don't want to use DLM_LKF_LOCAL on a meta data lock as they
1673 * don't use a generation in their lock names.
1674 */
1675 ret = ocfs2_create_new_lock(osb, &OCFS2_I(inode)->ip_inode_lockres, 1, 0);
1676 if (ret) {
1677 mlog_errno(ret);
1678 goto bail;
1679 }
1680
1681 ret = ocfs2_create_new_lock(osb, &OCFS2_I(inode)->ip_open_lockres, 0, 0);
1682 if (ret) {
1683 mlog_errno(ret);
1684 goto bail;
1685 }
1686
1687 bail:
1688 return ret;
1689 }
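
/*
 * A minimal usage sketch, assuming a create path that has just
 * allocated a brand-new, not-yet-hashed inode (the real callers live
 * outside this file; the error label below is illustrative only):
 *
 *	status = ocfs2_create_new_inode_locks(inode);
 *	if (status) {
 *		mlog_errno(status);
 *		goto leave;	// tear the half-built inode back down
 *	}
 *	// ... insert into the inode hash, journal the create, etc.
 */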
1690
1691 int ocfs2_rw_lock(struct inode *inode, int write)
1692 {
1693 int status, level;
1694 struct ocfs2_lock_res *lockres;
1695 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1696
1697 BUG_ON(!inode);
1698
1699 mlog(0, "inode %llu take %s RW lock\n",
1700 (unsigned long long)OCFS2_I(inode)->ip_blkno,
1701 write ? "EXMODE" : "PRMODE");
1702
1703 if (ocfs2_mount_local(osb))
1704 return 0;
1705
1706 lockres = &OCFS2_I(inode)->ip_rw_lockres;
1707
1708 level = write ? DLM_LOCK_EX : DLM_LOCK_PR;
1709
1710 status = ocfs2_cluster_lock(OCFS2_SB(inode->i_sb), lockres, level, 0,
1711 0);
1712 if (status < 0)
1713 mlog_errno(status);
1714
1715 return status;
1716 }
1717
1718 void ocfs2_rw_unlock(struct inode *inode, int write)
1719 {
1720 int level = write ? DLM_LOCK_EX : DLM_LOCK_PR;
1721 struct ocfs2_lock_res *lockres = &OCFS2_I(inode)->ip_rw_lockres;
1722 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1723
1724 mlog(0, "inode %llu drop %s RW lock\n",
1725 (unsigned long long)OCFS2_I(inode)->ip_blkno,
1726 write ? "EXMODE" : "PRMODE");
1727
1728 if (!ocfs2_mount_local(osb))
1729 ocfs2_cluster_unlock(OCFS2_SB(inode->i_sb), lockres, level);
1730 }
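
/*
 * Typical pairing around file I/O, as a sketch (the real users are the
 * read/write paths elsewhere in ocfs2; "do the I/O" is a placeholder):
 *
 *	status = ocfs2_rw_lock(inode, 1);	// EX for a write, PR for a read
 *	if (status < 0)
 *		return status;
 *	// ... do the I/O ...
 *	ocfs2_rw_unlock(inode, 1);		// must match the level taken
 */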
1731
1732 /*
1733  * ocfs2_open_lock always gets a PR mode lock.
1734 */
1735 int ocfs2_open_lock(struct inode *inode)
1736 {
1737 int status = 0;
1738 struct ocfs2_lock_res *lockres;
1739 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1740
1741 BUG_ON(!inode);
1742
1743 mlog(0, "inode %llu take PRMODE open lock\n",
1744 (unsigned long long)OCFS2_I(inode)->ip_blkno);
1745
1746 if (ocfs2_is_hard_readonly(osb) || ocfs2_mount_local(osb))
1747 goto out;
1748
1749 lockres = &OCFS2_I(inode)->ip_open_lockres;
1750
1751 status = ocfs2_cluster_lock(OCFS2_SB(inode->i_sb), lockres,
1752 DLM_LOCK_PR, 0, 0);
1753 if (status < 0)
1754 mlog_errno(status);
1755
1756 out:
1757 return status;
1758 }
1759
1760 int ocfs2_try_open_lock(struct inode *inode, int write)
1761 {
1762 int status = 0, level;
1763 struct ocfs2_lock_res *lockres;
1764 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1765
1766 BUG_ON(!inode);
1767
1768 mlog(0, "inode %llu try to take %s open lock\n",
1769 (unsigned long long)OCFS2_I(inode)->ip_blkno,
1770 write ? "EXMODE" : "PRMODE");
1771
1772 if (ocfs2_is_hard_readonly(osb)) {
1773 if (write)
1774 status = -EROFS;
1775 goto out;
1776 }
1777
1778 if (ocfs2_mount_local(osb))
1779 goto out;
1780
1781 lockres = &OCFS2_I(inode)->ip_open_lockres;
1782
1783 level = write ? DLM_LOCK_EX : DLM_LOCK_PR;
1784
1785 /*
1786 	 * The file system may already be holding a PRMODE/EXMODE open lock.
1787 * Since we pass DLM_LKF_NOQUEUE, the request won't block waiting on
1788 * other nodes and the -EAGAIN will indicate to the caller that
1789 * this inode is still in use.
1790 */
1791 status = ocfs2_cluster_lock(OCFS2_SB(inode->i_sb), lockres,
1792 level, DLM_LKF_NOQUEUE, 0);
1793
1794 out:
1795 return status;
1796 }
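
/*
 * Sketch of the "is this inode still in use somewhere?" test that the
 * NOQUEUE trylock above enables (the real caller is the inode wipe
 * path; the surrounding logic here is illustrative only):
 *
 *	status = ocfs2_try_open_lock(inode, 1);
 *	if (status == -EAGAIN) {
 *		// another node (or a local holder) still has it open,
 *		// so don't wipe the inode yet
 *		status = 0;
 *		goto bail;
 *	}
 */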
1797
1798 /*
1799  * ocfs2_open_unlock unlocks PR and EX mode open locks.
1800 */
1801 void ocfs2_open_unlock(struct inode *inode)
1802 {
1803 struct ocfs2_lock_res *lockres = &OCFS2_I(inode)->ip_open_lockres;
1804 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1805
1806 mlog(0, "inode %llu drop open lock\n",
1807 (unsigned long long)OCFS2_I(inode)->ip_blkno);
1808
1809 if (ocfs2_mount_local(osb))
1810 goto out;
1811
1812 if(lockres->l_ro_holders)
1813 ocfs2_cluster_unlock(OCFS2_SB(inode->i_sb), lockres,
1814 DLM_LOCK_PR);
1815 if(lockres->l_ex_holders)
1816 ocfs2_cluster_unlock(OCFS2_SB(inode->i_sb), lockres,
1817 DLM_LOCK_EX);
1818
1819 out:
1820 return;
1821 }
1822
1823 static int ocfs2_flock_handle_signal(struct ocfs2_lock_res *lockres,
1824 int level)
1825 {
1826 int ret;
1827 struct ocfs2_super *osb = ocfs2_get_lockres_osb(lockres);
1828 unsigned long flags;
1829 struct ocfs2_mask_waiter mw;
1830
1831 ocfs2_init_mask_waiter(&mw);
1832
1833 retry_cancel:
1834 spin_lock_irqsave(&lockres->l_lock, flags);
1835 if (lockres->l_flags & OCFS2_LOCK_BUSY) {
1836 ret = ocfs2_prepare_cancel_convert(osb, lockres);
1837 if (ret) {
1838 spin_unlock_irqrestore(&lockres->l_lock, flags);
1839 ret = ocfs2_cancel_convert(osb, lockres);
1840 if (ret < 0) {
1841 mlog_errno(ret);
1842 goto out;
1843 }
1844 goto retry_cancel;
1845 }
1846 lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BUSY, 0);
1847 spin_unlock_irqrestore(&lockres->l_lock, flags);
1848
1849 ocfs2_wait_for_mask(&mw);
1850 goto retry_cancel;
1851 }
1852
1853 ret = -ERESTARTSYS;
1854 /*
1855 * We may still have gotten the lock, in which case there's no
1856 * point to restarting the syscall.
1857 */
1858 if (lockres->l_level == level)
1859 ret = 0;
1860
1861 mlog(0, "Cancel returning %d. flags: 0x%lx, level: %d, act: %d\n", ret,
1862 lockres->l_flags, lockres->l_level, lockres->l_action);
1863
1864 spin_unlock_irqrestore(&lockres->l_lock, flags);
1865
1866 out:
1867 return ret;
1868 }
1869
1870 /*
1871 * ocfs2_file_lock() and ocfs2_file_unlock() map to a single pair of
1872 * flock() calls. The locking approach this requires is sufficiently
1873 * different from all other cluster lock types that we implement a
1874 * separate path to the "low-level" dlm calls. In particular:
1875 *
1876  * - No optimization of lock levels is done - we take exactly
1877 * what's been requested.
1878 *
1879 * - No lock caching is employed. We immediately downconvert to
1880 * no-lock at unlock time. This also means flock locks never go on
1881  *   the blocking list.
1882 *
1883 * - Since userspace can trivially deadlock itself with flock, we make
1884  *   sure to allow cancellation of a misbehaving application's flock()
1885 * request.
1886 *
1887 * - Access to any flock lockres doesn't require concurrency, so we
1888 * can simplify the code by requiring the caller to guarantee
1889 * serialization of dlmglue flock calls.
1890 */
1891 int ocfs2_file_lock(struct file *file, int ex, int trylock)
1892 {
1893 int ret, level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
1894 unsigned int lkm_flags = trylock ? DLM_LKF_NOQUEUE : 0;
1895 unsigned long flags;
1896 struct ocfs2_file_private *fp = file->private_data;
1897 struct ocfs2_lock_res *lockres = &fp->fp_flock;
1898 struct ocfs2_super *osb = OCFS2_SB(file->f_mapping->host->i_sb);
1899 struct ocfs2_mask_waiter mw;
1900
1901 ocfs2_init_mask_waiter(&mw);
1902
1903 if ((lockres->l_flags & OCFS2_LOCK_BUSY) ||
1904 (lockres->l_level > DLM_LOCK_NL)) {
1905 mlog(ML_ERROR,
1906 "File lock \"%s\" has busy or locked state: flags: 0x%lx, "
1907 "level: %u\n", lockres->l_name, lockres->l_flags,
1908 lockres->l_level);
1909 return -EINVAL;
1910 }
1911
1912 spin_lock_irqsave(&lockres->l_lock, flags);
1913 if (!(lockres->l_flags & OCFS2_LOCK_ATTACHED)) {
1914 lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BUSY, 0);
1915 spin_unlock_irqrestore(&lockres->l_lock, flags);
1916
1917 /*
1918 * Get the lock at NLMODE to start - that way we
1919 * can cancel the upconvert request if need be.
1920 */
1921 ret = ocfs2_lock_create(osb, lockres, DLM_LOCK_NL, 0);
1922 if (ret < 0) {
1923 mlog_errno(ret);
1924 goto out;
1925 }
1926
1927 ret = ocfs2_wait_for_mask(&mw);
1928 if (ret) {
1929 mlog_errno(ret);
1930 goto out;
1931 }
1932 spin_lock_irqsave(&lockres->l_lock, flags);
1933 }
1934
1935 lockres->l_action = OCFS2_AST_CONVERT;
1936 lkm_flags |= DLM_LKF_CONVERT;
1937 lockres->l_requested = level;
1938 lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
1939
1940 lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BUSY, 0);
1941 spin_unlock_irqrestore(&lockres->l_lock, flags);
1942
1943 ret = ocfs2_dlm_lock(osb->cconn, level, &lockres->l_lksb, lkm_flags,
1944 lockres->l_name, OCFS2_LOCK_ID_MAX_LEN - 1);
1945 if (ret) {
1946 if (!trylock || (ret != -EAGAIN)) {
1947 ocfs2_log_dlm_error("ocfs2_dlm_lock", ret, lockres);
1948 ret = -EINVAL;
1949 }
1950
1951 ocfs2_recover_from_dlm_error(lockres, 1);
1952 lockres_remove_mask_waiter(lockres, &mw);
1953 goto out;
1954 }
1955
1956 ret = ocfs2_wait_for_mask_interruptible(&mw, lockres);
1957 if (ret == -ERESTARTSYS) {
1958 /*
1959 * Userspace can cause deadlock itself with
1960 * flock(). Current behavior locally is to allow the
1961 * deadlock, but abort the system call if a signal is
1962 * received. We follow this example, otherwise a
1963 * poorly written program could sit in kernel until
1964 * reboot.
1965 *
1966 * Handling this is a bit more complicated for Ocfs2
1967 * though. We can't exit this function with an
1968 * outstanding lock request, so a cancel convert is
1969 * required. We intentionally overwrite 'ret' - if the
1970 * cancel fails and the lock was granted, it's easier
1971 * to just bubble success back up to the user.
1972 */
1973 ret = ocfs2_flock_handle_signal(lockres, level);
1974 } else if (!ret && (level > lockres->l_level)) {
1975 /* Trylock failed asynchronously */
1976 BUG_ON(!trylock);
1977 ret = -EAGAIN;
1978 }
1979
1980 out:
1981
1982 mlog(0, "Lock: \"%s\" ex: %d, trylock: %d, returns: %d\n",
1983 lockres->l_name, ex, trylock, ret);
1984 return ret;
1985 }
1986
1987 void ocfs2_file_unlock(struct file *file)
1988 {
1989 int ret;
1990 unsigned int gen;
1991 unsigned long flags;
1992 struct ocfs2_file_private *fp = file->private_data;
1993 struct ocfs2_lock_res *lockres = &fp->fp_flock;
1994 struct ocfs2_super *osb = OCFS2_SB(file->f_mapping->host->i_sb);
1995 struct ocfs2_mask_waiter mw;
1996
1997 ocfs2_init_mask_waiter(&mw);
1998
1999 if (!(lockres->l_flags & OCFS2_LOCK_ATTACHED))
2000 return;
2001
2002 if (lockres->l_level == DLM_LOCK_NL)
2003 return;
2004
2005 mlog(0, "Unlock: \"%s\" flags: 0x%lx, level: %d, act: %d\n",
2006 lockres->l_name, lockres->l_flags, lockres->l_level,
2007 lockres->l_action);
2008
2009 spin_lock_irqsave(&lockres->l_lock, flags);
2010 /*
2011 * Fake a blocking ast for the downconvert code.
2012 */
2013 lockres_or_flags(lockres, OCFS2_LOCK_BLOCKED);
2014 lockres->l_blocking = DLM_LOCK_EX;
2015
2016 gen = ocfs2_prepare_downconvert(lockres, DLM_LOCK_NL);
2017 lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BUSY, 0);
2018 spin_unlock_irqrestore(&lockres->l_lock, flags);
2019
2020 ret = ocfs2_downconvert_lock(osb, lockres, DLM_LOCK_NL, 0, gen);
2021 if (ret) {
2022 mlog_errno(ret);
2023 return;
2024 }
2025
2026 ret = ocfs2_wait_for_mask(&mw);
2027 if (ret)
2028 mlog_errno(ret);
2029 }
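
/*
 * How the two flock entry points above are typically driven, sketched
 * for a ->flock() style caller (the actual glue lives outside this
 * file; the EWOULDBLOCK mapping is illustrative):
 *
 *	ret = ocfs2_file_lock(file, ex, trylock);
 *	if (ret == -EAGAIN && trylock)
 *		return -EWOULDBLOCK;	// held elsewhere, trylock fails fast
 *	...
 *	ocfs2_file_unlock(file);	// downconverts straight back to NL
 */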
2030
2031 static void ocfs2_downconvert_on_unlock(struct ocfs2_super *osb,
2032 struct ocfs2_lock_res *lockres)
2033 {
2034 int kick = 0;
2035
2036 /* If we know that another node is waiting on our lock, kick
2037 	 * the downconvert thread pre-emptively when we reach a release
2038 * condition. */
2039 if (lockres->l_flags & OCFS2_LOCK_BLOCKED) {
2040 switch(lockres->l_blocking) {
2041 case DLM_LOCK_EX:
2042 if (!lockres->l_ex_holders && !lockres->l_ro_holders)
2043 kick = 1;
2044 break;
2045 case DLM_LOCK_PR:
2046 if (!lockres->l_ex_holders)
2047 kick = 1;
2048 break;
2049 default:
2050 BUG();
2051 }
2052 }
2053
2054 if (kick)
2055 ocfs2_wake_downconvert_thread(osb);
2056 }
2057
2058 #define OCFS2_SEC_BITS 34
2059 #define OCFS2_SEC_SHIFT (64 - 34)
2060 #define OCFS2_NSEC_MASK ((1ULL << OCFS2_SEC_SHIFT) - 1)
2061
2062 /* LVB only has room for 64 bits of time here so we pack it for
2063 * now. */
2064 static u64 ocfs2_pack_timespec(struct timespec *spec)
2065 {
2066 u64 res;
2067 u64 sec = spec->tv_sec;
2068 u32 nsec = spec->tv_nsec;
2069
2070 res = (sec << OCFS2_SEC_SHIFT) | (nsec & OCFS2_NSEC_MASK);
2071
2072 return res;
2073 }
2074
2075 /* Call this with the lockres locked. I am reasonably sure we don't
2076 * need ip_lock in this function as anyone who would be changing those
2077 * values is supposed to be blocked in ocfs2_inode_lock right now. */
2078 static void __ocfs2_stuff_meta_lvb(struct inode *inode)
2079 {
2080 struct ocfs2_inode_info *oi = OCFS2_I(inode);
2081 struct ocfs2_lock_res *lockres = &oi->ip_inode_lockres;
2082 struct ocfs2_meta_lvb *lvb;
2083
2084 lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
2085
2086 /*
2087 * Invalidate the LVB of a deleted inode - this way other
2088 * nodes are forced to go to disk and discover the new inode
2089 * status.
2090 */
2091 if (oi->ip_flags & OCFS2_INODE_DELETED) {
2092 lvb->lvb_version = 0;
2093 goto out;
2094 }
2095
2096 lvb->lvb_version = OCFS2_LVB_VERSION;
2097 lvb->lvb_isize = cpu_to_be64(i_size_read(inode));
2098 lvb->lvb_iclusters = cpu_to_be32(oi->ip_clusters);
2099 lvb->lvb_iuid = cpu_to_be32(i_uid_read(inode));
2100 lvb->lvb_igid = cpu_to_be32(i_gid_read(inode));
2101 lvb->lvb_imode = cpu_to_be16(inode->i_mode);
2102 lvb->lvb_inlink = cpu_to_be16(inode->i_nlink);
2103 lvb->lvb_iatime_packed =
2104 cpu_to_be64(ocfs2_pack_timespec(&inode->i_atime));
2105 lvb->lvb_ictime_packed =
2106 cpu_to_be64(ocfs2_pack_timespec(&inode->i_ctime));
2107 lvb->lvb_imtime_packed =
2108 cpu_to_be64(ocfs2_pack_timespec(&inode->i_mtime));
2109 lvb->lvb_iattr = cpu_to_be32(oi->ip_attr);
2110 lvb->lvb_idynfeatures = cpu_to_be16(oi->ip_dyn_features);
2111 lvb->lvb_igeneration = cpu_to_be32(inode->i_generation);
2112
2113 out:
2114 mlog_meta_lvb(0, lockres);
2115 }
2116
2117 static void ocfs2_unpack_timespec(struct timespec *spec,
2118 u64 packed_time)
2119 {
2120 spec->tv_sec = packed_time >> OCFS2_SEC_SHIFT;
2121 spec->tv_nsec = packed_time & OCFS2_NSEC_MASK;
2122 }
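
/*
 * For reference, the packing above: with OCFS2_SEC_SHIFT == 30 the
 * seconds value occupies the upper 34 bits and the nanoseconds the
 * lower 30 bits (nsec < 10^9 < 2^30, so nothing is truncated):
 *
 *	packed = (sec << 30) | (nsec & ((1ULL << 30) - 1));
 *	sec    = packed >> 30;
 *	nsec   = packed & ((1ULL << 30) - 1);
 */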
2123
2124 static void ocfs2_refresh_inode_from_lvb(struct inode *inode)
2125 {
2126 struct ocfs2_inode_info *oi = OCFS2_I(inode);
2127 struct ocfs2_lock_res *lockres = &oi->ip_inode_lockres;
2128 struct ocfs2_meta_lvb *lvb;
2129
2130 mlog_meta_lvb(0, lockres);
2131
2132 lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
2133
2134 /* We're safe here without the lockres lock... */
2135 spin_lock(&oi->ip_lock);
2136 oi->ip_clusters = be32_to_cpu(lvb->lvb_iclusters);
2137 i_size_write(inode, be64_to_cpu(lvb->lvb_isize));
2138
2139 oi->ip_attr = be32_to_cpu(lvb->lvb_iattr);
2140 oi->ip_dyn_features = be16_to_cpu(lvb->lvb_idynfeatures);
2141 ocfs2_set_inode_flags(inode);
2142
2143 /* fast-symlinks are a special case */
2144 if (S_ISLNK(inode->i_mode) && !oi->ip_clusters)
2145 inode->i_blocks = 0;
2146 else
2147 inode->i_blocks = ocfs2_inode_sector_count(inode);
2148
2149 i_uid_write(inode, be32_to_cpu(lvb->lvb_iuid));
2150 i_gid_write(inode, be32_to_cpu(lvb->lvb_igid));
2151 inode->i_mode = be16_to_cpu(lvb->lvb_imode);
2152 set_nlink(inode, be16_to_cpu(lvb->lvb_inlink));
2153 ocfs2_unpack_timespec(&inode->i_atime,
2154 be64_to_cpu(lvb->lvb_iatime_packed));
2155 ocfs2_unpack_timespec(&inode->i_mtime,
2156 be64_to_cpu(lvb->lvb_imtime_packed));
2157 ocfs2_unpack_timespec(&inode->i_ctime,
2158 be64_to_cpu(lvb->lvb_ictime_packed));
2159 spin_unlock(&oi->ip_lock);
2160 }
2161
2162 static inline int ocfs2_meta_lvb_is_trustable(struct inode *inode,
2163 struct ocfs2_lock_res *lockres)
2164 {
2165 struct ocfs2_meta_lvb *lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
2166
2167 if (ocfs2_dlm_lvb_valid(&lockres->l_lksb)
2168 && lvb->lvb_version == OCFS2_LVB_VERSION
2169 && be32_to_cpu(lvb->lvb_igeneration) == inode->i_generation)
2170 return 1;
2171 return 0;
2172 }
2173
2174 /* Determine whether a lock resource needs to be refreshed, and
2175 * arbitrate who gets to refresh it.
2176 *
2177 * 0 means no refresh needed.
2178 *
2179 * > 0 means you need to refresh this and you MUST call
2180 * ocfs2_complete_lock_res_refresh afterwards. */
2181 static int ocfs2_should_refresh_lock_res(struct ocfs2_lock_res *lockres)
2182 {
2183 unsigned long flags;
2184 int status = 0;
2185
2186 refresh_check:
2187 spin_lock_irqsave(&lockres->l_lock, flags);
2188 if (!(lockres->l_flags & OCFS2_LOCK_NEEDS_REFRESH)) {
2189 spin_unlock_irqrestore(&lockres->l_lock, flags);
2190 goto bail;
2191 }
2192
2193 if (lockres->l_flags & OCFS2_LOCK_REFRESHING) {
2194 spin_unlock_irqrestore(&lockres->l_lock, flags);
2195
2196 ocfs2_wait_on_refreshing_lock(lockres);
2197 goto refresh_check;
2198 }
2199
2200 /* Ok, I'll be the one to refresh this lock. */
2201 lockres_or_flags(lockres, OCFS2_LOCK_REFRESHING);
2202 spin_unlock_irqrestore(&lockres->l_lock, flags);
2203
2204 status = 1;
2205 bail:
2206 mlog(0, "status %d\n", status);
2207 return status;
2208 }
2209
2210 /* If status is non-zero, I'll mark it as not being in refresh
2211  * anymore, but I won't clear the needs refresh flag. */
2212 static inline void ocfs2_complete_lock_res_refresh(struct ocfs2_lock_res *lockres,
2213 int status)
2214 {
2215 unsigned long flags;
2216
2217 spin_lock_irqsave(&lockres->l_lock, flags);
2218 lockres_clear_flags(lockres, OCFS2_LOCK_REFRESHING);
2219 if (!status)
2220 lockres_clear_flags(lockres, OCFS2_LOCK_NEEDS_REFRESH);
2221 spin_unlock_irqrestore(&lockres->l_lock, flags);
2222
2223 wake_up(&lockres->l_event);
2224 }
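
/*
 * The refresh handshake these two helpers implement, in sketch form
 * (ocfs2_inode_lock_update() below and ocfs2_super_lock() both follow
 * this shape; refresh_from_lvb_or_disk() is a stand-in for the
 * caller-specific refresh work):
 *
 *	if (ocfs2_should_refresh_lock_res(lockres)) {
 *		status = refresh_from_lvb_or_disk(...);
 *		ocfs2_complete_lock_res_refresh(lockres, status);
 *	}
 */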
2225
2226 /* may or may not return a bh if it went to disk. */
2227 static int ocfs2_inode_lock_update(struct inode *inode,
2228 struct buffer_head **bh)
2229 {
2230 int status = 0;
2231 struct ocfs2_inode_info *oi = OCFS2_I(inode);
2232 struct ocfs2_lock_res *lockres = &oi->ip_inode_lockres;
2233 struct ocfs2_dinode *fe;
2234 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
2235
2236 if (ocfs2_mount_local(osb))
2237 goto bail;
2238
2239 spin_lock(&oi->ip_lock);
2240 if (oi->ip_flags & OCFS2_INODE_DELETED) {
2241 mlog(0, "Orphaned inode %llu was deleted while we "
2242 "were waiting on a lock. ip_flags = 0x%x\n",
2243 (unsigned long long)oi->ip_blkno, oi->ip_flags);
2244 spin_unlock(&oi->ip_lock);
2245 status = -ENOENT;
2246 goto bail;
2247 }
2248 spin_unlock(&oi->ip_lock);
2249
2250 if (!ocfs2_should_refresh_lock_res(lockres))
2251 goto bail;
2252
2253 /* This will discard any caching information we might have had
2254 * for the inode metadata. */
2255 ocfs2_metadata_cache_purge(INODE_CACHE(inode));
2256
2257 ocfs2_extent_map_trunc(inode, 0);
2258
2259 if (ocfs2_meta_lvb_is_trustable(inode, lockres)) {
2260 mlog(0, "Trusting LVB on inode %llu\n",
2261 (unsigned long long)oi->ip_blkno);
2262 ocfs2_refresh_inode_from_lvb(inode);
2263 } else {
2264 /* Boo, we have to go to disk. */
2265 /* read bh, cast, ocfs2_refresh_inode */
2266 status = ocfs2_read_inode_block(inode, bh);
2267 if (status < 0) {
2268 mlog_errno(status);
2269 goto bail_refresh;
2270 }
2271 fe = (struct ocfs2_dinode *) (*bh)->b_data;
2272
2273 /* This is a good chance to make sure we're not
2274 * locking an invalid object. ocfs2_read_inode_block()
2275 * already checked that the inode block is sane.
2276 *
2277 * We bug on a stale inode here because we checked
2278 * above whether it was wiped from disk. The wiping
2279 * node provides a guarantee that we receive that
2280 * message and can mark the inode before dropping any
2281 * locks associated with it. */
2282 mlog_bug_on_msg(inode->i_generation !=
2283 le32_to_cpu(fe->i_generation),
2284 "Invalid dinode %llu disk generation: %u "
2285 "inode->i_generation: %u\n",
2286 (unsigned long long)oi->ip_blkno,
2287 le32_to_cpu(fe->i_generation),
2288 inode->i_generation);
2289 mlog_bug_on_msg(le64_to_cpu(fe->i_dtime) ||
2290 !(fe->i_flags & cpu_to_le32(OCFS2_VALID_FL)),
2291 "Stale dinode %llu dtime: %llu flags: 0x%x\n",
2292 (unsigned long long)oi->ip_blkno,
2293 (unsigned long long)le64_to_cpu(fe->i_dtime),
2294 le32_to_cpu(fe->i_flags));
2295
2296 ocfs2_refresh_inode(inode, fe);
2297 ocfs2_track_lock_refresh(lockres);
2298 }
2299
2300 status = 0;
2301 bail_refresh:
2302 ocfs2_complete_lock_res_refresh(lockres, status);
2303 bail:
2304 return status;
2305 }
2306
2307 static int ocfs2_assign_bh(struct inode *inode,
2308 struct buffer_head **ret_bh,
2309 struct buffer_head *passed_bh)
2310 {
2311 int status;
2312
2313 if (passed_bh) {
2314 /* Ok, the update went to disk for us, use the
2315 * returned bh. */
2316 *ret_bh = passed_bh;
2317 get_bh(*ret_bh);
2318
2319 return 0;
2320 }
2321
2322 status = ocfs2_read_inode_block(inode, ret_bh);
2323 if (status < 0)
2324 mlog_errno(status);
2325
2326 return status;
2327 }
2328
2329 /*
2330 * returns < 0 error if the callback will never be called, otherwise
2331 * the result of the lock will be communicated via the callback.
2332 */
2333 int ocfs2_inode_lock_full_nested(struct inode *inode,
2334 struct buffer_head **ret_bh,
2335 int ex,
2336 int arg_flags,
2337 int subclass)
2338 {
2339 int status, level, acquired;
2340 u32 dlm_flags;
2341 struct ocfs2_lock_res *lockres = NULL;
2342 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
2343 struct buffer_head *local_bh = NULL;
2344
2345 BUG_ON(!inode);
2346
2347 mlog(0, "inode %llu, take %s META lock\n",
2348 (unsigned long long)OCFS2_I(inode)->ip_blkno,
2349 ex ? "EXMODE" : "PRMODE");
2350
2351 status = 0;
2352 acquired = 0;
2353 /* We'll allow faking a readonly metadata lock for
2354 * rodevices. */
2355 if (ocfs2_is_hard_readonly(osb)) {
2356 if (ex)
2357 status = -EROFS;
2358 goto getbh;
2359 }
2360
2361 if ((arg_flags & OCFS2_META_LOCK_GETBH) ||
2362 ocfs2_mount_local(osb))
2363 goto update;
2364
2365 if (!(arg_flags & OCFS2_META_LOCK_RECOVERY))
2366 ocfs2_wait_for_recovery(osb);
2367
2368 lockres = &OCFS2_I(inode)->ip_inode_lockres;
2369 level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
2370 dlm_flags = 0;
2371 if (arg_flags & OCFS2_META_LOCK_NOQUEUE)
2372 dlm_flags |= DLM_LKF_NOQUEUE;
2373
2374 status = __ocfs2_cluster_lock(osb, lockres, level, dlm_flags,
2375 arg_flags, subclass, _RET_IP_);
2376 if (status < 0) {
2377 if (status != -EAGAIN)
2378 mlog_errno(status);
2379 goto bail;
2380 }
2381
2382 /* Notify the error cleanup path to drop the cluster lock. */
2383 acquired = 1;
2384
2385 /* We wait twice because a node may have died while we were in
2386 * the lower dlm layers. The second time though, we've
2387 * committed to owning this lock so we don't allow signals to
2388 * abort the operation. */
2389 if (!(arg_flags & OCFS2_META_LOCK_RECOVERY))
2390 ocfs2_wait_for_recovery(osb);
2391
2392 update:
2393 /*
2394 * We only see this flag if we're being called from
2395 * ocfs2_read_locked_inode(). It means we're locking an inode
2396 * which hasn't been populated yet, so clear the refresh flag
2397 * and let the caller handle it.
2398 */
2399 if (inode->i_state & I_NEW) {
2400 status = 0;
2401 if (lockres)
2402 ocfs2_complete_lock_res_refresh(lockres, 0);
2403 goto bail;
2404 }
2405
2406 /* This is fun. The caller may want a bh back, or it may
2407 * not. ocfs2_inode_lock_update definitely wants one in, but
2408 * may or may not read one, depending on what's in the
2409 * LVB. The result of all of this is that we've *only* gone to
2410 * disk if we have to, so the complexity is worthwhile. */
2411 status = ocfs2_inode_lock_update(inode, &local_bh);
2412 if (status < 0) {
2413 if (status != -ENOENT)
2414 mlog_errno(status);
2415 goto bail;
2416 }
2417 getbh:
2418 if (ret_bh) {
2419 status = ocfs2_assign_bh(inode, ret_bh, local_bh);
2420 if (status < 0) {
2421 mlog_errno(status);
2422 goto bail;
2423 }
2424 }
2425
2426 bail:
2427 if (status < 0) {
2428 if (ret_bh && (*ret_bh)) {
2429 brelse(*ret_bh);
2430 *ret_bh = NULL;
2431 }
2432 if (acquired)
2433 ocfs2_inode_unlock(inode, ex);
2434 }
2435
2436 if (local_bh)
2437 brelse(local_bh);
2438
2439 return status;
2440 }
2441
2442 /*
2443 * This is working around a lock inversion between tasks acquiring DLM
2444 * locks while holding a page lock and the downconvert thread which
2445 * blocks dlm lock acquiry while acquiring page locks.
2446 *
2447  * ** These _with_page variants are only intended to be called from aop
2448 * methods that hold page locks and return a very specific *positive* error
2449 * code that aop methods pass up to the VFS -- test for errors with != 0. **
2450 *
2451 * The DLM is called such that it returns -EAGAIN if it would have
2452 * blocked waiting for the downconvert thread. In that case we unlock
2453 * our page so the downconvert thread can make progress. Once we've
2454 * done this we have to return AOP_TRUNCATED_PAGE so the aop method
2455 * that called us can bubble that back up into the VFS who will then
2456 * immediately retry the aop call.
2457 *
2458 * We do a blocking lock and immediate unlock before returning, though, so that
2459 * the lock has a great chance of being cached on this node by the time the VFS
2460 * calls back to retry the aop. This has a potential to livelock as nodes
2461  * ping locks back and forth, but that's a risk we're willing to take
2462  * in order to avoid the lock inversion in a simple way.
2463 */
2464 int ocfs2_inode_lock_with_page(struct inode *inode,
2465 struct buffer_head **ret_bh,
2466 int ex,
2467 struct page *page)
2468 {
2469 int ret;
2470
2471 ret = ocfs2_inode_lock_full(inode, ret_bh, ex, OCFS2_LOCK_NONBLOCK);
2472 if (ret == -EAGAIN) {
2473 unlock_page(page);
2474 if (ocfs2_inode_lock(inode, ret_bh, ex) == 0)
2475 ocfs2_inode_unlock(inode, ex);
2476 ret = AOP_TRUNCATED_PAGE;
2477 }
2478
2479 return ret;
2480 }
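
/*
 * Sketch of the aop-side contract described above, assuming a
 * readpage-style method (the real users are in aops.c; note the
 * positive AOP_TRUNCATED_PAGE return, hence the != 0 test):
 *
 *	ret = ocfs2_inode_lock_with_page(inode, NULL, 0, page);
 *	if (ret != 0)
 *		return ret;	// AOP_TRUNCATED_PAGE or a negative error
 */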
2481
2482 int ocfs2_inode_lock_atime(struct inode *inode,
2483 struct vfsmount *vfsmnt,
2484 int *level)
2485 {
2486 int ret;
2487
2488 ret = ocfs2_inode_lock(inode, NULL, 0);
2489 if (ret < 0) {
2490 mlog_errno(ret);
2491 return ret;
2492 }
2493
2494 /*
2495 * If we should update atime, we will get EX lock,
2496 * otherwise we just get PR lock.
2497 */
2498 if (ocfs2_should_update_atime(inode, vfsmnt)) {
2499 struct buffer_head *bh = NULL;
2500
2501 ocfs2_inode_unlock(inode, 0);
2502 ret = ocfs2_inode_lock(inode, &bh, 1);
2503 if (ret < 0) {
2504 mlog_errno(ret);
2505 return ret;
2506 }
2507 *level = 1;
2508 if (ocfs2_should_update_atime(inode, vfsmnt))
2509 ocfs2_update_inode_atime(inode, bh);
2510 if (bh)
2511 brelse(bh);
2512 } else
2513 *level = 0;
2514
2515 return ret;
2516 }
2517
2518 void ocfs2_inode_unlock(struct inode *inode,
2519 int ex)
2520 {
2521 int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
2522 struct ocfs2_lock_res *lockres = &OCFS2_I(inode)->ip_inode_lockres;
2523 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
2524
2525 mlog(0, "inode %llu drop %s META lock\n",
2526 (unsigned long long)OCFS2_I(inode)->ip_blkno,
2527 ex ? "EXMODE" : "PRMODE");
2528
2529 if (!ocfs2_is_hard_readonly(OCFS2_SB(inode->i_sb)) &&
2530 !ocfs2_mount_local(osb))
2531 ocfs2_cluster_unlock(OCFS2_SB(inode->i_sb), lockres, level);
2532 }
2533
2534 /*
2535  * These _tracker variants are introduced to deal with the recursive cluster
2536 * locking issue. The idea is to keep track of a lock holder on the stack of
2537 * the current process. If there's a lock holder on the stack, we know the
2538 * task context is already protected by cluster locking. Currently, they're
2539 * used in some VFS entry routines.
2540 *
2541 * return < 0 on error, return == 0 if there's no lock holder on the stack
2542 * before this call, return == 1 if this call would be a recursive locking.
2543 */
2544 int ocfs2_inode_lock_tracker(struct inode *inode,
2545 struct buffer_head **ret_bh,
2546 int ex,
2547 struct ocfs2_lock_holder *oh)
2548 {
2549 int status;
2550 int arg_flags = 0, has_locked;
2551 struct ocfs2_lock_res *lockres;
2552
2553 lockres = &OCFS2_I(inode)->ip_inode_lockres;
2554 has_locked = ocfs2_is_locked_by_me(lockres);
2555 /* Just get buffer head if the cluster lock has been taken */
2556 if (has_locked)
2557 arg_flags = OCFS2_META_LOCK_GETBH;
2558
2559 if (likely(!has_locked || ret_bh)) {
2560 status = ocfs2_inode_lock_full(inode, ret_bh, ex, arg_flags);
2561 if (status < 0) {
2562 if (status != -ENOENT)
2563 mlog_errno(status);
2564 return status;
2565 }
2566 }
2567 if (!has_locked)
2568 ocfs2_add_holder(lockres, oh);
2569
2570 return has_locked;
2571 }
2572
2573 void ocfs2_inode_unlock_tracker(struct inode *inode,
2574 int ex,
2575 struct ocfs2_lock_holder *oh,
2576 int had_lock)
2577 {
2578 struct ocfs2_lock_res *lockres;
2579
2580 lockres = &OCFS2_I(inode)->ip_inode_lockres;
2581 if (!had_lock) {
2582 ocfs2_remove_holder(lockres, oh);
2583 ocfs2_inode_unlock(inode, ex);
2584 }
2585 }
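
/*
 * Sketch of the recursion-safe pattern the trackers enable, e.g. from
 * a VFS entry point (the struct ocfs2_lock_holder lives on the
 * caller's stack; error handling is abbreviated):
 *
 *	struct ocfs2_lock_holder oh;
 *	int had_lock;
 *
 *	had_lock = ocfs2_inode_lock_tracker(inode, NULL, 0, &oh);
 *	if (had_lock < 0)
 *		return had_lock;
 *	// ... work under the cluster lock ...
 *	ocfs2_inode_unlock_tracker(inode, 0, &oh, had_lock);
 */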
2586
2587 int ocfs2_orphan_scan_lock(struct ocfs2_super *osb, u32 *seqno)
2588 {
2589 struct ocfs2_lock_res *lockres;
2590 struct ocfs2_orphan_scan_lvb *lvb;
2591 int status = 0;
2592
2593 if (ocfs2_is_hard_readonly(osb))
2594 return -EROFS;
2595
2596 if (ocfs2_mount_local(osb))
2597 return 0;
2598
2599 lockres = &osb->osb_orphan_scan.os_lockres;
2600 status = ocfs2_cluster_lock(osb, lockres, DLM_LOCK_EX, 0, 0);
2601 if (status < 0)
2602 return status;
2603
2604 lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
2605 if (ocfs2_dlm_lvb_valid(&lockres->l_lksb) &&
2606 lvb->lvb_version == OCFS2_ORPHAN_LVB_VERSION)
2607 *seqno = be32_to_cpu(lvb->lvb_os_seqno);
2608 else
2609 *seqno = osb->osb_orphan_scan.os_seqno + 1;
2610
2611 return status;
2612 }
2613
2614 void ocfs2_orphan_scan_unlock(struct ocfs2_super *osb, u32 seqno)
2615 {
2616 struct ocfs2_lock_res *lockres;
2617 struct ocfs2_orphan_scan_lvb *lvb;
2618
2619 if (!ocfs2_is_hard_readonly(osb) && !ocfs2_mount_local(osb)) {
2620 lockres = &osb->osb_orphan_scan.os_lockres;
2621 lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
2622 lvb->lvb_version = OCFS2_ORPHAN_LVB_VERSION;
2623 lvb->lvb_os_seqno = cpu_to_be32(seqno);
2624 ocfs2_cluster_unlock(osb, lockres, DLM_LOCK_EX);
2625 }
2626 }
2627
2628 int ocfs2_super_lock(struct ocfs2_super *osb,
2629 int ex)
2630 {
2631 int status = 0;
2632 int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
2633 struct ocfs2_lock_res *lockres = &osb->osb_super_lockres;
2634
2635 if (ocfs2_is_hard_readonly(osb))
2636 return -EROFS;
2637
2638 if (ocfs2_mount_local(osb))
2639 goto bail;
2640
2641 status = ocfs2_cluster_lock(osb, lockres, level, 0, 0);
2642 if (status < 0) {
2643 mlog_errno(status);
2644 goto bail;
2645 }
2646
2647 /* The super block lock path is really in the best position to
2648 * know when resources covered by the lock need to be
2649 * refreshed, so we do it here. Of course, making sense of
2650 * everything is up to the caller :) */
2651 status = ocfs2_should_refresh_lock_res(lockres);
2652 if (status) {
2653 status = ocfs2_refresh_slot_info(osb);
2654
2655 ocfs2_complete_lock_res_refresh(lockres, status);
2656
2657 if (status < 0) {
2658 ocfs2_cluster_unlock(osb, lockres, level);
2659 mlog_errno(status);
2660 }
2661 ocfs2_track_lock_refresh(lockres);
2662 }
2663 bail:
2664 return status;
2665 }
2666
2667 void ocfs2_super_unlock(struct ocfs2_super *osb,
2668 int ex)
2669 {
2670 int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
2671 struct ocfs2_lock_res *lockres = &osb->osb_super_lockres;
2672
2673 if (!ocfs2_mount_local(osb))
2674 ocfs2_cluster_unlock(osb, lockres, level);
2675 }
2676
2677 int ocfs2_rename_lock(struct ocfs2_super *osb)
2678 {
2679 int status;
2680 struct ocfs2_lock_res *lockres = &osb->osb_rename_lockres;
2681
2682 if (ocfs2_is_hard_readonly(osb))
2683 return -EROFS;
2684
2685 if (ocfs2_mount_local(osb))
2686 return 0;
2687
2688 status = ocfs2_cluster_lock(osb, lockres, DLM_LOCK_EX, 0, 0);
2689 if (status < 0)
2690 mlog_errno(status);
2691
2692 return status;
2693 }
2694
2695 void ocfs2_rename_unlock(struct ocfs2_super *osb)
2696 {
2697 struct ocfs2_lock_res *lockres = &osb->osb_rename_lockres;
2698
2699 if (!ocfs2_mount_local(osb))
2700 ocfs2_cluster_unlock(osb, lockres, DLM_LOCK_EX);
2701 }
2702
2703 int ocfs2_nfs_sync_lock(struct ocfs2_super *osb, int ex)
2704 {
2705 int status;
2706 struct ocfs2_lock_res *lockres = &osb->osb_nfs_sync_lockres;
2707
2708 if (ocfs2_is_hard_readonly(osb))
2709 return -EROFS;
2710
2711 if (ocfs2_mount_local(osb))
2712 return 0;
2713
2714 status = ocfs2_cluster_lock(osb, lockres, ex ? LKM_EXMODE : LKM_PRMODE,
2715 0, 0);
2716 if (status < 0)
2717 mlog(ML_ERROR, "lock on nfs sync lock failed %d\n", status);
2718
2719 return status;
2720 }
2721
2722 void ocfs2_nfs_sync_unlock(struct ocfs2_super *osb, int ex)
2723 {
2724 struct ocfs2_lock_res *lockres = &osb->osb_nfs_sync_lockres;
2725
2726 if (!ocfs2_mount_local(osb))
2727 ocfs2_cluster_unlock(osb, lockres,
2728 ex ? LKM_EXMODE : LKM_PRMODE);
2729 }
2730
2731 int ocfs2_dentry_lock(struct dentry *dentry, int ex)
2732 {
2733 int ret;
2734 int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
2735 struct ocfs2_dentry_lock *dl = dentry->d_fsdata;
2736 struct ocfs2_super *osb = OCFS2_SB(dentry->d_sb);
2737
2738 BUG_ON(!dl);
2739
2740 if (ocfs2_is_hard_readonly(osb)) {
2741 if (ex)
2742 return -EROFS;
2743 return 0;
2744 }
2745
2746 if (ocfs2_mount_local(osb))
2747 return 0;
2748
2749 ret = ocfs2_cluster_lock(osb, &dl->dl_lockres, level, 0, 0);
2750 if (ret < 0)
2751 mlog_errno(ret);
2752
2753 return ret;
2754 }
2755
2756 void ocfs2_dentry_unlock(struct dentry *dentry, int ex)
2757 {
2758 int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
2759 struct ocfs2_dentry_lock *dl = dentry->d_fsdata;
2760 struct ocfs2_super *osb = OCFS2_SB(dentry->d_sb);
2761
2762 if (!ocfs2_is_hard_readonly(osb) && !ocfs2_mount_local(osb))
2763 ocfs2_cluster_unlock(osb, &dl->dl_lockres, level);
2764 }
2765
2766 /* Reference counting of the dlm debug structure. We want this because
2767  * open references on the debug inodes can outlive the mount, so
2768 * we can't rely on the ocfs2_super to always exist. */
2769 static void ocfs2_dlm_debug_free(struct kref *kref)
2770 {
2771 struct ocfs2_dlm_debug *dlm_debug;
2772
2773 dlm_debug = container_of(kref, struct ocfs2_dlm_debug, d_refcnt);
2774
2775 kfree(dlm_debug);
2776 }
2777
2778 void ocfs2_put_dlm_debug(struct ocfs2_dlm_debug *dlm_debug)
2779 {
2780 if (dlm_debug)
2781 kref_put(&dlm_debug->d_refcnt, ocfs2_dlm_debug_free);
2782 }
2783
2784 static void ocfs2_get_dlm_debug(struct ocfs2_dlm_debug *debug)
2785 {
2786 kref_get(&debug->d_refcnt);
2787 }
2788
2789 struct ocfs2_dlm_debug *ocfs2_new_dlm_debug(void)
2790 {
2791 struct ocfs2_dlm_debug *dlm_debug;
2792
2793 dlm_debug = kmalloc(sizeof(struct ocfs2_dlm_debug), GFP_KERNEL);
2794 if (!dlm_debug) {
2795 mlog_errno(-ENOMEM);
2796 goto out;
2797 }
2798
2799 kref_init(&dlm_debug->d_refcnt);
2800 INIT_LIST_HEAD(&dlm_debug->d_lockres_tracking);
2801 dlm_debug->d_locking_state = NULL;
2802 out:
2803 return dlm_debug;
2804 }
2805
2806 /* Access to this is arbitrated for us via seq_file->sem. */
2807 struct ocfs2_dlm_seq_priv {
2808 struct ocfs2_dlm_debug *p_dlm_debug;
2809 struct ocfs2_lock_res p_iter_res;
2810 struct ocfs2_lock_res p_tmp_res;
2811 };
2812
2813 static struct ocfs2_lock_res *ocfs2_dlm_next_res(struct ocfs2_lock_res *start,
2814 struct ocfs2_dlm_seq_priv *priv)
2815 {
2816 struct ocfs2_lock_res *iter, *ret = NULL;
2817 struct ocfs2_dlm_debug *dlm_debug = priv->p_dlm_debug;
2818
2819 assert_spin_locked(&ocfs2_dlm_tracking_lock);
2820
2821 list_for_each_entry(iter, &start->l_debug_list, l_debug_list) {
2822 /* discover the head of the list */
2823 if (&iter->l_debug_list == &dlm_debug->d_lockres_tracking) {
2824 mlog(0, "End of list found, %p\n", ret);
2825 break;
2826 }
2827
2828 /* We track our "dummy" iteration lockres' by a NULL
2829 * l_ops field. */
2830 if (iter->l_ops != NULL) {
2831 ret = iter;
2832 break;
2833 }
2834 }
2835
2836 return ret;
2837 }
2838
2839 static void *ocfs2_dlm_seq_start(struct seq_file *m, loff_t *pos)
2840 {
2841 struct ocfs2_dlm_seq_priv *priv = m->private;
2842 struct ocfs2_lock_res *iter;
2843
2844 spin_lock(&ocfs2_dlm_tracking_lock);
2845 iter = ocfs2_dlm_next_res(&priv->p_iter_res, priv);
2846 if (iter) {
2847 /* Since lockres' have the lifetime of their container
2848 * (which can be inodes, ocfs2_supers, etc) we want to
2849 * copy this out to a temporary lockres while still
2850 * under the spinlock. Obviously after this we can't
2851 * trust any pointers on the copy returned, but that's
2852 * ok as the information we want isn't typically held
2853 * in them. */
2854 priv->p_tmp_res = *iter;
2855 iter = &priv->p_tmp_res;
2856 }
2857 spin_unlock(&ocfs2_dlm_tracking_lock);
2858
2859 return iter;
2860 }
2861
2862 static void ocfs2_dlm_seq_stop(struct seq_file *m, void *v)
2863 {
2864 }
2865
2866 static void *ocfs2_dlm_seq_next(struct seq_file *m, void *v, loff_t *pos)
2867 {
2868 struct ocfs2_dlm_seq_priv *priv = m->private;
2869 struct ocfs2_lock_res *iter = v;
2870 struct ocfs2_lock_res *dummy = &priv->p_iter_res;
2871
2872 spin_lock(&ocfs2_dlm_tracking_lock);
2873 iter = ocfs2_dlm_next_res(iter, priv);
2874 list_del_init(&dummy->l_debug_list);
2875 if (iter) {
2876 list_add(&dummy->l_debug_list, &iter->l_debug_list);
2877 priv->p_tmp_res = *iter;
2878 iter = &priv->p_tmp_res;
2879 }
2880 spin_unlock(&ocfs2_dlm_tracking_lock);
2881
2882 return iter;
2883 }
2884
2885 /*
2886 * Version is used by debugfs.ocfs2 to determine the format being used
2887 *
2888 * New in version 2
2889 * - Lock stats printed
2890 * New in version 3
2891 * - Max time in lock stats is in usecs (instead of nsecs)
2892 */
2893 #define OCFS2_DLM_DEBUG_STR_VERSION 3
2894 static int ocfs2_dlm_seq_show(struct seq_file *m, void *v)
2895 {
2896 int i;
2897 char *lvb;
2898 struct ocfs2_lock_res *lockres = v;
2899
2900 if (!lockres)
2901 return -EINVAL;
2902
2903 seq_printf(m, "0x%x\t", OCFS2_DLM_DEBUG_STR_VERSION);
2904
2905 if (lockres->l_type == OCFS2_LOCK_TYPE_DENTRY)
2906 seq_printf(m, "%.*s%08x\t", OCFS2_DENTRY_LOCK_INO_START - 1,
2907 lockres->l_name,
2908 (unsigned int)ocfs2_get_dentry_lock_ino(lockres));
2909 else
2910 seq_printf(m, "%.*s\t", OCFS2_LOCK_ID_MAX_LEN, lockres->l_name);
2911
2912 seq_printf(m, "%d\t"
2913 "0x%lx\t"
2914 "0x%x\t"
2915 "0x%x\t"
2916 "%u\t"
2917 "%u\t"
2918 "%d\t"
2919 "%d\t",
2920 lockres->l_level,
2921 lockres->l_flags,
2922 lockres->l_action,
2923 lockres->l_unlock_action,
2924 lockres->l_ro_holders,
2925 lockres->l_ex_holders,
2926 lockres->l_requested,
2927 lockres->l_blocking);
2928
2929 /* Dump the raw LVB */
2930 lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
2931 for(i = 0; i < DLM_LVB_LEN; i++)
2932 seq_printf(m, "0x%x\t", lvb[i]);
2933
2934 #ifdef CONFIG_OCFS2_FS_STATS
2935 # define lock_num_prmode(_l) ((_l)->l_lock_prmode.ls_gets)
2936 # define lock_num_exmode(_l) ((_l)->l_lock_exmode.ls_gets)
2937 # define lock_num_prmode_failed(_l) ((_l)->l_lock_prmode.ls_fail)
2938 # define lock_num_exmode_failed(_l) ((_l)->l_lock_exmode.ls_fail)
2939 # define lock_total_prmode(_l) ((_l)->l_lock_prmode.ls_total)
2940 # define lock_total_exmode(_l) ((_l)->l_lock_exmode.ls_total)
2941 # define lock_max_prmode(_l) ((_l)->l_lock_prmode.ls_max)
2942 # define lock_max_exmode(_l) ((_l)->l_lock_exmode.ls_max)
2943 # define lock_refresh(_l) ((_l)->l_lock_refresh)
2944 #else
2945 # define lock_num_prmode(_l) (0)
2946 # define lock_num_exmode(_l) (0)
2947 # define lock_num_prmode_failed(_l) (0)
2948 # define lock_num_exmode_failed(_l) (0)
2949 # define lock_total_prmode(_l) (0ULL)
2950 # define lock_total_exmode(_l) (0ULL)
2951 # define lock_max_prmode(_l) (0)
2952 # define lock_max_exmode(_l) (0)
2953 # define lock_refresh(_l) (0)
2954 #endif
2955 /* The following seq_print was added in version 2 of this output */
2956 seq_printf(m, "%u\t"
2957 "%u\t"
2958 "%u\t"
2959 "%u\t"
2960 "%llu\t"
2961 "%llu\t"
2962 "%u\t"
2963 "%u\t"
2964 "%u\t",
2965 lock_num_prmode(lockres),
2966 lock_num_exmode(lockres),
2967 lock_num_prmode_failed(lockres),
2968 lock_num_exmode_failed(lockres),
2969 lock_total_prmode(lockres),
2970 lock_total_exmode(lockres),
2971 lock_max_prmode(lockres),
2972 lock_max_exmode(lockres),
2973 lock_refresh(lockres));
2974
2975 /* End the line */
2976 seq_printf(m, "\n");
2977 return 0;
2978 }
2979
2980 static const struct seq_operations ocfs2_dlm_seq_ops = {
2981 .start = ocfs2_dlm_seq_start,
2982 .stop = ocfs2_dlm_seq_stop,
2983 .next = ocfs2_dlm_seq_next,
2984 .show = ocfs2_dlm_seq_show,
2985 };
2986
2987 static int ocfs2_dlm_debug_release(struct inode *inode, struct file *file)
2988 {
2989 struct seq_file *seq = file->private_data;
2990 struct ocfs2_dlm_seq_priv *priv = seq->private;
2991 struct ocfs2_lock_res *res = &priv->p_iter_res;
2992
2993 ocfs2_remove_lockres_tracking(res);
2994 ocfs2_put_dlm_debug(priv->p_dlm_debug);
2995 return seq_release_private(inode, file);
2996 }
2997
2998 static int ocfs2_dlm_debug_open(struct inode *inode, struct file *file)
2999 {
3000 struct ocfs2_dlm_seq_priv *priv;
3001 struct ocfs2_super *osb;
3002
3003 priv = __seq_open_private(file, &ocfs2_dlm_seq_ops, sizeof(*priv));
3004 if (!priv) {
3005 mlog_errno(-ENOMEM);
3006 return -ENOMEM;
3007 }
3008
3009 osb = inode->i_private;
3010 ocfs2_get_dlm_debug(osb->osb_dlm_debug);
3011 priv->p_dlm_debug = osb->osb_dlm_debug;
3012 INIT_LIST_HEAD(&priv->p_iter_res.l_debug_list);
3013
3014 ocfs2_add_lockres_tracking(&priv->p_iter_res,
3015 priv->p_dlm_debug);
3016
3017 return 0;
3018 }
3019
3020 static const struct file_operations ocfs2_dlm_debug_fops = {
3021 .open = ocfs2_dlm_debug_open,
3022 .release = ocfs2_dlm_debug_release,
3023 .read = seq_read,
3024 .llseek = seq_lseek,
3025 };
3026
3027 static int ocfs2_dlm_init_debug(struct ocfs2_super *osb)
3028 {
3029 int ret = 0;
3030 struct ocfs2_dlm_debug *dlm_debug = osb->osb_dlm_debug;
3031
3032 dlm_debug->d_locking_state = debugfs_create_file("locking_state",
3033 S_IFREG|S_IRUSR,
3034 osb->osb_debug_root,
3035 osb,
3036 &ocfs2_dlm_debug_fops);
3037 if (!dlm_debug->d_locking_state) {
3038 ret = -EINVAL;
3039 mlog(ML_ERROR,
3040 "Unable to create locking state debugfs file.\n");
3041 goto out;
3042 }
3043
3044 ocfs2_get_dlm_debug(dlm_debug);
3045 out:
3046 return ret;
3047 }
3048
3049 static void ocfs2_dlm_shutdown_debug(struct ocfs2_super *osb)
3050 {
3051 struct ocfs2_dlm_debug *dlm_debug = osb->osb_dlm_debug;
3052
3053 if (dlm_debug) {
3054 debugfs_remove(dlm_debug->d_locking_state);
3055 ocfs2_put_dlm_debug(dlm_debug);
3056 }
3057 }
3058
3059 int ocfs2_dlm_init(struct ocfs2_super *osb)
3060 {
3061 int status = 0;
3062 struct ocfs2_cluster_connection *conn = NULL;
3063
3064 if (ocfs2_mount_local(osb)) {
3065 osb->node_num = 0;
3066 goto local;
3067 }
3068
3069 status = ocfs2_dlm_init_debug(osb);
3070 if (status < 0) {
3071 mlog_errno(status);
3072 goto bail;
3073 }
3074
3075 /* launch downconvert thread */
3076 osb->dc_task = kthread_run(ocfs2_downconvert_thread, osb, "ocfs2dc");
3077 if (IS_ERR(osb->dc_task)) {
3078 status = PTR_ERR(osb->dc_task);
3079 osb->dc_task = NULL;
3080 mlog_errno(status);
3081 goto bail;
3082 }
3083
3084 /* for now, uuid == domain */
3085 status = ocfs2_cluster_connect(osb->osb_cluster_stack,
3086 osb->osb_cluster_name,
3087 strlen(osb->osb_cluster_name),
3088 osb->uuid_str,
3089 strlen(osb->uuid_str),
3090 &lproto, ocfs2_do_node_down, osb,
3091 &conn);
3092 if (status) {
3093 mlog_errno(status);
3094 goto bail;
3095 }
3096
3097 status = ocfs2_cluster_this_node(conn, &osb->node_num);
3098 if (status < 0) {
3099 mlog_errno(status);
3100 mlog(ML_ERROR,
3101 "could not find this host's node number\n");
3102 ocfs2_cluster_disconnect(conn, 0);
3103 goto bail;
3104 }
3105
3106 local:
3107 ocfs2_super_lock_res_init(&osb->osb_super_lockres, osb);
3108 ocfs2_rename_lock_res_init(&osb->osb_rename_lockres, osb);
3109 ocfs2_nfs_sync_lock_res_init(&osb->osb_nfs_sync_lockres, osb);
3110 ocfs2_orphan_scan_lock_res_init(&osb->osb_orphan_scan.os_lockres, osb);
3111
3112 osb->cconn = conn;
3113
3114 status = 0;
3115 bail:
3116 if (status < 0) {
3117 ocfs2_dlm_shutdown_debug(osb);
3118 if (osb->dc_task)
3119 kthread_stop(osb->dc_task);
3120 }
3121
3122 return status;
3123 }
3124
3125 void ocfs2_dlm_shutdown(struct ocfs2_super *osb,
3126 int hangup_pending)
3127 {
3128 ocfs2_drop_osb_locks(osb);
3129
3130 /*
3131 * Now that we have dropped all locks and ocfs2_dismount_volume()
3132 * has disabled recovery, the DLM won't be talking to us. It's
3133 * safe to tear things down before disconnecting the cluster.
3134 */
3135
3136 if (osb->dc_task) {
3137 kthread_stop(osb->dc_task);
3138 osb->dc_task = NULL;
3139 }
3140
3141 ocfs2_lock_res_free(&osb->osb_super_lockres);
3142 ocfs2_lock_res_free(&osb->osb_rename_lockres);
3143 ocfs2_lock_res_free(&osb->osb_nfs_sync_lockres);
3144 ocfs2_lock_res_free(&osb->osb_orphan_scan.os_lockres);
3145
3146 ocfs2_cluster_disconnect(osb->cconn, hangup_pending);
3147 osb->cconn = NULL;
3148
3149 ocfs2_dlm_shutdown_debug(osb);
3150 }
3151
3152 static int ocfs2_drop_lock(struct ocfs2_super *osb,
3153 struct ocfs2_lock_res *lockres)
3154 {
3155 int ret;
3156 unsigned long flags;
3157 u32 lkm_flags = 0;
3158
3159 /* We didn't get anywhere near actually using this lockres. */
3160 if (!(lockres->l_flags & OCFS2_LOCK_INITIALIZED))
3161 goto out;
3162
3163 if (lockres->l_ops->flags & LOCK_TYPE_USES_LVB)
3164 lkm_flags |= DLM_LKF_VALBLK;
3165
3166 spin_lock_irqsave(&lockres->l_lock, flags);
3167
3168 mlog_bug_on_msg(!(lockres->l_flags & OCFS2_LOCK_FREEING),
3169 "lockres %s, flags 0x%lx\n",
3170 lockres->l_name, lockres->l_flags);
3171
3172 while (lockres->l_flags & OCFS2_LOCK_BUSY) {
3173 mlog(0, "waiting on busy lock \"%s\": flags = %lx, action = "
3174 "%u, unlock_action = %u\n",
3175 lockres->l_name, lockres->l_flags, lockres->l_action,
3176 lockres->l_unlock_action);
3177
3178 spin_unlock_irqrestore(&lockres->l_lock, flags);
3179
3180 /* XXX: Today we just wait on any busy
3181 * locks... Perhaps we need to cancel converts in the
3182 * future? */
3183 ocfs2_wait_on_busy_lock(lockres);
3184
3185 spin_lock_irqsave(&lockres->l_lock, flags);
3186 }
3187
3188 if (lockres->l_ops->flags & LOCK_TYPE_USES_LVB) {
3189 if (lockres->l_flags & OCFS2_LOCK_ATTACHED &&
3190 lockres->l_level == DLM_LOCK_EX &&
3191 !(lockres->l_flags & OCFS2_LOCK_NEEDS_REFRESH))
3192 lockres->l_ops->set_lvb(lockres);
3193 }
3194
3195 if (lockres->l_flags & OCFS2_LOCK_BUSY)
3196 mlog(ML_ERROR, "destroying busy lock: \"%s\"\n",
3197 lockres->l_name);
3198 if (lockres->l_flags & OCFS2_LOCK_BLOCKED)
3199 mlog(0, "destroying blocked lock: \"%s\"\n", lockres->l_name);
3200
3201 if (!(lockres->l_flags & OCFS2_LOCK_ATTACHED)) {
3202 spin_unlock_irqrestore(&lockres->l_lock, flags);
3203 goto out;
3204 }
3205
3206 lockres_clear_flags(lockres, OCFS2_LOCK_ATTACHED);
3207
3208 /* make sure we never get here while waiting for an ast to
3209 * fire. */
3210 BUG_ON(lockres->l_action != OCFS2_AST_INVALID);
3211
3212 /* is this necessary? */
3213 lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
3214 lockres->l_unlock_action = OCFS2_UNLOCK_DROP_LOCK;
3215 spin_unlock_irqrestore(&lockres->l_lock, flags);
3216
3217 mlog(0, "lock %s\n", lockres->l_name);
3218
3219 ret = ocfs2_dlm_unlock(osb->cconn, &lockres->l_lksb, lkm_flags);
3220 if (ret) {
3221 ocfs2_log_dlm_error("ocfs2_dlm_unlock", ret, lockres);
3222 mlog(ML_ERROR, "lockres flags: %lu\n", lockres->l_flags);
3223 ocfs2_dlm_dump_lksb(&lockres->l_lksb);
3224 BUG();
3225 }
3226 mlog(0, "lock %s, successful return from ocfs2_dlm_unlock\n",
3227 lockres->l_name);
3228
3229 ocfs2_wait_on_busy_lock(lockres);
3230 out:
3231 return 0;
3232 }
3233
3234 static void ocfs2_process_blocked_lock(struct ocfs2_super *osb,
3235 struct ocfs2_lock_res *lockres);
3236
3237 /* Mark the lockres as being dropped. It will no longer be
3238 * queued if blocking, but we still may have to wait on it
3239 * being dequeued from the downconvert thread before we can consider
3240 * it safe to drop.
3241 *
3242 * You can *not* attempt to call cluster_lock on this lockres anymore. */
3243 void ocfs2_mark_lockres_freeing(struct ocfs2_super *osb,
3244 struct ocfs2_lock_res *lockres)
3245 {
3246 int status;
3247 struct ocfs2_mask_waiter mw;
3248 unsigned long flags, flags2;
3249
3250 ocfs2_init_mask_waiter(&mw);
3251
3252 spin_lock_irqsave(&lockres->l_lock, flags);
3253 lockres->l_flags |= OCFS2_LOCK_FREEING;
3254 if (lockres->l_flags & OCFS2_LOCK_QUEUED && current == osb->dc_task) {
3255 /*
3256 * We know the downconvert is queued but not in progress
3257 		 * because we are the downconvert thread and are processing a
3258 * different lock. So we can just remove the lock from the
3259 * queue. This is not only an optimization but also a way
3260 * to avoid the following deadlock:
3261 * ocfs2_dentry_post_unlock()
3262 * ocfs2_dentry_lock_put()
3263 * ocfs2_drop_dentry_lock()
3264 * iput()
3265 * ocfs2_evict_inode()
3266 * ocfs2_clear_inode()
3267 * ocfs2_mark_lockres_freeing()
3268 * ... blocks waiting for OCFS2_LOCK_QUEUED
3269 * since we are the downconvert thread which
3270 * should clear the flag.
3271 */
3272 spin_unlock_irqrestore(&lockres->l_lock, flags);
3273 spin_lock_irqsave(&osb->dc_task_lock, flags2);
3274 list_del_init(&lockres->l_blocked_list);
3275 osb->blocked_lock_count--;
3276 spin_unlock_irqrestore(&osb->dc_task_lock, flags2);
3277 /*
3278 * Warn if we recurse into another post_unlock call. Strictly
3279 * speaking it isn't a problem but we need to be careful if
3280 * that happens (stack overflow, deadlocks, ...) so warn if
3281 * ocfs2 grows a path for which this can happen.
3282 */
3283 WARN_ON_ONCE(lockres->l_ops->post_unlock);
3284 /* Since the lock is freeing we don't do much in the fn below */
3285 ocfs2_process_blocked_lock(osb, lockres);
3286 return;
3287 }
3288 while (lockres->l_flags & OCFS2_LOCK_QUEUED) {
3289 lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_QUEUED, 0);
3290 spin_unlock_irqrestore(&lockres->l_lock, flags);
3291
3292 mlog(0, "Waiting on lockres %s\n", lockres->l_name);
3293
3294 status = ocfs2_wait_for_mask(&mw);
3295 if (status)
3296 mlog_errno(status);
3297
3298 spin_lock_irqsave(&lockres->l_lock, flags);
3299 }
3300 spin_unlock_irqrestore(&lockres->l_lock, flags);
3301 }
3302
3303 void ocfs2_simple_drop_lockres(struct ocfs2_super *osb,
3304 struct ocfs2_lock_res *lockres)
3305 {
3306 int ret;
3307
3308 ocfs2_mark_lockres_freeing(osb, lockres);
3309 ret = ocfs2_drop_lock(osb, lockres);
3310 if (ret)
3311 mlog_errno(ret);
3312 }
3313
3314 static void ocfs2_drop_osb_locks(struct ocfs2_super *osb)
3315 {
3316 ocfs2_simple_drop_lockres(osb, &osb->osb_super_lockres);
3317 ocfs2_simple_drop_lockres(osb, &osb->osb_rename_lockres);
3318 ocfs2_simple_drop_lockres(osb, &osb->osb_nfs_sync_lockres);
3319 ocfs2_simple_drop_lockres(osb, &osb->osb_orphan_scan.os_lockres);
3320 }
3321
3322 int ocfs2_drop_inode_locks(struct inode *inode)
3323 {
3324 int status, err;
3325
3326 /* No need to call ocfs2_mark_lockres_freeing here -
3327 * ocfs2_clear_inode has done it for us. */
3328
3329 err = ocfs2_drop_lock(OCFS2_SB(inode->i_sb),
3330 &OCFS2_I(inode)->ip_open_lockres);
3331 if (err < 0)
3332 mlog_errno(err);
3333
3334 status = err;
3335
3336 err = ocfs2_drop_lock(OCFS2_SB(inode->i_sb),
3337 &OCFS2_I(inode)->ip_inode_lockres);
3338 if (err < 0)
3339 mlog_errno(err);
3340 if (err < 0 && !status)
3341 status = err;
3342
3343 err = ocfs2_drop_lock(OCFS2_SB(inode->i_sb),
3344 &OCFS2_I(inode)->ip_rw_lockres);
3345 if (err < 0)
3346 mlog_errno(err);
3347 if (err < 0 && !status)
3348 status = err;
3349
3350 return status;
3351 }
3352
3353 static unsigned int ocfs2_prepare_downconvert(struct ocfs2_lock_res *lockres,
3354 int new_level)
3355 {
3356 assert_spin_locked(&lockres->l_lock);
3357
3358 BUG_ON(lockres->l_blocking <= DLM_LOCK_NL);
3359
3360 if (lockres->l_level <= new_level) {
3361 mlog(ML_ERROR, "lockres %s, lvl %d <= %d, blcklst %d, mask %d, "
3362 "type %d, flags 0x%lx, hold %d %d, act %d %d, req %d, "
3363 "block %d, pgen %d\n", lockres->l_name, lockres->l_level,
3364 new_level, list_empty(&lockres->l_blocked_list),
3365 list_empty(&lockres->l_mask_waiters), lockres->l_type,
3366 lockres->l_flags, lockres->l_ro_holders,
3367 lockres->l_ex_holders, lockres->l_action,
3368 lockres->l_unlock_action, lockres->l_requested,
3369 lockres->l_blocking, lockres->l_pending_gen);
3370 BUG();
3371 }
3372
3373 mlog(ML_BASTS, "lockres %s, level %d => %d, blocking %d\n",
3374 lockres->l_name, lockres->l_level, new_level, lockres->l_blocking);
3375
3376 lockres->l_action = OCFS2_AST_DOWNCONVERT;
3377 lockres->l_requested = new_level;
3378 lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
3379 return lockres_set_pending(lockres);
3380 }
3381
3382 static int ocfs2_downconvert_lock(struct ocfs2_super *osb,
3383 struct ocfs2_lock_res *lockres,
3384 int new_level,
3385 int lvb,
3386 unsigned int generation)
3387 {
3388 int ret;
3389 u32 dlm_flags = DLM_LKF_CONVERT;
3390
3391 mlog(ML_BASTS, "lockres %s, level %d => %d\n", lockres->l_name,
3392 lockres->l_level, new_level);
3393
3394 if (lvb)
3395 dlm_flags |= DLM_LKF_VALBLK;
3396
3397 ret = ocfs2_dlm_lock(osb->cconn,
3398 new_level,
3399 &lockres->l_lksb,
3400 dlm_flags,
3401 lockres->l_name,
3402 OCFS2_LOCK_ID_MAX_LEN - 1);
3403 lockres_clear_pending(lockres, generation, osb);
3404 if (ret) {
3405 ocfs2_log_dlm_error("ocfs2_dlm_lock", ret, lockres);
3406 ocfs2_recover_from_dlm_error(lockres, 1);
3407 goto bail;
3408 }
3409
3410 ret = 0;
3411 bail:
3412 return ret;
3413 }
3414
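/*
 * Rough shape of how the two helpers above are meant to be paired, taken
 * from the downconvert path further below (local names are illustrative):
 *
 *	spin_lock_irqsave(&lockres->l_lock, flags);
 *	gen = ocfs2_prepare_downconvert(lockres, new_level);
 *	spin_unlock_irqrestore(&lockres->l_lock, flags);
 *	ret = ocfs2_downconvert_lock(osb, lockres, new_level, set_lvb, gen);
 *
 * The generation returned while l_lock is held lets
 * lockres_clear_pending() tell whether the OCFS2_LOCK_PENDING it is about
 * to clear still belongs to this particular request.
 */
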
3415 /* returns 1 when the caller should unlock and call ocfs2_dlm_unlock */
3416 static int ocfs2_prepare_cancel_convert(struct ocfs2_super *osb,
3417 struct ocfs2_lock_res *lockres)
3418 {
3419 assert_spin_locked(&lockres->l_lock);
3420
3421 if (lockres->l_unlock_action == OCFS2_UNLOCK_CANCEL_CONVERT) {
3422 /* If we're already trying to cancel a lock conversion
3423 * then just drop the spinlock and allow the caller to
3424 * requeue this lock. */
3425 mlog(ML_BASTS, "lockres %s, skip convert\n", lockres->l_name);
3426 return 0;
3427 }
3428
3429 	/* were we in a convert when the bast fired? */
3430 BUG_ON(lockres->l_action != OCFS2_AST_CONVERT &&
3431 lockres->l_action != OCFS2_AST_DOWNCONVERT);
3432 /* set things up for the unlockast to know to just
3433 * clear out the ast_action and unset busy, etc. */
3434 lockres->l_unlock_action = OCFS2_UNLOCK_CANCEL_CONVERT;
3435
3436 mlog_bug_on_msg(!(lockres->l_flags & OCFS2_LOCK_BUSY),
3437 "lock %s, invalid flags: 0x%lx\n",
3438 lockres->l_name, lockres->l_flags);
3439
3440 mlog(ML_BASTS, "lockres %s\n", lockres->l_name);
3441
3442 return 1;
3443 }
3444
3445 static int ocfs2_cancel_convert(struct ocfs2_super *osb,
3446 struct ocfs2_lock_res *lockres)
3447 {
3448 int ret;
3449
3450 ret = ocfs2_dlm_unlock(osb->cconn, &lockres->l_lksb,
3451 DLM_LKF_CANCEL);
3452 if (ret) {
3453 ocfs2_log_dlm_error("ocfs2_dlm_unlock", ret, lockres);
3454 ocfs2_recover_from_dlm_error(lockres, 0);
3455 }
3456
3457 mlog(ML_BASTS, "lockres %s\n", lockres->l_name);
3458
3459 return ret;
3460 }
3461
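/*
 * The expected calling pattern for the cancel pair above, as used by
 * ocfs2_unblock_lock() below (illustrative; error handling trimmed):
 *
 *	spin_lock_irqsave(&lockres->l_lock, flags);
 *	...
 *	ret = ocfs2_prepare_cancel_convert(osb, lockres);
 *	spin_unlock_irqrestore(&lockres->l_lock, flags);
 *	if (ret)
 *		ocfs2_cancel_convert(osb, lockres);
 *
 * ocfs2_prepare_cancel_convert() decides under the spinlock whether a
 * cancel is needed; the actual DLM call happens with the spinlock dropped.
 */
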
3462 static int ocfs2_unblock_lock(struct ocfs2_super *osb,
3463 struct ocfs2_lock_res *lockres,
3464 struct ocfs2_unblock_ctl *ctl)
3465 {
3466 unsigned long flags;
3467 int blocking;
3468 int new_level;
3469 int level;
3470 int ret = 0;
3471 int set_lvb = 0;
3472 unsigned int gen;
3473
3474 spin_lock_irqsave(&lockres->l_lock, flags);
3475
3476 recheck:
3477 /*
3478 * Is it still blocking? If not, we have no more work to do.
3479 */
3480 if (!(lockres->l_flags & OCFS2_LOCK_BLOCKED)) {
3481 BUG_ON(lockres->l_blocking != DLM_LOCK_NL);
3482 spin_unlock_irqrestore(&lockres->l_lock, flags);
3483 ret = 0;
3484 goto leave;
3485 }
3486
3487 if (lockres->l_flags & OCFS2_LOCK_BUSY) {
3488 /* XXX
3489 * This is a *big* race. The OCFS2_LOCK_PENDING flag
3490 * exists entirely for one reason - another thread has set
3491 * OCFS2_LOCK_BUSY, but has *NOT* yet called dlm_lock().
3492 *
3493 * If we do ocfs2_cancel_convert() before the other thread
3494 * calls dlm_lock(), our cancel will do nothing. We will
3495 * get no ast, and we will have no way of knowing the
3496 * cancel failed. Meanwhile, the other thread will call
3497 * into dlm_lock() and wait...forever.
3498 *
3499 * Why forever? Because another node has asked for the
3500 * lock first; that's why we're here in unblock_lock().
3501 *
3502 * The solution is OCFS2_LOCK_PENDING. When PENDING is
3503 * set, we just requeue the unblock. Only when the other
3504 * thread has called dlm_lock() and cleared PENDING will
3505 * we then cancel their request.
3506 *
3507 		 * All callers of dlm_lock() must set OCFS2_LOCK_PENDING
3508 		 * at the same time they set OCFS2_LOCK_BUSY. They must
3509 		 * clear OCFS2_LOCK_PENDING after dlm_lock() returns.
3510 */
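		/*
		 * A timeline of the race the PENDING flag closes (the thread
		 * names here are purely illustrative):
		 *
		 *	converting thread		downconvert thread
		 *	-----------------		------------------
		 *	set BUSY | PENDING
		 *	drop l_lock
		 *					sees BUSY + PENDING:
		 *					  requeue, retry later
		 *	ocfs2_dlm_lock()
		 *	clear PENDING
		 *					sees BUSY, !PENDING:
		 *					  safe to cancel convert
		 */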
3511 if (lockres->l_flags & OCFS2_LOCK_PENDING) {
3512 mlog(ML_BASTS, "lockres %s, ReQ: Pending\n",
3513 lockres->l_name);
3514 goto leave_requeue;
3515 }
3516
3517 ctl->requeue = 1;
3518 ret = ocfs2_prepare_cancel_convert(osb, lockres);
3519 spin_unlock_irqrestore(&lockres->l_lock, flags);
3520 if (ret) {
3521 ret = ocfs2_cancel_convert(osb, lockres);
3522 if (ret < 0)
3523 mlog_errno(ret);
3524 }
3525 goto leave;
3526 }
3527
3528 /*
3529 * This prevents livelocks. OCFS2_LOCK_UPCONVERT_FINISHING flag is
3530 * set when the ast is received for an upconvert just before the
3531 * OCFS2_LOCK_BUSY flag is cleared. Now if the fs received a bast
3532 * on the heels of the ast, we want to delay the downconvert just
3533 * enough to allow the up requestor to do its task. Because this
3534 * lock is in the blocked queue, the lock will be downconverted
3535 * as soon as the requestor is done with the lock.
3536 */
3537 if (lockres->l_flags & OCFS2_LOCK_UPCONVERT_FINISHING)
3538 goto leave_requeue;
3539
3540 /*
3541 * How can we block and yet be at NL? We were trying to upconvert
3542 * from NL and got canceled. The code comes back here, and now
3543 * we notice and clear BLOCKING.
3544 */
3545 if (lockres->l_level == DLM_LOCK_NL) {
3546 BUG_ON(lockres->l_ex_holders || lockres->l_ro_holders);
3547 mlog(ML_BASTS, "lockres %s, Aborting dc\n", lockres->l_name);
3548 lockres->l_blocking = DLM_LOCK_NL;
3549 lockres_clear_flags(lockres, OCFS2_LOCK_BLOCKED);
3550 spin_unlock_irqrestore(&lockres->l_lock, flags);
3551 goto leave;
3552 }
3553
3554 /* if we're blocking an exclusive and we have *any* holders,
3555 * then requeue. */
3556 if ((lockres->l_blocking == DLM_LOCK_EX)
3557 && (lockres->l_ex_holders || lockres->l_ro_holders)) {
3558 mlog(ML_BASTS, "lockres %s, ReQ: EX/PR Holders %u,%u\n",
3559 lockres->l_name, lockres->l_ex_holders,
3560 lockres->l_ro_holders);
3561 goto leave_requeue;
3562 }
3563
3564 /* If it's a PR we're blocking, then only
3565 * requeue if we've got any EX holders */
3566 if (lockres->l_blocking == DLM_LOCK_PR &&
3567 lockres->l_ex_holders) {
3568 mlog(ML_BASTS, "lockres %s, ReQ: EX Holders %u\n",
3569 lockres->l_name, lockres->l_ex_holders);
3570 goto leave_requeue;
3571 }
3572
3573 /*
3574 * Can we get a lock in this state if the holder counts are
3575 * zero? The meta data unblock code used to check this.
3576 */
3577 if ((lockres->l_ops->flags & LOCK_TYPE_REQUIRES_REFRESH)
3578 && (lockres->l_flags & OCFS2_LOCK_REFRESHING)) {
3579 mlog(ML_BASTS, "lockres %s, ReQ: Lock Refreshing\n",
3580 lockres->l_name);
3581 goto leave_requeue;
3582 }
3583
3584 new_level = ocfs2_highest_compat_lock_level(lockres->l_blocking);
3585
3586 if (lockres->l_ops->check_downconvert
3587 && !lockres->l_ops->check_downconvert(lockres, new_level)) {
3588 mlog(ML_BASTS, "lockres %s, ReQ: Checkpointing\n",
3589 lockres->l_name);
3590 goto leave_requeue;
3591 }
3592
3593 /* If we get here, then we know that there are no more
3594 * incompatible holders (and anyone asking for an incompatible
3595 * lock is blocked). We can now downconvert the lock */
3596 if (!lockres->l_ops->downconvert_worker)
3597 goto downconvert;
3598
3599 /* Some lockres types want to do a bit of work before
3600 * downconverting a lock. Allow that here. The worker function
3601 * may sleep, so we save off a copy of what we're blocking as
3602 * it may change while we're not holding the spin lock. */
3603 blocking = lockres->l_blocking;
3604 level = lockres->l_level;
3605 spin_unlock_irqrestore(&lockres->l_lock, flags);
3606
3607 ctl->unblock_action = lockres->l_ops->downconvert_worker(lockres, blocking);
3608
3609 if (ctl->unblock_action == UNBLOCK_STOP_POST) {
3610 mlog(ML_BASTS, "lockres %s, UNBLOCK_STOP_POST\n",
3611 lockres->l_name);
3612 goto leave;
3613 }
3614
3615 spin_lock_irqsave(&lockres->l_lock, flags);
3616 if ((blocking != lockres->l_blocking) || (level != lockres->l_level)) {
3617 /* If this changed underneath us, then we can't drop
3618 * it just yet. */
3619 mlog(ML_BASTS, "lockres %s, block=%d:%d, level=%d:%d, "
3620 "Recheck\n", lockres->l_name, blocking,
3621 lockres->l_blocking, level, lockres->l_level);
3622 goto recheck;
3623 }
3624
3625 downconvert:
3626 ctl->requeue = 0;
3627
3628 if (lockres->l_ops->flags & LOCK_TYPE_USES_LVB) {
3629 if (lockres->l_level == DLM_LOCK_EX)
3630 set_lvb = 1;
3631
3632 /*
3633 * We only set the lvb if the lock has been fully
3634 * refreshed - otherwise we risk setting stale
3635 		 * data. If we don't set it, there's no need to actually clear
3636 		 * out the lvb here as its value is still valid.
3637 */
3638 if (set_lvb && !(lockres->l_flags & OCFS2_LOCK_NEEDS_REFRESH))
3639 lockres->l_ops->set_lvb(lockres);
3640 }
3641
3642 gen = ocfs2_prepare_downconvert(lockres, new_level);
3643 spin_unlock_irqrestore(&lockres->l_lock, flags);
3644 ret = ocfs2_downconvert_lock(osb, lockres, new_level, set_lvb,
3645 gen);
3646
3647 leave:
3648 if (ret)
3649 mlog_errno(ret);
3650 return ret;
3651
3652 leave_requeue:
3653 spin_unlock_irqrestore(&lockres->l_lock, flags);
3654 ctl->requeue = 1;
3655
3656 return 0;
3657 }
3658
3659 static int ocfs2_data_convert_worker(struct ocfs2_lock_res *lockres,
3660 int blocking)
3661 {
3662 struct inode *inode;
3663 struct address_space *mapping;
3664 struct ocfs2_inode_info *oi;
3665
3666 inode = ocfs2_lock_res_inode(lockres);
3667 mapping = inode->i_mapping;
3668
3669 if (S_ISDIR(inode->i_mode)) {
3670 oi = OCFS2_I(inode);
3671 oi->ip_dir_lock_gen++;
3672 mlog(0, "generation: %u\n", oi->ip_dir_lock_gen);
3673 goto out;
3674 }
3675
3676 if (!S_ISREG(inode->i_mode))
3677 goto out;
3678
3679 /*
3680 * We need this before the filemap_fdatawrite() so that it can
3681 * transfer the dirty bit from the PTE to the
3682 * page. Unfortunately this means that even for EX->PR
3683 * downconverts, we'll lose our mappings and have to build
3684 * them up again.
3685 */
3686 unmap_mapping_range(mapping, 0, 0, 0);
3687
3688 if (filemap_fdatawrite(mapping)) {
3689 mlog(ML_ERROR, "Could not sync inode %llu for downconvert!",
3690 (unsigned long long)OCFS2_I(inode)->ip_blkno);
3691 }
3692 sync_mapping_buffers(mapping);
3693 if (blocking == DLM_LOCK_EX) {
3694 truncate_inode_pages(mapping, 0);
3695 } else {
3696 /* We only need to wait on the I/O if we're not also
3697 * truncating pages because truncate_inode_pages waits
3698 * for us above. We don't truncate pages if we're
3699 * blocking anything < EXMODE because we want to keep
3700 * them around in that case. */
3701 filemap_fdatawait(mapping);
3702 }
3703
3704 out:
3705 return UNBLOCK_CONTINUE;
3706 }
3707
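/*
 * Net effect of the worker above on the page cache, per blocking mode
 * (summary only):
 *
 *	blocking == DLM_LOCK_EX:  unmap, write back and truncate all pages
 *	blocking == DLM_LOCK_PR:  unmap and write back, but keep the pages
 *				  cached since we only drop to PR
 *
 * Directories just bump ip_dir_lock_gen, which lets readers notice that
 * the directory lock has cycled.
 */
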
3708 static int ocfs2_ci_checkpointed(struct ocfs2_caching_info *ci,
3709 struct ocfs2_lock_res *lockres,
3710 int new_level)
3711 {
3712 int checkpointed = ocfs2_ci_fully_checkpointed(ci);
3713
3714 BUG_ON(new_level != DLM_LOCK_NL && new_level != DLM_LOCK_PR);
3715 BUG_ON(lockres->l_level != DLM_LOCK_EX && !checkpointed);
3716
3717 if (checkpointed)
3718 return 1;
3719
3720 ocfs2_start_checkpoint(OCFS2_SB(ocfs2_metadata_cache_get_super(ci)));
3721 return 0;
3722 }
3723
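/*
 * How the checkpoint test above feeds back into the downconvert decision
 * (sketch of the "Checkpointing" requeue in ocfs2_unblock_lock()):
 *
 *	if (lockres->l_ops->check_downconvert &&
 *	    !lockres->l_ops->check_downconvert(lockres, new_level))
 *		goto leave_requeue;
 *
 * Returning 0 therefore just kicks off a checkpoint and defers the
 * downconvert until the journal has caught up.
 */
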
3724 static int ocfs2_check_meta_downconvert(struct ocfs2_lock_res *lockres,
3725 int new_level)
3726 {
3727 struct inode *inode = ocfs2_lock_res_inode(lockres);
3728
3729 return ocfs2_ci_checkpointed(INODE_CACHE(inode), lockres, new_level);
3730 }
3731
3732 static void ocfs2_set_meta_lvb(struct ocfs2_lock_res *lockres)
3733 {
3734 struct inode *inode = ocfs2_lock_res_inode(lockres);
3735
3736 __ocfs2_stuff_meta_lvb(inode);
3737 }
3738
3739 /*
3740 * Does the final reference drop on our dentry lock. Right now this
3741 * happens in the downconvert thread, but we could choose to simplify the
3742 * dlmglue API and push these off to the ocfs2_wq in the future.
3743 */
3744 static void ocfs2_dentry_post_unlock(struct ocfs2_super *osb,
3745 struct ocfs2_lock_res *lockres)
3746 {
3747 struct ocfs2_dentry_lock *dl = ocfs2_lock_res_dl(lockres);
3748 ocfs2_dentry_lock_put(osb, dl);
3749 }
3750
3751 /*
3752 * d_delete() matching dentries before the lock downconvert.
3753 *
3754 * At this point, any process waiting to destroy the
3755 * dentry_lock due to last ref count is stopped by the
3756 * OCFS2_LOCK_QUEUED flag.
3757 *
3758 * We have two potential problems
3759 *
3760 * 1) If we do the last reference drop on our dentry_lock (via dput)
3761 * we'll wind up in ocfs2_release_dentry_lock(), waiting on
3762 * the downconvert to finish. Instead we take an elevated
3763 * reference and push the drop until after we've completed our
3764 * unblock processing.
3765 *
3766 * 2) There might be another process with a final reference,
3767 * waiting on us to finish processing. If this is the case, we
3768  *    detect it and exit out - there are no more dentries anyway.
3769 */
3770 static int ocfs2_dentry_convert_worker(struct ocfs2_lock_res *lockres,
3771 int blocking)
3772 {
3773 struct ocfs2_dentry_lock *dl = ocfs2_lock_res_dl(lockres);
3774 struct ocfs2_inode_info *oi = OCFS2_I(dl->dl_inode);
3775 struct dentry *dentry;
3776 unsigned long flags;
3777 int extra_ref = 0;
3778
3779 /*
3780 * This node is blocking another node from getting a read
3781 * lock. This happens when we've renamed within a
3782 * directory. We've forced the other nodes to d_delete(), but
3783 * we never actually dropped our lock because it's still
3784 * valid. The downconvert code will retain a PR for this node,
3785 * so there's no further work to do.
3786 */
3787 if (blocking == DLM_LOCK_PR)
3788 return UNBLOCK_CONTINUE;
3789
3790 /*
3791 * Mark this inode as potentially orphaned. The code in
3792 * ocfs2_delete_inode() will figure out whether it actually
3793 * needs to be freed or not.
3794 */
3795 spin_lock(&oi->ip_lock);
3796 oi->ip_flags |= OCFS2_INODE_MAYBE_ORPHANED;
3797 spin_unlock(&oi->ip_lock);
3798
3799 /*
3800 * Yuck. We need to make sure however that the check of
3801 * OCFS2_LOCK_FREEING and the extra reference are atomic with
3802 * respect to a reference decrement or the setting of that
3803 * flag.
3804 */
3805 spin_lock_irqsave(&lockres->l_lock, flags);
3806 spin_lock(&dentry_attach_lock);
3807 if (!(lockres->l_flags & OCFS2_LOCK_FREEING)
3808 && dl->dl_count) {
3809 dl->dl_count++;
3810 extra_ref = 1;
3811 }
3812 spin_unlock(&dentry_attach_lock);
3813 spin_unlock_irqrestore(&lockres->l_lock, flags);
3814
3815 mlog(0, "extra_ref = %d\n", extra_ref);
3816
3817 /*
3818 * We have a process waiting on us in ocfs2_dentry_iput(),
3819 * which means we can't have any more outstanding
3820 * aliases. There's no need to do any more work.
3821 */
3822 if (!extra_ref)
3823 return UNBLOCK_CONTINUE;
3824
3825 spin_lock(&dentry_attach_lock);
3826 while (1) {
3827 dentry = ocfs2_find_local_alias(dl->dl_inode,
3828 dl->dl_parent_blkno, 1);
3829 if (!dentry)
3830 break;
3831 spin_unlock(&dentry_attach_lock);
3832
3833 mlog(0, "d_delete(%.*s);\n", dentry->d_name.len,
3834 dentry->d_name.name);
3835
3836 /*
3837 * The following dcache calls may do an
3838 * iput(). Normally we don't want that from the
3839 * downconverting thread, but in this case it's ok
3840 * because the requesting node already has an
3841 * exclusive lock on the inode, so it can't be queued
3842 * for a downconvert.
3843 */
3844 d_delete(dentry);
3845 dput(dentry);
3846
3847 spin_lock(&dentry_attach_lock);
3848 }
3849 spin_unlock(&dentry_attach_lock);
3850
3851 /*
3852 * If we are the last holder of this dentry lock, there is no
3853 * reason to downconvert so skip straight to the unlock.
3854 */
3855 if (dl->dl_count == 1)
3856 return UNBLOCK_STOP_POST;
3857
3858 return UNBLOCK_CONTINUE_POST;
3859 }
3860
3861 static int ocfs2_check_refcount_downconvert(struct ocfs2_lock_res *lockres,
3862 int new_level)
3863 {
3864 struct ocfs2_refcount_tree *tree =
3865 ocfs2_lock_res_refcount_tree(lockres);
3866
3867 return ocfs2_ci_checkpointed(&tree->rf_ci, lockres, new_level);
3868 }
3869
3870 static int ocfs2_refcount_convert_worker(struct ocfs2_lock_res *lockres,
3871 int blocking)
3872 {
3873 struct ocfs2_refcount_tree *tree =
3874 ocfs2_lock_res_refcount_tree(lockres);
3875
3876 ocfs2_metadata_cache_purge(&tree->rf_ci);
3877
3878 return UNBLOCK_CONTINUE;
3879 }
3880
3881 static void ocfs2_set_qinfo_lvb(struct ocfs2_lock_res *lockres)
3882 {
3883 struct ocfs2_qinfo_lvb *lvb;
3884 struct ocfs2_mem_dqinfo *oinfo = ocfs2_lock_res_qinfo(lockres);
3885 struct mem_dqinfo *info = sb_dqinfo(oinfo->dqi_gi.dqi_sb,
3886 oinfo->dqi_gi.dqi_type);
3887
3888 lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
3889 lvb->lvb_version = OCFS2_QINFO_LVB_VERSION;
3890 lvb->lvb_bgrace = cpu_to_be32(info->dqi_bgrace);
3891 lvb->lvb_igrace = cpu_to_be32(info->dqi_igrace);
3892 lvb->lvb_syncms = cpu_to_be32(oinfo->dqi_syncms);
3893 lvb->lvb_blocks = cpu_to_be32(oinfo->dqi_gi.dqi_blocks);
3894 lvb->lvb_free_blk = cpu_to_be32(oinfo->dqi_gi.dqi_free_blk);
3895 lvb->lvb_free_entry = cpu_to_be32(oinfo->dqi_gi.dqi_free_entry);
3896 }
3897
3898 void ocfs2_qinfo_unlock(struct ocfs2_mem_dqinfo *oinfo, int ex)
3899 {
3900 struct ocfs2_lock_res *lockres = &oinfo->dqi_gqlock;
3901 struct ocfs2_super *osb = OCFS2_SB(oinfo->dqi_gi.dqi_sb);
3902 int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
3903
3904 if (!ocfs2_is_hard_readonly(osb) && !ocfs2_mount_local(osb))
3905 ocfs2_cluster_unlock(osb, lockres, level);
3906 }
3907
3908 static int ocfs2_refresh_qinfo(struct ocfs2_mem_dqinfo *oinfo)
3909 {
3910 struct mem_dqinfo *info = sb_dqinfo(oinfo->dqi_gi.dqi_sb,
3911 oinfo->dqi_gi.dqi_type);
3912 struct ocfs2_lock_res *lockres = &oinfo->dqi_gqlock;
3913 struct ocfs2_qinfo_lvb *lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
3914 struct buffer_head *bh = NULL;
3915 struct ocfs2_global_disk_dqinfo *gdinfo;
3916 int status = 0;
3917
3918 if (ocfs2_dlm_lvb_valid(&lockres->l_lksb) &&
3919 lvb->lvb_version == OCFS2_QINFO_LVB_VERSION) {
3920 info->dqi_bgrace = be32_to_cpu(lvb->lvb_bgrace);
3921 info->dqi_igrace = be32_to_cpu(lvb->lvb_igrace);
3922 oinfo->dqi_syncms = be32_to_cpu(lvb->lvb_syncms);
3923 oinfo->dqi_gi.dqi_blocks = be32_to_cpu(lvb->lvb_blocks);
3924 oinfo->dqi_gi.dqi_free_blk = be32_to_cpu(lvb->lvb_free_blk);
3925 oinfo->dqi_gi.dqi_free_entry =
3926 be32_to_cpu(lvb->lvb_free_entry);
3927 } else {
3928 status = ocfs2_read_quota_phys_block(oinfo->dqi_gqinode,
3929 oinfo->dqi_giblk, &bh);
3930 if (status) {
3931 mlog_errno(status);
3932 goto bail;
3933 }
3934 gdinfo = (struct ocfs2_global_disk_dqinfo *)
3935 (bh->b_data + OCFS2_GLOBAL_INFO_OFF);
3936 info->dqi_bgrace = le32_to_cpu(gdinfo->dqi_bgrace);
3937 info->dqi_igrace = le32_to_cpu(gdinfo->dqi_igrace);
3938 oinfo->dqi_syncms = le32_to_cpu(gdinfo->dqi_syncms);
3939 oinfo->dqi_gi.dqi_blocks = le32_to_cpu(gdinfo->dqi_blocks);
3940 oinfo->dqi_gi.dqi_free_blk = le32_to_cpu(gdinfo->dqi_free_blk);
3941 oinfo->dqi_gi.dqi_free_entry =
3942 le32_to_cpu(gdinfo->dqi_free_entry);
3943 brelse(bh);
3944 ocfs2_track_lock_refresh(lockres);
3945 }
3946
3947 bail:
3948 return status;
3949 }
3950
3951 /* Lock quota info, this function expects at least shared lock on the quota file
3952 * so that we can safely refresh quota info from disk. */
3953 int ocfs2_qinfo_lock(struct ocfs2_mem_dqinfo *oinfo, int ex)
3954 {
3955 struct ocfs2_lock_res *lockres = &oinfo->dqi_gqlock;
3956 struct ocfs2_super *osb = OCFS2_SB(oinfo->dqi_gi.dqi_sb);
3957 int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
3958 int status = 0;
3959
3960 /* On RO devices, locking really isn't needed... */
3961 if (ocfs2_is_hard_readonly(osb)) {
3962 if (ex)
3963 status = -EROFS;
3964 goto bail;
3965 }
3966 if (ocfs2_mount_local(osb))
3967 goto bail;
3968
3969 status = ocfs2_cluster_lock(osb, lockres, level, 0, 0);
3970 if (status < 0) {
3971 mlog_errno(status);
3972 goto bail;
3973 }
3974 if (!ocfs2_should_refresh_lock_res(lockres))
3975 goto bail;
3976 /* OK, we have the lock but we need to refresh the quota info */
3977 status = ocfs2_refresh_qinfo(oinfo);
3978 if (status)
3979 ocfs2_qinfo_unlock(oinfo, ex);
3980 ocfs2_complete_lock_res_refresh(lockres, status);
3981 bail:
3982 return status;
3983 }
3984
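/*
 * A minimal usage sketch for the quota info cluster lock (the caller shown
 * here is hypothetical):
 *
 *	status = ocfs2_qinfo_lock(oinfo, 1);	   (1 => DLM_LOCK_EX)
 *	if (status < 0)
 *		return status;
 *	... modify the global quota info under the lock ...
 *	ocfs2_qinfo_unlock(oinfo, 1);
 *
 * The lock and unlock levels must match; passing ex = 0 takes the lock at
 * DLM_LOCK_PR, which is enough for the refresh-from-disk done above.
 */
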
3985 int ocfs2_refcount_lock(struct ocfs2_refcount_tree *ref_tree, int ex)
3986 {
3987 int status;
3988 int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
3989 struct ocfs2_lock_res *lockres = &ref_tree->rf_lockres;
3990 struct ocfs2_super *osb = lockres->l_priv;
3991
3992
3993 if (ocfs2_is_hard_readonly(osb))
3994 return -EROFS;
3995
3996 if (ocfs2_mount_local(osb))
3997 return 0;
3998
3999 status = ocfs2_cluster_lock(osb, lockres, level, 0, 0);
4000 if (status < 0)
4001 mlog_errno(status);
4002
4003 return status;
4004 }
4005
4006 void ocfs2_refcount_unlock(struct ocfs2_refcount_tree *ref_tree, int ex)
4007 {
4008 int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
4009 struct ocfs2_lock_res *lockres = &ref_tree->rf_lockres;
4010 struct ocfs2_super *osb = lockres->l_priv;
4011
4012 if (!ocfs2_mount_local(osb))
4013 ocfs2_cluster_unlock(osb, lockres, level);
4014 }
4015
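/*
 * The refcount tree lock follows the same take/modify/release shape
 * (hypothetical caller, error handling trimmed):
 *
 *	ret = ocfs2_refcount_lock(ref_tree, 1);
 *	if (ret)
 *		return ret;
 *	... update the refcount tree ...
 *	ocfs2_refcount_unlock(ref_tree, 1);
 *
 * On local (non-clustered) mounts both calls are no-ops apart from the
 * hard-readonly check in ocfs2_refcount_lock().
 */
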
4016 static void ocfs2_process_blocked_lock(struct ocfs2_super *osb,
4017 struct ocfs2_lock_res *lockres)
4018 {
4019 int status;
4020 struct ocfs2_unblock_ctl ctl = {0, 0,};
4021 unsigned long flags;
4022
4023 /* Our reference to the lockres in this function can be
4024 * considered valid until we remove the OCFS2_LOCK_QUEUED
4025 * flag. */
4026
4027 BUG_ON(!lockres);
4028 BUG_ON(!lockres->l_ops);
4029
4030 mlog(ML_BASTS, "lockres %s blocked\n", lockres->l_name);
4031
4032 /* Detect whether a lock has been marked as going away while
4033 * the downconvert thread was processing other things. A lock can
4034 * still be marked with OCFS2_LOCK_FREEING after this check,
4035 * but short circuiting here will still save us some
4036 * performance. */
4037 spin_lock_irqsave(&lockres->l_lock, flags);
4038 if (lockres->l_flags & OCFS2_LOCK_FREEING)
4039 goto unqueue;
4040 spin_unlock_irqrestore(&lockres->l_lock, flags);
4041
4042 status = ocfs2_unblock_lock(osb, lockres, &ctl);
4043 if (status < 0)
4044 mlog_errno(status);
4045
4046 spin_lock_irqsave(&lockres->l_lock, flags);
4047 unqueue:
4048 if (lockres->l_flags & OCFS2_LOCK_FREEING || !ctl.requeue) {
4049 lockres_clear_flags(lockres, OCFS2_LOCK_QUEUED);
4050 } else
4051 ocfs2_schedule_blocked_lock(osb, lockres);
4052
4053 mlog(ML_BASTS, "lockres %s, requeue = %s.\n", lockres->l_name,
4054 ctl.requeue ? "yes" : "no");
4055 spin_unlock_irqrestore(&lockres->l_lock, flags);
4056
4057 if (ctl.unblock_action != UNBLOCK_CONTINUE
4058 && lockres->l_ops->post_unlock)
4059 lockres->l_ops->post_unlock(osb, lockres);
4060 }
4061
4062 static void ocfs2_schedule_blocked_lock(struct ocfs2_super *osb,
4063 struct ocfs2_lock_res *lockres)
4064 {
4065 unsigned long flags;
4066
4067 assert_spin_locked(&lockres->l_lock);
4068
4069 if (lockres->l_flags & OCFS2_LOCK_FREEING) {
4070 /* Do not schedule a lock for downconvert when it's on
4071 * the way to destruction - any nodes wanting access
4072 * to the resource will get it soon. */
4073 mlog(ML_BASTS, "lockres %s won't be scheduled: flags 0x%lx\n",
4074 lockres->l_name, lockres->l_flags);
4075 return;
4076 }
4077
4078 lockres_or_flags(lockres, OCFS2_LOCK_QUEUED);
4079
4080 spin_lock_irqsave(&osb->dc_task_lock, flags);
4081 if (list_empty(&lockres->l_blocked_list)) {
4082 list_add_tail(&lockres->l_blocked_list,
4083 &osb->blocked_lock_list);
4084 osb->blocked_lock_count++;
4085 }
4086 spin_unlock_irqrestore(&osb->dc_task_lock, flags);
4087 }
4088
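/*
 * Queueing sketch: ocfs2_schedule_blocked_lock() is the producer side of
 * osb->blocked_lock_list and ocfs2_downconvert_thread_do_work() below is
 * the consumer, both serialized by osb->dc_task_lock:
 *
 *	blocking/requeue path			downconvert thread
 *	---------------------			------------------
 *	set OCFS2_LOCK_QUEUED
 *	list_add_tail(&lockres->l_blocked_list)
 *	(typically followed by a wakeup)
 *						list_del_init() + process
 *						clear OCFS2_LOCK_QUEUED
 *
 * The QUEUED flag is what ocfs2_mark_lockres_freeing() waits on above.
 */
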
4089 static void ocfs2_downconvert_thread_do_work(struct ocfs2_super *osb)
4090 {
4091 unsigned long processed;
4092 unsigned long flags;
4093 struct ocfs2_lock_res *lockres;
4094
4095 spin_lock_irqsave(&osb->dc_task_lock, flags);
4096 /* grab this early so we know to try again if a state change and
4097 	 * wake happen part-way through our work */
4098 osb->dc_work_sequence = osb->dc_wake_sequence;
4099
4100 processed = osb->blocked_lock_count;
4101 /*
4102 * blocked lock processing in this loop might call iput which can
4103 * remove items off osb->blocked_lock_list. Downconvert up to
4104 * 'processed' number of locks, but stop short if we had some
4105 * removed in ocfs2_mark_lockres_freeing when downconverting.
4106 */
4107 while (processed && !list_empty(&osb->blocked_lock_list)) {
4108 lockres = list_entry(osb->blocked_lock_list.next,
4109 struct ocfs2_lock_res, l_blocked_list);
4110 list_del_init(&lockres->l_blocked_list);
4111 osb->blocked_lock_count--;
4112 spin_unlock_irqrestore(&osb->dc_task_lock, flags);
4113
4114 BUG_ON(!processed);
4115 processed--;
4116
4117 ocfs2_process_blocked_lock(osb, lockres);
4118
4119 spin_lock_irqsave(&osb->dc_task_lock, flags);
4120 }
4121 spin_unlock_irqrestore(&osb->dc_task_lock, flags);
4122 }
4123
4124 static int ocfs2_downconvert_thread_lists_empty(struct ocfs2_super *osb)
4125 {
4126 int empty = 0;
4127 unsigned long flags;
4128
4129 spin_lock_irqsave(&osb->dc_task_lock, flags);
4130 if (list_empty(&osb->blocked_lock_list))
4131 empty = 1;
4132
4133 spin_unlock_irqrestore(&osb->dc_task_lock, flags);
4134 return empty;
4135 }
4136
4137 static int ocfs2_downconvert_thread_should_wake(struct ocfs2_super *osb)
4138 {
4139 int should_wake = 0;
4140 unsigned long flags;
4141
4142 spin_lock_irqsave(&osb->dc_task_lock, flags);
4143 if (osb->dc_work_sequence != osb->dc_wake_sequence)
4144 should_wake = 1;
4145 spin_unlock_irqrestore(&osb->dc_task_lock, flags);
4146
4147 return should_wake;
4148 }
4149
4150 static int ocfs2_downconvert_thread(void *arg)
4151 {
4152 int status = 0;
4153 struct ocfs2_super *osb = arg;
4154
4155 /* only quit once we've been asked to stop and there is no more
4156 * work available */
4157 while (!(kthread_should_stop() &&
4158 ocfs2_downconvert_thread_lists_empty(osb))) {
4159
4160 wait_event_interruptible(osb->dc_event,
4161 ocfs2_downconvert_thread_should_wake(osb) ||
4162 kthread_should_stop());
4163
4164 mlog(0, "downconvert_thread: awoken\n");
4165
4166 ocfs2_downconvert_thread_do_work(osb);
4167 }
4168
4169 osb->dc_task = NULL;
4170 return status;
4171 }
4172
4173 void ocfs2_wake_downconvert_thread(struct ocfs2_super *osb)
4174 {
4175 unsigned long flags;
4176
4177 spin_lock_irqsave(&osb->dc_task_lock, flags);
4178 	/* make sure the downconvert thread gets a swipe at whatever changes
4179 	 * the caller may have made to the lock state */
4180 osb->dc_wake_sequence++;
4181 spin_unlock_irqrestore(&osb->dc_task_lock, flags);
4182 wake_up(&osb->dc_event);
4183 }
4184
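/*
 * The two sequence counters give a lost-wakeup-free idiom (sketch):
 *
 *	waker					downconvert thread
 *	-----					------------------
 *						dc_work_sequence = dc_wake_sequence
 *						... process blocked locks ...
 *	dc_wake_sequence++
 *	wake_up(&osb->dc_event)
 *						should_wake sees
 *						work_sequence != wake_sequence
 *						and loops again
 *
 * Because the work sequence is snapshotted before processing starts, a
 * wakeup that races with in-flight work is never lost.
 */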