Lines Matching refs:lockres

This is a cross-reference listing for the identifier lockres in fs/ocfs2/dlmfs/userdlm.c (Linux kernel). Only source lines containing a match are shown, so the line numbers below are non-contiguous; each group names the enclosing function and notes whether lockres is bound there as an argument or a local variable.
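The listing manipulates a handful of USER_LOCK_* state flags on lockres->l_flags. As a reading aid, here is what each flag appears to mean, inferred from the matching lines themselves rather than quoted from the source header:

    /* Meanings inferred from usage in this listing, not from the header:
     * USER_LOCK_BUSY         an ocfs2_dlm_lock()/ocfs2_dlm_unlock() call is
     *                        in flight (set at 473 and 635, cleared by the
     *                        ASTs at 148 and 256)
     * USER_LOCK_BLOCKED      another node wants an incompatible level
     *                        (set in user_bast(), line 209)
     * USER_LOCK_QUEUED       the unblock worker is queued (lines 167-173)
     * USER_LOCK_ATTACHED     a DLM-side lock exists for this lockres
     *                        (set at 147, cleared at 634)
     * USER_LOCK_IN_TEARDOWN  user_dlm_destroy_lock() is running (line 608)
     * USER_LOCK_IN_CANCEL    a convert-cancel request is in flight
     *                        (set at 332, cleared at 241 and 249)
     */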

In user_check_wait_flag() (lockres is an argument):
  37  static inline int user_check_wait_flag(struct user_lock_res *lockres,
  42          spin_lock(&lockres->l_lock);
  43          ret = lockres->l_flags & flag;
  44          spin_unlock(&lockres->l_lock);

In user_wait_on_busy_lock() (lockres is an argument):
  49  static inline void user_wait_on_busy_lock(struct user_lock_res *lockres)
  52          wait_event(lockres->l_event,
  53                     !user_check_wait_flag(lockres, USER_LOCK_BUSY));

In user_wait_on_blocked_lock() (lockres is an argument):
  56  static inline void user_wait_on_blocked_lock(struct user_lock_res *lockres)
  59          wait_event(lockres->l_event,
  60                     !user_check_wait_flag(lockres, USER_LOCK_BLOCKED));

In cluster_connection_from_user_lockres() (lockres is an argument):
  65  cluster_connection_from_user_lockres(struct user_lock_res *lockres)
  69          ip = container_of(lockres,

In user_dlm_inode_from_user_lockres() (lockres is an argument):
  76  user_dlm_inode_from_user_lockres(struct user_lock_res *lockres)
  80          ip = container_of(lockres,

In user_recover_from_dlm_error() (lockres is an argument):
  86  static inline void user_recover_from_dlm_error(struct user_lock_res *lockres)
  88          spin_lock(&lockres->l_lock);
  89          lockres->l_flags &= ~USER_LOCK_BUSY;
  90          spin_unlock(&lockres->l_lock);
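Both *_from_user_lockres() helpers above recover their containing object from the embedded lockres via container_of(). A minimal sketch of the pattern, assuming the lockres is embedded in a dlmfs_inode_private and the lksb is embedded in the lockres; the struct and field names are inferred from DLMFS_I(inode)->ip_lockres (line 541) and user_lksb_to_lock_res() (line 115), not shown verbatim in this listing:

    /* Sketch only: names are assumptions inferred from the listing. */
    static struct dlmfs_inode_private *
    ip_from_lockres(struct user_lock_res *lockres)
    {
            /* Subtracting the field offset recovers the containing
             * dlmfs_inode_private from its embedded lockres. */
            return container_of(lockres, struct dlmfs_inode_private,
                                ip_lockres);
    }

    static inline struct user_lock_res *
    user_lksb_to_lock_res(struct ocfs2_dlm_lksb *lksb)
    {
            /* Same trick for the DLM status block handed back to ASTs. */
            return container_of(lksb, struct user_lock_res, l_lksb);
    }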
In user_ast() (lockres is a local):
 115          struct user_lock_res *lockres = user_lksb_to_lock_res(lksb);
 119               lockres->l_namelen, lockres->l_name, lockres->l_level,
 120               lockres->l_requested);
 122          spin_lock(&lockres->l_lock);
 124          status = ocfs2_dlm_lock_status(&lockres->l_lksb);
 127                       status, lockres->l_namelen, lockres->l_name);
 128                  spin_unlock(&lockres->l_lock);
 132          mlog_bug_on_msg(lockres->l_requested == DLM_LOCK_IV,
 134                          lockres->l_namelen, lockres->l_name, lockres->l_flags);
 137          if (lockres->l_requested < lockres->l_level) {
 138                  if (lockres->l_requested <=
 139                      user_highest_compat_lock_level(lockres->l_blocking)) {
 140                          lockres->l_blocking = DLM_LOCK_NL;
 141                          lockres->l_flags &= ~USER_LOCK_BLOCKED;
 145          lockres->l_level = lockres->l_requested;
 146          lockres->l_requested = DLM_LOCK_IV;
 147          lockres->l_flags |= USER_LOCK_ATTACHED;
 148          lockres->l_flags &= ~USER_LOCK_BUSY;
 150          spin_unlock(&lockres->l_lock);
 152          wake_up(&lockres->l_event);
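user_highest_compat_lock_level(), called at lines 139, 364 and 409, never references lockres itself, so the match filter omits its body. A plausible reconstruction, mirroring the usual ocfs2 compatibility rule (nothing coexists with EX; readers coexist with readers); treat it as a sketch, not the file's verbatim source:

    static inline int user_highest_compat_lock_level(int level)
    {
            int new_level = DLM_LOCK_EX;

            if (level == DLM_LOCK_EX)
                    new_level = DLM_LOCK_NL;  /* EX blocks everything */
            else if (level == DLM_LOCK_PR)
                    new_level = DLM_LOCK_PR;  /* PR is compatible with PR */
            return new_level;
    }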
In user_dlm_grab_inode_ref() (lockres is an argument):
 155  static inline void user_dlm_grab_inode_ref(struct user_lock_res *lockres)
 158          inode = user_dlm_inode_from_user_lockres(lockres);

In __user_dlm_queue_lockres() (lockres is an argument):
 165  static void __user_dlm_queue_lockres(struct user_lock_res *lockres)
 167          if (!(lockres->l_flags & USER_LOCK_QUEUED)) {
 168                  user_dlm_grab_inode_ref(lockres);
 170                  INIT_WORK(&lockres->l_work, user_dlm_unblock_lock);
 172                  queue_work(user_dlm_worker, &lockres->l_work);
 173                  lockres->l_flags |= USER_LOCK_QUEUED;

In __user_dlm_cond_queue_lockres() (lockres is an argument):
 177  static void __user_dlm_cond_queue_lockres(struct user_lock_res *lockres)
 181          if (!(lockres->l_flags & USER_LOCK_BLOCKED))
 184          switch (lockres->l_blocking) {
 186                  if (!lockres->l_ex_holders && !lockres->l_ro_holders)
 190                  if (!lockres->l_ex_holders)
 198                  __user_dlm_queue_lockres(lockres);
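The case labels of the switch at line 184 do not mention lockres, so the filter drops them. Filling the gaps with the obvious structure (a sketch; the queue variable and the BUG() default are assumptions):

    switch (lockres->l_blocking) {
    case DLM_LOCK_EX:
            /* An exclusive request is blocked by any holder at all. */
            if (!lockres->l_ex_holders && !lockres->l_ro_holders)
                    queue = 1;
            break;
    case DLM_LOCK_PR:
            /* A read request is blocked only by exclusive holders. */
            if (!lockres->l_ex_holders)
                    queue = 1;
            break;
    default:
            BUG();
    }
    if (queue)
            __user_dlm_queue_lockres(lockres);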
In user_bast() (lockres is a local):
 203          struct user_lock_res *lockres = user_lksb_to_lock_res(lksb);
 206               lockres->l_namelen, lockres->l_name, level, lockres->l_level);
 208          spin_lock(&lockres->l_lock);
 209          lockres->l_flags |= USER_LOCK_BLOCKED;
 210          if (level > lockres->l_blocking)
 211                  lockres->l_blocking = level;
 213          __user_dlm_queue_lockres(lockres);
 214          spin_unlock(&lockres->l_lock);
 216          wake_up(&lockres->l_event);

In user_unlock_ast() (lockres is a local):
 221          struct user_lock_res *lockres = user_lksb_to_lock_res(lksb);
 224               lockres->l_namelen, lockres->l_name, lockres->l_flags);
 229          spin_lock(&lockres->l_lock);
 233          if (lockres->l_flags & USER_LOCK_IN_TEARDOWN
 234              && !(lockres->l_flags & USER_LOCK_IN_CANCEL)) {
 235                  lockres->l_level = DLM_LOCK_IV;
 240                  BUG_ON(!(lockres->l_flags & USER_LOCK_IN_CANCEL));
 241                  lockres->l_flags &= ~USER_LOCK_IN_CANCEL;
 244                  BUG_ON(!(lockres->l_flags & USER_LOCK_IN_CANCEL));
 246                  lockres->l_requested = DLM_LOCK_IV; /* cancel an ... */
 249                  lockres->l_flags &= ~USER_LOCK_IN_CANCEL;
 252          if (lockres->l_flags & USER_LOCK_BLOCKED)
 253                  __user_dlm_queue_lockres(lockres);
 256          lockres->l_flags &= ~USER_LOCK_BUSY;
 258          spin_unlock(&lockres->l_lock);
 260          wake_up(&lockres->l_event);
In user_dlm_drop_inode_ref() (lockres is an argument):
 278  static inline void user_dlm_drop_inode_ref(struct user_lock_res *lockres)
 281          inode = user_dlm_inode_from_user_lockres(lockres);

In user_dlm_unblock_lock() (lockres is a local):
 288          struct user_lock_res *lockres =
 291                  cluster_connection_from_user_lockres(lockres);
 293          mlog(0, "lockres %.*s\n", lockres->l_namelen, lockres->l_name);
 295          spin_lock(&lockres->l_lock);
 297          mlog_bug_on_msg(!(lockres->l_flags & USER_LOCK_QUEUED),
 299                          lockres->l_namelen, lockres->l_name, lockres->l_flags);
 303          lockres->l_flags &= ~USER_LOCK_QUEUED;
 310          if (!(lockres->l_flags & USER_LOCK_BLOCKED)) {
 312                       lockres->l_namelen, lockres->l_name);
 313                  spin_unlock(&lockres->l_lock);
 317          if (lockres->l_flags & USER_LOCK_IN_TEARDOWN) {
 319                       lockres->l_namelen, lockres->l_name);
 320                  spin_unlock(&lockres->l_lock);
 324          if (lockres->l_flags & USER_LOCK_BUSY) {
 325                  if (lockres->l_flags & USER_LOCK_IN_CANCEL) {
 327                               lockres->l_namelen, lockres->l_name);
 328                          spin_unlock(&lockres->l_lock);
 332                  lockres->l_flags |= USER_LOCK_IN_CANCEL;
 333                  spin_unlock(&lockres->l_lock);
 335                  status = ocfs2_dlm_unlock(conn, &lockres->l_lksb,
 338                          user_log_dlm_error("ocfs2_dlm_unlock", status, lockres);
 345          if ((lockres->l_blocking == DLM_LOCK_EX)
 346              && (lockres->l_ex_holders || lockres->l_ro_holders)) {
 347                  spin_unlock(&lockres->l_lock);
 349                       lockres->l_namelen, lockres->l_name,
 350                       lockres->l_ex_holders, lockres->l_ro_holders);
 354          if ((lockres->l_blocking == DLM_LOCK_PR)
 355              && lockres->l_ex_holders) {
 356                  spin_unlock(&lockres->l_lock);
 358                       lockres->l_namelen, lockres->l_name,
 359                       lockres->l_ex_holders);
 364          new_level = user_highest_compat_lock_level(lockres->l_blocking);
 365          lockres->l_requested = new_level;
 366          lockres->l_flags |= USER_LOCK_BUSY;
 368               lockres->l_namelen, lockres->l_name, lockres->l_level, new_level);
 369          spin_unlock(&lockres->l_lock);
 372          status = ocfs2_dlm_lock(conn, new_level, &lockres->l_lksb,
 374                                  lockres->l_name,
 375                                  lockres->l_namelen);
 377                  user_log_dlm_error("ocfs2_dlm_lock", status, lockres);
 378                  user_recover_from_dlm_error(lockres);
 382          user_dlm_drop_inode_ref(lockres);
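The worker either cancels an in-flight convert (the completion of that cancel arrives in user_unlock_ast(), lines 240-249) or downconverts to the highest level compatible with l_blocking. The flag arguments of the two DLM calls fall on non-matching lines (336 and 373), so they are elided above; a sketch of what they plausibly are, as an assumption based on the surrounding logic rather than verbatim source:

    /* Sketch of the elided arguments at lines 336 and 373. */
    status = ocfs2_dlm_unlock(conn, &lockres->l_lksb,
                              DLM_LKF_CANCEL);  /* cancel in-flight convert */
    ...
    status = ocfs2_dlm_lock(conn, new_level, &lockres->l_lksb,
                            DLM_LKF_CONVERT|DLM_LKF_VALBLK, /* downconvert,
                                                               keep the LVB */
                            lockres->l_name,
                            lockres->l_namelen);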
In user_dlm_inc_holders() (lockres is an argument):
 385  static inline void user_dlm_inc_holders(struct user_lock_res *lockres,
 390                  lockres->l_ex_holders++;
 393                  lockres->l_ro_holders++;

In user_may_continue_on_blocked_lock() (lockres is an argument):
 404  user_may_continue_on_blocked_lock(struct user_lock_res *lockres,
 407          BUG_ON(!(lockres->l_flags & USER_LOCK_BLOCKED));
 409          return wanted <= user_highest_compat_lock_level(lockres->l_blocking);
In user_dlm_cluster_lock() (lockres is an argument):
 412  int user_dlm_cluster_lock(struct user_lock_res *lockres,
 418          cluster_connection_from_user_lockres(lockres);
 423                       lockres->l_namelen, lockres->l_name);
 429               lockres->l_namelen, lockres->l_name, level, lkm_flags);
 437          spin_lock(&lockres->l_lock);
 438          if (lockres->l_flags & USER_LOCK_IN_TEARDOWN) {
 439                  spin_unlock(&lockres->l_lock);
 447          if ((lockres->l_flags & USER_LOCK_BUSY) &&
 448              (level > lockres->l_level)) {
 451                  spin_unlock(&lockres->l_lock);
 453                  user_wait_on_busy_lock(lockres);
 457          if ((lockres->l_flags & USER_LOCK_BLOCKED) &&
 458              (!user_may_continue_on_blocked_lock(lockres, level))) {
 461                  spin_unlock(&lockres->l_lock);
 463                  user_wait_on_blocked_lock(lockres);
 467          if (level > lockres->l_level) {
 469                  if (lockres->l_level != DLM_LOCK_IV)
 472                  lockres->l_requested = level;
 473                  lockres->l_flags |= USER_LOCK_BUSY;
 474                  spin_unlock(&lockres->l_lock);
 480                  status = ocfs2_dlm_lock(conn, level, &lockres->l_lksb,
 481                                          local_flags, lockres->l_name,
 482                                          lockres->l_namelen);
 487                                       status, lockres);
 488                  user_recover_from_dlm_error(lockres);
 492                  user_wait_on_busy_lock(lockres);
 496          user_dlm_inc_holders(lockres, level);
 497          spin_unlock(&lockres->l_lock);
In user_dlm_dec_holders() (lockres is an argument):
 504  static inline void user_dlm_dec_holders(struct user_lock_res *lockres,
 509                  BUG_ON(!lockres->l_ex_holders);
 510                  lockres->l_ex_holders--;
 513                  BUG_ON(!lockres->l_ro_holders);
 514                  lockres->l_ro_holders--;

In user_dlm_cluster_unlock() (lockres is an argument):
 521  void user_dlm_cluster_unlock(struct user_lock_res *lockres,
 527                       lockres->l_namelen, lockres->l_name);
 531          spin_lock(&lockres->l_lock);
 532          user_dlm_dec_holders(lockres, level);
 533          __user_dlm_cond_queue_lockres(lockres);
 534          spin_unlock(&lockres->l_lock);
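Taken together, user_dlm_cluster_lock() and user_dlm_cluster_unlock() bracket a critical section at a given DLM level. A minimal, hypothetical caller, assuming the lock side takes (lockres, level, lkm_flags) as the visible lines suggest; example_critical_section() is illustrative, not dlmfs code:

    static int example_critical_section(struct inode *inode)
    {
            /* ip_lockres lives in the dlmfs inode private data (line 541). */
            struct user_lock_res *lockres = &DLMFS_I(inode)->ip_lockres;
            int status;

            /* Block until a protected-read lock is granted cluster-wide. */
            status = user_dlm_cluster_lock(lockres, DLM_LOCK_PR, 0);
            if (status < 0)
                    return status;

            /* ... read shared state; other nodes may also hold PR ... */

            /* Drop our hold; __user_dlm_cond_queue_lockres() re-queues
             * the unblock worker if another node is waiting (531-534). */
            user_dlm_cluster_unlock(lockres, DLM_LOCK_PR);
            return 0;
    }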
In user_dlm_write_lvb() (lockres is a local):
 541          struct user_lock_res *lockres = &DLMFS_I(inode)->ip_lockres;
 546          spin_lock(&lockres->l_lock);
 548          BUG_ON(lockres->l_level < DLM_LOCK_EX);
 549          lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
 552          spin_unlock(&lockres->l_lock);

In user_dlm_read_lvb() (lockres is a local):
 557          struct user_lock_res *lockres = &DLMFS_I(inode)->ip_lockres;
 561          spin_lock(&lockres->l_lock);
 563          BUG_ON(lockres->l_level < DLM_LOCK_PR);
 564          if (ocfs2_dlm_lvb_valid(&lockres->l_lksb)) {
 565                  lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
 570          spin_unlock(&lockres->l_lock);
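The BUG_ONs encode the lock value block (LVB) protocol: writing requires holding at least EX, reading requires at least PR, and a read additionally checks ocfs2_dlm_lvb_valid() because the LVB can be stale after recovery. A hypothetical extension of the caller sketched earlier; the write helper's exact signature is hidden by the match filter, so the (inode, buf, len) form here is an assumption:

    /* Hypothetical: publish a value cluster-wide via the LVB. */
    status = user_dlm_cluster_lock(lockres, DLM_LOCK_EX, 0);
    if (status < 0)
            return status;
    /* Legal only because l_level is now DLM_LOCK_EX (line 548). */
    user_dlm_write_lvb(inode, buf, sizeof(buf));
    user_dlm_cluster_unlock(lockres, DLM_LOCK_EX);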
In user_dlm_lock_res_init() (lockres is an argument):
 574  void user_dlm_lock_res_init(struct user_lock_res *lockres,
 577          memset(lockres, 0, sizeof(*lockres));
 579          spin_lock_init(&lockres->l_lock);
 580          init_waitqueue_head(&lockres->l_event);
 581          lockres->l_level = DLM_LOCK_IV;
 582          lockres->l_requested = DLM_LOCK_IV;
 583          lockres->l_blocking = DLM_LOCK_IV;
 588          memcpy(lockres->l_name,
 591          lockres->l_namelen = dentry->d_name.len;
In user_dlm_destroy_lock() (lockres is an argument):
 594  int user_dlm_destroy_lock(struct user_lock_res *lockres)
 598          cluster_connection_from_user_lockres(lockres);
 600          mlog(ML_BASTS, "lockres %.*s\n", lockres->l_namelen, lockres->l_name);
 602          spin_lock(&lockres->l_lock);
 603          if (lockres->l_flags & USER_LOCK_IN_TEARDOWN) {
 604                  spin_unlock(&lockres->l_lock);
 608          lockres->l_flags |= USER_LOCK_IN_TEARDOWN;
 610          while (lockres->l_flags & USER_LOCK_BUSY) {
 611                  spin_unlock(&lockres->l_lock);
 613                  user_wait_on_busy_lock(lockres);
 615                  spin_lock(&lockres->l_lock);
 618          if (lockres->l_ro_holders || lockres->l_ex_holders) {
 619                  lockres->l_flags &= ~USER_LOCK_IN_TEARDOWN;
 620                  spin_unlock(&lockres->l_lock);
 625          if (!(lockres->l_flags & USER_LOCK_ATTACHED)) {
 630                  spin_unlock(&lockres->l_lock);
 634          lockres->l_flags &= ~USER_LOCK_ATTACHED;
 635          lockres->l_flags |= USER_LOCK_BUSY;
 636          spin_unlock(&lockres->l_lock);
 638          status = ocfs2_dlm_unlock(conn, &lockres->l_lksb, DLM_LKF_VALBLK);
 640                  spin_lock(&lockres->l_lock);
 641                  lockres->l_flags &= ~USER_LOCK_IN_TEARDOWN;
 642                  lockres->l_flags &= ~USER_LOCK_BUSY;
 643                  spin_unlock(&lockres->l_lock);
 644                  user_log_dlm_error("ocfs2_dlm_unlock", status, lockres);
 648          user_wait_on_busy_lock(lockres);
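A hypothetical teardown caller, pairing user_dlm_lock_res_init() with user_dlm_destroy_lock(). Reading -EBUSY as "holders remain or teardown already in progress" is an assumption inferred from the bail-outs at lines 603-620, not a documented contract, and example_teardown() is illustrative only:

    static int example_teardown(struct inode *inode)
    {
            int status;

            status = user_dlm_destroy_lock(&DLMFS_I(inode)->ip_lockres);
            if (status == -EBUSY)
                    return status;       /* still in use; try again later */
            if (status)
                    mlog_errno(status);  /* DLM-level failure */
            return status;
    }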